| edited_code (string, 17–978k chars) | original_code (string, 17–978k chars) |
|---|---|
import click
import progressbar
import python_freeipa
from collections import defaultdict
from typing import Any, Dict, List
from .status import Status, print_status
from .utils import ObjectManager
class Groups(ObjectManager):
    """Synchronize FAS group information into FreeIPA.

    Pulls group lists from one or more FAS instances, pushes them to IPA
    (optionally collecting them under a per-instance "umbrella" group), and
    records group agreement requirements at the end of a push.
    """

    def __init__(self, *args, agreements, **kwargs):
        super().__init__(*args, **kwargs)
        # Agreements manager used at the end of push_to_ipa to record
        # group requirements in IPA.
        self.agreements = agreements

    def pull_from_fas(self) -> Dict[str, List[Dict]]:
        """Fetch the group list from every configured FAS instance.

        Returns:
            Mapping of FAS instance name to its list of group dicts,
            sorted by group name.
        """
        fas_groups = {}
        for fas_name, fas_inst in self.fas_instances.items():
            click.echo(f"Pulling group information from FAS ({fas_name})...")
            fas_conf = self.config["fas"][fas_name]
            groups = fas_inst.send_request(
                "/group/list",
                req_params={"search": fas_conf["groups"]["search"]},
                auth=True,
                timeout=240,
            )["groups"]
            groups.sort(key=lambda g: g["name"])
            click.echo(f"Got {len(groups)} groups!")
            fas_groups[fas_name] = groups
        return fas_groups

    def push_to_ipa(
        self, groups: Dict[str, List[Dict]], conflicts: Dict[str, List[Dict[str, Any]]],
    ) -> dict:
        """Create or update the pulled FAS groups in IPA.

        Args:
            groups: output of pull_from_fas (FAS name -> group dicts).
            conflicts: group name -> conflict descriptions; a group whose
                conflicts intersect config["groups"]["skip_conflicts"] is
                skipped.

        Returns:
            Stats dict with groups_added / groups_edited / groups_counter.
        """
        added = 0
        edited = 0
        counter = 0
        if not conflicts:
            conflicts = {}
        skip_conflicts = set(self.config["groups"].get("skip_conflicts", ()))
        for fas_name, fas_groups in groups.items():
            click.echo(f"Pushing {fas_name} group information to IPA...")
            fas_conf = self.config["fas"][fas_name]
            # Start by creating the umbrella group, if any.
            umbrella_group = fas_conf["groups"].get("umbrella")
            if umbrella_group:
                # Single quotes inside the f-string: reusing the outer quote
                # character is a SyntaxError before Python 3.12 (PEP 701).
                click.echo(f"Ensuring umbrella group {umbrella_group['name']} exists...")
                name_max_length = max((len(g["name"]) for g in fas_groups))
                click.echo(umbrella_group["name"].ljust(name_max_length + 2), nl=False)
                status = self._write_group_to_ipa(fas_name, umbrella_group, from_fas=False)
                print_status(status)
                if status == Status.ADDED:
                    added += 1
                elif status == Status.UPDATED:
                    edited += 1
            umbrella_members = set()
            # Create the regular groups, minus the explicitly ignored ones.
            fas_groups = [
                g
                for g in fas_groups
                if g["name"] not in fas_conf["groups"].get("ignore", ())
            ]
            name_max_length = max((len(g["name"]) for g in fas_groups))
            for group in progressbar.progressbar(fas_groups, redirect_stdout=True):
                counter += 1
                group_conflicts = set(conflicts.get(group["name"], ()))
                group_skip_conflicts = skip_conflicts & group_conflicts
                if group_skip_conflicts:
                    # Fixed the unbalanced "[" and mismatched quotes in the
                    # original message.
                    print_status(
                        Status.FAILED,
                        f"[{fas_name}] Skipping group '{group['name']}' because of conflicts:"
                        f" {', '.join(group_skip_conflicts)}",
                    )
                    continue
                self.check_reauth(counter)
                click.echo(group["name"].ljust(name_max_length + 2), nl=False)
                status = self._write_group_to_ipa(fas_name, group)
                print_status(status)
                if status == Status.ADDED:
                    added += 1
                elif status == Status.UPDATED:
                    edited += 1
                if umbrella_group and status in (Status.ADDED, Status.UPDATED, Status.UNMODIFIED):
                    umbrella_members.add(
                        fas_conf["groups"].get("prefix", "") + group["name"].lower()
                    )
            if umbrella_group:
                ipa_group = self.ipa.group_show(umbrella_group["name"])
                existing_umbrella_members = set(ipa_group.get("member_group", []))
                new_umbrella_members = umbrella_members - existing_umbrella_members
                if not new_umbrella_members:
                    click.echo(f"No new members to add to umbrella group {umbrella_group['name']}")
                else:
                    click.echo(
                        f"Adding {len(new_umbrella_members)} new groups to umbrella group"
                        f" {umbrella_group['name']}"
                    )
                    self.ipa.group_add_member(
                        umbrella_group["name"], groups=list(new_umbrella_members)
                    )
            click.echo(f"Done with {fas_name}")
        # Add groups to agreements.
        click.echo("Recording group requirements in IPA...")
        self.agreements.record_group_requirements(groups)
        click.echo("Done.")
        return dict(groups_added=added, groups_edited=edited, groups_counter=counter)

    def _write_group_to_ipa(self, fas_name: str, group: dict, from_fas: bool = True):
        """Create or update a single group in IPA.

        Args:
            fas_name: name of the FAS instance the group came from.
            group: group dict (FAS format when from_fas, IPA-style otherwise).
            from_fas: when True, translate FAS fields into IPA arguments.

        Returns:
            A Status value (ADDED / UNMODIFIED / FAILED).
        """
        # Initialize up front so the error-reporting prints at the bottom
        # cannot NameError when from_fas is False (the original only defined
        # these inside the from_fas branch).
        url = mailing_list = irc_string = None
        if from_fas:
            # Transform FAS group info into what IPA expects.
            name = (
                self.config["fas"][fas_name]["groups"].get("prefix", "")
                + group["name"].lower()
            )
            # Calculate the IRC channel (FAS has 2 fields, freeipa-fas has a single one).
            # If we have an irc channel defined, try to generate the irc:// uri.
            # There are a handful of groups that have an IRC server defined (freenode), but
            # no channel, which is kind of useless, so we don't handle that case.
            irc_channel = group.get("irc_channel")
            if irc_channel:
                if irc_channel[0] == "#":
                    irc_channel = irc_channel[1:]
                # Guard against a null irc_network (the original would
                # AttributeError on .lower()); empty falls through to freenode.
                irc_network = (group.get("irc_network") or "").lower()
                if "gimp" in irc_network:
                    irc_string = f"irc://irc.gimp.org/#{irc_channel}"
                elif "oftc" in irc_network:
                    irc_string = f"irc://irc.oftc.net/#{irc_channel}"
                else:
                    # The remainder of the entries here are either blank or
                    # freenode, so we freenode them all.
                    irc_string = f"irc://irc.freenode.net/#{irc_channel}"
            url = group.get("url")
            if not url:
                url = None
            else:
                url = url.strip()
            mailing_list = group.get("mailing_list")
            if not mailing_list:
                mailing_list = None
            else:
                if "@" not in mailing_list:
                    mailing_list = f"{mailing_list}@lists.fedoraproject.org"
                mailing_list = mailing_list.strip()
                mailing_list = mailing_list.rstrip(".")
                mailing_list = mailing_list.lower()
            group_args = {
                "description": group["display_name"].strip(),
                "fasurl": url,
                "fasmailinglist": mailing_list,
                "fasircchannel": irc_string,
            }
        else:
            name = group["name"]
            group_args = {
                k: v for k, v in group.items() if k in (
                    "description", "fasurl", "fasmailinglist", "fasircchannel"
                )
            }
        group["fasgroup"] = True
        try:
            self.ipa.group_add(name, **group_args)
            return Status.ADDED
        except python_freeipa.exceptions.FreeIPAError as e:
            if e.message == 'group with name "%s" already exists' % name:
                try:
                    self.ipa.group_mod(name, **group_args)
                except python_freeipa.exceptions.FreeIPAError as e:
                    if e.message != "no modifications to be performed":
                        raise
                # NOTE(review): a successful group_mod also returns UNMODIFIED,
                # so Status.UPDATED is never produced here — confirm intended.
                return Status.UNMODIFIED
            else:
                print(e.message)
                print(e)
                print(url, mailing_list, irc_string)
                return Status.FAILED
        except Exception as e:
            print(e)
            print(url, mailing_list, irc_string)
            return Status.FAILED

    def find_group_conflicts(
        self, fas_groups: Dict[str, List[Dict]]
    ) -> Dict[str, List[str]]:
        """Compare groups from different FAS instances and flag conflicts.

        A group name that appears in more than one FAS instance is flagged
        with a "same_group_name" conflict listing the instances involved.
        """
        click.echo("Checking for conflicts between groups from different FAS instances")
        groups_to_conflicts = {}
        groupnames_to_fas = defaultdict(set)
        for fas_name, group_objs in fas_groups.items():
            for group_obj in group_objs:
                groupnames_to_fas[group_obj["name"]].add(fas_name)
        # Sort by group name for deterministic output.
        for group_name, fas_names in sorted(
            groupnames_to_fas.items(), key=lambda x: x[0]
        ):
            if len(fas_names) == 1:
                continue
            groups_to_conflicts[group_name] = group_conflicts = defaultdict(list)
            group_conflicts["same_group_name"] = {"fas_names": fas_names}
        click.echo("Done checking group conflicts.")
        click.echo(f"Found {len(groups_to_conflicts)} groups with conflicts.")
        return groups_to_conflicts
| import click
import progressbar
import python_freeipa
from collections import defaultdict
from typing import Any, Dict, List
from .status import Status, print_status
from .utils import ObjectManager
class Groups(ObjectManager):
def __init__(self, *args, agreements, **kwargs):
super().__init__(*args, **kwargs)
self.agreements = agreements
def pull_from_fas(self) -> Dict[str, List[Dict]]:
fas_groups = {}
for fas_name, fas_inst in self.fas_instances.items():
click.echo(f"Pulling group information from FAS ({fas_name})...")
fas_conf = self.config["fas"][fas_name]
groups = fas_inst.send_request(
"/group/list",
req_params={"search": fas_conf["groups"]["search"]},
auth=True,
timeout=240,
)["groups"]
groups.sort(key=lambda g: g["name"])
click.echo(f"Got {len(groups)} groups!")
fas_groups[fas_name] = groups
return fas_groups
def push_to_ipa(
self, groups: Dict[str, List[Dict]], conflicts: Dict[str, List[Dict[str, Any]]],
) -> dict:
added = 0
edited = 0
counter = 0
if not conflicts:
conflicts = {}
skip_conflicts = set(self.config["groups"].get("skip_conflicts", ()))
for fas_name, fas_groups in groups.items():
click.echo(f"Pushing {fas_name} group information to IPA...")
fas_conf = self.config["fas"][fas_name]
# Start by creating the umbrella group, if any
umbrella_group = fas_conf["groups"].get("umbrella")
if umbrella_group:
click.echo(f"Ensuring umbrella group {umbrella_group['name']} exists...")
name_max_length = max((len(g["name"]) for g in fas_groups))
click.echo(umbrella_group["name"].ljust(name_max_length + 2), nl=False)
status = self._write_group_to_ipa(fas_name, umbrella_group, from_fas=False)
print_status(status)
if status == Status.ADDED:
added += 1
elif status == Status.UPDATED:
edited += 1
umbrella_members = set()
# Start by creating groups
fas_groups = [
g
for g in fas_groups
if g["name"] not in fas_conf["groups"].get("ignore", ())
]
name_max_length = max((len(g["name"]) for g in fas_groups))
for group in progressbar.progressbar(fas_groups, redirect_stdout=True):
counter += 1
group_conflicts = set(conflicts.get(group["name"], ()))
group_skip_conflicts = skip_conflicts & group_conflicts
if group_skip_conflicts:
print_status(
Status.FAILED,
f"[{fas_name}: Skipping group '{group['name']}' because of conflicts:"
f" {', '.join(group_skip_conflicts)}",
)
continue
self.check_reauth(counter)
click.echo(group["name"].ljust(name_max_length + 2), nl=False)
status = self._write_group_to_ipa(fas_name, group)
print_status(status)
if status == Status.ADDED:
added += 1
elif status == Status.UPDATED:
edited += 1
if umbrella_group and status in (Status.ADDED, Status.UPDATED, Status.UNMODIFIED):
umbrella_members.add(
fas_conf["groups"].get("prefix", "") + group["name"].lower()
)
if umbrella_group:
ipa_group = self.ipa.group_show(umbrella_group["name"])
existing_umbrella_members = set(ipa_group.get("member_group", []))
new_umbrella_members = umbrella_members - existing_umbrella_members
if not new_umbrella_members:
click.echo(f"No new members to add to umbrella group {umbrella_group['name']}")
else:
click.echo(
f"Adding {len(new_umbrella_members)} new groups to umbrella group"
f" {umbrella_group['name']}"
)
self.ipa.group_add_member(
umbrella_group["name"], groups=list(new_umbrella_members)
)
click.echo(f"Done with {fas_name}")
# add groups to agreements
click.echo("Recording group requirements in IPA...")
self.agreements.record_group_requirements(groups)
click.echo("Done.")
return dict(groups_added=added, groups_edited=edited, groups_counter=counter,)
def _write_group_to_ipa(self, fas_name: str, group: dict, from_fas: bool = True):
if from_fas:
# transform FAS group info into what IPA expects
name = (
self.config["fas"][fas_name]["groups"].get("prefix", "")
+ group["name"].lower()
)
# calculate the IRC channel (FAS has 2 fields, freeipa-fas has a single one )
# if we have an irc channel defined. try to generate the irc:// uri
# there are a handful of groups that have an IRC server defined (freenode), but
# no channel, which is kind of useless, so we don't handle that case.
irc_channel = group.get("irc_channel")
irc_string = None
if irc_channel:
if irc_channel[0] == "#":
irc_channel = irc_channel[1:]
irc_network = group.get("irc_network").lower()
if "gimp" in irc_network:
irc_string = f"irc://irc.gimp.org/#{irc_channel}"
elif "oftc" in irc_network:
irc_string = f"irc://irc.oftc.net/#{irc_channel}"
else:
# the remainder of the entries here are either blank or
# freenode, so we freenode them all.
irc_string = f"irc://irc.freenode.net/#{irc_channel}"
url = group.get("url")
if not url:
url = None
else:
url = url.strip()
mailing_list = group.get("mailing_list")
if not mailing_list:
mailing_list = None
else:
if "@" not in mailing_list:
mailing_list = f"{mailing_list}@lists.fedoraproject.org"
mailing_list = mailing_list.strip()
mailing_list = mailing_list.rstrip(".")
mailing_list = mailing_list.lower()
group_args = {
"description": group["display_name"].strip(),
"fasurl": url,
"fasmailinglist": mailing_list,
"fasircchannel": irc_string,
}
else:
name = group["name"]
group_args = {
k: v for k, v in group.items() if k in (
"description", "fasurl", "fasmailinglist", "fasircchannel"
)
}
group["fasgroup"] = True
try:
self.ipa.group_add(name, **group_args)
return Status.ADDED
except python_freeipa.exceptions.FreeIPAError as e:
if e.message == 'group with name "%s" already exists' % name:
try:
self.ipa.group_mod(name, **group_args)
except python_freeipa.exceptions.FreeIPAError as e:
if e.message != "no modifications to be performed":
raise
return Status.UNMODIFIED
else:
print(e.message)
print(e)
print(url, mailing_list, irc_string)
return Status.FAILED
except Exception as e:
print(e)
print(url, mailing_list, irc_string)
return Status.FAILED
def find_group_conflicts(
self, fas_groups: Dict[str, List[Dict]]
) -> Dict[str, List[str]]:
"""Compare groups from different FAS instances and flag conflicts."""
click.echo("Checking for conflicts between groups from different FAS instances")
groups_to_conflicts = {}
groupnames_to_fas = defaultdict(set)
for fas_name, group_objs in fas_groups.items():
for group_obj in group_objs:
groupnames_to_fas[group_obj["name"]].add(fas_name)
for group_name, fas_names in sorted(
groupnames_to_fas.items(), key=lambda x: x[0]
):
if len(fas_names) == 1:
continue
groups_to_conflicts[group_name] = group_conflicts = defaultdict(list)
group_conflicts["same_group_name"] = {"fas_names": fas_names}
click.echo("Done checking group conflicts.")
click.echo(f"Found {len(groups_to_conflicts)} groups with conflicts.")
return groups_to_conflicts
|
import tensorflow as tf
#import tensorflow_hub as hub
import numpy as np
#import cv2
import zipfile
import json
import lzma
import os
import telenet.dataset_data as tn_data
from telenet.utils import load_image_for_vrd_yolo, mdl_yolo, parse_yolo_results
from telenet.config import get as tn_config
from tqdm import tqdm
VG_PATH = tn_config('paths.vg')
# Per-image conversion metadata; each record carries 'id', 'file', 'dir'
# (which archive it lives in) and 'split' (0 = train, 1 = test).
imgcnvdata = tn_data.load_json_xz('vg-imgcnvdata')
# Visual Genome ships its images in two zip archives; keep both open for reads.
zf1 = zipfile.ZipFile(os.path.join(VG_PATH, 'images.zip'), 'r')
zf2 = zipfile.ZipFile(os.path.join(VG_PATH, 'images2.zip'), 'r')
train_imgs = []
test_imgs = []
for obj in imgcnvdata:
    # obj['split'] indexes into the tuple: 0 -> train_imgs, 1 -> test_imgs.
    (train_imgs,test_imgs)[obj['split']].append(obj)
def load_image(db, index):
    """Read the image for db[index] from the correct VG archive.

    Returns:
        Tuple of (image id, decoded image, width, height).

    Raises:
        ValueError: if the record's 'dir' field is not 1 or 2.
    """
    obj = db[index]
    if obj['dir'] == 1:
        # Single quotes inside the f-string: reusing the outer quote character
        # is a SyntaxError before Python 3.12 (PEP 701).
        imgdata = zf1.read(f"VG_100K/{obj['file']}")
    elif obj['dir'] == 2:
        imgdata = zf2.read(f"VG_100K_2/{obj['file']}")
    else:
        # `raise "Bad dir"` raised a TypeError (exceptions must derive from
        # BaseException); raise a real exception instead.
        raise ValueError(f"Bad dir: {obj['dir']!r}")
    img, w, h = load_image_for_vrd_yolo(imgdata)
    return obj['id'], img, w, h
def load_train_image(index):
    # Convenience wrapper over load_image for the training split.
    return load_image(train_imgs, index)
def load_test_image(index):
    # Convenience wrapper over load_image for the test split.
    return load_image(test_imgs, index)
train_dataset = tf.data.Dataset.from_tensor_slices(list(range(len(train_imgs)))).map(
lambda x: tf.py_function(func=load_train_image, inp=[x], Tout=[tf.string, tf.float32, tf.float32, tf.float32]),
num_parallel_calls=tf.data.AUTOTUNE).batch(1)
test_dataset = tf.data.Dataset.from_tensor_slices(list(range(len(test_imgs)))).map(
lambda x: tf.py_function(func=load_test_image, inp=[x], Tout=[tf.string, tf.float32, tf.float32, tf.float32]),
num_parallel_calls=tf.data.AUTOTUNE).batch(1)
def convert_dataset(dataset, outfile, outfile2):
    # Run every batch through the YOLO model, writing per-image feature maps
    # as .npy entries into a zip (outfile) and the parsed detection results
    # into one xz-compressed JSON file (outfile2).
    res = {}
    with zipfile.ZipFile(tn_data.path(outfile), 'w') as zfo:
        for names,img,widths,heights in tqdm(dataset):
            names = names.numpy()
            features,yolodata = mdl_yolo(img)
            for imnm,imft,imyl,imw,imh in zip(names,features,yolodata,widths,heights):
                imnm = imnm.decode('utf-8')  # tf.py_function yields the id as bytes
                # Re-add the batch axis the per-image unpacking stripped.
                res[imnm] = parse_yolo_results(np.expand_dims(imyl, axis=0), imw, imh)
                with zfo.open(f'{imnm}.npy','w') as f:
                    np.save(f, imft)
    with lzma.open(tn_data.path(outfile2), 'wt', encoding='utf-8') as f:
        json.dump(res, f)
convert_dataset(train_dataset, 'vg-yolo-train.zip', 'vg-yolo-train-objs.json.xz')
convert_dataset(test_dataset, 'vg-yolo-test.zip', 'vg-yolo-test-objs.json.xz')
| import tensorflow as tf
#import tensorflow_hub as hub
import numpy as np
#import cv2
import zipfile
import json
import lzma
import os
import telenet.dataset_data as tn_data
from telenet.utils import load_image_for_vrd_yolo, mdl_yolo, parse_yolo_results
from telenet.config import get as tn_config
from tqdm import tqdm
VG_PATH = tn_config('paths.vg')
imgcnvdata = tn_data.load_json_xz('vg-imgcnvdata')
zf1 = zipfile.ZipFile(os.path.join(VG_PATH, 'images.zip'), 'r')
zf2 = zipfile.ZipFile(os.path.join(VG_PATH, 'images2.zip'), 'r')
train_imgs = []
test_imgs = []
for obj in imgcnvdata:
(train_imgs,test_imgs)[obj['split']].append(obj)
def load_image(db, index):
    """Read the image for db[index] from the correct VG archive.

    Returns:
        Tuple of (image id, decoded image, width, height).

    Raises:
        ValueError: if the record's 'dir' field is not 1 or 2.
    """
    obj = db[index]
    if obj['dir'] == 1:
        imgdata = zf1.read(f"VG_100K/{obj['file']}")
    elif obj['dir'] == 2:
        imgdata = zf2.read(f"VG_100K_2/{obj['file']}")
    else:
        # `raise "Bad dir"` raised a TypeError (exceptions must derive from
        # BaseException); raise a real exception instead.
        raise ValueError(f"Bad dir: {obj['dir']!r}")
    img, w, h = load_image_for_vrd_yolo(imgdata)
    return obj['id'], img, w, h
def load_train_image(index):
return load_image(train_imgs, index)
def load_test_image(index):
return load_image(test_imgs, index)
train_dataset = tf.data.Dataset.from_tensor_slices(list(range(len(train_imgs)))).map(
lambda x: tf.py_function(func=load_train_image, inp=[x], Tout=[tf.string, tf.float32, tf.float32, tf.float32]),
num_parallel_calls=tf.data.AUTOTUNE).batch(1)
test_dataset = tf.data.Dataset.from_tensor_slices(list(range(len(test_imgs)))).map(
lambda x: tf.py_function(func=load_test_image, inp=[x], Tout=[tf.string, tf.float32, tf.float32, tf.float32]),
num_parallel_calls=tf.data.AUTOTUNE).batch(1)
def convert_dataset(dataset, outfile, outfile2):
res = {}
with zipfile.ZipFile(tn_data.path(outfile), 'w') as zfo:
for names,img,widths,heights in tqdm(dataset):
names = names.numpy()
features,yolodata = mdl_yolo(img)
for imnm,imft,imyl,imw,imh in zip(names,features,yolodata,widths,heights):
imnm = imnm.decode('utf-8')
res[imnm] = parse_yolo_results(np.expand_dims(imyl, axis=0), imw, imh)
with zfo.open(f'{imnm}.npy','w') as f:
np.save(f, imft)
with lzma.open(tn_data.path(outfile2), 'wt', encoding='utf-8') as f:
json.dump(res, f)
convert_dataset(train_dataset, 'vg-yolo-train.zip', 'vg-yolo-train-objs.json.xz')
convert_dataset(test_dataset, 'vg-yolo-test.zip', 'vg-yolo-test-objs.json.xz')
|
from typing import List, Tuple, Union, Dict
from .base import Task
from backprop.models import BaseModel, AutoModel
from transformers.optimization import Adafactor
from backprop.utils.datasets import TextToTextDataset
import requests
TASK = "emotion"
DEFAULT_LOCAL_MODEL = "t5-base-qa-summary-emotion"
LOCAL_ALIASES = {
"enlgish": "t5-base-qa-summary-emotion"
}
class Emotion(Task):
"""
Task for emotion detection.
Attributes:
model:
1. Model name
2. Model name on Backprop's emotion endpoint
3. Model object that implements the emotion task
local (optional): Run locally. Defaults to False
api_key (optional): Backprop API key for non-local inference
device (optional): Device to run inference on. Defaults to "cuda" if available.
"""
def __init__(self, model: Union[str, BaseModel] = None,
local: bool = False, api_key: str = None, device: str = None):
models = AutoModel.list_models(task=TASK)
super().__init__(model, local=local, api_key=api_key, device=device,
models=models, task=TASK,
default_local_model=DEFAULT_LOCAL_MODEL,
local_aliases=LOCAL_ALIASES)
@staticmethod
def list_models(return_dict=False, display=False, limit=None):
"""
Returns the list of models that can be used and finetuned with this task.
Args:
return_dict: Default False. True if you want to return in dict form. Otherwise returns list form.
display: Default False. True if you want output printed directly (overrides return_dict, and returns nothing).
limit: Default None. Maximum number of models to return -- leave None to get all models.
"""
return AutoModel.list_models(task=TASK, return_dict=return_dict, display=display, limit=limit, aliases=LOCAL_ALIASES)
def __call__(self, text: Union[str, List[str]]):
"""Perform emotion detection on input text.
Args:
text: string or list of strings to detect emotion from
keep this under a few sentences for best performance.
Returns:
Emotion string or list of emotion strings.
"""
task_input = {
"text": text
}
if self.local:
return self.model(task_input, task="emotion")
else:
task_input["model"] = self.model
res = requests.post("https://api.backprop.co/emotion", json=task_input,
headers={"x-api-key": self.api_key}).json()
if res.get("message"):
raise Exception(f"Failed to make API request: {res["message"]}")
return res["emotion"]
def step(self, batch, batch_idx):
"""
Performs a training step and returns loss.
Args:
batch: Batch output from the dataloader
batch_idx: Batch index.
"""
return self.model.training_step(batch)
def configure_optimizers(self):
"""
Returns default optimizer for text generation (AdaFactor, learning rate 1e-3)
"""
return Adafactor(params=self.model.parameters(), lr=1e-3, scale_parameter=False, relative_step=False)
def finetune(self, params, validation_split: Union[float, Tuple[List[int], List[int]]]=0.15,
max_input_length: int=256, max_output_length: int=32,
epochs: int=20, batch_size: int=None,
optimal_batch_size: int=None, early_stopping_epochs: int=1,
train_dataloader=None, val_dataloader=None, step=None,
configure_optimizers=None):
"""
Finetunes a generative model for sentiment detection.
Note:
input_text and output_text in params must have matching ordering (item 1 of input must match item 1 of output)
Args:
params: Dictionary of model inputs. Contains 'input_text' and 'output_text' keys, with values as lists of input/output data.
max_input_length: Maximum number of tokens (1 token ~ 1 word) in input. Anything higher will be truncated. Max 512.
max_output_length: Maximum number of tokens (1 token ~ 1 word) in output. Anything higher will be truncated. Max 512.
validation_split: Float between 0 and 1 that determines what percentage of the data to use for validation.
epochs: Integer specifying how many training iterations to run.
batch_size: Batch size when training. Leave as None to automatically determine batch size.
optimal_batch_size: Optimal batch size for the model being trained -- defaults to model settings.
early_stopping_epochs: Integer determining how many epochs will run before stopping without an improvement in validation loss.
train_dataloader: Dataloader for providing training data when finetuning. Defaults to inbuilt dataloder.
val_dataloader: Dataloader for providing validation data when finetuning. Defaults to inbuilt dataloader.
step: Function determining how to call model for a training step. Defaults to step defined in this task class.
configure_optimizers: Function that sets up the optimizer for training. Defaults to optimizer defined in this task class.
Examples::
import backprop
emote = backprop.Emotion()
# Provide sentiment data for training
inp = ["I really liked the service I received!", "Meh, it was not impressive."]
out = ["positive", "negative"]
params = {"input_text": inp, "output_text": out}
# Finetune
emote.finetune(params)
"""
inputs = params["input_text"]
outputs = params["output_text"]
assert len(inputs) == len(outputs)
step = step or self.step
configure_optimizers = configure_optimizers or self.configure_optimizers
dataset_params = {
"input": inputs,
"output": outputs,
"max_input_length": max_input_length,
"max_output_length": max_output_length
}
print("Processing data...")
dataset = TextToTextDataset(dataset_params, task=TASK, process_batch=self.model.process_batch, length=len(inputs))
super().finetune(dataset=dataset, validation_split=validation_split,
epochs=epochs, batch_size=batch_size, optimal_batch_size=optimal_batch_size,
early_stopping_epochs=early_stopping_epochs,
train_dataloader=train_dataloader, val_dataloader=val_dataloader,
step=step, configure_optimizers=configure_optimizers) | from typing import List, Tuple, Union, Dict
from .base import Task
from backprop.models import BaseModel, AutoModel
from transformers.optimization import Adafactor
from backprop.utils.datasets import TextToTextDataset
import requests
TASK = "emotion"
DEFAULT_LOCAL_MODEL = "t5-base-qa-summary-emotion"
LOCAL_ALIASES = {
"enlgish": "t5-base-qa-summary-emotion"
}
class Emotion(Task):
"""
Task for emotion detection.
Attributes:
model:
1. Model name
2. Model name on Backprop's emotion endpoint
3. Model object that implements the emotion task
local (optional): Run locally. Defaults to False
api_key (optional): Backprop API key for non-local inference
device (optional): Device to run inference on. Defaults to "cuda" if available.
"""
def __init__(self, model: Union[str, BaseModel] = None,
local: bool = False, api_key: str = None, device: str = None):
models = AutoModel.list_models(task=TASK)
super().__init__(model, local=local, api_key=api_key, device=device,
models=models, task=TASK,
default_local_model=DEFAULT_LOCAL_MODEL,
local_aliases=LOCAL_ALIASES)
@staticmethod
def list_models(return_dict=False, display=False, limit=None):
"""
Returns the list of models that can be used and finetuned with this task.
Args:
return_dict: Default False. True if you want to return in dict form. Otherwise returns list form.
display: Default False. True if you want output printed directly (overrides return_dict, and returns nothing).
limit: Default None. Maximum number of models to return -- leave None to get all models.
"""
return AutoModel.list_models(task=TASK, return_dict=return_dict, display=display, limit=limit, aliases=LOCAL_ALIASES)
def __call__(self, text: Union[str, List[str]]):
"""Perform emotion detection on input text.
Args:
text: string or list of strings to detect emotion from
keep this under a few sentences for best performance.
Returns:
Emotion string or list of emotion strings.
"""
task_input = {
"text": text
}
if self.local:
return self.model(task_input, task="emotion")
else:
task_input["model"] = self.model
res = requests.post("https://api.backprop.co/emotion", json=task_input,
headers={"x-api-key": self.api_key}).json()
if res.get("message"):
raise Exception(f"Failed to make API request: {res['message']}")
return res["emotion"]
def step(self, batch, batch_idx):
"""
Performs a training step and returns loss.
Args:
batch: Batch output from the dataloader
batch_idx: Batch index.
"""
return self.model.training_step(batch)
def configure_optimizers(self):
"""
Returns default optimizer for text generation (AdaFactor, learning rate 1e-3)
"""
return Adafactor(params=self.model.parameters(), lr=1e-3, scale_parameter=False, relative_step=False)
def finetune(self, params, validation_split: Union[float, Tuple[List[int], List[int]]]=0.15,
max_input_length: int=256, max_output_length: int=32,
epochs: int=20, batch_size: int=None,
optimal_batch_size: int=None, early_stopping_epochs: int=1,
train_dataloader=None, val_dataloader=None, step=None,
configure_optimizers=None):
"""
Finetunes a generative model for sentiment detection.
Note:
input_text and output_text in params must have matching ordering (item 1 of input must match item 1 of output)
Args:
params: Dictionary of model inputs. Contains 'input_text' and 'output_text' keys, with values as lists of input/output data.
max_input_length: Maximum number of tokens (1 token ~ 1 word) in input. Anything higher will be truncated. Max 512.
max_output_length: Maximum number of tokens (1 token ~ 1 word) in output. Anything higher will be truncated. Max 512.
validation_split: Float between 0 and 1 that determines what percentage of the data to use for validation.
epochs: Integer specifying how many training iterations to run.
batch_size: Batch size when training. Leave as None to automatically determine batch size.
optimal_batch_size: Optimal batch size for the model being trained -- defaults to model settings.
early_stopping_epochs: Integer determining how many epochs will run before stopping without an improvement in validation loss.
train_dataloader: Dataloader for providing training data when finetuning. Defaults to inbuilt dataloder.
val_dataloader: Dataloader for providing validation data when finetuning. Defaults to inbuilt dataloader.
step: Function determining how to call model for a training step. Defaults to step defined in this task class.
configure_optimizers: Function that sets up the optimizer for training. Defaults to optimizer defined in this task class.
Examples::
import backprop
emote = backprop.Emotion()
# Provide sentiment data for training
inp = ["I really liked the service I received!", "Meh, it was not impressive."]
out = ["positive", "negative"]
params = {"input_text": inp, "output_text": out}
# Finetune
emote.finetune(params)
"""
inputs = params["input_text"]
outputs = params["output_text"]
assert len(inputs) == len(outputs)
step = step or self.step
configure_optimizers = configure_optimizers or self.configure_optimizers
dataset_params = {
"input": inputs,
"output": outputs,
"max_input_length": max_input_length,
"max_output_length": max_output_length
}
print("Processing data...")
dataset = TextToTextDataset(dataset_params, task=TASK, process_batch=self.model.process_batch, length=len(inputs))
super().finetune(dataset=dataset, validation_split=validation_split,
epochs=epochs, batch_size=batch_size, optimal_batch_size=optimal_batch_size,
early_stopping_epochs=early_stopping_epochs,
train_dataloader=train_dataloader, val_dataloader=val_dataloader,
step=step, configure_optimizers=configure_optimizers) |
import datetime
import json
import os
import sys
import time
import googleapiclient
from gam.var import *
import gam
from gam import controlflow
from gam import display
from gam import fileutils
from gam import gapi
from gam.gapi import directory as gapi_directory
from gam.gapi import errors as gapi_errors
from gam.gapi.directory import orgunits as gapi_directory_orgunits
from gam import utils
def _display_cros_command_result(cd, device_id, command_id, times_to_check_status):
    # Poll a previously issued ChromeOS device command and print each result.
    # Polls every 2 seconds, up to times_to_check_status attempts, returning
    # early once the command reaches a final state.
    print(f'deviceId: {device_id}, commandId: {command_id}')
    final_states = {'EXPIRED', 'CANCELLED', 'EXECUTED_BY_CLIENT'}
    for _ in range(0, times_to_check_status):
        time.sleep(2)
        result = gapi.call(cd.customer().devices().chromeos().commands(), 'get',
                           customerId=GC_Values[GC_CUSTOMER_ID], deviceId=device_id,
                           commandId=command_id)
        display.print_json(result)
        if result.get('state') in final_states:
            return
def issue_command():
    """Implement `gam issuecommand cros`: send a command to ChromeOS devices.

    Parses arguments from sys.argv (starting after the device entity),
    validates the command against the API's discovery document, requires the
    `doit` acknowledgement for destructive commands, then issues the command
    to each device and polls its result.
    """
    cd = gapi_directory.build()
    i, devices = getCrOSDeviceEntity(3, cd)
    body = {}
    # Valid command types come straight from the discovery document enum.
    valid_commands = gapi.get_enum_values_minus_unspecified(
        cd._rootDesc['schemas']
        ['DirectoryChromeosdevicesIssueCommandRequest']
        ['properties']['commandType']['enum'])
    command_map = {}
    for valid_command in valid_commands:
        # Normalize to lowercase without underscores for lenient matching.
        v = valid_command.lower().replace('_', '')
        command_map[v] = valid_command
    times_to_check_status = 1
    doit = False
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg == 'command':
            command = sys.argv[i+1].lower().replace('_', '')
            if command not in command_map:
                # Double quotes inside the f-string: reusing the outer quote
                # character is a SyntaxError before Python 3.12 (PEP 701).
                controlflow.system_error_exit(2, f'expected command of ' \
                    f'{", ".join(valid_commands)} got {command}')
            body['commandType'] = command_map[command]
            i += 2
            if command == 'setvolume':
                # NOTE(review): volume is passed through as the raw argv
                # string — confirm the API accepts a string rather than an int.
                body['payload'] = json.dumps({'volume': sys.argv[i]})
                i += 1
        elif myarg == 'timestocheckstatus':
            times_to_check_status = int(sys.argv[i+1])
            i += 2
        elif myarg == 'doit':
            doit = True
            i += 1
        else:
            controlflow.invalid_argument_exit(sys.argv[i], 'gam issuecommand cros')
    if 'commandType' not in body:
        controlflow.missing_argument_exit('command <CrOSCommand>', 'gam issuecommand cros')
    # Destructive commands require explicit acknowledgement via `doit`.
    if body['commandType'] == 'WIPE_USERS' and not doit:
        controlflow.system_error_exit(2, 'wipe_users command requires admin ' \
            'acknowledge user data will be destroyed with the ' \
            'doit argument')
    if body['commandType'] == 'REMOTE_POWERWASH' and not doit:
        controlflow.system_error_exit(2, 'remote_powerwash command requires ' \
            'admin acknowledge user data will be destroyed, device will need' \
            ' to be reconnected to WiFi and re-enrolled with the doit argument')
    for device_id in devices:
        try:
            result = gapi.call(cd.customer().devices().chromeos(), 'issueCommand',
                               customerId=GC_Values[GC_CUSTOMER_ID], deviceId=device_id,
                               throw_reasons=[gapi_errors.ErrorReason.FOUR_O_O],
                               body=body)
        except googleapiclient.errors.HttpError:
            controlflow.system_error_exit(4, '400 response from Google. This ' \
                'usually indicates the devices was not in a state where it will' \
                ' accept the command. For example, reboot, set_volume and take_a_screenshot' \
                ' require the device to be in auto-start kiosk app mode.')
        command_id = result.get('commandId')
        _display_cros_command_result(cd, device_id, command_id, times_to_check_status)
def get_command():
    """Handle "gam getcommand cros <entity> ..." - show command status.

    Optional arguments: commandid <id> and times_to_check_status <n>
    (number of status polls per device, default 1).
    """
    cd = gapi_directory.build()
    arg_index, devices = getCrOSDeviceEntity(3, cd)
    command_id = None
    times_to_check_status = 1
    argc = len(sys.argv)
    while arg_index < argc:
        myarg = sys.argv[arg_index].lower().replace('_', '')
        if myarg == 'commandid':
            command_id = sys.argv[arg_index + 1]
            arg_index += 2
        elif myarg == 'timestocheckstatus':
            times_to_check_status = int(sys.argv[arg_index + 1])
            arg_index += 2
        else:
            controlflow.invalid_argument_exit(sys.argv[arg_index], 'gam getcommand cros')
    for device_id in devices:
        _display_cros_command_result(cd, device_id, command_id, times_to_check_status)
def doUpdateCros():
    """Handle "gam update cros <entity> ..." - update, move or act on devices.

    Parses sys.argv for annotation updates (user/location/notes/asset id),
    an org unit move, or a device action (disable/reenable/deprovision*).
    When an action is given it takes precedence and any annotation updates
    are silently ignored; deprovision additionally requires the
    acknowledge_device_touch_requirement flag.
    """
    cd = gapi_directory.build()
    i, devices = getCrOSDeviceEntity(3, cd)
    update_body = {}
    action_body = {}
    orgUnitPath = None
    ack_wipe = False
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg == 'user':
            update_body['annotatedUser'] = sys.argv[i + 1]
            i += 2
        elif myarg == 'location':
            update_body['annotatedLocation'] = sys.argv[i + 1]
            i += 2
        elif myarg == 'notes':
            # Allow a literal "\n" on the command line to become a newline.
            update_body['notes'] = sys.argv[i + 1].replace('\\n', '\n')
            i += 2
        elif myarg in ['tag', 'asset', 'assetid']:
            update_body['annotatedAssetId'] = sys.argv[i + 1]
            i += 2
        elif myarg in ['ou', 'org']:
            orgUnitPath = gapi_directory_orgunits.getOrgUnitItem(sys.argv[i + 1])
            i += 2
        elif myarg == 'action':
            # Normalize the action name, then map the deprovision variants
            # onto action=deprovision plus a deprovisionReason.
            action = sys.argv[i + 1].lower().replace('_', '').replace('-', '')
            deprovisionReason = None
            if action in [
                'deprovisionsamemodelreplace',
                'deprovisionsamemodelreplacement'
            ]:
                action = 'deprovision'
                deprovisionReason = 'same_model_replacement'
            elif action in [
                'deprovisiondifferentmodelreplace',
                'deprovisiondifferentmodelreplacement'
            ]:
                action = 'deprovision'
                deprovisionReason = 'different_model_replacement'
            elif action in ['deprovisionretiringdevice']:
                action = 'deprovision'
                deprovisionReason = 'retiring_device'
            elif action == 'deprovisionupgradetransfer':
                action = 'deprovision'
                deprovisionReason = 'upgrade_transfer'
            elif action not in ['disable', 'reenable']:
                controlflow.system_error_exit(2, f'expected action of ' \
                    f'deprovision_same_model_replace, ' \
                    f'deprovision_different_model_replace, ' \
                    f'deprovision_retiring_device, ' \
                    f'deprovision_upgrade_transfer, disable or reenable,'
                    f' got {action}')
            action_body = {'action': action}
            if deprovisionReason:
                action_body['deprovisionReason'] = deprovisionReason
            i += 2
        elif myarg == 'acknowledgedevicetouchrequirement':
            ack_wipe = True
            i += 1
        else:
            controlflow.invalid_argument_exit(sys.argv[i], 'gam update cros')
    i = 0
    count = len(devices)
    if action_body:
        # Deprovisioning is destructive; require explicit acknowledgement.
        if action_body['action'] == 'deprovision' and not ack_wipe:
            print(f'WARNING: Refusing to deprovision {count} devices because '
                  'acknowledge_device_touch_requirement not specified. ' \
                  'Deprovisioning a device means the device will have to ' \
                  'be physically wiped and re-enrolled to be managed by ' \
                  'your domain again. This requires physical access to ' \
                  'the device and is very time consuming to perform for ' \
                  'each device. Please add ' \
                  '"acknowledge_device_touch_requirement" to the GAM ' \
                  'command if you understand this and wish to proceed ' \
                  'with the deprovision. Please also be aware that ' \
                  'deprovisioning can have an effect on your device ' \
                  'license count. See ' \
                  'https://support.google.com/chrome/a/answer/3523633 '\
                  'for full details.')
            sys.exit(3)
        for deviceId in devices:
            i += 1
            cur_count = gam.currentCount(i, count)
            print(f' performing action {action} for {deviceId}{cur_count}')
            gapi.call(cd.chromeosdevices(),
                      function='action',
                      customerId=GC_Values[GC_CUSTOMER_ID],
                      resourceId=deviceId,
                      body=action_body)
    else:
        if update_body:
            for deviceId in devices:
                i += 1
                current_count = gam.currentCount(i, count)
                print(f' updating {deviceId}{current_count}')
                gapi.call(cd.chromeosdevices(),
                          'update',
                          customerId=GC_Values[GC_CUSTOMER_ID],
                          deviceId=deviceId,
                          body=update_body)
        if orgUnitPath:
            # The moveDevicesToOu API accepts at most 50 devices per call,
            # so split moves into batches of 50.
            for chunk_start in range(0, len(devices), 50):
                move_body = {'deviceIds': devices[chunk_start:chunk_start + 50]}
                # Inner quotes kept distinct from the f-string quotes so this
                # parses on Python < 3.12 (pre-PEP 701).
                print(f' moving {len(move_body["deviceIds"])} devices to ' \
                      f'{orgUnitPath}')
                gapi.call(cd.chromeosdevices(),
                          'moveDevicesToOu',
                          customerId=GC_Values[GC_CUSTOMER_ID],
                          orgUnitPath=orgUnitPath,
                          body=move_body)
def doGetCrosInfo():
    """Handle "gam info cros <entity> ..." - print CrOS device details.

    Supports BASIC/FULL projections and field selection, date filtering and
    listlimit truncation of the repeated lists (active time ranges, recent
    users, device files, CPU/disk/RAM reports), and optional download of a
    device log file into targetfolder.
    """
    cd = gapi_directory.build()
    i, devices = getCrOSDeviceEntity(3, cd)
    downloadfile = None
    targetFolder = GC_Values[GC_DRIVE_DIR]
    projection = None
    fieldsList = []
    noLists = False
    startDate = endDate = None
    listLimit = 0
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg == 'nolists':
            noLists = True
            i += 1
        elif myarg == 'listlimit':
            listLimit = gam.getInteger(sys.argv[i + 1], myarg, minVal=-1)
            i += 2
        elif myarg in CROS_START_ARGUMENTS:
            startDate = _getFilterDate(sys.argv[i + 1])
            i += 2
        elif myarg in CROS_END_ARGUMENTS:
            endDate = _getFilterDate(sys.argv[i + 1])
            i += 2
        elif myarg == 'allfields':
            projection = 'FULL'
            fieldsList = []
            i += 1
        elif myarg in PROJECTION_CHOICES_MAP:
            projection = PROJECTION_CHOICES_MAP[myarg]
            if projection == 'FULL':
                fieldsList = []
            else:
                fieldsList = CROS_BASIC_FIELDS_LIST[:]
            i += 1
        elif myarg in CROS_ARGUMENT_TO_PROPERTY_MAP:
            fieldsList.extend(CROS_ARGUMENT_TO_PROPERTY_MAP[myarg])
            i += 1
        elif myarg == 'fields':
            fieldNameList = sys.argv[i + 1]
            for field in fieldNameList.lower().replace(',', ' ').split():
                if field in CROS_ARGUMENT_TO_PROPERTY_MAP:
                    fieldsList.extend(CROS_ARGUMENT_TO_PROPERTY_MAP[field])
                    # Repeated lists are only returned with a FULL projection.
                    if field in CROS_ACTIVE_TIME_RANGES_ARGUMENTS + \
                       CROS_DEVICE_FILES_ARGUMENTS + \
                       CROS_RECENT_USERS_ARGUMENTS:
                        projection = 'FULL'
                        noLists = False
                else:
                    controlflow.invalid_argument_exit(field,
                                                      'gam info cros fields')
            i += 2
        elif myarg == 'downloadfile':
            downloadfile = sys.argv[i + 1]
            if downloadfile.lower() == 'latest':
                downloadfile = downloadfile.lower()
            i += 2
        elif myarg == 'targetfolder':
            targetFolder = os.path.expanduser(sys.argv[i + 1])
            if not os.path.isdir(targetFolder):
                os.makedirs(targetFolder)
            i += 2
        else:
            controlflow.invalid_argument_exit(sys.argv[i], 'gam info cros')
    if fieldsList:
        fieldsList.append('deviceId')
        fields = ','.join(set(fieldsList)).replace('.', '/')
    else:
        fields = None
    i = 0
    device_count = len(devices)
    for deviceId in devices:
        i += 1
        cros = gapi.call(cd.chromeosdevices(),
                         'get',
                         customerId=GC_Values[GC_CUSTOMER_ID],
                         deviceId=deviceId,
                         projection=projection,
                         fields=fields)
        print(f'CrOS Device: {deviceId} ({i} of {device_count})')
        if 'notes' in cros:
            # Keep notes on a single printed line.
            cros['notes'] = cros['notes'].replace('\n', '\\n')
        if 'autoUpdateExpiration' in cros:
            cros['autoUpdateExpiration'] = utils.formatTimestampYMD(
                cros['autoUpdateExpiration'])
        if 'orgUnitId' in cros:
            # Inner quotes kept distinct from the f-string quotes so this
            # parses on Python < 3.12 (pre-PEP 701).
            cros['orgUnitId'] = f"id:{cros['orgUnitId']}"
        _checkTPMVulnerability(cros)
        for up in CROS_SCALAR_PROPERTY_PRINT_ORDER:
            if up in cros:
                if isinstance(cros[up], str):
                    print(f' {up}: {cros[up]}')
                else:
                    sys.stdout.write(f' {up}:')
                    display.print_json(cros[up], ' ')
        if not noLists:
            activeTimeRanges = _filterTimeRanges(
                cros.get('activeTimeRanges', []), startDate, endDate)
            lenATR = len(activeTimeRanges)
            if lenATR:
                print(' activeTimeRanges')
                num_ranges = min(lenATR, listLimit or lenATR)
                for activeTimeRange in activeTimeRanges[:num_ranges]:
                    active_date = activeTimeRange['date']
                    active_time = activeTimeRange['activeTime']
                    duration = utils.formatMilliSeconds(active_time)
                    minutes = active_time // 60000
                    print(f' date: {active_date}')
                    print(f' activeTime: {active_time}')
                    print(f' duration: {duration}')
                    print(f' minutes: {minutes}')
            recentUsers = cros.get('recentUsers', [])
            lenRU = len(recentUsers)
            if lenRU:
                print(' recentUsers')
                num_ranges = min(lenRU, listLimit or lenRU)
                for recentUser in recentUsers[:num_ranges]:
                    useremail = recentUser.get('email')
                    if not useremail:
                        if recentUser['type'] == 'USER_TYPE_UNMANAGED':
                            useremail = 'UnmanagedUser'
                        else:
                            useremail = 'Unknown'
                    print(f' type: {recentUser["type"]}')
                    print(f' email: {useremail}')
            deviceFiles = _filterCreateReportTime(cros.get('deviceFiles',
                                                           []), 'createTime',
                                                  startDate, endDate)
            lenDF = len(deviceFiles)
            if lenDF:
                num_ranges = min(lenDF, listLimit or lenDF)
                print(' deviceFiles')
                for deviceFile in deviceFiles[:num_ranges]:
                    device_type = deviceFile['type']
                    create_time = deviceFile['createTime']
                    print(f' {device_type}: {create_time}')
            if downloadfile:
                deviceFiles = cros.get('deviceFiles', [])
                lenDF = len(deviceFiles)
                if lenDF:
                    if downloadfile == 'latest':
                        deviceFile = deviceFiles[-1]
                    else:
                        for deviceFile in deviceFiles:
                            if deviceFile['createTime'] == downloadfile:
                                break
                        else:
                            print(f'ERROR: file {downloadfile} not ' \
                                  f'available to download.')
                            deviceFile = None
                    if deviceFile:
                        created = deviceFile['createTime']
                        # Build the local filename in its own variable so the
                        # 'latest'/timestamp selector in downloadfile is not
                        # clobbered - previously it was overwritten here,
                        # breaking the download for every subsequent device.
                        download_name = f'cros-logs-{deviceId}-{created}.zip'
                        downloadfilename = os.path.join(targetFolder,
                                                        download_name)
                        dl_url = deviceFile['downloadUrl']
                        _, content = cd._http.request(dl_url)
                        fileutils.write_file(downloadfilename,
                                             content,
                                             mode='wb',
                                             continue_on_error=True)
                        print(f'Downloaded: {downloadfilename}')
                elif downloadfile:
                    print('ERROR: no files to download.')
            cpuStatusReports = _filterCreateReportTime(
                cros.get('cpuStatusReports', []), 'reportTime', startDate,
                endDate)
            lenCSR = len(cpuStatusReports)
            if lenCSR:
                print(' cpuStatusReports')
                num_ranges = min(lenCSR, listLimit or lenCSR)
                for cpuStatusReport in cpuStatusReports[:num_ranges]:
                    print(f' reportTime: {cpuStatusReport["reportTime"]}')
                    print(' cpuTemperatureInfo')
                    tempInfos = cpuStatusReport.get('cpuTemperatureInfo', [])
                    for tempInfo in tempInfos:
                        temp_label = tempInfo['label'].strip()
                        temperature = tempInfo['temperature']
                        print(f' {temp_label}: {temperature}')
                    if 'cpuUtilizationPercentageInfo' in cpuStatusReport:
                        pct_info = cpuStatusReport['cpuUtilizationPercentageInfo']
                        util = ','.join([str(x) for x in pct_info])
                        print(f' cpuUtilizationPercentageInfo: {util}')
            diskVolumeReports = cros.get('diskVolumeReports', [])
            lenDVR = len(diskVolumeReports)
            if lenDVR:
                print(' diskVolumeReports')
                print(' volumeInfo')
                num_ranges = min(lenDVR, listLimit or lenDVR)
                for diskVolumeReport in diskVolumeReports[:num_ranges]:
                    volumeInfo = diskVolumeReport['volumeInfo']
                    for volume in volumeInfo:
                        vid = volume['volumeId']
                        vstorage_free = volume['storageFree']
                        vstorage_total = volume['storageTotal']
                        print(f' volumeId: {vid}')
                        print(f' storageFree: {vstorage_free}')
                        print(f' storageTotal: {vstorage_total}')
            systemRamFreeReports = _filterCreateReportTime(
                cros.get('systemRamFreeReports', []), 'reportTime', startDate,
                endDate)
            lenSRFR = len(systemRamFreeReports)
            if lenSRFR:
                print(' systemRamFreeReports')
                num_ranges = min(lenSRFR, listLimit or lenSRFR)
                for systemRamFreeReport in systemRamFreeReports[:num_ranges]:
                    report_time = systemRamFreeReport['reportTime']
                    free_info = systemRamFreeReport['systemRamFreeInfo']
                    free_ram = ','.join(free_info)
                    print(f' reportTime: {report_time}')
                    print(f' systemRamFreeInfo: {free_ram}')
def doPrintCrosActivity():
    """Handle "gam print crosactivity ..." - CSV of per-device activity.

    Emits one row per active time range / device file entry and a single
    row carrying the delimited recent-users list, for every matching CrOS
    device. Defaults to activity + recent users when no list is selected.
    """
    cd = gapi_directory.build()
    todrive = False
    titles = [
        'deviceId', 'annotatedAssetId', 'annotatedLocation', 'serialNumber',
        'orgUnitPath'
    ]
    csvRows = []
    fieldsList = [
        'deviceId', 'annotatedAssetId', 'annotatedLocation', 'serialNumber',
        'orgUnitPath'
    ]
    startDate = endDate = None
    selectActiveTimeRanges = selectDeviceFiles = selectRecentUsers = False
    listLimit = 0
    delimiter = ','
    orgUnitPath = None
    queries = [None]
    i = 3
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg in ['query', 'queries']:
            queries = gam.getQueries(myarg, sys.argv[i + 1])
            i += 2
        elif myarg == 'limittoou':
            orgUnitPath = gapi_directory_orgunits.getOrgUnitItem(sys.argv[i + 1])
            i += 2
        elif myarg == 'todrive':
            todrive = True
            i += 1
        elif myarg in CROS_ACTIVE_TIME_RANGES_ARGUMENTS:
            selectActiveTimeRanges = True
            i += 1
        elif myarg in CROS_DEVICE_FILES_ARGUMENTS:
            selectDeviceFiles = True
            i += 1
        elif myarg in CROS_RECENT_USERS_ARGUMENTS:
            selectRecentUsers = True
            i += 1
        elif myarg == 'both':
            selectActiveTimeRanges = selectRecentUsers = True
            i += 1
        elif myarg == 'all':
            selectActiveTimeRanges = selectDeviceFiles = True
            selectRecentUsers = True
            i += 1
        elif myarg in CROS_START_ARGUMENTS:
            startDate = _getFilterDate(sys.argv[i + 1])
            i += 2
        elif myarg in CROS_END_ARGUMENTS:
            endDate = _getFilterDate(sys.argv[i + 1])
            i += 2
        elif myarg == 'listlimit':
            listLimit = gam.getInteger(sys.argv[i + 1], myarg, minVal=0)
            i += 2
        elif myarg == 'delimiter':
            delimiter = sys.argv[i + 1]
            i += 2
        else:
            controlflow.invalid_argument_exit(sys.argv[i],
                                              'gam print crosactivity')
    # Default selection when the user asked for nothing specific.
    if not selectActiveTimeRanges and \
       not selectDeviceFiles and \
       not selectRecentUsers:
        selectActiveTimeRanges = selectRecentUsers = True
    if selectRecentUsers:
        fieldsList.append('recentUsers')
        display.add_titles_to_csv_file([
            'recentUsers.email',
        ], titles)
    if selectActiveTimeRanges:
        fieldsList.append('activeTimeRanges')
        titles_to_add = [
            'activeTimeRanges.date', 'activeTimeRanges.duration',
            'activeTimeRanges.minutes'
        ]
        display.add_titles_to_csv_file(titles_to_add, titles)
    if selectDeviceFiles:
        fieldsList.append('deviceFiles')
        titles_to_add = ['deviceFiles.type', 'deviceFiles.createTime']
        display.add_titles_to_csv_file(titles_to_add, titles)
    # Inner quotes kept distinct from the f-string quotes so this parses on
    # Python < 3.12 (pre-PEP 701).
    fields = f'nextPageToken,chromeosdevices({",".join(fieldsList)})'
    for query in queries:
        gam.printGettingAllItems('CrOS Devices', query)
        page_message = gapi.got_total_items_msg('CrOS Devices', '...\n')
        all_cros = gapi.get_all_pages(cd.chromeosdevices(),
                                      'list',
                                      'chromeosdevices',
                                      page_message=page_message,
                                      query=query,
                                      customerId=GC_Values[GC_CUSTOMER_ID],
                                      projection='FULL',
                                      fields=fields,
                                      orgUnitPath=orgUnitPath)
        for cros in all_cros:
            row = {}
            skip_attribs = ['recentUsers', 'activeTimeRanges', 'deviceFiles']
            for attrib in cros:
                if attrib not in skip_attribs:
                    row[attrib] = cros[attrib]
            if selectActiveTimeRanges:
                activeTimeRanges = _filterTimeRanges(
                    cros.get('activeTimeRanges', []), startDate, endDate)
                lenATR = len(activeTimeRanges)
                num_ranges = min(lenATR, listLimit or lenATR)
                for activeTimeRange in activeTimeRanges[:num_ranges]:
                    newrow = row.copy()
                    newrow['activeTimeRanges.date'] = activeTimeRange['date']
                    active_time = activeTimeRange['activeTime']
                    newrow['activeTimeRanges.duration'] = \
                        utils.formatMilliSeconds(active_time)
                    newrow['activeTimeRanges.minutes'] = \
                        activeTimeRange['activeTime']//60000
                    csvRows.append(newrow)
            if selectRecentUsers:
                recentUsers = cros.get('recentUsers', [])
                lenRU = len(recentUsers)
                num_ranges = min(lenRU, listLimit or lenRU)
                recent_users = []
                for recentUser in recentUsers[:num_ranges]:
                    useremail = recentUser.get('email')
                    if not useremail:
                        # Unmanaged sessions have no email in the API result.
                        if recentUser['type'] == 'USER_TYPE_UNMANAGED':
                            useremail = 'UnmanagedUser'
                        else:
                            useremail = 'Unknown'
                    recent_users.append(useremail)
                row['recentUsers.email'] = delimiter.join(recent_users)
                csvRows.append(row)
            if selectDeviceFiles:
                deviceFiles = _filterCreateReportTime(
                    cros.get('deviceFiles', []), 'createTime', startDate,
                    endDate)
                lenDF = len(deviceFiles)
                num_ranges = min(lenDF, listLimit or lenDF)
                for deviceFile in deviceFiles[:num_ranges]:
                    newrow = row.copy()
                    newrow['deviceFiles.type'] = deviceFile['type']
                    create_time = deviceFile['createTime']
                    newrow['deviceFiles.createTime'] = create_time
                    csvRows.append(newrow)
    display.write_csv_file(csvRows, titles, 'CrOS Activity', todrive)
def _checkTPMVulnerability(cros):
if 'tpmVersionInfo' in cros and \
'firmwareVersion' in cros['tpmVersionInfo']:
firmware_version = cros['tpmVersionInfo']['firmwareVersion']
if firmware_version in CROS_TPM_VULN_VERSIONS:
cros['tpmVersionInfo']['tpmVulnerability'] = 'VULNERABLE'
elif firmware_version in CROS_TPM_FIXED_VERSIONS:
cros['tpmVersionInfo']['tpmVulnerability'] = 'UPDATED'
else:
cros['tpmVersionInfo']['tpmVulnerability'] = 'NOT IMPACTED'
def doPrintCrosDevices():
    """Handle "gam print cros ..." - output CrOS devices as CSV.

    Builds one row per device, or one row per repeated-list entry when any
    of the lists (active time ranges, recent users, device files,
    CPU/disk/RAM reports) is selected; list entries can be date-filtered
    and truncated with listlimit.
    """

    def _getSelectedLists(myarg):
        # Map a list-selection argument onto the selectedLists flags.
        if myarg in CROS_ACTIVE_TIME_RANGES_ARGUMENTS:
            selectedLists['activeTimeRanges'] = True
        elif myarg in CROS_RECENT_USERS_ARGUMENTS:
            selectedLists['recentUsers'] = True
        elif myarg in CROS_DEVICE_FILES_ARGUMENTS:
            selectedLists['deviceFiles'] = True
        elif myarg in CROS_CPU_STATUS_REPORTS_ARGUMENTS:
            selectedLists['cpuStatusReports'] = True
        elif myarg in CROS_DISK_VOLUME_REPORTS_ARGUMENTS:
            selectedLists['diskVolumeReports'] = True
        elif myarg in CROS_SYSTEM_RAM_FREE_REPORTS_ARGUMENTS:
            selectedLists['systemRamFreeReports'] = True

    cd = gapi_directory.build()
    todrive = False
    fieldsList = []
    fieldsTitles = {}
    titles = []
    csvRows = []
    display.add_field_to_csv_file('deviceid', CROS_ARGUMENT_TO_PROPERTY_MAP,
                                  fieldsList, fieldsTitles, titles)
    projection = orderBy = sortOrder = orgUnitPath = None
    queries = [None]
    noLists = sortHeaders = False
    selectedLists = {}
    startDate = endDate = None
    listLimit = 0
    i = 3
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg in ['query', 'queries']:
            queries = gam.getQueries(myarg, sys.argv[i + 1])
            i += 2
        elif myarg == 'limittoou':
            orgUnitPath = gapi_directory_orgunits.getOrgUnitItem(sys.argv[i + 1])
            i += 2
        elif myarg == 'todrive':
            todrive = True
            i += 1
        elif myarg == 'nolists':
            noLists = True
            selectedLists = {}
            i += 1
        elif myarg == 'listlimit':
            listLimit = gam.getInteger(sys.argv[i + 1], myarg, minVal=0)
            i += 2
        elif myarg in CROS_START_ARGUMENTS:
            startDate = _getFilterDate(sys.argv[i + 1])
            i += 2
        elif myarg in CROS_END_ARGUMENTS:
            endDate = _getFilterDate(sys.argv[i + 1])
            i += 2
        elif myarg == 'orderby':
            orderBy = sys.argv[i + 1].lower().replace('_', '')
            validOrderBy = [
                'location', 'user', 'lastsync', 'notes', 'serialnumber',
                'status', 'supportenddate'
            ]
            if orderBy not in validOrderBy:
                controlflow.expected_argument_exit('orderby',
                                                   ', '.join(validOrderBy),
                                                   orderBy)
            # Translate CLI names to the API's camelCase field names.
            if orderBy == 'location':
                orderBy = 'annotatedLocation'
            elif orderBy == 'user':
                orderBy = 'annotatedUser'
            elif orderBy == 'lastsync':
                orderBy = 'lastSync'
            elif orderBy == 'serialnumber':
                orderBy = 'serialNumber'
            elif orderBy == 'supportenddate':
                orderBy = 'supportEndDate'
            i += 2
        elif myarg in SORTORDER_CHOICES_MAP:
            sortOrder = SORTORDER_CHOICES_MAP[myarg]
            i += 1
        elif myarg in PROJECTION_CHOICES_MAP:
            projection = PROJECTION_CHOICES_MAP[myarg]
            sortHeaders = True
            if projection == 'FULL':
                fieldsList = []
            else:
                fieldsList = CROS_BASIC_FIELDS_LIST[:]
            i += 1
        elif myarg == 'allfields':
            projection = 'FULL'
            sortHeaders = True
            fieldsList = []
            i += 1
        elif myarg == 'sortheaders':
            sortHeaders = True
            i += 1
        elif myarg in CROS_LISTS_ARGUMENTS:
            _getSelectedLists(myarg)
            i += 1
        elif myarg in CROS_ARGUMENT_TO_PROPERTY_MAP:
            display.add_field_to_fields_list(myarg,
                                             CROS_ARGUMENT_TO_PROPERTY_MAP,
                                             fieldsList)
            i += 1
        elif myarg == 'fields':
            fieldNameList = sys.argv[i + 1]
            for field in fieldNameList.lower().replace(',', ' ').split():
                if field in CROS_LISTS_ARGUMENTS:
                    _getSelectedLists(field)
                elif field in CROS_ARGUMENT_TO_PROPERTY_MAP:
                    display.add_field_to_fields_list(
                        field, CROS_ARGUMENT_TO_PROPERTY_MAP, fieldsList)
                else:
                    controlflow.invalid_argument_exit(field,
                                                      'gam print cros fields')
            i += 2
        else:
            controlflow.invalid_argument_exit(sys.argv[i], 'gam print cros')
    if selectedLists:
        # Repeated lists are only returned with a FULL projection.
        noLists = False
        projection = 'FULL'
        for selectList in selectedLists:
            display.add_field_to_fields_list(selectList,
                                             CROS_ARGUMENT_TO_PROPERTY_MAP,
                                             fieldsList)
    if fieldsList:
        fieldsList.append('deviceId')
        # Inner quotes kept distinct from the f-string quotes so this parses
        # on Python < 3.12 (pre-PEP 701).
        fields = f'nextPageToken,chromeosdevices({",".join(set(fieldsList))})'.replace(
            '.', '/')
    else:
        fields = None
    for query in queries:
        gam.printGettingAllItems('CrOS Devices', query)
        page_message = gapi.got_total_items_msg('CrOS Devices', '...\n')
        all_cros = gapi.get_all_pages(cd.chromeosdevices(),
                                      'list',
                                      'chromeosdevices',
                                      page_message=page_message,
                                      query=query,
                                      customerId=GC_Values[GC_CUSTOMER_ID],
                                      projection=projection,
                                      orgUnitPath=orgUnitPath,
                                      orderBy=orderBy,
                                      sortOrder=sortOrder,
                                      fields=fields)
        for cros in all_cros:
            _checkTPMVulnerability(cros)
        if not noLists and not selectedLists:
            # No list selection: flatten every device dict into one row.
            for cros in all_cros:
                if 'notes' in cros:
                    cros['notes'] = cros['notes'].replace('\n', '\\n')
                if 'autoUpdateExpiration' in cros:
                    cros['autoUpdateExpiration'] = utils.formatTimestampYMD(
                        cros['autoUpdateExpiration'])
                if 'orgUnitId' in cros:
                    cros['orgUnitId'] = f"id:{cros['orgUnitId']}"
                for cpuStatusReport in cros.get('cpuStatusReports', []):
                    tempInfos = cpuStatusReport.get('cpuTemperatureInfo', [])
                    for tempInfo in tempInfos:
                        tempInfo['label'] = tempInfo['label'].strip()
                display.add_row_titles_to_csv_file(
                    utils.flatten_json(cros, listLimit=listLimit), csvRows,
                    titles)
            continue
        for cros in all_cros:
            if 'notes' in cros:
                cros['notes'] = cros['notes'].replace('\n', '\\n')
            if 'autoUpdateExpiration' in cros:
                cros['autoUpdateExpiration'] = utils.formatTimestampYMD(
                    cros['autoUpdateExpiration'])
            if 'orgUnitId' in cros:
                cros['orgUnitId'] = f"id:{cros['orgUnitId']}"
            row = {}
            for attrib in cros:
                if attrib not in {
                    'kind', 'etag', 'tpmVersionInfo', 'recentUsers',
                    'activeTimeRanges', 'deviceFiles', 'cpuStatusReports',
                    'diskVolumeReports', 'systemRamFreeReports'
                }:
                    row[attrib] = cros[attrib]
            if selectedLists.get('activeTimeRanges'):
                timergs = cros.get('activeTimeRanges', [])
            else:
                timergs = []
            activeTimeRanges = _filterTimeRanges(timergs, startDate, endDate)
            if selectedLists.get('recentUsers'):
                recentUsers = cros.get('recentUsers', [])
            else:
                recentUsers = []
            if selectedLists.get('deviceFiles'):
                device_files = cros.get('deviceFiles', [])
            else:
                device_files = []
            deviceFiles = _filterCreateReportTime(device_files, 'createTime',
                                                  startDate, endDate)
            if selectedLists.get('cpuStatusReports'):
                cpu_reports = cros.get('cpuStatusReports', [])
            else:
                cpu_reports = []
            cpuStatusReports = _filterCreateReportTime(cpu_reports,
                                                       'reportTime', startDate,
                                                       endDate)
            if selectedLists.get('diskVolumeReports'):
                diskVolumeReports = cros.get('diskVolumeReports', [])
            else:
                diskVolumeReports = []
            if selectedLists.get('systemRamFreeReports'):
                ram_reports = cros.get('systemRamFreeReports', [])
            else:
                ram_reports = []
            systemRamFreeReports = _filterCreateReportTime(
                ram_reports, 'reportTime', startDate, endDate)
            if noLists or (not activeTimeRanges and
                           not recentUsers and
                           not deviceFiles and
                           not cpuStatusReports and
                           not diskVolumeReports and
                           not systemRamFreeReports):
                display.add_row_titles_to_csv_file(row, csvRows, titles)
                continue
            lenATR = len(activeTimeRanges)
            lenRU = len(recentUsers)
            lenDF = len(deviceFiles)
            lenCSR = len(cpuStatusReports)
            lenDVR = len(diskVolumeReports)
            lenSRFR = len(systemRamFreeReports)
            max_len = max(lenATR, lenRU, lenDF, lenCSR, lenDVR, lenSRFR)
            # One output row per list index; each list contributes its
            # columns while it still has entries.
            for idx in range(min(max_len, listLimit or max_len)):
                nrow = row.copy()
                if idx < lenATR:
                    nrow['activeTimeRanges.date'] = \
                        activeTimeRanges[idx]['date']
                    nrow['activeTimeRanges.activeTime'] = \
                        str(activeTimeRanges[idx]['activeTime'])
                    active_time = activeTimeRanges[idx]['activeTime']
                    nrow['activeTimeRanges.duration'] = \
                        utils.formatMilliSeconds(active_time)
                    nrow['activeTimeRanges.minutes'] = active_time // 60000
                if idx < lenRU:
                    nrow['recentUsers.type'] = recentUsers[idx]['type']
                    nrow['recentUsers.email'] = recentUsers[idx].get('email')
                    if not nrow['recentUsers.email']:
                        if nrow['recentUsers.type'] == 'USER_TYPE_UNMANAGED':
                            nrow['recentUsers.email'] = 'UnmanagedUser'
                        else:
                            nrow['recentUsers.email'] = 'Unknown'
                if idx < lenDF:
                    nrow['deviceFiles.type'] = deviceFiles[idx]['type']
                    nrow['deviceFiles.createTime'] = \
                        deviceFiles[idx]['createTime']
                if idx < lenCSR:
                    nrow['cpuStatusReports.reportTime'] = \
                        cpuStatusReports[idx]['reportTime']
                    tempInfos = cpuStatusReports[idx].get('cpuTemperatureInfo', [])
                    for tempInfo in tempInfos:
                        label = tempInfo['label'].strip()
                        base = 'cpuStatusReports.cpuTemperatureInfo.'
                        nrow[f'{base}{label}'] = tempInfo['temperature']
                    cpu_field = 'cpuUtilizationPercentageInfo'
                    if cpu_field in cpuStatusReports[idx]:
                        cpu_reports = cpuStatusReports[idx][cpu_field]
                        cpu_pcts = [str(x) for x in cpu_reports]
                        nrow[f'cpuStatusReports.{cpu_field}'] = ','.join(cpu_pcts)
                if idx < lenDVR:
                    volumeInfo = diskVolumeReports[idx]['volumeInfo']
                    vfield = 'diskVolumeReports.volumeInfo.'
                    for j, volume in enumerate(volumeInfo):
                        nrow[f'{vfield}{j}.volumeId'] = \
                            volume['volumeId']
                        nrow[f'{vfield}{j}.storageFree'] = \
                            volume['storageFree']
                        nrow[f'{vfield}{j}.storageTotal'] = \
                            volume['storageTotal']
                if idx < lenSRFR:
                    nrow['systemRamFreeReports.reportTime'] = \
                        systemRamFreeReports[idx]['reportTime']
                    ram_reports = systemRamFreeReports[idx]['systemRamFreeInfo']
                    ram_info = [str(x) for x in ram_reports]
                    # Column header typo fixed: was 'systenRamFreeReports...',
                    # now consistent with the reportTime column above.
                    nrow['systemRamFreeReports.systemRamFreeInfo'] = \
                        ','.join(ram_info)
                display.add_row_titles_to_csv_file(nrow, csvRows, titles)
    if sortHeaders:
        display.sort_csv_titles([
            'deviceId',
        ], titles)
    display.write_csv_file(csvRows, titles, 'CrOS', todrive)
def getCrOSDeviceEntity(i, cd):
    """Resolve the CrOS device specifier at sys.argv[i] into device ids.

    Returns (next_argv_index, devices): devices is either the result of
    gam.getUsersToModify() (cros_sn / query entities), the deviceIds
    matched by an inline "query:..." / "query:orgunitpath:..." argument,
    or the argument itself split on commas/spaces into literal ids.
    """
    myarg = sys.argv[i].lower()
    if myarg == 'cros_sn':
        return i + 2, gam.getUsersToModify('cros_sn', sys.argv[i + 1])
    if myarg == 'query':
        return i + 2, gam.getUsersToModify('crosquery', sys.argv[i + 1])
    if myarg.startswith('query:'):
        # Keep the original (non-lowered) argument for the query text.
        query = sys.argv[i][6:]
        if query[:12].lower() == 'orgunitpath:':
            kwargs = {'orgUnitPath': query[12:]}
        else:
            kwargs = {'query': query}
        devices = gapi.get_all_pages(
            cd.chromeosdevices(),
            'list',
            'chromeosdevices',
            customerId=GC_Values[GC_CUSTOMER_ID],
            fields='nextPageToken,chromeosdevices(deviceId)',
            **kwargs)
        return i + 1, [device['deviceId'] for device in devices]
    return i + 1, sys.argv[i].replace(',', ' ').split()
def _getFilterDate(dateStr):
    """Parse a YYYYMMDD_FORMAT date string into a naive datetime."""
    parsed = datetime.datetime.strptime(dateStr, YYYYMMDD_FORMAT)
    return parsed
def _filterTimeRanges(activeTimeRanges, startDate, endDate):
if startDate is None and endDate is None:
return activeTimeRanges
filteredTimeRanges = []
for timeRange in activeTimeRanges:
activityDate = datetime.datetime.strptime(timeRange['date'],
YYYYMMDD_FORMAT)
if ((startDate is None) or \
(activityDate >= startDate)) and \
((endDate is None) or \
(activityDate <= endDate)):
filteredTimeRanges.append(timeRange)
return filteredTimeRanges
def _filterCreateReportTime(items, timeField, startTime, endTime):
if startTime is None and endTime is None:
return items
filteredItems = []
time_format = '%Y-%m-%dT%H:%M:%S.%fZ'
for item in items:
timeValue = datetime.datetime.strptime(item[timeField], time_format)
if ((startTime is None) or \
(timeValue >= startTime)) and \
((endTime is None) or \
(timeValue <= endTime)):
filteredItems.append(item)
return filteredItems
import datetime
import json
import os
import sys
import time
import googleapiclient
from gam.var import *
import gam
from gam import controlflow
from gam import display
from gam import fileutils
from gam import gapi
from gam.gapi import directory as gapi_directory
from gam.gapi import errors as gapi_errors
from gam.gapi.directory import orgunits as gapi_directory_orgunits
from gam import utils
def _display_cros_command_result(cd, device_id, command_id, times_to_check_status):
    """Poll a CrOS device command and print each status result as JSON.

    Polls up to times_to_check_status times, sleeping 2 seconds before
    each poll, and stops early once the command reaches a terminal state.

    Args:
      cd: Directory API service object.
      device_id: the ChromeOS device id the command was issued to.
      command_id: the id returned by issueCommand for that device.
      times_to_check_status: int, maximum number of status polls.
    """
    print(f'deviceId: {device_id}, commandId: {command_id}')
    # States after which the command result can no longer change.
    final_states = {'EXPIRED', 'CANCELLED', 'EXECUTED_BY_CLIENT'}
    for _ in range(0, times_to_check_status):
        # Give the device a moment to pick up / execute the command.
        time.sleep(2)
        result = gapi.call(cd.customer().devices().chromeos().commands(), 'get',
                           customerId=GC_Values[GC_CUSTOMER_ID], deviceId=device_id,
                           commandId=command_id)
        display.print_json(result)
        if result.get('state') in final_states:
            return
def issue_command():
    """Handle "gam issuecommand cros <entity> command <command> ...".

    Issues a device command (reboot, wipe_users, remote_powerwash,
    set_volume, ...) to each selected device, then polls and prints the
    command status. The destructive WIPE_USERS and REMOTE_POWERWASH
    commands require the explicit doit argument.
    """
    cd = gapi_directory.build()
    i, devices = getCrOSDeviceEntity(3, cd)
    body = {}
    # Derive the accepted command names from the API discovery document so
    # newly added commands work without a GAM code change.
    valid_commands = gapi.get_enum_values_minus_unspecified(
        cd._rootDesc['schemas']
        ['DirectoryChromeosdevicesIssueCommandRequest']
        ['properties']['commandType']['enum'])
    command_map = {}
    for valid_command in valid_commands:
        v = valid_command.lower().replace('_', '')
        command_map[v] = valid_command
    times_to_check_status = 1
    doit = False
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg == 'command':
            command = sys.argv[i+1].lower().replace('_', '')
            if command not in command_map:
                controlflow.system_error_exit(2, f'expected command of ' \
                    f'{", ".join(valid_commands)} got {command}')
            body['commandType'] = command_map[command]
            i += 2
            # set_volume consumes one extra argument: the volume level,
            # passed through to the API as a JSON string payload.
            if command == 'setvolume':
                body['payload'] = json.dumps({'volume': sys.argv[i]})
                i += 1
        elif myarg == 'timestocheckstatus':
            times_to_check_status = int(sys.argv[i+1])
            i += 2
        elif myarg == 'doit':
            doit = True
            i += 1
        else:
            controlflow.invalid_argument_exit(sys.argv[i], 'gam issuecommand cros')
    if 'commandType' not in body:
        controlflow.missing_argument_exit('command <CrOSCommand>', 'gam issuecommand cros')
    # Destructive commands require an explicit acknowledgement via doit.
    if body['commandType'] == 'WIPE_USERS' and not doit:
        controlflow.system_error_exit(2, 'wipe_users command requires admin ' \
            'acknowledge user data will be destroyed with the ' \
            'doit argument')
    if body['commandType'] == 'REMOTE_POWERWASH' and not doit:
        controlflow.system_error_exit(2, 'remote_powerwash command requires ' \
            'admin acknowledge user data will be destroyed, device will need' \
            ' to be reconnected to WiFi and re-enrolled with the doit argument')
    for device_id in devices:
        try:
            result = gapi.call(cd.customer().devices().chromeos(), 'issueCommand',
                               customerId=GC_Values[GC_CUSTOMER_ID], deviceId=device_id,
                               throw_reasons=[gapi_errors.ErrorReason.FOUR_O_O],
                               body=body)
        except googleapiclient.errors.HttpError:
            # A 400 typically means the device cannot accept this command
            # in its current state (e.g. not in kiosk mode).
            controlflow.system_error_exit(4, '400 response from Google. This ' \
                'usually indicates the devices was not in a state where it will' \
                ' accept the command. For example, reboot, set_volume and take_a_screenshot' \
                ' require the device to be in auto-start kiosk app mode.')
        command_id = result.get('commandId')
        _display_cros_command_result(cd, device_id, command_id, times_to_check_status)
def get_command():
    """Handle "gam getcommand cros <entity> ..." - show command status.

    Optional arguments: commandid <id> and times_to_check_status <n>
    (number of status polls per device, default 1).
    """
    cd = gapi_directory.build()
    i, devices = getCrOSDeviceEntity(3, cd)
    command_id = None
    times_to_check_status = 1
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg == 'commandid':
            command_id = sys.argv[i+1]
            i += 2
        elif myarg == 'timestocheckstatus':
            times_to_check_status = int(sys.argv[i+1])
            i += 2
        else:
            controlflow.invalid_argument_exit(sys.argv[i], 'gam getcommand cros')
    for device_id in devices:
        _display_cros_command_result(cd, device_id, command_id, times_to_check_status)
def doUpdateCros():
    """Handle "gam update cros <entity> ..." - update, move or act on devices.

    Parses sys.argv for annotation updates (user/location/notes/asset id),
    an org unit move, or a device action (disable/reenable/deprovision*).
    NOTE(review): when an action is given it takes precedence and any
    annotation updates are silently ignored; deprovision additionally
    requires the acknowledge_device_touch_requirement flag.
    """
    cd = gapi_directory.build()
    i, devices = getCrOSDeviceEntity(3, cd)
    update_body = {}
    action_body = {}
    orgUnitPath = None
    ack_wipe = False
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg == 'user':
            update_body['annotatedUser'] = sys.argv[i + 1]
            i += 2
        elif myarg == 'location':
            update_body['annotatedLocation'] = sys.argv[i + 1]
            i += 2
        elif myarg == 'notes':
            # Allow a literal "\n" on the command line to become a newline.
            update_body['notes'] = sys.argv[i + 1].replace('\\n', '\n')
            i += 2
        elif myarg in ['tag', 'asset', 'assetid']:
            update_body['annotatedAssetId'] = sys.argv[i + 1]
            i += 2
        elif myarg in ['ou', 'org']:
            orgUnitPath = gapi_directory_orgunits.getOrgUnitItem(sys.argv[i + 1])
            i += 2
        elif myarg == 'action':
            # Normalize the action name, then map the deprovision variants
            # onto action=deprovision plus a deprovisionReason.
            action = sys.argv[i + 1].lower().replace('_', '').replace('-', '')
            deprovisionReason = None
            if action in [
                'deprovisionsamemodelreplace',
                'deprovisionsamemodelreplacement'
            ]:
                action = 'deprovision'
                deprovisionReason = 'same_model_replacement'
            elif action in [
                'deprovisiondifferentmodelreplace',
                'deprovisiondifferentmodelreplacement'
            ]:
                action = 'deprovision'
                deprovisionReason = 'different_model_replacement'
            elif action in ['deprovisionretiringdevice']:
                action = 'deprovision'
                deprovisionReason = 'retiring_device'
            elif action == 'deprovisionupgradetransfer':
                action = 'deprovision'
                deprovisionReason = 'upgrade_transfer'
            elif action not in ['disable', 'reenable']:
                controlflow.system_error_exit(2, f'expected action of ' \
                    f'deprovision_same_model_replace, ' \
                    f'deprovision_different_model_replace, ' \
                    f'deprovision_retiring_device, ' \
                    f'deprovision_upgrade_transfer, disable or reenable,'
                    f' got {action}')
            action_body = {'action': action}
            if deprovisionReason:
                action_body['deprovisionReason'] = deprovisionReason
            i += 2
        elif myarg == 'acknowledgedevicetouchrequirement':
            ack_wipe = True
            i += 1
        else:
            controlflow.invalid_argument_exit(sys.argv[i], 'gam update cros')
    i = 0
    count = len(devices)
    if action_body:
        # Deprovisioning is destructive; require explicit acknowledgement.
        if action_body['action'] == 'deprovision' and not ack_wipe:
            print(f'WARNING: Refusing to deprovision {count} devices because '
                  'acknowledge_device_touch_requirement not specified. ' \
                  'Deprovisioning a device means the device will have to ' \
                  'be physically wiped and re-enrolled to be managed by ' \
                  'your domain again. This requires physical access to ' \
                  'the device and is very time consuming to perform for ' \
                  'each device. Please add ' \
                  '"acknowledge_device_touch_requirement" to the GAM ' \
                  'command if you understand this and wish to proceed ' \
                  'with the deprovision. Please also be aware that ' \
                  'deprovisioning can have an effect on your device ' \
                  'license count. See ' \
                  'https://support.google.com/chrome/a/answer/3523633 '\
                  'for full details.')
            sys.exit(3)
        for deviceId in devices:
            i += 1
            cur_count = gam.currentCount(i, count)
            print(f' performing action {action} for {deviceId}{cur_count}')
            gapi.call(cd.chromeosdevices(),
                      function='action',
                      customerId=GC_Values[GC_CUSTOMER_ID],
                      resourceId=deviceId,
                      body=action_body)
    else:
        if update_body:
            for deviceId in devices:
                i += 1
                current_count = gam.currentCount(i, count)
                print(f' updating {deviceId}{current_count}')
                gapi.call(cd.chromeosdevices(),
                          'update',
                          customerId=GC_Values[GC_CUSTOMER_ID],
                          deviceId=deviceId,
                          body=update_body)
        if orgUnitPath:
            # split moves into max 50 devices per batch
            for l in range(0, len(devices), 50):
                move_body = {'deviceIds': devices[l:l + 50]}
                print(f' moving {len(move_body["deviceIds"])} devices to ' \
                      f'{orgUnitPath}')
                gapi.call(cd.chromeosdevices(),
                          'moveDevicesToOu',
                          customerId=GC_Values[GC_CUSTOMER_ID],
                          orgUnitPath=orgUnitPath,
                          body=move_body)
def doGetCrosInfo():
    """Show detail for one or more CrOS devices: `gam info cros ...`.

    Parses options from sys.argv starting at index 3, fetches each device via
    the Directory API, prints scalar fields and (unless nolists) the
    activity/report lists, optionally downloading a device log file.

    Fix: the `downloadfile` option value ('latest' or a createTime) was being
    overwritten with the generated zip filename inside the per-device loop,
    which broke the file selector for every device after the first. The zip
    name now uses a separate local variable.
    """
    cd = gapi_directory.build()
    i, devices = getCrOSDeviceEntity(3, cd)
    downloadfile = None
    targetFolder = GC_Values[GC_DRIVE_DIR]
    projection = None
    fieldsList = []
    noLists = False
    startDate = endDate = None
    listLimit = 0
    # Parse remaining command-line options.
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg == 'nolists':
            noLists = True
            i += 1
        elif myarg == 'listlimit':
            listLimit = gam.getInteger(sys.argv[i + 1], myarg, minVal=-1)
            i += 2
        elif myarg in CROS_START_ARGUMENTS:
            startDate = _getFilterDate(sys.argv[i + 1])
            i += 2
        elif myarg in CROS_END_ARGUMENTS:
            endDate = _getFilterDate(sys.argv[i + 1])
            i += 2
        elif myarg == 'allfields':
            projection = 'FULL'
            fieldsList = []
            i += 1
        elif myarg in PROJECTION_CHOICES_MAP:
            projection = PROJECTION_CHOICES_MAP[myarg]
            if projection == 'FULL':
                fieldsList = []
            else:
                fieldsList = CROS_BASIC_FIELDS_LIST[:]
            i += 1
        elif myarg in CROS_ARGUMENT_TO_PROPERTY_MAP:
            fieldsList.extend(CROS_ARGUMENT_TO_PROPERTY_MAP[myarg])
            i += 1
        elif myarg == 'fields':
            fieldNameList = sys.argv[i + 1]
            for field in fieldNameList.lower().replace(',', ' ').split():
                if field in CROS_ARGUMENT_TO_PROPERTY_MAP:
                    fieldsList.extend(CROS_ARGUMENT_TO_PROPERTY_MAP[field])
                    # List-type fields require the FULL projection.
                    if field in CROS_ACTIVE_TIME_RANGES_ARGUMENTS + \
                            CROS_DEVICE_FILES_ARGUMENTS + \
                            CROS_RECENT_USERS_ARGUMENTS:
                        projection = 'FULL'
                        noLists = False
                else:
                    controlflow.invalid_argument_exit(field,
                                                      'gam info cros fields')
            i += 2
        elif myarg == 'downloadfile':
            downloadfile = sys.argv[i + 1]
            if downloadfile.lower() == 'latest':
                downloadfile = downloadfile.lower()
            i += 2
        elif myarg == 'targetfolder':
            targetFolder = os.path.expanduser(sys.argv[i + 1])
            if not os.path.isdir(targetFolder):
                os.makedirs(targetFolder)
            i += 2
        else:
            controlflow.invalid_argument_exit(sys.argv[i], 'gam info cros')
    if fieldsList:
        fieldsList.append('deviceId')
        fields = ','.join(set(fieldsList)).replace('.', '/')
    else:
        fields = None
    i = 0
    device_count = len(devices)
    for deviceId in devices:
        i += 1
        cros = gapi.call(cd.chromeosdevices(),
                         'get',
                         customerId=GC_Values[GC_CUSTOMER_ID],
                         deviceId=deviceId,
                         projection=projection,
                         fields=fields)
        print(f'CrOS Device: {deviceId} ({i} of {device_count})')
        # Escape embedded newlines so notes stay on one output line.
        if 'notes' in cros:
            cros['notes'] = cros['notes'].replace('\n', '\\n')
        if 'autoUpdateExpiration' in cros:
            cros['autoUpdateExpiration'] = utils.formatTimestampYMD(
                cros['autoUpdateExpiration'])
        if 'orgUnitId' in cros:
            cros['orgUnitId'] = f"id:{cros['orgUnitId']}"
        _checkTPMVulnerability(cros)
        for up in CROS_SCALAR_PROPERTY_PRINT_ORDER:
            if up in cros:
                if isinstance(cros[up], str):
                    print(f' {up}: {cros[up]}')
                else:
                    sys.stdout.write(f' {up}:')
                    display.print_json(cros[up], ' ')
        if not noLists:
            activeTimeRanges = _filterTimeRanges(
                cros.get('activeTimeRanges', []), startDate, endDate)
            lenATR = len(activeTimeRanges)
            if lenATR:
                print(' activeTimeRanges')
                # listLimit == 0 means "no limit".
                num_ranges = min(lenATR, listLimit or lenATR)
                for activeTimeRange in activeTimeRanges[:num_ranges]:
                    active_date = activeTimeRange['date']
                    active_time = activeTimeRange['activeTime']
                    duration = utils.formatMilliSeconds(active_time)
                    minutes = active_time // 60000
                    print(f' date: {active_date}')
                    print(f' activeTime: {active_time}')
                    print(f' duration: {duration}')
                    print(f' minutes: {minutes}')
            recentUsers = cros.get('recentUsers', [])
            lenRU = len(recentUsers)
            if lenRU:
                print(' recentUsers')
                num_ranges = min(lenRU, listLimit or lenRU)
                for recentUser in recentUsers[:num_ranges]:
                    useremail = recentUser.get('email')
                    if not useremail:
                        if recentUser['type'] == 'USER_TYPE_UNMANAGED':
                            useremail = 'UnmanagedUser'
                        else:
                            useremail = 'Unknown'
                    print(f' type: {recentUser["type"]}')
                    print(f' email: {useremail}')
            deviceFiles = _filterCreateReportTime(cros.get('deviceFiles',
                                                           []), 'createTime',
                                                  startDate, endDate)
            lenDF = len(deviceFiles)
            if lenDF:
                num_ranges = min(lenDF, listLimit or lenDF)
                print(' deviceFiles')
                for deviceFile in deviceFiles[:num_ranges]:
                    device_type = deviceFile['type']
                    create_time = deviceFile['createTime']
                    print(f' {device_type}: {create_time}')
            if downloadfile:
                deviceFiles = cros.get('deviceFiles', [])
                lenDF = len(deviceFiles)
                if lenDF:
                    if downloadfile == 'latest':
                        deviceFile = deviceFiles[-1]
                    else:
                        # Match on exact createTime; for/else hits when no
                        # file matches.
                        for deviceFile in deviceFiles:
                            if deviceFile['createTime'] == downloadfile:
                                break
                        else:
                            print(f'ERROR: file {downloadfile} not ' \
                                  f'available to download.')
                            deviceFile = None
                    if deviceFile:
                        created = deviceFile['createTime']
                        # Use a separate local for the zip name so the
                        # downloadfile selector survives for later devices.
                        zip_name = f'cros-logs-{deviceId}-{created}.zip'
                        downloadfilename = os.path.join(targetFolder,
                                                        zip_name)
                        dl_url = deviceFile['downloadUrl']
                        _, content = cd._http.request(dl_url)
                        fileutils.write_file(downloadfilename,
                                             content,
                                             mode='wb',
                                             continue_on_error=True)
                        print(f'Downloaded: {downloadfilename}')
                elif downloadfile:
                    print('ERROR: no files to download.')
            cpuStatusReports = _filterCreateReportTime(
                cros.get('cpuStatusReports', []), 'reportTime', startDate,
                endDate)
            lenCSR = len(cpuStatusReports)
            if lenCSR:
                print(' cpuStatusReports')
                num_ranges = min(lenCSR, listLimit or lenCSR)
                for cpuStatusReport in cpuStatusReports[:num_ranges]:
                    print(f' reportTime: {cpuStatusReport["reportTime"]}')
                    print(' cpuTemperatureInfo')
                    tempInfos = cpuStatusReport.get('cpuTemperatureInfo', [])
                    for tempInfo in tempInfos:
                        temp_label = tempInfo['label'].strip()
                        temperature = tempInfo['temperature']
                        print(f' {temp_label}: {temperature}')
                    if 'cpuUtilizationPercentageInfo' in cpuStatusReport:
                        pct_info = cpuStatusReport['cpuUtilizationPercentageInfo']
                        util = ','.join([str(x) for x in pct_info])
                        print(f' cpuUtilizationPercentageInfo: {util}')
            diskVolumeReports = cros.get('diskVolumeReports', [])
            lenDVR = len(diskVolumeReports)
            if lenDVR:
                print(' diskVolumeReports')
                print(' volumeInfo')
                num_ranges = min(lenDVR, listLimit or lenDVR)
                for diskVolumeReport in diskVolumeReports[:num_ranges]:
                    volumeInfo = diskVolumeReport['volumeInfo']
                    for volume in volumeInfo:
                        vid = volume['volumeId']
                        vstorage_free = volume['storageFree']
                        vstorage_total = volume['storageTotal']
                        print(f' volumeId: {vid}')
                        print(f' storageFree: {vstorage_free}')
                        print(f' storageTotal: {vstorage_total}')
            systemRamFreeReports = _filterCreateReportTime(
                cros.get('systemRamFreeReports', []), 'reportTime', startDate,
                endDate)
            lenSRFR = len(systemRamFreeReports)
            if lenSRFR:
                print(' systemRamFreeReports')
                num_ranges = min(lenSRFR, listLimit or lenSRFR)
                for systemRamFreeReport in systemRamFreeReports[:num_ranges]:
                    report_time = systemRamFreeReport['reportTime']
                    free_info = systemRamFreeReport['systemRamFreeInfo']
                    free_ram = ','.join(free_info)
                    print(f' reportTime: {report_time}')
                    print(f' systemRamFreeInfo: {free_ram}')
def doPrintCrosActivity():
    """Write a CSV of CrOS device activity: `gam print crosactivity`.

    Emits one row per device (recent users) and/or one row per
    activeTimeRange / deviceFile entry, depending on the selected options.
    Reads options from sys.argv starting at index 3; relies on module-level
    GC_Values, the CROS_* constants and the gam/gapi/display helpers.
    """
    cd = gapi_directory.build()
    todrive = False
    titles = [
        'deviceId', 'annotatedAssetId', 'annotatedLocation', 'serialNumber',
        'orgUnitPath'
    ]
    csvRows = []
    fieldsList = [
        'deviceId', 'annotatedAssetId', 'annotatedLocation', 'serialNumber',
        'orgUnitPath'
    ]
    startDate = endDate = None
    selectActiveTimeRanges = selectDeviceFiles = selectRecentUsers = False
    listLimit = 0
    delimiter = ','
    orgUnitPath = None
    queries = [None]
    i = 3
    # Parse command-line options; each branch advances i past its arguments.
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg in ['query', 'queries']:
            queries = gam.getQueries(myarg, sys.argv[i + 1])
            i += 2
        elif myarg == 'limittoou':
            orgUnitPath = gapi_directory_orgunits.getOrgUnitItem(sys.argv[i + 1])
            i += 2
        elif myarg == 'todrive':
            todrive = True
            i += 1
        elif myarg in CROS_ACTIVE_TIME_RANGES_ARGUMENTS:
            selectActiveTimeRanges = True
            i += 1
        elif myarg in CROS_DEVICE_FILES_ARGUMENTS:
            selectDeviceFiles = True
            i += 1
        elif myarg in CROS_RECENT_USERS_ARGUMENTS:
            selectRecentUsers = True
            i += 1
        elif myarg == 'both':
            selectActiveTimeRanges = selectRecentUsers = True
            i += 1
        elif myarg == 'all':
            selectActiveTimeRanges = selectDeviceFiles = True
            selectRecentUsers = True
            i += 1
        elif myarg in CROS_START_ARGUMENTS:
            startDate = _getFilterDate(sys.argv[i + 1])
            i += 2
        elif myarg in CROS_END_ARGUMENTS:
            endDate = _getFilterDate(sys.argv[i + 1])
            i += 2
        elif myarg == 'listlimit':
            listLimit = gam.getInteger(sys.argv[i + 1], myarg, minVal=0)
            i += 2
        elif myarg == 'delimiter':
            delimiter = sys.argv[i + 1]
            i += 2
        else:
            controlflow.invalid_argument_exit(sys.argv[i],
                                              'gam print crosactivity')
    # Default selection when nothing was requested explicitly.
    if not selectActiveTimeRanges and \
       not selectDeviceFiles and \
       not selectRecentUsers:
        selectActiveTimeRanges = selectRecentUsers = True
    if selectRecentUsers:
        fieldsList.append('recentUsers')
        display.add_titles_to_csv_file([
            'recentUsers.email',
        ], titles)
    if selectActiveTimeRanges:
        fieldsList.append('activeTimeRanges')
        titles_to_add = [
            'activeTimeRanges.date', 'activeTimeRanges.duration',
            'activeTimeRanges.minutes'
        ]
        display.add_titles_to_csv_file(titles_to_add, titles)
    if selectDeviceFiles:
        fieldsList.append('deviceFiles')
        titles_to_add = ['deviceFiles.type', 'deviceFiles.createTime']
        display.add_titles_to_csv_file(titles_to_add, titles)
    fields = f'nextPageToken,chromeosdevices({",".join(fieldsList)})'
    for query in queries:
        gam.printGettingAllItems('CrOS Devices', query)
        page_message = gapi.got_total_items_msg('CrOS Devices', '...\n')
        all_cros = gapi.get_all_pages(cd.chromeosdevices(),
                                      'list',
                                      'chromeosdevices',
                                      page_message=page_message,
                                      query=query,
                                      customerId=GC_Values[GC_CUSTOMER_ID],
                                      projection='FULL',
                                      fields=fields,
                                      orgUnitPath=orgUnitPath)
        for cros in all_cros:
            # Base row: scalar attributes only; list attributes are
            # expanded into their own rows/columns below.
            row = {}
            skip_attribs = ['recentUsers', 'activeTimeRanges', 'deviceFiles']
            for attrib in cros:
                if attrib not in skip_attribs:
                    row[attrib] = cros[attrib]
            if selectActiveTimeRanges:
                activeTimeRanges = _filterTimeRanges(
                    cros.get('activeTimeRanges', []), startDate, endDate)
                lenATR = len(activeTimeRanges)
                # listLimit == 0 means "no limit".
                num_ranges = min(lenATR, listLimit or lenATR)
                for activeTimeRange in activeTimeRanges[:num_ranges]:
                    newrow = row.copy()
                    newrow['activeTimeRanges.date'] = activeTimeRange['date']
                    active_time = activeTimeRange['activeTime']
                    newrow['activeTimeRanges.duration'] = \
                        utils.formatMilliSeconds(active_time)
                    newrow['activeTimeRanges.minutes'] = \
                        activeTimeRange['activeTime']//60000
                    csvRows.append(newrow)
            if selectRecentUsers:
                recentUsers = cros.get('recentUsers', [])
                lenRU = len(recentUsers)
                num_ranges = min(lenRU, listLimit or lenRU)
                recent_users = []
                for recentUser in recentUsers[:num_ranges]:
                    useremail = recentUser.get('email')
                    if not useremail:
                        # Unmanaged/guest sessions carry no email address.
                        if recentUser['type'] == 'USER_TYPE_UNMANAGED':
                            useremail = 'UnmanagedUser'
                        else:
                            useremail = 'Unknown'
                    recent_users.append(useremail)
                # Recent users are joined into a single cell on one row.
                row['recentUsers.email'] = delimiter.join(recent_users)
                csvRows.append(row)
            if selectDeviceFiles:
                deviceFiles = _filterCreateReportTime(
                    cros.get('deviceFiles', []), 'createTime', startDate,
                    endDate)
                lenDF = len(deviceFiles)
                num_ranges = min(lenDF, listLimit or lenDF)
                for deviceFile in deviceFiles[:num_ranges]:
                    newrow = row.copy()
                    newrow['deviceFiles.type'] = deviceFile['type']
                    create_time = deviceFile['createTime']
                    newrow['deviceFiles.createTime'] = create_time
                    csvRows.append(newrow)
    display.write_csv_file(csvRows, titles, 'CrOS Activity', todrive)
def _checkTPMVulnerability(cros):
if 'tpmVersionInfo' in cros and \
'firmwareVersion' in cros['tpmVersionInfo']:
firmware_version = cros['tpmVersionInfo']['firmwareVersion']
if firmware_version in CROS_TPM_VULN_VERSIONS:
cros['tpmVersionInfo']['tpmVulnerability'] = 'VULNERABLE'
elif firmware_version in CROS_TPM_FIXED_VERSIONS:
cros['tpmVersionInfo']['tpmVulnerability'] = 'UPDATED'
else:
cros['tpmVersionInfo']['tpmVulnerability'] = 'NOT IMPACTED'
def doPrintCrosDevices():
    """Write a CSV of CrOS devices: `gam print cros`.

    Supports field selection, projections, per-device list expansion
    (activeTimeRanges, recentUsers, deviceFiles, cpuStatusReports,
    diskVolumeReports, systemRamFreeReports) with date filtering and a row
    limit. Reads options from sys.argv starting at index 3.

    Fix: the systemRamFreeInfo column title was misspelled
    'systenRamFreeReports.systemRamFreeInfo'; it now matches the
    'systemRamFreeReports.' prefix used by the reportTime column.
    """

    def _getSelectedLists(myarg):
        # Record which list-valued device attributes were requested.
        if myarg in CROS_ACTIVE_TIME_RANGES_ARGUMENTS:
            selectedLists['activeTimeRanges'] = True
        elif myarg in CROS_RECENT_USERS_ARGUMENTS:
            selectedLists['recentUsers'] = True
        elif myarg in CROS_DEVICE_FILES_ARGUMENTS:
            selectedLists['deviceFiles'] = True
        elif myarg in CROS_CPU_STATUS_REPORTS_ARGUMENTS:
            selectedLists['cpuStatusReports'] = True
        elif myarg in CROS_DISK_VOLUME_REPORTS_ARGUMENTS:
            selectedLists['diskVolumeReports'] = True
        elif myarg in CROS_SYSTEM_RAM_FREE_REPORTS_ARGUMENTS:
            selectedLists['systemRamFreeReports'] = True

    cd = gapi_directory.build()
    todrive = False
    fieldsList = []
    fieldsTitles = {}
    titles = []
    csvRows = []
    display.add_field_to_csv_file('deviceid', CROS_ARGUMENT_TO_PROPERTY_MAP,
                                  fieldsList, fieldsTitles, titles)
    projection = orderBy = sortOrder = orgUnitPath = None
    queries = [None]
    noLists = sortHeaders = False
    selectedLists = {}
    startDate = endDate = None
    listLimit = 0
    i = 3
    # Parse command-line options; each branch advances i past its arguments.
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg in ['query', 'queries']:
            queries = gam.getQueries(myarg, sys.argv[i + 1])
            i += 2
        elif myarg == 'limittoou':
            orgUnitPath = gapi_directory_orgunits.getOrgUnitItem(sys.argv[i + 1])
            i += 2
        elif myarg == 'todrive':
            todrive = True
            i += 1
        elif myarg == 'nolists':
            noLists = True
            selectedLists = {}
            i += 1
        elif myarg == 'listlimit':
            listLimit = gam.getInteger(sys.argv[i + 1], myarg, minVal=0)
            i += 2
        elif myarg in CROS_START_ARGUMENTS:
            startDate = _getFilterDate(sys.argv[i + 1])
            i += 2
        elif myarg in CROS_END_ARGUMENTS:
            endDate = _getFilterDate(sys.argv[i + 1])
            i += 2
        elif myarg == 'orderby':
            orderBy = sys.argv[i + 1].lower().replace('_', '')
            validOrderBy = [
                'location', 'user', 'lastsync', 'notes', 'serialnumber',
                'status', 'supportenddate'
            ]
            if orderBy not in validOrderBy:
                controlflow.expected_argument_exit('orderby',
                                                   ', '.join(validOrderBy),
                                                   orderBy)
            # Map CLI spellings to API field names.
            if orderBy == 'location':
                orderBy = 'annotatedLocation'
            elif orderBy == 'user':
                orderBy = 'annotatedUser'
            elif orderBy == 'lastsync':
                orderBy = 'lastSync'
            elif orderBy == 'serialnumber':
                orderBy = 'serialNumber'
            elif orderBy == 'supportenddate':
                orderBy = 'supportEndDate'
            i += 2
        elif myarg in SORTORDER_CHOICES_MAP:
            sortOrder = SORTORDER_CHOICES_MAP[myarg]
            i += 1
        elif myarg in PROJECTION_CHOICES_MAP:
            projection = PROJECTION_CHOICES_MAP[myarg]
            sortHeaders = True
            if projection == 'FULL':
                fieldsList = []
            else:
                fieldsList = CROS_BASIC_FIELDS_LIST[:]
            i += 1
        elif myarg == 'allfields':
            projection = 'FULL'
            sortHeaders = True
            fieldsList = []
            i += 1
        elif myarg == 'sortheaders':
            sortHeaders = True
            i += 1
        elif myarg in CROS_LISTS_ARGUMENTS:
            _getSelectedLists(myarg)
            i += 1
        elif myarg in CROS_ARGUMENT_TO_PROPERTY_MAP:
            display.add_field_to_fields_list(myarg,
                                             CROS_ARGUMENT_TO_PROPERTY_MAP,
                                             fieldsList)
            i += 1
        elif myarg == 'fields':
            fieldNameList = sys.argv[i + 1]
            for field in fieldNameList.lower().replace(',', ' ').split():
                if field in CROS_LISTS_ARGUMENTS:
                    _getSelectedLists(field)
                elif field in CROS_ARGUMENT_TO_PROPERTY_MAP:
                    display.add_field_to_fields_list(
                        field, CROS_ARGUMENT_TO_PROPERTY_MAP, fieldsList)
                else:
                    controlflow.invalid_argument_exit(field,
                                                      'gam print cros fields')
            i += 2
        else:
            controlflow.invalid_argument_exit(sys.argv[i], 'gam print cros')
    if selectedLists:
        # List expansion requires the FULL projection.
        noLists = False
        projection = 'FULL'
        for selectList in selectedLists:
            display.add_field_to_fields_list(selectList,
                                             CROS_ARGUMENT_TO_PROPERTY_MAP,
                                             fieldsList)
    if fieldsList:
        fieldsList.append('deviceId')
        fields = f'nextPageToken,chromeosdevices({",".join(set(fieldsList))})'.replace(
            '.', '/')
    else:
        fields = None
    for query in queries:
        gam.printGettingAllItems('CrOS Devices', query)
        page_message = gapi.got_total_items_msg('CrOS Devices', '...\n')
        all_cros = gapi.get_all_pages(cd.chromeosdevices(),
                                      'list',
                                      'chromeosdevices',
                                      page_message=page_message,
                                      query=query,
                                      customerId=GC_Values[GC_CUSTOMER_ID],
                                      projection=projection,
                                      orgUnitPath=orgUnitPath,
                                      orderBy=orderBy,
                                      sortOrder=sortOrder,
                                      fields=fields)
        for cros in all_cros:
            _checkTPMVulnerability(cros)
        if not noLists and not selectedLists:
            # No explicit list selection: flatten everything into columns.
            for cros in all_cros:
                if 'notes' in cros:
                    cros['notes'] = cros['notes'].replace('\n', '\\n')
                if 'autoUpdateExpiration' in cros:
                    cros['autoUpdateExpiration'] = utils.formatTimestampYMD(
                        cros['autoUpdateExpiration'])
                if 'orgUnitId' in cros:
                    cros['orgUnitId'] = f"id:{cros['orgUnitId']}"
                for cpuStatusReport in cros.get('cpuStatusReports', []):
                    tempInfos = cpuStatusReport.get('cpuTemperatureInfo', [])
                    for tempInfo in tempInfos:
                        tempInfo['label'] = tempInfo['label'].strip()
                display.add_row_titles_to_csv_file(
                    utils.flatten_json(cros, listLimit=listLimit), csvRows,
                    titles)
            continue
        for cros in all_cros:
            if 'notes' in cros:
                cros['notes'] = cros['notes'].replace('\n', '\\n')
            if 'autoUpdateExpiration' in cros:
                cros['autoUpdateExpiration'] = utils.formatTimestampYMD(
                    cros['autoUpdateExpiration'])
            if 'orgUnitId' in cros:
                cros['orgUnitId'] = f"id:{cros['orgUnitId']}"
            # Base row: scalar attributes only.
            row = {}
            for attrib in cros:
                if attrib not in {
                        'kind', 'etag', 'tpmVersionInfo', 'recentUsers',
                        'activeTimeRanges', 'deviceFiles', 'cpuStatusReports',
                        'diskVolumeReports', 'systemRamFreeReports'
                }:
                    row[attrib] = cros[attrib]
            if selectedLists.get('activeTimeRanges'):
                timergs = cros.get('activeTimeRanges', [])
            else:
                timergs = []
            activeTimeRanges = _filterTimeRanges(timergs, startDate, endDate)
            if selectedLists.get('recentUsers'):
                recentUsers = cros.get('recentUsers', [])
            else:
                recentUsers = []
            if selectedLists.get('deviceFiles'):
                device_files = cros.get('deviceFiles', [])
            else:
                device_files = []
            deviceFiles = _filterCreateReportTime(device_files, 'createTime',
                                                  startDate, endDate)
            if selectedLists.get('cpuStatusReports'):
                cpu_reports = cros.get('cpuStatusReports', [])
            else:
                cpu_reports = []
            cpuStatusReports = _filterCreateReportTime(cpu_reports,
                                                       'reportTime', startDate,
                                                       endDate)
            if selectedLists.get('diskVolumeReports'):
                diskVolumeReports = cros.get('diskVolumeReports', [])
            else:
                diskVolumeReports = []
            if selectedLists.get('systemRamFreeReports'):
                ram_reports = cros.get('systemRamFreeReports', [])
            else:
                ram_reports = []
            systemRamFreeReports = _filterCreateReportTime(
                ram_reports, 'reportTime', startDate, endDate)
            if noLists or (not activeTimeRanges and \
                           not recentUsers and \
                           not deviceFiles and \
                           not cpuStatusReports and \
                           not diskVolumeReports and \
                           not systemRamFreeReports):
                display.add_row_titles_to_csv_file(row, csvRows, titles)
                continue
            lenATR = len(activeTimeRanges)
            lenRU = len(recentUsers)
            lenDF = len(deviceFiles)
            lenCSR = len(cpuStatusReports)
            lenDVR = len(diskVolumeReports)
            lenSRFR = len(systemRamFreeReports)
            max_len = max(lenATR, lenRU, lenDF, lenCSR, lenDVR, lenSRFR)
            # One row per index across all selected lists (zip-style),
            # limited by listLimit (0 means "no limit").
            for i in range(min(max_len, listLimit or max_len)):
                nrow = row.copy()
                if i < lenATR:
                    nrow['activeTimeRanges.date'] = \
                        activeTimeRanges[i]['date']
                    nrow['activeTimeRanges.activeTime'] = \
                        str(activeTimeRanges[i]['activeTime'])
                    active_time = activeTimeRanges[i]['activeTime']
                    nrow['activeTimeRanges.duration'] = \
                        utils.formatMilliSeconds(active_time)
                    nrow['activeTimeRanges.minutes'] = active_time // 60000
                if i < lenRU:
                    nrow['recentUsers.type'] = recentUsers[i]['type']
                    nrow['recentUsers.email'] = recentUsers[i].get('email')
                    if not nrow['recentUsers.email']:
                        if nrow['recentUsers.type'] == 'USER_TYPE_UNMANAGED':
                            nrow['recentUsers.email'] = 'UnmanagedUser'
                        else:
                            nrow['recentUsers.email'] = 'Unknown'
                if i < lenDF:
                    nrow['deviceFiles.type'] = deviceFiles[i]['type']
                    nrow['deviceFiles.createTime'] = \
                        deviceFiles[i]['createTime']
                if i < lenCSR:
                    nrow['cpuStatusReports.reportTime'] = \
                        cpuStatusReports[i]['reportTime']
                    tempInfos = cpuStatusReports[i].get('cpuTemperatureInfo', [])
                    for tempInfo in tempInfos:
                        label = tempInfo['label'].strip()
                        base = 'cpuStatusReports.cpuTemperatureInfo.'
                        nrow[f'{base}{label}'] = tempInfo['temperature']
                    cpu_field = 'cpuUtilizationPercentageInfo'
                    if cpu_field in cpuStatusReports[i]:
                        cpu_reports = cpuStatusReports[i][cpu_field]
                        cpu_pcts = [str(x) for x in cpu_reports]
                        nrow[f'cpuStatusReports.{cpu_field}'] = ','.join(cpu_pcts)
                if i < lenDVR:
                    volumeInfo = diskVolumeReports[i]['volumeInfo']
                    j = 0
                    vfield = 'diskVolumeReports.volumeInfo.'
                    for volume in volumeInfo:
                        nrow[f'{vfield}{j}.volumeId'] = \
                            volume['volumeId']
                        nrow[f'{vfield}{j}.storageFree'] = \
                            volume['storageFree']
                        nrow[f'{vfield}{j}.storageTotal'] = \
                            volume['storageTotal']
                        j += 1
                if i < lenSRFR:
                    nrow['systemRamFreeReports.reportTime'] = \
                        systemRamFreeReports[i]['reportTime']
                    ram_reports = systemRamFreeReports[i]['systemRamFreeInfo']
                    ram_info = [str(x) for x in ram_reports]
                    # Fixed typo: was 'systenRamFreeReports...'.
                    nrow['systemRamFreeReports.systemRamFreeInfo'] = \
                        ','.join(ram_info)
                display.add_row_titles_to_csv_file(nrow, csvRows, titles)
    if sortHeaders:
        display.sort_csv_titles([
            'deviceId',
        ], titles)
    display.write_csv_file(csvRows, titles, 'CrOS', todrive)
def getCrOSDeviceEntity(i, cd):
    """Resolve the CrOS device argument at sys.argv[i] into device IDs.

    Returns a tuple (next_argv_index, device_ids). Accepts 'cros_sn',
    'query', an inline 'query:...' expression, or a comma/space separated
    list of device IDs.
    """
    selector = sys.argv[i].lower()
    if selector == 'cros_sn':
        return i + 2, gam.getUsersToModify('cros_sn', sys.argv[i + 1])
    if selector == 'query':
        return i + 2, gam.getUsersToModify('crosquery', sys.argv[i + 1])
    if selector.startswith('query:'):
        query = sys.argv[i][6:]
        # 'orgunitpath:' queries are passed as an orgUnitPath parameter
        # rather than a free-form query.
        if query[:12].lower() == 'orgunitpath:':
            list_kwargs = {'orgUnitPath': query[12:]}
        else:
            list_kwargs = {'query': query}
        devices = gapi.get_all_pages(
            cd.chromeosdevices(),
            'list',
            'chromeosdevices',
            customerId=GC_Values[GC_CUSTOMER_ID],
            fields='nextPageToken,chromeosdevices(deviceId)',
            **list_kwargs)
        return i + 1, [device['deviceId'] for device in devices]
    # Plain list of device IDs, comma and/or space separated.
    return i + 1, sys.argv[i].replace(',', ' ').split()
def _getFilterDate(dateStr):
    """Parse a date string in YYYYMMDD_FORMAT into a datetime for filtering."""
    parsed = datetime.datetime.strptime(dateStr, YYYYMMDD_FORMAT)
    return parsed
def _filterTimeRanges(activeTimeRanges, startDate, endDate):
if startDate is None and endDate is None:
return activeTimeRanges
filteredTimeRanges = []
for timeRange in activeTimeRanges:
activityDate = datetime.datetime.strptime(timeRange['date'],
YYYYMMDD_FORMAT)
if ((startDate is None) or \
(activityDate >= startDate)) and \
((endDate is None) or \
(activityDate <= endDate)):
filteredTimeRanges.append(timeRange)
return filteredTimeRanges
def _filterCreateReportTime(items, timeField, startTime, endTime):
if startTime is None and endTime is None:
return items
filteredItems = []
time_format = '%Y-%m-%dT%H:%M:%S.%fZ'
for item in items:
timeValue = datetime.datetime.strptime(item[timeField], time_format)
if ((startTime is None) or \
(timeValue >= startTime)) and \
((endTime is None) or \
(timeValue <= endTime)):
filteredItems.append(item)
return filteredItems
|
#!/usr/bin/env python3
import os
import gym
import torch
import datetime
import argparse
import numpy as np
from torch import nn
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.tensorboard import SummaryWriter
from torch.distributions import Independent, Normal
from tianshou.policy import PGPolicy
from tianshou.utils import BasicLogger
from tianshou.env import SubprocVectorEnv
from tianshou.utils.net.common import Net
from tianshou.trainer import onpolicy_trainer
from tianshou.utils.net.continuous import ActorProb
from tianshou.data import Collector, ReplayBuffer, VectorReplayBuffer
def get_args():
    """Build and parse command-line arguments for the REINFORCE experiment."""
    p = argparse.ArgumentParser()
    # Environment / experiment setup.
    p.add_argument('--task', type=str, default='HalfCheetah-v3')
    p.add_argument('--seed', type=int, default=0)
    p.add_argument('--buffer-size', type=int, default=4096)
    p.add_argument('--hidden-sizes', type=int, nargs='*', default=[64, 64])
    p.add_argument('--lr', type=float, default=1e-3)
    p.add_argument('--gamma', type=float, default=0.99)
    p.add_argument('--epoch', type=int, default=100)
    p.add_argument('--step-per-epoch', type=int, default=30000)
    p.add_argument('--step-per-collect', type=int, default=2048)
    p.add_argument('--repeat-per-collect', type=int, default=1)
    # batch-size >> step-per-collect means calculating all collected data in
    # one single forward pass.
    p.add_argument('--batch-size', type=int, default=99999)
    p.add_argument('--training-num', type=int, default=64)
    p.add_argument('--test-num', type=int, default=10)
    p.add_argument('--logdir', type=str, default='log')
    p.add_argument('--render', type=float, default=0.)
    p.add_argument(
        '--device', type=str,
        default='cuda' if torch.cuda.is_available() else 'cpu')
    p.add_argument('--resume-path', type=str, default=None)
    # REINFORCE-specific options.
    p.add_argument('--rew-norm', type=int, default=True)
    # The "clip" option also works well.
    p.add_argument('--action-bound-method', type=str, default="tanh")
    p.add_argument('--lr-decay', type=int, default=True)
    return p.parse_args()
def test_reinforce(args=get_args()):
    """Train REINFORCE (PGPolicy) on a MuJoCo task and evaluate the result.

    Fix: the log-file and final-print f-strings nested single quotes inside
    single-quoted literals, which is a SyntaxError on Python < 3.12; the
    inner literals now use double quotes.
    """
    env = gym.make(args.task)
    args.state_shape = env.observation_space.shape or env.observation_space.n
    args.action_shape = env.action_space.shape or env.action_space.n
    args.max_action = env.action_space.high[0]
    print("Observations shape:", args.state_shape)
    print("Actions shape:", args.action_shape)
    print("Action range:", np.min(env.action_space.low),
          np.max(env.action_space.high))
    # train_envs = gym.make(args.task)
    train_envs = SubprocVectorEnv(
        [lambda: gym.make(args.task) for _ in range(args.training_num)],
        norm_obs=True)
    # test_envs = gym.make(args.task)
    # Test envs reuse (but do not update) the training observation stats.
    test_envs = SubprocVectorEnv(
        [lambda: gym.make(args.task) for _ in range(args.test_num)],
        norm_obs=True, obs_rms=train_envs.obs_rms, update_obs_rms=False)
    # seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    train_envs.seed(args.seed)
    test_envs.seed(args.seed)
    # model
    net_a = Net(args.state_shape, hidden_sizes=args.hidden_sizes,
                activation=nn.Tanh, device=args.device)
    actor = ActorProb(net_a, args.action_shape, max_action=args.max_action,
                      unbounded=True, device=args.device).to(args.device)
    torch.nn.init.constant_(actor.sigma_param, -0.5)
    for m in actor.modules():
        if isinstance(m, torch.nn.Linear):
            # orthogonal initialization
            torch.nn.init.orthogonal_(m.weight, gain=np.sqrt(2))
            torch.nn.init.zeros_(m.bias)
    # do last policy layer scaling, this will make initial actions have (close to)
    # 0 mean and std, and will help boost performances,
    # see https://arxiv.org/abs/2006.05990, Fig.24 for details
    for m in actor.mu.modules():
        if isinstance(m, torch.nn.Linear):
            torch.nn.init.zeros_(m.bias)
            m.weight.data.copy_(0.01 * m.weight.data)
    optim = torch.optim.Adam(actor.parameters(), lr=args.lr)
    lr_scheduler = None
    if args.lr_decay:
        # decay learning rate to 0 linearly
        max_update_num = np.ceil(
            args.step_per_epoch / args.step_per_collect) * args.epoch
        lr_scheduler = LambdaLR(
            optim, lr_lambda=lambda epoch: 1 - epoch / max_update_num)

    def dist(*logits):
        # Diagonal Gaussian over the action dimensions.
        return Independent(Normal(*logits), 1)

    policy = PGPolicy(actor, optim, dist, discount_factor=args.gamma,
                      reward_normalization=args.rew_norm, action_scaling=True,
                      action_bound_method=args.action_bound_method,
                      lr_scheduler=lr_scheduler, action_space=env.action_space)
    # collector
    if args.training_num > 1:
        buffer = VectorReplayBuffer(args.buffer_size, len(train_envs))
    else:
        buffer = ReplayBuffer(args.buffer_size)
    train_collector = Collector(policy, train_envs, buffer, exploration_noise=True)
    test_collector = Collector(policy, test_envs)
    # log
    t0 = datetime.datetime.now().strftime("%m%d_%H%M%S")
    log_file = f'seed_{args.seed}_{t0}-{args.task.replace("-", "_")}_reinforce'
    log_path = os.path.join(args.logdir, args.task, 'reinforce', log_file)
    writer = SummaryWriter(log_path)
    writer.add_text("args", str(args))
    logger = BasicLogger(writer, update_interval=10)

    def save_fn(policy):
        torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth'))

    # trainer
    result = onpolicy_trainer(
        policy, train_collector, test_collector, args.epoch, args.step_per_epoch,
        args.repeat_per_collect, args.test_num, args.batch_size,
        step_per_collect=args.step_per_collect, save_fn=save_fn, logger=logger,
        test_in_train=False)
    # Let's watch its performance!
    policy.eval()
    test_envs.seed(args.seed)
    test_collector.reset()
    result = test_collector.collect(n_episode=args.test_num, render=args.render)
    print(f'Final reward: {result["rews"].mean()}, length: {result["lens"].mean()}')


if __name__ == '__main__':
    test_reinforce()
| #!/usr/bin/env python3
import os
import gym
import torch
import datetime
import argparse
import numpy as np
from torch import nn
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.tensorboard import SummaryWriter
from torch.distributions import Independent, Normal
from tianshou.policy import PGPolicy
from tianshou.utils import BasicLogger
from tianshou.env import SubprocVectorEnv
from tianshou.utils.net.common import Net
from tianshou.trainer import onpolicy_trainer
from tianshou.utils.net.continuous import ActorProb
from tianshou.data import Collector, ReplayBuffer, VectorReplayBuffer
def get_args():
    """Build and parse command-line arguments for the REINFORCE experiment."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--task', type=str, default='HalfCheetah-v3')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--buffer-size', type=int, default=4096)
    parser.add_argument('--hidden-sizes', type=int, nargs='*', default=[64, 64])
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--epoch', type=int, default=100)
    parser.add_argument('--step-per-epoch', type=int, default=30000)
    parser.add_argument('--step-per-collect', type=int, default=2048)
    parser.add_argument('--repeat-per-collect', type=int, default=1)
    # batch-size >> step-per-collect means calculating all data in one single forward.
    parser.add_argument('--batch-size', type=int, default=99999)
    parser.add_argument('--training-num', type=int, default=64)
    parser.add_argument('--test-num', type=int, default=10)
    parser.add_argument('--logdir', type=str, default='log')
    parser.add_argument('--render', type=float, default=0.)
    parser.add_argument(
        '--device', type=str,
        default='cuda' if torch.cuda.is_available() else 'cpu')
    parser.add_argument('--resume-path', type=str, default=None)
    # reinforce special
    parser.add_argument('--rew-norm', type=int, default=True)
    # "clip" option also works well.
    parser.add_argument('--action-bound-method', type=str, default="tanh")
    parser.add_argument('--lr-decay', type=int, default=True)
    return parser.parse_args()
def test_reinforce(args=get_args()):
env = gym.make(args.task)
args.state_shape = env.observation_space.shape or env.observation_space.n
args.action_shape = env.action_space.shape or env.action_space.n
args.max_action = env.action_space.high[0]
print("Observations shape:", args.state_shape)
print("Actions shape:", args.action_shape)
print("Action range:", np.min(env.action_space.low),
np.max(env.action_space.high))
# train_envs = gym.make(args.task)
train_envs = SubprocVectorEnv(
[lambda: gym.make(args.task) for _ in range(args.training_num)],
norm_obs=True)
# test_envs = gym.make(args.task)
test_envs = SubprocVectorEnv(
[lambda: gym.make(args.task) for _ in range(args.test_num)],
norm_obs=True, obs_rms=train_envs.obs_rms, update_obs_rms=False)
# seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
train_envs.seed(args.seed)
test_envs.seed(args.seed)
# model
net_a = Net(args.state_shape, hidden_sizes=args.hidden_sizes,
activation=nn.Tanh, device=args.device)
actor = ActorProb(net_a, args.action_shape, max_action=args.max_action,
unbounded=True, device=args.device).to(args.device)
torch.nn.init.constant_(actor.sigma_param, -0.5)
for m in actor.modules():
if isinstance(m, torch.nn.Linear):
# orthogonal initialization
torch.nn.init.orthogonal_(m.weight, gain=np.sqrt(2))
torch.nn.init.zeros_(m.bias)
# do last policy layer scaling, this will make initial actions have (close to)
# 0 mean and std, and will help boost performances,
# see https://arxiv.org/abs/2006.05990, Fig.24 for details
for m in actor.mu.modules():
if isinstance(m, torch.nn.Linear):
torch.nn.init.zeros_(m.bias)
m.weight.data.copy_(0.01 * m.weight.data)
optim = torch.optim.Adam(actor.parameters(), lr=args.lr)
lr_scheduler = None
if args.lr_decay:
# decay learning rate to 0 linearly
max_update_num = np.ceil(
args.step_per_epoch / args.step_per_collect) * args.epoch
lr_scheduler = LambdaLR(
optim, lr_lambda=lambda epoch: 1 - epoch / max_update_num)
def dist(*logits):
return Independent(Normal(*logits), 1)
policy = PGPolicy(actor, optim, dist, discount_factor=args.gamma,
reward_normalization=args.rew_norm, action_scaling=True,
action_bound_method=args.action_bound_method,
lr_scheduler=lr_scheduler, action_space=env.action_space)
# collector
if args.training_num > 1:
buffer = VectorReplayBuffer(args.buffer_size, len(train_envs))
else:
buffer = ReplayBuffer(args.buffer_size)
train_collector = Collector(policy, train_envs, buffer, exploration_noise=True)
test_collector = Collector(policy, test_envs)
# log
t0 = datetime.datetime.now().strftime("%m%d_%H%M%S")
log_file = f'seed_{args.seed}_{t0}-{args.task.replace("-", "_")}_reinforce'
log_path = os.path.join(args.logdir, args.task, 'reinforce', log_file)
writer = SummaryWriter(log_path)
writer.add_text("args", str(args))
logger = BasicLogger(writer, update_interval=10)
def save_fn(policy):
torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth'))
# trainer
result = onpolicy_trainer(
policy, train_collector, test_collector, args.epoch, args.step_per_epoch,
args.repeat_per_collect, args.test_num, args.batch_size,
step_per_collect=args.step_per_collect, save_fn=save_fn, logger=logger,
test_in_train=False)
# Let's watch its performance!
policy.eval()
test_envs.seed(args.seed)
test_collector.reset()
result = test_collector.collect(n_episode=args.test_num, render=args.render)
print(f'Final reward: {result["rews"].mean()}, length: {result["lens"].mean()}')
if __name__ == '__main__':
    # Script entry point: run the REINFORCE training/eval loop with default args.
    test_reinforce()
|
# -*- coding: utf8 -*-
# @rhming
import json
import re,time,logging
import requests
import login
from random import randint
# from utils import jsonencode as json
from utils.toutiao_reward import TouTiao
from utils.jifen import encrypt_req_params, encrypt_free_login_params
class blindBox:
    """China Unicom points-mall "blind box" shake-game task.

    The class name, the explicitly-called ``_init_`` method, and all method
    names are part of the interface used by the external task runner, so
    they are preserved as-is.
    """

    def _init_(self, mobile, password):
        """Deferred initializer, invoked from :meth:`run` after the shared
        session attributes (``self.session``, ``self.version``, ...) are set.

        :param mobile: account phone number (forwarded to the TouTiao helper)
        :param password: account password (unused here; kept for interface)
        """
        self.session.headers = requests.structures.CaseInsensitiveDict({
            "origin": "https://m.jf.10010.com",
            "user-agent": self.useragent,
            "content-type": "application/json",
            "accept": "*/*",
            "referer": "https://m.jf.10010.com/cms/yuech/unicom-integral-ui/yuech-Blindbox/shake/index.html?jump=sign",
        })
        # self.version looks like "android@8.0805"; keep the numeric part only.
        self.clientVersion = self.version.split('@')[1]
        self.toutiao = TouTiao(mobile)
        self.activityId = 'Ac-9b71780cb87844b9ac3ab5d34b11dd24'

    def openPlatLineNew(self, to_url, retry=3):
        """Request the open-platform redirect so the server issues the
        ``_jf_id`` session cookie, retrying up to ``retry`` more times.

        :raises Exception: when the cookie is still missing after all retries.
        """
        try:
            url = f'https://m.client.10010.com/mobileService/openPlatform/openPlatLineNew.htm?to_url={to_url}'
            self.session.get(url=url, headers={
                "Origin": "https://img.client.10010.com",
                "X-Requested-With": "com.sinovatech.unicom.ui"
            })
            if not self.session.cookies.get('_jf_id', ''):
                raise Exception('未获取到_jf_id')
        except Exception as e:
            logging.info(e)
            if retry > 0:
                time.sleep(5)
                self.openPlatLineNew(to_url, retry - 1)
            else:
                raise Exception("[BlindBox]获取登录配置失败, 结束执行任务")

    def freeLoginRock(self):
        """Exchange the ``_jf_id`` cookie for a bearer token, cache it for
        the current day, and install it on the session headers.

        :returns: the token dict (contains ``access_token`` plus a ``t``
            date stamp used by :meth:`run` for freshness checks).
        """
        url = 'https://m.jf.10010.com/jf-yuech/p/freeLoginRock'
        data = {
            'activityId': self.activityId,
            'userCookie': self.session.cookies.get('_jf_id'),
            'userNumber': self.mobile,
            'time': self.timestamp
        }
        data = encrypt_free_login_params(data)
        resp = self.session.post(url=url, json=data)
        data = resp.json()
        token = data['data']['token']  # type: dict
        # Stamp with today's date so run() can tell whether a cached token
        # is still valid for today.
        token.update({"t": self.now_date})
        self.saveCookie(f'{self.mobile}JFToken', token)
        self.session.headers.update({
            # BUGFIX: the original nested double quotes inside a double-quoted
            # f-string, a SyntaxError on Python < 3.12.
            "authorization": f"Bearer {token['access_token']}"
        })
        return token

    def minusGameTimes(self, params, token=None, retry=1):
        """Consume one game play.

        :param params: play parameters (activityId, currentTimes, type, ...)
        :param token: kept for backward compatibility (was a mutable default
            ``{}``); not otherwise used in the current body.
        :param retry: number of re-login attempts when the response cannot
            be parsed (typically an expired token).
        :returns: ``(resultId, freeTimes, advertTimes)`` on success, else
            ``None``.
        """
        url = 'https://m.jf.10010.com/jf-yuech/api/gameResultV2/minusGameTimes'
        data = {
            'params': encrypt_req_params(params, self.session.cookies.get('_jf_id'))
        }
        resp = self.session.post(url=url, json=data)
        try:
            data = resp.json()
            return data['data']['resultId'], data['data']['freeTimes'], data['data']['advertTimes']
        except Exception:  # was a bare except; narrowed so SystemExit etc. escape
            if retry > 0:
                # Token likely expired: refresh once and retry.
                self.freeLoginRock()
                return self.minusGameTimes(params, token, retry - 1)
            return None

    def luckDrawForPrize(self, resultId):
        """Redeem a play result for a prize and log the outcome."""
        url = 'https://m.jf.10010.com/jf-yuech/api/gameResultV2/luckDrawForPrize'
        data = {
            'params': encrypt_req_params({
                'activityId': self.activityId,
                'resultId': resultId
            }, self.session.cookies.get('_jf_id'))
        }
        resp = self.session.post(url=url, json=data)
        data = resp.json()
        # BUGFIX: quote style fixed (nested double quotes in a double-quoted
        # f-string are a SyntaxError on Python < 3.12); output is unchanged.
        logging.info(f"获得 {data['data'].get('status', None)} {data['data'].get('prizeName', None)}")

    def numIntegralQuery(self):
        """Query and log the account's point balance for this activity."""
        url = 'https://m.jf.10010.com/jf-yuech/api/integralLogs/numIntegralQuery'
        params = {
            'activityId': self.activityId,
            'serviceNo': self.mobile,
            'userType': 0
        }
        resp = self.session.get(url=url, params=params)
        data = resp.json()
        logging.info(data)
        return data

    def run(self, client, user):
        """Task entry point.

        Logs in, ensures a fresh bearer token (cached per day), spends the
        free play, and — once the free play is used up — watches a reward ad
        to earn and spend an extra play.

        :param client: a ``requests.Session``-like object, already logged in
        :param user: dict with ``username`` (mobile) and ``password``
        """
        self.session = client
        self.mobile = user['username']
        self.version = 'android@8.0805'
        self.timestamp = time.time() * 1000
        self.now_date = time.strftime('%Y-%m-%d', time.localtime(self.timestamp / 1000))
        self.saveCookie = login.saveData
        self.readCookie = login.readData
        self.useragent = 'Mozilla/5.0 (Linux; Android 9; RMX1901 Build/QKQ1.190918.001; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.186 Mobile Safari/537.36; unicom{version:android@8.0805,desmobile:' + str(self.mobile) + '};devicetype{deviceBrand:Realme,deviceModel:RMX1901};{yw_code:}'
        self._init_(user['username'], user['password'])
        to_url = f'https://m.jf.10010.com/jf-order/avoidLogin/forActive/yyyqd&yw_code=&desmobile={self.mobile}&version={self.version}'
        self.openPlatLineNew(to_url)
        token = self.readCookie(f'{self.mobile}JFToken')
        # BUGFIX: the condition started with `token or ...`, which re-logged
        # in on every run even when a valid cached token existed (the cache
        # was dead code). Only refresh when the cached token is absent,
        # malformed, stale, or missing the access_token field.
        if (
            not token
            or not isinstance(token, dict)
            or token.get('t', '') != self.now_date
            or not token.get('access_token', '')
        ):
            token = self.freeLoginRock()
        self.session.headers.update({
            "authorization": f"Bearer {token['access_token']}"
        })
        params = {
            'activityId': self.activityId,
            'currentTimes': 1,
            'type': '免费'
        }
        result = self.minusGameTimes(params)
        if result is None:
            # minusGameTimes already retried once; give up for this run
            # instead of crashing on tuple-unpacking None.
            logging.info('minusGameTimes failed, aborting run')
            return
        resultId, freeTimes, advertTimes = result
        if advertTimes == 0:
            logging.info('机会已用完')
            return
        if freeTimes == 1:
            if resultId:
                self.luckDrawForPrize(resultId)
        else:
            # Free play exhausted: watch a TouTiao reward ad to earn plays.
            options = {
                'arguments1': 'AC20200611152252',
                'arguments2': '',
                'codeId': 945689604,
                'channelName': 'android-签到小游戏摇摇乐不倒翁-激励视频',
                'remark': '签到小游戏翻倍得积分',
                'ecs_token': self.session.cookies.get('ecs_token')
            }
            orderId = self.toutiao.reward(options)
            params = {
                'activityId': self.activityId,
                'currentTimes': advertTimes,
                'type': '广告',
                'orderId': orderId,
                'phoneType': 'android',
                'version': round(float(self.clientVersion), 4)
            }
            result = self.minusGameTimes(params)
            if result and result[0]:
                self.luckDrawForPrize(result[0])
if __name__ == '__main__':
    # Module is driven by an external task runner via blindBox().run();
    # nothing to do when executed directly.
    pass
| # -*- coding: utf8 -*-
# @rhming
import json
import re,time,logging
import requests
import login
from random import randint
# from utils import jsonencode as json
from utils.toutiao_reward import TouTiao
from utils.jifen import encrypt_req_params, encrypt_free_login_params
class blindBox:
    """China Unicom points-mall "blind box" shake-game task.

    The class name, the explicitly-called ``_init_`` method, and all method
    names are part of the interface used by the external task runner, so
    they are preserved as-is.
    """

    def _init_(self, mobile, password):
        """Deferred initializer, invoked from :meth:`run` after the shared
        session attributes (``self.session``, ``self.version``, ...) are set.

        :param mobile: account phone number (forwarded to the TouTiao helper)
        :param password: account password (unused here; kept for interface)
        """
        self.session.headers = requests.structures.CaseInsensitiveDict({
            "origin": "https://m.jf.10010.com",
            "user-agent": self.useragent,
            "content-type": "application/json",
            "accept": "*/*",
            "referer": "https://m.jf.10010.com/cms/yuech/unicom-integral-ui/yuech-Blindbox/shake/index.html?jump=sign",
        })
        # self.version looks like "android@8.0805"; keep the numeric part only.
        self.clientVersion = self.version.split('@')[1]
        self.toutiao = TouTiao(mobile)
        self.activityId = 'Ac-9b71780cb87844b9ac3ab5d34b11dd24'

    def openPlatLineNew(self, to_url, retry=3):
        """Request the open-platform redirect so the server issues the
        ``_jf_id`` session cookie, retrying up to ``retry`` more times.

        :raises Exception: when the cookie is still missing after all retries.
        """
        try:
            url = f'https://m.client.10010.com/mobileService/openPlatform/openPlatLineNew.htm?to_url={to_url}'
            self.session.get(url=url, headers={
                "Origin": "https://img.client.10010.com",
                "X-Requested-With": "com.sinovatech.unicom.ui"
            })
            if not self.session.cookies.get('_jf_id', ''):
                raise Exception('未获取到_jf_id')
        except Exception as e:
            logging.info(e)
            if retry > 0:
                time.sleep(5)
                self.openPlatLineNew(to_url, retry - 1)
            else:
                raise Exception("[BlindBox]获取登录配置失败, 结束执行任务")

    def freeLoginRock(self):
        """Exchange the ``_jf_id`` cookie for a bearer token, cache it for
        the current day, and install it on the session headers.

        :returns: the token dict (contains ``access_token`` plus a ``t``
            date stamp used by :meth:`run` for freshness checks).
        """
        url = 'https://m.jf.10010.com/jf-yuech/p/freeLoginRock'
        data = {
            'activityId': self.activityId,
            'userCookie': self.session.cookies.get('_jf_id'),
            'userNumber': self.mobile,
            'time': self.timestamp
        }
        data = encrypt_free_login_params(data)
        resp = self.session.post(url=url, json=data)
        data = resp.json()
        token = data['data']['token']  # type: dict
        # Stamp with today's date so run() can tell whether a cached token
        # is still valid for today.
        token.update({"t": self.now_date})
        self.saveCookie(f'{self.mobile}JFToken', token)
        self.session.headers.update({
            "authorization": f"Bearer {token['access_token']}"
        })
        return token

    def minusGameTimes(self, params, token=None, retry=1):
        """Consume one game play.

        :param params: play parameters (activityId, currentTimes, type, ...)
        :param token: kept for backward compatibility (was a mutable default
            ``{}``); not otherwise used in the current body.
        :param retry: number of re-login attempts when the response cannot
            be parsed (typically an expired token).
        :returns: ``(resultId, freeTimes, advertTimes)`` on success, else
            ``None``.
        """
        url = 'https://m.jf.10010.com/jf-yuech/api/gameResultV2/minusGameTimes'
        data = {
            'params': encrypt_req_params(params, self.session.cookies.get('_jf_id'))
        }
        resp = self.session.post(url=url, json=data)
        try:
            data = resp.json()
            return data['data']['resultId'], data['data']['freeTimes'], data['data']['advertTimes']
        except Exception:  # was a bare except; narrowed so SystemExit etc. escape
            if retry > 0:
                # Token likely expired: refresh once and retry.
                self.freeLoginRock()
                return self.minusGameTimes(params, token, retry - 1)
            return None

    def luckDrawForPrize(self, resultId):
        """Redeem a play result for a prize and log the outcome."""
        url = 'https://m.jf.10010.com/jf-yuech/api/gameResultV2/luckDrawForPrize'
        data = {
            'params': encrypt_req_params({
                'activityId': self.activityId,
                'resultId': resultId
            }, self.session.cookies.get('_jf_id'))
        }
        resp = self.session.post(url=url, json=data)
        data = resp.json()
        logging.info(f"获得 {data['data'].get('status', None)} {data['data'].get('prizeName', None)}")

    def numIntegralQuery(self):
        """Query and log the account's point balance for this activity."""
        url = 'https://m.jf.10010.com/jf-yuech/api/integralLogs/numIntegralQuery'
        params = {
            'activityId': self.activityId,
            'serviceNo': self.mobile,
            'userType': 0
        }
        resp = self.session.get(url=url, params=params)
        data = resp.json()
        logging.info(data)
        return data

    def run(self, client, user):
        """Task entry point.

        Logs in, ensures a fresh bearer token (cached per day), spends the
        free play, and — once the free play is used up — watches a reward ad
        to earn and spend an extra play.

        :param client: a ``requests.Session``-like object, already logged in
        :param user: dict with ``username`` (mobile) and ``password``
        """
        self.session = client
        self.mobile = user['username']
        self.version = 'android@8.0805'
        self.timestamp = time.time() * 1000
        self.now_date = time.strftime('%Y-%m-%d', time.localtime(self.timestamp / 1000))
        self.saveCookie = login.saveData
        self.readCookie = login.readData
        self.useragent = 'Mozilla/5.0 (Linux; Android 9; RMX1901 Build/QKQ1.190918.001; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.186 Mobile Safari/537.36; unicom{version:android@8.0805,desmobile:' + str(self.mobile) + '};devicetype{deviceBrand:Realme,deviceModel:RMX1901};{yw_code:}'
        self._init_(user['username'], user['password'])
        to_url = f'https://m.jf.10010.com/jf-order/avoidLogin/forActive/yyyqd&yw_code=&desmobile={self.mobile}&version={self.version}'
        self.openPlatLineNew(to_url)
        token = self.readCookie(f'{self.mobile}JFToken')
        # BUGFIX: the condition started with `token or ...`, which re-logged
        # in on every run even when a valid cached token existed (the cache
        # was dead code). Only refresh when the cached token is absent,
        # malformed, stale, or missing the access_token field.
        if (
            not token
            or not isinstance(token, dict)
            or token.get('t', '') != self.now_date
            or not token.get('access_token', '')
        ):
            token = self.freeLoginRock()
        self.session.headers.update({
            "authorization": f"Bearer {token['access_token']}"
        })
        params = {
            'activityId': self.activityId,
            'currentTimes': 1,
            'type': '免费'
        }
        result = self.minusGameTimes(params)
        if result is None:
            # minusGameTimes already retried once; give up for this run
            # instead of crashing on tuple-unpacking None.
            logging.info('minusGameTimes failed, aborting run')
            return
        resultId, freeTimes, advertTimes = result
        if advertTimes == 0:
            logging.info('机会已用完')
            return
        if freeTimes == 1:
            if resultId:
                self.luckDrawForPrize(resultId)
        else:
            # Free play exhausted: watch a TouTiao reward ad to earn plays.
            options = {
                'arguments1': 'AC20200611152252',
                'arguments2': '',
                'codeId': 945689604,
                'channelName': 'android-签到小游戏摇摇乐不倒翁-激励视频',
                'remark': '签到小游戏翻倍得积分',
                'ecs_token': self.session.cookies.get('ecs_token')
            }
            orderId = self.toutiao.reward(options)
            params = {
                'activityId': self.activityId,
                'currentTimes': advertTimes,
                'type': '广告',
                'orderId': orderId,
                'phoneType': 'android',
                'version': round(float(self.clientVersion), 4)
            }
            result = self.minusGameTimes(params)
            if result and result[0]:
                self.luckDrawForPrize(result[0])
if __name__ == '__main__':
    # Module is driven by an external task runner via blindBox().run();
    # nothing to do when executed directly.
    pass
|
import errno
import json
import logging
import os
import os.path
from collections import defaultdict, namedtuple
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from itertools import groupby
import attr
from memoized import memoized
from sqlalchemy import (
Column,
Integer,
String,
Text,
bindparam,
func,
)
from sqlalchemy.exc import IntegrityError
from corehq.apps.hqwebapp.encoders import LazyEncoder
from corehq.apps.tzmigration.planning import Base, DiffDB, PlanningDiff as Diff
from corehq.apps.tzmigration.timezonemigration import MISSING, json_diff
from corehq.util.datadog.gauges import datadog_counter
from corehq.util.log import with_progress_bar
from .diff import filter_form_diffs
log = logging.getLogger(__name__)
def init_state_db(domain, state_dir):
    """Create (if needed) and open the migration state DB for ``domain``.

    The db lives in a ``db`` subdirectory of ``state_dir``, which is
    created when ``state_dir`` itself exists but the subdirectory does not.
    """
    db_filepath = _get_state_db_filepath(domain, state_dir)
    parent_dir = os.path.dirname(db_filepath)
    if os.path.isdir(state_dir) and not os.path.isdir(parent_dir):
        os.mkdir(parent_dir)
    return StateDB.init(domain, db_filepath)
def open_state_db(domain, state_dir, *, readonly=True):
    """Open an existing state db (read-only unless ``readonly=False``).

    :raises Error: if the database file does not exist.
    """
    path = _get_state_db_filepath(domain, state_dir)
    if not os.path.exists(path):
        raise Error(f"not found: {path}")
    return StateDB.open(domain, path, readonly=readonly)
def delete_state_db(domain, state_dir):
    """Delete the state db file for ``domain``; a missing file is not an error."""
    db_filepath = _get_state_db_filepath(domain, state_dir)
    try:
        os.remove(db_filepath)
    except FileNotFoundError:
        # Python-3 idiom for the old `errno.ENOENT` check; other OSErrors
        # (permissions, is-a-directory, ...) still propagate.
        pass
def _get_state_db_filepath(domain, state_dir):
return os.path.join(state_dir, "db", '{}-couch-sql.db'.format(domain))
class StateDB(DiffDB):
    """SQLite-backed state store for the couch-to-sql domain migration.

    Persists per-case form counters, document diffs and changes, missing
    document ids, resume state, and miscellaneous key/value data.
    """

    @classmethod
    def init(cls, domain, path):
        """Open the db at ``path``, creating it if necessary.

        New databases get a unique id; existing ones are migrated to the
        current schema.
        """
        is_new_db = not os.path.exists(path)
        db = super(StateDB, cls).init(domain, path)
        if is_new_db:
            db._set_kv("db_unique_id", datetime.utcnow().strftime("%Y%m%d-%H%M%S.%f"))
        else:
            db._migrate()
        return db

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.is_rebuild = False

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def close(self):
        self.engine.dispose()

    @contextmanager
    def session(self, session=None):
        """Yield a session, committing on success.

        When an existing ``session`` is passed it is yielded as-is and the
        caller remains responsible for commit/close.
        """
        if session is not None:
            yield session
            return
        session = self.Session()
        try:
            yield session
            session.commit()
        finally:
            session.close()

    @property
    @memoized
    def unique_id(self):
        """Stable identifier assigned when the database was created."""
        with self.session() as session:
            return self._get_kv("db_unique_id", session).value

    def get(self, name, default=None):
        """Return the JSON-decoded value stored under ``name`` (see :meth:`set`)."""
        with self.session() as session:
            kv = self._get_kv(f"kv-{name}", session)
            if kv is None:
                return default
            return json.loads(kv.value)

    def set(self, name, value):
        """Store a JSON-serializable ``value`` under ``name``."""
        self._upsert(KeyValue, KeyValue.key, f"kv-{name}", json.dumps(value))

    def update_cases(self, case_records):
        """Update case total and processed form counts

        :param case_records: iterable of objects, each having the attributes:

            - id: case id
            - total_forms: number of forms known to update the case.
            - processed_forms: number of forms updating the case that
              have been processed.

        :returns: list of three-tuples `(case_id, total_forms, processed_forms)`
        """
        params = [
            {"case": rec.id, "total": rec.total_forms, "proc": rec.processed_forms}
            for rec in case_records
        ]
        with self.session() as session:
            # SQLite upsert: keep the max known total; accumulate processed.
            session.execute(
                """
                REPLACE INTO {table} (case_id, total_forms, processed_forms)
                VALUES (
                    :case,
                    MAX(COALESCE((
                        SELECT total_forms
                        FROM {table}
                        WHERE case_id = :case
                    ), 0), :total),
                    COALESCE((
                        SELECT processed_forms
                        FROM {table}
                        WHERE case_id = :case
                    ), 0) + :proc
                )
                """.format(table=CaseForms.__tablename__),
                params,
            )
            case_ids = [p["case"] for p in params]
            query = session.query(CaseForms).filter(CaseForms.case_id.in_(case_ids))
            result = [(c.case_id, c.total_forms, c.processed_forms) for c in query]
            assert len(case_ids) == len(result), (case_ids, result)
        return result

    def add_processed_forms(self, cases):
        """Increment processed forms count for each of the given cases

        :param cases: dict `{<case_id>: <processed_form_count>, ...}`
        :returns: list of three-tuples `(case_id, total_forms, processed_forms)`
            where `total_forms` is `None` for unknown cases.
        """
        case_col = CaseForms.case_id
        proc_col = CaseForms.processed_forms
        params = [{"case": c, "proc": p} for c, p in cases.items()]
        with self.session() as session:
            session.execute(
                CaseForms.__table__.update()
                .where(case_col == bindparam("case"))
                .values({proc_col: proc_col + bindparam("proc")}),
                params,
            )
            query = session.query(CaseForms).filter(case_col.in_(cases))
            case_forms = {cf.case_id: cf for cf in query}

        def make_result(case_id):
            case = case_forms.get(case_id)
            if case is None:
                return (case_id, None, None)
            return (case_id, case.total_forms, case.processed_forms)
        return [make_result(case_id) for case_id in cases]

    def iter_cases_with_unprocessed_forms(self):
        """Yield ``(case_id, total_forms)`` for cases with forms left to process."""
        query = self.Session().query(
            CaseForms.case_id,
            CaseForms.total_forms,
        ).filter(CaseForms.total_forms > CaseForms.processed_forms)
        for case_id, total_forms in iter_large(query, CaseForms.case_id):
            yield case_id, total_forms

    def get_forms_count(self, case_id):
        """Return the number of forms known to touch ``case_id`` (0 if unknown)."""
        with self.session() as session:
            query = session.query(CaseForms.total_forms).filter_by(case_id=case_id)
            return query.scalar() or 0

    def add_cases_to_diff(self, case_ids):
        """Queue cases for diffing (idempotent via INSERT OR IGNORE)."""
        if not case_ids:
            return
        with self.session() as session:
            session.execute(
                f"INSERT OR IGNORE INTO {CaseToDiff.__tablename__} (id) VALUES (:id)",
                [{"id": x} for x in case_ids],
            )

    def add_diffed_cases(self, case_ids):
        """Mark cases as diffed and remove them from the to-diff queue."""
        if not case_ids:
            return
        with self.session() as session:
            session.execute(
                f"INSERT OR IGNORE INTO {DiffedCase.__tablename__} (id) VALUES (:id)",
                [{"id": x} for x in case_ids],
            )
            (
                session.query(CaseToDiff)
                .filter(CaseToDiff.id.in_(case_ids))
                .delete(synchronize_session=False)
            )

    def iter_undiffed_case_ids(self):
        """Yield ids of cases still queued for diffing."""
        query = self.Session().query(CaseToDiff.id)
        for case_id, in iter_large(query, CaseToDiff.id):
            yield case_id

    def count_undiffed_cases(self):
        with self.session() as session:
            return session.query(CaseToDiff).count()

    def iter_case_ids_with_diffs(self):
        """Yield ids of cases that have recorded diffs."""
        query = (
            self.Session().query(DocDiffs.doc_id)
            .filter(DocDiffs.kind == "CommCareCase")
        )
        for doc_id, in iter_large(query, DocDiffs.doc_id):
            yield doc_id

    def count_case_ids_with_diffs(self):
        with self.session() as session:
            return (
                session.query(DocDiffs.doc_id)
                .filter(DocDiffs.kind == "CommCareCase")
                .count()
            )

    def add_problem_form(self, form_id):
        """Add form to be migrated with "unprocessed" forms

        A "problem" form is an error form with normal doctype (XFormInstance)
        """
        with self.session() as session:
            session.add(ProblemForm(id=form_id))

    def iter_problem_forms(self):
        query = self.Session().query(ProblemForm.id)
        for form_id, in iter_large(query, ProblemForm.id):
            yield form_id

    def add_no_action_case_form(self, form_id):
        """Record a form that touches cases without actions (idempotent)."""
        try:
            with self.session() as session:
                session.add(NoActionCaseForm(id=form_id))
        except IntegrityError:
            pass  # already recorded
        else:
            # Invalidate the memoized set so the new id is visible.
            self.get_no_action_case_forms.reset_cache(self)

    @memoized
    def get_no_action_case_forms(self):
        """Get the set of form ids that touch cases without actions"""
        return {x for x, in self.Session().query(NoActionCaseForm.id)}

    def set_resume_state(self, key, value):
        resume_key = "resume-{}".format(key)
        self._upsert(KeyValue, KeyValue.key, resume_key, json.dumps(value))

    @contextmanager
    def pop_resume_state(self, key, default):
        """Yield and consume the saved resume state for ``key``.

        While the context is open the stored value is RESUME_NOT_ALLOWED so
        a crashed session cannot be resumed from stale state.
        """
        resume_key = "resume-{}".format(key)
        with self.session() as session:
            kv = self._get_kv(resume_key, session)
            if kv is None:
                self._set_kv(resume_key, RESUME_NOT_ALLOWED, session)
                yield default
            elif self.is_rebuild:
                yield default
            elif kv.value == RESUME_NOT_ALLOWED:
                raise ResumeError("previous session did not save resume state")
            else:
                yield json.loads(kv.value)
                kv.value = RESUME_NOT_ALLOWED

    def _get_kv(self, key, session):
        return session.query(KeyValue).get(key)

    def _set_kv(self, key, value, session=None):
        with self.session(session) as session:
            session.add(KeyValue(key=key, value=value))

    def _upsert(self, model, key_field, key, value, incr=False):
        """Set ``model.value`` for ``key``, inserting the row when absent.

        With ``incr=True`` the existing value is incremented by ``value``.
        """
        with self.session() as session:
            updated = (
                session.query(model)
                .filter(key_field == key)
                .update(
                    {model.value: (model.value + value) if incr else value},
                    synchronize_session=False,
                )
            )
            if not updated:
                obj = model(value=value)
                key_field.__set__(obj, key)
                session.add(obj)
            else:
                assert updated == 1, (key, updated)

    def add_missing_docs(self, kind, doc_ids):
        with self.session() as session:
            session.bulk_save_objects([
                MissingDoc(kind=kind, doc_id=doc_id)
                for doc_id in doc_ids
            ])

    def delete_missing_docs(self, kind):
        with self.session() as session:
            (
                session.query(MissingDoc)
                .filter_by(kind=kind)
                .delete(synchronize_session=False)
            )

    def doc_not_missing(self, kind, doc_id):
        """Remove a previously recorded missing doc (it was found after all)."""
        with self.session() as session:
            (
                session.query(MissingDoc.doc_id)
                .filter_by(kind=kind, doc_id=doc_id)
                .delete(synchronize_session=False)
            )

    def save_form_diffs(self, couch_json, sql_json):
        """Diff a couch form against its sql counterpart and persist the diffs."""
        diffs = json_diff(couch_json, sql_json, track_list_indices=False)
        diffs = filter_form_diffs(couch_json, sql_json, diffs)
        dd_count = partial(datadog_counter, tags=["domain:" + self.domain])
        dd_count("commcare.couchsqlmigration.form.diffed")
        doc_type = couch_json["doc_type"]
        doc_id = couch_json["_id"]
        self.add_diffs(doc_type, doc_id, diffs)
        if diffs:
            dd_count("commcare.couchsqlmigration.form.has_diff")

    def replace_case_diffs(self, case_diffs, **kw):
        """Persist case diffs, folding "stock state" diffs into their case."""
        diffs_by_doc = defaultdict(list)
        for kind, doc_id, diffs in case_diffs:
            assert all(isinstance(d.path, (list, tuple)) for d in diffs), diffs
            if kind == "stock state":
                # stock state doc ids start with "<case_id>/"
                case_id = doc_id.split("/", 1)[0]
                diffs = [
                    d._replace(path={"stock_id": doc_id, "path": d.path})
                    for d in diffs
                ]
                diffs_by_doc[("CommCareCase", case_id)].extend(diffs)
            else:
                diffs_by_doc[(kind, doc_id)].extend(diffs)
        for (doc_type, case_id), diffs in diffs_by_doc.items():
            self.add_diffs(doc_type, case_id, diffs, **kw)

    def add_diffs(self, kind, doc_id, diffs, *, session=None, _model=None):
        """Replace stored diffs for (kind, doc_id); empty ``diffs`` deletes the row."""
        if _model is None:
            _model = DocDiffs
        to_dict = _model.diff_to_dict
        assert kind != "stock state", ("stock state diffs should be "
            "combined with other diffs for the same case")
        if diffs:
            diff_json = json.dumps([to_dict(d) for d in diffs], cls=LazyEncoder)
            with self.session(session) as session:
                session.execute(
                    f"""
                    REPLACE INTO {_model.__tablename__} (kind, doc_id, diffs)
                    VALUES (:kind, :doc_id, :diffs)
                    """,
                    [{"kind": kind, "doc_id": doc_id, "diffs": diff_json}],
                )
        else:
            with self.session(session) as session:
                session.query(_model).filter(
                    _model.kind == kind,
                    _model.doc_id == doc_id,
                ).delete(synchronize_session=False)

    def replace_case_changes(self, changes):
        self.replace_case_diffs(changes, _model=DocChanges)

    def iter_diffs(self, *, _model=None):
        """Yield individual diff objects for all kinds."""
        if _model is None:
            _model = DocDiffs
        with self.session() as session:
            for kind, in list(session.query(_model.kind).distinct()):
                query = session.query(_model).filter_by(kind=kind)
                for doc in iter_large(query, _model.doc_id):
                    for data in json.loads(doc.diffs):
                        yield _model.dict_to_diff(doc.kind, doc.doc_id, data)

    def iter_changes(self):
        return self.iter_diffs(_model=DocChanges)

    def iter_doc_diffs(self, kind=None, _model=None):
        """Iterate over diffs of the given kind

        "stock state" diffs cannot be queried directly with this method.
        They are grouped with diffs of the corresponding case
        (kind="CommCareCase", doc_id=<case_id>).

        :yields: three-tuples `(kind, doc_id, diffs)`. The diffs yielded
            here are `PlanningDiff` objects, which should not be confused
            with json diffs (`<PlanningDiff>.json_diff`).
        """
        # DOCFIX: the docstring previously said "two-tuples (doc_id, diffs)"
        # (and misspelled "yields") but the code yields three elements.
        if _model is None:
            _model = DocDiffs
        with self.session() as session:
            query = session.query(_model)
            if kind is not None:
                query = query.filter_by(kind=kind)
            for doc in iter_large(query, _model.doc_id):
                yield doc.kind, doc.doc_id, [
                    _model.dict_to_diff(doc.kind, doc.doc_id, data)
                    for data in json.loads(doc.diffs)
                ]

    def iter_doc_changes(self, kind=None):
        return self.iter_doc_diffs(kind, _model=DocChanges)

    def get_diffs(self):
        """DEPRECATED use iter_diffs(); the result may be very large"""
        return list(self.iter_diffs())

    def set_counter(self, kind, value):
        self._upsert(DocCount, DocCount.kind, kind, value)

    def get_doc_counts(self):
        """Returns a dict of counts by kind

        Values are `Counts` objects having `total`, `missing`, `diffs`,
        and `changes` fields:

        - total: number of items counted with `set_counter`.
        - missing: count of ids found in Couch but not in SQL.
        - diffs: count of docs with diffs.
        - changes: count of docs with changes.
        """
        with self.session() as session:
            totals = {dc.kind: dc.value for dc in session.query(DocCount)}
            diffs = dict(session.query(
                DocDiffs.kind,
                func.count(DocDiffs.doc_id),
            ).group_by(DocDiffs.kind))
            missing = dict(session.query(
                MissingDoc.kind,
                func.count(MissingDoc.doc_id),
            ).group_by(MissingDoc.kind))
            changes = dict(session.query(
                DocChanges.kind,
                func.count(DocChanges.doc_id),
            ).group_by(DocChanges.kind))
            # BUGFIX: set(changes) was missing from the key union, so a kind
            # with only changes (no totals/missing/diffs) would be dropped.
            return {kind: Counts(
                total=totals.get(kind, 0),
                diffs=diffs.get(kind, 0),
                missing=missing.get(kind, 0),
                changes=changes.get(kind, 0),
            ) for kind in set(totals) | set(missing) | set(diffs) | set(changes)}

    def iter_missing_doc_ids(self, kind):
        with self.session() as session:
            query = (
                session.query(MissingDoc.doc_id)
                .filter(MissingDoc.kind == kind)
            )
            yield from (x for x, in iter_large(query, MissingDoc.doc_id))

    def get_diff_stats(self):
        raise NotImplementedError("use get_doc_counts")

    def clone_casediff_data_from(self, casediff_state_path):
        """Copy casediff state into this state db

        model analysis

        - CaseForms - casediff r/w
        - Diff - deprecated
        - KeyValue - casediff r/w, main r/w (different keys)
        - DocCount - casediff w, main r
        - DocDiffs - casediff w (case and stock kinds), main r/w
        - DocChanges - casediff w (case and stock kinds), main r/w
        - MissingDoc - casediff w, main r
        - NoActionCaseForm - main r/w
        - ProblemForm - main r/w
        """
        def quote(value):
            assert isinstance(value, str) and "'" not in value, repr(value)
            return f"'{value}'"

        def quotelist(values):
            # BUGFIX: nested double quotes inside a double-quoted f-string
            # are a SyntaxError before Python 3.12; use single quotes.
            return f"({', '.join(quote(v) for v in values)})"

        def is_id(column):
            return column.key == "id" and isinstance(column.type, Integer)

        def copy(model, session, where_expr=None):
            log.info("copying casediff data: %s", model.__name__)
            where = f"WHERE {where_expr}" if where_expr else ""
            fields = ", ".join(c.key for c in model.__table__.columns if not is_id(c))
            session.execute(f"DELETE FROM main.{model.__tablename__} {where}")
            session.execute(f"""
                INSERT INTO main.{model.__tablename__} ({fields})
                SELECT {fields} FROM cddb.{model.__tablename__} {where}
            """)

        log.info("checking casediff data preconditions...")
        casediff_db = type(self).open(self.domain, casediff_state_path)
        with casediff_db.session() as cddb:
            expect_casediff_kinds = {
                "CommCareCase",
                "CommCareCase-Deleted",
                "stock state",
            }
            casediff_kinds = {k for k, in cddb.query(DocDiffs.kind).distinct()}
            casediff_kinds.update(k for k, in cddb.query(DocChanges.kind).distinct())
            assert not casediff_kinds - expect_casediff_kinds, casediff_kinds
            resume_keys = [
                key for key, in cddb.query(KeyValue.key)
                .filter(KeyValue.key.startswith("resume-"))
            ]
            assert all("Case" in key for key in resume_keys), resume_keys
            count_kinds = [k for k, in cddb.query(DocCount.kind).distinct()]
            assert all("CommCareCase" in k for k in count_kinds), count_kinds
            missing_kinds = [m for m, in cddb.query(MissingDoc.kind).distinct()]
            assert all("CommCareCase" in k for k in missing_kinds), missing_kinds
        casediff_db.close()
        with self.session() as session:
            session.execute(f"ATTACH DATABASE {quote(casediff_state_path)} AS cddb")
            copy(CaseForms, session)
            copy(Diff, session, f"kind IN {quotelist(expect_casediff_kinds)}")
            copy(DocDiffs, session, f"kind IN {quotelist(expect_casediff_kinds)}")
            copy(DocChanges, session, f"kind IN {quotelist(expect_casediff_kinds)}")
            copy(KeyValue, session, f"key IN {quotelist(resume_keys)}")
            copy(DocCount, session)
            copy(MissingDoc, session)

    def _migrate(self):
        with self.session() as session:
            self._migrate_diff_to_docdiffs(session)

    def _migrate_diff_to_docdiffs(self, session):
        """One-time migration of legacy ``Diff`` rows into ``DocDiffs``."""
        if session.query(session.query(DocDiffs).exists()).scalar():
            return  # already migrated
        if not session.query(session.query(Diff).exists()).scalar():
            return  # nothing to migrate
        log.info("migrating PlanningDiff to DocDiffs...")
        base_query = session.query(Diff).filter(Diff.kind != "stock state")
        count = base_query.count()
        query = base_query.order_by(Diff.kind, Diff.doc_id)
        items = with_progress_bar(query, count, oneline="concise", prefix="main diffs")
        for (kind, doc_id), diffs in groupby(items, lambda d: (d.kind, d.doc_id)):
            diffs = [d.json_diff for d in diffs]
            self.add_diffs(kind, doc_id, diffs, session=session)
        # "stock state" diffs must be migrated after "CommCareCase"
        # diffs since it will probably replace some of them
        self._migrate_stock_state_diffs(session)

    def _migrate_stock_state_diffs(self, session):
        """Fold legacy "stock state" diffs into their case's DocDiffs row."""
        def get_case_diffs(case_id):
            case_diffs = session.query(Diff).filter_by(doc_id=case_id)
            return [d.json_diff for d in case_diffs]
        query = session.query(Diff).filter_by(kind="stock state")
        count = query.count()
        stock_state_diffs = with_progress_bar(
            query, count, oneline="concise", prefix="stock state cases")
        diffs_by_doc = defaultdict(list)
        for stock_diff in stock_state_diffs:
            # doc_id is "<case_id>/..." with exactly two more "/" components
            case_id, _, _ = stock_diff.doc_id.split("/")
            key = ("CommCareCase", case_id)
            jsdiff = stock_diff.json_diff
            stock_json_diff = jsdiff._replace(path={
                "stock_id": stock_diff.doc_id,
                "path": jsdiff.path,
            })
            if key not in diffs_by_doc:
                # first stock diff for this case: pull in the existing case
                # diffs so the REPLACE in add_diffs does not drop them
                diffs_by_doc[key].extend(get_case_diffs(case_id))
            diffs_by_doc[key].append(stock_json_diff)
        for (doc_type, case_id), diffs in diffs_by_doc.items():
            self.add_diffs(doc_type, case_id, diffs, session=session)

    def vacuum(self):
        """Reclaim free space in the underlying SQLite file."""
        with self.session() as session:
            session.execute("VACUUM")
class Error(Exception):
    """Base error for this module (e.g. a missing state db file)."""
    pass
class ResumeError(Exception):
    """Raised when a previous session did not save resume state."""
    pass
# Sentinel stored under a resume key while a session is in progress; seeing
# it on the next run means the previous session never saved resume state.
RESUME_NOT_ALLOWED = "RESUME_NOT_ALLOWED"
class CaseForms(Base):
    """Per-case form counters: total forms touching the case vs processed."""
    __tablename__ = "caseforms"
    case_id = Column(String(50), nullable=False, primary_key=True)
    total_forms = Column(Integer, nullable=False)
    processed_forms = Column(Integer, nullable=False, default=0)
class CaseToDiff(Base):
    """Case id queued for diffing (see ``StateDB.add_cases_to_diff``)."""
    __tablename__ = 'case_to_diff'
    id = Column(String(50), nullable=False, primary_key=True)
class DiffedCase(Base):
    """Case id whose diffing completed (see ``StateDB.add_diffed_cases``)."""
    __tablename__ = 'diffed_case'
    id = Column(String(50), nullable=False, primary_key=True)
class DocCount(Base):
    """Counter value per document kind (written via ``StateDB.set_counter``)."""
    __tablename__ = 'doc_count'
    kind = Column(String(50), primary_key=True)
    value = Column(Integer, nullable=False)
class DocDiffs(Base):
    """JSON-serialized list of diffs for a ``(kind, doc_id)`` pair."""
    __tablename__ = 'doc_diffs'
    kind = Column(String(50), nullable=False, primary_key=True)
    doc_id = Column(String(50), nullable=False, primary_key=True)
    diffs = Column(Text(), nullable=False)

    # These converters take no self/cls and are always invoked via the class
    # (e.g. `_model.diff_to_dict`); marking them @staticmethod preserves that
    # usage and prevents an accidental instance call from passing the row as
    # the first argument.
    @staticmethod
    def diff_to_dict(diff):
        """Serialize a diff object to a JSON-safe dict; MISSING old/new
        values are omitted rather than stored."""
        data = {"type": diff.diff_type, "path": diff.path}
        if diff.old_value is not MISSING:
            data["old_value"] = diff.old_value
        if diff.new_value is not MISSING:
            data["new_value"] = diff.new_value
        return data

    @staticmethod
    def dict_to_diff(kind, doc_id, data, *, _make_diff=Diff):
        """Inverse of ``diff_to_dict``.

        Dict-shaped paths carrying a ``stock_id`` are unpacked back into
        "stock state" diffs; ``_make_diff`` lets DocChanges reuse this to
        build ``Change`` objects instead.
        """
        def json_or_none(data, key):
            return json.dumps(data[key]) if key in data else None
        path = data["path"]
        if len(path) == 2 and isinstance(path, dict):
            assert path.keys() == {"stock_id", "path"}, path
            assert path["stock_id"].startswith(doc_id + "/"), (doc_id, path)
            kind = "stock state"
            doc_id = path["stock_id"]
            path = path["path"]
        return _make_diff(
            kind=kind,
            doc_id=doc_id,
            diff_type=data["type"],
            path=json.dumps(path),
            old_value=json_or_none(data, "old_value"),
            new_value=json_or_none(data, "new_value"),
        )
class DocChanges(Base):
    """Like ``DocDiffs`` but each serialized entry also records a reason."""
    __tablename__ = 'doc_changes'
    kind = Column(String(50), nullable=False, primary_key=True)
    doc_id = Column(String(50), nullable=False, primary_key=True)
    diffs = Column(Text(), nullable=False)

    # @staticmethod added for the same reason as on DocDiffs: these are
    # always called via the class and take no self/cls.
    @staticmethod
    def diff_to_dict(diff):
        """Serialize like ``DocDiffs.diff_to_dict`` plus the change reason."""
        data = DocDiffs.diff_to_dict(diff)
        data["reason"] = diff.reason
        return data

    @staticmethod
    def dict_to_diff(kind, doc_id, data):
        """Deserialize into a ``Change``; absent path/old/new become MISSING."""
        def change(**kw):
            for key in ["path", "old_value", "new_value"]:
                kw[key] = MISSING if kw[key] is None else json.loads(kw[key])
            return Change(reason=data["reason"], **kw)
        return DocDiffs.dict_to_diff(kind, doc_id, data, _make_diff=change)
@attr.s
class Change:
    """A diff plus the ``reason`` it was recorded.

    Exposes ``json_diff`` and ``_replace`` so callers can treat it like a
    PlanningDiff / json diff (see ``DocChanges.dict_to_diff``).
    """
    kind = attr.ib()
    doc_id = attr.ib()
    reason = attr.ib()
    diff_type = attr.ib()
    path = attr.ib()
    old_value = attr.ib()
    new_value = attr.ib()
    @property
    def json_diff(self):
        # A Change is its own json-diff representation.
        return self
    def _replace(self, **data):
        # namedtuple-style copy-with-overrides.
        cls = type(self)
        for att in attr.fields(cls):
            if att.name not in data:
                data[att.name] = getattr(self, att.name)
        return cls(**data)
class KeyValue(Base):
    # Generic key/value store: resume state, "kv-" settings, db_unique_id.
    __tablename__ = "keyvalue"
    key = Column(String(50), nullable=False, primary_key=True)
    value = Column(Text(), nullable=False)
class MissingDoc(Base):
    # Documents found in Couch but not in SQL.
    __tablename__ = 'missing_doc'
    kind = Column(String(50), nullable=False, primary_key=True)
    doc_id = Column(String(50), nullable=False, primary_key=True)
class NoActionCaseForm(Base):
    # Forms that touch cases without case actions.
    __tablename__ = "noactioncaseform"
    id = Column(String(50), nullable=False, primary_key=True)
class ProblemForm(Base):
    # Error forms with normal doctype, migrated with "unprocessed" forms.
    __tablename__ = "problemform"
    id = Column(String(50), nullable=False, primary_key=True)
@attr.s
class Counts:
    # Per-kind document counters; see StateDB.get_doc_counts().
    total = attr.ib(default=0)
    diffs = attr.ib(default=0)
    missing = attr.ib(default=0)
    changes = attr.ib(default=0)
def iter_large(query, pk_attr, maxrq=1000):
    """Specialized windowed query generator using WHERE/LIMIT.

    Yield every row of `query` ordered by `pk_attr`, fetching at most
    `maxrq` rows per round trip so result sets too large to load at once
    can still be iterated.

    Adapted from https://github.com/sqlalchemy/sqlalchemy/wiki/WindowedRangeQuery
    """
    last_seen = None
    while True:
        window = query if last_seen is None else query.filter(pk_attr > last_seen)
        row = None
        for row in window.order_by(pk_attr).limit(maxrq):
            yield row
        if row is None:
            # empty window -> dataset exhausted
            return
        last_seen = getattr(row, pk_attr.name)
| import errno
import json
import logging
import os
import os.path
from collections import defaultdict, namedtuple
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from itertools import groupby
import attr
from memoized import memoized
from sqlalchemy import (
Column,
Integer,
String,
Text,
bindparam,
func,
)
from sqlalchemy.exc import IntegrityError
from corehq.apps.hqwebapp.encoders import LazyEncoder
from corehq.apps.tzmigration.planning import Base, DiffDB, PlanningDiff as Diff
from corehq.apps.tzmigration.timezonemigration import MISSING, json_diff
from corehq.util.datadog.gauges import datadog_counter
from corehq.util.log import with_progress_bar
from .diff import filter_form_diffs
log = logging.getLogger(__name__)
def init_state_db(domain, state_dir):
    """Create (if needed) and open the state db for `domain` under `state_dir`."""
    path = _get_state_db_filepath(domain, state_dir)
    parent = os.path.dirname(path)
    # only create the "db" subdirectory when the state dir itself exists
    if os.path.isdir(state_dir) and not os.path.isdir(parent):
        os.mkdir(parent)
    return StateDB.init(domain, path)
def open_state_db(domain, state_dir, *, readonly=True):
    """Open an existing state db (read-only by default).

    :param readonly: open the db read-only; pass False for write access.
    :raises Error: if the database file does not exist.
    """
    db_filepath = _get_state_db_filepath(domain, state_dir)
    if not os.path.exists(db_filepath):
        raise Error(f"not found: {db_filepath}")
    return StateDB.open(domain, db_filepath, readonly=readonly)
def delete_state_db(domain, state_dir):
    """Remove the state db file for `domain`; a no-op if it does not exist.

    Other OSErrors (permissions, etc.) still propagate.
    """
    db_filepath = _get_state_db_filepath(domain, state_dir)
    try:
        os.remove(db_filepath)
    except FileNotFoundError:
        # Python 3 idiom for the old `e.errno != errno.ENOENT` dance
        pass
def _get_state_db_filepath(domain, state_dir):
return os.path.join(state_dir, "db", '{}-couch-sql.db'.format(domain))
class StateDB(DiffDB):
    """SQLite-backed migration state for one domain's couch-to-sql migration.

    Extends ``DiffDB`` with resume state, per-case form counters, doc
    counters, missing-doc records, and per-document diff/change storage.

    Fixes applied in this revision:
    - ``get_doc_counts`` previously built its kind union from totals,
      missing, and diffs only; kinds present only in ``DocChanges`` were
      silently dropped. The union now includes ``changes``.
    - docstring corrections in ``iter_doc_diffs`` (it yields three-tuples).
    """

    @classmethod
    def init(cls, domain, path):
        # Stamp brand-new dbs with a unique id; migrate pre-existing ones.
        is_new_db = not os.path.exists(path)
        db = super(StateDB, cls).init(domain, path)
        if is_new_db:
            db._set_kv("db_unique_id", datetime.utcnow().strftime("%Y%m%d-%H%M%S.%f"))
        else:
            db._migrate()
        return db

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.is_rebuild = False

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def close(self):
        self.engine.dispose()

    @contextmanager
    def session(self, session=None):
        """Yield the given session unchanged, or create one that is
        committed on success and always closed."""
        if session is not None:
            yield session
            return
        session = self.Session()
        try:
            yield session
            session.commit()
        finally:
            session.close()

    @property
    @memoized
    def unique_id(self):
        with self.session() as session:
            return self._get_kv("db_unique_id", session).value

    def get(self, name, default=None):
        """Get the JSON-decoded value stored under ``kv-<name>``."""
        with self.session() as session:
            kv = self._get_kv(f"kv-{name}", session)
            if kv is None:
                return default
            return json.loads(kv.value)

    def set(self, name, value):
        """Store a JSON-encodable value under ``kv-<name>``."""
        self._upsert(KeyValue, KeyValue.key, f"kv-{name}", json.dumps(value))

    def update_cases(self, case_records):
        """Update case total and processed form counts

        :param case_records: iterable of objects, each having the attributes:

        - id: case id
        - total_forms: number of forms known to update the case.
        - processed_forms: number of forms updating the case that
          have been processed.

        :returns: list of three-tuples `(case_id, total_forms, processed_forms)`
        """
        params = [
            {"case": rec.id, "total": rec.total_forms, "proc": rec.processed_forms}
            for rec in case_records
        ]
        with self.session() as session:
            # REPLACE keeps the max known total and accumulates processed
            session.execute(
                """
                REPLACE INTO {table} (case_id, total_forms, processed_forms)
                VALUES (
                    :case,
                    MAX(COALESCE((
                        SELECT total_forms
                        FROM {table}
                        WHERE case_id = :case
                    ), 0), :total),
                    COALESCE((
                        SELECT processed_forms
                        FROM {table}
                        WHERE case_id = :case
                    ), 0) + :proc
                )
                """.format(table=CaseForms.__tablename__),
                params,
            )
            case_ids = [p["case"] for p in params]
            query = session.query(CaseForms).filter(CaseForms.case_id.in_(case_ids))
            result = [(c.case_id, c.total_forms, c.processed_forms) for c in query]
            assert len(case_ids) == len(result), (case_ids, result)
        return result

    def add_processed_forms(self, cases):
        """Increment processed forms count for each of the given cases

        :param cases: dict `{<case_id>: <processed_form_count>, ...}`
        :returns: list of three-tuples `(case_id, total_forms, processed_forms)`
            where `total_forms` is `None` for unknown cases.
        """
        case_col = CaseForms.case_id
        proc_col = CaseForms.processed_forms
        params = [{"case": c, "proc": p} for c, p in cases.items()]
        with self.session() as session:
            session.execute(
                CaseForms.__table__.update()
                .where(case_col == bindparam("case"))
                .values({proc_col: proc_col + bindparam("proc")}),
                params,
            )
            query = session.query(CaseForms).filter(case_col.in_(cases))
            case_forms = {cf.case_id: cf for cf in query}

            def make_result(case_id):
                case = case_forms.get(case_id)
                if case is None:
                    return (case_id, None, None)
                return (case_id, case.total_forms, case.processed_forms)

            return [make_result(case_id) for case_id in cases]

    def iter_cases_with_unprocessed_forms(self):
        """Yield `(case_id, total_forms)` for cases with outstanding forms."""
        query = self.Session().query(
            CaseForms.case_id,
            CaseForms.total_forms,
        ).filter(CaseForms.total_forms > CaseForms.processed_forms)
        for case_id, total_forms in iter_large(query, CaseForms.case_id):
            yield case_id, total_forms

    def get_forms_count(self, case_id):
        with self.session() as session:
            query = session.query(CaseForms.total_forms).filter_by(case_id=case_id)
            return query.scalar() or 0

    def add_cases_to_diff(self, case_ids):
        """Queue cases for diffing (duplicates are ignored)."""
        if not case_ids:
            return
        with self.session() as session:
            session.execute(
                f"INSERT OR IGNORE INTO {CaseToDiff.__tablename__} (id) VALUES (:id)",
                [{"id": x} for x in case_ids],
            )

    def add_diffed_cases(self, case_ids):
        """Mark cases as diffed and remove them from the to-diff queue."""
        if not case_ids:
            return
        with self.session() as session:
            session.execute(
                f"INSERT OR IGNORE INTO {DiffedCase.__tablename__} (id) VALUES (:id)",
                [{"id": x} for x in case_ids],
            )
            (
                session.query(CaseToDiff)
                .filter(CaseToDiff.id.in_(case_ids))
                .delete(synchronize_session=False)
            )

    def iter_undiffed_case_ids(self):
        query = self.Session().query(CaseToDiff.id)
        for case_id, in iter_large(query, CaseToDiff.id):
            yield case_id

    def count_undiffed_cases(self):
        with self.session() as session:
            return session.query(CaseToDiff).count()

    def iter_case_ids_with_diffs(self):
        query = (
            self.Session().query(DocDiffs.doc_id)
            .filter(DocDiffs.kind == "CommCareCase")
        )
        for doc_id, in iter_large(query, DocDiffs.doc_id):
            yield doc_id

    def count_case_ids_with_diffs(self):
        with self.session() as session:
            return (
                session.query(DocDiffs.doc_id)
                .filter(DocDiffs.kind == "CommCareCase")
                .count()
            )

    def add_problem_form(self, form_id):
        """Add form to be migrated with "unprocessed" forms

        A "problem" form is an error form with normal doctype (XFormInstance)
        """
        with self.session() as session:
            session.add(ProblemForm(id=form_id))

    def iter_problem_forms(self):
        query = self.Session().query(ProblemForm.id)
        for form_id, in iter_large(query, ProblemForm.id):
            yield form_id

    def add_no_action_case_form(self, form_id):
        try:
            with self.session() as session:
                session.add(NoActionCaseForm(id=form_id))
        except IntegrityError:
            # already recorded; cache stays valid
            pass
        else:
            self.get_no_action_case_forms.reset_cache(self)

    @memoized
    def get_no_action_case_forms(self):
        """Get the set of form ids that touch cases without actions"""
        return {x for x, in self.Session().query(NoActionCaseForm.id)}

    def set_resume_state(self, key, value):
        resume_key = "resume-{}".format(key)
        self._upsert(KeyValue, KeyValue.key, resume_key, json.dumps(value))

    @contextmanager
    def pop_resume_state(self, key, default):
        """Yield the saved resume state for `key` (or `default`), marking it
        consumed; entering again before the next save raises ResumeError."""
        resume_key = "resume-{}".format(key)
        with self.session() as session:
            kv = self._get_kv(resume_key, session)
            if kv is None:
                self._set_kv(resume_key, RESUME_NOT_ALLOWED, session)
                yield default
            elif self.is_rebuild:
                yield default
            elif kv.value == RESUME_NOT_ALLOWED:
                raise ResumeError("previous session did not save resume state")
            else:
                yield json.loads(kv.value)
                kv.value = RESUME_NOT_ALLOWED

    def _get_kv(self, key, session):
        return session.query(KeyValue).get(key)

    def _set_kv(self, key, value, session=None):
        with self.session(session) as session:
            session.add(KeyValue(key=key, value=value))

    def _upsert(self, model, key_field, key, value, incr=False):
        with self.session() as session:
            updated = (
                session.query(model)
                .filter(key_field == key)
                .update(
                    {model.value: (model.value + value) if incr else value},
                    synchronize_session=False,
                )
            )
            if not updated:
                obj = model(value=value)
                key_field.__set__(obj, key)
                session.add(obj)
            else:
                assert updated == 1, (key, updated)

    def add_missing_docs(self, kind, doc_ids):
        with self.session() as session:
            session.bulk_save_objects([
                MissingDoc(kind=kind, doc_id=doc_id)
                for doc_id in doc_ids
            ])

    def delete_missing_docs(self, kind):
        with self.session() as session:
            (
                session.query(MissingDoc)
                .filter_by(kind=kind)
                .delete(synchronize_session=False)
            )

    def doc_not_missing(self, kind, doc_id):
        """Remove a single (kind, doc_id) from the missing-docs table."""
        with self.session() as session:
            (
                session.query(MissingDoc.doc_id)
                .filter_by(kind=kind, doc_id=doc_id)
                .delete(synchronize_session=False)
            )

    def save_form_diffs(self, couch_json, sql_json):
        diffs = json_diff(couch_json, sql_json, track_list_indices=False)
        diffs = filter_form_diffs(couch_json, sql_json, diffs)
        dd_count = partial(datadog_counter, tags=["domain:" + self.domain])
        dd_count("commcare.couchsqlmigration.form.diffed")
        doc_type = couch_json["doc_type"]
        doc_id = couch_json["_id"]
        self.add_diffs(doc_type, doc_id, diffs)
        if diffs:
            dd_count("commcare.couchsqlmigration.form.has_diff")

    def replace_case_diffs(self, case_diffs, **kw):
        """Group diffs by case, folding "stock state" diffs into the diff
        row of their owning case, and store them."""
        diffs_by_doc = defaultdict(list)
        for kind, doc_id, diffs in case_diffs:
            assert all(isinstance(d.path, (list, tuple)) for d in diffs), diffs
            if kind == "stock state":
                case_id = doc_id.split("/", 1)[0]
                diffs = [
                    d._replace(path={"stock_id": doc_id, "path": d.path})
                    for d in diffs
                ]
                diffs_by_doc[("CommCareCase", case_id)].extend(diffs)
            else:
                diffs_by_doc[(kind, doc_id)].extend(diffs)
        for (doc_type, case_id), diffs in diffs_by_doc.items():
            self.add_diffs(doc_type, case_id, diffs, **kw)

    def add_diffs(self, kind, doc_id, diffs, *, session=None, _model=None):
        """Replace the stored diff row for (kind, doc_id); an empty diff
        list deletes the row."""
        if _model is None:
            _model = DocDiffs
        to_dict = _model.diff_to_dict
        assert kind != "stock state", ("stock state diffs should be "
            "combined with other diffs for the same case")
        if diffs:
            diff_json = json.dumps([to_dict(d) for d in diffs], cls=LazyEncoder)
            with self.session(session) as session:
                session.execute(
                    f"""
                    REPLACE INTO {_model.__tablename__} (kind, doc_id, diffs)
                    VALUES (:kind, :doc_id, :diffs)
                    """,
                    [{"kind": kind, "doc_id": doc_id, "diffs": diff_json}],
                )
        else:
            with self.session(session) as session:
                session.query(_model).filter(
                    _model.kind == kind,
                    _model.doc_id == doc_id,
                ).delete(synchronize_session=False)

    def replace_case_changes(self, changes):
        self.replace_case_diffs(changes, _model=DocChanges)

    def iter_diffs(self, *, _model=None):
        if _model is None:
            _model = DocDiffs
        with self.session() as session:
            for kind, in list(session.query(_model.kind).distinct()):
                query = session.query(_model).filter_by(kind=kind)
                for doc in iter_large(query, _model.doc_id):
                    for data in json.loads(doc.diffs):
                        yield _model.dict_to_diff(doc.kind, doc.doc_id, data)

    def iter_changes(self):
        return self.iter_diffs(_model=DocChanges)

    def iter_doc_diffs(self, kind=None, _model=None):
        """Iterate over diffs of the given kind

        "stock state" diffs cannot be queried directly with this method.
        They are grouped with diffs of the corresponding case
        (kind="CommCareCase", doc_id=<case_id>).

        :yields: three-tuples `(kind, doc_id, diffs)`. The diffs yielded
            here are `PlanningDiff` objects, which should not be confused
            with json diffs (`<PlanningDiff>.json_diff`).
        """
        if _model is None:
            _model = DocDiffs
        with self.session() as session:
            query = session.query(_model)
            if kind is not None:
                query = query.filter_by(kind=kind)
            for doc in iter_large(query, _model.doc_id):
                yield doc.kind, doc.doc_id, [
                    _model.dict_to_diff(doc.kind, doc.doc_id, data)
                    for data in json.loads(doc.diffs)
                ]

    def iter_doc_changes(self, kind=None):
        return self.iter_doc_diffs(kind, _model=DocChanges)

    def get_diffs(self):
        """DEPRECATED use iter_diffs(); the result may be very large"""
        return list(self.iter_diffs())

    def set_counter(self, kind, value):
        self._upsert(DocCount, DocCount.kind, kind, value)

    def get_doc_counts(self):
        """Returns a dict of counts by kind

        Values are `Counts` objects having `total`, `diffs`, `missing`,
        and `changes` fields:

        - total: number of items counted with `increment_counter`.
        - missing: count of ids found in Couch but not in SQL.
        - diffs: count of docs with diffs.
        - changes: count of docs with changes.
        """
        with self.session() as session:
            totals = {dc.kind: dc.value for dc in session.query(DocCount)}
            diffs = dict(session.query(
                DocDiffs.kind,
                func.count(DocDiffs.doc_id),
            ).group_by(DocDiffs.kind))
            missing = dict(session.query(
                MissingDoc.kind,
                func.count(MissingDoc.doc_id),
            ).group_by(MissingDoc.kind))
            changes = dict(session.query(
                DocChanges.kind,
                func.count(DocChanges.doc_id),
            ).group_by(DocChanges.kind))
            # include `changes` in the union so kinds that only have
            # changes are not dropped (previously omitted)
            kinds = set(totals) | set(missing) | set(diffs) | set(changes)
            return {kind: Counts(
                total=totals.get(kind, 0),
                diffs=diffs.get(kind, 0),
                missing=missing.get(kind, 0),
                changes=changes.get(kind, 0),
            ) for kind in kinds}

    def iter_missing_doc_ids(self, kind):
        with self.session() as session:
            query = (
                session.query(MissingDoc.doc_id)
                .filter(MissingDoc.kind == kind)
            )
            yield from (x for x, in iter_large(query, MissingDoc.doc_id))

    def get_diff_stats(self):
        raise NotImplementedError("use get_doc_counts")

    def clone_casediff_data_from(self, casediff_state_path):
        """Copy casediff state into this state db

        model analysis

        - CaseForms - casediff r/w
        - Diff - deprecated
        - KeyValue - casediff r/w, main r/w (different keys)
        - DocCount - casediff w, main r
        - DocDiffs - casediff w (case and stock kinds), main r/w
        - DocChanges - casediff w (case and stock kinds), main r/w
        - MissingDoc - casediff w, main r
        - NoActionCaseForm - main r/w
        - ProblemForm - main r/w
        """
        def quote(value):
            assert isinstance(value, str) and "'" not in value, repr(value)
            return f"'{value}'"

        def quotelist(values):
            return f"({', '.join(quote(v) for v in values)})"

        def is_id(column):
            return column.key == "id" and isinstance(column.type, Integer)

        def copy(model, session, where_expr=None):
            log.info("copying casediff data: %s", model.__name__)
            where = f"WHERE {where_expr}" if where_expr else ""
            fields = ", ".join(c.key for c in model.__table__.columns if not is_id(c))
            session.execute(f"DELETE FROM main.{model.__tablename__} {where}")
            session.execute(f"""
                INSERT INTO main.{model.__tablename__} ({fields})
                SELECT {fields} FROM cddb.{model.__tablename__} {where}
            """)

        log.info("checking casediff data preconditions...")
        casediff_db = type(self).open(self.domain, casediff_state_path)
        with casediff_db.session() as cddb:
            expect_casediff_kinds = {
                "CommCareCase",
                "CommCareCase-Deleted",
                "stock state",
            }
            casediff_kinds = {k for k, in cddb.query(DocDiffs.kind).distinct()}
            casediff_kinds.update(k for k, in cddb.query(DocChanges.kind).distinct())
            assert not casediff_kinds - expect_casediff_kinds, casediff_kinds
            resume_keys = [
                key for key, in cddb.query(KeyValue.key)
                .filter(KeyValue.key.startswith("resume-"))
            ]
            assert all("Case" in key for key in resume_keys), resume_keys
            count_kinds = [k for k, in cddb.query(DocCount.kind).distinct()]
            assert all("CommCareCase" in k for k in count_kinds), count_kinds
            missing_kinds = [m for m, in cddb.query(MissingDoc.kind).distinct()]
            assert all("CommCareCase" in k for k in missing_kinds), missing_kinds
        casediff_db.close()
        with self.session() as session:
            session.execute(f"ATTACH DATABASE {quote(casediff_state_path)} AS cddb")
            copy(CaseForms, session)
            copy(Diff, session, f"kind IN {quotelist(expect_casediff_kinds)}")
            copy(DocDiffs, session, f"kind IN {quotelist(expect_casediff_kinds)}")
            copy(DocChanges, session, f"kind IN {quotelist(expect_casediff_kinds)}")
            copy(KeyValue, session, f"key IN {quotelist(resume_keys)}")
            copy(DocCount, session)
            copy(MissingDoc, session)

    def _migrate(self):
        with self.session() as session:
            self._migrate_diff_to_docdiffs(session)

    def _migrate_diff_to_docdiffs(self, session):
        """Migrate from the deprecated row-per-diff PlanningDiff table to
        the row-per-document DocDiffs table."""
        if session.query(session.query(DocDiffs).exists()).scalar():
            return  # already migrated
        if not session.query(session.query(Diff).exists()).scalar():
            return  # nothing to migrate
        log.info("migrating PlanningDiff to DocDiffs...")
        base_query = session.query(Diff).filter(Diff.kind != "stock state")
        count = base_query.count()
        query = base_query.order_by(Diff.kind, Diff.doc_id)
        items = with_progress_bar(query, count, oneline="concise", prefix="main diffs")
        for (kind, doc_id), diffs in groupby(items, lambda d: (d.kind, d.doc_id)):
            diffs = [d.json_diff for d in diffs]
            self.add_diffs(kind, doc_id, diffs, session=session)
        # "stock state" diffs must be migrated after "CommCareCase"
        # diffs since it will probably replace some of them
        self._migrate_stock_state_diffs(session)

    def _migrate_stock_state_diffs(self, session):
        def get_case_diffs(case_id):
            case_diffs = session.query(Diff).filter_by(doc_id=case_id)
            return [d.json_diff for d in case_diffs]

        query = session.query(Diff).filter_by(kind="stock state")
        count = query.count()
        stock_state_diffs = with_progress_bar(
            query, count, oneline="concise", prefix="stock state cases")
        diffs_by_doc = defaultdict(list)
        for stock_diff in stock_state_diffs:
            # doc_id is "<case_id>/<section>/<product>"
            case_id, _, _ = stock_diff.doc_id.split("/")
            key = ("CommCareCase", case_id)
            jsdiff = stock_diff.json_diff
            stock_json_diff = jsdiff._replace(path={
                "stock_id": stock_diff.doc_id,
                "path": jsdiff.path,
            })
            if key not in diffs_by_doc:
                # pull in existing case diffs once, so add_diffs' REPLACE
                # does not drop them
                diffs_by_doc[key].extend(get_case_diffs(case_id))
            diffs_by_doc[key].append(stock_json_diff)
        for (doc_type, case_id), diffs in diffs_by_doc.items():
            self.add_diffs(doc_type, case_id, diffs, session=session)

    def vacuum(self):
        with self.session() as session:
            session.execute("VACUUM")
class Error(Exception):
    """Base error for state-db operations (e.g. missing db file)."""
    pass
class ResumeError(Exception):
    """Raised when resume state from a previous session is unusable."""
    pass
# Sentinel stored in resume state while a session is running; finding it on
# the next run means the previous session did not save resume state.
RESUME_NOT_ALLOWED = "RESUME_NOT_ALLOWED"
class CaseForms(Base):
    # Per-case form counters: how many forms touch the case and how many of
    # those have been processed so far.
    __tablename__ = "caseforms"
    case_id = Column(String(50), nullable=False, primary_key=True)
    total_forms = Column(Integer, nullable=False)
    processed_forms = Column(Integer, nullable=False, default=0)
class CaseToDiff(Base):
    # Queue of case ids awaiting diffing.
    __tablename__ = 'case_to_diff'
    id = Column(String(50), nullable=False, primary_key=True)
class DiffedCase(Base):
    # Case ids whose diffing has completed (moved here from CaseToDiff).
    __tablename__ = 'diffed_case'
    id = Column(String(50), nullable=False, primary_key=True)
class DocCount(Base):
    # One counter value per document kind (written by StateDB.set_counter).
    __tablename__ = 'doc_count'
    kind = Column(String(50), primary_key=True)
    value = Column(Integer, nullable=False)
class DocDiffs(Base):
    # One row per document that has migration diffs; `diffs` holds a JSON
    # array of dicts in the shape produced by `diff_to_dict`.
    __tablename__ = 'doc_diffs'
    kind = Column(String(50), nullable=False, primary_key=True)
    doc_id = Column(String(50), nullable=False, primary_key=True)
    diffs = Column(Text(), nullable=False)

    def diff_to_dict(diff):
        # NOTE: plain function, invoked through the class as
        # `DocDiffs.diff_to_dict(d)` / `_model.diff_to_dict`, never bound.
        # MISSING old/new values are omitted rather than serialized.
        data = {"type": diff.diff_type, "path": diff.path}
        if diff.old_value is not MISSING:
            data["old_value"] = diff.old_value
        if diff.new_value is not MISSING:
            data["new_value"] = diff.new_value
        return data

    def dict_to_diff(kind, doc_id, data, *, _make_diff=Diff):
        """Rehydrate a dict produced by `diff_to_dict` into a diff object.

        A dict-valued path marks a "stock state" diff that was folded into
        its case's row; it is unpacked back to its own kind/doc_id here.
        """
        def json_or_none(data, key):
            # absent key means the value was MISSING when serialized
            return json.dumps(data[key]) if key in data else None
        path = data["path"]
        if len(path) == 2 and isinstance(path, dict):
            assert path.keys() == {"stock_id", "path"}, path
            assert path["stock_id"].startswith(doc_id + "/"), (doc_id, path)
            kind = "stock state"
            doc_id = path["stock_id"]
            path = path["path"]
        return _make_diff(
            kind=kind,
            doc_id=doc_id,
            diff_type=data["type"],
            path=json.dumps(path),
            old_value=json_or_none(data, "old_value"),
            new_value=json_or_none(data, "new_value"),
        )
class DocChanges(Base):
    # Like DocDiffs, but each stored dict also carries a `reason` and is
    # rehydrated as a `Change` instead of a plain diff.
    __tablename__ = 'doc_changes'
    kind = Column(String(50), nullable=False, primary_key=True)
    doc_id = Column(String(50), nullable=False, primary_key=True)
    diffs = Column(Text(), nullable=False)

    def diff_to_dict(diff):
        # invoked through the class, never bound; extends the base dict
        data = DocDiffs.diff_to_dict(diff)
        data["reason"] = diff.reason
        return data

    def dict_to_diff(kind, doc_id, data):
        def change(**kw):
            # DocDiffs.dict_to_diff hands us JSON strings (or None for
            # MISSING); decode them back before building the Change.
            for key in ["path", "old_value", "new_value"]:
                kw[key] = MISSING if kw[key] is None else json.loads(kw[key])
            return Change(reason=data["reason"], **kw)
        return DocDiffs.dict_to_diff(kind, doc_id, data, _make_diff=change)
@attr.s
class Change:
    # In-memory analogue of a diff that also carries a `reason`; shaped so it
    # can stand in where a diff object's `json_diff` is accessed.
    kind = attr.ib()
    doc_id = attr.ib()
    reason = attr.ib()
    diff_type = attr.ib()
    path = attr.ib()
    old_value = attr.ib()
    new_value = attr.ib()

    @property
    def json_diff(self):
        # a Change is already in json-diff form
        return self

    def _replace(self, **data):
        # namedtuple-style copy-with-overrides for an attrs class
        cls = type(self)
        for att in attr.fields(cls):
            if att.name not in data:
                data[att.name] = getattr(self, att.name)
        return cls(**data)
class KeyValue(Base):
    # Generic key/value store: resume state, "kv-" settings, db_unique_id.
    __tablename__ = "keyvalue"
    key = Column(String(50), nullable=False, primary_key=True)
    value = Column(Text(), nullable=False)
class MissingDoc(Base):
    # Documents found in Couch but not in SQL.
    __tablename__ = 'missing_doc'
    kind = Column(String(50), nullable=False, primary_key=True)
    doc_id = Column(String(50), nullable=False, primary_key=True)
class NoActionCaseForm(Base):
    # Forms that touch cases without case actions.
    __tablename__ = "noactioncaseform"
    id = Column(String(50), nullable=False, primary_key=True)
class ProblemForm(Base):
    # Error forms with normal doctype, migrated with "unprocessed" forms.
    __tablename__ = "problemform"
    id = Column(String(50), nullable=False, primary_key=True)
@attr.s
class Counts:
    # Per-kind document counters; see StateDB.get_doc_counts().
    total = attr.ib(default=0)
    diffs = attr.ib(default=0)
    missing = attr.ib(default=0)
    changes = attr.ib(default=0)
def iter_large(query, pk_attr, maxrq=1000):
    """Specialized windowed query generator using WHERE/LIMIT.

    Iterate a result set too large to fetch at once, ordered by `pk_attr`,
    pulling at most `maxrq` rows per query.

    Adapted from https://github.com/sqlalchemy/sqlalchemy/wiki/WindowedRangeQuery
    """
    cursor = None
    while True:
        window = query
        if cursor is not None:
            window = query.filter(pk_attr > cursor)
        last = None
        for last in window.order_by(pk_attr).limit(maxrq):
            yield last
        if last is None:
            break
        cursor = getattr(last, pk_attr.name)
|
import os
import contextlib
import tarfile
import json
import numpy as np
import PIL
import torch
from common_utils import get_tmp_dir
import pickle
import random
from itertools import cycle
from torchvision.io.video import write_video
import unittest.mock
import hashlib
from distutils import dir_util
import re
def mock_class_attribute(stack, target, new):
    """Patch `target` with a PropertyMock returning `new` until `stack` closes."""
    patcher = unittest.mock.patch(
        target, new_callable=unittest.mock.PropertyMock, return_value=new
    )
    stack.enter_context(patcher)
    return patcher
def compute_md5(file):
    """Return the hex MD5 digest of the file's contents."""
    with open(file, "rb") as stream:
        content = stream.read()
    return hashlib.md5(content).hexdigest()
def make_tar(root, name, *files, compression=None):
    """Create `<root>/<name>.tar[.<compression>]` containing `files` (paths
    relative to `root`) and return `(archive_name, md5)`."""
    suffix = ".tar" if compression is None else f".tar.{compression}"
    mode = "w" if compression is None else f"w:{compression}"
    # any extension the caller passed is replaced by the computed one
    archive_name = os.path.splitext(name)[0] + suffix
    archive_path = os.path.join(root, archive_name)
    with tarfile.open(archive_path, mode) as archive:
        for member in files:
            archive.add(os.path.join(root, member), arcname=member)
    return archive_name, compute_md5(archive_path)
def clean_dir(root, *keep):
    """Remove every entry of `root` whose name matches none of the `keep`
    regex patterns."""
    # Build one alternation like "(p1)|(p2)". The original nested an
    # f-string reusing double quotes (f"({f")|(".join(keep)})"), which is a
    # SyntaxError on every Python before 3.12 (PEP 701).
    pattern = re.compile("({})".format(")|(".join(keep)))
    for file_or_dir in os.listdir(root):
        if pattern.search(file_or_dir):
            continue
        file_or_dir = os.path.join(root, file_or_dir)
        if os.path.isfile(file_or_dir):
            os.remove(file_or_dir)
        else:
            # NOTE(review): distutils is removed in Python 3.12; consider
            # shutil.rmtree here.
            dir_util.remove_tree(file_or_dir)
@contextlib.contextmanager
def mnist_root(num_images, cls_name):
    """Yield a temp dir laid out like an MNIST root: <tmp>/<cls_name>/raw
    containing idx3 image and idx1 label files for the train and t10k splits."""
    def _encode(v):
        # int32 native bytes reversed -> big-endian header field
        # (assumes a little-endian host — TODO confirm)
        return torch.tensor(v, dtype=torch.int32).numpy().tobytes()[::-1]

    def _make_image_file(filename, num_images):
        # flat uint8 pixel data for `num_images` 28x28 images
        img = torch.randint(0, 256, size=(28 * 28 * num_images,), dtype=torch.uint8)
        with open(filename, "wb") as f:
            f.write(_encode(2051))  # magic header
            f.write(_encode(num_images))
            f.write(_encode(28))
            f.write(_encode(28))
            f.write(img.numpy().tobytes())

    def _make_label_file(filename, num_images):
        # all-zero labels are sufficient for fake data
        labels = torch.zeros((num_images,), dtype=torch.uint8)
        with open(filename, "wb") as f:
            f.write(_encode(2049))  # magic header
            f.write(_encode(num_images))
            f.write(labels.numpy().tobytes())

    with get_tmp_dir() as tmp_dir:
        raw_dir = os.path.join(tmp_dir, cls_name, "raw")
        os.makedirs(raw_dir)
        _make_image_file(os.path.join(raw_dir, "train-images-idx3-ubyte"), num_images)
        _make_label_file(os.path.join(raw_dir, "train-labels-idx1-ubyte"), num_images)
        _make_image_file(os.path.join(raw_dir, "t10k-images-idx3-ubyte"), num_images)
        _make_label_file(os.path.join(raw_dir, "t10k-labels-idx1-ubyte"), num_images)
        yield tmp_dir
@contextlib.contextmanager
def cifar_root(version):
    """Yield a fake CIFAR10/CIFAR100 root containing minimal pickled batch
    and meta files; raises ValueError for an unknown version."""
    version_params = {
        'CIFAR10': {
            'base_folder': 'cifar-10-batches-py',
            'train_files': ['data_batch_{}'.format(batch) for batch in range(1, 6)],
            'test_file': 'test_batch',
            'target_key': 'labels',
            'meta_file': 'batches.meta',
            'classes_key': 'label_names',
        },
        'CIFAR100': {
            'base_folder': 'cifar-100-python',
            'train_files': ['train'],
            'test_file': 'test',
            'target_key': 'fine_labels',
            'meta_file': 'meta',
            'classes_key': 'fine_label_names',
        },
    }

    def _dump(obj, file):
        # protocol 2, matching the real dataset files
        with open(file, 'wb') as fh:
            pickle.dump(obj, fh, 2)

    def _write_data_file(file, target_key):
        _dump({'data': np.zeros((1, 32 * 32 * 3), dtype=np.uint8), target_key: [0]}, file)

    def _write_meta_file(file, classes_key):
        _dump({classes_key: ['fakedata']}, file)

    try:
        params = version_params[version]
    except KeyError:
        raise ValueError

    with get_tmp_dir() as root:
        base_folder = os.path.join(root, params['base_folder'])
        os.mkdir(base_folder)
        for file in list(params['train_files']) + [params['test_file']]:
            _write_data_file(os.path.join(base_folder, file), params['target_key'])
        _write_meta_file(os.path.join(base_folder, params['meta_file']),
                         params['classes_key'])
        yield root
@contextlib.contextmanager
def widerface_root():
    """
    Generates a dataset with the following folder structure and returns the path root:

    <root>
    └── widerface
        ├── wider_face_split
        ├── WIDER_train
        ├── WIDER_val
        └── WIDER_test

    The dataset consist of
    1 image for each dataset split (train, val, test) and annotation files
    for each split
    """
    def _write_image(file):
        PIL.Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8)).save(file)

    def _make_split_archive(root, split, image_name):
        # one "0--Parade" image per split
        image_dir = os.path.join(root, f'WIDER_{split}', 'images', '0--Parade')
        os.makedirs(image_dir)
        _write_image(os.path.join(image_dir, image_name))

    def _make_annotations_archive(root):
        annotations = {
            "wider_face_train_bbx_gt.txt":
                '0--Parade/0_Parade_marchingband_1_1.jpg\n1\n449 330 122 149 0 0 0 0 0 0\n',
            "wider_face_val_bbx_gt.txt":
                '0--Parade/0_Parade_marchingband_1_2.jpg\n1\n501 160 285 443 0 0 0 0 0 0\n',
            "wider_face_test_filelist.txt":
                '0--Parade/0_Parade_marchingband_1_3.jpg\n',
        }
        split_dir = os.path.join(root, 'wider_face_split')
        os.mkdir(split_dir)
        for filename, contents in annotations.items():
            with open(os.path.join(split_dir, filename), "w") as txt_file:
                txt_file.write(contents)

    with get_tmp_dir() as root:
        root_base = os.path.join(root, "widerface")
        os.mkdir(root_base)
        for split, image_name in [
            ("train", "0_Parade_marchingband_1_1.jpg"),
            ("val", "0_Parade_marchingband_1_2.jpg"),
            ("test", "0_Parade_marchingband_1_3.jpg"),
        ]:
            _make_split_archive(root_base, split, image_name)
        _make_annotations_archive(root_base)
        yield root
@contextlib.contextmanager
def places365_root(split="train-standard", small=False):
    """Yield `(root, data)` for a fake Places365 dataset: `root` holds the
    devkit and image archives, `data` the expected classes/class_to_idx/imgs.
    Also patches the Places365 class metadata so it points at these fakes
    (patches live until the internal ExitStack closes)."""
    VARIANTS = {
        "train-standard": "standard",
        "train-challenge": "challenge",
        "val": "standard",
    }
    # {split: file}
    DEVKITS = {
        "train-standard": "filelist_places365-standard.tar",
        "train-challenge": "filelist_places365-challenge.tar",
        "val": "filelist_places365-standard.tar",
    }
    CATEGORIES = "categories_places365.txt"
    # {split: file}
    FILE_LISTS = {
        "train-standard": "places365_train_standard.txt",
        "train-challenge": "places365_train_challenge.txt",
        "val": "places365_train_standard.txt",
    }
    # {(split, small): (archive, folder_default, folder_renamed)}
    IMAGES = {
        ("train-standard", False): ("train_large_places365standard.tar", "data_large", "data_large_standard"),
        ("train-challenge", False): ("train_large_places365challenge.tar", "data_large", "data_large_challenge"),
        ("val", False): ("val_large.tar", "val_large", "val_large"),
        ("train-standard", True): ("train_256_places365standard.tar", "data_256", "data_256_standard"),
        ("train-challenge", True): ("train_256_places365challenge.tar", "data_256", "data_256_challenge"),
        ("val", True): ("val_256.tar", "val_256", "val_256"),
    }
    # (class, idx)
    CATEGORIES_CONTENT = (("/a/airfield", 0), ("/a/apartment_building/outdoor", 8), ("/b/badlands", 30))
    # (file, idx)
    FILE_LIST_CONTENT = (
        ("Places365_val_00000001.png", 0),
        *((f"{category}/Places365_train_00000001.png", idx) for category, idx in CATEGORIES_CONTENT),
    )

    def mock_target(attr, partial="torchvision.datasets.places365.Places365"):
        return f"{partial}.{attr}"

    def make_txt(root, name, seq):
        # write "<string> <idx>" lines and return (name, md5)
        file = os.path.join(root, name)
        with open(file, "w") as fh:
            for string, idx in seq:
                fh.write(f"{string} {idx}\n")
        return name, compute_md5(file)

    def make_categories_txt(root, name):
        return make_txt(root, name, CATEGORIES_CONTENT)

    def make_file_list_txt(root, name):
        return make_txt(root, name, FILE_LIST_CONTENT)

    def make_image(file, size):
        os.makedirs(os.path.dirname(file), exist_ok=True)
        PIL.Image.fromarray(np.zeros((*size, 3), dtype=np.uint8)).save(file)

    def make_devkit_archive(stack, root, split):
        # build categories + file-list txt files, tar them, and point the
        # Places365 class metadata at the generated (name, md5) pairs
        archive = DEVKITS[split]
        files = []

        meta = make_categories_txt(root, CATEGORIES)
        mock_class_attribute(stack, mock_target("_CATEGORIES_META"), meta)
        files.append(meta[0])

        meta = {split: make_file_list_txt(root, FILE_LISTS[split])}
        mock_class_attribute(stack, mock_target("_FILE_LIST_META"), meta)
        files.extend([item[0] for item in meta.values()])

        meta = {VARIANTS[split]: make_tar(root, archive, *files)}
        mock_class_attribute(stack, mock_target("_DEVKIT_META"), meta)

    def make_images_archive(stack, root, split, small):
        archive, folder_default, folder_renamed = IMAGES[(split, small)]
        # large variant uses a random height to exercise non-square images
        image_size = (256, 256) if small else (512, random.randint(512, 1024))

        files, idcs = zip(*FILE_LIST_CONTENT)
        images = [file.lstrip("/").replace("/", os.sep) for file in files]
        for image in images:
            make_image(os.path.join(root, folder_default, image), image_size)

        meta = {(split, small): make_tar(root, archive, folder_default)}
        mock_class_attribute(stack, mock_target("_IMAGES_META"), meta)

        # expected (path, idx) pairs, under the post-extraction folder name
        return [(os.path.join(root, folder_renamed, image), idx) for image, idx in zip(images, idcs)]

    with contextlib.ExitStack() as stack, get_tmp_dir() as root:
        make_devkit_archive(stack, root, split)
        class_to_idx = dict(CATEGORIES_CONTENT)
        classes = list(class_to_idx.keys())

        data = {"class_to_idx": class_to_idx, "classes": classes}
        data["imgs"] = make_images_archive(stack, root, split, small)

        # drop everything except the tar archives
        clean_dir(root, ".tar$")

        yield root, data
| import os
import contextlib
import tarfile
import json
import numpy as np
import PIL
import torch
from common_utils import get_tmp_dir
import pickle
import random
from itertools import cycle
from torchvision.io.video import write_video
import unittest.mock
import hashlib
from distutils import dir_util
import re
def mock_class_attribute(stack, target, new):
mock = unittest.mock.patch(target, new_callable=unittest.mock.PropertyMock, return_value=new)
stack.enter_context(mock)
return mock
def compute_md5(file):
with open(file, "rb") as fh:
return hashlib.md5(fh.read()).hexdigest()
def make_tar(root, name, *files, compression=None):
ext = ".tar"
mode = "w"
if compression is not None:
ext = f"{ext}.{compression}"
mode = f"{mode}:{compression}"
name = os.path.splitext(name)[0] + ext
archive = os.path.join(root, name)
with tarfile.open(archive, mode) as fh:
for file in files:
fh.add(os.path.join(root, file), arcname=file)
return name, compute_md5(archive)
def clean_dir(root, *keep):
pattern = re.compile(f"({f')|('.join(keep)})")
for file_or_dir in os.listdir(root):
if pattern.search(file_or_dir):
continue
file_or_dir = os.path.join(root, file_or_dir)
if os.path.isfile(file_or_dir):
os.remove(file_or_dir)
else:
dir_util.remove_tree(file_or_dir)
@contextlib.contextmanager
def mnist_root(num_images, cls_name):
def _encode(v):
return torch.tensor(v, dtype=torch.int32).numpy().tobytes()[::-1]
def _make_image_file(filename, num_images):
img = torch.randint(0, 256, size=(28 * 28 * num_images,), dtype=torch.uint8)
with open(filename, "wb") as f:
f.write(_encode(2051)) # magic header
f.write(_encode(num_images))
f.write(_encode(28))
f.write(_encode(28))
f.write(img.numpy().tobytes())
def _make_label_file(filename, num_images):
labels = torch.zeros((num_images,), dtype=torch.uint8)
with open(filename, "wb") as f:
f.write(_encode(2049)) # magic header
f.write(_encode(num_images))
f.write(labels.numpy().tobytes())
with get_tmp_dir() as tmp_dir:
raw_dir = os.path.join(tmp_dir, cls_name, "raw")
os.makedirs(raw_dir)
_make_image_file(os.path.join(raw_dir, "train-images-idx3-ubyte"), num_images)
_make_label_file(os.path.join(raw_dir, "train-labels-idx1-ubyte"), num_images)
_make_image_file(os.path.join(raw_dir, "t10k-images-idx3-ubyte"), num_images)
_make_label_file(os.path.join(raw_dir, "t10k-labels-idx1-ubyte"), num_images)
yield tmp_dir
@contextlib.contextmanager
def cifar_root(version):
def _get_version_params(version):
if version == 'CIFAR10':
return {
'base_folder': 'cifar-10-batches-py',
'train_files': ['data_batch_{}'.format(batch) for batch in range(1, 6)],
'test_file': 'test_batch',
'target_key': 'labels',
'meta_file': 'batches.meta',
'classes_key': 'label_names',
}
elif version == 'CIFAR100':
return {
'base_folder': 'cifar-100-python',
'train_files': ['train'],
'test_file': 'test',
'target_key': 'fine_labels',
'meta_file': 'meta',
'classes_key': 'fine_label_names',
}
else:
raise ValueError
def _make_pickled_file(obj, file):
with open(file, 'wb') as fh:
pickle.dump(obj, fh, 2)
def _make_data_file(file, target_key):
obj = {
'data': np.zeros((1, 32 * 32 * 3), dtype=np.uint8),
target_key: [0]
}
_make_pickled_file(obj, file)
def _make_meta_file(file, classes_key):
obj = {
classes_key: ['fakedata'],
}
_make_pickled_file(obj, file)
params = _get_version_params(version)
with get_tmp_dir() as root:
base_folder = os.path.join(root, params['base_folder'])
os.mkdir(base_folder)
for file in list(params['train_files']) + [params['test_file']]:
_make_data_file(os.path.join(base_folder, file), params['target_key'])
_make_meta_file(os.path.join(base_folder, params['meta_file']),
params['classes_key'])
yield root
@contextlib.contextmanager
def widerface_root():
"""
Generates a dataset with the following folder structure and returns the path root:
<root>
└── widerface
├── wider_face_split
├── WIDER_train
├── WIDER_val
└── WIDER_test
The dataset consist of
1 image for each dataset split (train, val, test) and annotation files
for each split
"""
def _make_image(file):
PIL.Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8)).save(file)
def _make_train_archive(root):
extracted_dir = os.path.join(root, 'WIDER_train', 'images', '0--Parade')
os.makedirs(extracted_dir)
_make_image(os.path.join(extracted_dir, '0_Parade_marchingband_1_1.jpg'))
def _make_val_archive(root):
extracted_dir = os.path.join(root, 'WIDER_val', 'images', '0--Parade')
os.makedirs(extracted_dir)
_make_image(os.path.join(extracted_dir, '0_Parade_marchingband_1_2.jpg'))
def _make_test_archive(root):
extracted_dir = os.path.join(root, 'WIDER_test', 'images', '0--Parade')
os.makedirs(extracted_dir)
_make_image(os.path.join(extracted_dir, '0_Parade_marchingband_1_3.jpg'))
def _make_annotations_archive(root):
train_bbox_contents = '0--Parade/0_Parade_marchingband_1_1.jpg\n1\n449 330 122 149 0 0 0 0 0 0\n'
val_bbox_contents = '0--Parade/0_Parade_marchingband_1_2.jpg\n1\n501 160 285 443 0 0 0 0 0 0\n'
test_filelist_contents = '0--Parade/0_Parade_marchingband_1_3.jpg\n'
extracted_dir = os.path.join(root, 'wider_face_split')
os.mkdir(extracted_dir)
# bbox training file
bbox_file = os.path.join(extracted_dir, "wider_face_train_bbx_gt.txt")
with open(bbox_file, "w") as txt_file:
txt_file.write(train_bbox_contents)
# bbox validation file
bbox_file = os.path.join(extracted_dir, "wider_face_val_bbx_gt.txt")
with open(bbox_file, "w") as txt_file:
txt_file.write(val_bbox_contents)
# test filelist file
filelist_file = os.path.join(extracted_dir, "wider_face_test_filelist.txt")
with open(filelist_file, "w") as txt_file:
txt_file.write(test_filelist_contents)
with get_tmp_dir() as root:
root_base = os.path.join(root, "widerface")
os.mkdir(root_base)
_make_train_archive(root_base)
_make_val_archive(root_base)
_make_test_archive(root_base)
_make_annotations_archive(root_base)
yield root
@contextlib.contextmanager
def places365_root(split="train-standard", small=False):
VARIANTS = {
"train-standard": "standard",
"train-challenge": "challenge",
"val": "standard",
}
# {split: file}
DEVKITS = {
"train-standard": "filelist_places365-standard.tar",
"train-challenge": "filelist_places365-challenge.tar",
"val": "filelist_places365-standard.tar",
}
CATEGORIES = "categories_places365.txt"
# {split: file}
FILE_LISTS = {
"train-standard": "places365_train_standard.txt",
"train-challenge": "places365_train_challenge.txt",
"val": "places365_train_standard.txt",
}
# {(split, small): (archive, folder_default, folder_renamed)}
IMAGES = {
("train-standard", False): ("train_large_places365standard.tar", "data_large", "data_large_standard"),
("train-challenge", False): ("train_large_places365challenge.tar", "data_large", "data_large_challenge"),
("val", False): ("val_large.tar", "val_large", "val_large"),
("train-standard", True): ("train_256_places365standard.tar", "data_256", "data_256_standard"),
("train-challenge", True): ("train_256_places365challenge.tar", "data_256", "data_256_challenge"),
("val", True): ("val_256.tar", "val_256", "val_256"),
}
# (class, idx)
CATEGORIES_CONTENT = (("/a/airfield", 0), ("/a/apartment_building/outdoor", 8), ("/b/badlands", 30))
# (file, idx)
FILE_LIST_CONTENT = (
("Places365_val_00000001.png", 0),
*((f"{category}/Places365_train_00000001.png", idx) for category, idx in CATEGORIES_CONTENT),
)
def mock_target(attr, partial="torchvision.datasets.places365.Places365"):
return f"{partial}.{attr}"
def make_txt(root, name, seq):
file = os.path.join(root, name)
with open(file, "w") as fh:
for string, idx in seq:
fh.write(f"{string} {idx}\n")
return name, compute_md5(file)
def make_categories_txt(root, name):
return make_txt(root, name, CATEGORIES_CONTENT)
def make_file_list_txt(root, name):
return make_txt(root, name, FILE_LIST_CONTENT)
def make_image(file, size):
os.makedirs(os.path.dirname(file), exist_ok=True)
PIL.Image.fromarray(np.zeros((*size, 3), dtype=np.uint8)).save(file)
def make_devkit_archive(stack, root, split):
archive = DEVKITS[split]
files = []
meta = make_categories_txt(root, CATEGORIES)
mock_class_attribute(stack, mock_target("_CATEGORIES_META"), meta)
files.append(meta[0])
meta = {split: make_file_list_txt(root, FILE_LISTS[split])}
mock_class_attribute(stack, mock_target("_FILE_LIST_META"), meta)
files.extend([item[0] for item in meta.values()])
meta = {VARIANTS[split]: make_tar(root, archive, *files)}
mock_class_attribute(stack, mock_target("_DEVKIT_META"), meta)
def make_images_archive(stack, root, split, small):
archive, folder_default, folder_renamed = IMAGES[(split, small)]
image_size = (256, 256) if small else (512, random.randint(512, 1024))
files, idcs = zip(*FILE_LIST_CONTENT)
images = [file.lstrip("/").replace("/", os.sep) for file in files]
for image in images:
make_image(os.path.join(root, folder_default, image), image_size)
meta = {(split, small): make_tar(root, archive, folder_default)}
mock_class_attribute(stack, mock_target("_IMAGES_META"), meta)
return [(os.path.join(root, folder_renamed, image), idx) for image, idx in zip(images, idcs)]
with contextlib.ExitStack() as stack, get_tmp_dir() as root:
make_devkit_archive(stack, root, split)
class_to_idx = dict(CATEGORIES_CONTENT)
classes = list(class_to_idx.keys())
data = {"class_to_idx": class_to_idx, "classes": classes}
data["imgs"] = make_images_archive(stack, root, split, small)
clean_dir(root, ".tar$")
yield root, data
|
import json, requests, datetime
item = 'HOT_POTATO_BOOK'
r = requests.get(f"https://api.hypixel.net/skyblock/bazaar/product?key=7e8355c8-a50b-4473-ba41-b03d0473a0d8&productId={item}").json()
for i in r['product_info']['week_historic']:
time = datetime.datetime.fromtimestamp(i['timestamp']/1000).strftime("%a, %H:%M")
print(f"{time} ---> Sells: {i["sells"]:,} >>> Buys: {i["buys"]:,}") | import json, requests, datetime
item = 'HOT_POTATO_BOOK'
r = requests.get(f"https://api.hypixel.net/skyblock/bazaar/product?key=7e8355c8-a50b-4473-ba41-b03d0473a0d8&productId={item}").json()
for i in r['product_info']['week_historic']:
time = datetime.datetime.fromtimestamp(i['timestamp']/1000).strftime("%a, %H:%M")
print(f"{time} ---> Sells: {i['sells']:,} >>> Buys: {i['buys']:,}") |
"""Definition and setup of the SpaceX Binary Sensors for Home Assistant."""
import logging
import time
import datetime
from homeassistant.util.dt import as_local, utc_from_timestamp
from homeassistant.components.sensor import ENTITY_ID_FORMAT, DEVICE_CLASS_TIMESTAMP
from homeassistant.const import LENGTH_KILOMETERS, SPEED_KILOMETERS_PER_HOUR, ATTR_NAME
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from . import SpaceXUpdateCoordinator
from .const import ATTR_IDENTIFIERS, ATTR_MANUFACTURER, ATTR_MODEL, DOMAIN, COORDINATOR
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the sensor platforms."""
coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR]
sensors = []
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Mission",
"spacex_next_launch_mission",
"mdi:information-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Day",
"spacex_next_launch_day",
"mdi:calendar",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Time",
"spacex_next_launch_time",
"mdi:clock-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Countdown",
"spacex_next_launch_countdown",
"mdi:clock-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Site",
"spacex_next_launch_site",
"mdi:map-marker",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Rocket",
"spacex_next_launch_rocket",
"mdi:rocket",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Payload",
"spacex_next_launch_payload",
"mdi:package",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Confirmed Launch Day",
"spacex_next_confirmed_launch_day",
"mdi:calendar",
"spacexlaunch"
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Confirmed Launch Time",
"spacex_next_confirmed_launch_time",
"mdi:clock-outline",
"spacexlaunch"
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Mission",
"spacex_latest_launch_mission",
"mdi:information-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Day",
"spacex_latest_launch_day",
"mdi:calendar",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Time",
"spacex_latest_launch_time",
"mdi:clock-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Site",
"spacex_latest_launch_site",
"mdi:map-marker",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Rocket",
"spacex_latest_launch_rocket",
"mdi:rocket",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Payload",
"spacex_latest_launch_payload",
"mdi:package",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Starman Speed",
"spacex_starman_speed",
"mdi:account-star",
"spacexstarman",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Starman Distance",
"spacex_starman_distance",
"mdi:map-marker-distance",
"spacexstarman",
)
)
async_add_entities(sensors)
class SpaceXSensor(CoordinatorEntity):
"""Defines a SpaceX Binary sensor."""
def __init__(
self,
coordinator: SpaceXUpdateCoordinator,
name: str,
entity_id: str,
icon:str,
device_identifier:str,
):
"""Initialize Entities."""
super().__init__(coordinator=coordinator)
self._name = name
self._unique_id = f"spacex_{entity_id}"
self._state = None
self._icon = icon
self._kind = entity_id
self._device_identifier = device_identifier
self._unit_of_measure = None
self.attrs = {}
if self._kind == "spacex_starman_speed":
self._unit_of_measure = SPEED_KILOMETERS_PER_HOUR
elif self._kind == "spacex_starman_distance":
self._unit_of_measure = LENGTH_KILOMETERS
@property
def unique_id(self):
"""Return the unique Home Assistant friendly identifier for this entity."""
return self._unique_id
@property
def name(self):
"""Return the friendly name of this entity."""
return self._name
@property
def icon(self):
"""Return the icon for this entity."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit of measurement for this entity."""
return self._unit_of_measure
@property
def device_state_attributes(self):
"""Return the attributes."""
coordinator_data = self.coordinator.data
starman_data = coordinator_data["starman"]
launch_data = coordinator_data["next_launch"]
latest_launch_data = coordinator_data["latest_launch"]
if self._kind == "spacex_next_launch_mission":
self.attrs["mission_patch"] = launch_data["links"].get("patch",{}).get("large")
if launch_data.get("details"):
self.attrs["details"] = launch_data["details"][0:255]
if len(launch_data["details"]) > 255:
self.attrs["details2"] = launch_data["details"][255:510]
else:
self.attrs["details2"] = ""
if len(launch_data["details"]) > 510:
self.attrs["details3"] = launch_data["details"][510:765]
else:
self.attrs["details3"] = ""
self.attrs["video_link"] = launch_data["links"].get("webcast")
elif self._kind == "spacex_next_launch_day":
self.attrs["launch_date_unix"] = launch_data["date_unix"]
self.attrs["launch_date_utc"] = launch_data["date_utc"]
elif self._kind == "spacex_next_launch_time":
self.attrs["launch_time_24h"] = self._state = as_local(utc_from_timestamp(
launch_data["date_unix"]
)).strftime("%H:%M")
elif self._kind == "spacex_next_launch_countdown":
if launch_data["tbd"]:
self.attrs["t0_countdown"] = "NA"
else:
t0_countdown = int(launch_data["date_unix"]) - int(time.time())
day = t0_countdown // (24 * 3600)
t0_countdown = t0_countdown % (24 * 3600)
hour = t0_countdown // 3600
t0_countdown %= 3600
minutes = t0_countdown // 60
t0_countdown %= 60
seconds = t0_countdown
countdown_string = ""
if day > 0:
countdown_string = f"{day} days, "
if hour > 0:
countdown_string = f"{countdown_string}{hour} hours, "
if minutes > 0:
countdown_string = f"{countdown_string}{minutes} minutes, "
countdown_string = f"{countdown_string}{seconds} seconds until the launch of {launch_data["name"]}."
self.attrs["t0_countdown"] = countdown_string
elif self._kind == "spacex_next_confirmed_launch_day":
if launch_data["tbd"]:
self.attrs["launch_date_unix"] = "NA"
self.attrs["launch_date_utc"] = "NA"
else:
self.attrs["launch_date_unix"] = launch_data["date_unix"]
self.attrs["launch_date_utc"] = launch_data["date_utc"]
elif self._kind == "spacex_next_confirmed_launch_time":
self.attrs["launch_time_24h"] = self._state = as_local(utc_from_timestamp(
launch_data["date_unix"]
)).strftime("%H:%M")
elif self._kind == "spacex_next_launch_site":
self.attrs["short_name"] = launch_data["launch_site"]["name"]
elif self._kind == "spacex_next_launch_rocket":
core_counter = 1
for this_core in launch_data["cores_detail"]:
if this_core.get("details"):
self.attrs["core_" + str(core_counter) + "_serial"] = this_core["details"].get("serial")
self.attrs["core_" + str(core_counter) + "_block"] = this_core["details"].get("block")
self.attrs["core_" + str(core_counter) + "_flight"] = this_core.get(
"flight"
)
self.attrs[
"core_" + str(core_counter) + "_landing_intent"
] = this_core.get("landing_attempt")
if this_core.get("landpad"):
self.attrs["core_" + str(core_counter) + "_lz"] = this_core["landpad"][
"name"
]
self.attrs["core_" + str(core_counter) + "_lz_long"] = this_core["landpad"][
"full_name"
]
else:
self.attrs["core_" + str(core_counter) + "_lz"] = "NA"
self.attrs["core_" + str(core_counter) + "_lz_long"] = "NA"
core_counter = core_counter + 1
if launch_data.get("fairings"):
self.attrs["fairings_reused"] = launch_data.get("fairings",{}).get(
"reused"
)
else:
self.attrs["fairings_reused"] = "NA"
elif self._kind == "spacex_next_launch_payload":
if len(launch_data["payloads_detail"]):
if len(launch_data["payloads_detail"][0]["nationalities"]):
self.attrs["nationality"] = launch_data["payloads_detail"][0]["nationalities"][0]
else:
self.attrs["nationality"] = "NA"
if len(launch_data["payloads_detail"][0]["manufacturers"]):
self.attrs["manufacturer"] = launch_data["payloads_detail"][0]["manufacturers"][0]
else:
self.attrs["manufacturer"] = "NA"
self.attrs["payload_type"] = launch_data["payloads_detail"][0]["type"]
self.attrs["payload_mass"] = (
str(
launch_data["payloads_detail"][0]["mass_kg"]
)
+ " kg"
)
self.attrs["payload_mass_us"] = (
str(
launch_data["payloads_detail"][0]["mass_lbs"]
)
+ " lbs"
)
self.attrs["orbit"] = launch_data["payloads_detail"][0]["orbit"]
elif self._kind == "spacex_latest_launch_mission":
self.attrs["mission_patch"] = latest_launch_data["links"].get("patch",{}).get("large")
if latest_launch_data.get("details"):
self.attrs["details"] = latest_launch_data["details"][0:255]
if len(latest_launch_data["details"]) > 255:
self.attrs["details2"] = latest_launch_data["details"][255:510]
else:
self.attrs["details2"] = ""
if len(latest_launch_data["details"]) > 510:
self.attrs["details3"] = latest_launch_data["details"][510:765]
else:
self.attrs["details3"] = ""
self.attrs["video_link"] = latest_launch_data["links"].get("webcast")
elif self._kind == "spacex_latest_launch_day":
self.attrs["launch_date_unix"] = latest_launch_data["date_unix"]
self.attrs["launch_date_utc"] = latest_launch_data["date_utc"]
elif self._kind == "spacex_latest_launch_time":
self.attrs["launch_time_24h"] = self._state = as_local(utc_from_timestamp(
latest_launch_data["date_unix"]
)).strftime("%H:%M")
elif self._kind == "spacex_latest_launch_site":
self.attrs["short_name"] = latest_launch_data["launch_site"]["name"]
elif self._kind == "spacex_latest_launch_rocket":
core_counter = 1
for this_core in latest_launch_data["cores_detail"]:
self.attrs["core_" + str(core_counter) + "_serial"] = this_core["details"][
"serial"
]
self.attrs["core_" + str(core_counter) + "_flight"] = this_core[
"flight"
]
self.attrs["core_" + str(core_counter) + "_block"] = this_core["details"][
"block"
]
self.attrs[
"core_" + str(core_counter) + "_landing_intent"
] = this_core["landing_attempt"]
self.attrs["core_" + str(core_counter) + "_lz"] = this_core["landpad"][
"name"
]
self.attrs["core_" + str(core_counter) + "_lz_long"] = this_core["landpad"][
"full_name"
]
core_counter = core_counter + 1
if latest_launch_data.get("fairings"):
self.attrs["fairings_reused"] = latest_launch_data["fairings"].get(
"reused"
)
elif self._kind == "spacex_latest_launch_payload":
if len(latest_launch_data["payloads_detail"]):
if len(latest_launch_data["payloads_detail"][0]["nationalities"]):
self.attrs["nationality"] = latest_launch_data["payloads_detail"][0]["nationalities"][0]
else:
self.attrs["nationality"] = "NA"
if len(latest_launch_data["payloads_detail"][0]["manufacturers"]):
self.attrs["manufacturer"] = latest_launch_data["payloads_detail"][0]["manufacturers"][0]
else:
self.attrs["manufacturer"] = "NA"
self.attrs["payload_type"] = latest_launch_data["payloads_detail"][0]["type"]
self.attrs["payload_mass"] = (
str(
latest_launch_data["payloads_detail"][0]["mass_kg"]
)
+ " kg"
)
self.attrs["payload_mass_us"] = (
str(
latest_launch_data["payloads_detail"][0]["mass_lbs"]
)
+ " lbs"
)
self.attrs["orbit"] = latest_launch_data["payloads_detail"][0]["orbit"]
elif self._kind == "spacex_starman_speed":
self.attrs["machspeed"] = float(starman_data["speed_kph"]) / 1235
elif self._kind == "spacex_starman_distance":
self.attrs["au_distance"] = float(starman_data["earth_distance_km"]) / (1.496 * (10**8))
return self.attrs
@property
def device_info(self):
"""Define the device based on device_identifier."""
device_name = "SpaceX Launches"
device_model = "Launch"
if self._device_identifier != "spacexlaunch":
device_name = "SpaceX Starman"
device_model = "Starman"
return {
ATTR_IDENTIFIERS: {(DOMAIN, self._device_identifier)},
ATTR_NAME: device_name,
ATTR_MANUFACTURER: "SpaceX",
ATTR_MODEL: device_model,
}
@property
def state(self):
"""Return the state."""
coordinator_data = self.coordinator.data
starman_data = coordinator_data["starman"]
launch_data = coordinator_data["next_launch"]
latest_launch_data = coordinator_data["latest_launch"]
if self._kind == "spacex_next_launch_mission":
self._state = launch_data["name"]
elif self._kind == "spacex_next_launch_day":
self._state = as_local(utc_from_timestamp(
launch_data["date_unix"]
)).strftime("%d-%b-%Y")
elif self._kind == "spacex_next_launch_time":
self._state = as_local(utc_from_timestamp(
launch_data["date_unix"]
)).strftime("%I:%M %p")
elif self._kind == "spacex_next_launch_countdown":
if launch_data["tbd"]:
self._state = None
else:
t0_countdown = int(launch_data["date_unix"]) - int(time.time())
self._state = str(datetime.timedelta(seconds=t0_countdown))
elif self._kind == "spacex_next_confirmed_launch_day":
if launch_data["tbd"]:
self._state = None
else:
self._state = as_local(utc_from_timestamp(
launch_data["date_unix"]
)).strftime("%d-%b-%Y")
elif self._kind == "spacex_next_confirmed_launch_time":
if launch_data["tbd"]:
self._state = None
else:
self._state = as_local(utc_from_timestamp(
launch_data["date_unix"]
)).strftime("%I:%M %p")
elif self._kind == "spacex_next_launch_site":
self._state = launch_data["launch_site"]["full_name"]
elif self._kind == "spacex_next_launch_rocket":
self._state = launch_data["rocket"]["name"]
elif self._kind == "spacex_next_launch_payload":
self._state = launch_data["payloads_detail"][0]["name"]
elif self._kind == "spacex_latest_launch_mission":
self._state = latest_launch_data["name"]
elif self._kind == "spacex_latest_launch_day":
self._state = as_local(utc_from_timestamp(
latest_launch_data["date_unix"]
)).strftime("%d-%b-%Y")
elif self._kind == "spacex_latest_launch_time":
self._state = as_local(utc_from_timestamp(
latest_launch_data["date_unix"]
)).strftime("%I:%M %p")
elif self._kind == "spacex_latest_launch_site":
self._state = latest_launch_data["launch_site"]["full_name"]
elif self._kind == "spacex_latest_launch_rocket":
self._state = latest_launch_data["rocket"]["name"]
elif self._kind == "spacex_latest_launch_payload":
self._state = latest_launch_data["payloads_detail"][0]["name"]
elif self._kind == "spacex_starman_speed":
self._state = int(starman_data["speed_kph"])
self._unit_of_measure = SPEED_KILOMETERS_PER_HOUR
elif self._kind == "spacex_starman_distance":
self._state = int(starman_data["earth_distance_km"])
self._unit_of_measure = LENGTH_KILOMETERS
return self._state
async def async_update(self):
"""Update SpaceX Binary Sensor Entity."""
await self.coordinator.async_request_refresh()
_LOGGER.debug("Updating state of the sensors.")
async def async_added_to_hass(self):
"""Subscribe to updates."""
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
| """Definition and setup of the SpaceX Binary Sensors for Home Assistant."""
import logging
import time
import datetime
from homeassistant.util.dt import as_local, utc_from_timestamp
from homeassistant.components.sensor import ENTITY_ID_FORMAT, DEVICE_CLASS_TIMESTAMP
from homeassistant.const import LENGTH_KILOMETERS, SPEED_KILOMETERS_PER_HOUR, ATTR_NAME
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from . import SpaceXUpdateCoordinator
from .const import ATTR_IDENTIFIERS, ATTR_MANUFACTURER, ATTR_MODEL, DOMAIN, COORDINATOR
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the sensor platforms."""
coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR]
sensors = []
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Mission",
"spacex_next_launch_mission",
"mdi:information-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Day",
"spacex_next_launch_day",
"mdi:calendar",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Time",
"spacex_next_launch_time",
"mdi:clock-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Countdown",
"spacex_next_launch_countdown",
"mdi:clock-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Site",
"spacex_next_launch_site",
"mdi:map-marker",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Rocket",
"spacex_next_launch_rocket",
"mdi:rocket",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Launch Payload",
"spacex_next_launch_payload",
"mdi:package",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Confirmed Launch Day",
"spacex_next_confirmed_launch_day",
"mdi:calendar",
"spacexlaunch"
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Next Confirmed Launch Time",
"spacex_next_confirmed_launch_time",
"mdi:clock-outline",
"spacexlaunch"
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Mission",
"spacex_latest_launch_mission",
"mdi:information-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Day",
"spacex_latest_launch_day",
"mdi:calendar",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Time",
"spacex_latest_launch_time",
"mdi:clock-outline",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Site",
"spacex_latest_launch_site",
"mdi:map-marker",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Rocket",
"spacex_latest_launch_rocket",
"mdi:rocket",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Latest Launch Payload",
"spacex_latest_launch_payload",
"mdi:package",
"spacexlaunch",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Starman Speed",
"spacex_starman_speed",
"mdi:account-star",
"spacexstarman",
)
)
sensors.append(
SpaceXSensor(
coordinator,
"Starman Distance",
"spacex_starman_distance",
"mdi:map-marker-distance",
"spacexstarman",
)
)
async_add_entities(sensors)
class SpaceXSensor(CoordinatorEntity):
    """Defines a SpaceX sensor.

    One entity is created per ``entity_id`` "kind"; the kind selects which
    slice of the coordinator data (next launch, latest launch or Starman
    telemetry) this sensor renders as state and extra attributes.
    """

    def __init__(
        self,
        coordinator: SpaceXUpdateCoordinator,
        name: str,
        entity_id: str,
        icon: str,
        device_identifier: str,
    ):
        """Initialize Entities.

        Args:
            coordinator: Shared SpaceX data update coordinator.
            name: Friendly name shown in Home Assistant.
            entity_id: Suffix for the unique id; also selects the sensor
                kind handled in the state/attribute logic (``self._kind``).
            icon: Material Design icon for the entity.
            device_identifier: Device grouping key ("spacexlaunch" or
                "spacexstarman").
        """
        super().__init__(coordinator=coordinator)
        self._name = name
        self._unique_id = f"spacex_{entity_id}"
        self._state = None
        self._icon = icon
        self._kind = entity_id
        self._device_identifier = device_identifier
        self._unit_of_measure = None
        # Extra state attributes, rebuilt in device_state_attributes.
        self.attrs = {}

        # Only the Starman telemetry sensors carry a unit of measurement.
        if self._kind == "spacex_starman_speed":
            self._unit_of_measure = SPEED_KILOMETERS_PER_HOUR
        elif self._kind == "spacex_starman_distance":
            self._unit_of_measure = LENGTH_KILOMETERS

    @property
    def unique_id(self):
        """Return the unique Home Assistant friendly identifier for this entity."""
        return self._unique_id

    @property
    def name(self):
        """Return the friendly name of this entity."""
        return self._name

    @property
    def icon(self):
        """Return the icon for this entity."""
        return self._icon

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement for this entity."""
        return self._unit_of_measure

    def _fill_mission_attrs(self, launch):
        """Fill mission patch, chunked details and webcast link for *launch*."""
        self.attrs["mission_patch"] = launch["links"].get("patch", {}).get("large")
        if launch.get("details"):
            details = launch["details"]
            # State attribute values are capped at 255 characters, so long
            # mission descriptions are split across up to three attributes.
            self.attrs["details"] = details[0:255]
            self.attrs["details2"] = details[255:510] if len(details) > 255 else ""
            self.attrs["details3"] = details[510:765] if len(details) > 510 else ""
        self.attrs["video_link"] = launch["links"].get("webcast")

    def _fill_core_attrs(self, launch):
        """Fill per-booster serial/block/flight/landing attributes for *launch*.

        Uses defensive ``.get`` access throughout: the API response may omit
        ``details`` or ``landpad`` (e.g. unannounced or expendable cores).
        """
        core_counter = 1
        for this_core in launch["cores_detail"]:
            prefix = "core_" + str(core_counter) + "_"
            if this_core.get("details"):
                self.attrs[prefix + "serial"] = this_core["details"].get("serial")
                self.attrs[prefix + "block"] = this_core["details"].get("block")
            self.attrs[prefix + "flight"] = this_core.get("flight")
            self.attrs[prefix + "landing_intent"] = this_core.get("landing_attempt")
            if this_core.get("landpad"):
                self.attrs[prefix + "lz"] = this_core["landpad"]["name"]
                self.attrs[prefix + "lz_long"] = this_core["landpad"]["full_name"]
            else:
                self.attrs[prefix + "lz"] = "NA"
                self.attrs[prefix + "lz_long"] = "NA"
            core_counter = core_counter + 1
        # Fairing reuse flag; "NA" for vehicles without fairings (e.g. Dragon).
        if launch.get("fairings"):
            self.attrs["fairings_reused"] = launch["fairings"].get("reused")
        else:
            self.attrs["fairings_reused"] = "NA"

    def _fill_payload_attrs(self, launch):
        """Fill attributes describing the primary payload of *launch*."""
        if not launch["payloads_detail"]:
            return
        payload = launch["payloads_detail"][0]
        if payload["nationalities"]:
            self.attrs["nationality"] = payload["nationalities"][0]
        else:
            self.attrs["nationality"] = "NA"
        if payload["manufacturers"]:
            self.attrs["manufacturer"] = payload["manufacturers"][0]
        else:
            self.attrs["manufacturer"] = "NA"
        self.attrs["payload_type"] = payload["type"]
        self.attrs["payload_mass"] = str(payload["mass_kg"]) + " kg"
        self.attrs["payload_mass_us"] = str(payload["mass_lbs"]) + " lbs"
        self.attrs["orbit"] = payload["orbit"]

    def _countdown_text(self, launch_data):
        """Return a human readable T-0 countdown string for *launch_data*."""
        t0_countdown = int(launch_data["date_unix"]) - int(time.time())
        day, t0_countdown = divmod(t0_countdown, 24 * 3600)
        hour, t0_countdown = divmod(t0_countdown, 3600)
        minutes, seconds = divmod(t0_countdown, 60)
        countdown_string = ""
        if day > 0:
            countdown_string = f"{day} days, "
        if hour > 0:
            countdown_string = f"{countdown_string}{hour} hours, "
        if minutes > 0:
            countdown_string = f"{countdown_string}{minutes} minutes, "
        return (
            f"{countdown_string}{seconds} seconds until the launch "
            f"of {launch_data['name']}."
        )

    @property
    def device_state_attributes(self):
        """Return the attributes."""
        coordinator_data = self.coordinator.data
        starman_data = coordinator_data["starman"]
        launch_data = coordinator_data["next_launch"]
        latest_launch_data = coordinator_data["latest_launch"]

        if self._kind == "spacex_next_launch_mission":
            self._fill_mission_attrs(launch_data)
        elif self._kind == "spacex_next_launch_day":
            self.attrs["launch_date_unix"] = launch_data["date_unix"]
            self.attrs["launch_date_utc"] = launch_data["date_utc"]
        elif self._kind == "spacex_next_launch_time":
            # Also refreshes the cached state with the 24h representation.
            self.attrs["launch_time_24h"] = self._state = as_local(
                utc_from_timestamp(launch_data["date_unix"])
            ).strftime("%H:%M")
        elif self._kind == "spacex_next_launch_countdown":
            if launch_data["tbd"]:
                self.attrs["t0_countdown"] = "NA"
            else:
                self.attrs["t0_countdown"] = self._countdown_text(launch_data)
        elif self._kind == "spacex_next_confirmed_launch_day":
            if launch_data["tbd"]:
                self.attrs["launch_date_unix"] = "NA"
                self.attrs["launch_date_utc"] = "NA"
            else:
                self.attrs["launch_date_unix"] = launch_data["date_unix"]
                self.attrs["launch_date_utc"] = launch_data["date_utc"]
        elif self._kind == "spacex_next_confirmed_launch_time":
            self.attrs["launch_time_24h"] = self._state = as_local(
                utc_from_timestamp(launch_data["date_unix"])
            ).strftime("%H:%M")
        elif self._kind == "spacex_next_launch_site":
            self.attrs["short_name"] = launch_data["launch_site"]["name"]
        elif self._kind == "spacex_next_launch_rocket":
            self._fill_core_attrs(launch_data)
        elif self._kind == "spacex_next_launch_payload":
            self._fill_payload_attrs(launch_data)
        elif self._kind == "spacex_latest_launch_mission":
            self._fill_mission_attrs(latest_launch_data)
        elif self._kind == "spacex_latest_launch_day":
            self.attrs["launch_date_unix"] = latest_launch_data["date_unix"]
            self.attrs["launch_date_utc"] = latest_launch_data["date_utc"]
        elif self._kind == "spacex_latest_launch_time":
            self.attrs["launch_time_24h"] = self._state = as_local(
                utc_from_timestamp(latest_launch_data["date_unix"])
            ).strftime("%H:%M")
        elif self._kind == "spacex_latest_launch_site":
            self.attrs["short_name"] = latest_launch_data["launch_site"]["name"]
        elif self._kind == "spacex_latest_launch_rocket":
            # Previously used bare item access and raised KeyError/TypeError
            # when the API omitted core details or landpad; now shares the
            # guarded helper used for the next launch.
            self._fill_core_attrs(latest_launch_data)
        elif self._kind == "spacex_latest_launch_payload":
            self._fill_payload_attrs(latest_launch_data)
        elif self._kind == "spacex_starman_speed":
            # Approximate Mach number at sea level (Mach 1 ~ 1235 km/h).
            self.attrs["machspeed"] = float(starman_data["speed_kph"]) / 1235
        elif self._kind == "spacex_starman_distance":
            # Astronomical units (1 AU = 1.496e8 km).
            self.attrs["au_distance"] = float(starman_data["earth_distance_km"]) / (
                1.496 * (10 ** 8)
            )
        return self.attrs

    @property
    def state(self):
        """Return the state."""
        coordinator_data = self.coordinator.data
        starman_data = coordinator_data["starman"]
        launch_data = coordinator_data["next_launch"]
        latest_launch_data = coordinator_data["latest_launch"]

        if self._kind == "spacex_next_launch_mission":
            self._state = launch_data["name"]
        elif self._kind == "spacex_next_launch_day":
            self._state = as_local(
                utc_from_timestamp(launch_data["date_unix"])
            ).strftime("%d-%b-%Y")
        elif self._kind == "spacex_next_launch_time":
            self._state = as_local(
                utc_from_timestamp(launch_data["date_unix"])
            ).strftime("%I:%M %p")
        elif self._kind == "spacex_next_launch_countdown":
            # TBD launches have no meaningful countdown.
            if launch_data["tbd"]:
                self._state = None
            else:
                t0_countdown = int(launch_data["date_unix"]) - int(time.time())
                self._state = str(datetime.timedelta(seconds=t0_countdown))
        elif self._kind == "spacex_next_confirmed_launch_day":
            if launch_data["tbd"]:
                self._state = None
            else:
                self._state = as_local(
                    utc_from_timestamp(launch_data["date_unix"])
                ).strftime("%d-%b-%Y")
        elif self._kind == "spacex_next_confirmed_launch_time":
            if launch_data["tbd"]:
                self._state = None
            else:
                self._state = as_local(
                    utc_from_timestamp(launch_data["date_unix"])
                ).strftime("%I:%M %p")
        elif self._kind == "spacex_next_launch_site":
            self._state = launch_data["launch_site"]["full_name"]
        elif self._kind == "spacex_next_launch_rocket":
            self._state = launch_data["rocket"]["name"]
        elif self._kind == "spacex_next_launch_payload":
            self._state = launch_data["payloads_detail"][0]["name"]
        elif self._kind == "spacex_latest_launch_mission":
            self._state = latest_launch_data["name"]
        elif self._kind == "spacex_latest_launch_day":
            self._state = as_local(
                utc_from_timestamp(latest_launch_data["date_unix"])
            ).strftime("%d-%b-%Y")
        elif self._kind == "spacex_latest_launch_time":
            self._state = as_local(
                utc_from_timestamp(latest_launch_data["date_unix"])
            ).strftime("%I:%M %p")
        elif self._kind == "spacex_latest_launch_site":
            self._state = latest_launch_data["launch_site"]["full_name"]
        elif self._kind == "spacex_latest_launch_rocket":
            self._state = latest_launch_data["rocket"]["name"]
        elif self._kind == "spacex_latest_launch_payload":
            self._state = latest_launch_data["payloads_detail"][0]["name"]
        elif self._kind == "spacex_starman_speed":
            self._state = int(starman_data["speed_kph"])
            self._unit_of_measure = SPEED_KILOMETERS_PER_HOUR
        elif self._kind == "spacex_starman_distance":
            self._state = int(starman_data["earth_distance_km"])
            self._unit_of_measure = LENGTH_KILOMETERS
        return self._state

    @property
    def device_info(self):
        """Define the device based on device_identifier."""
        device_name = "SpaceX Launches"
        device_model = "Launch"
        if self._device_identifier != "spacexlaunch":
            device_name = "SpaceX Starman"
            device_model = "Starman"
        return {
            ATTR_IDENTIFIERS: {(DOMAIN, self._device_identifier)},
            ATTR_NAME: device_name,
            ATTR_MANUFACTURER: "SpaceX",
            ATTR_MODEL: device_model,
        }

    async def async_update(self):
        """Update SpaceX Binary Sensor Entity."""
        await self.coordinator.async_request_refresh()
        _LOGGER.debug("Updating state of the sensors.")

    async def async_added_to_hass(self):
        """Subscribe to updates."""
        self.async_on_remove(
            self.coordinator.async_add_listener(self.async_write_ha_state)
        )
|
"""Tests for the unparse.py script in the Tools/parser directory."""
import unittest
import test.support
import pathlib
import random
import tokenize
import ast
def read_pyfile(filename):
    """Return the text of the Python source file *filename*.

    The file's declared encoding (coding cookie or BOM) is detected first
    so the source bytes are decoded correctly.
    """
    with open(filename, "rb") as stream:
        detected_encoding, _ = tokenize.detect_encoding(stream.readline)
    with open(filename, "r", encoding=detected_encoding) as stream:
        return stream.read()
for_else = """\
def f():
for x in range(10):
break
else:
y = 2
z = 3
"""
while_else = """\
def g():
while True:
break
else:
y = 2
z = 3
"""
relative_import = """\
from . import fred
from .. import barney
from .australia import shrimp as prawns
"""
nonlocal_ex = """\
def f():
x = 1
def g():
nonlocal x
x = 2
y = 7
def h():
nonlocal x, y
"""
# also acts as test for 'except ... as ...'
raise_from = """\
try:
1 / 0
except ZeroDivisionError as e:
raise ArithmeticError from e
"""
class_decorator = """\
@f1(arg)
@f2
class Foo: pass
"""
elif1 = """\
if cond1:
suite1
elif cond2:
suite2
else:
suite3
"""
elif2 = """\
if cond1:
suite1
elif cond2:
suite2
"""
try_except_finally = """\
try:
suite1
except ex1:
suite2
except ex2:
suite3
else:
suite4
finally:
suite5
"""
with_simple = """\
with f():
suite1
"""
with_as = """\
with f() as x:
suite1
"""
with_two_items = """\
with f() as x, g() as y:
suite1
"""
class ASTTestCase(unittest.TestCase):
    """Shared parse/unparse round-trip helpers for the unparse tests."""

    def assertASTEqual(self, ast1, ast2):
        """Assert that two trees dump to identical canonical text."""
        self.assertEqual(ast.dump(ast1), ast.dump(ast2))

    def check_roundtrip(self, code1):
        """Parse, unparse, re-parse *code1* and compare the two trees."""
        tree = ast.parse(code1)
        regenerated = ast.unparse(tree)
        self.assertASTEqual(tree, ast.parse(regenerated))

    def check_invalid(self, node, raises=ValueError):
        """Assert that unparsing *node* raises *raises*."""
        self.assertRaises(raises, ast.unparse, node)

    def check_src_roundtrip(self, code1, code2=None, strip=True):
        """Unparse *code1* and compare the text against *code2* (or itself)."""
        expected = code2 or code1
        actual = ast.unparse(ast.parse(code1))
        if strip:
            actual = actual.strip()
        self.assertEqual(expected, actual)
class UnparseTestCase(ASTTestCase):
    # Tests for specific bugs found in earlier versions of unparse

    def test_fstrings(self):
        # See issue 25180
        # Quote the inner f-string with double quotes: reusing the outer
        # quote character inside an f-string is only legal from Python 3.12
        # (PEP 701), and these snippets must parse on older interpreters.
        self.check_roundtrip(r"""f'{f"{0}"*3}'""")
        self.check_roundtrip(r"""f'{f"{y}"*3}'""")

    def test_strings(self):
        self.check_roundtrip("u'foo'")
        self.check_roundtrip("r'foo'")
        self.check_roundtrip("b'foo'")

    def test_del_statement(self):
        self.check_roundtrip("del x, y, z")

    def test_shifts(self):
        self.check_roundtrip("45 << 2")
        self.check_roundtrip("13 >> 7")

    def test_for_else(self):
        self.check_roundtrip(for_else)

    def test_while_else(self):
        self.check_roundtrip(while_else)

    def test_unary_parens(self):
        self.check_roundtrip("(-1)**7")
        self.check_roundtrip("(-1.)**8")
        self.check_roundtrip("(-1j)**6")
        self.check_roundtrip("not True or False")
        self.check_roundtrip("True or not False")

    def test_integer_parens(self):
        self.check_roundtrip("3 .__abs__()")

    def test_huge_float(self):
        self.check_roundtrip("1e1000")
        self.check_roundtrip("-1e1000")
        self.check_roundtrip("1e1000j")
        self.check_roundtrip("-1e1000j")

    def test_min_int(self):
        self.check_roundtrip(str(-(2 ** 31)))
        self.check_roundtrip(str(-(2 ** 63)))

    def test_imaginary_literals(self):
        self.check_roundtrip("7j")
        self.check_roundtrip("-7j")
        self.check_roundtrip("0j")
        self.check_roundtrip("-0j")

    def test_lambda_parentheses(self):
        self.check_roundtrip("(lambda: int)()")

    def test_chained_comparisons(self):
        self.check_roundtrip("1 < 4 <= 5")
        self.check_roundtrip("a is b is c is not d")

    def test_function_arguments(self):
        self.check_roundtrip("def f(): pass")
        self.check_roundtrip("def f(a): pass")
        self.check_roundtrip("def f(b = 2): pass")
        self.check_roundtrip("def f(a, b): pass")
        self.check_roundtrip("def f(a, b = 2): pass")
        self.check_roundtrip("def f(a = 5, b = 2): pass")
        self.check_roundtrip("def f(*, a = 1, b = 2): pass")
        self.check_roundtrip("def f(*, a = 1, b): pass")
        self.check_roundtrip("def f(*, a, b = 2): pass")
        self.check_roundtrip("def f(a, b = None, *, c, **kwds): pass")
        self.check_roundtrip("def f(a=2, *args, c=5, d, **kwds): pass")
        self.check_roundtrip("def f(*args, **kwargs): pass")

    def test_relative_import(self):
        self.check_roundtrip(relative_import)

    def test_nonlocal(self):
        self.check_roundtrip(nonlocal_ex)

    def test_raise_from(self):
        self.check_roundtrip(raise_from)

    def test_bytes(self):
        self.check_roundtrip("b'123'")

    def test_annotations(self):
        self.check_roundtrip("def f(a : int): pass")
        self.check_roundtrip("def f(a: int = 5): pass")
        self.check_roundtrip("def f(*args: [int]): pass")
        self.check_roundtrip("def f(**kwargs: dict): pass")
        self.check_roundtrip("def f() -> None: pass")

    def test_set_literal(self):
        self.check_roundtrip("{'a', 'b', 'c'}")

    def test_set_comprehension(self):
        self.check_roundtrip("{x for x in range(5)}")

    def test_dict_comprehension(self):
        self.check_roundtrip("{x: x*x for x in range(10)}")

    def test_class_decorators(self):
        self.check_roundtrip(class_decorator)

    def test_class_definition(self):
        self.check_roundtrip("class A(metaclass=type, *[], **{}): pass")

    def test_elifs(self):
        self.check_roundtrip(elif1)
        self.check_roundtrip(elif2)

    def test_try_except_finally(self):
        self.check_roundtrip(try_except_finally)

    def test_starred_assignment(self):
        self.check_roundtrip("a, *b, c = seq")
        self.check_roundtrip("a, (*b, c) = seq")
        self.check_roundtrip("a, *b[0], c = seq")
        self.check_roundtrip("a, *(b, c) = seq")

    def test_with_simple(self):
        self.check_roundtrip(with_simple)

    def test_with_as(self):
        self.check_roundtrip(with_as)

    def test_with_two_items(self):
        self.check_roundtrip(with_two_items)

    def test_dict_unpacking_in_dict(self):
        # See issue 26489
        self.check_roundtrip(r"""{**{'y': 2}, 'x': 1}""")
        self.check_roundtrip(r"""{**{'y': 2}, **{'x': 1}}""")

    def test_invalid_raise(self):
        self.check_invalid(ast.Raise(exc=None, cause=ast.Name(id="X")))

    def test_invalid_fstring_constant(self):
        self.check_invalid(ast.JoinedStr(values=[ast.Constant(value=100)]))

    def test_invalid_fstring_conversion(self):
        self.check_invalid(
            ast.FormattedValue(
                value=ast.Constant(value="a", kind=None),
                conversion=ord("Y"),  # random character
                format_spec=None,
            )
        )

    def test_invalid_set(self):
        self.check_invalid(ast.Set(elts=[]))

    def test_invalid_yield_from(self):
        self.check_invalid(ast.YieldFrom(value=None))
class CosmeticTestCase(ASTTestCase):
    """Check that unparse does not introduce unnecessary cosmetic syntax."""

    def test_simple_expressions_parens(self):
        # Every snippet below must come back from a parse/unparse cycle
        # textually unchanged, i.e. with no parentheses added or removed.
        snippets = (
            "(a := b)",
            "await x",
            "x if x else y",
            "lambda x: x",
            "1 + 1",
            "1 + 2 / 3",
            "(1 + 2) / 3",
            "(1 + 2) * 3 + 4 * (5 + 2)",
            "(1 + 2) * 3 + 4 * (5 + 2) ** 2",
            "~ x",
            "x and y",
            "x and y and z",
            "x and (y and x)",
            "(x and y) and z",
            "(x ** y) ** z ** q",
            "x >> y",
            "x << y",
            "x >> y and x >> z",
            "x + y - z * q ^ t ** k",
            "P * V if P and V else n * R * T",
            "lambda P, V, n: P * V == n * R * T",
            "flag & (other | foo)",
            "not x == y",
            "x == (not y)",
            "yield x",
            "yield from x",
            "call((yield x))",
            "return x + (yield x)",
        )
        for snippet in snippets:
            self.check_src_roundtrip(snippet)
class DirectoryTestCase(ASTTestCase):
    """Test roundtrip behaviour on all files in Lib and Lib/test."""

    # Directories scanned for *.py files to round-trip.
    lib_dir = pathlib.Path(__file__).parent / ".."
    test_directories = (lib_dir, lib_dir / "test")
    # Files whose f-strings are known not to round-trip (see issue 28002).
    skip_files = {"test_fstring.py"}
    # Grammar-heavy files that are always tested, even in a sampled run.
    run_always_files = {"test_grammar.py", "test_syntax.py", "test_compile.py",
                        "test_ast.py", "test_asdl_parser.py"}

    # Cached result of files_to_test(); None until first computed.
    _files_to_test = None

    @classmethod
    def files_to_test(cls):
        """Return (and cache) the list of source files to round-trip."""
        if cls._files_to_test is not None:
            return cls._files_to_test

        items = [
            item.resolve()
            for directory in cls.test_directories
            for item in directory.glob("*.py")
            if not item.name.startswith("bad")
        ]

        # Test limited subset of files unless the 'cpu' resource is specified.
        if not test.support.is_resource_enabled("cpu"):

            tests_to_run_always = {item for item in items if
                                   item.name in cls.run_always_files}

            items = set(random.sample(items, 10))

            # Make sure that at least tests that heavily use grammar features are
            # always considered in order to reduce the chance of missing something.
            items = list(items | tests_to_run_always)

        # bpo-31174: Store the names sample to always test the same files.
        # It prevents false alarms when hunting reference leaks.
        cls._files_to_test = items

        return items

    def test_files(self):
        """Round-trip every selected file, skipping known-bad ones."""
        for item in self.files_to_test():
            if test.support.verbose:
                print(f"Testing {item.absolute()}")

            # Some f-strings are not correctly round-tripped by
            # Tools/parser/unparse.py. See issue 28002 for details.
            # We need to skip files that contain such f-strings.
            if item.name in self.skip_files:
                if test.support.verbose:
                    print(f"Skipping {item.absolute()}: see issue 28002")
                continue

            with self.subTest(filename=item):
                source = read_pyfile(item)
                self.check_roundtrip(source)
# Run the suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| """Tests for the unparse.py script in the Tools/parser directory."""
import unittest
import test.support
import pathlib
import random
import tokenize
import ast
def read_pyfile(filename):
"""Read and return the contents of a Python source file (as a
string), taking into account the file encoding."""
with open(filename, "rb") as pyfile:
encoding = tokenize.detect_encoding(pyfile.readline)[0]
with open(filename, "r", encoding=encoding) as pyfile:
source = pyfile.read()
return source
for_else = """\
def f():
for x in range(10):
break
else:
y = 2
z = 3
"""
while_else = """\
def g():
while True:
break
else:
y = 2
z = 3
"""
relative_import = """\
from . import fred
from .. import barney
from .australia import shrimp as prawns
"""
nonlocal_ex = """\
def f():
x = 1
def g():
nonlocal x
x = 2
y = 7
def h():
nonlocal x, y
"""
# also acts as test for 'except ... as ...'
raise_from = """\
try:
1 / 0
except ZeroDivisionError as e:
raise ArithmeticError from e
"""
class_decorator = """\
@f1(arg)
@f2
class Foo: pass
"""
elif1 = """\
if cond1:
suite1
elif cond2:
suite2
else:
suite3
"""
elif2 = """\
if cond1:
suite1
elif cond2:
suite2
"""
try_except_finally = """\
try:
suite1
except ex1:
suite2
except ex2:
suite3
else:
suite4
finally:
suite5
"""
with_simple = """\
with f():
suite1
"""
with_as = """\
with f() as x:
suite1
"""
with_two_items = """\
with f() as x, g() as y:
suite1
"""
class ASTTestCase(unittest.TestCase):
def assertASTEqual(self, ast1, ast2):
self.assertEqual(ast.dump(ast1), ast.dump(ast2))
def check_roundtrip(self, code1):
ast1 = ast.parse(code1)
code2 = ast.unparse(ast1)
ast2 = ast.parse(code2)
self.assertASTEqual(ast1, ast2)
def check_invalid(self, node, raises=ValueError):
self.assertRaises(raises, ast.unparse, node)
def check_src_roundtrip(self, code1, code2=None, strip=True):
code2 = code2 or code1
code1 = ast.unparse(ast.parse(code1))
if strip:
code1 = code1.strip()
self.assertEqual(code2, code1)
class UnparseTestCase(ASTTestCase):
# Tests for specific bugs found in earlier versions of unparse
def test_fstrings(self):
# See issue 25180
self.check_roundtrip(r"""f'{f"{0}"*3}'""")
self.check_roundtrip(r"""f'{f"{y}"*3}'""")
def test_strings(self):
self.check_roundtrip("u'foo'")
self.check_roundtrip("r'foo'")
self.check_roundtrip("b'foo'")
def test_del_statement(self):
self.check_roundtrip("del x, y, z")
def test_shifts(self):
self.check_roundtrip("45 << 2")
self.check_roundtrip("13 >> 7")
def test_for_else(self):
self.check_roundtrip(for_else)
def test_while_else(self):
self.check_roundtrip(while_else)
def test_unary_parens(self):
self.check_roundtrip("(-1)**7")
self.check_roundtrip("(-1.)**8")
self.check_roundtrip("(-1j)**6")
self.check_roundtrip("not True or False")
self.check_roundtrip("True or not False")
def test_integer_parens(self):
self.check_roundtrip("3 .__abs__()")
def test_huge_float(self):
self.check_roundtrip("1e1000")
self.check_roundtrip("-1e1000")
self.check_roundtrip("1e1000j")
self.check_roundtrip("-1e1000j")
def test_min_int(self):
self.check_roundtrip(str(-(2 ** 31)))
self.check_roundtrip(str(-(2 ** 63)))
def test_imaginary_literals(self):
self.check_roundtrip("7j")
self.check_roundtrip("-7j")
self.check_roundtrip("0j")
self.check_roundtrip("-0j")
def test_lambda_parentheses(self):
self.check_roundtrip("(lambda: int)()")
def test_chained_comparisons(self):
self.check_roundtrip("1 < 4 <= 5")
self.check_roundtrip("a is b is c is not d")
def test_function_arguments(self):
self.check_roundtrip("def f(): pass")
self.check_roundtrip("def f(a): pass")
self.check_roundtrip("def f(b = 2): pass")
self.check_roundtrip("def f(a, b): pass")
self.check_roundtrip("def f(a, b = 2): pass")
self.check_roundtrip("def f(a = 5, b = 2): pass")
self.check_roundtrip("def f(*, a = 1, b = 2): pass")
self.check_roundtrip("def f(*, a = 1, b): pass")
self.check_roundtrip("def f(*, a, b = 2): pass")
self.check_roundtrip("def f(a, b = None, *, c, **kwds): pass")
self.check_roundtrip("def f(a=2, *args, c=5, d, **kwds): pass")
self.check_roundtrip("def f(*args, **kwargs): pass")
def test_relative_import(self):
self.check_roundtrip(relative_import)
def test_nonlocal(self):
self.check_roundtrip(nonlocal_ex)
def test_raise_from(self):
self.check_roundtrip(raise_from)
def test_bytes(self):
self.check_roundtrip("b'123'")
def test_annotations(self):
self.check_roundtrip("def f(a : int): pass")
self.check_roundtrip("def f(a: int = 5): pass")
self.check_roundtrip("def f(*args: [int]): pass")
self.check_roundtrip("def f(**kwargs: dict): pass")
self.check_roundtrip("def f() -> None: pass")
def test_set_literal(self):
self.check_roundtrip("{'a', 'b', 'c'}")
def test_set_comprehension(self):
self.check_roundtrip("{x for x in range(5)}")
def test_dict_comprehension(self):
self.check_roundtrip("{x: x*x for x in range(10)}")
def test_class_decorators(self):
self.check_roundtrip(class_decorator)
def test_class_definition(self):
self.check_roundtrip("class A(metaclass=type, *[], **{}): pass")
def test_elifs(self):
self.check_roundtrip(elif1)
self.check_roundtrip(elif2)
def test_try_except_finally(self):
self.check_roundtrip(try_except_finally)
def test_starred_assignment(self):
self.check_roundtrip("a, *b, c = seq")
self.check_roundtrip("a, (*b, c) = seq")
self.check_roundtrip("a, *b[0], c = seq")
self.check_roundtrip("a, *(b, c) = seq")
def test_with_simple(self):
self.check_roundtrip(with_simple)
def test_with_as(self):
self.check_roundtrip(with_as)
def test_with_two_items(self):
self.check_roundtrip(with_two_items)
def test_dict_unpacking_in_dict(self):
# See issue 26489
self.check_roundtrip(r"""{**{'y': 2}, 'x': 1}""")
self.check_roundtrip(r"""{**{'y': 2}, **{'x': 1}}""")
def test_invalid_raise(self):
self.check_invalid(ast.Raise(exc=None, cause=ast.Name(id="X")))
def test_invalid_fstring_constant(self):
self.check_invalid(ast.JoinedStr(values=[ast.Constant(value=100)]))
def test_invalid_fstring_conversion(self):
self.check_invalid(
ast.FormattedValue(
value=ast.Constant(value="a", kind=None),
conversion=ord("Y"), # random character
format_spec=None,
)
)
def test_invalid_set(self):
self.check_invalid(ast.Set(elts=[]))
def test_invalid_yield_from(self):
self.check_invalid(ast.YieldFrom(value=None))
class CosmeticTestCase(ASTTestCase):
"""Test if there are cosmetic issues caused by unnecesary additions"""
def test_simple_expressions_parens(self):
self.check_src_roundtrip("(a := b)")
self.check_src_roundtrip("await x")
self.check_src_roundtrip("x if x else y")
self.check_src_roundtrip("lambda x: x")
self.check_src_roundtrip("1 + 1")
self.check_src_roundtrip("1 + 2 / 3")
self.check_src_roundtrip("(1 + 2) / 3")
self.check_src_roundtrip("(1 + 2) * 3 + 4 * (5 + 2)")
self.check_src_roundtrip("(1 + 2) * 3 + 4 * (5 + 2) ** 2")
self.check_src_roundtrip("~ x")
self.check_src_roundtrip("x and y")
self.check_src_roundtrip("x and y and z")
self.check_src_roundtrip("x and (y and x)")
self.check_src_roundtrip("(x and y) and z")
self.check_src_roundtrip("(x ** y) ** z ** q")
self.check_src_roundtrip("x >> y")
self.check_src_roundtrip("x << y")
self.check_src_roundtrip("x >> y and x >> z")
self.check_src_roundtrip("x + y - z * q ^ t ** k")
self.check_src_roundtrip("P * V if P and V else n * R * T")
self.check_src_roundtrip("lambda P, V, n: P * V == n * R * T")
self.check_src_roundtrip("flag & (other | foo)")
self.check_src_roundtrip("not x == y")
self.check_src_roundtrip("x == (not y)")
self.check_src_roundtrip("yield x")
self.check_src_roundtrip("yield from x")
self.check_src_roundtrip("call((yield x))")
self.check_src_roundtrip("return x + (yield x)")
class DirectoryTestCase(ASTTestCase):
"""Test roundtrip behaviour on all files in Lib and Lib/test."""
lib_dir = pathlib.Path(__file__).parent / ".."
test_directories = (lib_dir, lib_dir / "test")
skip_files = {"test_fstring.py"}
run_always_files = {"test_grammar.py", "test_syntax.py", "test_compile.py",
"test_ast.py", "test_asdl_parser.py"}
_files_to_test = None
@classmethod
def files_to_test(cls):
if cls._files_to_test is not None:
return cls._files_to_test
items = [
item.resolve()
for directory in cls.test_directories
for item in directory.glob("*.py")
if not item.name.startswith("bad")
]
# Test limited subset of files unless the 'cpu' resource is specified.
if not test.support.is_resource_enabled("cpu"):
tests_to_run_always = {item for item in items if
item.name in cls.run_always_files}
items = set(random.sample(items, 10))
# Make sure that at least tests that heavily use grammar features are
# always considered in order to reduce the chance of missing something.
items = list(items | tests_to_run_always)
# bpo-31174: Store the names sample to always test the same files.
# It prevents false alarms when hunting reference leaks.
cls._files_to_test = items
return items
def test_files(self):
for item in self.files_to_test():
if test.support.verbose:
print(f"Testing {item.absolute()}")
# Some f-strings are not correctly round-tripped by
# Tools/parser/unparse.py. See issue 28002 for details.
# We need to skip files that contain such f-strings.
if item.name in self.skip_files:
if test.support.verbose:
print(f"Skipping {item.absolute()}: see issue 28002")
continue
with self.subTest(filename=item):
source = read_pyfile(item)
self.check_roundtrip(source)
if __name__ == "__main__":
unittest.main()
|
"""
The ``python_function`` model flavor serves as a default model interface for MLflow Python models.
Any MLflow Python model is expected to be loadable as a ``python_function`` model.
In addition, the ``mlflow.pyfunc`` module defines a generic :ref:`filesystem format
<pyfunc-filesystem-format>` for Python models and provides utilities for saving to and loading from
this format. The format is self-contained in the sense that it includes all necessary information
for anyone to load it and use it. Dependencies are either stored directly with the model or
referenced via a Conda environment.
The ``mlflow.pyfunc`` module also defines utilities for creating custom ``pyfunc`` models
using frameworks and inference logic that may not be natively included in MLflow. See
:ref:`pyfunc-create-custom`.
.. _pyfunc-inference-api:
*************
Inference API
*************
Python function models are loaded as an instance of :py:class:`PyFuncModel
<mlflow.pyfunc.PyFuncModel>`, which is an MLflow wrapper around the model implementation and model
metadata (MLmodel file). You can score the model by calling the :py:func:`predict()
<mlflow.pyfunc.PyFuncModel.predict>` method, which has the following signature::
predict(
model_input: [pandas.DataFrame, numpy.ndarray, scipy.sparse.(csc.csc_matrix | csr.csr_matrix),
List[Any], Dict[str, Any]]
) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]
All PyFunc models will support `pandas.DataFrame` as input and DL PyFunc models will also support
tensor inputs in the form of Dict[str, numpy.ndarray] (named tensors) and `numpy.ndarrays`
(unnamed tensors).
.. _pyfunc-filesystem-format:
*****************
Filesystem format
*****************
The Pyfunc format is defined as a directory structure containing all required data, code, and
configuration::
./dst-path/
./MLmodel: configuration
<code>: code packaged with the model (specified in the MLmodel file)
<data>: data packaged with the model (specified in the MLmodel file)
<env>: Conda environment definition (specified in the MLmodel file)
The directory structure may contain additional contents that can be referenced by the ``MLmodel``
configuration.
.. _pyfunc-model-config:
MLModel configuration
#####################
A Python model contains an ``MLmodel`` file in **python_function** format in its root with the
following parameters:
- loader_module [required]:
Python module that can load the model. Expected as module identifier
e.g. ``mlflow.sklearn``, it will be imported using ``importlib.import_module``.
The imported module must contain a function with the following signature::
_load_pyfunc(path: string) -> <pyfunc model implementation>
The path argument is specified by the ``data`` parameter and may refer to a file or
directory. The model implementation is expected to be an object with a
``predict`` method with the following signature::
predict(
model_input: [pandas.DataFrame, numpy.ndarray,
scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], Dict[str, Any]]
) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]
- code [optional]:
Relative path to a directory containing the code packaged with this model.
All files and directories inside this directory are added to the Python path
prior to importing the model loader.
- data [optional]:
Relative path to a file or directory containing model data.
The path is passed to the model loader.
- env [optional]:
Relative path to an exported Conda environment. If present this environment
should be activated prior to running the model.
- Optionally, any additional parameters necessary for interpreting the serialized model in
``pyfunc`` format.
.. rubric:: Example
::
tree example/sklearn_iris/mlruns/run1/outputs/linear-lr
::
├── MLmodel
├── code
│ ├── sklearn_iris.py
│
├── data
│ └── model.pkl
└── mlflow_env.yml
::
cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel
::
python_function:
code: code
data: data/model.pkl
loader_module: mlflow.sklearn
env: mlflow_env.yml
main: sklearn_iris
.. _pyfunc-create-custom:
******************************
Creating custom Pyfunc models
******************************
MLflow's persistence modules provide convenience functions for creating models with the
``pyfunc`` flavor in a variety of machine learning frameworks (scikit-learn, Keras, PyTorch, and
more); however, they do not cover every use case. For example, you may want to create an MLflow
model with the ``pyfunc`` flavor using a framework that MLflow does not natively support.
Alternatively, you may want to build an MLflow model that executes custom logic when evaluating
queries, such as preprocessing and postprocessing routines. Therefore, ``mlflow.pyfunc``
provides utilities for creating ``pyfunc`` models from arbitrary code and model data.
The :meth:`save_model()` and :meth:`log_model()` methods are designed to support multiple workflows
for creating custom ``pyfunc`` models that incorporate custom inference logic and artifacts
that the logic may require.
An `artifact` is a file or directory, such as a serialized model or a CSV. For example, a
serialized TensorFlow graph is an artifact. An MLflow model directory is also an artifact.
.. _pyfunc-create-custom-workflows:
Workflows
#########
:meth:`save_model()` and :meth:`log_model()` support the following workflows:
1. Programmatically defining a new MLflow model, including its attributes and artifacts.
Given a set of artifact URIs, :meth:`save_model()` and :meth:`log_model()` can
automatically download artifacts from their URIs and create an MLflow model directory.
In this case, you must define a Python class which inherits from :class:`~PythonModel`,
defining ``predict()`` and, optionally, ``load_context()``. An instance of this class is
specified via the ``python_model`` parameter; it is automatically serialized and deserialized
as a Python class, including all of its attributes.
2. Interpreting pre-existing data as an MLflow model.
If you already have a directory containing model data, :meth:`save_model()` and
:meth:`log_model()` can import the data as an MLflow model. The ``data_path`` parameter
specifies the local filesystem path to the directory containing model data.
In this case, you must provide a Python module, called a `loader module`. The
loader module defines a ``_load_pyfunc()`` method that performs the following tasks:
- Load data from the specified ``data_path``. For example, this process may include
deserializing pickled Python objects or models or parsing CSV files.
- Construct and return a pyfunc-compatible model wrapper. As in the first
use case, this wrapper must define a ``predict()`` method that is used to evaluate
queries. ``predict()`` must adhere to the :ref:`pyfunc-inference-api`.
The ``loader_module`` parameter specifies the name of your loader module.
For an example loader module implementation, refer to the `loader module
implementation in mlflow.keras <https://github.com/mlflow/mlflow/blob/
74d75109aaf2975f5026104d6125bb30f4e3f744/mlflow/keras.py#L157-L187>`_.
.. _pyfunc-create-custom-selecting-workflow:
Which workflow is right for my use case?
########################################
We consider the first workflow to be more user-friendly and generally recommend it for the
following reasons:
- It automatically resolves and collects specified model artifacts.
- It automatically serializes and deserializes the ``python_model`` instance and all of
  its attributes, reducing the amount of user logic that is required to load the model.
- You can create Models using logic that is defined in the ``__main__`` scope. This allows
custom models to be constructed in interactive environments, such as notebooks and the Python
REPL.
You may prefer the second, lower-level workflow for the following reasons:
- Inference logic is always persisted as code, rather than a Python object. This makes logic
easier to inspect and modify later.
- If you have already collected all of your model data in a single location, the second
workflow allows it to be saved in MLflow format directly, without enumerating constituent
artifacts.
"""
import importlib
import tempfile
import signal
import sys
import numpy as np
import os
import pandas
import yaml
from copy import deepcopy
import logging
import threading
import collections
import subprocess
from typing import Any, Union, List, Dict, Iterator, Tuple
import mlflow
import mlflow.pyfunc.model
from mlflow.models import Model, ModelSignature, ModelInputExample
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.utils import _save_example
from mlflow.pyfunc.model import ( # pylint: disable=unused-import
PythonModel,
PythonModelContext,
get_default_conda_env,
)
from mlflow.pyfunc.model import get_default_pip_requirements
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.types import DataType, Schema, TensorSpec
from mlflow.types.utils import clean_tensor_type
from mlflow.utils import PYTHON_VERSION, get_major_minor_py_version, _is_in_ipython_notebook
from mlflow.utils.annotations import deprecated
from mlflow.utils.file_utils import _copy_file_or_tree, write_to
from mlflow.utils.model_utils import (
_get_flavor_configuration,
_validate_and_copy_code_paths,
_add_code_from_conf_to_system_path,
_get_flavor_configuration_from_uri,
_validate_and_prepare_target_save_path,
)
from mlflow.utils.uri import append_to_uri_path
from mlflow.utils.environment import (
_validate_env_arguments,
_process_pip_requirements,
_process_conda_env,
_CONDA_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
_PYTHON_ENV_FILE_NAME,
_PythonEnv,
)
from mlflow.utils import env_manager as _EnvManager
from mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS
from mlflow.utils.databricks_utils import is_in_databricks_runtime
from mlflow.utils.file_utils import get_or_create_tmp_dir, get_or_create_nfs_tmp_dir
from mlflow.utils.process import cache_return_value_per_process
from mlflow.exceptions import MlflowException
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.protos.databricks_pb2 import (
INVALID_PARAMETER_VALUE,
RESOURCE_DOES_NOT_EXIST,
)
from scipy.sparse import csc_matrix, csr_matrix
from mlflow.utils.requirements_utils import (
_check_requirement_satisfied,
_parse_requirements,
)
from mlflow.utils import find_free_port
from mlflow.utils.nfs_on_spark import get_nfs_cache_root_dir
# Name of the pyfunc flavor as recorded in the MLmodel configuration file.
FLAVOR_NAME = "python_function"
# Keys of the pyfunc flavor configuration stored in the MLmodel file.
MAIN = "loader_module"
CODE = "code"
DATA = "data"
ENV = "env"
PY_VERSION = "python_version"
_logger = logging.getLogger(__name__)
# Types accepted as input to / produced as output from pyfunc model predict().
PyFuncInput = Union[pandas.DataFrame, np.ndarray, csc_matrix, csr_matrix, List[Any], Dict[str, Any]]
PyFuncOutput = Union[pandas.DataFrame, pandas.Series, np.ndarray, list]
def add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):
    """
    Add a ``pyfunc`` spec to the model configuration.

    Defines the ``pyfunc`` configuration schema. Callers can use this to create a valid
    ``pyfunc`` model flavor out of an existing directory structure. For example, other model
    flavors can use this to specify how to use their output as a ``pyfunc``.

    NOTE:
        All paths are relative to the exported model root directory.

    :param model: Existing model.
    :param loader_module: The module to be used to load the model.
    :param data: Path to the model data.
    :param code: Path to the code dependencies.
    :param env: Conda environment.
    :param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor
                   specification. Values must be YAML-serializable.
    :return: Updated model configuration.
    """
    # Deep-copy so the mutations below never leak back into the caller's kwargs.
    params = deepcopy(kwargs)
    params.update({MAIN: loader_module, PY_VERSION: PYTHON_VERSION})
    # Optional entries are recorded only when a truthy value was supplied.
    for key, value in ((CODE, code), (DATA, data), (ENV, env)):
        if value:
            params[key] = value
    return model.add_flavor(FLAVOR_NAME, **params)
def _load_model_env(path):
    """
    Return the model-relative path to the Conda environment file recorded in a
    ``python_function`` model configuration, or ``None`` if none was specified
    at model save time.
    """
    flavor_conf = _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME)
    return flavor_conf.get(ENV)
def _enforce_mlflow_datatype(name, values: pandas.Series, t: DataType):
    """
    Enforce the input column type matches the declared in model input schema.
    The following type conversions are allowed:
    1. object -> string
    2. int -> long (upcast)
    3. float -> double (upcast)
    4. int -> double (safe conversion)
    5. np.datetime64[x] -> datetime (any precision)
    6. object -> datetime
    Any other type mismatch will raise error.

    :param name: Column name; used only in error messages.
    :param values: The column values to validate and possibly convert.
    :param t: The MLflow data type declared for this column in the model schema.
    :return: ``values``, converted to the declared type when a conversion was needed.
    :raises MlflowException: If the values cannot be safely converted to ``t``.
    """
    if values.dtype == object and t not in (DataType.binary, DataType.string):
        # Let pandas infer a more specific dtype from object columns before matching.
        values = values.infer_objects()
    if t == DataType.string and values.dtype == object:
        # NB: the object can contain any type and we currently cannot cast to pandas Strings
        # due to how None is cast
        return values
    # NB: Comparison of pandas and numpy data type fails when numpy data type is on the left hand
    # side of the comparison operator. It works, however, if pandas type is on the left hand side.
    # That is because pandas is aware of numpy.
    if t.to_pandas() == values.dtype or t.to_numpy() == values.dtype:
        # The types are already compatible => conversion is not necessary.
        return values
    if t == DataType.binary and values.dtype.kind == t.binary.to_numpy().kind:
        # NB: bytes in numpy have variable itemsize depending on the length of the longest
        # element in the array (column). Since MLflow binary type is length agnostic, we ignore
        # itemsize when matching binary columns.
        return values
    if t == DataType.datetime and values.dtype.kind == t.to_numpy().kind:
        # NB: datetime values have variable precision denoted by brackets, e.g. datetime64[ns]
        # denotes nanosecond precision. Since MLflow datetime type is precision agnostic, we
        # ignore precision when matching datetime columns.
        return values
    if t == DataType.datetime and values.dtype == object:
        # NB: Pyspark date columns get converted to object when converted to a pandas
        # DataFrame. To respect the original typing, we convert the column to datetime.
        try:
            return values.astype(np.datetime64, errors="raise")
        except ValueError:
            raise MlflowException(
                "Failed to convert column {0} from type {1} to {2}.".format(name, values.dtype, t)
            )
    numpy_type = t.to_numpy()
    if values.dtype.kind == numpy_type.kind:
        # Same kind (e.g. int -> int, float -> float): allow only widening conversions.
        is_upcast = values.dtype.itemsize <= numpy_type.itemsize
    elif values.dtype.kind == "u" and numpy_type.kind == "i":
        # Unsigned -> signed needs one extra bit, so require a strictly larger target.
        is_upcast = values.dtype.itemsize < numpy_type.itemsize
    elif values.dtype.kind in ("i", "u") and numpy_type == np.float64:
        # allow (u)int => double conversion
        # Integers up to 48 bits (6 bytes) are exactly representable in a float64
        # (53-bit significand), so this conversion is lossless.
        is_upcast = values.dtype.itemsize <= 6
    else:
        is_upcast = False
    if is_upcast:
        return values.astype(numpy_type, errors="raise")
    else:
        # NB: conversion between incompatible types (e.g. floats -> ints or
        # double -> float) are not allowed. While supported by pandas and numpy,
        # these conversions alter the values significantly.
        def all_ints(xs):
            # True when every non-null element is integral-valued.
            return all(pandas.isnull(x) or int(x) == x for x in xs)
        hint = ""
        if (
            values.dtype == np.float64
            and numpy_type.kind in ("i", "u")
            and values.hasnans
            and all_ints(values)
        ):
            # The floats are really integers with NaN holes: point the user at the
            # well-known pandas missing-integer pitfall instead of a bare type error.
            hint = (
                " Hint: the type mismatch is likely caused by missing values. "
                "Integer columns in python can not represent missing values and are therefore "
                "encoded as floats. The best way to avoid this problem is to infer the model "
                "schema based on a realistic data sample (training dataset) that includes missing "
                "values. Alternatively, you can declare integer columns as doubles (float64) "
                "whenever these columns may have missing values. See `Handling Integers With "
                "Missing Values <https://www.mlflow.org/docs/latest/models.html#"
                "handling-integers-with-missing-values>`_ for more details."
            )
        raise MlflowException(
            "Incompatible input types for column {0}. "
            "Can not safely convert {1} to {2}.{3}".format(name, values.dtype, numpy_type, hint)
        )
def _enforce_tensor_spec(
    values: Union[np.ndarray, csc_matrix, csr_matrix], tensor_spec: TensorSpec
):
    """
    Enforce that the input tensor's shape and dtype match the provided tensor spec.

    :param values: A dense numpy array or a scipy CSC/CSR sparse matrix.
    :param tensor_spec: The expected tensor specification. A ``-1`` in its shape
                        acts as a wildcard matching any size along that dimension.
    :return: ``values`` unchanged, if it conforms to ``tensor_spec``.
    :raises MlflowException: If the shape or dtype does not match the spec.
    """
    expected_shape = tensor_spec.shape
    actual_shape = values.shape
    # Sparse matrices carry their element dtype on the underlying data buffer.
    actual_type = values.dtype if isinstance(values, np.ndarray) else values.data.dtype
    if len(expected_shape) != len(actual_shape):
        raise MlflowException(
            "Shape of input {0} does not match expected shape {1}.".format(
                actual_shape, expected_shape
            )
        )
    for expected, actual in zip(expected_shape, actual_shape):
        if expected == -1:
            # -1 is a wildcard: any size is accepted along this dimension.
            continue
        if expected != actual:
            raise MlflowException(
                "Shape of input {0} does not match expected shape {1}.".format(
                    actual_shape, expected_shape
                )
            )
    if clean_tensor_type(actual_type) != tensor_spec.type:
        # Report actual_type (not values.dtype) so the message shows the element
        # dtype consistently for sparse inputs as well; the two are equal for
        # dense arrays.
        raise MlflowException(
            "dtype of input {0} does not match expected dtype {1}".format(
                actual_type, tensor_spec.type
            )
        )
    return values
def _enforce_col_schema(pfInput: PyFuncInput, input_schema: Schema):
    """Enforce the input columns conform to the model's column-based signature."""
    if input_schema.has_input_names():
        input_names = input_schema.input_names()
    else:
        # Unnamed signature: take the first N columns positionally.
        input_names = pfInput.columns[: len(input_schema.inputs)]
    new_pfInput = pandas.DataFrame()
    # Coerce each declared column to its schema type, preserving column order.
    for col_name, col_type in zip(input_names, input_schema.input_types()):
        new_pfInput[col_name] = _enforce_mlflow_datatype(col_name, pfInput[col_name], col_type)
    return new_pfInput
def _enforce_tensor_schema(pfInput: PyFuncInput, input_schema: Schema):
    """
    Validate the input against the model's tensor-based signature and return the
    conforming tensor (or dict of named tensors).
    """
    if not input_schema.has_input_names():
        # Unnamed signature: a single tensor spec fed by one array-like input.
        if isinstance(pfInput, pandas.DataFrame):
            return _enforce_tensor_spec(pfInput.to_numpy(), input_schema.inputs[0])
        if isinstance(pfInput, (np.ndarray, csc_matrix, csr_matrix)):
            return _enforce_tensor_spec(pfInput, input_schema.inputs[0])
        raise MlflowException(
            "This model contains a tensor-based model signature with no input names,"
            " which suggests a numpy array input, but an input of type {0} was"
            " found.".format(type(pfInput))
        )
    # Named signature: each declared input name maps to its own tensor.
    if isinstance(pfInput, dict):
        new_pfInput = dict()
        for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):
            if not isinstance(pfInput[col_name], np.ndarray):
                raise MlflowException(
                    "This model contains a tensor-based model signature with input names,"
                    " which suggests a dictionary input mapping input name to a numpy"
                    " array, but a dict with value type {0} was found.".format(
                        type(pfInput[col_name])
                    )
                )
            new_pfInput[col_name] = _enforce_tensor_spec(pfInput[col_name], tensor_spec)
        return new_pfInput
    if isinstance(pfInput, pandas.DataFrame):
        # Convert each DataFrame column into an array of the declared dtype.
        return {
            col_name: _enforce_tensor_spec(
                np.array(pfInput[col_name], dtype=tensor_spec.type), tensor_spec
            )
            for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs)
        }
    raise MlflowException(
        "This model contains a tensor-based model signature with input names, which"
        " suggests a dictionary input mapping input name to tensor, but an input of"
        " type {0} was found.".format(type(pfInput))
    )
def _enforce_schema(pfInput: PyFuncInput, input_schema: Schema):
    """
    Enforces the provided input matches the model's input schema.
    For signatures with input names, we check there are no missing inputs and reorder the inputs to
    match the ordering declared in schema if necessary. Any extra columns are ignored.
    For column-based signatures, we make sure the types of the input match the type specified in
    the schema or if it can be safely converted to match the input schema.
    For tensor-based signatures, we make sure the shape and type of the input matches the shape
    and type specified in model's input schema.
    """
    if not input_schema.is_tensor_spec():
        # Column-based signature: coerce list/ndarray/dict inputs into a DataFrame first.
        if isinstance(pfInput, (list, np.ndarray, dict)):
            try:
                pfInput = pandas.DataFrame(pfInput)
            except Exception as e:
                raise MlflowException(
                    "This model contains a column-based signature, which suggests a DataFrame"
                    " input. There was an error casting the input data to a DataFrame:"
                    " {0}".format(str(e))
                )
        if not isinstance(pfInput, pandas.DataFrame):
            raise MlflowException(
                "Expected input to be DataFrame or list. Found: %s" % type(pfInput).__name__
            )
    if input_schema.has_input_names():
        # make sure there are no missing columns
        input_names = input_schema.input_names()
        expected_cols = set(input_names)
        actual_cols = set()
        if len(expected_cols) == 1 and isinstance(pfInput, np.ndarray):
            # for schemas with a single column, match input with column
            pfInput = {input_names[0]: pfInput}
            actual_cols = expected_cols
        elif isinstance(pfInput, pandas.DataFrame):
            actual_cols = set(pfInput.columns)
        elif isinstance(pfInput, dict):
            actual_cols = set(pfInput.keys())
        missing_cols = expected_cols - actual_cols
        extra_cols = actual_cols - expected_cols
        # Preserve order from the original columns, since missing/extra columns are likely to
        # be in same order.
        missing_cols = [c for c in input_names if c in missing_cols]
        extra_cols = [c for c in actual_cols if c in extra_cols]
        if missing_cols:
            # NOTE: extra columns are tolerated (and ignored); only missing ones are fatal.
            raise MlflowException(
                "Model is missing inputs {0}."
                " Note that there were extra inputs: {1}".format(missing_cols, extra_cols)
            )
    elif not input_schema.is_tensor_spec():
        # The model signature does not specify column names => we can only verify column count.
        num_actual_columns = len(pfInput.columns)
        if num_actual_columns < len(input_schema.inputs):
            raise MlflowException(
                "Model inference is missing inputs. The model signature declares "
                "{0} inputs but the provided value only has "
                "{1} inputs. Note: the inputs were not named in the signature so we can "
                "only verify their count.".format(len(input_schema.inputs), num_actual_columns)
            )
    # Dispatch to the matching enforcement routine for the signature style.
    return (
        _enforce_tensor_schema(pfInput, input_schema)
        if input_schema.is_tensor_spec()
        else _enforce_col_schema(pfInput, input_schema)
    )
class PyFuncModel:
    """
    MLflow 'python function' model.
    Wrapper around model implementation and metadata. This class is not meant to be constructed
    directly. Instead, instances of this class are constructed and returned from
    :py:func:`load_model() <mlflow.pyfunc.load_model>`.
    ``model_impl`` can be any Python object that implements the `Pyfunc interface
    <https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#pyfunc-inference-api>`_, and is
    returned by invoking the model's ``loader_module``.
    ``model_meta`` contains model metadata loaded from the MLmodel file.
    """
    def __init__(self, model_meta: Model, model_impl: Any):
        # Fail fast: both a callable implementation and its metadata are required.
        if not hasattr(model_impl, "predict"):
            raise MlflowException("Model implementation is missing required predict method.")
        if not model_meta:
            raise MlflowException("Model is missing metadata.")
        self._model_meta = model_meta
        self._model_impl = model_impl
    def predict(self, data: PyFuncInput) -> PyFuncOutput:
        """
        Generate model predictions.
        If the model contains signature, enforce the input schema first before calling the model
        implementation with the sanitized input. If the pyfunc model does not include model schema,
        the input is passed to the model implementation as is. See `Model Signature Enforcement
        <https://www.mlflow.org/docs/latest/models.html#signature-enforcement>`_ for more details.
        :param data: Model input as one of pandas.DataFrame, numpy.ndarray,
                     scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], or
                     Dict[str, numpy.ndarray]
        :return: Model predictions as one of pandas.DataFrame, pandas.Series, numpy.ndarray or list.
        """
        input_schema = self.metadata.get_input_schema()
        if input_schema is not None:
            # Validate/coerce the input against the saved signature before predicting.
            data = _enforce_schema(data, input_schema)
        return self._model_impl.predict(data)
    @property
    def metadata(self):
        """Model metadata."""
        if self._model_meta is None:
            raise MlflowException("Model is missing metadata.")
        return self._model_meta
    def __repr__(self):
        # Summarize provenance (run id, artifact path) and flavor as YAML for readability.
        info = {}
        if self._model_meta is not None:
            if hasattr(self._model_meta, "run_id") and self._model_meta.run_id is not None:
                info["run_id"] = self._model_meta.run_id
            if (
                hasattr(self._model_meta, "artifact_path")
                and self._model_meta.artifact_path is not None
            ):
                info["artifact_path"] = self._model_meta.artifact_path
            info["flavor"] = self._model_meta.flavors[FLAVOR_NAME]["loader_module"]
        return yaml.safe_dump({"mlflow.pyfunc.loaded_model": info}, default_flow_style=False)
def _warn_dependency_requirement_mismatches(model_path):
    """
    Inspects the model's dependencies and prints a warning if the current Python environment
    doesn't satisfy them.
    """
    req_file_path = os.path.join(model_path, _REQUIREMENTS_FILE_NAME)
    if not os.path.exists(req_file_path):
        # Nothing to check: the model was saved without a requirements file.
        return
    try:
        # Collect a human-readable description for every unsatisfied requirement.
        mismatch_infos = [
            str(mismatch)
            for mismatch in (
                _check_requirement_satisfied(req.req_str)
                for req in _parse_requirements(req_file_path, is_constraint=False)
            )
            if mismatch is not None
        ]
        if mismatch_infos:
            mismatch_str = " - " + "\n - ".join(mismatch_infos)
            warning_msg = (
                "Detected one or more mismatches between the model's dependencies and the current "
                f"Python environment:\n{mismatch_str}\n"
                "To fix the mismatches, call `mlflow.pyfunc.get_model_dependencies(model_uri)` "
                "to fetch the model's environment and install dependencies using the resulting "
                "environment file."
            )
            _logger.warning(warning_msg)
    except Exception as e:
        # Dependency checking is best-effort; never fail model loading because of it.
        _logger.warning(
            f"Encountered an unexpected error ({repr(e)}) while detecting model dependency "
            "mismatches. Set logging level to DEBUG to see the full traceback."
        )
        _logger.debug("", exc_info=True)
def load_model(
    model_uri: str, suppress_warnings: bool = False, dst_path: str = None
) -> PyFuncModel:
    """
    Load a model stored in Python function format.
    :param model_uri: The location, in URI format, of the MLflow model. For example:
                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``
                      - ``mlflow-artifacts:/path/to/model``
                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param suppress_warnings: If ``True``, non-fatal warning messages associated with the model
                              loading process will be suppressed. If ``False``, these warning
                              messages will be emitted.
    :param dst_path: The local filesystem path to which to download the model artifact.
                     This directory must already exist. If unspecified, a local output
                     path will be created.
    :return: A :py:class:`PyFuncModel` wrapping the loaded implementation and metadata.
    :raises MlflowException: If the model does not contain the ``python_function`` flavor.
    """
    local_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
    if not suppress_warnings:
        # Warn when the current environment does not satisfy the model's requirements.
        _warn_dependency_requirement_mismatches(local_path)
    model_meta = Model.load(os.path.join(local_path, MLMODEL_FILE_NAME))
    conf = model_meta.flavors.get(FLAVOR_NAME)
    if conf is None:
        raise MlflowException(
            'Model does not have the "{flavor_name}" flavor'.format(flavor_name=FLAVOR_NAME),
            RESOURCE_DOES_NOT_EXIST,
        )
    model_py_version = conf.get(PY_VERSION)
    if not suppress_warnings:
        # Warn when the saving interpreter's major/minor version differs from ours.
        _warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)
    # Make the model's bundled code importable before loading the implementation.
    _add_code_from_conf_to_system_path(local_path, conf, code_key=CODE)
    data_path = os.path.join(local_path, conf[DATA]) if (DATA in conf) else local_path
    # The flavor's loader module produces the pyfunc-compatible implementation object.
    model_impl = importlib.import_module(conf[MAIN])._load_pyfunc(data_path)
    return PyFuncModel(model_meta=model_meta, model_impl=model_impl)
def _download_model_conda_env(model_uri):
    """Download the model's conda environment file and return its local path."""
    # The conda env file name is recorded in the model's pyfunc flavor configuration.
    conda_yml_file_name = _get_flavor_configuration_from_uri(model_uri, FLAVOR_NAME)[ENV]
    env_file_uri = append_to_uri_path(model_uri, conda_yml_file_name)
    return _download_artifact_from_uri(env_file_uri)
def _get_model_dependencies(model_uri, format="pip"):  # pylint: disable=redefined-builtin
    """
    Download the dependency specification of the model at ``model_uri``.

    :param model_uri: URI of the model whose dependencies should be fetched.
    :param format: ``"pip"`` to return a ``requirements.txt`` path (falling back to the
                   ``pip`` section of the model's ``conda.yaml``), or ``"conda"`` to
                   return the ``conda.yaml`` path.
    :return: Local filesystem path to the downloaded dependency file.
    :raises MlflowException: If ``format`` is unrecognized, or if the fallback finds no
                             ``pip`` section in the model's ``conda.yaml``.
    """
    if format == "conda":
        return _download_model_conda_env(model_uri)
    if format != "pip":
        raise MlflowException(
            f"Illegal format argument '{format}'.", error_code=INVALID_PARAMETER_VALUE
        )
    req_file_uri = append_to_uri_path(model_uri, _REQUIREMENTS_FILE_NAME)
    try:
        return _download_artifact_from_uri(req_file_uri)
    except Exception as e:
        # fallback to download conda.yaml file and parse the "pip" section from it.
        _logger.info(
            f"Downloading model '{_REQUIREMENTS_FILE_NAME}' file failed, error is {repr(e)}. "
            "Falling back to fetching pip requirements from the model's 'conda.yaml' file. "
            "Other conda dependencies will be ignored."
        )
        conda_yml_path = _download_model_conda_env(model_uri)
        with open(conda_yml_path, "r") as yf:
            conda_yml = yaml.safe_load(yf)
        conda_deps = conda_yml.get("dependencies", [])
        for index, dep in enumerate(conda_deps):
            if isinstance(dep, dict) and "pip" in dep:
                pip_deps_index = index
                break
        else:
            raise MlflowException(
                "No pip section found in conda.yaml file in the model directory.",
                error_code=RESOURCE_DOES_NOT_EXIST,
            )
        # Remove the pip section so the remaining entries can be reported as excluded.
        pip_deps = conda_deps.pop(pip_deps_index)["pip"]
        tmp_dir = tempfile.mkdtemp()
        pip_file_path = os.path.join(tmp_dir, _REQUIREMENTS_FILE_NAME)
        with open(pip_file_path, "w") as f:
            f.write("\n".join(pip_deps) + "\n")
        if len(conda_deps) > 0:
            # NB: single-quoted separator — reusing double quotes inside this f-string
            # expression is a SyntaxError on Python < 3.12.
            _logger.warning(
                "The following conda dependencies have been excluded from the environment file:"
                f" {', '.join(conda_deps)}."
            )
        return pip_file_path
def get_model_dependencies(model_uri, format="pip"):  # pylint: disable=redefined-builtin
    """
    Fetch the model's dependency specification and return its local path.

    :param model_uri: The uri of the model to get dependencies from.
    :param format: The format of the returned dependency file. With ``"pip"`` (the
                   default), the path to a pip ``requirements.txt`` file is returned;
                   if the model was saved without one, the ``pip`` section of the
                   model's ``conda.yaml`` is extracted instead and any other conda
                   dependencies are ignored. With ``"conda"``, the path to the
                   model's ``conda.yaml`` file is returned.
    :return: The local filesystem path to either a pip ``requirements.txt`` file
             (if ``format="pip"``) or a ``conda.yaml`` file (if ``format="conda"``)
             specifying the model's dependencies.
    """
    dep_file = _get_model_dependencies(model_uri, format)
    if format == "pip":
        if _is_in_ipython_notebook():
            # Inside a notebook, pip must be invoked through the "%pip" magic.
            prefix = "%"
        else:
            prefix = ""
        _logger.info(
            "To install the dependencies that were used to train the model, run the "
            f"following command: '{prefix}pip install -r {dep_file}'."
        )
    return dep_file
@deprecated("mlflow.pyfunc.load_model", 1.0)
def load_pyfunc(model_uri, suppress_warnings=False):
    """
    Load a model stored in Python function format.

    .. note:: Deprecated — use :py:func:`mlflow.pyfunc.load_model` instead.

    :param model_uri: The location, in URI format, of the MLflow model: a local path,
                      an ``s3://`` bucket path, a ``runs:/`` or ``models:/`` URI, or an
                      ``mlflow-artifacts:/`` path. For the full list of supported URI
                      schemes, see `Referencing Artifacts
                      <https://www.mlflow.org/docs/latest/concepts.html#artifact-locations>`_.
    :param suppress_warnings: If ``True``, suppress non-fatal warnings emitted during
                              model loading; if ``False``, emit them.
    """
    # Thin backward-compatibility shim that delegates to load_model().
    return load_model(model_uri, suppress_warnings)
def _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None):
    """
    Compare the Python version the model was saved with against the running
    interpreter and log a warning when it is missing or when the major/minor
    versions differ.
    """
    if model_py_version is None:
        # The saved model did not record a Python version at all.
        _logger.warning(
            "The specified model does not have a specified Python version. It may be"
            " incompatible with the version of Python that is currently running: Python %s",
            PYTHON_VERSION,
        )
        return
    if get_major_minor_py_version(model_py_version) != get_major_minor_py_version(PYTHON_VERSION):
        _logger.warning(
            "The version of Python that the model was saved in, `Python %s`, differs"
            " from the version of Python that is currently running, `Python %s`,"
            " and may be incompatible",
            model_py_version,
            PYTHON_VERSION,
        )
def _create_model_downloading_tmp_dir(should_use_nfs):
    """Create and return a fresh, world-accessible temp directory for model downloads."""
    root_tmp_dir = get_or_create_nfs_tmp_dir() if should_use_nfs else get_or_create_tmp_dir()
    root_model_cache_dir = os.path.join(root_tmp_dir, "models")
    os.makedirs(root_model_cache_dir, exist_ok=True)
    tmp_model_dir = tempfile.mkdtemp(dir=root_model_cache_dir)
    # mkdtemp creates the directory with permission 0o700; widen it to 0o777 so it
    # can be seen in spark UDF worker processes.
    os.chmod(tmp_model_dir, 0o777)
    return tmp_model_dir
@cache_return_value_per_process
def _get_or_create_env_root_dir(should_use_nfs):
    """Return (creating on first use, cached per process) the root dir for restored envs."""
    root_tmp_dir = get_or_create_nfs_tmp_dir() if should_use_nfs else get_or_create_tmp_dir()
    env_root_dir = os.path.join(root_tmp_dir, "envs")
    os.makedirs(env_root_dir, exist_ok=True)
    return env_root_dir
# Number of trailing output lines from the scoring-server process to retain —
# presumably for error reporting; the consumer is outside this chunk (TODO confirm).
_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP = 200
def spark_udf(spark, model_uri, result_type="double", env_manager="local"):
    """
    A Spark UDF that can be used to invoke the Python function formatted model.

    Parameters passed to the UDF are forwarded to the model as a DataFrame where the column names
    are ordinals (0, 1, ...). On some versions of Spark (3.0 and above), it is also possible to
    wrap the input in a struct. In that case, the data will be passed as a DataFrame with column
    names given by the struct definition (e.g. when invoked as my_udf(struct('x', 'y')), the model
    will get the data as a pandas DataFrame with 2 columns 'x' and 'y').

    If a model contains a signature, the UDF can be called without specifying column name
    arguments. In this case, the UDF will be called with column names from signature, so the
    evaluation dataframe's column names must match the model signature's column names.

    The predictions are filtered to contain only the columns that can be represented as the
    ``result_type``. If the ``result_type`` is string or array of strings, all predictions are
    converted to string. If the result type is not an array type, the left most column with
    matching type is returned.

    NOTE: Inputs of type ``pyspark.sql.types.DateType`` are not supported on earlier versions of
    Spark (2.4 and below).

    .. code-block:: python
        :caption: Example

        from pyspark.sql.functions import struct

        predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
        df.withColumn("prediction", predict(struct("name", "age"))).show()

    :param spark: A SparkSession object.
    :param model_uri: The location, in URI format, of the MLflow model with the
                      :py:mod:`mlflow.pyfunc` flavor. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``
                      - ``mlflow-artifacts:/path/to/model``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.

    :param result_type: the return type of the user-defined function. The value can be either a
        ``pyspark.sql.types.DataType`` object or a DDL-formatted type string. Only a primitive
        type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed.
        The following classes of result type are supported:

        - "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an
          ``int32`` or an exception if there is none.

        - "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an
          ``int64`` or an exception if there is none.

        - ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested
          size.

        - "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to
          ``float32`` or an exception if there is none.

        - "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to
          ``double`` or an exception if there is none.

        - ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or
          an exception if there are no numeric columns.

        - "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to ``string``.

        - ``ArrayType(StringType)``: All columns converted to ``string``.

    :param env_manager: The environment manager to use in order to create the python environment
                        for model inference. Note that environment is only restored in the context
                        of the PySpark UDF; the software environment outside of the UDF is
                        unaffected. Default value is ``local``, and the following values are
                        supported:

                        - ``conda``: (Recommended) Use Conda to restore the software environment
                          that was used to train the model.
                        - ``virtualenv``: Use virtualenv to restore the python environment that
                          was used to train the model.
                        - ``local``: Use the current Python environment for model inference, which
                          may differ from the environment used to train the model and may lead to
                          errors or invalid predictions.

    :return: Spark UDF that applies the model's ``predict`` method to the data and returns a
             type specified by ``result_type``, which by default is a double.
    """

    # Scope Spark import to this method so users don't need pyspark to use non-Spark-related
    # functionality.
    import functools
    from mlflow.pyfunc.spark_model_cache import SparkModelCache
    from mlflow.utils._spark_utils import _SparkDirectoryDistributor
    from pyspark.sql.functions import pandas_udf
    from pyspark.sql.types import _parse_datatype_string
    from pyspark.sql.types import (
        ArrayType,
        DataType as SparkDataType,
        StructType as SparkStructType,
    )
    from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType
    from mlflow.models.cli import _get_flavor_backend

    _EnvManager.validate(env_manager)

    # Check whether spark is in local or local-cluster mode
    # this case all executors and driver share the same filesystem
    is_spark_in_local_mode = spark.conf.get("spark.master").startswith("local")

    nfs_root_dir = get_nfs_cache_root_dir()
    should_use_nfs = nfs_root_dir is not None
    # Spark file broadcast is only needed when executors cannot read the driver's
    # filesystem directly (i.e. not local mode and no shared NFS).
    should_use_spark_to_broadcast_file = not (is_spark_in_local_mode or should_use_nfs)

    env_root_dir = _get_or_create_env_root_dir(should_use_nfs)

    if not isinstance(result_type, SparkDataType):
        result_type = _parse_datatype_string(result_type)

    elem_type = result_type
    if isinstance(elem_type, ArrayType):
        elem_type = elem_type.elementType

    supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]

    if not any(isinstance(elem_type, x) for x in supported_types):
        raise MlflowException(
            message="Invalid result_type '{}'. Result type can only be one of or an array of one "
            "of the following types: {}".format(str(elem_type), str(supported_types)),
            error_code=INVALID_PARAMETER_VALUE,
        )

    local_model_path = _download_artifact_from_uri(
        artifact_uri=model_uri, output_path=_create_model_downloading_tmp_dir(should_use_nfs)
    )

    if env_manager == _EnvManager.LOCAL:
        # Assume spark executor python environment is the same with spark driver side.
        _warn_dependency_requirement_mismatches(local_model_path)
        _logger.warning(
            'Calling `spark_udf()` with `env_manager="local"` does not recreate the same '
            "environment that was used during training, which may lead to errors or inaccurate "
            'predictions. We recommend specifying `env_manager="conda"`, which automatically '
            "recreates the environment that was used to train the model and performs inference "
            "in the recreated environment."
        )
    else:
        _logger.info(
            "This UDF will use Conda to recreate the model's software environment for inference. "
            "This may take extra time during execution."
        )
        if not sys.platform.startswith("linux"):
            # TODO: support killing mlflow server launched in UDF task when spark job canceled
            #  for non-linux system.
            #  https://stackoverflow.com/questions/53208/how-do-i-automatically-destroy-child-processes-in-windows
            _logger.warning(
                "In order to run inference code in restored python environment, PySpark UDF "
                "processes spawn MLflow Model servers as child processes. Due to system "
                "limitations with handling SIGKILL signals, these MLflow Model server child "
                "processes cannot be cleaned up if the Spark Job is canceled."
            )

    if not should_use_spark_to_broadcast_file:
        # Prepare restored environment in driver side if possible.
        # Note: In databricks runtime, because databricks notebook cell output cannot capture
        # child process output, so that set capture_output to be True so that when `conda prepare
        # env` command failed, the exception message will include command stdout/stderr output.
        # Otherwise user have to check cluster driver log to find command stdout/stderr output.
        # In non-databricks runtime, set capture_output to be False, because the benefit of
        # "capture_output=False" is the output will be printed immediately, otherwise you have
        # to wait conda command fail and suddenly get all output printed (included in error
        # message).
        if env_manager != _EnvManager.LOCAL:
            _get_flavor_backend(
                local_model_path,
                env_manager=env_manager,
                install_mlflow=False,
                env_root_dir=env_root_dir,
            ).prepare_env(model_uri=local_model_path, capture_output=is_in_databricks_runtime())

    # Broadcast local model directory to remote worker if needed.
    if should_use_spark_to_broadcast_file:
        archive_path = SparkModelCache.add_local_model(spark, local_model_path)

    model_metadata = Model.load(os.path.join(local_model_path, MLMODEL_FILE_NAME))

    def _predict_row_batch(predict_fn, args):
        """Assemble the UDF's positional args into a DataFrame, run the model, and coerce
        the predictions to ``result_type``."""
        input_schema = model_metadata.get_input_schema()
        pdf = None

        for x in args:
            # A DataFrame arg means the UDF was invoked with a single struct column.
            if isinstance(x, pandas.DataFrame):
                if len(args) != 1:
                    raise Exception(
                        "If passing a StructType column, there should be only one "
                        "input column, but got %d" % len(args)
                    )
                pdf = x
        if pdf is None:
            args = list(args)
            if input_schema is None:
                names = [str(i) for i in range(len(args))]
            else:
                names = input_schema.input_names()
            if len(args) > len(names):
                args = args[: len(names)]
            if len(args) < len(names):
                raise MlflowException(
                    "Model input is missing columns. Expected {0} input columns {1},"
                    " but the model received only {2} unnamed input columns"
                    " (Since the columns were passed unnamed they are expected to be in"
                    " the order specified by the schema).".format(len(names), names, len(args))
                )
            pdf = pandas.DataFrame(data={names[i]: x for i, x in enumerate(args)}, columns=names)

        result = predict_fn(pdf)

        if not isinstance(result, pandas.DataFrame):
            result = pandas.DataFrame(data=result)

        elem_type = result_type.elementType if isinstance(result_type, ArrayType) else result_type

        # NOTE: exact-type checks (type(...) ==) are intentional here -- the requested
        # Spark type must match exactly to select the corresponding pandas dtype coercion.
        if type(elem_type) == IntegerType:
            result = result.select_dtypes(
                [np.byte, np.ubyte, np.short, np.ushort, np.int32]
            ).astype(np.int32)

        elif type(elem_type) == LongType:
            result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, int])

        elif type(elem_type) == FloatType:
            result = result.select_dtypes(include=(np.number,)).astype(np.float32)

        elif type(elem_type) == DoubleType:
            result = result.select_dtypes(include=(np.number,)).astype(np.float64)

        if len(result.columns) == 0:
            raise MlflowException(
                message="The model did not produce any values compatible with the requested "
                "type '{}'. Consider requesting udf with StringType or "
                "ArrayType(StringType).".format(str(elem_type)),
                error_code=INVALID_PARAMETER_VALUE,
            )

        if type(elem_type) == StringType:
            result = result.applymap(str)

        if type(result_type) == ArrayType:
            return pandas.Series(result.to_numpy().tolist())
        else:
            return result[result.columns[0]]

    result_type_hint = (
        pandas.DataFrame if isinstance(result_type, SparkStructType) else pandas.Series
    )

    @pandas_udf(result_type)
    def udf(
        iterator: Iterator[Tuple[Union[pandas.Series, pandas.DataFrame], ...]]
    ) -> Iterator[result_type_hint]:
        # importing here to prevent circular import
        from mlflow.pyfunc.scoring_server.client import ScoringServerClient

        # Note: this is a pandas udf function in iteration style, which takes an iterator of
        # tuple of pandas.Series and outputs an iterator of pandas.Series.

        scoring_server_proc = None

        if env_manager != _EnvManager.LOCAL:
            if should_use_spark_to_broadcast_file:
                local_model_path_on_executor = _SparkDirectoryDistributor.get_or_extract(
                    archive_path
                )
                # Create individual conda_env_root_dir for each spark UDF task process.
                env_root_dir_on_executor = _get_or_create_env_root_dir(should_use_nfs)
            else:
                local_model_path_on_executor = local_model_path
                env_root_dir_on_executor = env_root_dir

            pyfunc_backend = _get_flavor_backend(
                local_model_path_on_executor,
                workers=1,
                install_mlflow=False,
                env_manager=env_manager,
                env_root_dir=env_root_dir_on_executor,
            )

            if should_use_spark_to_broadcast_file:
                # Call "prepare_env" in advance in order to reduce scoring server launch time.
                # So that we can use a shorter timeout when call `client.wait_server_ready`,
                # otherwise we have to set a long timeout for `client.wait_server_ready` time,
                # this prevents spark UDF task failing fast if other exception raised when scoring
                # server launching.
                # Set "capture_output" so that if "conda env create" command failed, the command
                # stdout/stderr output will be attached to the exception message and included in
                # driver side exception.
                pyfunc_backend.prepare_env(
                    model_uri=local_model_path_on_executor, capture_output=True
                )

            # launch scoring server
            server_port = find_free_port()
            scoring_server_proc = pyfunc_backend.serve(
                model_uri=local_model_path_on_executor,
                port=server_port,
                host="127.0.0.1",
                timeout=60,
                enable_mlserver=False,
                synchronous=False,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )

            server_tail_logs = collections.deque(maxlen=_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP)

            def server_redirect_log_thread_func(child_stdout):
                # Tee the model server's output: keep a bounded tail for error reporting and
                # echo every line to this task's stdout with a distinguishing prefix.
                for line in child_stdout:
                    if isinstance(line, bytes):
                        decoded = line.decode()
                    else:
                        decoded = line
                    server_tail_logs.append(decoded)
                    sys.stdout.write("[model server] " + decoded)

            server_redirect_log_thread = threading.Thread(
                target=server_redirect_log_thread_func, args=(scoring_server_proc.stdout,)
            )
            # Daemonize so the log-forwarding thread never blocks task shutdown.
            # (assignment to `daemon` replaces the deprecated `setDaemon(True)` call)
            server_redirect_log_thread.daemon = True
            server_redirect_log_thread.start()

            client = ScoringServerClient("127.0.0.1", server_port)

            try:
                client.wait_server_ready(timeout=90, scoring_server_proc=scoring_server_proc)
            except Exception:
                err_msg = "During spark UDF task execution, mlflow model server failed to launch. "
                if len(server_tail_logs) == _MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP:
                    err_msg += (
                        f"Last {_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP} "
                        "lines of MLflow model server output:\n"
                    )
                else:
                    err_msg += "MLflow model server output:\n"
                err_msg += "".join(server_tail_logs)
                raise MlflowException(err_msg)

            def batch_predict_fn(pdf):
                return client.invoke(pdf)

        elif env_manager == _EnvManager.LOCAL:
            if should_use_spark_to_broadcast_file:
                loaded_model, _ = SparkModelCache.get_or_load(archive_path)
            else:
                loaded_model = mlflow.pyfunc.load_model(local_model_path)

            def batch_predict_fn(pdf):
                return loaded_model.predict(pdf)

        try:
            for input_batch in iterator:
                # If the UDF is called with only multiple arguments,
                # the `input_batch` is a tuple which composes of several pd.Series/pd.DataFrame
                # objects.
                # If the UDF is called with only one argument,
                # the `input_batch` instance will be an instance of `pd.Series`/`pd.DataFrame`,
                if isinstance(input_batch, (pandas.Series, pandas.DataFrame)):
                    # UDF is called with only one argument
                    row_batch_args = (input_batch,)
                else:
                    row_batch_args = input_batch

                yield _predict_row_batch(batch_predict_fn, row_batch_args)
        finally:
            # Ensure the spawned model server does not outlive the UDF task.
            if scoring_server_proc is not None:
                os.kill(scoring_server_proc.pid, signal.SIGTERM)

    udf.metadata = model_metadata

    @functools.wraps(udf)
    def udf_with_default_cols(*args):
        # When invoked with no columns, fall back to the column names declared in the
        # model signature (if any); otherwise fail with an actionable error.
        if len(args) == 0:
            input_schema = model_metadata.get_input_schema()

            if input_schema and len(input_schema.inputs) > 0:
                if input_schema.has_input_names():
                    input_names = input_schema.input_names()
                    return udf(*input_names)
                else:
                    raise MlflowException(
                        message="Cannot apply udf because no column names specified. The udf "
                        "expects {} columns with types: {}. Input column names could not be "
                        "inferred from the model signature (column names not found).".format(
                            len(input_schema.inputs),
                            input_schema.inputs,
                        ),
                        error_code=INVALID_PARAMETER_VALUE,
                    )
            else:
                raise MlflowException(
                    "Attempting to apply udf on zero columns because no column names were "
                    "specified as arguments or inferred from the model signature.",
                    error_code=INVALID_PARAMETER_VALUE,
                )
        else:
            return udf(*args)

    return udf_with_default_cols
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def save_model(
    path,
    loader_module=None,
    data_path=None,
    code_path=None,
    conda_env=None,
    mlflow_model=None,
    python_model=None,
    artifacts=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    pip_requirements=None,
    extra_pip_requirements=None,
    **kwargs,
):
    """
    save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,\
               mlflow_model=Model(), python_model=None, artifacts=None)

    Save a Pyfunc model with custom inference logic and optional data dependencies to a path on the
    local filesystem.

    For information about the workflows that this method supports, please see :ref:`"workflows for
    creating custom pyfunc models" <pyfunc-create-custom-workflows>` and
    :ref:`"which workflow is right for my use case?" <pyfunc-create-custom-selecting-workflow>`.
    Note that the parameters for the second workflow: ``loader_module``, ``data_path`` and the
    parameters for the first workflow: ``python_model``, ``artifacts``, cannot be
    specified together.

    :param path: The path to which to save the Python model.
    :param loader_module: The name of the Python module that is used to load the model
                          from ``data_path``. This module must define a method with the prototype
                          ``_load_pyfunc(data_path)``. If not ``None``, this module and its
                          dependencies must be included in one of the following locations:

                          - The MLflow library.
                          - Package(s) listed in the model's Conda environment, specified by
                            the ``conda_env`` parameter.
                          - One or more of the files specified by the ``code_path`` parameter.

    :param data_path: Path to a file or directory containing model data.
    :param code_path: A list of local filesystem paths to Python file dependencies (or directories
                      containing file dependencies). These files are *prepended* to the system
                      path before the model is loaded.
    :param conda_env: {{ conda_env }}
    :param mlflow_model: :py:mod:`mlflow.models.Model` configuration to which to add the
                         **python_function** flavor.
    :param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
                         serialized using the CloudPickle library. Any dependencies of the class
                         should be included in one of the following locations:

                         - The MLflow library.
                         - Package(s) listed in the model's Conda environment, specified by
                           the ``conda_env`` parameter.
                         - One or more of the files specified by the ``code_path`` parameter.

                         Note: If the class is imported from another module, as opposed to being
                         defined in the ``__main__`` scope, the defining module should also be
                         included in one of the listed locations.
    :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
                      are resolved to absolute filesystem paths, producing a dictionary of
                      ``<name, absolute_path>`` entries. ``python_model`` can reference these
                      resolved entries as the ``artifacts`` property of the ``context`` parameter
                      in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
                      and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
                      For example, consider the following ``artifacts`` dictionary::

                        {
                            "my_file": "s3://my-bucket/path/to/my/file"
                        }

                      In this case, the ``"my_file"`` artifact is downloaded from S3. The
                      ``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
                      path via ``context.artifacts["my_file"]``.

                      If ``None``, no artifacts are added to the model.
    :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
                      describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
                      The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
                      from datasets with valid model input (e.g. the training dataset with target
                      column omitted) and valid model output (e.g. model predictions generated on
                      the training dataset), for example:

                      .. code-block:: python

                        from mlflow.models.signature import infer_signature

                        train = df.drop_column("target_label")
                        predictions = ...  # compute model predictions
                        signature = infer_signature(train, predictions)
    :param input_example: Input example provides one or several instances of valid
                          model input. The example can be used as a hint of what data to feed the
                          model. The given example can be a Pandas DataFrame where the given
                          example will be serialized to json using the Pandas split-oriented
                          format, or a numpy array where the example will be serialized to json
                          by converting it to a list. Bytes are base64-encoded.
    :param pip_requirements: {{ pip_requirements }}
    :param extra_pip_requirements: {{ extra_pip_requirements }}
    """
    _validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)

    # Accept the legacy `model` keyword as an alias for `mlflow_model`; anything else in
    # kwargs is an error.
    mlflow_model = kwargs.pop("model", mlflow_model)
    if kwargs:
        raise TypeError("save_model() got unexpected keyword arguments: {}".format(kwargs))
    if code_path is not None and not isinstance(code_path, list):
        raise TypeError("Argument code_path should be a list, not {}".format(type(code_path)))

    # The two supported workflows are mutually exclusive: "loader module" style
    # (loader_module/data_path) vs. "custom python model" style (python_model/artifacts).
    loader_workflow_params = {
        "loader_module": loader_module,
        "data_path": data_path,
    }
    class_workflow_params = {
        "artifacts": artifacts,
        "python_model": python_model,
    }
    uses_loader_workflow = any(v is not None for v in loader_workflow_params.values())
    uses_class_workflow = any(v is not None for v in class_workflow_params.values())

    if uses_loader_workflow and uses_class_workflow:
        raise MlflowException(
            message=(
                "The following sets of parameters cannot be specified together: {first_set_keys}"
                " and {second_set_keys}. All parameters in one set must be `None`. Instead, found"
                " the following values: {first_set_entries} and {second_set_entries}".format(
                    first_set_keys=loader_workflow_params.keys(),
                    second_set_keys=class_workflow_params.keys(),
                    first_set_entries=loader_workflow_params,
                    second_set_entries=class_workflow_params,
                )
            ),
            error_code=INVALID_PARAMETER_VALUE,
        )
    if loader_module is None and python_model is None:
        raise MlflowException(
            message=(
                "Either `loader_module` or `python_model` must be specified. A `loader_module` "
                "should be a python module. A `python_model` should be a subclass of PythonModel"
            ),
            error_code=INVALID_PARAMETER_VALUE,
        )

    _validate_and_prepare_target_save_path(path)

    if mlflow_model is None:
        mlflow_model = Model()
    if signature is not None:
        mlflow_model.signature = signature
    if input_example is not None:
        _save_example(mlflow_model, input_example, path)

    if uses_loader_workflow:
        return _save_model_with_loader_module_and_data_path(
            path=path,
            loader_module=loader_module,
            data_path=data_path,
            code_paths=code_path,
            conda_env=conda_env,
            mlflow_model=mlflow_model,
            pip_requirements=pip_requirements,
            extra_pip_requirements=extra_pip_requirements,
        )
    if uses_class_workflow:
        return mlflow.pyfunc.model._save_model_with_class_artifacts_params(
            path=path,
            python_model=python_model,
            artifacts=artifacts,
            conda_env=conda_env,
            code_paths=code_path,
            mlflow_model=mlflow_model,
            pip_requirements=pip_requirements,
            extra_pip_requirements=extra_pip_requirements,
        )
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def log_model(
    artifact_path,
    loader_module=None,
    data_path=None,
    code_path=None,
    conda_env=None,
    python_model=None,
    artifacts=None,
    registered_model_name=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
    pip_requirements=None,
    extra_pip_requirements=None,
):
    """
    Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow
    artifact for the current run.

    For information about the workflows that this method supports, see :ref:`Workflows for
    creating custom pyfunc models <pyfunc-create-custom-workflows>` and
    :ref:`Which workflow is right for my use case? <pyfunc-create-custom-selecting-workflow>`.
    You cannot specify the parameters for the second workflow: ``loader_module``, ``data_path``
    and the parameters for the first workflow: ``python_model``, ``artifacts`` together.

    :param artifact_path: The run-relative artifact path to which to log the Python model.
    :param loader_module: The name of the Python module that is used to load the model
                          from ``data_path``. This module must define a method with the prototype
                          ``_load_pyfunc(data_path)``. If not ``None``, this module and its
                          dependencies must be included in one of the following locations:

                          - The MLflow library.
                          - Package(s) listed in the model's Conda environment, specified by
                            the ``conda_env`` parameter.
                          - One or more of the files specified by the ``code_path`` parameter.

    :param data_path: Path to a file or directory containing model data.
    :param code_path: A list of local filesystem paths to Python file dependencies (or directories
                      containing file dependencies). These files are *prepended* to the system
                      path before the model is loaded.
    :param conda_env: {{ conda_env }}
    :param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
                         serialized using the CloudPickle library. Any dependencies of the class
                         should be included in one of the following locations:

                         - The MLflow library.
                         - Package(s) listed in the model's Conda environment, specified by
                           the ``conda_env`` parameter.
                         - One or more of the files specified by the ``code_path`` parameter.

                         Note: If the class is imported from another module, as opposed to being
                         defined in the ``__main__`` scope, the defining module should also be
                         included in one of the listed locations.
    :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
                      are resolved to absolute filesystem paths, producing a dictionary of
                      ``<name, absolute_path>`` entries. ``python_model`` can reference these
                      resolved entries as the ``artifacts`` property of the ``context`` parameter
                      in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
                      and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
                      For example, consider the following ``artifacts`` dictionary::

                        {
                            "my_file": "s3://my-bucket/path/to/my/file"
                        }

                      In this case, the ``"my_file"`` artifact is downloaded from S3. The
                      ``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
                      path via ``context.artifacts["my_file"]``.

                      If ``None``, no artifacts are added to the model.
    :param registered_model_name: This argument may change or be removed in a
                                  future release without warning. If given, create a model
                                  version under ``registered_model_name``, also creating a
                                  registered model if one with the given name does not exist.
    :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
                      describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
                      The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
                      from datasets with valid model input (e.g. the training dataset with target
                      column omitted) and valid model output (e.g. model predictions generated on
                      the training dataset), for example:

                      .. code-block:: python

                        from mlflow.models.signature import infer_signature

                        train = df.drop_column("target_label")
                        predictions = ...  # compute model predictions
                        signature = infer_signature(train, predictions)
    :param input_example: Input example provides one or several instances of valid
                          model input. The example can be used as a hint of what data to feed the
                          model. The given example can be a Pandas DataFrame where the given
                          example will be serialized to json using the Pandas split-oriented
                          format, or a numpy array where the example will be serialized to json
                          by converting it to a list. Bytes are base64-encoded.
    :param await_registration_for: Number of seconds to wait for the model version to finish
                            being created and is in ``READY`` status. By default, the function
                            waits for five minutes. Specify 0 or None to skip waiting.
    :param pip_requirements: {{ pip_requirements }}
    :param extra_pip_requirements: {{ extra_pip_requirements }}
    :return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
             metadata of the logged model.
    """
    # This function is a thin wrapper: all validation and the actual save happen inside
    # `Model.log`, which dispatches back to `mlflow.pyfunc.save_model`.
    log_kwargs = dict(
        artifact_path=artifact_path,
        flavor=mlflow.pyfunc,
        loader_module=loader_module,
        data_path=data_path,
        code_path=code_path,
        python_model=python_model,
        artifacts=artifacts,
        conda_env=conda_env,
        registered_model_name=registered_model_name,
        signature=signature,
        input_example=input_example,
        await_registration_for=await_registration_for,
        pip_requirements=pip_requirements,
        extra_pip_requirements=extra_pip_requirements,
    )
    return Model.log(**log_kwargs)
def _save_model_with_loader_module_and_data_path(
    path,
    loader_module,
    data_path=None,
    code_paths=None,
    conda_env=None,
    mlflow_model=None,
    pip_requirements=None,
    extra_pip_requirements=None,
):
    """
    Export model as a generic Python function model.

    :param path: The path to which to save the Python model.
    :param loader_module: The name of the Python module that is used to load the model
                          from ``data_path``. This module must define a method with the prototype
                          ``_load_pyfunc(data_path)``.
    :param data_path: Path to a file or directory containing model data.
    :param code_paths: A list of local filesystem paths to Python file dependencies (or directories
                       containing file dependencies). These files are *prepended* to the system
                       path before the model is loaded.
    :param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
                      this model should be run in.
    :param mlflow_model: Existing :py:class:`mlflow.models.Model` configuration to extend with the
                         **python_function** flavor; a fresh one is created when ``None``.
    :param pip_requirements: Explicit pip requirements (iterable of strings or path to a
                             requirements file); when provided, requirement inference is skipped.
    :param extra_pip_requirements: Additional pip requirements appended on top of the
                                   default/inferred requirements.
    :return: Model configuration containing model info.
    """
    data = None

    # Copy model data (if any) into the export directory; the flavor config records the
    # resulting relative path so `_load_pyfunc` can locate it at load time.
    if data_path is not None:
        model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
        data = model_file

    code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)

    if mlflow_model is None:
        mlflow_model = Model()

    mlflow.pyfunc.add_to_model(
        mlflow_model,
        loader_module=loader_module,
        code=code_dir_subpath,
        data=data,
        env=_CONDA_ENV_FILE_NAME,
    )
    # NOTE: the MLmodel file must be written before requirement inference below relies on it.
    mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))

    if conda_env is None:
        if pip_requirements is None:
            default_reqs = get_default_pip_requirements()
            # To ensure `_load_pyfunc` can successfully load the model during the dependency
            # inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.
            inferred_reqs = mlflow.models.infer_pip_requirements(
                path,
                FLAVOR_NAME,
                fallback=default_reqs,
            )
            default_reqs = sorted(set(inferred_reqs).union(default_reqs))
        else:
            # Explicit pip_requirements override any defaults/inference.
            default_reqs = None
        conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
            default_reqs,
            pip_requirements,
            extra_pip_requirements,
        )
    else:
        conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)

    with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)

    # Save `constraints.txt` if necessary
    if pip_constraints:
        write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))

    # Save `requirements.txt`
    write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))

    _PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))

    return mlflow_model
loader_template = """
import importlib
import os
import sys
def load_pyfunc():
{update_path}return importlib.import_module('{main}')._load_pyfunc('{data_path}')
"""
| """
The ``python_function`` model flavor serves as a default model interface for MLflow Python models.
Any MLflow Python model is expected to be loadable as a ``python_function`` model.
In addition, the ``mlflow.pyfunc`` module defines a generic :ref:`filesystem format
<pyfunc-filesystem-format>` for Python models and provides utilities for saving to and loading from
this format. The format is self contained in the sense that it includes all necessary information
for anyone to load it and use it. Dependencies are either stored directly with the model or
referenced via a Conda environment.
The ``mlflow.pyfunc`` module also defines utilities for creating custom ``pyfunc`` models
using frameworks and inference logic that may not be natively included in MLflow. See
:ref:`pyfunc-create-custom`.
.. _pyfunc-inference-api:
*************
Inference API
*************
Python function models are loaded as an instance of :py:class:`PyFuncModel
<mlflow.pyfunc.PyFuncModel>`, which is an MLflow wrapper around the model implementation and model
metadata (MLmodel file). You can score the model by calling the :py:func:`predict()
<mlflow.pyfunc.PyFuncModel.predict>` method, which has the following signature::
predict(
model_input: [pandas.DataFrame, numpy.ndarray, scipy.sparse.(csc.csc_matrix | csr.csr_matrix),
List[Any], Dict[str, Any]]
) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]
All PyFunc models will support `pandas.DataFrame` as input and DL PyFunc models will also support
tensor inputs in the form of Dict[str, numpy.ndarray] (named tensors) and `numpy.ndarrays`
(unnamed tensors).
.. _pyfunc-filesystem-format:
*****************
Filesystem format
*****************
The Pyfunc format is defined as a directory structure containing all required data, code, and
configuration::
./dst-path/
./MLmodel: configuration
<code>: code packaged with the model (specified in the MLmodel file)
<data>: data packaged with the model (specified in the MLmodel file)
<env>: Conda environment definition (specified in the MLmodel file)
The directory structure may contain additional contents that can be referenced by the ``MLmodel``
configuration.
.. _pyfunc-model-config:
MLmodel configuration
#####################
A Python model contains an ``MLmodel`` file in **python_function** format in its root with the
following parameters:
- loader_module [required]:
Python module that can load the model. Expected as module identifier
e.g. ``mlflow.sklearn``, it will be imported using ``importlib.import_module``.
The imported module must contain a function with the following signature::
_load_pyfunc(path: string) -> <pyfunc model implementation>
The path argument is specified by the ``data`` parameter and may refer to a file or
directory. The model implementation is expected to be an object with a
``predict`` method with the following signature::
predict(
model_input: [pandas.DataFrame, numpy.ndarray,
scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], Dict[str, Any]]
) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]
- code [optional]:
Relative path to a directory containing the code packaged with this model.
All files and directories inside this directory are added to the Python path
prior to importing the model loader.
- data [optional]:
Relative path to a file or directory containing model data.
The path is passed to the model loader.
- env [optional]:
Relative path to an exported Conda environment. If present this environment
should be activated prior to running the model.
- Optionally, any additional parameters necessary for interpreting the serialized model in
``pyfunc`` format.
.. rubric:: Example
::
tree example/sklearn_iris/mlruns/run1/outputs/linear-lr
::
├── MLmodel
├── code
│ ├── sklearn_iris.py
│
├── data
│ └── model.pkl
└── mlflow_env.yml
::
cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel
::
python_function:
code: code
data: data/model.pkl
loader_module: mlflow.sklearn
env: mlflow_env.yml
main: sklearn_iris
.. _pyfunc-create-custom:
******************************
Creating custom Pyfunc models
******************************
MLflow's persistence modules provide convenience functions for creating models with the
``pyfunc`` flavor in a variety of machine learning frameworks (scikit-learn, Keras, Pytorch, and
more); however, they do not cover every use case. For example, you may want to create an MLflow
model with the ``pyfunc`` flavor using a framework that MLflow does not natively support.
Alternatively, you may want to build an MLflow model that executes custom logic when evaluating
queries, such as preprocessing and postprocessing routines. Therefore, ``mlflow.pyfunc``
provides utilities for creating ``pyfunc`` models from arbitrary code and model data.
The :meth:`save_model()` and :meth:`log_model()` methods are designed to support multiple workflows
for creating custom ``pyfunc`` models that incorporate custom inference logic and artifacts
that the logic may require.
An `artifact` is a file or directory, such as a serialized model or a CSV. For example, a
serialized TensorFlow graph is an artifact. An MLflow model directory is also an artifact.
.. _pyfunc-create-custom-workflows:
Workflows
#########
:meth:`save_model()` and :meth:`log_model()` support the following workflows:
1. Programmatically defining a new MLflow model, including its attributes and artifacts.
Given a set of artifact URIs, :meth:`save_model()` and :meth:`log_model()` can
automatically download artifacts from their URIs and create an MLflow model directory.
In this case, you must define a Python class which inherits from :class:`~PythonModel`,
defining ``predict()`` and, optionally, ``load_context()``. An instance of this class is
specified via the ``python_model`` parameter; it is automatically serialized and deserialized
as a Python class, including all of its attributes.
2. Interpreting pre-existing data as an MLflow model.
If you already have a directory containing model data, :meth:`save_model()` and
:meth:`log_model()` can import the data as an MLflow model. The ``data_path`` parameter
specifies the local filesystem path to the directory containing model data.
In this case, you must provide a Python module, called a `loader module`. The
loader module defines a ``_load_pyfunc()`` method that performs the following tasks:
- Load data from the specified ``data_path``. For example, this process may include
deserializing pickled Python objects or models or parsing CSV files.
- Construct and return a pyfunc-compatible model wrapper. As in the first
use case, this wrapper must define a ``predict()`` method that is used to evaluate
queries. ``predict()`` must adhere to the :ref:`pyfunc-inference-api`.
The ``loader_module`` parameter specifies the name of your loader module.
For an example loader module implementation, refer to the `loader module
implementation in mlflow.keras <https://github.com/mlflow/mlflow/blob/
74d75109aaf2975f5026104d6125bb30f4e3f744/mlflow/keras.py#L157-L187>`_.
.. _pyfunc-create-custom-selecting-workflow:
Which workflow is right for my use case?
########################################
We consider the first workflow to be more user-friendly and generally recommend it for the
following reasons:
- It automatically resolves and collects specified model artifacts.
- It automatically serializes and deserializes the ``python_model`` instance and all of
its attributes, reducing the amount of user logic that is required to load the model
- You can create Models using logic that is defined in the ``__main__`` scope. This allows
custom models to be constructed in interactive environments, such as notebooks and the Python
REPL.
You may prefer the second, lower-level workflow for the following reasons:
- Inference logic is always persisted as code, rather than a Python object. This makes logic
easier to inspect and modify later.
- If you have already collected all of your model data in a single location, the second
workflow allows it to be saved in MLflow format directly, without enumerating constituent
artifacts.
"""
import importlib
import tempfile
import signal
import sys
import numpy as np
import os
import pandas
import yaml
from copy import deepcopy
import logging
import threading
import collections
import subprocess
from typing import Any, Union, List, Dict, Iterator, Tuple
import mlflow
import mlflow.pyfunc.model
from mlflow.models import Model, ModelSignature, ModelInputExample
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.utils import _save_example
from mlflow.pyfunc.model import ( # pylint: disable=unused-import
PythonModel,
PythonModelContext,
get_default_conda_env,
)
from mlflow.pyfunc.model import get_default_pip_requirements
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.types import DataType, Schema, TensorSpec
from mlflow.types.utils import clean_tensor_type
from mlflow.utils import PYTHON_VERSION, get_major_minor_py_version, _is_in_ipython_notebook
from mlflow.utils.annotations import deprecated
from mlflow.utils.file_utils import _copy_file_or_tree, write_to
from mlflow.utils.model_utils import (
_get_flavor_configuration,
_validate_and_copy_code_paths,
_add_code_from_conf_to_system_path,
_get_flavor_configuration_from_uri,
_validate_and_prepare_target_save_path,
)
from mlflow.utils.uri import append_to_uri_path
from mlflow.utils.environment import (
_validate_env_arguments,
_process_pip_requirements,
_process_conda_env,
_CONDA_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
_PYTHON_ENV_FILE_NAME,
_PythonEnv,
)
from mlflow.utils import env_manager as _EnvManager
from mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS
from mlflow.utils.databricks_utils import is_in_databricks_runtime
from mlflow.utils.file_utils import get_or_create_tmp_dir, get_or_create_nfs_tmp_dir
from mlflow.utils.process import cache_return_value_per_process
from mlflow.exceptions import MlflowException
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.protos.databricks_pb2 import (
INVALID_PARAMETER_VALUE,
RESOURCE_DOES_NOT_EXIST,
)
from scipy.sparse import csc_matrix, csr_matrix
from mlflow.utils.requirements_utils import (
_check_requirement_satisfied,
_parse_requirements,
)
from mlflow.utils import find_free_port
from mlflow.utils.nfs_on_spark import get_nfs_cache_root_dir
# Name under which the pyfunc flavor configuration is stored in the MLmodel file.
FLAVOR_NAME = "python_function"
# Keys of the pyfunc flavor configuration inside the MLmodel file.
MAIN = "loader_module"  # module providing ``_load_pyfunc``
CODE = "code"  # model-relative path to packaged code
DATA = "data"  # model-relative path to model data
ENV = "env"  # model-relative path to the Conda environment file
PY_VERSION = "python_version"  # Python version the model was saved with
_logger = logging.getLogger(__name__)
# Input/output types accepted by / returned from ``PyFuncModel.predict``.
PyFuncInput = Union[pandas.DataFrame, np.ndarray, csc_matrix, csr_matrix, List[Any], Dict[str, Any]]
PyFuncOutput = Union[pandas.DataFrame, pandas.Series, np.ndarray, list]
def add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):
    """
    Add a ``pyfunc`` spec to the model configuration.
    Defines ``pyfunc`` configuration schema. Caller can use this to create a valid ``pyfunc`` model
    flavor out of an existing directory structure. For example, other model flavors can use this to
    specify how to use their output as a ``pyfunc``.
    NOTE:
        All paths are relative to the exported model root directory.
    :param model: Existing model.
    :param loader_module: The module to be used to load the model.
    :param data: Path to the model data.
    :param code: Path to the code dependencies.
    :param env: Conda environment.
    :param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor specification.
                   Values must be YAML-serializable.
    :return: Updated model configuration.
    """
    # Deep-copy so the caller's kwargs (and any nested values) are never mutated.
    params = deepcopy(kwargs)
    params[MAIN] = loader_module
    # Record the saving Python version so loaders can warn about incompatibility.
    params[PY_VERSION] = PYTHON_VERSION
    if code:
        params[CODE] = code
    if data:
        params[DATA] = data
    if env:
        params[ENV] = env
    return model.add_flavor(FLAVOR_NAME, **params)
def _load_model_env(path):
    """
    Return the model-relative path of the Conda environment file recorded in the
    ``python_function`` flavor configuration at ``path``, or ``None`` if the model
    was saved without one.
    """
    flavor_conf = _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME)
    return flavor_conf.get(ENV, None)
def _enforce_mlflow_datatype(name, values: pandas.Series, t: DataType):
    """
    Enforce the input column type matches the declared in model input schema.
    The following type conversions are allowed:
    1. object -> string
    2. int -> long (upcast)
    3. float -> double (upcast)
    4. int -> double (safe conversion)
    5. np.datetime64[x] -> datetime (any precision)
    6. object -> datetime
    Any other type mismatch will raise error.
    :param name: Column name, used only in error messages.
    :param values: Column values to validate and (possibly) convert.
    :param t: MLflow ``DataType`` declared for this column in the model schema.
    :return: ``values``, converted where a safe conversion exists.
    :raises MlflowException: if the values cannot be safely converted to ``t``.
    """
    if values.dtype == object and t not in (DataType.binary, DataType.string):
        # Let pandas resolve object columns to a concrete dtype before comparing types.
        values = values.infer_objects()
    if t == DataType.string and values.dtype == object:
        # NB: the object can contain any type and we currently cannot cast to pandas Strings
        # due to how None is cast
        return values
    # NB: Comparison of pandas and numpy data type fails when numpy data type is on the left hand
    # side of the comparison operator. It works, however, if pandas type is on the left hand side.
    # That is because pandas is aware of numpy.
    if t.to_pandas() == values.dtype or t.to_numpy() == values.dtype:
        # The types are already compatible => conversion is not necessary.
        return values
    if t == DataType.binary and values.dtype.kind == t.binary.to_numpy().kind:
        # NB: bytes in numpy have variable itemsize depending on the length of the longest
        # element in the array (column). Since MLflow binary type is length agnostic, we ignore
        # itemsize when matching binary columns.
        return values
    if t == DataType.datetime and values.dtype.kind == t.to_numpy().kind:
        # NB: datetime values have variable precision denoted by brackets, e.g. datetime64[ns]
        # denotes nanosecond precision. Since MLflow datetime type is precision agnostic, we
        # ignore precision when matching datetime columns.
        return values
    if t == DataType.datetime and values.dtype == object:
        # NB: Pyspark date columns get converted to object when converted to a pandas
        # DataFrame. To respect the original typing, we convert the column to datetime.
        try:
            return values.astype(np.datetime64, errors="raise")
        except ValueError:
            raise MlflowException(
                "Failed to convert column {0} from type {1} to {2}.".format(name, values.dtype, t)
            )
    numpy_type = t.to_numpy()
    if values.dtype.kind == numpy_type.kind:
        # Same kind (e.g. int32 -> int64): safe only when the target is at least as wide.
        is_upcast = values.dtype.itemsize <= numpy_type.itemsize
    elif values.dtype.kind == "u" and numpy_type.kind == "i":
        # Unsigned -> signed needs one extra bit, so require a strictly wider target.
        is_upcast = values.dtype.itemsize < numpy_type.itemsize
    elif values.dtype.kind in ("i", "u") and numpy_type == np.float64:
        # allow (u)int => double conversion
        # (integers of <= 48 bits are exactly representable in a float64 mantissa)
        is_upcast = values.dtype.itemsize <= 6
    else:
        is_upcast = False
    if is_upcast:
        return values.astype(numpy_type, errors="raise")
    else:
        # NB: conversion between incompatible types (e.g. floats -> ints or
        # double -> float) are not allowed. While supported by pandas and numpy,
        # these conversions alter the values significantly.
        def all_ints(xs):
            # True when every non-null element is whole-valued.
            return all(pandas.isnull(x) or int(x) == x for x in xs)
        hint = ""
        if (
            values.dtype == np.float64
            and numpy_type.kind in ("i", "u")
            and values.hasnans
            and all_ints(values)
        ):
            # Whole-valued float columns containing NaNs are typically integer columns
            # with missing values; add a hint pointing the user at the fix.
            hint = (
                " Hint: the type mismatch is likely caused by missing values. "
                "Integer columns in python can not represent missing values and are therefore "
                "encoded as floats. The best way to avoid this problem is to infer the model "
                "schema based on a realistic data sample (training dataset) that includes missing "
                "values. Alternatively, you can declare integer columns as doubles (float64) "
                "whenever these columns may have missing values. See `Handling Integers With "
                "Missing Values <https://www.mlflow.org/docs/latest/models.html#"
                "handling-integers-with-missing-values>`_ for more details."
            )
        raise MlflowException(
            "Incompatible input types for column {0}. "
            "Can not safely convert {1} to {2}.{3}".format(name, values.dtype, numpy_type, hint)
        )
def _enforce_tensor_spec(
    values: Union[np.ndarray, csc_matrix, csr_matrix], tensor_spec: TensorSpec
):
    """
    Validate that ``values`` matches the shape and dtype declared by ``tensor_spec``
    and return it unchanged; raise ``MlflowException`` on any mismatch.
    """
    expected_shape = tensor_spec.shape
    actual_shape = values.shape
    # Sparse matrices expose their element dtype on the underlying ``data`` buffer.
    if isinstance(values, np.ndarray):
        actual_type = values.dtype
    else:
        actual_type = values.data.dtype
    if len(expected_shape) != len(actual_shape):
        raise MlflowException(
            "Shape of input {0} does not match expected shape {1}.".format(
                actual_shape, expected_shape
            )
        )
    for expected_dim, actual_dim in zip(expected_shape, actual_shape):
        # -1 declares a variable-size dimension, which any actual size satisfies.
        if expected_dim not in (-1, actual_dim):
            raise MlflowException(
                "Shape of input {0} does not match expected shape {1}.".format(
                    actual_shape, expected_shape
                )
            )
    if clean_tensor_type(actual_type) != tensor_spec.type:
        raise MlflowException(
            "dtype of input {0} does not match expected dtype {1}".format(
                values.dtype, tensor_spec.type
            )
        )
    return values
def _enforce_col_schema(pfInput: PyFuncInput, input_schema: Schema):
    """Coerce each input column to its schema-declared type and return a new DataFrame."""
    if input_schema.has_input_names():
        col_names = input_schema.input_names()
    else:
        # Unnamed schema: take columns positionally, up to the declared input count.
        col_names = pfInput.columns[: len(input_schema.inputs)]
    col_types = input_schema.input_types()
    result = pandas.DataFrame()
    for col_name, col_type in zip(col_names, col_types):
        result[col_name] = _enforce_mlflow_datatype(col_name, pfInput[col_name], col_type)
    return result
def _enforce_tensor_schema(pfInput: PyFuncInput, input_schema: Schema):
    """Validate the input tensor(s) against the model's tensor-based signature."""
    if input_schema.has_input_names():
        # Named tensors: expect a dict (or DataFrame) mapping input name -> array.
        if isinstance(pfInput, dict):
            checked = dict()
            for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):
                value = pfInput[col_name]
                if not isinstance(value, np.ndarray):
                    raise MlflowException(
                        "This model contains a tensor-based model signature with input names,"
                        " which suggests a dictionary input mapping input name to a numpy"
                        " array, but a dict with value type {0} was found.".format(type(value))
                    )
                checked[col_name] = _enforce_tensor_spec(value, tensor_spec)
            return checked
        if isinstance(pfInput, pandas.DataFrame):
            # Each column becomes one named tensor, cast to the spec's dtype.
            return {
                col_name: _enforce_tensor_spec(
                    np.array(pfInput[col_name], dtype=tensor_spec.type), tensor_spec
                )
                for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs)
            }
        raise MlflowException(
            "This model contains a tensor-based model signature with input names, which"
            " suggests a dictionary input mapping input name to tensor, but an input of"
            " type {0} was found.".format(type(pfInput))
        )
    # Unnamed tensor: a single array (or DataFrame / sparse matrix) is expected.
    if isinstance(pfInput, pandas.DataFrame):
        return _enforce_tensor_spec(pfInput.to_numpy(), input_schema.inputs[0])
    if isinstance(pfInput, (np.ndarray, csc_matrix, csr_matrix)):
        return _enforce_tensor_spec(pfInput, input_schema.inputs[0])
    raise MlflowException(
        "This model contains a tensor-based model signature with no input names,"
        " which suggests a numpy array input, but an input of type {0} was"
        " found.".format(type(pfInput))
    )
def _enforce_schema(pfInput: PyFuncInput, input_schema: Schema):
    """
    Enforces the provided input matches the model's input schema,
    For signatures with input names, we check there are no missing inputs and reorder the inputs to
    match the ordering declared in schema if necessary. Any extra columns are ignored.
    For column-based signatures, we make sure the types of the input match the type specified in
    the schema or if it can be safely converted to match the input schema.
    For tensor-based signatures, we make sure the shape and type of the input matches the shape
    and type specified in model's input schema.
    """
    if not input_schema.is_tensor_spec():
        # Column-based signature: coerce supported container inputs to a DataFrame first.
        if isinstance(pfInput, (list, np.ndarray, dict)):
            try:
                pfInput = pandas.DataFrame(pfInput)
            except Exception as e:
                raise MlflowException(
                    "This model contains a column-based signature, which suggests a DataFrame"
                    " input. There was an error casting the input data to a DataFrame:"
                    " {0}".format(str(e))
                )
        if not isinstance(pfInput, pandas.DataFrame):
            raise MlflowException(
                "Expected input to be DataFrame or list. Found: %s" % type(pfInput).__name__
            )
    if input_schema.has_input_names():
        # make sure there are no missing columns
        input_names = input_schema.input_names()
        expected_cols = set(input_names)
        actual_cols = set()
        if len(expected_cols) == 1 and isinstance(pfInput, np.ndarray):
            # for schemas with a single column, match input with column
            pfInput = {input_names[0]: pfInput}
            actual_cols = expected_cols
        elif isinstance(pfInput, pandas.DataFrame):
            actual_cols = set(pfInput.columns)
        elif isinstance(pfInput, dict):
            actual_cols = set(pfInput.keys())
        missing_cols = expected_cols - actual_cols
        extra_cols = actual_cols - expected_cols
        # Preserve order from the original columns, since missing/extra columns are likely to
        # be in same order.
        missing_cols = [c for c in input_names if c in missing_cols]
        extra_cols = [c for c in actual_cols if c in extra_cols]
        if missing_cols:
            # Extra columns alone are tolerated; only missing ones are fatal.
            raise MlflowException(
                "Model is missing inputs {0}."
                " Note that there were extra inputs: {1}".format(missing_cols, extra_cols)
            )
    elif not input_schema.is_tensor_spec():
        # The model signature does not specify column names => we can only verify column count.
        num_actual_columns = len(pfInput.columns)
        if num_actual_columns < len(input_schema.inputs):
            raise MlflowException(
                "Model inference is missing inputs. The model signature declares "
                "{0} inputs but the provided value only has "
                "{1} inputs. Note: the inputs were not named in the signature so we can "
                "only verify their count.".format(len(input_schema.inputs), num_actual_columns)
            )
    # Delegate per-input type/shape enforcement to the matching helper.
    return (
        _enforce_tensor_schema(pfInput, input_schema)
        if input_schema.is_tensor_spec()
        else _enforce_col_schema(pfInput, input_schema)
    )
class PyFuncModel:
    """
    MLflow 'python function' model.
    Wrapper around model implementation and metadata. This class is not meant to be constructed
    directly. Instead, instances of this class are constructed and returned from
    :py:func:`load_model() <mlflow.pyfunc.load_model>`.
    ``model_impl`` can be any Python object that implements the `Pyfunc interface
    <https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#pyfunc-inference-api>`_, and is
    returned by invoking the model's ``loader_module``.
    ``model_meta`` contains model metadata loaded from the MLmodel file.
    """
    def __init__(self, model_meta: Model, model_impl: Any):
        # Fail fast on implementations that cannot serve predictions.
        if not hasattr(model_impl, "predict"):
            raise MlflowException("Model implementation is missing required predict method.")
        if not model_meta:
            raise MlflowException("Model is missing metadata.")
        self._model_meta = model_meta
        self._model_impl = model_impl
    def predict(self, data: PyFuncInput) -> PyFuncOutput:
        """
        Generate model predictions.
        If the model contains signature, enforce the input schema first before calling the model
        implementation with the sanitized input. If the pyfunc model does not include model schema,
        the input is passed to the model implementation as is. See `Model Signature Enforcement
        <https://www.mlflow.org/docs/latest/models.html#signature-enforcement>`_ for more details.
        :param data: Model input as one of pandas.DataFrame, numpy.ndarray,
                     scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], or
                     Dict[str, numpy.ndarray]
        :return: Model predictions as one of pandas.DataFrame, pandas.Series, numpy.ndarray or list.
        """
        input_schema = self.metadata.get_input_schema()
        if input_schema is not None:
            data = _enforce_schema(data, input_schema)
        return self._model_impl.predict(data)
    @property
    def metadata(self):
        """Model metadata."""
        if self._model_meta is None:
            raise MlflowException("Model is missing metadata.")
        return self._model_meta
    def __repr__(self):
        # Summarize run/artifact provenance plus the underlying flavor for debugging.
        info = {}
        if self._model_meta is not None:
            if hasattr(self._model_meta, "run_id") and self._model_meta.run_id is not None:
                info["run_id"] = self._model_meta.run_id
            if (
                hasattr(self._model_meta, "artifact_path")
                and self._model_meta.artifact_path is not None
            ):
                info["artifact_path"] = self._model_meta.artifact_path
            info["flavor"] = self._model_meta.flavors[FLAVOR_NAME]["loader_module"]
        return yaml.safe_dump({"mlflow.pyfunc.loaded_model": info}, default_flow_style=False)
def _warn_dependency_requirement_mismatches(model_path):
    """
    Inspects the model's dependencies and prints a warning if the current Python environment
    doesn't satisfy them. Best-effort: any unexpected failure is logged, never raised.
    """
    req_file_path = os.path.join(model_path, _REQUIREMENTS_FILE_NAME)
    if not os.path.exists(req_file_path):
        # No requirements file was saved with the model; nothing to check.
        return
    try:
        mismatches = [
            str(info)
            for info in (
                _check_requirement_satisfied(req.req_str)
                for req in _parse_requirements(req_file_path, is_constraint=False)
            )
            if info is not None
        ]
        if mismatches:
            mismatch_str = " - " + "\n - ".join(mismatches)
            _logger.warning(
                "Detected one or more mismatches between the model's dependencies and the current "
                f"Python environment:\n{mismatch_str}\n"
                "To fix the mismatches, call `mlflow.pyfunc.get_model_dependencies(model_uri)` "
                "to fetch the model's environment and install dependencies using the resulting "
                "environment file."
            )
    except Exception as e:
        _logger.warning(
            f"Encountered an unexpected error ({repr(e)}) while detecting model dependency "
            "mismatches. Set logging level to DEBUG to see the full traceback."
        )
        _logger.debug("", exc_info=True)
def load_model(
    model_uri: str, suppress_warnings: bool = False, dst_path: str = None
) -> PyFuncModel:
    """
    Load a model stored in Python function format.
    :param model_uri: The location, in URI format, of the MLflow model. For example:
                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``
                      - ``mlflow-artifacts:/path/to/model``
                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param suppress_warnings: If ``True``, non-fatal warning messages associated with the model
                              loading process will be suppressed. If ``False``, these warning
                              messages will be emitted.
    :param dst_path: The local filesystem path to which to download the model artifact.
                     This directory must already exist. If unspecified, a local output
                     path will be created.
    :return: A :py:class:`PyFuncModel` wrapping the loaded implementation and its metadata.
    :raises MlflowException: if the model has no ``python_function`` flavor.
    """
    local_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
    if not suppress_warnings:
        _warn_dependency_requirement_mismatches(local_path)
    model_meta = Model.load(os.path.join(local_path, MLMODEL_FILE_NAME))
    conf = model_meta.flavors.get(FLAVOR_NAME)
    if conf is None:
        raise MlflowException(
            'Model does not have the "{flavor_name}" flavor'.format(flavor_name=FLAVOR_NAME),
            RESOURCE_DOES_NOT_EXIST,
        )
    model_py_version = conf.get(PY_VERSION)
    if not suppress_warnings:
        _warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)
    # Make any code packaged with the model importable before invoking its loader.
    _add_code_from_conf_to_system_path(local_path, conf, code_key=CODE)
    data_path = os.path.join(local_path, conf[DATA]) if (DATA in conf) else local_path
    # The flavor's loader module produces the actual model implementation object.
    model_impl = importlib.import_module(conf[MAIN])._load_pyfunc(data_path)
    return PyFuncModel(model_meta=model_meta, model_impl=model_impl)
def _download_model_conda_env(model_uri):
    """Download the model's recorded Conda environment file and return its local path."""
    flavor_conf = _get_flavor_configuration_from_uri(model_uri, FLAVOR_NAME)
    env_rel_path = flavor_conf[ENV]
    return _download_artifact_from_uri(append_to_uri_path(model_uri, env_rel_path))
def _get_model_dependencies(model_uri, format="pip"):  # pylint: disable=redefined-builtin
    """
    Resolve a local dependency file for the model at ``model_uri``.
    For ``format="pip"``, prefer the model's ``requirements.txt`` artifact; if that
    download fails, fall back to extracting the ``pip`` section of ``conda.yaml``
    into a requirements file under a fresh temporary directory.
    For ``format="conda"``, download and return the ``conda.yaml`` path.
    :raises MlflowException: if ``format`` is unrecognized, or if the fallback
        ``conda.yaml`` contains no pip section.
    """
    if format == "pip":
        req_file_uri = append_to_uri_path(model_uri, _REQUIREMENTS_FILE_NAME)
        try:
            return _download_artifact_from_uri(req_file_uri)
        except Exception as e:
            # fallback to download conda.yaml file and parse the "pip" section from it.
            _logger.info(
                f"Downloading model '{_REQUIREMENTS_FILE_NAME}' file failed, error is {repr(e)}. "
                "Falling back to fetching pip requirements from the model's 'conda.yaml' file. "
                "Other conda dependencies will be ignored."
            )
            conda_yml_path = _download_model_conda_env(model_uri)
            with open(conda_yml_path, "r") as yf:
                conda_yml = yaml.safe_load(yf)
            conda_deps = conda_yml.get("dependencies", [])
            # Locate the dict entry holding the nested pip requirements list.
            for index, dep in enumerate(conda_deps):
                if isinstance(dep, dict) and "pip" in dep:
                    pip_deps_index = index
                    break
            else:
                # for/else: the loop completed without break, i.e. no pip section found.
                raise MlflowException(
                    "No pip section found in conda.yaml file in the model directory.",
                    error_code=RESOURCE_DOES_NOT_EXIST,
                )
            # pop() both extracts the pip list and removes it from conda_deps, so the
            # warning below reports only the *excluded* (non-pip) dependencies.
            pip_deps = conda_deps.pop(pip_deps_index)["pip"]
            tmp_dir = tempfile.mkdtemp()
            pip_file_path = os.path.join(tmp_dir, _REQUIREMENTS_FILE_NAME)
            with open(pip_file_path, "w") as f:
                f.write("\n".join(pip_deps) + "\n")
            if len(conda_deps) > 0:
                _logger.warning(
                    "The following conda dependencies have been excluded from the environment file:"
                    f" {', '.join(conda_deps)}."
                )
            return pip_file_path
    elif format == "conda":
        conda_yml_path = _download_model_conda_env(model_uri)
        return conda_yml_path
    else:
        raise MlflowException(
            f"Illegal format argument '{format}'.", error_code=INVALID_PARAMETER_VALUE
        )
def get_model_dependencies(model_uri, format="pip"):  # pylint: disable=redefined-builtin
    """
    :param model_uri: The uri of the model to get dependencies from.
    :param format: The format of the returned dependency file. ``"pip"`` (the default)
                   returns the path to a pip ``requirements.txt`` file; if the model was
                   saved without one, the ``pip`` section of its ``conda.yaml`` file is
                   extracted instead and any other conda dependencies are ignored.
                   ``"conda"`` returns the path to the model's ``conda.yaml`` file.
    :return: The local filesystem path to the requested dependency file.
    """
    dep_file = _get_model_dependencies(model_uri, format)
    if format != "pip":
        return dep_file
    # Surface a copy-pasteable install command ("%pip" magic inside notebooks).
    prefix = "%" if _is_in_ipython_notebook() else ""
    _logger.info(
        "To install the dependencies that were used to train the model, run the "
        f"following command: '{prefix}pip install -r {dep_file}'."
    )
    return dep_file
@deprecated("mlflow.pyfunc.load_model", 1.0)
def load_pyfunc(model_uri, suppress_warnings=False):
    """
    Load a model stored in Python function format.
    Deprecated alias for :py:func:`load_model`.
    :param model_uri: The location, in URI format, of the MLflow model, e.g.
                      ``/Users/me/path/to/local/model``, ``relative/path/to/local/model``,
                      ``s3://my_bucket/path/to/model``,
                      ``runs:/<mlflow_run_id>/run-relative/path/to/model``,
                      ``models:/<model_name>/<model_version>``,
                      ``models:/<model_name>/<stage>`` or
                      ``mlflow-artifacts:/path/to/model``.
                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param suppress_warnings: If ``True``, suppress non-fatal warning messages emitted
                              during model loading; if ``False``, emit them.
    """
    return load_model(model_uri=model_uri, suppress_warnings=suppress_warnings)
def _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None):
    """
    Compare the Python version recorded at model save time against the running
    interpreter's version and log a warning when the major.minor versions differ
    (or when no version was recorded at all).
    """
    if model_py_version is None:
        _logger.warning(
            "The specified model does not have a specified Python version. It may be"
            " incompatible with the version of Python that is currently running: Python %s",
            PYTHON_VERSION,
        )
        return
    saved_version = get_major_minor_py_version(model_py_version)
    running_version = get_major_minor_py_version(PYTHON_VERSION)
    if saved_version != running_version:
        _logger.warning(
            "The version of Python that the model was saved in, `Python %s`, differs"
            " from the version of Python that is currently running, `Python %s`,"
            " and may be incompatible",
            model_py_version,
            PYTHON_VERSION,
        )
def _create_model_downloading_tmp_dir(should_use_nfs):
    """
    Create and return a fresh temporary directory for downloading a model,
    rooted under NFS-backed or local temp storage depending on ``should_use_nfs``.
    """
    base_tmp_dir = get_or_create_nfs_tmp_dir() if should_use_nfs else get_or_create_tmp_dir()
    root_model_cache_dir = os.path.join(base_tmp_dir, "models")
    os.makedirs(root_model_cache_dir, exist_ok=True)
    tmp_model_dir = tempfile.mkdtemp(dir=root_model_cache_dir)
    # mkdtemp creates a directory with permission 0o700
    # change it to be 0o777 to ensure it can be seen in spark UDF
    os.chmod(tmp_model_dir, 0o777)
    return tmp_model_dir
@cache_return_value_per_process
def _get_or_create_env_root_dir(should_use_nfs):
    """
    Return a per-process root directory for restored environments ("envs"),
    creating it under NFS-backed or local temp storage as requested.
    """
    base_tmp_dir = get_or_create_nfs_tmp_dir() if should_use_nfs else get_or_create_tmp_dir()
    env_root_dir = os.path.join(base_tmp_dir, "envs")
    os.makedirs(env_root_dir, exist_ok=True)
    return env_root_dir
# Number of trailing output lines to retain from the scoring server process.
# NOTE(review): consumer is outside this chunk — confirm against the server launcher.
_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP = 200
def spark_udf(spark, model_uri, result_type="double", env_manager="local"):
    """
    A Spark UDF that can be used to invoke the Python function formatted model.

    Parameters passed to the UDF are forwarded to the model as a DataFrame where the column names
    are ordinals (0, 1, ...). On some versions of Spark (3.0 and above), it is also possible to
    wrap the input in a struct. In that case, the data will be passed as a DataFrame with column
    names given by the struct definition (e.g. when invoked as my_udf(struct('x', 'y')), the model
    will get the data as a pandas DataFrame with 2 columns 'x' and 'y').

    If a model contains a signature, the UDF can be called without specifying column name
    arguments. In this case, the UDF will be called with column names from signature, so the
    evaluation dataframe's column names must match the model signature's column names.

    The predictions are filtered to contain only the columns that can be represented as the
    ``result_type``. If the ``result_type`` is string or array of strings, all predictions are
    converted to string. If the result type is not an array type, the left most column with
    matching type is returned.

    NOTE: Inputs of type ``pyspark.sql.types.DateType`` are not supported on earlier versions of
    Spark (2.4 and below).

    .. code-block:: python
        :caption: Example

        from pyspark.sql.functions import struct

        predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
        df.withColumn("prediction", predict(struct("name", "age"))).show()

    :param spark: A SparkSession object.
    :param model_uri: The location, in URI format, of the MLflow model with the
                      :py:mod:`mlflow.pyfunc` flavor. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``
                      - ``mlflow-artifacts:/path/to/model``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.

    :param result_type: the return type of the user-defined function. The value can be either a
        ``pyspark.sql.types.DataType`` object or a DDL-formatted type string. Only a primitive
        type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed.
        The following classes of result type are supported:

        - "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an
          ``int32`` or an exception if there is none.

        - "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an
          ``int64`` or an exception if there is none.

        - ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested
          size.

        - "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to
          ``float32`` or an exception if there is none.

        - "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to
          ``double`` or an exception if there is none.

        - ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or
          an exception if there are no numeric columns.

        - "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to
          ``string``.

        - ``ArrayType(StringType)``: All columns converted to ``string``.

    :param env_manager: The environment manager to use in order to create the python environment
                        for model inference. Note that environment is only restored in the context
                        of the PySpark UDF; the software environment outside of the UDF is
                        unaffected. Default value is ``local``, and the following values are
                        supported:

                        - ``conda``: (Recommended) Use Conda to restore the software environment
                          that was used to train the model.
                        - ``virtualenv``: Use virtualenv to restore the python environment that
                          was used to train the model.
                        - ``local``: Use the current Python environment for model inference, which
                          may differ from the environment used to train the model and may lead to
                          errors or invalid predictions.

    :return: Spark UDF that applies the model's ``predict`` method to the data and returns a
             type specified by ``result_type``, which by default is a double.
    """
    # Scope Spark import to this method so users don't need pyspark to use non-Spark-related
    # functionality.
    import functools
    from mlflow.pyfunc.spark_model_cache import SparkModelCache
    from mlflow.utils._spark_utils import _SparkDirectoryDistributor
    from pyspark.sql.functions import pandas_udf
    from pyspark.sql.types import _parse_datatype_string
    from pyspark.sql.types import (
        ArrayType,
        DataType as SparkDataType,
        StructType as SparkStructType,
    )
    from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType
    from mlflow.models.cli import _get_flavor_backend

    _EnvManager.validate(env_manager)

    # Check whether spark is in local or local-cluster mode
    # this case all executors and driver share the same filesystem
    is_spark_in_local_mode = spark.conf.get("spark.master").startswith("local")

    nfs_root_dir = get_nfs_cache_root_dir()
    should_use_nfs = nfs_root_dir is not None
    # Only broadcast the model via Spark when executors cannot reach the driver's
    # filesystem directly (neither local mode nor a shared NFS mount).
    should_use_spark_to_broadcast_file = not (is_spark_in_local_mode or should_use_nfs)

    env_root_dir = _get_or_create_env_root_dir(should_use_nfs)

    if not isinstance(result_type, SparkDataType):
        result_type = _parse_datatype_string(result_type)

    elem_type = result_type
    if isinstance(elem_type, ArrayType):
        elem_type = elem_type.elementType

    supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]

    if not any(isinstance(elem_type, x) for x in supported_types):
        raise MlflowException(
            message="Invalid result_type '{}'. Result type can only be one of or an array of one "
            "of the following types: {}".format(str(elem_type), str(supported_types)),
            error_code=INVALID_PARAMETER_VALUE,
        )

    local_model_path = _download_artifact_from_uri(
        artifact_uri=model_uri, output_path=_create_model_downloading_tmp_dir(should_use_nfs)
    )

    if env_manager == _EnvManager.LOCAL:
        # Assume spark executor python environment is the same with spark driver side.
        _warn_dependency_requirement_mismatches(local_model_path)
        _logger.warning(
            'Calling `spark_udf()` with `env_manager="local"` does not recreate the same '
            "environment that was used during training, which may lead to errors or inaccurate "
            'predictions. We recommend specifying `env_manager="conda"`, which automatically '
            "recreates the environment that was used to train the model and performs inference "
            "in the recreated environment."
        )
    else:
        _logger.info(
            "This UDF will use Conda to recreate the model's software environment for inference. "
            "This may take extra time during execution."
        )
        if not sys.platform.startswith("linux"):
            # TODO: support killing mlflow server launched in UDF task when spark job canceled
            #  for non-linux system.
            #  https://stackoverflow.com/questions/53208/how-do-i-automatically-destroy-child-processes-in-windows
            _logger.warning(
                "In order to run inference code in restored python environment, PySpark UDF "
                "processes spawn MLflow Model servers as child processes. Due to system "
                "limitations with handling SIGKILL signals, these MLflow Model server child "
                "processes cannot be cleaned up if the Spark Job is canceled."
            )

    if not should_use_spark_to_broadcast_file:
        # Prepare restored environment in driver side if possible.
        # Note: In databricks runtime, because databricks notebook cell output cannot capture
        # child process output, so that set capture_output to be True so that when `conda prepare
        # env` command failed, the exception message will include command stdout/stderr output.
        # Otherwise user have to check cluster driver log to find command stdout/stderr output.
        # In non-databricks runtime, set capture_output to be False, because the benefit of
        # "capture_output=False" is the output will be printed immediately, otherwise you have
        # to wait conda command fail and suddenly get all output printed (included in error
        # message).
        if env_manager != _EnvManager.LOCAL:
            _get_flavor_backend(
                local_model_path,
                env_manager=env_manager,
                install_mlflow=False,
                env_root_dir=env_root_dir,
            ).prepare_env(model_uri=local_model_path, capture_output=is_in_databricks_runtime())

    # Broadcast local model directory to remote worker if needed.
    if should_use_spark_to_broadcast_file:
        archive_path = SparkModelCache.add_local_model(spark, local_model_path)

    model_metadata = Model.load(os.path.join(local_model_path, MLMODEL_FILE_NAME))

    def _predict_row_batch(predict_fn, args):
        # Assemble the positional UDF arguments into a single pandas DataFrame,
        # run the model's predict function on it, then coerce the result to the
        # requested Spark result_type.
        input_schema = model_metadata.get_input_schema()
        pdf = None

        for x in args:
            if type(x) == pandas.DataFrame:
                # A DataFrame argument comes from a struct() column and must be the only input.
                if len(args) != 1:
                    raise Exception(
                        "If passing a StructType column, there should be only one "
                        "input column, but got %d" % len(args)
                    )
                pdf = x
        if pdf is None:
            args = list(args)
            if input_schema is None:
                # No signature: name columns by ordinal position.
                names = [str(i) for i in range(len(args))]
            else:
                names = input_schema.input_names()
                if len(args) > len(names):
                    args = args[: len(names)]
                if len(args) < len(names):
                    raise MlflowException(
                        "Model input is missing columns. Expected {0} input columns {1},"
                        " but the model received only {2} unnamed input columns"
                        " (Since the columns were passed unnamed they are expected to be in"
                        " the order specified by the schema).".format(len(names), names, len(args))
                    )
            pdf = pandas.DataFrame(data={names[i]: x for i, x in enumerate(args)}, columns=names)

        result = predict_fn(pdf)

        if not isinstance(result, pandas.DataFrame):
            result = pandas.DataFrame(data=result)

        elem_type = result_type.elementType if isinstance(result_type, ArrayType) else result_type

        # Keep only prediction columns representable as the requested element type.
        if type(elem_type) == IntegerType:
            result = result.select_dtypes(
                [np.byte, np.ubyte, np.short, np.ushort, np.int32]
            ).astype(np.int32)

        elif type(elem_type) == LongType:
            result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, int])

        elif type(elem_type) == FloatType:
            result = result.select_dtypes(include=(np.number,)).astype(np.float32)

        elif type(elem_type) == DoubleType:
            result = result.select_dtypes(include=(np.number,)).astype(np.float64)

        if len(result.columns) == 0:
            raise MlflowException(
                message="The the model did not produce any values compatible with the requested "
                "type '{}'. Consider requesting udf with StringType or "
                "Arraytype(StringType).".format(str(elem_type)),
                error_code=INVALID_PARAMETER_VALUE,
            )

        if type(elem_type) == StringType:
            result = result.applymap(str)

        if type(result_type) == ArrayType:
            return pandas.Series(result.to_numpy().tolist())
        else:
            # Non-array result: return only the leftmost matching column.
            return result[result.columns[0]]

    result_type_hint = (
        pandas.DataFrame if isinstance(result_type, SparkStructType) else pandas.Series
    )

    @pandas_udf(result_type)
    def udf(
        iterator: Iterator[Tuple[Union[pandas.Series, pandas.DataFrame], ...]]
    ) -> Iterator[result_type_hint]:
        # importing here to prevent circular import
        from mlflow.pyfunc.scoring_server.client import ScoringServerClient

        # Note: this is a pandas udf function in iteration style, which takes an iterator of
        # tuple of pandas.Series and outputs an iterator of pandas.Series.

        scoring_server_proc = None

        if env_manager != _EnvManager.LOCAL:
            # Non-local env manager: run inference in a scoring-server child process
            # started from the restored environment.
            if should_use_spark_to_broadcast_file:
                local_model_path_on_executor = _SparkDirectoryDistributor.get_or_extract(
                    archive_path
                )
                # Create individual conda_env_root_dir for each spark UDF task process.
                env_root_dir_on_executor = _get_or_create_env_root_dir(should_use_nfs)
            else:
                local_model_path_on_executor = local_model_path
                env_root_dir_on_executor = env_root_dir

            pyfunc_backend = _get_flavor_backend(
                local_model_path_on_executor,
                workers=1,
                install_mlflow=False,
                env_manager=env_manager,
                env_root_dir=env_root_dir_on_executor,
            )

            if should_use_spark_to_broadcast_file:
                # Call "prepare_env" in advance in order to reduce scoring server launch time.
                # So that we can use a shorter timeout when call `client.wait_server_ready`,
                # otherwise we have to set a long timeout for `client.wait_server_ready` time,
                # this prevents spark UDF task failing fast if other exception raised when scoring
                # server launching.
                # Set "capture_output" so that if "conda env create" command failed, the command
                # stdout/stderr output will be attached to the exception message and included in
                # driver side exception.
                pyfunc_backend.prepare_env(
                    model_uri=local_model_path_on_executor, capture_output=True
                )

            # launch scoring server
            server_port = find_free_port()
            scoring_server_proc = pyfunc_backend.serve(
                model_uri=local_model_path_on_executor,
                port=server_port,
                host="127.0.0.1",
                timeout=60,
                enable_mlserver=False,
                synchronous=False,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )

            # Keep only the last N lines of server output for error reporting.
            server_tail_logs = collections.deque(maxlen=_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP)

            def server_redirect_log_thread_func(child_stdout):
                # Mirror the scoring server's output to this task's stdout while
                # retaining a bounded tail for error messages.
                for line in child_stdout:
                    if isinstance(line, bytes):
                        decoded = line.decode()
                    else:
                        decoded = line
                    server_tail_logs.append(decoded)
                    sys.stdout.write("[model server] " + decoded)

            server_redirect_log_thread = threading.Thread(
                target=server_redirect_log_thread_func, args=(scoring_server_proc.stdout,)
            )
            server_redirect_log_thread.setDaemon(True)
            server_redirect_log_thread.start()

            client = ScoringServerClient("127.0.0.1", server_port)

            try:
                client.wait_server_ready(timeout=90, scoring_server_proc=scoring_server_proc)
            except Exception:
                err_msg = "During spark UDF task execution, mlflow model server failed to launch. "
                if len(server_tail_logs) == _MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP:
                    err_msg += (
                        f"Last {_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP} "
                        "lines of MLflow model server output:\n"
                    )
                else:
                    err_msg += "MLflow model server output:\n"
                err_msg += "".join(server_tail_logs)
                raise MlflowException(err_msg)

            def batch_predict_fn(pdf):
                return client.invoke(pdf)

        elif env_manager == _EnvManager.LOCAL:
            # Local env manager: load and call the model in-process.
            if should_use_spark_to_broadcast_file:
                loaded_model, _ = SparkModelCache.get_or_load(archive_path)
            else:
                loaded_model = mlflow.pyfunc.load_model(local_model_path)

            def batch_predict_fn(pdf):
                return loaded_model.predict(pdf)

        try:
            for input_batch in iterator:
                # If the UDF is called with only multiple arguments,
                # the `input_batch` is a tuple which composes of several pd.Series/pd.DataFrame
                # objects.
                # If the UDF is called with only one argument,
                # the `input_batch` instance will be an instance of `pd.Series`/`pd.DataFrame`,
                if isinstance(input_batch, (pandas.Series, pandas.DataFrame)):
                    # UDF is called with only one argument
                    row_batch_args = (input_batch,)
                else:
                    row_batch_args = input_batch

                yield _predict_row_batch(batch_predict_fn, row_batch_args)
        finally:
            # Ensure the scoring-server child process is terminated when the task ends.
            if scoring_server_proc is not None:
                os.kill(scoring_server_proc.pid, signal.SIGTERM)

    udf.metadata = model_metadata

    @functools.wraps(udf)
    def udf_with_default_cols(*args):
        # When invoked with no columns, fall back to the column names declared in
        # the model signature (if any); otherwise raise a descriptive error.
        if len(args) == 0:
            input_schema = model_metadata.get_input_schema()

            if input_schema and len(input_schema.inputs) > 0:
                if input_schema.has_input_names():
                    input_names = input_schema.input_names()
                    return udf(*input_names)
                else:
                    raise MlflowException(
                        message="Cannot apply udf because no column names specified. The udf "
                        "expects {} columns with types: {}. Input column names could not be "
                        "inferred from the model signature (column names not found).".format(
                            len(input_schema.inputs),
                            input_schema.inputs,
                        ),
                        error_code=INVALID_PARAMETER_VALUE,
                    )
            else:
                raise MlflowException(
                    "Attempting to apply udf on zero columns because no column names were "
                    "specified as arguments or inferred from the model signature.",
                    error_code=INVALID_PARAMETER_VALUE,
                )
        else:
            return udf(*args)

    return udf_with_default_cols
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def save_model(
    path,
    loader_module=None,
    data_path=None,
    code_path=None,
    conda_env=None,
    mlflow_model=None,
    python_model=None,
    artifacts=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    pip_requirements=None,
    extra_pip_requirements=None,
    **kwargs,
):
    """
    save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,\
mlflow_model=Model(), python_model=None, artifacts=None)

    Save a Pyfunc model with custom inference logic and optional data dependencies to a path on
    the local filesystem.

    Two mutually exclusive workflows are supported (see :ref:`"workflows for creating custom
    pyfunc models" <pyfunc-create-custom-workflows>` and :ref:`"which workflow is right for my
    use case?" <pyfunc-create-custom-selecting-workflow>`): the "loader module" workflow
    (``loader_module``/``data_path``) and the "python model" workflow
    (``python_model``/``artifacts``). Parameters from the two workflows cannot be specified
    together.

    :param path: The path to which to save the Python model.
    :param loader_module: The name of the Python module that is used to load the model from
                          ``data_path``. This module must define a method with the prototype
                          ``_load_pyfunc(data_path)`` and, if not ``None``, must be available
                          from the MLflow library, the model's Conda environment, or one of the
                          files specified by ``code_path``.
    :param data_path: Path to a file or directory containing model data.
    :param code_path: A list of local filesystem paths to Python file dependencies (or
                      directories containing file dependencies). These files are *prepended* to
                      the system path before the model is loaded.
    :param conda_env: {{ conda_env }}
    :param mlflow_model: :py:mod:`mlflow.models.Model` configuration to which to add the
                         **python_function** flavor.
    :param python_model: An instance of a subclass of :class:`~PythonModel`, serialized using
                         the CloudPickle library. Its dependencies (including the defining
                         module, if not ``__main__``) must be available from the MLflow library,
                         the model's Conda environment, or one of the files specified by
                         ``code_path``.
    :param artifacts: A dictionary of ``<name, artifact_uri>`` entries. Remote artifact URIs are
                      resolved to absolute filesystem paths that ``python_model`` can access as
                      ``context.artifacts[<name>]`` in
                      :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
                      and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
                      If ``None``, no artifacts are added to the model.
    :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>` describing model
                      input and output :py:class:`Schema <mlflow.types.Schema>`; can be
                      :py:func:`inferred <mlflow.models.infer_signature>` from datasets with
                      valid model input and output.
    :param input_example: One or several instances of valid model input, serialized to json
                          (Pandas split-oriented format for DataFrames, list form for numpy
                          arrays; bytes are base64-encoded).
    :param pip_requirements: {{ pip_requirements }}
    :param extra_pip_requirements: {{ extra_pip_requirements }}
    """
    _validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)

    # Accept the deprecated `model` keyword as an alias for `mlflow_model`; any other
    # keyword argument is an error.
    mlflow_model = kwargs.pop("model", mlflow_model)
    if kwargs:
        raise TypeError("save_model() got unexpected keyword arguments: {}".format(kwargs))
    if code_path is not None and not isinstance(code_path, list):
        raise TypeError("Argument code_path should be a list, not {}".format(type(code_path)))

    # The two workflows are mutually exclusive: all parameters of one set must be None.
    first_argument_set = {
        "loader_module": loader_module,
        "data_path": data_path,
    }
    second_argument_set = {
        "artifacts": artifacts,
        "python_model": python_model,
    }
    first_argument_set_specified = any(v is not None for v in first_argument_set.values())
    second_argument_set_specified = any(v is not None for v in second_argument_set.values())

    if first_argument_set_specified and second_argument_set_specified:
        raise MlflowException(
            message=(
                "The following sets of parameters cannot be specified together: {first_set_keys}"
                " and {second_set_keys}. All parameters in one set must be `None`. Instead, found"
                " the following values: {first_set_entries} and {second_set_entries}".format(
                    first_set_keys=first_argument_set.keys(),
                    second_set_keys=second_argument_set.keys(),
                    first_set_entries=first_argument_set,
                    second_set_entries=second_argument_set,
                )
            ),
            error_code=INVALID_PARAMETER_VALUE,
        )
    elif loader_module is None and python_model is None:
        msg = (
            "Either `loader_module` or `python_model` must be specified. A `loader_module` "
            "should be a python module. A `python_model` should be a subclass of PythonModel"
        )
        raise MlflowException(message=msg, error_code=INVALID_PARAMETER_VALUE)

    _validate_and_prepare_target_save_path(path)

    if mlflow_model is None:
        mlflow_model = Model()
    if signature is not None:
        mlflow_model.signature = signature
    if input_example is not None:
        _save_example(mlflow_model, input_example, path)

    # Dispatch to the appropriate workflow implementation.
    if first_argument_set_specified:
        return _save_model_with_loader_module_and_data_path(
            path=path,
            loader_module=loader_module,
            data_path=data_path,
            code_paths=code_path,
            conda_env=conda_env,
            mlflow_model=mlflow_model,
            pip_requirements=pip_requirements,
            extra_pip_requirements=extra_pip_requirements,
        )
    elif second_argument_set_specified:
        return mlflow.pyfunc.model._save_model_with_class_artifacts_params(
            path=path,
            python_model=python_model,
            artifacts=artifacts,
            conda_env=conda_env,
            code_paths=code_path,
            mlflow_model=mlflow_model,
            pip_requirements=pip_requirements,
            extra_pip_requirements=extra_pip_requirements,
        )
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def log_model(
    artifact_path,
    loader_module=None,
    data_path=None,
    code_path=None,
    conda_env=None,
    python_model=None,
    artifacts=None,
    registered_model_name=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
    pip_requirements=None,
    extra_pip_requirements=None,
):
    """
    Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow
    artifact for the current run.

    Two mutually exclusive workflows are supported (see :ref:`Workflows for creating custom
    pyfunc models <pyfunc-create-custom-workflows>` and :ref:`Which workflow is right for my use
    case? <pyfunc-create-custom-selecting-workflow>`): the "loader module" workflow
    (``loader_module``/``data_path``) and the "python model" workflow
    (``python_model``/``artifacts``). Parameters from the two workflows cannot be specified
    together.

    :param artifact_path: The run-relative artifact path to which to log the Python model.
    :param loader_module: The name of the Python module that is used to load the model from
                          ``data_path``. This module must define a method with the prototype
                          ``_load_pyfunc(data_path)`` and, if not ``None``, must be available
                          from the MLflow library, the model's Conda environment, or one of the
                          files specified by ``code_path``.
    :param data_path: Path to a file or directory containing model data.
    :param code_path: A list of local filesystem paths to Python file dependencies (or
                      directories containing file dependencies). These files are *prepended* to
                      the system path before the model is loaded.
    :param conda_env: {{ conda_env }}
    :param python_model: An instance of a subclass of :class:`~PythonModel`, serialized using
                         the CloudPickle library. Its dependencies (including the defining
                         module, if not ``__main__``) must be available from the MLflow library,
                         the model's Conda environment, or one of the files specified by
                         ``code_path``.
    :param artifacts: A dictionary of ``<name, artifact_uri>`` entries. Remote artifact URIs are
                      resolved to absolute filesystem paths that ``python_model`` can access as
                      ``context.artifacts[<name>]`` in
                      :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
                      and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
                      If ``None``, no artifacts are added to the model.
    :param registered_model_name: This argument may change or be removed in a future release
                                  without warning. If given, create a model version under
                                  ``registered_model_name``, also creating a registered model if
                                  one with the given name does not exist.
    :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>` describing model
                      input and output :py:class:`Schema <mlflow.types.Schema>`; can be
                      :py:func:`inferred <mlflow.models.infer_signature>` from datasets with
                      valid model input and output.
    :param input_example: One or several instances of valid model input, serialized to json
                          (Pandas split-oriented format for DataFrames, list form for numpy
                          arrays; bytes are base64-encoded).
    :param await_registration_for: Number of seconds to wait for the model version to finish
                                   being created and is in ``READY`` status. By default, the
                                   function waits for five minutes. Specify 0 or None to skip
                                   waiting.
    :param pip_requirements: {{ pip_requirements }}
    :param extra_pip_requirements: {{ extra_pip_requirements }}
    :return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
             metadata of the logged model.
    """
    # Delegate to Model.log with this module as the flavor; Model.log routes the
    # remaining keyword arguments through to save_model above.
    log_kwargs = dict(
        artifact_path=artifact_path,
        flavor=mlflow.pyfunc,
        loader_module=loader_module,
        data_path=data_path,
        code_path=code_path,
        python_model=python_model,
        artifacts=artifacts,
        conda_env=conda_env,
        registered_model_name=registered_model_name,
        signature=signature,
        input_example=input_example,
        await_registration_for=await_registration_for,
        pip_requirements=pip_requirements,
        extra_pip_requirements=extra_pip_requirements,
    )
    return Model.log(**log_kwargs)
def _save_model_with_loader_module_and_data_path(
    path,
    loader_module,
    data_path=None,
    code_paths=None,
    conda_env=None,
    mlflow_model=None,
    pip_requirements=None,
    extra_pip_requirements=None,
):
    """
    Export model as a generic Python function model.

    :param path: The path to which to save the Python model.
    :param loader_module: The name of the Python module that is used to load the model
                          from ``data_path``. This module must define a method with the prototype
                          ``_load_pyfunc(data_path)``.
    :param data_path: Path to a file or directory containing model data.
    :param code_paths: A list of local filesystem paths to Python file dependencies (or directories
                       containing file dependencies). These files are *prepended* to the system
                       path before the model is loaded.
    :param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
                      this model should be run in.
    :return: Model configuration containing model info.
    """
    data = None

    # Copy the model data into the export directory (under "data/") so the saved
    # model directory is self-contained.
    if data_path is not None:
        model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
        data = model_file

    code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)

    if mlflow_model is None:
        mlflow_model = Model()

    # Register the python_function flavor on the model config and write the MLmodel file.
    mlflow.pyfunc.add_to_model(
        mlflow_model,
        loader_module=loader_module,
        code=code_dir_subpath,
        data=data,
        env=_CONDA_ENV_FILE_NAME,
    )
    mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))

    if conda_env is None:
        if pip_requirements is None:
            default_reqs = get_default_pip_requirements()
            # To ensure `_load_pyfunc` can successfully load the model during the dependency
            # inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.
            inferred_reqs = mlflow.models.infer_pip_requirements(
                path,
                FLAVOR_NAME,
                fallback=default_reqs,
            )
            default_reqs = sorted(set(inferred_reqs).union(default_reqs))
        else:
            default_reqs = None
        conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
            default_reqs,
            pip_requirements,
            extra_pip_requirements,
        )
    else:
        conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)

    # Write the resolved environment files next to the MLmodel file.
    with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)

    # Save `constraints.txt` if necessary
    if pip_constraints:
        write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))

    # Save `requirements.txt`
    write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))

    _PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))

    return mlflow_model
loader_template = """
import importlib
import os
import sys
def load_pyfunc():
{update_path}return importlib.import_module('{main}')._load_pyfunc('{data_path}')
"""
# ---------------------------------------------------------------------------
""" Script for cleaning data for 12 month evaluation. """
import os
import re
import sys
import numpy as np
import pandas as pd
from pprint import pprint
from glob import glob
from typing import List, Dict
from delphi.utils.shell import cd
from delphi.paths import data_dir, south_sudan_data
from delphi.utils.fp import grouper
from functools import partial
from itertools import groupby
def get_state_from_filename(filename, get_state_func):
    """Extract the CamelCase state token from *filename* (via *get_state_func*) and
    split it into space-separated words, e.g. "CentralEquatoria" -> "Central Equatoria"."""
    camelcase_state = get_state_func(filename)
    words = re.findall("[A-Z][^A-Z]*", camelcase_state)
    return " ".join(words)
def process_file_with_single_table(
    filename, variable_name_func, get_state_func, country="South Sudan"
):
    """Read a CSV holding one 12-month table and return a list of per-cell records.

    Each record maps a (row, month) cell to a dict with Variable, Month (1-12),
    Value, State, and Country keys, plus the defaults added by set_defaults.
    """
    df = pd.read_csv(
        filename, index_col=0, names=range(12), header=0, skipinitialspace=True
    )
    # The state is a function of the filename only, so compute it once.
    state = get_state_from_filename(filename, get_state_func)
    records = []
    for row_label in df.index:
        for month_col in df.columns:
            record = {
                "Variable": variable_name_func(row_label),
                "Month": month_col + 1,
                "Value": df.loc[row_label][month_col],
                "State": state,
                "Country": country,
            }
            set_defaults(record)
            records.append(record)
    return records
def set_climis_south_sudan_default_params(
    filename, df, get_state_func=lambda x: x.split("_")[-2]
):
    """Attach CLiMIS/South Sudan metadata columns (Country, Source, Year, State),
    derived from *filename*, to *df* and return it."""
    # Year is the trailing "_<year>" token of the filename stem.
    year = int(filename.split(".")[0].split("_")[-1])
    df["Country"] = "South Sudan"
    df["Source"] = "CLiMIS"
    df["Year"] = year
    df["State"] = get_state_from_filename(filename, get_state_func)
    return df
def make_livestock_prices_table(filename):
    """Build a tidy table of average livestock prices from a CLiMIS price CSV.

    The CSV is indexed by (County, Market) with one column per month; prices are
    averaged over markets within each (County, Month) and tagged with the CLiMIS
    South Sudan metadata, a "SSP" unit, and a variable name derived from the
    animal token of the filename (assumed at split("_")[-3] — TODO confirm for
    all input filenames).
    """
    df = pd.read_csv(
        filename,
        index_col=[0, 1],
        header=0,
        names=["County", "Market"] + list(range(1, 13)),
        skipinitialspace=True,
        thousands=",",
    )
    df = df.stack().reset_index(name="Value")
    df.columns = ["County", "Market", "Month", "Value"]
    # Mean price over markets within each county/month.
    df = df.pivot_table(values="Value", index=["County", "Month"])
    df = set_climis_south_sudan_default_params(filename, df)
    df["Unit"] = "SSP"
    # Fix: reusing double quotes inside a double-quoted f-string is a SyntaxError
    # before Python 3.12 (PEP 701); extract the animal name first.
    animal = filename.split("_")[-3].lower()
    df["Variable"] = f"Average price of {animal}"
    df = df.reset_index()
    return df
def set_defaults(record: Dict):
    """Fill in the metadata fields shared by all 2017 CLiMIS records.

    Mutates *record* in place (overwriting any existing values for these
    keys); returns None, following the stdlib in-place convention.
    """
    record["Year"] = 2017
    record["Country"] = "South Sudan"
    record["Unit"] = "%"
    record["Source"] = "CLiMIS"
    record["County"] = None
def make_group_dict(groups):
    """Pair consecutive (header, body) groups, keyed by the header's first cell."""
    paired = grouper(groups, 2)
    return {header[0][0]: body for header, body in paired}
def make_df_from_group(k, v, index_func):
    """Build a tidy (Variable, Month, Value) frame from one table group.

    *v* is a list of rows whose first element is the row label; *index_func*
    maps (group key, row label) to the final variable name.
    """
    frame = pd.DataFrame(v).set_index(0)
    frame.index = [index_func(k, label) for label in frame.index]
    tidy = frame.stack().reset_index(name="Value")
    tidy.columns = ["Variable", "Month", "Value"]
    tidy["Month"] = tidy["Month"].astype(int)
    return tidy
def process_file_with_multiple_tables(filename, header_dict):
    """Parse a CLiMIS CSV that stacks several small tables in one file.

    A table starts at a header row whose non-index cells are all empty;
    *header_dict* maps a header label to a function turning a row label
    into a fully-qualified variable name. Returns a concatenated tidy
    DataFrame, or None if the file yields no usable tables.
    """
    dfs = []
    df = pd.read_csv(filename, index_col=0, names=range(12), header=0)
    # Define a grouping key function to split the CSV by the header rows
    grouping_key_function = lambda _tuple: _tuple[1][1:].isna().all()
    iterrows = filter(lambda r: r[1][0] != "", df.iterrows())
    key_group_tuples = groupby(iterrows, grouping_key_function)
    groups = [
        [
            [x[0].strip()] + x[1].values.tolist()
            for x in list(g)
            if isinstance(x[0], str)
        ]
        for k, g in key_group_tuples
    ]
    for k, v in make_group_dict(groups).items():
        if v is not None:
            df = make_df_from_group(
                k, v, lambda k, i: header_dict.get(k.strip(), lambda x: k)(i)
            )
            df["Value"] = df["Value"].replace(" ", np.nan)
            df = df.dropna()
            df["County"] = None
            df = set_climis_south_sudan_default_params(filename, df)
            if len(df.Value.values) > 0 and any(
                map(lambda v: "%" in v, df["Value"].values)
            ):
                df.Value = df.Value.str.replace("%", "")
                df["Unit"] = "%"
            else:
                df["Unit"] = None
            if len(df["Variable"].values) > 0:
                if "SSP" in df["Variable"].values[0]:
                    # BUG FIX: the original passed "\(SSP\)" — an invalid
                    # escape sequence, and silently a literal (no-op)
                    # replacement under pandas >= 2.0 where regex defaults
                    # to False. Use a raw string and regex=True explicitly.
                    df["Variable"] = (
                        df["Variable"]
                        .str.replace(r"\(SSP\)", "", regex=True)
                        .str.strip()
                    )
                    df["Unit"] = "SSP"
            if len(df.Value.values) > 0 and "-" in df.Value.values[0]:
                # For percentage ranges, take the mean value
                df.Value = (
                    df.Value.str.strip()
                    .str.split("-")
                    .map(lambda x: list(map(float, x)))
                    .map(lambda x: np.mean(x))
                )
            dfs.append(df)
    return pd.concat(dfs) if dfs else None
def process_climis_crop_production_data(data_dir: str):
    """Process CLiMIS crop production data into tidy records.

    Reads the estimated production/consumption balance CSVs and resolves
    each "State/County" label against the FEWSNET state/county lookup.
    Returns a DataFrame of records.
    """
    # BUG FIX: this glob pattern was missing its f-prefix, so "{data_dir}"
    # was matched literally and no files were ever found.
    climis_crop_production_csvs = glob(
        f"{data_dir}/Climis South Sudan Crop Production Data/"
        "Crops_EstimatedProductionConsumptionBalance*.csv"
    )
    state_county_df = pd.read_csv(
        "data/south_sudan_data_fewsnet.tsv", skipinitialspace=True
    )
    combined_records = []
    for f in climis_crop_production_csvs:
        # Filenames look like ..._<something>_<year>.csv
        year = int(f.split("/")[-1].split("_")[2].split(".")[0])
        df = pd.read_csv(f).dropna()
        for i, r in df.iterrows():
            record = {
                "Year": year,
                "Month": None,
                "Source": "CLiMIS",
                "Country": "South Sudan",
            }
            region = r["State/County"].strip()
            # A region label may be a state or a county; counties are
            # mapped back to their state through the lookup table.
            if region.lower() in state_county_df["State"].str.lower().values:
                record["State"] = region
                record["County"] = None
            else:
                potential_states = state_county_df.loc[
                    state_county_df["County"] == region
                ]["State"]
                record["State"] = (
                    potential_states.iloc[0]
                    if len(potential_states) != 0
                    else None
                )
                record["County"] = region
            for field in r.index:
                if field != "State/County":
                    if "Net Cereal production" in field:
                        record["Variable"] = "Net Cereal Production"
                        record["Value"] = r[field]
                        # Units appear parenthesized at the end of the
                        # column header, e.g. "... (mt)".
                        if field.split()[-1].startswith("("):
                            record["Unit"] = field.split()[-1][1:-1].lower()
                        else:
                            record["Unit"] = None
                        # NOTE(review): the same dict object is appended; if
                        # a row ever had two matching fields the records
                        # would alias each other — confirm against the data.
                        combined_records.append(record)
    df = pd.DataFrame(combined_records)
    return df
def process_climis_livestock_data(data_dir: str):
    """ Process CLiMIS livestock data.

    Gathers single-table CSVs (body condition, production, loss) and
    multi-table directories (diseases, ownership, migration, pasture,
    water sources, market prices) into one tidy DataFrame.
    """
    records = []
    livestock_data_dir = f"{data_dir}/Climis South Sudan Livestock Data"
    for filename in glob(
        f"{livestock_data_dir}/Livestock Body Condition/*2017.csv"
    ):
        records += process_file_with_single_table(
            filename,
            # BUG FIX: the original nested the enclosing double quote inside
            # the f-string, a SyntaxError before Python 3.12 (PEP 701).
            lambda ind: f"Percentage of {filename.split('_')[-3].lower()} with body condition {ind.lower()}",
            lambda f: f.split("_")[-2],
        )
    for filename in glob(
        f"{livestock_data_dir}/Livestock Production/*2017.csv"
    ):
        records += process_file_with_single_table(
            filename,
            lambda ind: "Percentage of householding at least milking one of their livestocks",
            lambda f: f.split("_")[1],
        )
    disease_acronym_dict = {
        "FMD": "Foot and Mouth Disease (FMD)",
        "LSD": "Lumpy Skin Disease (LSD)",
        "CBPP": "Contagious Bovine Pleuropneumonia (CBPP)",
        "CCPP": "Contagious Caprine Pleuropneumonia (CCPP)",
        "NC": "NC",
        "PPR": "Peste des Petits Ruminants (PPR)",
        "Others": "Other diseases",
    }
    func = (
        lambda k, i: f"Percentage of livestock with {disease_acronym_dict[k]} that are {i.lower().strip()}"
    )
    # partial(func, k) freezes k now, avoiding the late-binding closure trap.
    livestock_disease_header_dict = {
        k: partial(func, k) for k in disease_acronym_dict
    }
    livestock_migration_header_dict = {
        "Livestock migration": lambda i: f"Percentage of livestock migrating {i.split()[-1].lower()}",
        "Distance covered": lambda i: "Distance covered by migrating livestock",
        "Proportion of livestock that migrated": lambda i: "Percentage of livestock that migrated",
        "Migration normal at this time of the year": lambda i: f"Migration normal at this time of year, {i}",
        "Duration in months when the migrated animals are expected to be back after": lambda i: "Duration in months when the migrated animals are expected to be back after",
        "Reasons for livestock migration": lambda i: f"Percentage of livestock migrating due to {i.lower()}",
    }
    def process_directory(dirname, header_dict):
        # Concatenate every parseable multi-table CSV in the directory.
        return pd.concat(
            [
                df
                for df in [
                    process_file_with_multiple_tables(f, header_dict)
                    for f in glob(f"{livestock_data_dir}/{dirname}/*2017.csv")
                ]
                if df is not None
            ]
        )
    # BUG FIX (PEP 701): single-quote the nested literals for pre-3.12 Python.
    func2 = (
        lambda k, i: f"{k.replace('animals', i.lower()).replace('stock', 'stock of '+i.lower()).replace('animal', i.lower())}"
    )
    livestock_ownership_headers = [
        "Average current stock per household",
        "Average number of animals born per household during last 4 weeks",
        "Average number of animals acquired per household during last 4 weeks (dowry, purchase, gift)",
        "Average number of animals given out as bride price/gift per household during last 4 weeks per household",
        "Average number of animals sold per household during last 4 weeks household",
        "Average price of animal sold (SSP)",
        "Average number of animals exchanged for grain per household during last 4 weeks",
        "Average number of animals died/slaughtered/lost per household during last 4 weeks",
    ]
    livestock_ownership_header_dict = {
        k: partial(func2, k) for k in livestock_ownership_headers
    }
    ownership_df = process_directory(
        "Livestock Ownership", livestock_ownership_header_dict
    )
    disease_df = process_directory(
        "Livestock Diseases", livestock_disease_header_dict
    )
    livestock_migration_df = process_directory(
        "Livestock Migration", livestock_migration_header_dict
    )
    livestock_pasture_header_dict = {
        "Pasture condtion": lambda i: f"Percentage of livestock pasture in {i.lower()} condition",
        "Pasture condition compared to similar time in a normal year": lambda i: f"Percentage of livestock pasture in {i.lower()} condition compared to a similar time in a normal year",
        "Browse condition": lambda i: f"Percentage of livestock pasture in {i.lower()} browse condition",
        "Browse condition compared to similar time in a normal year": lambda i: f"Percentage of livestock pasture in {i.lower()} browse condition compared to a similar time in a normal year",
        # BUG FIX (PEP 701): nested quotes switched to single quotes.
        "Presence of constraints in accessing forage": lambda i: f"Percentage reporting the {('presence' if i=='Yes' else 'absence')} of constraints in accessing forage",
        "Main forage constraints": lambda i: f"Percentage reporting {i.lower()} as the main forage constraint",
    }
    livestock_pasture_df = process_directory(
        "Livestock Pasture", livestock_pasture_header_dict
    )
    livestock_water_sources_header_dict = {
        "Main water sources": lambda i: f"Percentage of livestock whose main water source is {i.lower()}",
        "Number of days livestock have been watered in the last 7 days": lambda i: f"Number of days {i.lower()} have been watered in the last 7 days",
    }
    livestock_water_sources_df = process_directory(
        "Livestock Water Sources", livestock_water_sources_header_dict
    )
    for filename in glob(f"{livestock_data_dir}/Livestock Loss/*2017.csv"):
        records += process_file_with_single_table(
            filename,
            # BUG FIX (PEP 701): nested quotes switched to single quotes.
            lambda ind: f"Percentage of {filename.split('_')[-3].lower()} loss accounted for by {ind.lower()}",
            lambda f: f.split("_")[-2],
        )
    # Strip percent signs so values can later be treated numerically.
    for record in records:
        if isinstance(record["Value"], str):
            record["Value"] = record["Value"].replace("%", "")
    livestock_prices_df = pd.concat(
        [
            make_livestock_prices_table(f)
            for f in glob(
                f"{livestock_data_dir}/Livestock Market Prices/*2017.csv"
            )
        ]
    )
    climis_livestock_data_df = pd.concat(
        [
            pd.DataFrame(records),
            disease_df,
            ownership_df,
            livestock_prices_df,
            livestock_migration_df,
            livestock_pasture_df,
            livestock_water_sources_df,
        ],
        sort=True
    )
    return climis_livestock_data_df
def process_climis_import_data(data_dir: str) -> pd.DataFrame:
    """Aggregate CLiMIS cereal import CSVs into one record per (Year, Month).

    Each CSV has years as rows and months 1-12 as columns; values are
    summed across files and labeled as total cereal grain imports.
    """
    frames = []
    for path in glob(f"{data_dir}/CLiMIS Import Data/*.csv"):
        raw = pd.read_csv(path, names=range(1, 13), header=0, thousands=",")
        tidy = raw.stack().reset_index(name="Value")
        tidy.columns = ["Year", "Month", "Value"]
        tidy["Month"] = tidy["Month"].astype(int)
        tidy["Year"] = tidy["Year"].astype(int)
        frames.append(tidy)
    df = (
        pd.concat(frames)
        .pivot_table(values="Value", index=["Year", "Month"], aggfunc=np.sum)
        .reset_index()
    )
    df.columns = ["Year", "Month", "Value"]
    df["Variable"] = "Total amount of cereal grains imported"
    df["Unit"] = "metric tonne"
    df["Country"] = "South Sudan"
    df["County"] = None
    df["State"] = None
    return df
def process_climis_rainfall_data(data_dir: str) -> pd.DataFrame:
    """Collect CLiMIS monthly rainfall (mm) into the common tidy format.

    Combines per-state CSV files (state and year parsed from the filename)
    with the 6-month-summary XLSX workbook.
    """
    dfs = []
    # Read CSV files first
    for f in glob(f"{data_dir}/CLiMIS South Sudan Rainfall Data in"
                  " Millimeters/*.csv"):
        # Get the name of the table without path and extension
        table_name = os.path.basename(f)[:-4]
        # Get state and year from groups
        pattern = r'^(.*) ([0-9]+) Rainfall'
        state, year = re.match(pattern, table_name).groups()
        df = pd.read_csv(f, header=0, thousands=",")
        cols = ['Variable', 'Year', 'Month', 'Value', 'Unit', 'Source',
                'State', 'County', 'Country']
        df_new = pd.DataFrame(columns=cols)
        df_new['Month'] = range(1, 13)
        df_new['Year'] = int(year)
        # NOTE(review): the trailing space in 'monthly rainfall data ' matches
        # the raw CSV header — do not "fix" it without checking the files.
        df_new['Value'] = df['monthly rainfall data ']
        df_new['Variable'] = 'Rainfall'
        df_new['Unit'] = 'millimeters'
        df_new['County'] = None
        df_new['State'] = state
        df_new['Source'] = 'CLiMIS'
        df_new['Country'] = 'South Sudan'
        dfs.append(df_new)
    df1 = pd.concat(dfs)
    # Read XLSX file next
    fname = f'{data_dir}/CLiMIS South Sudan Rainfall Data in Millimeters/' + \
            'Rainfall-Early_Warning_6month_Summary-2017-data_table.xlsx'
    df = pd.read_excel(fname, sheet_name='Rainfall Data', header=1)
    cols = ['Variable', 'Year', 'Month', 'Value', 'Unit', 'Source',
            'State', 'County', 'Country']
    df_new = pd.DataFrame(columns=cols)
    states = []
    counties = []
    years = []
    months = []
    values = []
    for row in df.itertuples():
        # Sheet layout: columns 1-3 are State, County, Year; the next 12
        # columns hold the Jan-Dec values. Missing months are skipped.
        state, county, year = row[1:4]
        for month in range(1,13):
            value = row[3 + month]
            if pd.isnull(value):
                continue
            states.append(state)
            counties.append(county)
            years.append(year)
            months.append(month)
            values.append(value)
    df_new['Year'] = years
    df_new['Month'] = months
    df_new['Value'] = values
    df_new['County'] = counties
    df_new['State'] = states
    df_new['Variable'] = 'Rainfall'
    df_new['Unit'] = 'millimeters'
    df_new['Source'] = 'CLiMIS'
    df_new['Country'] = 'South Sudan'
    df = pd.concat([df1, df_new])
    return df
def process_UNHCR_data(data_dir: str):
    """Load the UNHCR refugee-count TSV into the common record format.

    The file is indexed by date; Year/Month are derived from that index and
    the 'individuals' column becomes 'Value'.
    """
    df = pd.read_table(
        f"{data_dir}/UNHCR Refugee Data/RefugeeData.tsv",
        index_col=0,
        parse_dates=True,
        infer_datetime_format=True,
    )
    df["Year"] = df.index.year
    df["Month"] = df.index.month
    df.rename(columns={"individuals": "Value"}, inplace=True)
    constant_columns = {
        "Country": "South Sudan",
        "State": None,
        "County": None,
        "Source": "UNHCR",
        "Unit": None,
        "Variable": "Number of refugees",
    }
    for column, value in constant_columns.items():
        df[column] = value
    del df["unix_timestamp"]
    return df
def create_combined_table(data_dir: str, columns: List[str]) -> pd.DataFrame:
    """Concatenate every processed data source and order the columns."""
    crop_df = process_climis_crop_production_data(data_dir)
    livestock_df = process_climis_livestock_data(data_dir)
    import_df = process_climis_import_data(data_dir)
    rainfall_df = process_climis_rainfall_data(data_dir)
    unhcr_df = process_UNHCR_data(data_dir)
    # Severe acute malnutrition and inflation rate indicators from PDFs
    pdf_indicators_df = pd.read_table(f"{data_dir}/indicator_data_from_pdfs.tsv")
    combined = pd.concat(
        [
            crop_df,
            livestock_df,
            import_df,
            rainfall_df,
            pdf_indicators_df,
            unhcr_df,
        ],
        sort=True,
    )
    return combined[columns]
if __name__ == "__main__":
    # Output column order for the combined indicator table.
    columns = [
        "Variable",
        "Year",
        "Month",
        "Value",
        "Unit",
        "Source",
        "State",
        "County",
        "Country",
    ]
    # NOTE(review): this rebinds (shadows) the `data_dir` Path imported from
    # delphi.paths with its string form — intentional, but easy to misread.
    data_dir = str(data_dir / "raw" / "wm_12_month_evaluation")
    df = create_combined_table(data_dir, columns)
    # Normalize Year (some sources produce float/object dtypes).
    df["Year"] = df["Year"].astype(int)
    # First CLI argument is the output TSV path.
    df.to_csv(sys.argv[1], index=False, sep="\t")
| """ Script for cleaning data for 12 month evaluation. """
import os
import re
import sys
import numpy as np
import pandas as pd
from pprint import pprint
from glob import glob
from typing import List, Dict
from delphi.utils.shell import cd
from delphi.paths import data_dir, south_sudan_data
from delphi.utils.fp import grouper
from functools import partial
from itertools import groupby
def get_state_from_filename(filename, get_state_func):
    """Recover a human-readable state name from *filename*.

    *get_state_func* pulls a CamelCase token out of the filename (e.g.
    ``"WesternEquatoria"``); the token is split at capital letters and the
    pieces re-joined with single spaces (``"Western Equatoria"``).
    """
    camel_token = get_state_func(filename)
    words = re.findall("[A-Z][^A-Z]*", camel_token)
    return " ".join(words)
def process_file_with_single_table(
    filename, variable_name_func, get_state_func, country="South Sudan"
):
    """Flatten a single-table CLiMIS CSV into a list of record dicts.

    Rows are variables (named via *variable_name_func* from the row label),
    the 12 data columns are months (1-12); every cell becomes one record,
    finished off with the shared 2017 defaults from ``set_defaults``.
    """
    table = pd.read_csv(
        filename, index_col=0, names=range(12), header=0, skipinitialspace=True
    )
    out = []
    for row_label in table.index:
        for month_col in table.columns:
            rec = {
                "Variable": variable_name_func(row_label),
                "Month": month_col + 1,
                "Value": table.loc[row_label][month_col],
                "State": get_state_from_filename(filename, get_state_func),
                "Country": country,
            }
            set_defaults(rec)
            out.append(rec)
    return out
def set_climis_south_sudan_default_params(
    filename, df, get_state_func=lambda x: x.split("_")[-2]
):
    """Attach the standard CLiMIS/South Sudan metadata columns to *df*.

    The year is parsed from the filename's last ``_``-separated token, the
    state from its CamelCase token via ``get_state_from_filename``.
    Mutates and returns *df*.
    """
    year = int(filename.split(".")[0].split("_")[-1])
    df["Country"] = "South Sudan"
    df["Source"] = "CLiMIS"
    df["Year"] = year
    df["State"] = get_state_from_filename(filename, get_state_func)
    return df
def make_livestock_prices_table(filename):
    """Tidy one CLiMIS livestock market price CSV into the common format.

    The animal name comes from the filename (third ``_``-token from the
    end); prices (SSP) are averaged over markets per (County, Month).
    """
    raw = pd.read_csv(
        filename,
        index_col=[0, 1],
        header=0,
        names=["County", "Market"] + list(range(1, 13)),
        skipinitialspace=True,
        thousands=",",
    )
    tidy = raw.stack().reset_index(name="Value")
    tidy.columns = ["County", "Market", "Month", "Value"]
    # pivot_table defaults to the mean, averaging over markets.
    tidy = tidy.pivot_table(values="Value", index=["County", "Month"])
    tidy = set_climis_south_sudan_default_params(filename, tidy)
    tidy["Unit"] = "SSP"
    animal = filename.split("_")[-3].lower()
    tidy["Variable"] = f"Average price of {animal}"
    return tidy.reset_index()
def set_defaults(record: Dict):
    """Fill in the metadata fields shared by all 2017 CLiMIS records.

    Mutates *record* in place (overwriting any existing values for these
    keys); returns None, following the stdlib in-place convention.
    """
    record["Year"] = 2017
    record["Country"] = "South Sudan"
    record["Unit"] = "%"
    record["Source"] = "CLiMIS"
    record["County"] = None
def make_group_dict(groups):
    """Pair consecutive (header, body) groups, keyed by the header's first cell."""
    paired = grouper(groups, 2)
    return {header[0][0]: body for header, body in paired}
def make_df_from_group(k, v, index_func):
    """Build a tidy (Variable, Month, Value) frame from one table group.

    *v* is a list of rows whose first element is the row label; *index_func*
    maps (group key, row label) to the final variable name.
    """
    frame = pd.DataFrame(v).set_index(0)
    frame.index = [index_func(k, label) for label in frame.index]
    tidy = frame.stack().reset_index(name="Value")
    tidy.columns = ["Variable", "Month", "Value"]
    tidy["Month"] = tidy["Month"].astype(int)
    return tidy
def process_file_with_multiple_tables(filename, header_dict):
    """Parse a CLiMIS CSV that stacks several small tables in one file.

    A table starts at a header row whose non-index cells are all empty;
    *header_dict* maps a header label to a function turning a row label
    into a fully-qualified variable name. Returns a concatenated tidy
    DataFrame, or None if the file yields no usable tables.
    """
    dfs = []
    df = pd.read_csv(filename, index_col=0, names=range(12), header=0)
    # Define a grouping key function to split the CSV by the header rows
    grouping_key_function = lambda _tuple: _tuple[1][1:].isna().all()
    iterrows = filter(lambda r: r[1][0] != "", df.iterrows())
    key_group_tuples = groupby(iterrows, grouping_key_function)
    groups = [
        [
            [x[0].strip()] + x[1].values.tolist()
            for x in list(g)
            if isinstance(x[0], str)
        ]
        for k, g in key_group_tuples
    ]
    for k, v in make_group_dict(groups).items():
        if v is not None:
            df = make_df_from_group(
                k, v, lambda k, i: header_dict.get(k.strip(), lambda x: k)(i)
            )
            df["Value"] = df["Value"].replace(" ", np.nan)
            df = df.dropna()
            df["County"] = None
            df = set_climis_south_sudan_default_params(filename, df)
            if len(df.Value.values) > 0 and any(
                map(lambda v: "%" in v, df["Value"].values)
            ):
                df.Value = df.Value.str.replace("%", "")
                df["Unit"] = "%"
            else:
                df["Unit"] = None
            if len(df["Variable"].values) > 0:
                if "SSP" in df["Variable"].values[0]:
                    # BUG FIX: the original passed "\(SSP\)" — an invalid
                    # escape sequence, and silently a literal (no-op)
                    # replacement under pandas >= 2.0 where regex defaults
                    # to False. Use a raw string and regex=True explicitly.
                    df["Variable"] = (
                        df["Variable"]
                        .str.replace(r"\(SSP\)", "", regex=True)
                        .str.strip()
                    )
                    df["Unit"] = "SSP"
            if len(df.Value.values) > 0 and "-" in df.Value.values[0]:
                # For percentage ranges, take the mean value
                df.Value = (
                    df.Value.str.strip()
                    .str.split("-")
                    .map(lambda x: list(map(float, x)))
                    .map(lambda x: np.mean(x))
                )
            dfs.append(df)
    return pd.concat(dfs) if dfs else None
def process_climis_crop_production_data(data_dir: str):
    """Process CLiMIS crop production data into tidy records.

    Reads the estimated production/consumption balance CSVs and resolves
    each "State/County" label against the FEWSNET state/county lookup.
    Returns a DataFrame of records.
    """
    # BUG FIX: this glob pattern was missing its f-prefix, so "{data_dir}"
    # was matched literally and no files were ever found.
    climis_crop_production_csvs = glob(
        f"{data_dir}/Climis South Sudan Crop Production Data/"
        "Crops_EstimatedProductionConsumptionBalance*.csv"
    )
    state_county_df = pd.read_csv(
        "data/south_sudan_data_fewsnet.tsv", skipinitialspace=True
    )
    combined_records = []
    for f in climis_crop_production_csvs:
        # Filenames look like ..._<something>_<year>.csv
        year = int(f.split("/")[-1].split("_")[2].split(".")[0])
        df = pd.read_csv(f).dropna()
        for i, r in df.iterrows():
            record = {
                "Year": year,
                "Month": None,
                "Source": "CLiMIS",
                "Country": "South Sudan",
            }
            region = r["State/County"].strip()
            # A region label may be a state or a county; counties are
            # mapped back to their state through the lookup table.
            if region.lower() in state_county_df["State"].str.lower().values:
                record["State"] = region
                record["County"] = None
            else:
                potential_states = state_county_df.loc[
                    state_county_df["County"] == region
                ]["State"]
                record["State"] = (
                    potential_states.iloc[0]
                    if len(potential_states) != 0
                    else None
                )
                record["County"] = region
            for field in r.index:
                if field != "State/County":
                    if "Net Cereal production" in field:
                        record["Variable"] = "Net Cereal Production"
                        record["Value"] = r[field]
                        # Units appear parenthesized at the end of the
                        # column header, e.g. "... (mt)".
                        if field.split()[-1].startswith("("):
                            record["Unit"] = field.split()[-1][1:-1].lower()
                        else:
                            record["Unit"] = None
                        # NOTE(review): the same dict object is appended; if
                        # a row ever had two matching fields the records
                        # would alias each other — confirm against the data.
                        combined_records.append(record)
    df = pd.DataFrame(combined_records)
    return df
def process_climis_livestock_data(data_dir: str):
    """ Process CLiMIS livestock data.

    Gathers single-table CSVs (body condition, production, loss) and
    multi-table directories (diseases, ownership, migration, pasture,
    water sources, market prices) into one tidy DataFrame.
    """
    records = []
    livestock_data_dir = f"{data_dir}/Climis South Sudan Livestock Data"
    # Single-table files: livestock body condition, per animal type.
    for filename in glob(
        f"{livestock_data_dir}/Livestock Body Condition/*2017.csv"
    ):
        records += process_file_with_single_table(
            filename,
            lambda ind: f"Percentage of {filename.split('_')[-3].lower()} with body condition {ind.lower()}",
            lambda f: f.split("_")[-2],
        )
    # Single-table files: household milking indicator.
    for filename in glob(
        f"{livestock_data_dir}/Livestock Production/*2017.csv"
    ):
        records += process_file_with_single_table(
            filename,
            lambda ind: "Percentage of householding at least milking one of their livestocks",
            lambda f: f.split("_")[1],
        )
    disease_acronym_dict = {
        "FMD": "Foot and Mouth Disease (FMD)",
        "LSD": "Lumpy Skin Disease (LSD)",
        "CBPP": "Contagious Bovine Pleuropneumonia (CBPP)",
        "CCPP": "Contagious Caprine Pleuropneumonia (CCPP)",
        "NC": "NC",
        "PPR": "Peste des Petits Ruminants (PPR)",
        "Others": "Other diseases",
    }
    func = (
        lambda k, i: f"Percentage of livestock with {disease_acronym_dict[k]} that are {i.lower().strip()}"
    )
    # partial(func, k) freezes k now, avoiding the late-binding closure trap.
    livestock_disease_header_dict = {
        k: partial(func, k) for k in disease_acronym_dict
    }
    livestock_migration_header_dict = {
        "Livestock migration": lambda i: f"Percentage of livestock migrating {i.split()[-1].lower()}",
        "Distance covered": lambda i: "Distance covered by migrating livestock",
        "Proportion of livestock that migrated": lambda i: "Percentage of livestock that migrated",
        "Migration normal at this time of the year": lambda i: f"Migration normal at this time of year, {i}",
        "Duration in months when the migrated animals are expected to be back after": lambda i: "Duration in months when the migrated animals are expected to be back after",
        "Reasons for livestock migration": lambda i: f"Percentage of livestock migrating due to {i.lower()}",
    }
    def process_directory(dirname, header_dict):
        # Concatenate every parseable multi-table CSV in the directory.
        return pd.concat(
            [
                df
                for df in [
                    process_file_with_multiple_tables(f, header_dict)
                    for f in glob(f"{livestock_data_dir}/{dirname}/*2017.csv")
                ]
                if df is not None
            ]
        )
    func2 = (
        lambda k, i: f"{k.replace('animals', i.lower()).replace('stock', 'stock of '+i.lower()).replace('animal', i.lower())}"
    )
    livestock_ownership_headers = [
        "Average current stock per household",
        "Average number of animals born per household during last 4 weeks",
        "Average number of animals acquired per household during last 4 weeks (dowry, purchase, gift)",
        "Average number of animals given out as bride price/gift per household during last 4 weeks per household",
        "Average number of animals sold per household during last 4 weeks household",
        "Average price of animal sold (SSP)",
        "Average number of animals exchanged for grain per household during last 4 weeks",
        "Average number of animals died/slaughtered/lost per household during last 4 weeks",
    ]
    livestock_ownership_header_dict = {
        k: partial(func2, k) for k in livestock_ownership_headers
    }
    ownership_df = process_directory(
        "Livestock Ownership", livestock_ownership_header_dict
    )
    disease_df = process_directory(
        "Livestock Diseases", livestock_disease_header_dict
    )
    livestock_migration_df = process_directory(
        "Livestock Migration", livestock_migration_header_dict
    )
    livestock_pasture_header_dict = {
        "Pasture condtion": lambda i: f"Percentage of livestock pasture in {i.lower()} condition",
        "Pasture condition compared to similar time in a normal year": lambda i: f"Percentage of livestock pasture in {i.lower()} condition compared to a similar time in a normal year",
        "Browse condition": lambda i: f"Percentage of livestock pasture in {i.lower()} browse condition",
        "Browse condition compared to similar time in a normal year": lambda i: f"Percentage of livestock pasture in {i.lower()} browse condition compared to a similar time in a normal year",
        "Presence of constraints in accessing forage": lambda i: f"Percentage reporting the {('presence' if i=='Yes' else 'absence')} of constraints in accessing forage",
        "Main forage constraints": lambda i: f"Percentage reporting {i.lower()} as the main forage constraint",
    }
    livestock_pasture_df = process_directory(
        "Livestock Pasture", livestock_pasture_header_dict
    )
    livestock_water_sources_header_dict = {
        "Main water sources": lambda i: f"Percentage of livestock whose main water source is {i.lower()}",
        "Number of days livestock have been watered in the last 7 days": lambda i: f"Number of days {i.lower()} have been watered in the last 7 days",
    }
    livestock_water_sources_df = process_directory(
        "Livestock Water Sources", livestock_water_sources_header_dict
    )
    # Single-table files: causes of livestock loss, per animal type.
    for filename in glob(f"{livestock_data_dir}/Livestock Loss/*2017.csv"):
        records += process_file_with_single_table(
            filename,
            lambda ind: f"Percentage of {filename.split('_')[-3].lower()} loss accounted for by {ind.lower()}",
            lambda f: f.split("_")[-2],
        )
    # Strip percent signs so values can later be treated numerically.
    for record in records:
        if isinstance(record["Value"], str):
            record["Value"] = record["Value"].replace("%", "")
    livestock_prices_df = pd.concat(
        [
            make_livestock_prices_table(f)
            for f in glob(
                f"{livestock_data_dir}/Livestock Market Prices/*2017.csv"
            )
        ]
    )
    climis_livestock_data_df = pd.concat(
        [
            pd.DataFrame(records),
            disease_df,
            ownership_df,
            livestock_prices_df,
            livestock_migration_df,
            livestock_pasture_df,
            livestock_water_sources_df,
        ],
        sort=True
    )
    return climis_livestock_data_df
def process_climis_import_data(data_dir: str) -> pd.DataFrame:
    """Aggregate CLiMIS cereal import CSVs into one record per (Year, Month).

    Each CSV has years as rows and months 1-12 as columns; values are
    summed across files and labeled as total cereal grain imports.
    """
    frames = []
    for path in glob(f"{data_dir}/CLiMIS Import Data/*.csv"):
        raw = pd.read_csv(path, names=range(1, 13), header=0, thousands=",")
        tidy = raw.stack().reset_index(name="Value")
        tidy.columns = ["Year", "Month", "Value"]
        tidy["Month"] = tidy["Month"].astype(int)
        tidy["Year"] = tidy["Year"].astype(int)
        frames.append(tidy)
    df = (
        pd.concat(frames)
        .pivot_table(values="Value", index=["Year", "Month"], aggfunc=np.sum)
        .reset_index()
    )
    df.columns = ["Year", "Month", "Value"]
    df["Variable"] = "Total amount of cereal grains imported"
    df["Unit"] = "metric tonne"
    df["Country"] = "South Sudan"
    df["County"] = None
    df["State"] = None
    return df
def process_climis_rainfall_data(data_dir: str) -> pd.DataFrame:
    """Collect CLiMIS monthly rainfall (mm) into the common tidy format.

    Combines per-state CSV files (state and year parsed from the filename)
    with the 6-month-summary XLSX workbook.
    """
    dfs = []
    # Read CSV files first
    for f in glob(f"{data_dir}/CLiMIS South Sudan Rainfall Data in"
                  " Millimeters/*.csv"):
        # Get the name of the table without path and extension
        table_name = os.path.basename(f)[:-4]
        # Get state and year from groups
        pattern = r'^(.*) ([0-9]+) Rainfall'
        state, year = re.match(pattern, table_name).groups()
        df = pd.read_csv(f, header=0, thousands=",")
        cols = ['Variable', 'Year', 'Month', 'Value', 'Unit', 'Source',
                'State', 'County', 'Country']
        df_new = pd.DataFrame(columns=cols)
        df_new['Month'] = range(1, 13)
        df_new['Year'] = int(year)
        # NOTE(review): the trailing space in 'monthly rainfall data ' matches
        # the raw CSV header — do not "fix" it without checking the files.
        df_new['Value'] = df['monthly rainfall data ']
        df_new['Variable'] = 'Rainfall'
        df_new['Unit'] = 'millimeters'
        df_new['County'] = None
        df_new['State'] = state
        df_new['Source'] = 'CLiMIS'
        df_new['Country'] = 'South Sudan'
        dfs.append(df_new)
    df1 = pd.concat(dfs)
    # Read XLSX file next
    fname = f'{data_dir}/CLiMIS South Sudan Rainfall Data in Millimeters/' + \
            'Rainfall-Early_Warning_6month_Summary-2017-data_table.xlsx'
    df = pd.read_excel(fname, sheet_name='Rainfall Data', header=1)
    cols = ['Variable', 'Year', 'Month', 'Value', 'Unit', 'Source',
            'State', 'County', 'Country']
    df_new = pd.DataFrame(columns=cols)
    states = []
    counties = []
    years = []
    months = []
    values = []
    for row in df.itertuples():
        # Sheet layout: columns 1-3 are State, County, Year; the next 12
        # columns hold the Jan-Dec values. Missing months are skipped.
        state, county, year = row[1:4]
        for month in range(1,13):
            value = row[3 + month]
            if pd.isnull(value):
                continue
            states.append(state)
            counties.append(county)
            years.append(year)
            months.append(month)
            values.append(value)
    df_new['Year'] = years
    df_new['Month'] = months
    df_new['Value'] = values
    df_new['County'] = counties
    df_new['State'] = states
    df_new['Variable'] = 'Rainfall'
    df_new['Unit'] = 'millimeters'
    df_new['Source'] = 'CLiMIS'
    df_new['Country'] = 'South Sudan'
    df = pd.concat([df1, df_new])
    return df
def process_UNHCR_data(data_dir: str):
    """Load the UNHCR refugee-count TSV into the common record format.

    The file is indexed by date; Year/Month are derived from that index and
    the 'individuals' column becomes 'Value'.
    """
    df = pd.read_table(
        f"{data_dir}/UNHCR Refugee Data/RefugeeData.tsv",
        index_col=0,
        parse_dates=True,
        infer_datetime_format=True,
    )
    df["Year"] = df.index.year
    df["Month"] = df.index.month
    df.rename(columns={"individuals": "Value"}, inplace=True)
    constant_columns = {
        "Country": "South Sudan",
        "State": None,
        "County": None,
        "Source": "UNHCR",
        "Unit": None,
        "Variable": "Number of refugees",
    }
    for column, value in constant_columns.items():
        df[column] = value
    del df["unix_timestamp"]
    return df
def create_combined_table(data_dir: str, columns: List[str]) -> pd.DataFrame:
    """Concatenate every processed data source and order the columns."""
    crop_df = process_climis_crop_production_data(data_dir)
    livestock_df = process_climis_livestock_data(data_dir)
    import_df = process_climis_import_data(data_dir)
    rainfall_df = process_climis_rainfall_data(data_dir)
    unhcr_df = process_UNHCR_data(data_dir)
    # Severe acute malnutrition and inflation rate indicators from PDFs
    pdf_indicators_df = pd.read_table(f"{data_dir}/indicator_data_from_pdfs.tsv")
    combined = pd.concat(
        [
            crop_df,
            livestock_df,
            import_df,
            rainfall_df,
            pdf_indicators_df,
            unhcr_df,
        ],
        sort=True,
    )
    return combined[columns]
if __name__ == "__main__":
    # Output column order for the combined indicator table.
    columns = [
        "Variable",
        "Year",
        "Month",
        "Value",
        "Unit",
        "Source",
        "State",
        "County",
        "Country",
    ]
    # NOTE(review): this rebinds (shadows) the `data_dir` Path imported from
    # delphi.paths with its string form — intentional, but easy to misread.
    data_dir = str(data_dir / "raw" / "wm_12_month_evaluation")
    df = create_combined_table(data_dir, columns)
    # Normalize Year (some sources produce float/object dtypes).
    df["Year"] = df["Year"].astype(int)
    # First CLI argument is the output TSV path.
    df.to_csv(sys.argv[1], index=False, sep="\t")
|
# coding=utf-8
# Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch BigBird model. """
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, SequenceSummary, apply_chunking_to_forward
from ...utils import logging
from .configuration_big_bird import BigBirdConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "google/bigbird-roberta-base"
_CONFIG_FOR_DOC = "BigBirdConfig"
_TOKENIZER_FOR_DOC = "BigBirdTokenizer"
BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/bigbird-roberta-base",
"google/bigbird-roberta-large",
"google/bigbird-base-trivia-itc",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
]
_TRIVIA_QA_MAPPING = {
"big_bird_attention": "attention/self",
"output_layer_norm": "output/LayerNorm",
"attention_output": "attention/output/dense",
"output": "output/dense",
"self_attention_layer_norm": "attention/output/LayerNorm",
"intermediate": "intermediate/dense",
"word_embeddings": "bert/embeddings/word_embeddings",
"position_embedding": "bert/embeddings/position_embeddings",
"type_embeddings": "bert/embeddings/token_type_embeddings",
"embeddings": "bert/embeddings",
"layer_normalization": "output/LayerNorm",
"layer_norm": "LayerNorm",
"trivia_qa_head": "qa_classifier",
"dense": "intermediate/dense",
"dense_1": "qa_outputs",
}
def load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=False):
    """Load tf checkpoints in a pytorch model.

    Args:
        model: BigBird PyTorch model whose parameters are populated in place.
        tf_checkpoint_path: path to a TF checkpoint, or to a SavedModel directory
            when ``is_trivia_qa`` is True.
        is_trivia_qa: if True, read a TriviaQA SavedModel and remap its variable
            names through ``_TRIVIA_QA_MAPPING`` before matching.

    Returns:
        The same ``model`` with weights copied from the TF checkpoint.
    """
    def load_tf_weights_bert(init_vars, tf_path):
        # Collect every (name -> ndarray) pair from a regular TF checkpoint.
        names = []
        tf_weights = {}
        for name, shape in init_vars:
            array = tf.train.load_variable(tf_path, name)
            # The TF encoder-level LayerNorm corresponds to the embeddings LayerNorm here.
            name = name.replace("bert/encoder/LayerNorm", "bert/embeddings/LayerNorm")
            logger.info(f"Loading TF weight {name} with shape {shape}")
            names.append(name)
            tf_weights[name] = array
        return names, tf_weights
    def load_tf_weights_trivia_qa(init_vars):
        # Collect SavedModel variables, renaming TriviaQA scopes to BERT-style names.
        names = []
        tf_weights = {}
        for i, var in enumerate(init_vars):
            name_items = var.name.split("/")
            if "transformer_scaffold" in name_items[0]:
                layer_name_items = name_items[0].split("_")
                if len(layer_name_items) < 3:
                    layer_name_items += [0]
                name_items[0] = f"bert/encoder/layer_{layer_name_items[2]}"
            name = "/".join([_TRIVIA_QA_MAPPING[x] if x in _TRIVIA_QA_MAPPING else x for x in name_items])[
                :-2
            ]  # remove last :0 in variable
            if "self/attention/output" in name:
                name = name.replace("self/attention/output", "output")
            # The final two variables belong to the QA head, not to an encoder layer.
            if i >= len(init_vars) - 2:
                name = name.replace("intermediate", "output")
            logger.info(f"Loading TF weight {name} with shape {var.shape}")
            array = var.value().numpy()
            names.append(name)
            tf_weights[name] = array
        return names, tf_weights
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.saved_model.load(tf_path).variables if is_trivia_qa else tf.train.list_variables(tf_path)
    assert len(init_vars) > 0, "Loaded trained variables cannot be empty."
    pt_names = list(model.state_dict().keys())
    if is_trivia_qa:
        names, tf_weights = load_tf_weights_trivia_qa(init_vars)
    else:
        names, tf_weights = load_tf_weights_bert(init_vars, tf_path)
    for txt_name in names:
        array = tf_weights[txt_name]
        name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {"/".join(name)}")
            continue
        pointer = model
        pt_name = []
        # Walk the TF scope path, descending into the matching PyTorch submodules.
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
                pt_name.append("weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
                pt_name.append("bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
                pt_name.append("weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
                pt_name.append("classifier")
            elif scope_names[0] == "transform":
                pointer = getattr(pointer, "transform")
                pt_name.append("transform")
                if ("bias" in name) or ("kernel" in name):
                    pointer = getattr(pointer, "dense")
                    pt_name.append("dense")
                elif ("beta" in name) or ("gamma" in name):
                    pointer = getattr(pointer, "LayerNorm")
                    pt_name.append("LayerNorm")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                    pt_name.append(f"{scope_names[0]}")
                except AttributeError:
                    # NOTE(review): this `continue` also skips the numeric-index descent
                    # below for the unmatched scope — appears intentional (unknown scopes
                    # are simply ignored), but confirm against upstream behavior.
                    logger.info(f"Skipping {m_name}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
                pt_name.append(f"{num}")
        if m_name[-11:] == "_embeddings" or m_name == "embeddings":
            pointer = getattr(pointer, "weight")
            pt_name.append("weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to torch.nn.Linear.
            array = np.transpose(array)
        try:
            if len(array.shape) > len(pointer.shape) and math.prod(array.shape) == math.prod(pointer.shape):
                # print(txt_name, array.shape)
                # Attention projections are stored per-head in TF; fold heads back into 2D.
                if (
                    txt_name.endswith("attention/self/key/kernel")
                    or txt_name.endswith("attention/self/query/kernel")
                    or txt_name.endswith("attention/self/value/kernel")
                ):
                    array = array.transpose(1, 0, 2).reshape(pointer.shape)
                elif txt_name.endswith("attention/output/dense/kernel"):
                    array = array.transpose(0, 2, 1).reshape(pointer.shape)
                else:
                    array = array.reshape(pointer.shape)
            if pointer.shape != array.shape:
                raise ValueError(
                    f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched of {txt_name}."
                )
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        pt_weight_name = ".".join(pt_name)
        logger.info(f"Initialize PyTorch weight {pt_weight_name} from {txt_name}.")
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(txt_name, None)
        pt_names.remove(pt_weight_name)
    logger.info(f"Weights not copied to PyTorch model: {", ".join(tf_weights.keys())}.")
    logger.info(f"Weights not initialized in PyTorch model: {", ".join(pt_names)}.")
    return model
class BigBirdEmbeddings(nn.Module):
    """Sum of word, position and token-type embeddings, followed by dropout and LayerNorm.

    Note: unlike BERT, BigBird applies dropout *before* LayerNorm and can optionally
    rescale word embeddings by sqrt(hidden_size) via ``config.rescale_embeddings``.
    """

    def __init__(self, config):
        super().__init__()
        # Submodule creation order is kept identical to the reference implementation so
        # that seeded weight initialization stays reproducible.
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # "LayerNorm" keeps the TF-style capitalization so TF checkpoints load by name.
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Pre-computed position ids, contiguous and serialized with the model.
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.rescale_embeddings = config.rescale_embeddings
        self.hidden_size = config.hidden_size

    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        """Return combined embeddings of shape (batch, seq, hidden_size)."""
        shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        seq_length = shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        if self.rescale_embeddings:
            inputs_embeds = inputs_embeds * (self.hidden_size ** 0.5)
        embeddings = inputs_embeds + self.token_type_embeddings(token_type_ids)
        embeddings += self.position_embeddings(position_ids)
        # BigBird order: dropout first, LayerNorm second (matches the TF reference model).
        embeddings = self.LayerNorm(self.dropout(embeddings))
        return embeddings
class BigBirdSelfAttention(nn.Module):
    """Dense (full) multi-head attention, optionally usable as decoder self-/cross-attention.

    This is the standard BERT-style attention; the sparse variant lives in
    ``BigBirdBlockSparseAttention``.
    """

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # Creation order (query, key, value, dropout) preserved for reproducible seeding.
        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x):
        """Reshape (batch, seq, all_head) -> (batch, heads, seq, head_size)."""
        target_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*target_shape).permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        """Scaled dot-product attention.

        Returns a tuple ``(context, [attn_probs], [past_key_value])`` whose optional
        members depend on ``output_attentions`` and ``self.is_decoder``.
        """
        mixed_query_layer = self.query(hidden_states)
        # Cross-attention projects the encoder output instead of `hidden_states`, and its
        # attention mask must then hide the *encoder's* padding tokens.
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention and past_key_value is not None:
            # Cached cross-attention projections: reuse them wholesale.
            key_states = past_key_value[0]
            value_states = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_states = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_states = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Uni-directional self-attention: append new projections to the cache.
            key_states = self.transpose_for_scores(self.key(hidden_states))
            value_states = self.transpose_for_scores(self.value(hidden_states))
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            key_states = self.transpose_for_scores(self.key(hidden_states))
            value_states = self.transpose_for_scores(self.value(hidden_states))
        query_states = self.transpose_for_scores(mixed_query_layer)
        if self.is_decoder:
            # Cache the current key/value projections so later decoding steps (or further
            # cross-attention calls) can reuse them; `None` for encoder-only usage.
            past_key_value = (key_states, value_states)
        # Raw attention scores: (batch, heads, q_len, k_len).
        scores = torch.matmul(query_states, key_states.transpose(-1, -2))
        scores = scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # The mask is additive (-10000-style), precomputed in the model's forward().
            scores = scores + attention_mask
        probs = F.softmax(scores, dim=-1)
        # Drops whole tokens to attend to — unusual, but taken from the original Transformer.
        probs = self.dropout(probs)
        if head_mask is not None:
            probs = probs * head_mask
        context = torch.matmul(probs, value_states).permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        context = context.view(*merged_shape)
        outputs = (context, probs) if output_attentions else (context,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
class BigBirdBlockSparseAttention(nn.Module):
def __init__(self, config, seed=None):
super().__init__()
self.max_seqlen = config.max_position_embeddings
self.seed = seed
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.num_attention_heads = config.num_attention_heads
self.num_random_blocks = config.num_random_blocks
self.block_size = config.block_size
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
band_mask=None,
from_mask=None,
to_mask=None,
from_blocked_mask=None,
to_blocked_mask=None,
output_attentions=None,
):
# Currently this `class` can't be used in decoder.
batch_size, seqlen, _ = hidden_states.size()
to_seq_length = from_seq_length = seqlen
from_block_size = to_block_size = self.block_size
assert from_seq_length % from_block_size == 0, "Query sided sequence length must be multiple of block size"
assert to_seq_length % to_block_size == 0, "Key/Value sided sequence length must be multiple of block size"
query_layer = self.transpose_for_scores(self.query(hidden_states))
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
context_layer, attention_probs = self.bigbird_block_sparse_attention(
query_layer,
key_layer,
value_layer,
band_mask,
from_mask,
to_mask,
from_blocked_mask,
to_blocked_mask,
self.num_attention_heads,
self.num_random_blocks,
self.attention_head_size,
from_block_size,
to_block_size,
batch_size,
from_seq_length,
to_seq_length,
seed=self.seed,
plan_from_length=None,
plan_num_rand_blocks=None,
output_attentions=output_attentions,
)
context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
@staticmethod
def torch_bmm_nd(inp_1, inp_2, ndim=None):
""" Fast nd matrix multiplication """
# faster replacement of torch.einsum ("bhqk,bhkd->bhqd")
return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view(
inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1])
)
@staticmethod
def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None):
""" Fast nd matrix multiplication with transpose """
# faster replacement of torch.einsum (bhqd,bhkd->bhqk)
return torch.bmm(
inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2)
).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2]))
    def bigbird_block_sparse_attention(
        self,
        query_layer,
        key_layer,
        value_layer,
        band_mask,
        from_mask,
        to_mask,
        from_blocked_mask,
        to_blocked_mask,
        n_heads,
        n_rand_blocks,
        attention_head_size,
        from_block_size,
        to_block_size,
        batch_size,
        from_seq_len,
        to_seq_len,
        seed,
        plan_from_length,
        plan_num_rand_blocks,
        output_attentions,
    ):
        """Core BigBird block-sparse attention (ITC variant).

        Each middle query block attends to 3 sliding (neighbouring) key blocks, the 2
        global key blocks (first & last) and ``n_rand_blocks`` randomly chosen key
        blocks; the first/second/second-to-last/last query blocks are handled
        separately. Returns ``(context_layer, attention_probs)`` where the context is
        transposed to ``[batch, from_seq_len, n_heads, head_dim]`` and
        ``attention_probs`` is a dense reconstruction of the sparse weights when
        ``output_attentions`` is set (``None`` otherwise).
        """
        # BigBird block-sparse attention as suggested in paper
        # ITC:
        #     global tokens: 2 x block_size
        #     window tokens: 3 x block_size
        #     random tokens: num_rand_tokens x block_size
        # ETC:
        #     global tokens: extra_globals_tokens + 2 x block_size
        #     window tokens: 3 x block_size
        #     random tokens: num_rand_tokens x block_size
        # Note:
        #     1) Currently, ETC is not supported.
        #     2) Window size is fixed to 3 blocks & it can be changed only by
        #     changing `block_size`.
        #     3) Number of global blocks are fixed (2 blocks here) & global tokens can be
        #     controlled only by `block_size`.
        # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of shifting tokens (for calculating sliding attention)
        # hence following code can be divided into 5 parts.
        if from_seq_len // from_block_size != to_seq_len // to_block_size:
            raise ValueError("Error the number of blocks needs to be same!")
        rsqrt_d = 1 / math.sqrt(attention_head_size)
        bsz = batch_size
        # generate random attention and corresponding masks
        np.random.seed(seed)
        if from_seq_len in [1024, 3072, 4096]:  # old plans used in paper
            rand_attn = [
                self._bigbird_block_rand_mask(
                    self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024
                )[: (from_seq_len // from_block_size - 2)]
                for _ in range(n_heads)
            ]
        else:
            if plan_from_length is None:
                plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan(
                    from_seq_len, from_block_size, n_rand_blocks
                )
            rand_attn = self._bigbird_block_rand_mask_with_head(
                from_seq_length=from_seq_len,
                to_seq_length=to_seq_len,
                from_block_size=from_block_size,
                to_block_size=to_block_size,
                num_heads=n_heads,
                plan_from_length=plan_from_length,
                plan_num_rand_blocks=plan_num_rand_blocks,
            )
        # rand_attn: per head, the indices of the randomly attended key blocks for every
        # middle query block; replicated across the batch dimension below.
        rand_attn = np.stack(rand_attn, axis=0)
        rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long)
        rand_attn.unsqueeze_(0)
        rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0)
        rand_mask = self._create_rand_mask_from_inputs(
            from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size
        )
        blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1)
        blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
        blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
        # preparing block for randn attn
        gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn)
        gathered_key = gathered_key.view(
            bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
        )  # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]
        gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn)
        gathered_value = gathered_value.view(
            bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
        )  # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]
        # 1st PART
        # 1st block (global block) attention scores
        # q[0] x (k[0], k[1], k[2], k[3], k[4] .... )
        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]
        first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4)
        first_product = first_product * rsqrt_d
        first_product += (1.0 - to_mask) * -10000.0
        first_attn_weights = F.softmax(first_product, dim=-1)  # [bsz, n_heads, from_block_size, to_seq_len]
        # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]
        first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4)
        first_context_layer.unsqueeze_(2)
        # 2nd PART
        # 2nd block attention scores
        # q[1] x (sliding_keys, random_keys, global_keys)
        # sliding key blocks -> 2nd, 3rd blocks
        # global key blocks -> 1st block
        second_key_mat = torch.cat(
            [
                blocked_key_matrix[:, :, 0],
                blocked_key_matrix[:, :, 1],
                blocked_key_matrix[:, :, 2],
                blocked_key_matrix[:, :, -1],
                gathered_key[:, :, 0],
            ],
            dim=2,
        )  # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]
        second_value_mat = torch.cat(
            [
                blocked_value_matrix[:, :, 0],
                blocked_value_matrix[:, :, 1],
                blocked_value_matrix[:, :, 2],
                blocked_value_matrix[:, :, -1],
                gathered_value[:, :, 0],
            ],
            dim=2,
        )  # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]
        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
        second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4)
        second_seq_pad = torch.cat(
            [
                to_mask[:, :, :, : 3 * to_block_size],
                to_mask[:, :, :, -to_block_size:],
                first_context_layer.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
            ],
            dim=3,
        )
        second_rand_pad = torch.cat(
            [
                first_context_layer.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
                rand_mask[:, :, 0],
            ],
            dim=3,
        )
        second_product = second_product * rsqrt_d
        # a position is attendable only if allowed by BOTH the sequence and random masks
        second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * -10000.0
        second_attn_weights = F.softmax(
            second_product, dim=-1
        )  # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
        # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]
        second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4)
        second_context_layer.unsqueeze_(2)
        # 3rd PART
        # Middle blocks attention scores
        # q[-2:2] x (sliding_keys, random_keys, global_keys)
        # sliding attn is calculated using special trick of shifting tokens as discussed in paper
        # random keys are generated by taking random indices as per `rand_attn`
        # global keys -> 1st & last block
        exp_blocked_key_matrix = torch.cat(
            [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
        exp_blocked_value_matrix = torch.cat(
            [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]],
            dim=3,
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
        middle_query_matrix = blocked_query_matrix[:, :, 2:-2]
        # sliding attention scores for q[-2:2]
        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
        inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5)
        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, 3*to_block_size]
        inner_band_product = inner_band_product * rsqrt_d
        # randn attention scores for q[-2:2]
        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]
        rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5)
        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size]
        rand_band_product = rand_band_product * rsqrt_d
        # Including 1st block (since it's global)
        first_band_product = torch.einsum(
            "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0]
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
        first_band_product = first_band_product * rsqrt_d
        # Including last block (since it's global)
        last_band_product = torch.einsum(
            "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1]
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
        last_band_product = last_band_product * rsqrt_d
        # masking padded tokens
        inner_band_product += (1.0 - band_mask) * -10000.0
        first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * -10000.0
        last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * -10000.0
        rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * -10000.0
        # completing attention scores matrix for all q[-2:2]
        band_product = torch.cat(
            [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]
        # safely doing softmax since attention matrix is completed
        attn_weights = F.softmax(
            band_product, dim=-1
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]
        # contribution of sliding keys
        # [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
        context_layer = self.torch_bmm_nd(
            attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5
        )
        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
        # adding contribution of random keys
        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]
        context_layer += self.torch_bmm_nd(
            attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5
        )
        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
        # adding contribution of global keys
        context_layer += torch.einsum(
            "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0]
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
        context_layer += torch.einsum(
            "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1]
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
        # 4th PART
        # last 2nd token attention scores
        # q[-2] x (sliding_keys, random_keys, global_keys)
        # sliding key blocks -> last 3 blocks
        # global key block -> 1st block
        # random key block -> based on indices stored in `randn_attn`
        second_last_key_mat = torch.cat(
            [
                blocked_key_matrix[:, :, 0],
                blocked_key_matrix[:, :, -3],
                blocked_key_matrix[:, :, -2],
                blocked_key_matrix[:, :, -1],
                gathered_key[:, :, -1],
            ],
            dim=2,
        )  # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1]
        second_last_value_mat = torch.cat(
            [
                blocked_value_matrix[:, :, 0],
                blocked_value_matrix[:, :, -3],
                blocked_value_matrix[:, :, -2],
                blocked_value_matrix[:, :, -1],
                gathered_value[:, :, -1],
            ],
            dim=2,
        )  # [bsz, n_heads, (4+r)*to_block_size, -1]
        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
        second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4)
        second_last_seq_pad = torch.cat(
            [
                to_mask[:, :, :, :to_block_size],
                to_mask[:, :, :, -3 * to_block_size :],
                context_layer.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
            ],
            dim=3,
        )
        second_last_rand_pad = torch.cat(
            [
                context_layer.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
                rand_mask[:, :, -1],
            ],
            dim=3,
        )
        second_last_product = second_last_product * rsqrt_d
        second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * -10000.0
        second_last_attn_weights = F.softmax(
            second_last_product, dim=-1
        )  # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
        # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]
        second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4)
        second_last_context_layer.unsqueeze_(2)
        # 5th PART
        # last block (global) attention scores
        # q[-1] x (k[0], k[1], k[2], k[3], .... )
        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]
        last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4)
        last_product = last_product * rsqrt_d
        last_product += (1.0 - to_mask) * -10000.0
        last_attn_weights = F.softmax(last_product, dim=-1)  # [bsz, n_heads, from_block_size, n]
        # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]
        last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4)
        last_context_layer.unsqueeze_(2)
        # combining representations of all tokens
        context_layer = torch.cat(
            [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer],
            dim=2,
        )
        context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask
        context_layer = torch.transpose(context_layer, 1, 2)
        # this is just for visualizing; forward pass doesn't depend on following code
        if output_attentions:
            # TODO(PVP): need to verify if below code is correct
            # Scatter the sparse attention weights back into a dense
            # [bsz, n_heads, from_seq_len, to_seq_len] matrix for inspection.
            attention_probs = torch.zeros(
                bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device
            )
            # 1st query block
            # corresponding to `first_context_layer`
            attention_probs[:, :, :from_block_size, :] = first_attn_weights  # all keys global
            # 2nd query block
            # corresponding to `second_context_layer`
            attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[
                :, :, :, : 3 * to_block_size
            ]  # 1st three key blocks (global + sliding)
            attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[
                :, :, :, 3 * to_block_size : 4 * to_block_size
            ]  # last key block (global)
            # random keys
            for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights):
                # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
                for p2, i2, w2 in zip(range(n_heads), i1, w1):
                    # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
                    attn_probs_view = attention_probs.view(
                        bsz,
                        n_heads,
                        from_seq_len // from_block_size,
                        from_block_size,
                        to_seq_len // to_block_size,
                        to_block_size,
                    )
                    right_slice = w2[:, 4 * to_block_size :]
                    attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view(
                        from_block_size, n_rand_blocks, to_block_size
                    )
            # Middle query blocks
            # corresponding to `context_layer`
            # sliding keys
            for q_idx in range(from_seq_len // from_block_size - 4):
                attn_probs_view = attention_probs.view(
                    bsz,
                    n_heads,
                    from_seq_len // from_block_size,
                    from_block_size,
                    to_seq_len // to_block_size,
                    to_block_size,
                )[:, :, 2:-2, :, 1:-1, :]
                right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size]
                attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view(
                    bsz, n_heads, from_block_size, 3, to_block_size
                )  # inner_band_product
            # global keys (corresponding to 1st key block)
            attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[
                :, :, :, :, :to_block_size
            ].view(
                bsz, n_heads, -1, to_block_size
            )  # first_band_product
            # global keys (corresponding to last key block)
            attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[
                :, :, :, :, -to_block_size:
            ].view(
                bsz, n_heads, -1, to_block_size
            )  # last_band_product
            # random keys
            for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights):
                # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
                for p2, i2, w2 in zip(range(n_heads), i1, w1):
                    # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
                    for q_idx in range(1, len(i2) - 1):
                        attn_probs_view = attention_probs.view(
                            bsz,
                            n_heads,
                            from_seq_len // from_block_size,
                            from_block_size,
                            to_seq_len // to_block_size,
                            to_block_size,
                        )
                        right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size]
                        attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view(
                            from_block_size, n_rand_blocks, to_block_size
                        )
            # Second-last query block
            # corresponding to `second_last_context_layer`
            attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[
                :, :, :, :to_block_size
            ]  # 1st key block (global)
            attention_probs[
                :, :, -2 * from_block_size : -from_block_size, -3 * to_block_size :
            ] = second_last_attn_weights[
                :, :, :, to_block_size : 4 * to_block_size
            ]  # last three blocks (global + sliding)
            # random keys
            for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights):
                # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
                for p2, i2, w2 in zip(range(n_heads), i1, w1):
                    # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
                    attn_probs_view = attention_probs.view(
                        bsz,
                        n_heads,
                        from_seq_len // from_block_size,
                        from_block_size,
                        to_seq_len // to_block_size,
                        to_block_size,
                    )
                    right_slice = w2[:, 4 * to_block_size :]
                    attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view(
                        from_block_size, n_rand_blocks, to_block_size
                    )
            # last query block
            # corresponding to `last_context_layer`
            attention_probs[:, :, -from_block_size:, :] = last_attn_weights  # all keys global
        else:
            attention_probs = None
        return context_layer, attention_probs
@staticmethod
def torch_gather_b2(params, indices):
# this operation is equilvalent to tf.gather when batch_dims=2
if params.shape[:2] != indices.shape[:2]:
raise ValueError(
f"Make sure that the first two dimensions of params and indices are identical, \
but they are params: {params.shape[:2]} vs. indices: {params.shape[:2]}"
)
num_indices_to_gather = indices.shape[-2] * indices.shape[-1]
num_indices_to_pick_from = params.shape[2]
indices_shift = (
torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device)
// num_indices_to_gather
* num_indices_to_pick_from
)
flattened_indices = indices.view(-1) + indices_shift
flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1])
out_flattened = flattened_params.index_select(0, flattened_indices)
out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:])
return out
@staticmethod
def _create_rand_mask_from_inputs(
from_blocked_mask,
to_blocked_mask,
rand_attn,
num_attention_heads,
num_rand_blocks,
batch_size,
from_seq_length,
from_block_size,
):
"""
Create 3D attention mask from a 2D tensor mask.
Args:
from_blocked_mask: 2D Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size].
to_blocked_mask: int32 Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size].
rand_attn: [batch_size, num_attention_heads,
from_seq_length//from_block_size-2, num_rand_blocks]
num_attention_heads: int. Number of attention heads.
num_rand_blocks: int. Number of random chunks per row.
batch_size: int. Batch size for computation.
from_seq_length: int. length of from sequence.
from_block_size: int. size of block in from sequence.
Returns:
float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2,
from_block_size, num_rand_blocks*to_block_size].
"""
num_windows = from_seq_length // from_block_size - 2
rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)])
rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size)
rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask)
return rand_mask
@staticmethod
def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks):
"""
Gives the plan of where to put random attention.
Args:
from_seq_length: int. length of from sequence.
from_block_size: int. size of block in from sequence.
num_rand_blocks: int. Number of random chunks per row.
Returns:
plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for
each block
"""
plan_from_length = []
plan_num_rand_blocks = []
if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size):
plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size))
plan_num_rand_blocks.append(num_rand_blocks)
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(0)
elif (num_rand_blocks + 5) < (from_seq_length // from_block_size):
plan_from_length.append(int((num_rand_blocks + 5) * from_block_size))
plan_num_rand_blocks.append(num_rand_blocks // 2)
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2))
else:
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(num_rand_blocks)
return plan_from_length, plan_num_rand_blocks
    @staticmethod
    def _bigbird_block_rand_mask(
        from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1
    ):
        """
        Create adjacency list of random attention.

        Args:
            from_seq_length: int. length of from sequence.
            to_seq_length: int. length of to sequence.
            from_block_size: int. size of block in from sequence.
            to_block_size: int. size of block in to sequence.
            num_rand_blocks: int. Number of random chunks per row.
            last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence,
            if positive then num_rand_blocks blocks choosen only upto last_idx.

        Returns:
            adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks
        """
        # using this method when from_seq_length in [1024, 3072, 4096]
        assert (
            from_seq_length // from_block_size == to_seq_length // to_block_size
        ), "Error the number of blocks needs to be same!"
        # One row per "middle" from-block; the first and last blocks attend globally
        # and therefore get no random-attention row.
        rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)
        # Candidate to-blocks: everything except the first and last (global) blocks.
        middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)
        last = to_seq_length // to_block_size - 1
        if last_idx > (2 * to_block_size):
            # Restrict candidates to blocks strictly before last_idx.
            last = (last_idx // to_block_size) - 1
        r = num_rand_blocks  # shorthand
        for i in range(1, from_seq_length // from_block_size - 1):
            # Candidate exclusion window: [start+1, end+1) blocks are covered by the
            # sliding-window attention around block i.
            start = i - 2
            end = i
            if i == 1:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]
            elif i == 2:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]
            elif i == from_seq_length // from_block_size - 3:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
            # Missing -3: should have been sliced till last-3
            elif i == from_seq_length // from_block_size - 2:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
            # Missing -4: should have been sliced till last-4
            else:
                if start > last:
                    start = last
                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
                elif (end + 1) == last:
                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
                else:
                    # General case: sample from blocks on either side of the window.
                    rand_attn[i - 1, :] = np.random.permutation(
                        np.concatenate((middle_seq[:start], middle_seq[end + 1 : last]))
                    )[:r]
        return rand_attn
    def _bigbird_block_rand_mask_with_head(
        self,
        from_seq_length,
        to_seq_length,
        from_block_size,
        to_block_size,
        num_heads,
        plan_from_length,
        plan_num_rand_blocks,
        window_block_left=1,
        window_block_right=1,
        global_block_top=1,
        global_block_bottom=1,
        global_block_left=1,
        global_block_right=1,
    ):
        """
        Create adjacency list of random attention.

        Args:
            from_seq_length: int. length of from sequence.
            to_seq_length: int. length of to sequence.
            from_block_size: int. size of block in from sequence.
            to_block_size: int. size of block in to sequence.
            num_heads: int. total number of heads.
            plan_from_length: list. plan from length where num_random_blocks are choosen from.
            plan_num_rand_blocks: list. number of rand blocks within the plan.
            window_block_left: int. number of blocks of window to left of a block.
            window_block_right: int. number of blocks of window to right of a block.
            global_block_top: int. number of blocks at the top.
            global_block_bottom: int. number of blocks at the bottom.
            global_block_left: int. Number of blocks globally used to the left.
            global_block_right: int. Number of blocks globally used to the right.

        Returns:
            adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by
            num_rand_blocks
        """
        # using this method when from_seq_length not in [1024, 3072, 4096]
        assert (
            from_seq_length // from_block_size == to_seq_length // to_block_size
        ), "Error the number of blocks needs to be same!"
        assert from_seq_length in plan_from_length, "Error from sequence length not in plan!"
        # Total number of blocks in the mask
        num_blocks = from_seq_length // from_block_size
        # Number of blocks per plan
        plan_block_length = np.array(plan_from_length) // from_block_size
        # till when to follow plan
        max_plan_idx = plan_from_length.index(from_seq_length)
        # Random attention adjacency list: one (num_blocks x total_rand_blocks) matrix per head
        rand_attn = [
            np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32)
            for i in range(num_heads)
        ]
        # We will go iteratively over the plan blocks and pick random number of
        # Attention blocks from the legally allowed blocks
        for plan_idx in range(max_plan_idx + 1):
            rnd_r_cnt = 0
            if plan_idx > 0:
                # set the row for all from_blocks starting from 0 to
                # plan_block_length[plan_idx-1]
                # column indx start from plan_block_length[plan_idx-1] and ends at
                # plan_block_length[plan_idx]
                if plan_num_rand_blocks[plan_idx] > 0:
                    rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                    curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
                    for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]):
                        for h in range(num_heads):
                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                                block_id=blk_rw_idx,
                                to_start_block_id=plan_block_length[plan_idx - 1],
                                to_end_block_id=plan_block_length[plan_idx],
                                num_rand_blocks=plan_num_rand_blocks[plan_idx],
                                window_block_left=window_block_left,
                                window_block_right=window_block_right,
                                global_block_left=global_block_left,
                                global_block_right=global_block_right,
                            )
                # For the rows of the current segment, fill the columns that belong to
                # every earlier plan segment.
                for pl_id in range(plan_idx):
                    if plan_num_rand_blocks[pl_id] == 0:
                        continue
                    for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]):
                        rnd_r_cnt = 0
                        to_start_block_id = 0
                        if pl_id > 0:
                            rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id]))
                            to_start_block_id = plan_block_length[pl_id - 1]
                        curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1]))
                        for h in range(num_heads):
                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                                block_id=blk_rw_idx,
                                to_start_block_id=to_start_block_id,
                                to_end_block_id=plan_block_length[pl_id],
                                num_rand_blocks=plan_num_rand_blocks[pl_id],
                                window_block_left=window_block_left,
                                window_block_right=window_block_right,
                                global_block_left=global_block_left,
                                global_block_right=global_block_right,
                            )
            if plan_num_rand_blocks[plan_idx] == 0:
                continue
            # Fill the current segment's own columns for its own rows.
            curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
            from_start_block_id = global_block_top
            to_start_block_id = 0
            if plan_idx > 0:
                rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                from_start_block_id = plan_block_length[plan_idx - 1]
                to_start_block_id = plan_block_length[plan_idx - 1]
            for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]):
                for h in range(num_heads):
                    rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                        block_id=blk_rw_idx,
                        to_start_block_id=to_start_block_id,
                        to_end_block_id=plan_block_length[plan_idx],
                        num_rand_blocks=plan_num_rand_blocks[plan_idx],
                        window_block_left=window_block_left,
                        window_block_right=window_block_right,
                        global_block_left=global_block_left,
                        global_block_right=global_block_right,
                    )
        # Drop the global rows at the top and bottom — they attend everywhere anyway.
        for nh in range(num_heads):
            rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]
        return rand_attn
@staticmethod
def _get_single_block_row_attention(
block_id,
to_start_block_id,
to_end_block_id,
num_rand_blocks,
window_block_left=1,
window_block_right=1,
global_block_left=1,
global_block_right=1,
):
"""
For a single row block get random row attention.
Args:
block_id: int. block id of row.
to_start_block_id: int. random attention coloum start id.
to_end_block_id: int. random attention coloum end id.
num_rand_blocks: int. number of random blocks to be selected.
window_block_left: int. number of blocks of window to left of a block.
window_block_right: int. number of blocks of window to right of a block.
global_block_left: int. Number of blocks globally used to the left.
global_block_right: int. Number of blocks globally used to the right.
Returns:
row containing the random attention vector of size num_rand_blocks.
"""
# list of to_blocks from which to choose random attention
to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32)
# permute the blocks
perm_block = np.random.permutation(to_block_list)
# illegal blocks for the current block id, using window
illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1))
# Add blocks at the start and at the end
illegal_blocks.extend(list(range(global_block_left)))
illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id)))
# The second from_block cannot choose random attention on second last to_block
if block_id == 1:
illegal_blocks.append(to_end_block_id - 2)
# The second last from_block cannot choose random attention on second to_block
if block_id == to_end_block_id - 2:
illegal_blocks.append(1)
selected_random_blokcs = []
for i in range(to_end_block_id - to_start_block_id):
if perm_block[i] not in illegal_blocks:
selected_random_blokcs.append(perm_block[i])
if len(selected_random_blokcs) == num_rand_blocks:
break
return np.array(selected_random_blokcs, dtype=np.int32)
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BigBird
class BigBirdSelfOutput(nn.Module):
    """Projects the self-attention output and applies dropout + residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        # Output projection back to the model's hidden size.
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
class BigBirdAttention(nn.Module):
    """Attention wrapper that dispatches between full and block-sparse self-attention.

    The implementation is chosen by ``config.attention_type`` ('original_full' or
    'block_sparse') and can be swapped at runtime with :meth:`set_attention_type`;
    the query/key/value projection weights are carried over on a switch.
    """

    def __init__(self, config, seed=None):
        super().__init__()
        self.attention_type = config.attention_type
        self.config = config
        # `seed` is only used by the block-sparse implementation (random block selection).
        self.seed = seed

        if self.config.attention_type == "original_full":
            self.self = BigBirdSelfAttention(config)
        elif self.config.attention_type == "block_sparse":
            self.self = BigBirdBlockSparseAttention(config, seed)
        else:
            raise ValueError(
                f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}"
            )

        self.output = BigBirdSelfOutput(config)

    def set_attention_type(self, value: str):
        """Switch between 'original_full' and 'block_sparse' attention in place,
        moving the projection weights to the new attention module.

        Raises:
            ValueError: if `value` is not one of the two supported types.
        """
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return

        if value == "original_full":
            # copy all weights to new full attention class
            attn_weights = BigBirdSelfAttention(self.config)
        else:
            # copy all weights to new sparse attention class
            attn_weights = BigBirdBlockSparseAttention(self.config, self.seed)

        attn_weights.query = self.self.query
        attn_weights.value = self.self.value
        attn_weights.key = self.self.key
        self.self = attn_weights
        # Update the type only once the swap has succeeded (the original code assigned
        # it twice, and before constructing the new module).
        self.attention_type = value

        if not self.training:
            self.self.eval()

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        # block_sparse config
        band_mask=None,
        from_mask=None,
        to_mask=None,
        from_blocked_mask=None,
        to_blocked_mask=None,
    ):
        """Run self-attention then the output projection.

        The band/from/to masks are only consumed by block-sparse attention; the
        encoder/decoder arguments are only valid with full attention.
        """
        if self.attention_type == "original_full":
            self_outputs = self.self(
                hidden_states,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                past_key_value,
                output_attentions,
            )
        else:
            # Block-sparse attention has no cross-attention path.
            assert (
                encoder_hidden_states is None
            ), "BigBird cannot be used as a decoder when config.attention_type != 'original_full'"
            self_outputs = self.self(
                hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions
            )

        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BigBird
class BigBirdIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size plus activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be given as a string key into ACT2FN or as a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        expanded = self.dense(hidden_states)
        return self.intermediate_act_fn(expanded)
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BigBird
class BigBirdOutput(nn.Module):
    """Feed-forward contraction back to hidden_size, with dropout and residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dense(hidden_states)
        contracted = self.dropout(contracted)
        # Residual connection around the whole feed-forward sub-layer.
        return self.LayerNorm(contracted + input_tensor)
class BigBirdLayer(nn.Module):
    """A single BigBird transformer layer: self-attention, optional cross-attention
    (decoder only), and a chunked feed-forward block."""

    def __init__(self, config, seed=None):
        super().__init__()
        self.config = config
        self.attention_type = config.attention_type
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Feed-forward chunking splits along the sequence dimension.
        self.seq_len_dim = 1
        # `seed` drives the random-block selection of block-sparse attention.
        self.attention = BigBirdAttention(config, seed=seed)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
            self.crossattention = BigBirdAttention(config)
        self.intermediate = BigBirdIntermediate(config)
        self.output = BigBirdOutput(config)

    def set_attention_type(self, value: str):
        """Switch this layer (and its cross-attention, if present) between
        'original_full' and 'block_sparse' attention."""
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        self.attention.set_attention_type(value)

        if self.add_cross_attention:
            self.crossattention.set_attention_type(value)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        band_mask=None,
        from_mask=None,
        to_mask=None,
        blocked_encoder_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_value=self_attn_past_key_value,
            output_attentions=output_attentions,
            band_mask=band_mask,
            from_mask=from_mask,
            to_mask=to_mask,
            from_blocked_mask=blocked_encoder_mask,
            to_blocked_mask=blocked_encoder_mask,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with \
                    cross-attention layers by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # Feed-forward is applied in (possibly) chunks along the sequence dim to save memory.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        # Intermediate expansion + output contraction with residual; called per chunk.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class BigBirdEncoder(nn.Module):
    """Stack of :class:`BigBirdLayer` modules.

    Each layer receives its index as the block-sparse random-attention seed, so
    different layers sample different random blocks.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.attention_type = config.attention_type

        self.layer = nn.ModuleList(
            [BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )

    def set_attention_type(self, value: str):
        """Propagate an attention-type switch to every layer."""
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        for layer in self.layer:
            layer.set_attention_type(value)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        band_mask=None,
        from_mask=None,
        to_mask=None,
        blocked_encoder_mask=None,
        return_dict=True,
    ):
        """Run all layers, optionally collecting hidden states, attentions, and the
        decoder cache.

        Returns:
            A plain tuple when ``return_dict=False``, otherwise a
            :class:`BaseModelOutputWithPastAndCrossAttentions`.
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if getattr(self.config, "gradient_checkpointing", False) and self.training:

                if use_cache:
                    # Caching is incompatible with recomputation-based checkpointing.
                    # (fix: `logger.warn` is a deprecated alias of `logger.warning`)
                    logger.warning(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    # Close over the non-tensor args; `checkpoint` only forwards tensors.
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    band_mask,
                    from_mask,
                    to_mask,
                    blocked_encoder_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    band_mask,
                    from_mask,
                    to_mask,
                    blocked_encoder_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                # Decoder cache is always the layer's last output.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->BigBird
class BigBirdPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM decoder head."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # `hidden_act` may be a string key into ACT2FN or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        transformed = self.dense(hidden_states)
        transformed = self.transform_act_fn(transformed)
        return self.LayerNorm(transformed)
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BigBird
class BigBirdLMPredictionHead(nn.Module):
    """Transforms hidden states and projects them onto the vocabulary."""

    def __init__(self, config):
        super().__init__()
        self.transform = BigBirdPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return self.decoder(transformed)
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BigBird
class BigBirdOnlyMLMHead(nn.Module):
    """Masked-LM head: thin wrapper around the LM prediction head."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BigBirdLMPredictionHead(config)

    def forward(self, sequence_output):
        # Vocabulary logits for every position.
        return self.predictions(sequence_output)
# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->BigBird
class BigBirdOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: binary classifier over the pooled output."""

    def __init__(self, config):
        super().__init__()
        # Two logits: "is next sentence" vs. "is not".
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->BigBird
class BigBirdPreTrainingHeads(nn.Module):
    """Combined pre-training heads: masked-LM predictions + NSP classifier."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BigBirdLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        # LM logits over the vocabulary and binary next-sentence logits.
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class BigBirdPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BigBirdConfig
    load_tf_weights = load_tf_weights_in_big_bird
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize module weights: normal init for dense/embedding weights,
        zeros for biases/padding rows, and unit scale for LayerNorm."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # The padding embedding stays at zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
BIG_BIRD_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config (:class:`~transformers.BigBirdConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BIG_BIRD_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BigBirdTokenizer`. See
:func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@dataclass
class BigBirdForPreTrainingOutput(ModelOutput):
    """
    Output type of :class:`~transformers.BigBirdForPreTraining`.

    Args:
        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # All fields default to None so ModelOutput's to-tuple conversion keeps only set values.
    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@add_start_docstrings(
    "The bare BigBird Model transformer outputting raw hidden-states without any specific head on top.",
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdModel(BigBirdPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.attention_type = self.config.attention_type
        self.config = config

        self.block_size = self.config.block_size

        self.embeddings = BigBirdEmbeddings(config)
        self.encoder = BigBirdEncoder(config)

        # Pooler: a single linear + tanh applied to the first token's hidden
        # state in `forward`; both are None when pooling is disabled.
        if add_pooling_layer:
            self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
            self.activation = nn.Tanh()
        else:
            self.pooler = None
            self.activation = None

        # Cross-attention is only implemented for full attention, so decoders
        # with cross-attention are forced to "original_full".
        if self.attention_type != "original_full" and config.add_cross_attention:
            logger.warning(
                "When using `BigBirdForCausalLM` as decoder, then `attention_type` must be `original_full`. Setting `attention_type=original_full`"
            )
            self.set_attention_type("original_full")

        self.init_weights()

    def get_input_embeddings(self):
        # Word-embedding table lives on the embeddings module.
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def set_attention_type(self, value: str):
        """Switch between "original_full" and "block_sparse" attention, propagating the change to the encoder."""
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        self.encoder.set_attention_type(value)

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Caching only makes sense for decoders.
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # in order to use block_sparse attention, sequence_length has to be at least
        # bigger than all global attentions: 2 * block_size
        # + sliding tokens: 3 * block_size
        # + random tokens: 2 * num_random_blocks * block_size
        max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size
        if self.attention_type == "block_sparse" and seq_length <= max_tokens_to_attend:
            # change attention_type from block_sparse to original_full
            sequence_length = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)
            logger.warning(
                "Attention type 'block_sparse' is not possible if sequence_length: "
                f"{sequence_length} <= num global tokens: 2 * config.block_size "
                "+ min. num sliding tokens: 3 * config.block_size "
                "+ config.num_random_blocks * config.block_size "
                "+ additional buffer: config.num_random_blocks * config.block_size "
                f"= {max_tokens_to_attend} with config.block_size "
                f"= {self.config.block_size}, config.num_random_blocks "
                f"= {self.config.num_random_blocks}."
                "Changing attention type to 'original_full'..."
            )
            self.set_attention_type("original_full")

        # Block-sparse attention requires seq_length to be a multiple of
        # block_size, so inputs (ids OR embeds) and masks are right-padded.
        if self.attention_type == "block_sparse":
            (
                padding_len,
                input_ids,
                attention_mask,
                token_type_ids,
                position_ids,
                inputs_embeds,
            ) = self._pad_to_block_size(
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                inputs_embeds=inputs_embeds,
                pad_token_id=self.config.pad_token_id,
            )
        else:
            padding_len = 0

        # Each attention flavour consumes a different set of masks; the unused
        # set is passed to the encoder as None.
        if self.attention_type == "block_sparse":
            blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn(
                attention_mask, self.block_size
            )
            extended_attention_mask = None
        elif self.attention_type == "original_full":
            blocked_encoder_mask = None
            band_mask = None
            from_mask = None
            to_mask = None
            # We can provide a self-attention mask of dimensions
            # [batch_size, from_seq_length, to_seq_length]
            # ourselves in which case we just need to make it broadcastable to all heads.
            extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
                attention_mask, input_shape, device
            )
        else:
            raise ValueError(
                f"attention_type can either be original_full or block_sparse, but is {self.attention_type}"
            )

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            band_mask=band_mask,
            from_mask=from_mask,
            to_mask=to_mask,
            blocked_encoder_mask=blocked_encoder_mask,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        # Pooling only reads position 0, which is unaffected by the trailing
        # block padding, so it can safely run before the unpadding below.
        pooler_output = self.activation(self.pooler(sequence_output[:, 0, :])) if (self.pooler is not None) else None

        # undo padding
        if padding_len > 0:
            # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1)
            sequence_output = sequence_output[:, :-padding_len]

        if not return_dict:
            return (sequence_output, pooler_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooler_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )

    @staticmethod
    def create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int):
        """Derive the four masks block-sparse attention needs from a plain 2D padding mask."""
        batch_size, seq_length = attention_mask.size()
        assert (
            seq_length % block_size == 0
        ), f"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block size is {block_size}."

        def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):
            """
            Create 3D attention mask from a 2D tensor mask.

            Args:
                from_blocked_mask: 2D Tensor of shape [batch_size,
                from_seq_length//from_block_size, from_block_size].
                to_blocked_mask: int32 Tensor of shape [batch_size,
                to_seq_length//to_block_size, to_block_size].

            Returns:
                float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size,
                3*to_block_size].
            """
            # For every "middle" block, gather its three sliding-window
            # neighbours (previous, same, next block) from the key side.
            exp_blocked_to_pad = torch.cat(
                [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2
            )
            band_mask = torch.einsum("blq,blk->blqk", from_blocked_mask[:, 2:-2], exp_blocked_to_pad)
            band_mask.unsqueeze_(1)
            return band_mask

        blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size)
        band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask)

        # Row- and column-broadcastable copies of the padding mask.
        from_mask = attention_mask.view(batch_size, 1, seq_length, 1)
        to_mask = attention_mask.view(batch_size, 1, 1, seq_length)

        return blocked_encoder_mask, band_mask, from_mask, to_mask

    def _pad_to_block_size(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        token_type_ids: torch.Tensor,
        position_ids: torch.Tensor,
        inputs_embeds: torch.Tensor,
        pad_token_id: int,
    ):
        """A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention."""
        # padding
        block_size = self.config.block_size

        input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
        batch_size, seq_len = input_shape[:2]

        padding_len = (block_size - seq_len % block_size) % block_size
        if padding_len > 0:
            logger.info(
                f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                f"`config.block_size`: {block_size}"
            )
            if input_ids is not None:
                input_ids = F.pad(input_ids, (0, padding_len), value=pad_token_id)
            if position_ids is not None:
                # pad with position_id = pad_token_id as in modeling_bigbird.BigBirdEmbeddings
                position_ids = F.pad(position_ids, (0, padding_len), value=pad_token_id)
            if inputs_embeds is not None:
                # When embeddings are given directly, embed a row of PAD ids
                # and append it so all inputs stay aligned.
                input_ids_padding = inputs_embeds.new_full(
                    (batch_size, padding_len),
                    self.config.pad_token_id,
                    dtype=torch.long,
                )
                inputs_embeds_padding = self.embeddings(input_ids_padding)
                inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)

            attention_mask = F.pad(attention_mask, (0, padding_len), value=False)  # no attention on the padding tokens
            token_type_ids = F.pad(token_type_ids, (0, padding_len), value=0)  # pad with token_type_id = 0

        return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
class BigBirdForPreTraining(BigBirdPreTrainedModel):
    # BigBird with both pre-training heads on top: masked-LM predictions over
    # the vocabulary and a sentence-relationship (NSP-style) classifier on the
    # pooled output.
    def __init__(self, config):
        super().__init__(config)

        self.bert = BigBirdModel(config, add_pooling_layer=True)
        self.cls = BigBirdPreTrainingHeads(config)

        self.init_weights()

    def get_output_embeddings(self):
        # The MLM head's decoder holds the output embedding matrix.
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
            Labels for computing the next sequence prediction (classification) loss. If specified, nsp loss will be
            added to masked_lm loss. Input should be a sequence pair (see :obj:`input_ids` docstring) Indices should be
            in ``[0, 1]``:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.

        Returns:

        Example::

            >>> from transformers import BigBirdTokenizer, BigBirdForPreTraining
            >>> import torch

            >>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
            >>> model = BigBirdForPreTraining.from_pretrained('google/bigbird-roberta-base')

            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)

            >>> prediction_logits = outputs.prediction_logits
            >>> seq_relationship_logits = outputs.seq_relationship_logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        total_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        # NOTE: the NSP loss is only added when an MLM loss was computed above
        # (`loss_fct` is also only defined in that branch); `next_sentence_label`
        # alone produces no loss.
        if next_sentence_label is not None and total_loss is not None:
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = total_loss + next_sentence_loss

        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return BigBirdForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings("""BigBird Model with a `language modeling` head on top. """, BIG_BIRD_START_DOCSTRING)
class BigBirdForMaskedLM(BigBirdPreTrainedModel):
    """BigBird encoder with a masked-language-modeling head over the hidden states."""

    def __init__(self, config):
        super().__init__(config)

        if config.is_decoder:
            logger.warning(
                "If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        self.bert = BigBirdModel(config)
        self.cls = BigBirdOnlyMLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        # The MLM head's decoder holds the output embedding matrix.
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring); tokens with indices set to ``-100`` are ignored
            (masked) and the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        prediction_scores = self.cls(encoder_outputs[0])

        masked_lm_loss = None
        if labels is not None:
            # -100 is CrossEntropyLoss's default ignore_index (unmasked/padding labels).
            masked_lm_loss = CrossEntropyLoss()(
                prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)
            )

        if return_dict:
            return MaskedLMOutput(
                loss=masked_lm_loss,
                logits=prediction_scores,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )
        output = (prediction_scores,) + encoder_outputs[2:]
        return output if masked_lm_loss is None else (masked_lm_loss,) + output

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        """Append one dummy PAD token (with a zeroed attention-mask column) for MLM-style generation."""
        assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"

        batch_size = input_ids.shape[0]
        attention_mask = torch.cat(
            [attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1
        )
        dummy_token = torch.full(
            (batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)

        return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """BigBird Model with a `language modeling` head on top for CLM fine-tuning. """, BIG_BIRD_START_DOCSTRING
)
class BigBirdForCausalLM(BigBirdPreTrainedModel):
    # Weights that may legitimately be absent from a checkpoint when loading.
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        if not config.is_decoder:
            logger.warning("If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True.`")

        self.bert = BigBirdModel(config)
        self.cls = BigBirdOnlyMLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        # The LM head's decoder holds the output embedding matrix.
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).

        Returns:

        Example::

            >>> from transformers import BigBirdTokenizer, BigBirdForCausalLM, BigBirdConfig
            >>> import torch

            >>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
            >>> config = BigBirdConfig.from_pretrained("google/bigbird-roberta-base")
            >>> config.is_decoder = True
            >>> model = BigBirdForCausalLM.from_pretrained('google/bigbird-roberta-base', config=config)

            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)

            >>> prediction_logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        """Prepare decoder inputs for one generation step (default mask, cache-aware input trimming)."""
        input_shape = input_ids.shape

        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]

        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}

    def _reorder_cache(self, past, beam_idx):
        # Reorder the cached states along the batch axis to follow the chosen
        # beams; only the first two entries per layer are reordered — the
        # remaining entries (layer_past[2:], presumably cross-attention states
        # that are identical across beams — confirm against the encoder cache
        # layout) are kept as-is.
        reordered_past = ()
        for layer_past in past:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
            )
        return reordered_past
class BigBirdClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

        self.config = config

    def forward(self, features, **kwargs):
        # Sentence representation = hidden state of the first token
        # (<s>, equivalent to [CLS]).
        cls_state = features[:, 0, :]
        cls_state = self.dense(self.dropout(cls_state))
        cls_state = ACT2FN[self.config.hidden_act](cls_state)
        return self.out_proj(self.dropout(cls_state))
@add_start_docstrings(
    """
    BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForSequenceClassification(BigBirdPreTrainedModel):
    """BigBird encoder plus a `BigBirdClassificationHead`; regression when ``num_labels == 1``."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BigBirdModel(config)
        self.classifier = BigBirdClassificationHead(config)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = self.classifier(encoder_outputs[0])

        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # Single label -> regression with mean-squared error.
                loss = MSELoss()(logits.view(-1), labels.view(-1))
            else:
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))

        if return_dict:
            return SequenceClassifierOutput(
                loss=loss,
                logits=logits,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )
        output = (logits,) + encoder_outputs[2:]
        return output if loss is None else (loss,) + output
@add_start_docstrings(
    """
    BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForMultipleChoice(BigBirdPreTrainedModel):
    """BigBird encoder scoring each candidate choice with a single-logit classifier."""

    def __init__(self, config):
        super().__init__(config)
        self.bert = BigBirdModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.classifier = nn.Linear(config.hidden_size, 1)

        self.init_weights()

    @add_start_docstrings_to_model_forward(
        BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
            :obj:`input_ids` above)
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        def _flatten(tensor):
            # (batch, num_choices, seq) -> (batch * num_choices, seq)
            return None if tensor is None else tensor.view(-1, tensor.size(-1))

        flat_input_ids = _flatten(input_ids)
        flat_attention_mask = _flatten(attention_mask)
        flat_token_type_ids = _flatten(token_type_ids)
        flat_position_ids = _flatten(position_ids)
        # Embeddings keep their trailing hidden dimension when flattened.
        flat_inputs_embeds = (
            None
            if inputs_embeds is None
            else inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
        )

        encoder_outputs = self.bert(
            flat_input_ids,
            attention_mask=flat_attention_mask,
            token_type_ids=flat_token_type_ids,
            position_ids=flat_position_ids,
            head_mask=head_mask,
            inputs_embeds=flat_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled = self.sequence_summary(encoder_outputs[0])
        # One logit per (example, choice), regrouped per example.
        reshaped_logits = self.classifier(pooled).view(-1, num_choices)

        loss = None
        if labels is not None:
            loss = CrossEntropyLoss()(reshaped_logits, labels)

        if return_dict:
            return MultipleChoiceModelOutput(
                loss=loss,
                logits=reshaped_logits,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )
        output = (reshaped_logits,) + encoder_outputs[2:]
        return output if loss is None else (loss,) + output
@add_start_docstrings(
    """
    BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForTokenClassification(BigBirdPreTrainedModel):
    """BigBird encoder with dropout and a per-token linear classifier on top."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BigBirdModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = self.classifier(self.dropout(encoder_outputs[0]))

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            flat_logits = logits.view(-1, self.num_labels)
            flat_labels = labels.view(-1)
            if attention_mask is None:
                loss = loss_fct(flat_logits, flat_labels)
            else:
                # Only positions with attention_mask == 1 contribute: labels at
                # masked positions are remapped to the loss's ignore_index.
                is_active = attention_mask.view(-1) == 1
                targets = torch.where(
                    is_active, flat_labels, torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(flat_logits, targets)

        if return_dict:
            return TokenClassifierOutput(
                loss=loss,
                logits=logits,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )
        output = (logits,) + encoder_outputs[2:]
        return output if loss is None else (loss,) + output
class BigBirdForQuestionAnsweringHead(nn.Module):
    """Head for question answering tasks."""

    def __init__(self, config):
        super().__init__()
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.intermediate = BigBirdIntermediate(config)
        self.output = BigBirdOutput(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_output):
        # Pass the (dropped-out) encoder output through the intermediate ->
        # output pair, feeding the raw encoder output as `output`'s second
        # argument (presumably the residual input — confirm against
        # BigBirdOutput), then project to the span logits.
        states = self.intermediate(self.dropout(encoder_output))
        states = self.output(states, encoder_output)
        return self.qa_outputs(states)
@add_start_docstrings(
    """
    BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForQuestionAnswering(BigBirdPreTrainedModel):
    """BigBird encoder (no pooling layer) plus a QA head predicting span start/end logits."""

    def __init__(self, config):
        super().__init__(config)

        # Extractive QA always predicts exactly two labels: span start and span end.
        config.num_labels = 2
        self.num_labels = config.num_labels
        self.sep_token_id = config.sep_token_id

        self.bert = BigBirdModel(config, add_pooling_layer=False)
        self.qa_classifier = BigBirdForQuestionAnsweringHead(config)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/bigbird-base-trivia-itc",
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        question_lengths=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        seqlen = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)

        if question_lengths is None and input_ids is not None:
            # assuming input_ids format: <cls> <question> <sep> context <sep>
            # question length = index of the first <sep> token + 1, per example
            question_lengths = torch.argmax(input_ids.eq(self.sep_token_id).int(), dim=-1) + 1
            question_lengths.unsqueeze_(1)

        logits_mask = None
        if question_lengths is not None:
            # Boolean mask of question-token positions; their span logits are pushed
            # towards -inf below so predicted spans can only fall in the context.
            logits_mask = self.prepare_question_mask(question_lengths, seqlen)
            if token_type_ids is None:
                # Derive token types from the mask: question tokens -> 0, context -> 1.
                token_type_ids = (~logits_mask).long()
            # NOTE(review): position 0 (<cls>) falls inside the question mask and is
            # therefore also excluded from span prediction — confirm this is intended.
            # (bsz, seqlen) -> (bsz, seqlen, 1) so the mask broadcasts over both logits.
            logits_mask.unsqueeze_(2)

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        logits = self.qa_classifier(sequence_output)

        if logits_mask is not None:
            # removing question tokens from the competition
            logits = logits - logits_mask * 1e6

        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)

            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    @staticmethod
    def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int):
        """Return a bool mask of shape (batch, maxlen): True where index < question length."""
        # q_lengths -> (bz, 1)
        mask = torch.arange(0, maxlen).to(q_lengths.device)
        mask.unsqueeze_(0)  # -> (1, maxlen)
        mask = mask < q_lengths
        return mask
| # coding=utf-8
# Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch BigBird model. """
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, SequenceSummary, apply_chunking_to_forward
from ...utils import logging
from .configuration_big_bird import BigBirdConfig
# Module-level logger for this model file.
logger = logging.get_logger(__name__)
# Identifiers consumed by the auto-generated docstrings / code-sample decorators below.
_CHECKPOINT_FOR_DOC = "google/bigbird-roberta-base"
_CONFIG_FOR_DOC = "BigBirdConfig"
_TOKENIZER_FOR_DOC = "BigBirdTokenizer"
# Canonical pretrained checkpoints published for BigBird.
BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/bigbird-roberta-base",
    "google/bigbird-roberta-large",
    "google/bigbird-base-trivia-itc",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
]
# Maps TriviaQA TensorFlow checkpoint scope names to the BERT-style scope names
# expected by `load_tf_weights_in_big_bird` (used when `is_trivia_qa=True`).
_TRIVIA_QA_MAPPING = {
    "big_bird_attention": "attention/self",
    "output_layer_norm": "output/LayerNorm",
    "attention_output": "attention/output/dense",
    "output": "output/dense",
    "self_attention_layer_norm": "attention/output/LayerNorm",
    "intermediate": "intermediate/dense",
    "word_embeddings": "bert/embeddings/word_embeddings",
    "position_embedding": "bert/embeddings/position_embeddings",
    "type_embeddings": "bert/embeddings/token_type_embeddings",
    "embeddings": "bert/embeddings",
    "layer_normalization": "output/LayerNorm",
    "layer_norm": "LayerNorm",
    "trivia_qa_head": "qa_classifier",
    "dense": "intermediate/dense",
    "dense_1": "qa_outputs",
}
def load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=False):
    """Load tf checkpoints in a pytorch model.

    Reads a TensorFlow checkpoint (either a standard BERT-style checkpoint, or a
    TriviaQA `tf.saved_model` when ``is_trivia_qa=True``), maps every TF variable
    name onto the corresponding PyTorch submodule of ``model``, and copies the
    weights in place. Returns ``model``.
    """
    def load_tf_weights_bert(init_vars, tf_path):
        # Standard checkpoint path: `init_vars` is a list of (name, shape) pairs.
        names = []
        tf_weights = {}
        for name, shape in init_vars:
            array = tf.train.load_variable(tf_path, name)
            # BigBird TF checkpoints store the embedding LayerNorm under the encoder scope.
            name = name.replace("bert/encoder/LayerNorm", "bert/embeddings/LayerNorm")
            logger.info(f"Loading TF weight {name} with shape {shape}")
            names.append(name)
            tf_weights[name] = array
        return names, tf_weights
    def load_tf_weights_trivia_qa(init_vars):
        # SavedModel path: `init_vars` is a list of live TF variables.
        names = []
        tf_weights = {}
        for i, var in enumerate(init_vars):
            name_items = var.name.split("/")
            # Scaffold scopes encode the layer index, e.g. `transformer_scaffold_3`.
            if "transformer_scaffold" in name_items[0]:
                layer_name_items = name_items[0].split("_")
                if len(layer_name_items) < 3:
                    layer_name_items += [0]
                name_items[0] = f"bert/encoder/layer_{layer_name_items[2]}"
            # Translate each scope component via _TRIVIA_QA_MAPPING, keep unknown ones.
            name = "/".join([_TRIVIA_QA_MAPPING[x] if x in _TRIVIA_QA_MAPPING else x for x in name_items])[
                :-2
            ]  # remove last :0 in variable
            if "self/attention/output" in name:
                name = name.replace("self/attention/output", "output")
            # The final two saved variables belong to the QA head, not an encoder layer.
            if i >= len(init_vars) - 2:
                name = name.replace("intermediate", "output")
            logger.info(f"Loading TF weight {name} with shape {var.shape}")
            array = var.value().numpy()
            names.append(name)
            tf_weights[name] = array
        return names, tf_weights
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.saved_model.load(tf_path).variables if is_trivia_qa else tf.train.list_variables(tf_path)
    assert len(init_vars) > 0, "Loaded trained variables cannot be empty."
    # Track PyTorch parameter names so we can report any left uninitialized.
    pt_names = list(model.state_dict().keys())
    if is_trivia_qa:
        names, tf_weights = load_tf_weights_trivia_qa(init_vars)
    else:
        names, tf_weights = load_tf_weights_bert(init_vars, tf_path)
    for txt_name in names:
        array = tf_weights[txt_name]
        name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        # Walk the PyTorch module tree one TF scope component at a time; `pointer`
        # ends on the tensor to overwrite, `pt_name` records the dotted PT path.
        pointer = model
        pt_name = []
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                # e.g. `layer_5` -> ["layer", "5"]: name part then list index.
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
                pt_name.append("weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
                pt_name.append("bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
                pt_name.append("weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
                pt_name.append("classifier")
            elif scope_names[0] == "transform":
                pointer = getattr(pointer, "transform")
                pt_name.append("transform")
                if ("bias" in name) or ("kernel" in name):
                    pointer = getattr(pointer, "dense")
                    pt_name.append("dense")
                elif ("beta" in name) or ("gamma" in name):
                    pointer = getattr(pointer, "LayerNorm")
                    pt_name.append("LayerNorm")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                    pt_name.append(f"{scope_names[0]}")
                except AttributeError:
                    logger.info(f"Skipping {m_name}")
                    continue
            if len(scope_names) >= 2:
                # Indexed scope (e.g. encoder layer number): descend into the ModuleList.
                num = int(scope_names[1])
                pointer = pointer[num]
                pt_name.append(f"{num}")
        if m_name[-11:] == "_embeddings" or m_name == "embeddings":
            pointer = getattr(pointer, "weight")
            pt_name.append("weight")
        elif m_name == "kernel":
            # TF dense kernels are stored transposed relative to nn.Linear.weight.
            array = np.transpose(array)
        try:
            if len(array.shape) > len(pointer.shape) and math.prod(array.shape) == math.prod(pointer.shape):
                # print(txt_name, array.shape)
                # Per-head TF layouts are reshaped/transposed into flat PT layouts.
                if (
                    txt_name.endswith("attention/self/key/kernel")
                    or txt_name.endswith("attention/self/query/kernel")
                    or txt_name.endswith("attention/self/value/kernel")
                ):
                    array = array.transpose(1, 0, 2).reshape(pointer.shape)
                elif txt_name.endswith("attention/output/dense/kernel"):
                    array = array.transpose(0, 2, 1).reshape(pointer.shape)
                else:
                    array = array.reshape(pointer.shape)
            if pointer.shape != array.shape:
                raise ValueError(
                    f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched of {txt_name}."
                )
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        pt_weight_name = ".".join(pt_name)
        logger.info(f"Initialize PyTorch weight {pt_weight_name} from {txt_name}.")
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(txt_name, None)
        pt_names.remove(pt_weight_name)
    # Report both directions of any mismatch between checkpoint and model.
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
    logger.info(f"Weights not initialized in PyTorch model: {', '.join(pt_names)}.")
    return model
class BigBirdEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        # Submodule creation order is kept stable (word, position, token_type,
        # LayerNorm, dropout) so seeded weight initialization is reproducible.
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.rescale_embeddings = config.rescale_embeddings
        self.hidden_size = config.hidden_size

    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        seq_length = shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if self.rescale_embeddings:
            # Scale token embeddings by sqrt(hidden_size).
            inputs_embeds = inputs_embeds * (self.hidden_size ** 0.5)

        # Sum the three embedding kinds, then — unlike BERT — apply dropout
        # BEFORE LayerNorm (this ordering is intentional in BigBird).
        embeddings = inputs_embeds + self.token_type_embeddings(token_type_ids)
        embeddings = embeddings + self.position_embeddings(position_ids)
        embeddings = self.dropout(embeddings)
        return self.LayerNorm(embeddings)
class BigBirdSelfAttention(nn.Module):
    """Full (quadratic) multi-head self-attention in the BERT style.

    Supports cross-attention (keys/values from `encoder_hidden_states`) and
    decoder-style key/value caching via `past_key_value`.
    """
    def __init__(self, config):
        super().__init__()
        # `embedding_size` presence signals a separate input projection upstream,
        # in which case the divisibility check is skipped.
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.is_decoder = config.is_decoder
    def transpose_for_scores(self, x):
        # (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Self-attention with cache: append new keys/values along the seq dim.
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BigBirdModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = F.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # Merge heads back: (batch, heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
class BigBirdBlockSparseAttention(nn.Module):
def __init__(self, config, seed=None):
super().__init__()
self.max_seqlen = config.max_position_embeddings
self.seed = seed
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.num_attention_heads = config.num_attention_heads
self.num_random_blocks = config.num_random_blocks
self.block_size = config.block_size
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
band_mask=None,
from_mask=None,
to_mask=None,
from_blocked_mask=None,
to_blocked_mask=None,
output_attentions=None,
):
# Currently this `class` can't be used in decoder.
batch_size, seqlen, _ = hidden_states.size()
to_seq_length = from_seq_length = seqlen
from_block_size = to_block_size = self.block_size
assert from_seq_length % from_block_size == 0, "Query sided sequence length must be multiple of block size"
assert to_seq_length % to_block_size == 0, "Key/Value sided sequence length must be multiple of block size"
query_layer = self.transpose_for_scores(self.query(hidden_states))
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
context_layer, attention_probs = self.bigbird_block_sparse_attention(
query_layer,
key_layer,
value_layer,
band_mask,
from_mask,
to_mask,
from_blocked_mask,
to_blocked_mask,
self.num_attention_heads,
self.num_random_blocks,
self.attention_head_size,
from_block_size,
to_block_size,
batch_size,
from_seq_length,
to_seq_length,
seed=self.seed,
plan_from_length=None,
plan_num_rand_blocks=None,
output_attentions=output_attentions,
)
context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
@staticmethod
def torch_bmm_nd(inp_1, inp_2, ndim=None):
""" Fast nd matrix multiplication """
# faster replacement of torch.einsum ("bhqk,bhkd->bhqd")
return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view(
inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1])
)
@staticmethod
def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None):
""" Fast nd matrix multiplication with transpose """
# faster replacement of torch.einsum (bhqd,bhkd->bhqk)
return torch.bmm(
inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2)
).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2]))
def bigbird_block_sparse_attention(
self,
query_layer,
key_layer,
value_layer,
band_mask,
from_mask,
to_mask,
from_blocked_mask,
to_blocked_mask,
n_heads,
n_rand_blocks,
attention_head_size,
from_block_size,
to_block_size,
batch_size,
from_seq_len,
to_seq_len,
seed,
plan_from_length,
plan_num_rand_blocks,
output_attentions,
):
# BigBird block-sparse attention as suggested in paper
# ITC:
# global tokens: 2 x block_size
# window tokens: 3 x block_size
# random tokens: num_rand_tokens x block_size
# ETC:
# global tokens: extra_globals_tokens + 2 x block_size
# window tokens: 3 x block_size
# random tokens: num_rand_tokens x block_size
# Note:
# 1) Currently, ETC is not supported.
# 2) Window size is fixed to 3 blocks & it can be changed only by
# changing `block_size`.
# 3) Number of global blocks are fixed (2 blocks here) & global tokens can be
# controlled only by `block_size`.
# attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of shifting tokens (for calculating sliding attention)
# hence following code can be divided into 5 parts.
if from_seq_len // from_block_size != to_seq_len // to_block_size:
raise ValueError("Error the number of blocks needs to be same!")
rsqrt_d = 1 / math.sqrt(attention_head_size)
bsz = batch_size
# generate random attention and corresponding masks
np.random.seed(seed)
if from_seq_len in [1024, 3072, 4096]: # old plans used in paper
rand_attn = [
self._bigbird_block_rand_mask(
self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024
)[: (from_seq_len // from_block_size - 2)]
for _ in range(n_heads)
]
else:
if plan_from_length is None:
plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan(
from_seq_len, from_block_size, n_rand_blocks
)
rand_attn = self._bigbird_block_rand_mask_with_head(
from_seq_length=from_seq_len,
to_seq_length=to_seq_len,
from_block_size=from_block_size,
to_block_size=to_block_size,
num_heads=n_heads,
plan_from_length=plan_from_length,
plan_num_rand_blocks=plan_num_rand_blocks,
)
rand_attn = np.stack(rand_attn, axis=0)
rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long)
rand_attn.unsqueeze_(0)
rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0)
rand_mask = self._create_rand_mask_from_inputs(
from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size
)
blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1)
blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
# preparing block for randn attn
gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn)
gathered_key = gathered_key.view(
bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]
gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn)
gathered_value = gathered_value.view(
bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]
# 1st PART
# 1st block (global block) attention scores
# q[0] x (k[0], k[1], k[2], k[3], k[4] .... )
# [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]
first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4)
first_product = first_product * rsqrt_d
first_product += (1.0 - to_mask) * -10000.0
first_attn_weights = F.softmax(first_product, dim=-1) # [bsz, n_heads, from_block_size, to_seq_len]
# [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]
first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4)
first_context_layer.unsqueeze_(2)
# 2nd PART
# 2nd block attention scores
# q[1] x (sliding_keys, random_keys, global_keys)
# sliding key blocks -> 2nd, 3rd blocks
# global key blocks -> 1st block
second_key_mat = torch.cat(
[
blocked_key_matrix[:, :, 0],
blocked_key_matrix[:, :, 1],
blocked_key_matrix[:, :, 2],
blocked_key_matrix[:, :, -1],
gathered_key[:, :, 0],
],
dim=2,
) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]
second_value_mat = torch.cat(
[
blocked_value_matrix[:, :, 0],
blocked_value_matrix[:, :, 1],
blocked_value_matrix[:, :, 2],
blocked_value_matrix[:, :, -1],
gathered_value[:, :, 0],
],
dim=2,
) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]
# [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4)
second_seq_pad = torch.cat(
[
to_mask[:, :, :, : 3 * to_block_size],
to_mask[:, :, :, -to_block_size:],
first_context_layer.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
],
dim=3,
)
second_rand_pad = torch.cat(
[
first_context_layer.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
rand_mask[:, :, 0],
],
dim=3,
)
second_product = second_product * rsqrt_d
second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * -10000.0
second_attn_weights = F.softmax(
second_product, dim=-1
) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
# [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]
second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4)
second_context_layer.unsqueeze_(2)
# 3rd PART
# Middle blocks attention scores
# q[-2:2] x (sliding_keys, random_keys, global_keys)
# sliding attn is calculated using special trick of shifting tokens as discussed in paper
# random keys are generated by taking random indices as per `rand_attn`
# global keys -> 1st & last block
exp_blocked_key_matrix = torch.cat(
[blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3
) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
exp_blocked_value_matrix = torch.cat(
[blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]],
dim=3,
) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
middle_query_matrix = blocked_query_matrix[:, :, 2:-2]
# sliding attention scores for q[-2:2]
# [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5)
# ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, 3*to_block_size]
inner_band_product = inner_band_product * rsqrt_d
# randn attention scores for q[-2:2]
# [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]
rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5)
# ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size]
rand_band_product = rand_band_product * rsqrt_d
# Including 1st block (since it's global)
first_band_product = torch.einsum(
"bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0]
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
first_band_product = first_band_product * rsqrt_d
# Including last block (since it's global)
last_band_product = torch.einsum(
"bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1]
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
last_band_product = last_band_product * rsqrt_d
# masking padded tokens
inner_band_product += (1.0 - band_mask) * -10000.0
first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * -10000.0
last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * -10000.0
rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * -10000.0
# completing attention scores matrix for all q[-2:2]
band_product = torch.cat(
[first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]
# safely doing softmax since attention matrix is completed
attn_weights = F.softmax(
band_product, dim=-1
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]
# contibution of sliding keys
# [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
context_layer = self.torch_bmm_nd(
attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5
)
# ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
# adding contribution of random keys
# [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]
context_layer += self.torch_bmm_nd(
attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5
)
# ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
# adding contribution of global keys
context_layer += torch.einsum(
"bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0]
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
context_layer += torch.einsum(
"bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1]
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
# 4th PART
# last 2nd token attention scores
# q[-2] x (sliding_keys, random_keys, global_keys)
# sliding key blocks -> last 3 blocks
# global key block -> 1st block
# random key block -> based on indices stored in `randn_attn`
second_last_key_mat = torch.cat(
[
blocked_key_matrix[:, :, 0],
blocked_key_matrix[:, :, -3],
blocked_key_matrix[:, :, -2],
blocked_key_matrix[:, :, -1],
gathered_key[:, :, -1],
],
dim=2,
) # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1]
second_last_value_mat = torch.cat(
[
blocked_value_matrix[:, :, 0],
blocked_value_matrix[:, :, -3],
blocked_value_matrix[:, :, -2],
blocked_value_matrix[:, :, -1],
gathered_value[:, :, -1],
],
dim=2,
) # [bsz, n_heads, (4+r)*to_block_size, -1]
# [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4)
second_last_seq_pad = torch.cat(
[
to_mask[:, :, :, :to_block_size],
to_mask[:, :, :, -3 * to_block_size :],
context_layer.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
],
dim=3,
)
second_last_rand_pad = torch.cat(
[
context_layer.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
rand_mask[:, :, -1],
],
dim=3,
)
second_last_product = second_last_product * rsqrt_d
second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * -10000.0
second_last_attn_weights = F.softmax(
second_last_product, dim=-1
) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
# [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]
second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4)
second_last_context_layer.unsqueeze_(2)
# 5th PART
# last block (global) attention scores
# q[-1] x (k[0], k[1], k[2], k[3], .... )
# [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]
last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4)
last_product = last_product * rsqrt_d
last_product += (1.0 - to_mask) * -10000.0
last_attn_weights = F.softmax(last_product, dim=-1) # [bsz, n_heads, from_block_size, n]
# [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]
last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4)
last_context_layer.unsqueeze_(2)
# combining representations of all tokens
context_layer = torch.cat(
[first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer],
dim=2,
)
context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask
context_layer = torch.transpose(context_layer, 1, 2)
# this is just for visualizing; forward pass doesn't depend on following code
if output_attentions:
# TODO(PVP): need to verify if below code is correct
attention_probs = torch.zeros(
bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device
)
# 1st query block
# corresponding to `first_context_layer`
attention_probs[:, :, :from_block_size, :] = first_attn_weights # all keys global
# 2nd query block
# corresponding to `second_context_layer`
attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[
:, :, :, : 3 * to_block_size
] # 1st three key blocks (global + sliding)
attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[
:, :, :, 3 * to_block_size : 4 * to_block_size
] # last key block (global)
# random keys
for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights):
# p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
for p2, i2, w2 in zip(range(n_heads), i1, w1):
# p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
attn_probs_view = attention_probs.view(
bsz,
n_heads,
from_seq_len // from_block_size,
from_block_size,
to_seq_len // to_block_size,
to_block_size,
)
right_slice = w2[:, 4 * to_block_size :]
attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view(
from_block_size, n_rand_blocks, to_block_size
)
# Middle query blocks
# corresponding to `context_layer`
# sliding keys
for q_idx in range(from_seq_len // from_block_size - 4):
attn_probs_view = attention_probs.view(
bsz,
n_heads,
from_seq_len // from_block_size,
from_block_size,
to_seq_len // to_block_size,
to_block_size,
)[:, :, 2:-2, :, 1:-1, :]
right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size]
attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view(
bsz, n_heads, from_block_size, 3, to_block_size
) # inner_band_product
# global keys (correspomding to 1st key block)
attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[
:, :, :, :, :to_block_size
].view(
bsz, n_heads, -1, to_block_size
) # first_band_product
# global keys (corresponding to last key block)
attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[
:, :, :, :, -to_block_size:
].view(
bsz, n_heads, -1, to_block_size
) # last_band_product
# random keys
for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights):
# p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
for p2, i2, w2 in zip(range(n_heads), i1, w1):
# p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
for q_idx in range(1, len(i2) - 1):
attn_probs_view = attention_probs.view(
bsz,
n_heads,
from_seq_len // from_block_size,
from_block_size,
to_seq_len // to_block_size,
to_block_size,
)
right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size]
attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view(
from_block_size, n_rand_blocks, to_block_size
)
# Second-last query block
# corresponding to `second_last_context_layer`
attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[
:, :, :, :to_block_size
] # 1st key block (global)
attention_probs[
:, :, -2 * from_block_size : -from_block_size, -3 * to_block_size :
] = second_last_attn_weights[
:, :, :, to_block_size : 4 * to_block_size
] # last three blocks (global + sliding)
# random keys
for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights):
# p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
for p2, i2, w2 in zip(range(n_heads), i1, w1):
# p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
attn_probs_view = attention_probs.view(
bsz,
n_heads,
from_seq_len // from_block_size,
from_block_size,
to_seq_len // to_block_size,
to_block_size,
)
right_slice = w2[:, 4 * to_block_size :]
attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view(
from_block_size, n_rand_blocks, to_block_size
)
# last query block
# corresponding to `last_context_layer`
attention_probs[:, :, -from_block_size:, :] = last_attn_weights # all keys global
else:
attention_probs = None
return context_layer, attention_probs
@staticmethod
def torch_gather_b2(params, indices):
# this operation is equilvalent to tf.gather when batch_dims=2
if params.shape[:2] != indices.shape[:2]:
raise ValueError(
f"Make sure that the first two dimensions of params and indices are identical, \
but they are params: {params.shape[:2]} vs. indices: {params.shape[:2]}"
)
num_indices_to_gather = indices.shape[-2] * indices.shape[-1]
num_indices_to_pick_from = params.shape[2]
indices_shift = (
torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device)
// num_indices_to_gather
* num_indices_to_pick_from
)
flattened_indices = indices.view(-1) + indices_shift
flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1])
out_flattened = flattened_params.index_select(0, flattened_indices)
out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:])
return out
@staticmethod
def _create_rand_mask_from_inputs(
from_blocked_mask,
to_blocked_mask,
rand_attn,
num_attention_heads,
num_rand_blocks,
batch_size,
from_seq_length,
from_block_size,
):
"""
Create 3D attention mask from a 2D tensor mask.
Args:
from_blocked_mask: 2D Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size].
to_blocked_mask: int32 Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size].
rand_attn: [batch_size, num_attention_heads,
from_seq_length//from_block_size-2, num_rand_blocks]
num_attention_heads: int. Number of attention heads.
num_rand_blocks: int. Number of random chunks per row.
batch_size: int. Batch size for computation.
from_seq_length: int. length of from sequence.
from_block_size: int. size of block in from sequence.
Returns:
float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2,
from_block_size, num_rand_blocks*to_block_size].
"""
num_windows = from_seq_length // from_block_size - 2
rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)])
rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size)
rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask)
return rand_mask
@staticmethod
def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks):
"""
Gives the plan of where to put random attention.
Args:
from_seq_length: int. length of from sequence.
from_block_size: int. size of block in from sequence.
num_rand_blocks: int. Number of random chunks per row.
Returns:
plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for
each block
"""
plan_from_length = []
plan_num_rand_blocks = []
if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size):
plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size))
plan_num_rand_blocks.append(num_rand_blocks)
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(0)
elif (num_rand_blocks + 5) < (from_seq_length // from_block_size):
plan_from_length.append(int((num_rand_blocks + 5) * from_block_size))
plan_num_rand_blocks.append(num_rand_blocks // 2)
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2))
else:
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(num_rand_blocks)
return plan_from_length, plan_num_rand_blocks
    @staticmethod
    def _bigbird_block_rand_mask(
        from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1
    ):
        """
        Create adjacency list of random attention.

        Args:
            from_seq_length: int. length of from sequence.
            to_seq_length: int. length of to sequence.
            from_block_size: int. size of block in from sequence.
            to_block_size: int. size of block in to sequence.
            num_rand_blocks: int. Number of random chunks per row.
            last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence,
            if positive then num_rand_blocks blocks chosen only up to last_idx.

        Returns:
            adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks
        """
        # using this method when from_seq_length in [1024, 3072, 4096]

        assert (
            from_seq_length // from_block_size == to_seq_length // to_block_size
        ), "Error the number of blocks needs to be same!"

        # One row per "middle" from-block; the first and last blocks attend globally
        # and therefore need no random blocks.
        rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)
        # Candidate to-blocks: everything except the first and last (global) blocks.
        middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)
        last = to_seq_length // to_block_size - 1
        if last_idx > (2 * to_block_size):
            # Restrict candidates to blocks strictly before last_idx.
            last = (last_idx // to_block_size) - 1

        r = num_rand_blocks  # shorthand
        for i in range(1, from_seq_length // from_block_size - 1):
            # start/end bracket row i's sliding-window neighbourhood, which must be
            # excluded from the random picks in the generic branch below.
            start = i - 2
            end = i
            if i == 1:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]
            elif i == 2:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]
            elif i == from_seq_length // from_block_size - 3:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
                # Missing -3: should have been sliced till last-3
                # NOTE(review): quirk kept on purpose — it matches the original TF
                # implementation, so "fixing" it would change existing random patterns.
            elif i == from_seq_length // from_block_size - 2:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
                # Missing -4: should have been sliced till last-4 (same TF-parity quirk).
            else:
                if start > last:
                    start = last
                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
                elif (end + 1) == last:
                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
                else:
                    # Pick from everything before and after the sliding window.
                    rand_attn[i - 1, :] = np.random.permutation(
                        np.concatenate((middle_seq[:start], middle_seq[end + 1 : last]))
                    )[:r]
        return rand_attn
    def _bigbird_block_rand_mask_with_head(
        self,
        from_seq_length,
        to_seq_length,
        from_block_size,
        to_block_size,
        num_heads,
        plan_from_length,
        plan_num_rand_blocks,
        window_block_left=1,
        window_block_right=1,
        global_block_top=1,
        global_block_bottom=1,
        global_block_left=1,
        global_block_right=1,
    ):
        """
        Create adjacency list of random attention.

        Args:
            from_seq_length: int. length of from sequence.
            to_seq_length: int. length of to sequence.
            from_block_size: int. size of block in from sequence.
            to_block_size: int. size of block in to sequence.
            num_heads: int. total number of heads.
            plan_from_length: list. plan from length where num_random_blocks are chosen from.
            plan_num_rand_blocks: list. number of rand blocks within the plan.
            window_block_left: int. number of blocks of window to left of a block.
            window_block_right: int. number of blocks of window to right of a block.
            global_block_top: int. number of blocks at the top.
            global_block_bottom: int. number of blocks at the bottom.
            global_block_left: int. Number of blocks globally used to the left.
            global_block_right: int. Number of blocks globally used to the right.

        Returns:
            adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by
            num_rand_blocks
        """
        # using this method when from_seq_length not in [1024, 3072, 4096]

        assert (
            from_seq_length // from_block_size == to_seq_length // to_block_size
        ), "Error the number of blocks needs to be same!"

        assert from_seq_length in plan_from_length, "Error from sequence length not in plan!"

        # Total number of blocks in the mask
        num_blocks = from_seq_length // from_block_size
        # Number of blocks per plan segment (cumulative end positions, in blocks)
        plan_block_length = np.array(plan_from_length) // from_block_size
        # till when to follow plan
        max_plan_idx = plan_from_length.index(from_seq_length)
        # Random attention adjacency list: one [num_blocks, total_rand_blocks] array per head
        rand_attn = [
            np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32)
            for i in range(num_heads)
        ]

        # We will go iteratively over the plan blocks and pick random number of
        # Attention blocks from the legally allowed blocks
        for plan_idx in range(max_plan_idx + 1):
            rnd_r_cnt = 0
            if plan_idx > 0:
                # set the row for all from_blocks starting from 0 to
                # plan_block_length[plan_idx-1]
                # column indx start from plan_block_length[plan_idx-1] and ends at
                # plan_block_length[plan_idx]
                if plan_num_rand_blocks[plan_idx] > 0:
                    # rnd_r_cnt/curr_r_cnt are the column bounds for this segment's
                    # random-block slots (cumulative over earlier segments).
                    rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                    curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
                    for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]):
                        for h in range(num_heads):
                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                                block_id=blk_rw_idx,
                                to_start_block_id=plan_block_length[plan_idx - 1],
                                to_end_block_id=plan_block_length[plan_idx],
                                num_rand_blocks=plan_num_rand_blocks[plan_idx],
                                window_block_left=window_block_left,
                                window_block_right=window_block_right,
                                global_block_left=global_block_left,
                                global_block_right=global_block_right,
                            )

                # Fill in the columns of earlier plan segments for the rows belonging
                # to the current segment.
                for pl_id in range(plan_idx):
                    if plan_num_rand_blocks[pl_id] == 0:
                        continue
                    for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]):
                        rnd_r_cnt = 0
                        to_start_block_id = 0
                        if pl_id > 0:
                            rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id]))
                            to_start_block_id = plan_block_length[pl_id - 1]
                        curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1]))
                        for h in range(num_heads):
                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                                block_id=blk_rw_idx,
                                to_start_block_id=to_start_block_id,
                                to_end_block_id=plan_block_length[pl_id],
                                num_rand_blocks=plan_num_rand_blocks[pl_id],
                                window_block_left=window_block_left,
                                window_block_right=window_block_right,
                                global_block_left=global_block_left,
                                global_block_right=global_block_right,
                            )

            if plan_num_rand_blocks[plan_idx] == 0:
                continue
            # Fill this segment's own columns for this segment's rows.
            curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
            from_start_block_id = global_block_top
            to_start_block_id = 0
            if plan_idx > 0:
                rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                from_start_block_id = plan_block_length[plan_idx - 1]
                to_start_block_id = plan_block_length[plan_idx - 1]

            for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]):
                for h in range(num_heads):
                    rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                        block_id=blk_rw_idx,
                        to_start_block_id=to_start_block_id,
                        to_end_block_id=plan_block_length[plan_idx],
                        num_rand_blocks=plan_num_rand_blocks[plan_idx],
                        window_block_left=window_block_left,
                        window_block_right=window_block_right,
                        global_block_left=global_block_left,
                        global_block_right=global_block_right,
                    )

        # Drop the global rows at the top and bottom; those blocks attend everywhere anyway.
        for nh in range(num_heads):
            rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]

        return rand_attn
@staticmethod
def _get_single_block_row_attention(
block_id,
to_start_block_id,
to_end_block_id,
num_rand_blocks,
window_block_left=1,
window_block_right=1,
global_block_left=1,
global_block_right=1,
):
"""
For a single row block get random row attention.
Args:
block_id: int. block id of row.
to_start_block_id: int. random attention coloum start id.
to_end_block_id: int. random attention coloum end id.
num_rand_blocks: int. number of random blocks to be selected.
window_block_left: int. number of blocks of window to left of a block.
window_block_right: int. number of blocks of window to right of a block.
global_block_left: int. Number of blocks globally used to the left.
global_block_right: int. Number of blocks globally used to the right.
Returns:
row containing the random attention vector of size num_rand_blocks.
"""
# list of to_blocks from which to choose random attention
to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32)
# permute the blocks
perm_block = np.random.permutation(to_block_list)
# illegal blocks for the current block id, using window
illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1))
# Add blocks at the start and at the end
illegal_blocks.extend(list(range(global_block_left)))
illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id)))
# The second from_block cannot choose random attention on second last to_block
if block_id == 1:
illegal_blocks.append(to_end_block_id - 2)
# The second last from_block cannot choose random attention on second to_block
if block_id == to_end_block_id - 2:
illegal_blocks.append(1)
selected_random_blokcs = []
for i in range(to_end_block_id - to_start_block_id):
if perm_block[i] not in illegal_blocks:
selected_random_blokcs.append(perm_block[i])
if len(selected_random_blokcs) == num_rand_blocks:
break
return np.array(selected_random_blokcs, dtype=np.int32)
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BigBird
class BigBirdSelfOutput(nn.Module):
    """Projects self-attention output back to hidden size, then dropout and a residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # Project, regularize, then normalize around the residual connection.
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BigBirdAttention(nn.Module):
    """
    Wrapper that owns the self-attention module (full or block-sparse, switchable at
    runtime) and its output projection.
    """

    def __init__(self, config, seed=None):
        super().__init__()
        self.attention_type = config.attention_type
        self.config = config
        # `seed` is forwarded to the block-sparse attention for reproducible random patterns.
        self.seed = seed

        if self.config.attention_type == "original_full":
            self.self = BigBirdSelfAttention(config)
        elif self.config.attention_type == "block_sparse":
            self.self = BigBirdBlockSparseAttention(config, seed)
        else:
            raise ValueError(
                f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}"
            )

        self.output = BigBirdSelfOutput(config)

    def set_attention_type(self, value: str):
        """Switch between 'original_full' and 'block_sparse', carrying the Q/K/V weights over."""
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return

        if value == "original_full":
            # copy all weights to new full attention class
            attn_weights = BigBirdSelfAttention(self.config)
        else:
            # copy all weights to new sparse attention class
            attn_weights = BigBirdBlockSparseAttention(self.config, self.seed)

        attn_weights.query = self.self.query
        attn_weights.value = self.self.value
        attn_weights.key = self.self.key
        self.self = attn_weights
        # Only record the new type once the swap fully succeeded, so a failure while
        # constructing the replacement module cannot leave the object inconsistent.
        # (The original assigned `self.attention_type` twice, once before the swap.)
        self.attention_type = value

        if not self.training:
            self.self.eval()

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        # block_sparse config
        band_mask=None,
        from_mask=None,
        to_mask=None,
        from_blocked_mask=None,
        to_blocked_mask=None,
    ):
        """Run the configured attention, then the output projection; returns (output, *extras)."""
        if self.attention_type == "original_full":
            self_outputs = self.self(
                hidden_states,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                past_key_value,
                output_attentions,
            )
        else:
            # Block-sparse attention has no cross-attention path, so it cannot be
            # used when encoder hidden states are supplied (decoder usage).
            assert (
                encoder_hidden_states is None
            ), "BigBird cannot be used as a decoder when config.attention_type != 'original_full'"
            self_outputs = self.self(
                hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions
            )

        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BigBird
class BigBirdIntermediate(nn.Module):
    """First feed-forward sublayer: expand to the intermediate size and apply the activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # A string names an entry in ACT2FN; any other value is assumed callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BigBird
class BigBirdOutput(nn.Module):
    """Second feed-forward sublayer: project back to hidden size, dropout, residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # Project down, regularize, then normalize around the residual connection.
        return self.LayerNorm(self.dropout(self.dense(hidden_states)) + input_tensor)
class BigBirdLayer(nn.Module):
    """One transformer layer: (self-)attention, optional cross-attention, and chunked feed-forward."""

    def __init__(self, config, seed=None):
        super().__init__()
        self.config = config
        self.attention_type = config.attention_type
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        # `seed` makes the block-sparse random attention pattern reproducible per layer.
        self.attention = BigBirdAttention(config, seed=seed)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
            self.crossattention = BigBirdAttention(config)
        self.intermediate = BigBirdIntermediate(config)
        self.output = BigBirdOutput(config)

    def set_attention_type(self, value: str):
        # Propagate the attention-type switch to self- and (if present) cross-attention.
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        self.attention.set_attention_type(value)

        if self.add_cross_attention:
            self.crossattention.set_attention_type(value)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        band_mask=None,
        from_mask=None,
        to_mask=None,
        blocked_encoder_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_value=self_attn_past_key_value,
            output_attentions=output_attentions,
            band_mask=band_mask,
            from_mask=from_mask,
            to_mask=to_mask,
            from_blocked_mask=blocked_encoder_mask,
            to_blocked_mask=blocked_encoder_mask,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with \
                    cross-attention layers by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # Feed-forward runs in chunks along the sequence dimension to bound peak memory.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        # Intermediate expansion + output projection for one chunk of the sequence.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class BigBirdEncoder(nn.Module):
    """Stack of BigBird layers with optional gradient checkpointing and decoder cache handling."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.attention_type = config.attention_type

        # Each layer receives its index as the RNG seed, so block-sparse random
        # attention patterns differ from layer to layer but stay reproducible.
        self.layer = nn.ModuleList(
            [BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )

    def set_attention_type(self, value: str):
        # Switch every layer between 'original_full' and 'block_sparse'.
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        for layer in self.layer:
            layer.set_attention_type(value)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        band_mask=None,
        from_mask=None,
        to_mask=None,
        blocked_encoder_mask=None,
        return_dict=True,
    ):
        # Accumulators are only allocated when the corresponding outputs are requested.
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if getattr(self.config, "gradient_checkpointing", False) and self.training:

                if use_cache:
                    # Checkpointing recomputes forward activations, which is incompatible
                    # with returning a key/value cache.
                    logger.warn(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    # Close over `past_key_value` and `output_attentions`, which
                    # torch.utils.checkpoint cannot pass through as tensor arguments.
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    band_mask,
                    from_mask,
                    to_mask,
                    blocked_encoder_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    band_mask,
                    from_mask,
                    to_mask,
                    blocked_encoder_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                # When decoding, each layer returns its key/value cache as the last element.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            # Tuple form: drop the entries that were not requested (None).
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->BigBird
class BigBirdPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm transform applied before the LM decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # A string selects a function from ACT2FN; any other value is assumed callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BigBird
class BigBirdLMPredictionHead(nn.Module):
    """Masked-LM head: transform hidden states, then decode to vocabulary logits."""

    def __init__(self, config):
        super().__init__()
        self.transform = BigBirdPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BigBird
class BigBirdOnlyMLMHead(nn.Module):
    """Thin wrapper exposing only the masked-LM prediction head."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BigBirdLMPredictionHead(config)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->BigBird
class BigBirdOnlyNSPHead(nn.Module):
    """Binary next-sentence-prediction classifier over the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->BigBird
class BigBirdPreTrainingHeads(nn.Module):
    """Joint pre-training heads: masked-LM logits plus next-sentence logits."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BigBirdLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class BigBirdPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BigBirdConfig
    load_tf_weights = load_tf_weights_in_big_bird
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights of a single submodule."""
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
        elif isinstance(module, nn.Embedding) and module.padding_idx is not None:
            # Keep the padding embedding at exactly zero after the normal init above.
            module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
BIG_BIRD_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config (:class:`~transformers.BigBirdConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BIG_BIRD_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BigBirdTokenizer`. See
:func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@dataclass
class BigBirdForPreTrainingOutput(ModelOutput):
    """
    Output type of :class:`~transformers.BigBirdForPreTraining`.
    Args:
        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@add_start_docstrings(
    "The bare BigBird Model transformer outputting raw hidden-states without any specific head on top.",
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdModel(BigBirdPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        # Either "original_full" or "block_sparse"; may be switched later via `set_attention_type`.
        self.attention_type = self.config.attention_type
        self.config = config

        self.block_size = self.config.block_size

        self.embeddings = BigBirdEmbeddings(config)
        self.encoder = BigBirdEncoder(config)

        if add_pooling_layer:
            self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
            self.activation = nn.Tanh()
        else:
            self.pooler = None
            self.activation = None

        # Cross-attention (decoder use) is only supported with full attention, so force it.
        if self.attention_type != "original_full" and config.add_cross_attention:
            logger.warning(
                "When using `BigBirdForCausalLM` as decoder, then `attention_type` must be `original_full`. Setting `attention_type=original_full`"
            )
            self.set_attention_type("original_full")

        self.init_weights()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def set_attention_type(self, value: str):
        """Switch the attention implementation ("original_full" or "block_sparse") and propagate to the encoder."""
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        self.encoder.set_attention_type(value)

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Caching only makes sense in decoder mode.
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # in order to use block_sparse attention, sequence_length has to be at least
        # bigger than all global attentions: 2 * block_size
        # + sliding tokens: 3 * block_size
        # + random tokens: 2 * num_random_blocks * block_size
        max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size
        if self.attention_type == "block_sparse" and seq_length <= max_tokens_to_attend:
            # change attention_type from block_sparse to original_full
            sequence_length = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)
            logger.warning(
                "Attention type 'block_sparse' is not possible if sequence_length: "
                f"{sequence_length} <= num global tokens: 2 * config.block_size "
                "+ min. num sliding tokens: 3 * config.block_size "
                "+ config.num_random_blocks * config.block_size "
                "+ additional buffer: config.num_random_blocks * config.block_size "
                f"= {max_tokens_to_attend} with config.block_size "
                f"= {self.config.block_size}, config.num_random_blocks "
                f"= {self.config.num_random_blocks}."
                "Changing attention type to 'original_full'..."
            )
            self.set_attention_type("original_full")

        if self.attention_type == "block_sparse":
            # Pad inputs up to a multiple of `block_size` so the sequence can be split into whole blocks.
            (
                padding_len,
                input_ids,
                attention_mask,
                token_type_ids,
                position_ids,
                inputs_embeds,
            ) = self._pad_to_block_size(
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                inputs_embeds=inputs_embeds,
                pad_token_id=self.config.pad_token_id,
            )
        else:
            padding_len = 0

        # Block-sparse and full attention use mutually exclusive mask representations.
        if self.attention_type == "block_sparse":
            blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn(
                attention_mask, self.block_size
            )
            extended_attention_mask = None
        elif self.attention_type == "original_full":
            blocked_encoder_mask = None
            band_mask = None
            from_mask = None
            to_mask = None
            # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
            # ourselves in which case we just need to make it broadcastable to all heads.
            extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
                attention_mask, input_shape, device
            )
        else:
            raise ValueError(
                f"attention_type can either be original_full or block_sparse, but is {self.attention_type}"
            )

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            band_mask=band_mask,
            from_mask=from_mask,
            to_mask=to_mask,
            blocked_encoder_mask=blocked_encoder_mask,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        # Pool over the first (CLS) token when a pooling layer was requested at construction time.
        pooler_output = self.activation(self.pooler(sequence_output[:, 0, :])) if (self.pooler is not None) else None

        # undo padding
        if padding_len > 0:
            # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1)
            sequence_output = sequence_output[:, :-padding_len]

        if not return_dict:
            return (sequence_output, pooler_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooler_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )

    @staticmethod
    def create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int):
        """Derive the blocked / band / from / to masks needed by block-sparse attention from a 2D padding mask."""
        batch_size, seq_length = attention_mask.size()
        assert (
            seq_length % block_size == 0
        ), f"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block size is {block_size}."

        def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):
            """
            Create 3D attention mask from a 2D tensor mask.
            Args:
                from_blocked_mask: 2D Tensor of shape [batch_size,
                from_seq_length//from_block_size, from_block_size].
                to_blocked_mask: int32 Tensor of shape [batch_size,
                to_seq_length//to_block_size, to_block_size].
            Returns:
                float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size,
                3*to_block_size].
            """
            # Each block attends to its left, center and right neighbour blocks (the sliding window).
            exp_blocked_to_pad = torch.cat(
                [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2
            )
            band_mask = torch.einsum("blq,blk->blqk", from_blocked_mask[:, 2:-2], exp_blocked_to_pad)
            band_mask.unsqueeze_(1)
            return band_mask

        blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size)
        band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask)

        from_mask = attention_mask.view(batch_size, 1, seq_length, 1)
        to_mask = attention_mask.view(batch_size, 1, 1, seq_length)

        return blocked_encoder_mask, band_mask, from_mask, to_mask

    def _pad_to_block_size(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        token_type_ids: torch.Tensor,
        position_ids: torch.Tensor,
        inputs_embeds: torch.Tensor,
        pad_token_id: int,
    ):
        """A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention."""
        # padding
        block_size = self.config.block_size

        input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
        batch_size, seq_len = input_shape[:2]

        # Amount needed to reach the next multiple of block_size (0 if already aligned).
        padding_len = (block_size - seq_len % block_size) % block_size
        if padding_len > 0:
            logger.info(
                f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                f"`config.block_size`: {block_size}"
            )
            if input_ids is not None:
                input_ids = F.pad(input_ids, (0, padding_len), value=pad_token_id)
            if position_ids is not None:
                # pad with position_id = pad_token_id as in modeling_bigbird.BigBirdEmbeddings
                position_ids = F.pad(position_ids, (0, padding_len), value=pad_token_id)
            if inputs_embeds is not None:
                # Build the padding embeddings by running pad-token ids through the embedding layer.
                input_ids_padding = inputs_embeds.new_full(
                    (batch_size, padding_len),
                    self.config.pad_token_id,
                    dtype=torch.long,
                )
                inputs_embeds_padding = self.embeddings(input_ids_padding)
                inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)

            attention_mask = F.pad(attention_mask, (0, padding_len), value=False)  # no attention on the padding tokens
            token_type_ids = F.pad(token_type_ids, (0, padding_len), value=0)  # pad with token_type_id = 0

        return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
class BigBirdForPreTraining(BigBirdPreTrainedModel):
    """BigBird with the two pre-training heads on top: a masked-LM head and a next-sentence-prediction head."""

    def __init__(self, config):
        super().__init__(config)

        self.bert = BigBirdModel(config, add_pooling_layer=True)
        self.cls = BigBirdPreTrainingHeads(config)

        self.init_weights()

    def get_output_embeddings(self):
        # The LM decoder weights act as the output embeddings (tied with the input embeddings).
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
            Labels for computing the next sequence prediction (classification) loss. If specified, nsp loss will be
            added to masked_lm loss. Input should be a sequence pair (see :obj:`input_ids` docstring) Indices should be
            in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.
        Returns:
        Example::
            >>> from transformers import BigBirdTokenizer, BigBirdForPreTraining
            >>> import torch
            >>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
            >>> model = BigBirdForPreTraining.from_pretrained('google/bigbird-roberta-base')
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.prediction_logits
            >>> seq_relationship_logits = outputs.seq_relationship_logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        total_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        # NOTE(review): the NSP loss is only added when an MLM loss was computed above (`loss_fct` is
        # bound in that branch); passing `next_sentence_label` without `labels` yields no loss.
        if next_sentence_label is not None and total_loss is not None:
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = total_loss + next_sentence_loss

        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return BigBirdForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings("""BigBird Model with a `language modeling` head on top. """, BIG_BIRD_START_DOCSTRING)
class BigBirdForMaskedLM(BigBirdPreTrainedModel):
    """BigBird encoder with a masked-language-modeling head; expects bi-directional (non-decoder) configs."""

    def __init__(self, config):
        super().__init__(config)

        if config.is_decoder:
            logger.warning(
                "If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        self.bert = BigBirdModel(config)
        self.cls = BigBirdOnlyMLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]

        # add a dummy token
        assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
        # The appended dummy position gets a 0 mask entry so it is never attended to.
        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)

        return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """BigBird Model with a `language modeling` head on top for CLM fine-tuning. """, BIG_BIRD_START_DOCSTRING
)
class BigBirdForCausalLM(BigBirdPreTrainedModel):
    """BigBird decoder with an LM head for left-to-right (causal) language modeling."""

    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        if not config.is_decoder:
            logger.warning("If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True.`")

        self.bert = BigBirdModel(config)
        self.cls = BigBirdOnlyMLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        Returns:
        Example::
            >>> from transformers import BigBirdTokenizer, BigBirdForCausalLM, BigBirdConfig
            >>> import torch
            >>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
            >>> config = BigBirdConfig.from_pretrained("google/bigbird-roberta-base")
            >>> config.is_decoder = True
            >>> model = BigBirdForCausalLM.from_pretrained('google/bigbird-roberta-base', config=config)
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape

        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]

        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}

    def _reorder_cache(self, past, beam_idx):
        """Reorder cached self-attention key/value states to follow the new beam order during beam search."""
        reordered_past = ()
        for layer_past in past:
            # Only the first two tensors (self-attention k/v) are beam-dependent; the rest
            # (layer_past[2:]) are passed through unchanged.
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
            )
        return reordered_past
class BigBirdClassificationHead(nn.Module):
    """Head for sentence-level classification tasks.

    Projects the first token's hidden state (the [CLS] position) through a
    dense layer, a configurable activation, and a final projection onto the
    label space, with dropout applied before each linear layer.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
        self.config = config

    def forward(self, features, **kwargs):
        # Use the hidden state of the first token (equivalent to [CLS]).
        cls_state = features[:, 0, :]
        hidden = self.dense(self.dropout(cls_state))
        hidden = ACT2FN[self.config.hidden_act](hidden)
        return self.out_proj(self.dropout(hidden))
@add_start_docstrings(
    """
    BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForSequenceClassification(BigBirdPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # Encoder backbone plus a [CLS]-token classification head.
        self.bert = BigBirdModel(config)
        self.classifier = BigBirdClassificationHead(config)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Token-level hidden states; the classification head selects the
        # first token's state internally.
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            # A single label is treated as regression; otherwise classification.
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            # Tuple output: (loss?, logits, *extra encoder outputs).
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForMultipleChoice(BigBirdPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.bert = BigBirdModel(config)
        # Summarizes the token-level output into a single vector per sequence.
        self.sequence_summary = SequenceSummary(config)
        # One score per choice; scores are reshaped into (batch, num_choices).
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()

    @add_start_docstrings_to_model_forward(
        BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
            :obj:`input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Number of answer choices is the second dimension of the inputs.
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Flatten (batch, num_choices, ...) -> (batch * num_choices, ...) so
        # the encoder processes each choice as an independent sequence.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        pooled_output = self.sequence_summary(sequence_output)
        logits = self.classifier(pooled_output)
        # Restore the choice dimension for the cross-entropy over choices.
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForTokenClassification(BigBirdPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BigBirdModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Per-token projection onto the label space.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                # Positions with attention_mask == 0 get ignore_index as their
                # label so they do not contribute to the loss.
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class BigBirdForQuestionAnsweringHead(nn.Module):
    """Head for question answering tasks.

    Runs the encoder output through dropout, an intermediate/output
    feed-forward pair (which combines its result with the original encoder
    output), and a final linear layer producing the QA logits.
    """

    def __init__(self, config):
        super().__init__()
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.intermediate = BigBirdIntermediate(config)
        self.output = BigBirdOutput(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_output):
        dropped = self.dropout(encoder_output)
        ff_states = self.intermediate(dropped)
        # BigBirdOutput merges the feed-forward result with the original
        # encoder output.
        merged = self.output(ff_states, encoder_output)
        return self.qa_outputs(merged)
@add_start_docstrings(
    """
    BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForQuestionAnswering(BigBirdPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # QA always predicts exactly two logits per token: span start and end.
        config.num_labels = 2
        self.num_labels = config.num_labels
        self.sep_token_id = config.sep_token_id
        self.bert = BigBirdModel(config, add_pooling_layer=False)
        self.qa_classifier = BigBirdForQuestionAnsweringHead(config)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/bigbird-base-trivia-itc",
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        question_lengths=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        seqlen = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)
        if question_lengths is None and input_ids is not None:
            # assuming input_ids format: <cls> <question> <sep> context <sep>
            # First occurrence of the SEP token (+1) marks the question length.
            question_lengths = torch.argmax(input_ids.eq(self.sep_token_id).int(), dim=-1) + 1
            question_lengths.unsqueeze_(1)
        logits_mask = None
        if question_lengths is not None:
            # setting lengths logits to `-infi`
            # Boolean mask: True at positions belonging to the question region.
            logits_mask = self.prepare_question_mask(question_lengths, seqlen)
            if token_type_ids is None:
                # Question positions get token type 0, the remainder gets 1.
                token_type_ids = (~logits_mask).long()
            # No-op assignment retained as-is.
            logits_mask = logits_mask
            logits_mask.unsqueeze_(2)
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        logits = self.qa_classifier(sequence_output)
        if logits_mask is not None:
            # removing question tokens from the competition
            logits = logits - logits_mask * 1e6
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    @staticmethod
    def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int):
        # q_lengths -> (bz, 1)
        # Returns a boolean mask of shape (bz, maxlen): True where the
        # position index is smaller than the question length.
        mask = torch.arange(0, maxlen).to(q_lengths.device)
        mask.unsqueeze_(0)  # -> (1, maxlen)
        mask = mask < q_lengths
        return mask
|
"""Provide the Reddit class."""
import asyncio
import configparser
import os
import re
import time
from itertools import islice
from logging import getLogger
from typing import (
IO,
TYPE_CHECKING,
Any,
Dict,
Generator,
Iterable,
Optional,
Type,
Union,
)
from warnings import warn
from prawcore import (
Authorizer,
DeviceIDAuthorizer,
ReadOnlyAuthorizer,
Redirect,
Requestor,
ScriptAuthorizer,
TrustedAuthenticator,
UntrustedAuthenticator,
session,
)
from prawcore.exceptions import BadRequest
from . import models
from .config import Config
from .const import API_PATH, USER_AGENT_FORMAT, __version__
from .exceptions import (
ClientException,
MissingRequiredAttributeException,
RedditAPIException,
)
from .objector import Objector
from .util import _deprecate_args
from .util.token_manager import BaseTokenManager
try:
from update_checker import update_check
UPDATE_CHECKER_MISSING = False
except ImportError: # pragma: no cover
UPDATE_CHECKER_MISSING = True
if TYPE_CHECKING: # pragma: no cover
import praw
Comment = models.Comment
Redditor = models.Redditor
Submission = models.Submission
Subreddit = models.Subreddit
logger = getLogger("praw")
class Reddit:
"""The Reddit class provides convenient access to Reddit's API.
Instances of this class are the gateway to interacting with Reddit's API through
PRAW. The canonical way to obtain an instance of this class is via:
.. code-block:: python
import praw
reddit = praw.Reddit(
client_id="CLIENT_ID",
client_secret="CLIENT_SECRET",
password="PASSWORD",
user_agent="USERAGENT",
username="USERNAME",
)
"""
# Process-wide flag ensuring the optional package update check runs at most
# once (see _check_for_update).
update_checked = False
# Extracts the wait amount and unit embedded in Reddit RATELIMIT error
# messages; consumed by _handle_rate_limit.
_ratelimit_regex = re.compile(r"([0-9]{1,3}) (milliseconds?|seconds?|minutes?)")
@property
def _next_unique(self) -> int:
    """Return a per-instance, monotonically increasing integer."""
    current = self._unique_counter
    self._unique_counter = current + 1
    return current
@property
def read_only(self) -> bool:
    """Return ``True`` when using the ``ReadOnlyAuthorizer``."""
    # The active core is swapped by the ``read_only`` setter; matching the
    # read-only core therefore identifies the current mode.
    active_is_read_only = self._core == self._read_only_core
    return active_is_read_only
@read_only.setter
def read_only(self, value: bool) -> None:
    """Toggle between the read-only and the authorized requestor cores.

    :raises: :class:`.ClientException` when attempting to unset ``read_only``
        and only the ``ReadOnlyAuthorizer`` is available.
    """
    if value:
        self._core = self._read_only_core
        return
    # Unsetting read-only requires an authorized session to switch to.
    if self._authorized_core is None:
        raise ClientException(
            "read_only cannot be unset as only the ReadOnlyAuthorizer is available."
        )
    self._core = self._authorized_core
@property
def validate_on_submit(self) -> bool:
    """Get validate_on_submit.

    .. deprecated:: 7.0

        If property :attr:`.validate_on_submit` is set to ``False``, the behavior is
        deprecated by Reddit. This attribute will be removed around May-June 2020.
    """
    current = self._validate_on_submit
    # Anything other than an explicit ``False`` is returned silently.
    if current is not False:
        return current
    warn(
        "Reddit will check for validation on all posts around May-June 2020. It"
        " is recommended to check for validation by setting"
        " reddit.validate_on_submit to True.",
        category=DeprecationWarning,
        stacklevel=3,
    )
    return current
@validate_on_submit.setter
def validate_on_submit(self, value: bool):
    """Store the flag; the getter owns the deprecation warning."""
    self._validate_on_submit = value
def __enter__(self):
    """Handle the context manager open."""
    # Nothing to acquire; the instance itself is the managed resource.
    return self
def __exit__(self, *_args):
    """Handle the context manager close."""
    # Intentionally empty: there is no connection or handle to release.
@_deprecate_args(
    "site_name",
    "config_interpolation",
    "requestor_class",
    "requestor_kwargs",
    "token_manager",
)
def __init__(
    self,
    site_name: Optional[str] = None,
    *,
    config_interpolation: Optional[str] = None,
    requestor_class: Optional[Type[Requestor]] = None,
    requestor_kwargs: Optional[Dict[str, Any]] = None,
    token_manager: Optional[BaseTokenManager] = None,
    **config_settings: Optional[Union[str, bool]],
):  # noqa: D207, D301
    """Initialize a :class:`.Reddit` instance.

    :param site_name: The name of a section in your ``praw.ini`` file from which to
        load settings from. This parameter, in tandem with an appropriately
        configured ``praw.ini``, file is useful if you wish to easily save
        credentials for different applications, or communicate with other servers
        running Reddit. If ``site_name`` is ``None``, then the site name will be
        looked for in the environment variable ``praw_site``. If it is not found
        there, the ``DEFAULT`` site will be used (default: ``None``).
    :param config_interpolation: Config parser interpolation type that will be
        passed to :class:`.Config` (default: ``None``).
    :param requestor_class: A class that will be used to create a requestor. If not
        set, use ``prawcore.Requestor`` (default: ``None``).
    :param requestor_kwargs: Dictionary with additional keyword arguments used to
        initialize the requestor (default: ``None``).
    :param token_manager: When provided, the passed instance, a subclass of
        :class:`.BaseTokenManager`, will manage tokens via two callback functions.
        This parameter must be provided in order to work with refresh tokens
        (default: ``None``).

    Additional keyword arguments will be used to initialize the :class:`.Config`
    object. This can be used to specify configuration settings during instantiation
    of the :class:`.Reddit` instance. For more details, please see
    :ref:`configuration`.

    Required settings are:

    - ``client_id``
    - ``client_secret`` (for installed applications set this value to ``None``)
    - ``user_agent``

    The ``requestor_class`` and ``requestor_kwargs`` allow for customization of the
    requestor :class:`.Reddit` will use. This allows, e.g., easily adding behavior
    to the requestor or wrapping its |Session|_ in a caching layer. Example usage:

    .. |Session| replace:: ``Session``
    .. _session: https://2.python-requests.org/en/master/api/#requests.Session

    .. code-block:: python

        import json

        import betamax
        import requests
        from prawcore import Requestor

        from praw import Reddit


        class JSONDebugRequestor(Requestor):
            def request(self, *args, **kwargs):
                response = super().request(*args, **kwargs)
                print(json.dumps(response.json(), indent=4))
                return response


        my_session = betamax.Betamax(requests.Session())
        reddit = Reddit(
            ..., requestor_class=JSONDebugRequestor, requestor_kwargs={"session": my_session}
        )

    """
    self._core = self._authorized_core = self._read_only_core = None
    self._objector = None
    self._token_manager = token_manager
    self._unique_counter = 0
    self._validate_on_submit = False
    try:
        # Resolution order: explicit argument, then the ``praw_site``
        # environment variable, then the ``DEFAULT`` praw.ini section.
        config_section = site_name or os.getenv("praw_site") or "DEFAULT"
        self.config = Config(
            config_section, config_interpolation, **config_settings
        )
    except configparser.NoSectionError as exc:
        help_message = (
            "You provided the name of a praw.ini configuration which does not"
            " exist.\n\nFor help with creating a Reddit instance,"
            " visit\nhttps://praw.readthedocs.io/en/latest/code_overview/reddit_instance.html\n\nFor"
            " help on configuring PRAW,"
            " visit\nhttps://praw.readthedocs.io/en/latest/getting_started/configuration.html"
        )
        if site_name is not None:
            exc.message += f"\n{help_message}"
        raise
    required_message = (
        "Required configuration setting {!r} missing. \nThis setting can be"
        " provided in a praw.ini file, as a keyword argument to the `Reddit` class"
        " constructor, or as an environment variable."
    )
    for attribute in ("client_id", "user_agent"):
        if getattr(self.config, attribute) in (self.config.CONFIG_NOT_SET, None):
            raise MissingRequiredAttributeException(
                required_message.format(attribute)
            )
    if self.config.client_secret is self.config.CONFIG_NOT_SET:
        # NOTE: the inner quotes must differ from the f-string's own quotes;
        # reusing double quotes here is a SyntaxError on Python < 3.12.
        raise MissingRequiredAttributeException(
            f"{required_message.format('client_secret')}\nFor installed"
            " applications this value must be set to None via a keyword argument"
            " to the `Reddit` class constructor."
        )
    self._check_for_update()
    self._prepare_objector()
    self._prepare_prawcore(
        requestor_class=requestor_class, requestor_kwargs=requestor_kwargs
    )
    self.auth = models.Auth(self, None)
    """An instance of :class:`.Auth`.

    Provides the interface for interacting with installed and web applications.

    .. seealso::

        :ref:`auth_url`

    """
    self.drafts = models.DraftHelper(self, None)
    """An instance of :class:`.DraftHelper`.

    Provides the interface for working with :class:`.Draft` instances.

    For example, to list the currently authenticated user's drafts:

    .. code-block:: python

        drafts = reddit.drafts()

    To create a draft on r/test run:

    .. code-block:: python

        reddit.drafts.create(title="title", selftext="selftext", subreddit="test")

    """
    self.front = models.Front(self)
    """An instance of :class:`.Front`.

    Provides the interface for interacting with front page listings. For example:

    .. code-block:: python

        for submission in reddit.front.hot():
            print(submission)

    """
    self.inbox = models.Inbox(self, None)
    """An instance of :class:`.Inbox`.

    Provides the interface to a user's inbox which produces :class:`.Message`,
    :class:`.Comment`, and :class:`.Submission` instances. For example, to iterate
    through comments which mention the authorized user run:

    .. code-block:: python

        for comment in reddit.inbox.mentions():
            print(comment)

    """
    self.live = models.LiveHelper(self, None)
    """An instance of :class:`.LiveHelper`.

    Provides the interface for working with :class:`.LiveThread` instances. At
    present only new live threads can be created.

    .. code-block:: python

        reddit.live.create(title="title", description="description")

    """
    self.multireddit = models.MultiredditHelper(self, None)
    """An instance of :class:`.MultiredditHelper`.

    Provides the interface to working with :class:`.Multireddit` instances. For
    example, you can obtain a :class:`.Multireddit` instance via:

    .. code-block:: python

        reddit.multireddit(redditor="samuraisam", name="programming")

    """
    self.redditors = models.Redditors(self, None)
    """An instance of :class:`.Redditors`.

    Provides the interface for :class:`.Redditor` discovery. For example, to iterate
    over the newest Redditors, run:

    .. code-block:: python

        for redditor in reddit.redditors.new(limit=None):
            print(redditor)

    """
    self.subreddit = models.SubredditHelper(self, None)
    """An instance of :class:`.SubredditHelper`.

    Provides the interface to working with :class:`.Subreddit` instances. For
    example to create a :class:`.Subreddit` run:

    .. code-block:: python

        reddit.subreddit.create(name="coolnewsubname")

    To obtain a lazy :class:`.Subreddit` instance run:

    .. code-block:: python

        reddit.subreddit("test")

    Multiple subreddits can be combined and filtered views of r/all can also be used
    just like a subreddit:

    .. code-block:: python

        reddit.subreddit("redditdev+learnpython+botwatch")
        reddit.subreddit("all-redditdev-learnpython")

    """
    self.subreddits = models.Subreddits(self, None)
    """An instance of :class:`.Subreddits`.

    Provides the interface for :class:`.Subreddit` discovery. For example, to
    iterate over the set of default subreddits run:

    .. code-block:: python

        for subreddit in reddit.subreddits.default(limit=None):
            print(subreddit)

    """
    self.user = models.User(self)
    """An instance of :class:`.User`.

    Provides the interface to the currently authorized :class:`.Redditor`. For
    example to get the name of the current user run:

    .. code-block:: python

        print(reddit.user.me())

    """
def _check_for_async(self):
    """Warn when this synchronous client appears to run in an async context."""
    if self.config.check_for_async:  # pragma: no cover
        try:
            # ZMQInteractiveShell (IPython kernel, e.g. Jupyter) always has a
            # running loop; skip the check there to avoid a false positive.
            shell = get_ipython().__class__.__name__
            if shell == "ZMQInteractiveShell":
                return
        except NameError:
            # Not running under IPython at all.
            pass
        in_async = False
        try:
            # Raises RuntimeError when no event loop is running.
            asyncio.get_running_loop()
            in_async = True
        except Exception:  # Quietly fail if any exception occurs during the check
            pass
        if in_async:
            logger.warning(
                "It appears that you are using PRAW in an asynchronous"
                " environment.\nIt is strongly recommended to use Async PRAW:"
                " https://asyncpraw.readthedocs.io.\nSee"
                " https://praw.readthedocs.io/en/latest/getting_started/multiple_instances.html#discord-bots-and-asynchronous-environments"
                " for more info.\n",
            )
def _check_for_update(self):
    """Run the optional package update check, at most once per process."""
    if UPDATE_CHECKER_MISSING:
        return
    # Skip when already checked or when the user disabled the check.
    if Reddit.update_checked or not self.config.check_for_updates:
        return
    update_check(__package__, __version__)
    Reddit.update_checked = True
def _prepare_common_authorizer(self, authenticator):
    """Build the authorized session from a token manager or refresh token.

    Falls back to the read-only core when neither source of refresh
    credentials is configured.
    """
    if self._token_manager is not None:
        warn(
            "Token managers have been deprecated and will be removed in the near"
            " future. See https://www.reddit.com/r/redditdev/comments/olk5e6/"
            "followup_oauth2_api_changes_regarding_refresh/ for more details.",
            category=DeprecationWarning,
            stacklevel=2,
        )
        # A token manager and a refresh_token setting are mutually exclusive.
        if self.config.refresh_token:
            raise TypeError(
                "``refresh_token`` setting cannot be provided when providing"
                " ``token_manager``"
            )
        self._token_manager.reddit = self
        authorizer = Authorizer(
            authenticator,
            post_refresh_callback=self._token_manager.post_refresh_callback,
            pre_refresh_callback=self._token_manager.pre_refresh_callback,
        )
    elif self.config.refresh_token:
        authorizer = Authorizer(
            authenticator, refresh_token=self.config.refresh_token
        )
    else:
        # No refresh credentials: the read-only session is all we can offer.
        self._core = self._read_only_core
        return
    self._core = self._authorized_core = session(authorizer)
def _prepare_objector(self):
    """Create the :class:`.Objector` with the kind-string to model mapping.

    Keys are either API "kind" identifiers read from the configuration or
    literal type names returned by Reddit's API; values are the model
    classes the raw JSON is converted into.
    """
    mappings = {
        self.config.kinds["comment"]: models.Comment,
        self.config.kinds["message"]: models.Message,
        self.config.kinds["redditor"]: models.Redditor,
        self.config.kinds["submission"]: models.Submission,
        self.config.kinds["subreddit"]: models.Subreddit,
        self.config.kinds["trophy"]: models.Trophy,
        "Button": models.Button,
        "Collection": models.Collection,
        "Draft": models.Draft,
        "DraftList": models.DraftList,
        "Image": models.Image,
        "LabeledMulti": models.Multireddit,
        "Listing": models.Listing,
        "LiveUpdate": models.LiveUpdate,
        "LiveUpdateEvent": models.LiveThread,
        "MenuLink": models.MenuLink,
        "ModeratedList": models.ModeratedList,
        "ModmailAction": models.ModmailAction,
        "ModmailConversation": models.ModmailConversation,
        "ModmailConversations-list": models.ModmailConversationsListing,
        "ModmailMessage": models.ModmailMessage,
        "Submenu": models.Submenu,
        "TrophyList": models.TrophyList,
        "UserList": models.RedditorList,
        "UserSubreddit": models.UserSubreddit,
        "button": models.ButtonWidget,
        "calendar": models.Calendar,
        "community-list": models.CommunityList,
        "custom": models.CustomWidget,
        "id-card": models.IDCard,
        "image": models.ImageWidget,
        "menu": models.Menu,
        "modaction": models.ModAction,
        "moderator-list": models.ModeratorListing,
        "moderators": models.ModeratorsWidget,
        "more": models.MoreComments,
        "post-flair": models.PostFlairWidget,
        "rule": models.Rule,
        "stylesheet": models.Stylesheet,
        "subreddit-rules": models.RulesWidget,
        "textarea": models.TextArea,
        "widget": models.Widget,
    }
    self._objector = Objector(self, mappings)
def _prepare_prawcore(self, *, requestor_class=None, requestor_kwargs=None):
    """Create the requestor and build the trusted or untrusted sessions.

    A trusted (script/web) setup is chosen when a ``client_secret`` is
    configured; otherwise the installed-application path is used.
    """
    requestor = (requestor_class or Requestor)(
        USER_AGENT_FORMAT.format(self.config.user_agent),
        self.config.oauth_url,
        self.config.reddit_url,
        **(requestor_kwargs or {}),
    )
    if self.config.client_secret:
        self._prepare_trusted_prawcore(requestor)
    else:
        self._prepare_untrusted_prawcore(requestor)
def _prepare_trusted_prawcore(self, requestor):
    """Set up sessions for a trusted (script or web) application."""
    authenticator = TrustedAuthenticator(
        requestor,
        self.config.client_id,
        self.config.client_secret,
        self.config.redirect_uri,
    )
    self._read_only_core = session(ReadOnlyAuthorizer(authenticator))
    # Without a full username/password pair, defer to the common
    # refresh-token/token-manager handling.
    if not (self.config.username and self.config.password):
        self._prepare_common_authorizer(authenticator)
        return
    script_authorizer = ScriptAuthorizer(
        authenticator, self.config.username, self.config.password
    )
    self._core = self._authorized_core = session(script_authorizer)
def _prepare_untrusted_prawcore(self, requestor):
    """Set up sessions for an installed (untrusted) application."""
    authenticator = UntrustedAuthenticator(
        requestor, self.config.client_id, self.config.redirect_uri
    )
    self._read_only_core = session(DeviceIDAuthorizer(authenticator))
    self._prepare_common_authorizer(authenticator)
@_deprecate_args("id", "url")
def comment(
    self,  # pylint: disable=invalid-name
    id: Optional[str] = None,  # pylint: disable=redefined-builtin
    *,
    url: Optional[str] = None,
):
    """Return a lazy instance of :class:`.Comment`.

    :param id: The ID of the comment.
    :param url: A permalink pointing to the comment.

    .. note::

        If you want to obtain the comment's replies, you will need to call
        :meth:`~.Comment.refresh` on the returned :class:`.Comment`.

    """
    lazy_comment = models.Comment(self, id=id, url=url)
    return lazy_comment
def domain(self, domain: str):
    """Return an instance of :class:`.DomainListing`.

    :param domain: The domain to obtain submission listings for.

    """
    listing = models.DomainListing(self, domain)
    return listing
@_deprecate_args("path", "params")
def get(
    self,
    path: str,
    *,
    params: Optional[Union[str, Dict[str, Union[str, int]]]] = None,
):
    """Return parsed objects returned from a GET request to ``path``.

    :param path: The path to fetch.
    :param params: The query parameters to add to the request (default: ``None``).

    """
    return self._objectify_request(method="GET", path=path, params=params)
@_deprecate_args("fullnames", "url", "subreddits")
def info(
    self,
    *,
    fullnames: Optional[Iterable[str]] = None,
    subreddits: Optional[Iterable[Union["praw.models.Subreddit", str]]] = None,
    url: Optional[str] = None,
) -> Generator[
    Union["praw.models.Subreddit", "praw.models.Comment", "praw.models.Submission"],
    None,
    None,
]:
    """Fetch information about each item in ``fullnames``, ``url``, or ``subreddits``.

    :param fullnames: A list of fullnames for comments, submissions, and/or
        subreddits.
    :param subreddits: A list of subreddit names or :class:`.Subreddit` objects to
        retrieve subreddits from.
    :param url: A url (as a string) to retrieve lists of link submissions from.

    :returns: A generator that yields found items in their relative order.

    Items that cannot be matched will not be generated. Requests will be issued in
    batches for each 100 fullnames.

    .. note::

        For comments that are retrieved via this method, if you want to obtain its
        replies, you will need to call :meth:`~.Comment.refresh` on the yielded
        :class:`.Comment`.

    .. note::

        When using the URL option, it is important to be aware that URLs are treated
        literally by Reddit's API. As such, the URLs ``"youtube.com"`` and
        ``"https://www.youtube.com"`` will provide a different set of submissions.

    """
    # Exactly one of the three inputs must be provided (the other two None).
    none_count = (fullnames, url, subreddits).count(None)
    if none_count != 2:
        raise TypeError(
            "Either `fullnames`, `url`, or `subreddits` must be provided."
        )
    is_using_fullnames = fullnames is not None
    ids_or_names = fullnames if is_using_fullnames else subreddits
    if ids_or_names is not None:
        # A plain string would otherwise be iterated character by character.
        if isinstance(ids_or_names, str):
            raise TypeError(
                "`fullnames` and `subreddits` must be a non-str iterable."
            )
        api_parameter_name = "id" if is_using_fullnames else "sr_name"

        def generator(names):
            # Batch the identifiers 100 at a time into comma-joined params.
            if is_using_fullnames:
                iterable = iter(names)
            else:
                # Subreddit objects stringify to their display names.
                iterable = iter([str(item) for item in names])
            while True:
                chunk = list(islice(iterable, 100))
                if not chunk:
                    break
                params = {api_parameter_name: ",".join(chunk)}
                for result in self.get(API_PATH["info"], params=params):
                    yield result

        return generator(ids_or_names)

    # URL path: a single request keyed by the literal URL string.
    def generator(url):
        params = {"url": url}
        for result in self.get(API_PATH["info"], params=params):
            yield result

    return generator(url)
def _objectify_request(
self,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
files: Optional[Dict[str, IO]] = None,
json: Optional[Dict[Any, Any]] = None,
method: str = "",
params: Optional[Union[str, Dict[str, str]]] = None,
path: str = "",
) -> Any:
"""Run a request through the ``Objector``.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param files: Dictionary, filename to file (like) object mapping (default:
``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
:param method: The HTTP method (e.g., ``"GET"``, ``"POST"``, ``"PUT"``,
``"DELETE"``).
:param params: The query parameters to add to the request (default: ``None``).
:param path: The path to fetch.
"""
return self._objector.objectify(
self.request(
data=data,
files=files,
json=json,
method=method,
params=params,
path=path,
)
)
def _handle_rate_limit(
self, exception: RedditAPIException
) -> Optional[Union[int, float]]:
for item in exception.items:
if item.error_type == "RATELIMIT":
amount_search = self._ratelimit_regex.search(item.message)
if not amount_search:
break
seconds = int(amount_search.group(1))
if amount_search.group(2).startswith("minute"):
seconds *= 60
elif amount_search.group(2).startswith("millisecond"):
seconds = 0
if seconds <= int(self.config.ratelimit_seconds):
sleep_seconds = seconds + 1
return sleep_seconds
return None
@_deprecate_args("path", "data", "json", "params")
def delete(
self,
path: str,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
json: Optional[Dict[Any, Any]] = None,
params: Optional[Union[str, Dict[str, str]]] = None,
) -> Any:
"""Return parsed objects returned from a DELETE request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
:param params: The query parameters to add to the request (default: ``None``).
"""
return self._objectify_request(
data=data, json=json, method="DELETE", params=params, path=path
)
@_deprecate_args("path", "data", "json")
def patch(
self,
path: str,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
json: Optional[Dict[Any, Any]] = None,
) -> Any:
"""Return parsed objects returned from a PATCH request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
"""
return self._objectify_request(data=data, json=json, method="PATCH", path=path)
    @_deprecate_args("path", "data", "files", "params", "json")
    def post(
        self,
        path: str,
        *,
        data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
        files: Optional[Dict[str, IO]] = None,
        json: Optional[Dict[Any, Any]] = None,
        params: Optional[Union[str, Dict[str, str]]] = None,
    ) -> Any:
        """Return parsed objects returned from a POST request to ``path``.
        :param path: The path to fetch.
        :param data: Dictionary, bytes, or file-like object to send in the body of the
            request (default: ``None``).
        :param files: Dictionary, filename to file (like) object mapping (default:
            ``None``).
        :param json: JSON-serializable object to send in the body of the request with a
            Content-Type header of application/json (default: ``None``). If ``json`` is
            provided, ``data`` should not be.
        :param params: The query parameters to add to the request (default: ``None``).
        """
        if json is None:
            # Non-JSON POSTs always send a form body; fall back to an empty dict.
            data = data or {}
        # Retry the request (3 attempts total) when the API reports a
        # rate-limit error whose delay fits within config.ratelimit_seconds.
        attempts = 3
        last_exception = None
        while attempts > 0:
            attempts -= 1
            try:
                return self._objectify_request(
                    data=data,
                    files=files,
                    json=json,
                    method="POST",
                    params=params,
                    path=path,
                )
            except RedditAPIException as exception:
                last_exception = exception
                seconds = self._handle_rate_limit(exception=exception)
                if seconds is None:
                    # Not a retryable rate-limit error; re-raise below.
                    break
                second_string = "second" if seconds == 1 else "seconds"
                logger.debug(f"Rate limit hit, sleeping for {seconds} {second_string}")
                time.sleep(seconds)
        # Only reachable after a break or exhausted retries, so an exception
        # was always captured above.
        raise last_exception
@_deprecate_args("path", "data", "json")
def put(
self,
path: str,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
json: Optional[Dict[Any, Any]] = None,
):
"""Return parsed objects returned from a PUT request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
"""
return self._objectify_request(data=data, json=json, method="PUT", path=path)
    @_deprecate_args("nsfw")
    def random_subreddit(self, *, nsfw: bool = False) -> "praw.models.Subreddit":
        """Return a random lazy instance of :class:`.Subreddit`.
        :param nsfw: Return a random NSFW (not safe for work) subreddit (default:
            ``False``).
        """
        url = API_PATH["subreddit"].format(subreddit="randnsfw" if nsfw else "random")
        path = None
        try:
            # Reddit answers this endpoint with an HTTP redirect to the chosen
            # subreddit; the target is delivered via the Redirect exception.
            # ``unique`` is a cache-busting counter value.
            self.get(url, params={"unique": self._next_unique})
        except Redirect as redirect:
            path = redirect.path
        # NOTE(review): if the request ever completes without raising Redirect,
        # ``path`` stays ``None`` and the split below raises AttributeError —
        # confirm the endpoint always redirects.
        return models.Subreddit(self, path.split("/")[2])
@_deprecate_args("name", "fullname")
def redditor(
self, name: Optional[str] = None, *, fullname: Optional[str] = None
) -> "praw.models.Redditor":
"""Return a lazy instance of :class:`.Redditor`.
:param name: The name of the redditor.
:param fullname: The fullname of the redditor, starting with ``t2_``.
Either ``name`` or ``fullname`` can be provided, but not both.
"""
return models.Redditor(self, name=name, fullname=fullname)
    @_deprecate_args("method", "path", "params", "data", "files", "json")
    def request(
        self,
        *,
        data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
        files: Optional[Dict[str, IO]] = None,
        json: Optional[Dict[Any, Any]] = None,
        method: str,
        params: Optional[Union[str, Dict[str, Union[str, int]]]] = None,
        path: str,
    ) -> Any:
        """Return the parsed JSON data returned from a request to URL.
        :param data: Dictionary, bytes, or file-like object to send in the body of the
            request (default: ``None``).
        :param files: Dictionary, filename to file (like) object mapping (default:
            ``None``).
        :param json: JSON-serializable object to send in the body of the request with a
            Content-Type header of application/json (default: ``None``). If ``json`` is
            provided, ``data`` should not be.
        :param method: The HTTP method (e.g., ``"GET"``, ``"POST"``, ``"PUT"``,
            ``"DELETE"``).
        :param params: The query parameters to add to the request (default: ``None``).
        :param path: The path to fetch.
        """
        if self.config.check_for_async:
            # Warn users who appear to be running PRAW inside an asyncio loop.
            self._check_for_async()
        if data and json:
            raise ClientException("At most one of `data` or `json` is supported.")
        try:
            return self._core.request(
                data=data,
                files=files,
                json=json,
                method=method,
                params=params,
                path=path,
            )
        except BadRequest as exception:
            # Translate a raw HTTP 400 into a RedditAPIException when the
            # response body carries a structured error payload.
            try:
                data = exception.response.json()
            except ValueError:
                if exception.response.text:
                    # Non-JSON body: treat the raw text as the reason.
                    data = {"reason": exception.response.text}
                else:
                    # Empty body: nothing to translate; propagate the original.
                    raise exception
            if set(data) == {"error", "message"}:
                # Plain error/message payloads are not API-style errors.
                raise
            explanation = data.get("explanation")
            if "fields" in data:
                # NOTE(review): assumes the API reports at most one offending
                # field per error — confirm; `assert` is stripped under -O.
                assert len(data["fields"]) == 1
                field = data["fields"][0]
            else:
                field = None
            raise RedditAPIException(
                [data["reason"], explanation, field]
            ) from exception
@_deprecate_args("id", "url")
def submission( # pylint: disable=invalid-name,redefined-builtin
self, id: Optional[str] = None, *, url: Optional[str] = None
) -> "praw.models.Submission":
"""Return a lazy instance of :class:`.Submission`.
:param id: A Reddit base36 submission ID, e.g., ``"2gmzqe"``.
:param url: A URL supported by :meth:`.Submission.id_from_url`.
Either ``id`` or ``url`` can be provided, but not both.
"""
return models.Submission(self, id=id, url=url)
def username_available(self, name: str) -> bool:
"""Check to see if the username is available.
For example, to check if the username ``bboe`` is available, try:
.. code-block:: python
reddit.username_available("bboe")
"""
return self._objectify_request(
method="GET", params={"user": name}, path=API_PATH["username_available"]
)
| """Provide the Reddit class."""
import asyncio
import configparser
import os
import re
import time
from itertools import islice
from logging import getLogger
from typing import (
IO,
TYPE_CHECKING,
Any,
Dict,
Generator,
Iterable,
Optional,
Type,
Union,
)
from warnings import warn
from prawcore import (
Authorizer,
DeviceIDAuthorizer,
ReadOnlyAuthorizer,
Redirect,
Requestor,
ScriptAuthorizer,
TrustedAuthenticator,
UntrustedAuthenticator,
session,
)
from prawcore.exceptions import BadRequest
from . import models
from .config import Config
from .const import API_PATH, USER_AGENT_FORMAT, __version__
from .exceptions import (
ClientException,
MissingRequiredAttributeException,
RedditAPIException,
)
from .objector import Objector
from .util import _deprecate_args
from .util.token_manager import BaseTokenManager
try:
from update_checker import update_check
UPDATE_CHECKER_MISSING = False
except ImportError: # pragma: no cover
UPDATE_CHECKER_MISSING = True
if TYPE_CHECKING: # pragma: no cover
import praw
Comment = models.Comment
Redditor = models.Redditor
Submission = models.Submission
Subreddit = models.Subreddit
logger = getLogger("praw")
class Reddit:
"""The Reddit class provides convenient access to Reddit's API.
Instances of this class are the gateway to interacting with Reddit's API through
PRAW. The canonical way to obtain an instance of this class is via:
.. code-block:: python
import praw
reddit = praw.Reddit(
client_id="CLIENT_ID",
client_secret="CLIENT_SECRET",
password="PASSWORD",
user_agent="USERAGENT",
username="USERNAME",
)
"""
update_checked = False
_ratelimit_regex = re.compile(r"([0-9]{1,3}) (milliseconds?|seconds?|minutes?)")
@property
def _next_unique(self) -> int:
value = self._unique_counter
self._unique_counter += 1
return value
    @property
    def read_only(self) -> bool:
        """Return ``True`` when using the ``ReadOnlyAuthorizer``."""
        # The instance is read-only exactly when the active session core is
        # the read-only one.
        return self._core == self._read_only_core
@read_only.setter
def read_only(self, value: bool) -> None:
"""Set or unset the use of the ReadOnlyAuthorizer.
:raises: :class:`.ClientException` when attempting to unset ``read_only`` and
only the ``ReadOnlyAuthorizer`` is available.
"""
if value:
self._core = self._read_only_core
elif self._authorized_core is None:
raise ClientException(
"read_only cannot be unset as only the ReadOnlyAuthorizer is available."
)
else:
self._core = self._authorized_core
@property
def validate_on_submit(self) -> bool:
"""Get validate_on_submit.
.. deprecated:: 7.0
If property :attr:`.validate_on_submit` is set to ``False``, the behavior is
deprecated by Reddit. This attribute will be removed around May-June 2020.
"""
value = self._validate_on_submit
if value is False:
warn(
"Reddit will check for validation on all posts around May-June 2020. It"
" is recommended to check for validation by setting"
" reddit.validate_on_submit to True.",
category=DeprecationWarning,
stacklevel=3,
)
return value
    @validate_on_submit.setter
    def validate_on_submit(self, val: bool):
        # Plain passthrough; the deprecation warning lives in the getter.
        self._validate_on_submit = val
    def __enter__(self):
        """Handle the context manager open."""
        # No resources are acquired here; presumably kept for API symmetry with
        # Async PRAW — confirm before relying on it for setup.
        return self
    def __exit__(self, *_args):
        """Handle the context manager close."""
        # No cleanup is performed; exceptions propagate (implicit None return).
    @_deprecate_args(
        "site_name",
        "config_interpolation",
        "requestor_class",
        "requestor_kwargs",
        "token_manager",
    )
    def __init__(
        self,
        site_name: Optional[str] = None,
        *,
        config_interpolation: Optional[str] = None,
        requestor_class: Optional[Type[Requestor]] = None,
        requestor_kwargs: Optional[Dict[str, Any]] = None,
        token_manager: Optional[BaseTokenManager] = None,
        **config_settings: Optional[Union[str, bool]],
    ):  # noqa: D207, D301
        """Initialize a :class:`.Reddit` instance.
        :param site_name: The name of a section in your ``praw.ini`` file from which to
            load settings from. This parameter, in tandem with an appropriately
            configured ``praw.ini``, file is useful if you wish to easily save
            credentials for different applications, or communicate with other servers
            running Reddit. If ``site_name`` is ``None``, then the site name will be
            looked for in the environment variable ``praw_site``. If it is not found
            there, the ``DEFAULT`` site will be used (default: ``None``).
        :param config_interpolation: Config parser interpolation type that will be
            passed to :class:`.Config` (default: ``None``).
        :param requestor_class: A class that will be used to create a requestor. If not
            set, use ``prawcore.Requestor`` (default: ``None``).
        :param requestor_kwargs: Dictionary with additional keyword arguments used to
            initialize the requestor (default: ``None``).
        :param token_manager: When provided, the passed instance, a subclass of
            :class:`.BaseTokenManager`, will manage tokens via two callback functions.
            This parameter must be provided in order to work with refresh tokens
            (default: ``None``).
        Additional keyword arguments will be used to initialize the :class:`.Config`
        object. This can be used to specify configuration settings during instantiation
        of the :class:`.Reddit` instance. For more details, please see
        :ref:`configuration`.
        Required settings are:
        - ``client_id``
        - ``client_secret`` (for installed applications set this value to ``None``)
        - ``user_agent``
        The ``requestor_class`` and ``requestor_kwargs`` allow for customization of the
        requestor :class:`.Reddit` will use. This allows, e.g., easily adding behavior
        to the requestor or wrapping its |Session|_ in a caching layer. Example usage:
        .. |Session| replace:: ``Session``
        .. _session: https://2.python-requests.org/en/master/api/#requests.Session
        .. code-block:: python
            import json
            import betamax
            import requests
            from prawcore import Requestor
            from praw import Reddit
            class JSONDebugRequestor(Requestor):
                def request(self, *args, **kwargs):
                    response = super().request(*args, **kwargs)
                    print(json.dumps(response.json(), indent=4))
                    return response
            my_session = betamax.Betamax(requests.Session())
            reddit = Reddit(
                ..., requestor_class=JSONDebugRequestor, requestor_kwargs={"session": my_session}
            )
        """
        # Session cores: ``_core`` is whichever of the other two is active.
        self._core = self._authorized_core = self._read_only_core = None
        self._objector = None
        self._token_manager = token_manager
        self._unique_counter = 0
        self._validate_on_submit = False
        # Resolve the praw.ini section: explicit arg > env var > DEFAULT.
        try:
            config_section = site_name or os.getenv("praw_site") or "DEFAULT"
            self.config = Config(
                config_section, config_interpolation, **config_settings
            )
        except configparser.NoSectionError as exc:
            help_message = (
                "You provided the name of a praw.ini configuration which does not"
                " exist.\n\nFor help with creating a Reddit instance,"
                " visit\nhttps://praw.readthedocs.io/en/latest/code_overview/reddit_instance.html\n\nFor"
                " help on configuring PRAW,"
                " visit\nhttps://praw.readthedocs.io/en/latest/getting_started/configuration.html"
            )
            if site_name is not None:
                exc.message += f"\n{help_message}"
            raise
        # Validate the required settings before any network setup.
        required_message = (
            "Required configuration setting {!r} missing. \nThis setting can be"
            " provided in a praw.ini file, as a keyword argument to the `Reddit` class"
            " constructor, or as an environment variable."
        )
        for attribute in ("client_id", "user_agent"):
            if getattr(self.config, attribute) in (self.config.CONFIG_NOT_SET, None):
                raise MissingRequiredAttributeException(
                    required_message.format(attribute)
                )
        # ``client_secret`` must be present, though it may be explicitly None
        # (installed applications).
        if self.config.client_secret is self.config.CONFIG_NOT_SET:
            raise MissingRequiredAttributeException(
                f"{required_message.format('client_secret')}\nFor installed"
                " applications this value must be set to None via a keyword argument"
                " to the `Reddit` class constructor."
            )
        self._check_for_update()
        self._prepare_objector()
        self._prepare_prawcore(
            requestor_class=requestor_class, requestor_kwargs=requestor_kwargs
        )
        # Public helper attributes; the bare strings below are their
        # documentation (picked up by Sphinx).
        self.auth = models.Auth(self, None)
        """An instance of :class:`.Auth`.
        Provides the interface for interacting with installed and web applications.
        .. seealso::
            :ref:`auth_url`
        """
        self.drafts = models.DraftHelper(self, None)
        """An instance of :class:`.DraftHelper`.
        Provides the interface for working with :class:`.Draft` instances.
        For example, to list the currently authenticated user's drafts:
        .. code-block:: python
            drafts = reddit.drafts()
        To create a draft on r/test run:
        .. code-block:: python
            reddit.drafts.create(title="title", selftext="selftext", subreddit="test")
        """
        self.front = models.Front(self)
        """An instance of :class:`.Front`.
        Provides the interface for interacting with front page listings. For example:
        .. code-block:: python
            for submission in reddit.front.hot():
                print(submission)
        """
        self.inbox = models.Inbox(self, None)
        """An instance of :class:`.Inbox`.
        Provides the interface to a user's inbox which produces :class:`.Message`,
        :class:`.Comment`, and :class:`.Submission` instances. For example, to iterate
        through comments which mention the authorized user run:
        .. code-block:: python
            for comment in reddit.inbox.mentions():
                print(comment)
        """
        self.live = models.LiveHelper(self, None)
        """An instance of :class:`.LiveHelper`.
        Provides the interface for working with :class:`.LiveThread` instances. At
        present only new live threads can be created.
        .. code-block:: python
            reddit.live.create(title="title", description="description")
        """
        self.multireddit = models.MultiredditHelper(self, None)
        """An instance of :class:`.MultiredditHelper`.
        Provides the interface to working with :class:`.Multireddit` instances. For
        example, you can obtain a :class:`.Multireddit` instance via:
        .. code-block:: python
            reddit.multireddit(redditor="samuraisam", name="programming")
        """
        self.redditors = models.Redditors(self, None)
        """An instance of :class:`.Redditors`.
        Provides the interface for :class:`.Redditor` discovery. For example, to iterate
        over the newest Redditors, run:
        .. code-block:: python
            for redditor in reddit.redditors.new(limit=None):
                print(redditor)
        """
        self.subreddit = models.SubredditHelper(self, None)
        """An instance of :class:`.SubredditHelper`.
        Provides the interface to working with :class:`.Subreddit` instances. For
        example to create a :class:`.Subreddit` run:
        .. code-block:: python
            reddit.subreddit.create(name="coolnewsubname")
        To obtain a lazy :class:`.Subreddit` instance run:
        .. code-block:: python
            reddit.subreddit("test")
        Multiple subreddits can be combined and filtered views of r/all can also be used
        just like a subreddit:
        .. code-block:: python
            reddit.subreddit("redditdev+learnpython+botwatch")
            reddit.subreddit("all-redditdev-learnpython")
        """
        self.subreddits = models.Subreddits(self, None)
        """An instance of :class:`.Subreddits`.
        Provides the interface for :class:`.Subreddit` discovery. For example, to
        iterate over the set of default subreddits run:
        .. code-block:: python
            for subreddit in reddit.subreddits.default(limit=None):
                print(subreddit)
        """
        self.user = models.User(self)
        """An instance of :class:`.User`.
        Provides the interface to the currently authorized :class:`.Redditor`. For
        example to get the name of the current user run:
        .. code-block:: python
            print(reddit.user.me())
        """
    def _check_for_async(self):
        # Best-effort detection of an asyncio environment; must never raise.
        if self.config.check_for_async:  # pragma: no cover
            try:
                # ``get_ipython`` only exists inside IPython/Jupyter shells;
                # NameError means we are not in one.
                shell = get_ipython().__class__.__name__
                if shell == "ZMQInteractiveShell":
                    # Jupyter kernel — presumably its own internal event loop,
                    # not user async code, so skip the warning.
                    return
            except NameError:
                pass
            in_async = False
            try:
                asyncio.get_running_loop()
                in_async = True
            except Exception:  # Quietly fail if any exception occurs during the check
                pass
            if in_async:
                logger.warning(
                    "It appears that you are using PRAW in an asynchronous"
                    " environment.\nIt is strongly recommended to use Async PRAW:"
                    " https://asyncpraw.readthedocs.io.\nSee"
                    " https://praw.readthedocs.io/en/latest/getting_started/multiple_instances.html#discord-bots-and-asynchronous-environments"
                    " for more info.\n",
                )
def _check_for_update(self):
if UPDATE_CHECKER_MISSING:
return
if not Reddit.update_checked and self.config.check_for_updates:
update_check(__package__, __version__)
Reddit.update_checked = True
    def _prepare_common_authorizer(self, authenticator):
        """Attach an authorized session core, or remain read-only.
        Priority: an explicit ``token_manager`` (deprecated), then a configured
        ``refresh_token``; with neither, the read-only core stays active.
        """
        if self._token_manager is not None:
            warn(
                "Token managers have been deprecated and will be removed in the near"
                " future. See https://www.reddit.com/r/redditdev/comments/olk5e6/"
                "followup_oauth2_api_changes_regarding_refresh/ for more details.",
                category=DeprecationWarning,
                stacklevel=2,
            )
            # The two refresh-token mechanisms are mutually exclusive.
            if self.config.refresh_token:
                raise TypeError(
                    "``refresh_token`` setting cannot be provided when providing"
                    " ``token_manager``"
                )
            self._token_manager.reddit = self
            authorizer = Authorizer(
                authenticator,
                post_refresh_callback=self._token_manager.post_refresh_callback,
                pre_refresh_callback=self._token_manager.pre_refresh_callback,
            )
        elif self.config.refresh_token:
            authorizer = Authorizer(
                authenticator, refresh_token=self.config.refresh_token
            )
        else:
            # No way to authorize: keep using the read-only core.
            self._core = self._read_only_core
            return
        self._core = self._authorized_core = session(authorizer)
    def _prepare_objector(self):
        """Build the Objector that maps Reddit "kind" strings to model classes."""
        mappings = {
            # Core kinds come from the (configurable) kind prefixes.
            self.config.kinds["comment"]: models.Comment,
            self.config.kinds["message"]: models.Message,
            self.config.kinds["redditor"]: models.Redditor,
            self.config.kinds["submission"]: models.Submission,
            self.config.kinds["subreddit"]: models.Subreddit,
            self.config.kinds["trophy"]: models.Trophy,
            # Fixed-name payload types.
            "Button": models.Button,
            "Collection": models.Collection,
            "Draft": models.Draft,
            "DraftList": models.DraftList,
            "Image": models.Image,
            "LabeledMulti": models.Multireddit,
            "Listing": models.Listing,
            "LiveUpdate": models.LiveUpdate,
            "LiveUpdateEvent": models.LiveThread,
            "MenuLink": models.MenuLink,
            "ModeratedList": models.ModeratedList,
            "ModmailAction": models.ModmailAction,
            "ModmailConversation": models.ModmailConversation,
            "ModmailConversations-list": models.ModmailConversationsListing,
            "ModmailMessage": models.ModmailMessage,
            "Submenu": models.Submenu,
            "TrophyList": models.TrophyList,
            "UserList": models.RedditorList,
            "UserSubreddit": models.UserSubreddit,
            # Lowercase kinds (widgets, listings, misc payloads).
            "button": models.ButtonWidget,
            "calendar": models.Calendar,
            "community-list": models.CommunityList,
            "custom": models.CustomWidget,
            "id-card": models.IDCard,
            "image": models.ImageWidget,
            "menu": models.Menu,
            "modaction": models.ModAction,
            "moderator-list": models.ModeratorListing,
            "moderators": models.ModeratorsWidget,
            "more": models.MoreComments,
            "post-flair": models.PostFlairWidget,
            "rule": models.Rule,
            "stylesheet": models.Stylesheet,
            "subreddit-rules": models.RulesWidget,
            "textarea": models.TextArea,
            "widget": models.Widget,
        }
        self._objector = Objector(self, mappings)
def _prepare_prawcore(self, *, requestor_class=None, requestor_kwargs=None):
requestor_class = requestor_class or Requestor
requestor_kwargs = requestor_kwargs or {}
requestor = requestor_class(
USER_AGENT_FORMAT.format(self.config.user_agent),
self.config.oauth_url,
self.config.reddit_url,
**requestor_kwargs,
)
if self.config.client_secret:
self._prepare_trusted_prawcore(requestor)
else:
self._prepare_untrusted_prawcore(requestor)
def _prepare_trusted_prawcore(self, requestor):
authenticator = TrustedAuthenticator(
requestor,
self.config.client_id,
self.config.client_secret,
self.config.redirect_uri,
)
read_only_authorizer = ReadOnlyAuthorizer(authenticator)
self._read_only_core = session(read_only_authorizer)
if self.config.username and self.config.password:
script_authorizer = ScriptAuthorizer(
authenticator, self.config.username, self.config.password
)
self._core = self._authorized_core = session(script_authorizer)
else:
self._prepare_common_authorizer(authenticator)
def _prepare_untrusted_prawcore(self, requestor):
authenticator = UntrustedAuthenticator(
requestor, self.config.client_id, self.config.redirect_uri
)
read_only_authorizer = DeviceIDAuthorizer(authenticator)
self._read_only_core = session(read_only_authorizer)
self._prepare_common_authorizer(authenticator)
@_deprecate_args("id", "url")
def comment(
self, # pylint: disable=invalid-name
id: Optional[str] = None, # pylint: disable=redefined-builtin
*,
url: Optional[str] = None,
):
"""Return a lazy instance of :class:`.Comment`.
:param id: The ID of the comment.
:param url: A permalink pointing to the comment.
.. note::
If you want to obtain the comment's replies, you will need to call
:meth:`~.Comment.refresh` on the returned :class:`.Comment`.
"""
return models.Comment(self, id=id, url=url)
def domain(self, domain: str):
"""Return an instance of :class:`.DomainListing`.
:param domain: The domain to obtain submission listings for.
"""
return models.DomainListing(self, domain)
@_deprecate_args("path", "params")
def get(
self,
path: str,
*,
params: Optional[Union[str, Dict[str, Union[str, int]]]] = None,
):
"""Return parsed objects returned from a GET request to ``path``.
:param path: The path to fetch.
:param params: The query parameters to add to the request (default: ``None``).
"""
return self._objectify_request(method="GET", params=params, path=path)
@_deprecate_args("fullnames", "url", "subreddits")
def info(
self,
*,
fullnames: Optional[Iterable[str]] = None,
subreddits: Optional[Iterable[Union["praw.models.Subreddit", str]]] = None,
url: Optional[str] = None,
) -> Generator[
Union["praw.models.Subreddit", "praw.models.Comment", "praw.models.Submission"],
None,
None,
]:
"""Fetch information about each item in ``fullnames``, ``url``, or ``subreddits``.
:param fullnames: A list of fullnames for comments, submissions, and/or
subreddits.
:param subreddits: A list of subreddit names or :class:`.Subreddit` objects to
retrieve subreddits from.
:param url: A url (as a string) to retrieve lists of link submissions from.
:returns: A generator that yields found items in their relative order.
Items that cannot be matched will not be generated. Requests will be issued in
batches for each 100 fullnames.
.. note::
For comments that are retrieved via this method, if you want to obtain its
replies, you will need to call :meth:`~.Comment.refresh` on the yielded
:class:`.Comment`.
.. note::
When using the URL option, it is important to be aware that URLs are treated
literally by Reddit's API. As such, the URLs ``"youtube.com"`` and
``"https://www.youtube.com"`` will provide a different set of submissions.
"""
none_count = (fullnames, url, subreddits).count(None)
if none_count != 2:
raise TypeError(
"Either `fullnames`, `url`, or `subreddits` must be provided."
)
is_using_fullnames = fullnames is not None
ids_or_names = fullnames if is_using_fullnames else subreddits
if ids_or_names is not None:
if isinstance(ids_or_names, str):
raise TypeError(
"`fullnames` and `subreddits` must be a non-str iterable."
)
api_parameter_name = "id" if is_using_fullnames else "sr_name"
def generator(names):
if is_using_fullnames:
iterable = iter(names)
else:
iterable = iter([str(item) for item in names])
while True:
chunk = list(islice(iterable, 100))
if not chunk:
break
params = {api_parameter_name: ",".join(chunk)}
for result in self.get(API_PATH["info"], params=params):
yield result
return generator(ids_or_names)
def generator(url):
params = {"url": url}
for result in self.get(API_PATH["info"], params=params):
yield result
return generator(url)
def _objectify_request(
self,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
files: Optional[Dict[str, IO]] = None,
json: Optional[Dict[Any, Any]] = None,
method: str = "",
params: Optional[Union[str, Dict[str, str]]] = None,
path: str = "",
) -> Any:
"""Run a request through the ``Objector``.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param files: Dictionary, filename to file (like) object mapping (default:
``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
:param method: The HTTP method (e.g., ``"GET"``, ``"POST"``, ``"PUT"``,
``"DELETE"``).
:param params: The query parameters to add to the request (default: ``None``).
:param path: The path to fetch.
"""
return self._objector.objectify(
self.request(
data=data,
files=files,
json=json,
method=method,
params=params,
path=path,
)
)
def _handle_rate_limit(
self, exception: RedditAPIException
) -> Optional[Union[int, float]]:
for item in exception.items:
if item.error_type == "RATELIMIT":
amount_search = self._ratelimit_regex.search(item.message)
if not amount_search:
break
seconds = int(amount_search.group(1))
if amount_search.group(2).startswith("minute"):
seconds *= 60
elif amount_search.group(2).startswith("millisecond"):
seconds = 0
if seconds <= int(self.config.ratelimit_seconds):
sleep_seconds = seconds + 1
return sleep_seconds
return None
@_deprecate_args("path", "data", "json", "params")
def delete(
self,
path: str,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
json: Optional[Dict[Any, Any]] = None,
params: Optional[Union[str, Dict[str, str]]] = None,
) -> Any:
"""Return parsed objects returned from a DELETE request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
:param params: The query parameters to add to the request (default: ``None``).
"""
return self._objectify_request(
data=data, json=json, method="DELETE", params=params, path=path
)
@_deprecate_args("path", "data", "json")
def patch(
self,
path: str,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
json: Optional[Dict[Any, Any]] = None,
) -> Any:
"""Return parsed objects returned from a PATCH request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
"""
return self._objectify_request(data=data, json=json, method="PATCH", path=path)
    @_deprecate_args("path", "data", "files", "params", "json")
    def post(
        self,
        path: str,
        *,
        data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
        files: Optional[Dict[str, IO]] = None,
        json: Optional[Dict[Any, Any]] = None,
        params: Optional[Union[str, Dict[str, str]]] = None,
    ) -> Any:
        """Return parsed objects returned from a POST request to ``path``.
        :param path: The path to fetch.
        :param data: Dictionary, bytes, or file-like object to send in the body of the
            request (default: ``None``).
        :param files: Dictionary, filename to file (like) object mapping (default:
            ``None``).
        :param json: JSON-serializable object to send in the body of the request with a
            Content-Type header of application/json (default: ``None``). If ``json`` is
            provided, ``data`` should not be.
        :param params: The query parameters to add to the request (default: ``None``).
        """
        if json is None:
            # Form-encoded POSTs always carry a body; default to an empty dict.
            data = data or {}
        # Up to 3 attempts total, retrying only on rate-limit errors whose
        # delay fits within config.ratelimit_seconds (see _handle_rate_limit).
        attempts = 3
        last_exception = None
        while attempts > 0:
            attempts -= 1
            try:
                return self._objectify_request(
                    data=data,
                    files=files,
                    json=json,
                    method="POST",
                    params=params,
                    path=path,
                )
            except RedditAPIException as exception:
                last_exception = exception
                seconds = self._handle_rate_limit(exception=exception)
                if seconds is None:
                    # Not a retryable rate-limit error; fall through to raise.
                    break
                second_string = "second" if seconds == 1 else "seconds"
                logger.debug(f"Rate limit hit, sleeping for {seconds} {second_string}")
                time.sleep(seconds)
        # Reached only via break or exhausted retries, so last_exception is set.
        raise last_exception
@_deprecate_args("path", "data", "json")
def put(
self,
path: str,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
json: Optional[Dict[Any, Any]] = None,
):
"""Return parsed objects returned from a PUT request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
"""
return self._objectify_request(data=data, json=json, method="PUT", path=path)
@_deprecate_args("nsfw")
def random_subreddit(self, *, nsfw: bool = False) -> "praw.models.Subreddit":
"""Return a random lazy instance of :class:`.Subreddit`.
:param nsfw: Return a random NSFW (not safe for work) subreddit (default:
``False``).
"""
url = API_PATH["subreddit"].format(subreddit="randnsfw" if nsfw else "random")
path = None
try:
self.get(url, params={"unique": self._next_unique})
except Redirect as redirect:
path = redirect.path
return models.Subreddit(self, path.split("/")[2])
@_deprecate_args("name", "fullname")
def redditor(
self, name: Optional[str] = None, *, fullname: Optional[str] = None
) -> "praw.models.Redditor":
"""Return a lazy instance of :class:`.Redditor`.
:param name: The name of the redditor.
:param fullname: The fullname of the redditor, starting with ``t2_``.
Either ``name`` or ``fullname`` can be provided, but not both.
"""
return models.Redditor(self, name=name, fullname=fullname)
@_deprecate_args("method", "path", "params", "data", "files", "json")
def request(
self,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
files: Optional[Dict[str, IO]] = None,
json: Optional[Dict[Any, Any]] = None,
method: str,
params: Optional[Union[str, Dict[str, Union[str, int]]]] = None,
path: str,
) -> Any:
"""Return the parsed JSON data returned from a request to URL.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param files: Dictionary, filename to file (like) object mapping (default:
``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
:param method: The HTTP method (e.g., ``"GET"``, ``"POST"``, ``"PUT"``,
``"DELETE"``).
:param params: The query parameters to add to the request (default: ``None``).
:param path: The path to fetch.
"""
if self.config.check_for_async:
self._check_for_async()
if data and json:
raise ClientException("At most one of `data` or `json` is supported.")
try:
return self._core.request(
data=data,
files=files,
json=json,
method=method,
params=params,
path=path,
)
except BadRequest as exception:
try:
data = exception.response.json()
except ValueError:
if exception.response.text:
data = {"reason": exception.response.text}
else:
raise exception
if set(data) == {"error", "message"}:
raise
explanation = data.get("explanation")
if "fields" in data:
assert len(data["fields"]) == 1
field = data["fields"][0]
else:
field = None
raise RedditAPIException(
[data["reason"], explanation, field]
) from exception
@_deprecate_args("id", "url")
def submission( # pylint: disable=invalid-name,redefined-builtin
self, id: Optional[str] = None, *, url: Optional[str] = None
) -> "praw.models.Submission":
"""Return a lazy instance of :class:`.Submission`.
:param id: A Reddit base36 submission ID, e.g., ``"2gmzqe"``.
:param url: A URL supported by :meth:`.Submission.id_from_url`.
Either ``id`` or ``url`` can be provided, but not both.
"""
return models.Submission(self, id=id, url=url)
def username_available(self, name: str) -> bool:
"""Check to see if the username is available.
For example, to check if the username ``bboe`` is available, try:
.. code-block:: python
reddit.username_available("bboe")
"""
return self._objectify_request(
method="GET", params={"user": name}, path=API_PATH["username_available"]
)
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2020 Intel Corporation
import copy
import os
import time

from fastapi.testclient import TestClient

from onecontainer_api import models, schemas, config, startup_svc
from onecontainer_api.frontend import app
# Resolve the published host ports of the "web-rtmp" service; fall back to the
# container defaults (80 / 1935) when the service is not in the initial set.
web_server_port = 80
rtmp_server_port = 1935
_web_rtmp = next(
    (svc for svc in config.INITIAL_SERVICES if svc["image"] == "web-rtmp"), None
)
if _web_rtmp is not None:
    web_server_port = _web_rtmp["port"]["80/tcp"]
    rtmp_server_port = _web_rtmp["port"]["1935/tcp"]

# Sample media URLs served by the web-rtmp container on the backend gateway.
video_0 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/fruit-and-vegetable-detection.mp4"
video_1 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/bottle-detection.mp4"
video_2 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/face-demographics-walking.mp4"
rtmp_ip = f"{config.BACKEND_NETWORK_GATEWAY}:{rtmp_server_port}"

# Default request body for the probe endpoint tests.
input_data = {"source": video_0}
# Expected ffprobe result for video_0; test_probe compares this verbatim.
probe_input = {'streams': [{'index': 0, 'codec_name': 'h264', 'codec_long_name': 'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10', 'profile': 'High', 'codec_type': 'video', 'codec_time_base': '1001/120000', 'codec_tag_string': 'avc1', 'codec_tag': '0x31637661', 'width': 960, 'height': 540, 'coded_width': 960, 'coded_height': 544, 'closed_captions': 0, 'has_b_frames': 0, 'sample_aspect_ratio': '1:1', 'display_aspect_ratio': '16:9', 'pix_fmt': 'yuv420p', 'level': 32, 'color_range': 'tv', 'color_space': 'bt709', 'color_transfer': 'bt709', 'color_primaries': 'bt709', 'chroma_location': 'left', 'field_order': 'progressive', 'refs': 1, 'is_avc': 'true', 'nal_length_size': '4', 'r_frame_rate': '60000/1001', 'avg_frame_rate': '60000/1001', 'time_base': '1/60000', 'start_pts': 0, 'start_time': '0.000000', 'duration_ts': 3636633, 'duration': '60.610550', 'bit_rate': '2335818', 'bits_per_raw_sample': '8', 'nb_frames': '3633', 'disposition': {'default': 1, 'dub': 0, 'original': 0, 'comment': 0, 'lyrics': 0, 'karaoke': 0, 'forced': 0, 'hearing_impaired': 0, 'visual_impaired': 0, 'clean_effects': 0, 'attached_pic': 0, 'timed_thumbnails': 0}, 'tags': {'creation_time': '2018-06-15T21:05:12.000000Z', 'language': 'und', 'handler_name': 'Core Media Video'}}], 'format': {'filename': 'http://172.17.0.1:5553/sample-videos/fruit-and-vegetable-detection.mp4', 'nb_streams': 1, 'nb_programs': 0, 'format_name': 'mov,mp4,m4a,3gp,3g2,mj2', 'format_long_name': 'QuickTime / MOV', 'start_time': '0.000000', 'duration': '60.610550', 'size': '17760065', 'bit_rate': '2344154', 'probe_score': 100, 'tags': {'major_brand': 'mp42', 'minor_version': '1', 'compatible_brands': 'mp41mp42isom', 'creation_time': '2018-06-15T21:05:12.000000Z'}}}
# Container formats exercised by test_pipeline_supported_containers.
supported_containers = ["mkv", "mp4", "mov", "m4a", "avi", "webm", "wmv", "vob"]
# Audio container extension -> ffmpeg audio encoder name.
supported_audio_codecs = {
    "aac": "aac",
    "ogg": "libvorbis",
    "wav": "pcm_s16le",
    "flac": "flac",
    "ac3": "ac3",
    "wma": "wmav2",
}
# Video container extension -> VAAPI (GPU) video encoder name.
supported_gpu_codecs = {
    "mp4": "h264_vaapi",
    "mkv": "hevc_vaapi",
    "mov": "mjpeg_vaapi",
    "webm": "vp8_vaapi"
}
# Transcode video_1 to mp4/h264 with default encoder settings.
pipeline_codecs = {
    "input_file": {
        "source": video_1
    },
    "outputs": [
        {
            "container": "mp4",
            "channels": [
                {
                    "stream_type": "video",
                    "codec": "libx264"
                }
            ]
        }
    ]
}
# h264 encode with explicit preset/tune/crf codec parameters.
pipeline_h264 = {
    "input_file": {
        "source": video_1
    },
    "outputs": [
        {
            "container": "mkv",
            "channels": [
                {
                    "stream_type": "video",
                    "codec": "libx264",
                    "codec_params": {
                        "preset": "ultrafast",
                        "tune": "film",
                        "crf": "30"
                    }
                }
            ]
        }
    ]
}
# Real-time ("-re") read of video_1 muxed into an MPEG-TS stream.
pipeline_mpegts = {
    "input_file": {
        "source": video_1,
        "params": {
            "re": None
        }
    },
    "outputs": [
        {
            "container": "mpegts",
            "channels": [
                {
                    "stream_type": "video",
                    "codec": "libx264",
                    "codec_params": {
                        "preset": "fast",
                        "crf": "30"
                    }
                }
            ]
        }
    ]
}
# Real-time encode pushed to the RTMP server at rtmp_ip under path "live".
pipeline_rtmp = {
    "input_file": {
        "source": video_1,
        "params": {
            "re": None
        }
    },
    "outputs": [
        {
            "container": "flv",
            "rtmp_ip": rtmp_ip,
            "rtmp_path": "live",
            "channels": [
                {
                    "stream_type": "video",
                    "codec": "libx264",
                    "codec_params": {
                        "preset": "fast",
                        "crf": "30"
                    }
                }
            ]
        }
    ]
}
# Video filter chain (scale/deflicker/reverse/hue) plus audio filter chain.
pipeline_filters = {
    "input_file": {
        "source": video_2
    },
    "outputs": [
        {
            "container": "mkv",
            "channels": [
                {
                    "stream_type": "video",
                    "filters": {
                        "scale": {
                            "w": "iw/2",
                            "h": -1
                        },
                        "deflicker": {
                            "mode": "pm",
                            "size": 10
                        },
                        "reverse": {},
                        "hue": {
                            "s": 0
                        }
                    }
                },
                {
                    "stream_type": "audio",
                    "filters": {
                        "atrim": {
                            "start": 1
                        },
                        "asetpts": "PTS-STARTPTS",
                        "volume": {
                            "volume": 0.8
                        },
                        "areverse": {},
                        "aphaser": {}
                    }
                }
            ]
        }
    ]
}
# Remux audio+video into mp4 without re-encoding (codec "copy").
pipeline_copy = {
    "input_file": {
        "source": video_2
    },
    "outputs": [
        {
            "container": "mp4",
            "channels": [
                {
                    "stream_type": "video",
                    "codec": "copy"
                },
                {
                    "stream_type": "audio",
                    "codec": "copy"
                }
            ]
        }
    ]
}
# Minimal pipeline: container only, no channel configuration.
pipeline_empty = {
    "input_file": {
        "source": video_2
    },
    "outputs": [
        {
            "container": "mp4"
        }
    ]
}
# mkv muxer options (stereo metadata and subtitle default_mode).
pipeline_mkv = {
    "input_file": {
        "source": video_1
    },
    "outputs": [
        {
            "container": "mkv",
            "params": {
                "metadata": "stereo_mode=left_right",
                "default_mode": "infer_no_subs"
            }
        }
    ]
}
# mp4 muxer options (smooth-streaming movflags).
pipeline_mp4 = {
    "input_file": {
        "source":video_1
    },
    "outputs": [
        {
            "container": "mp4",
            "params": {
                "movflags": "isml+frag_keyframe"
            }
        }
    ]
}
# Audio-only AAC extraction; the video stream is dropped via "-vn".
pipeline_aac = {
    "input_file": {
        "source": video_2
    },
    "outputs": [
        {
            "container": "aac",
            "channels": [
                {
                    "stream_type": "audio",
                    "codec": "aac",
                    "codec_params": {
                        "ab": 192000,
                        "profile": "aac_ltp",
                        "strict": "-2",
                    }
                },
                {
                    "stream_type": "video",
                    "params": {
                        "vn": None
                    }
                }
            ]
        }
    ]
}
class TestMedia():
    def setup_method(self):
        """Create all ORM tables before each test (fresh SQLite schema)."""
        models.Base.metadata.create_all(bind=models.engine)
    def teardown_method(self):
        """Remove the on-disk SQLite file after each test.

        ``DATABASE_URL`` is assumed to be a ``sqlite:///<path>`` URL, so the
        text after ``///`` is the file path — TODO confirm for other backends.
        """
        os.remove(config.DATABASE_URL.split("///")[1])
def test_probe(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/probe?sync=true", json=input_data)
assert response.status_code == 200
assert response.json() == probe_input
def test_probe_missing_fields(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/probe?sync=true", json={})
assert response.status_code == 400
assert response.json().get("status") == "InputFile field required: source"
def test_probe_wrong_data(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": "wrong"})
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get('description') == ["wrong: No such file or directory"]
response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": ""})
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get("description") == [": No such file or directory"]
response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": None})
assert response.status_code == 400
assert response.json().get("status") == "InputFile none is not an allowed value: source"
response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": 1})
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get("description") == ["1: No such file or directory"]
def test_pipeline_missing_fields(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_codecs.copy()
json_data["outputs"] = [{}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 400
assert response.json().get("status") == "Pipeline field required: outputs,0,container"
json_data["outputs"][0] = {"container": "test", "channels": [{}]}
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 400
assert response.json().get("status") == "Pipeline field required: outputs,0,channels,0,stream_type"
json_data["outputs"] = []
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get('description') == "No outputs specified"
json_data.pop("input_file")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 400
assert response.json().get("status") == "Pipeline field required: input_file"
def test_pipeline_unsupported_data(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_codecs.copy()
json_data["outputs"][0]["container"] = "wrong"
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
for output in response.json():
assert output['status'] == 'error'
assert output['command_output'][-1].strip() == f"{output.get("id")}.wrong: Invalid argument"
json_data["outputs"][0]["container"] = "mkv"
json_data["outputs"][0]["channels"][0]["codec"] = "wrong"
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
time.sleep(2)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
for output in response.json():
assert output['status'] == 'error'
assert output['command_output'][-1].strip() == "Unknown encoder 'wrong'"
json_data["outputs"][0]["channels"][0]["codec"] = "libx264"
json_data["outputs"][0]["channels"][0]["stream_type"] = "wrong"
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v {outputs[index]}"
def test_pipeline_copy(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_copy)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -acodec copy -vcodec copy {outputs[index]}"
def test_pipeline_empty(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_empty)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a {outputs[index]}"
def test_pipeline_mkv(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mkv)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -default_mode infer_no_subs -metadata stereo_mode=left_right {outputs[index]}"
def test_pipeline_mp4(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mp4)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -movflags isml+frag_keyframe {outputs[index]}"
def test_pipeline_aac(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_aac)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -ab 192000 -acodec aac -profile:a aac_ltp -strict -2 -vn {outputs[index]}"
def test_pipeline_h264(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_h264)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(2)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -crf 30 -preset ultrafast -tune film -vcodec libx264 {outputs[index]}"
def test_pipeline_filters(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_filters)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(5)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
assert result[index]['command'] == f"ffmpeg -i {video_2} -filter_complex [0:v]scale=h=-1:w=iw/2[s0];[s0]deflicker=mode=pm:size=10[s1];[s1]reverse[s2];[s2]hue=s=0[s3];[0:a]atrim=start=1[s4];[s4]asetpts=PTS-STARTPTS[s5];[s5]volume=volume=0.8[s6];[s6]areverse[s7];[s7]aphaser[s8] -map [s3] -map [s8] {outputs[index]}"
def test_pipeline_supported_containers(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_empty.copy()
for container in supported_containers:
json_data["outputs"][0]["container"] = container
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
timeout = 15
finished = False
while not finished and timeout:
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
if result[index]['status'] == 'finished':
assert result[index]['command_retcode'] == 0
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a {outputs[index]}"
finished = True
timeout -= 1
if not finished:
assert False
def test_pipeline_supported_audio_codecs(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_empty.copy()
for extension, codec in supported_audio_codecs.items():
json_data["outputs"][0]["container"] = extension
json_data["outputs"][0]["channels"] = [{"stream_type": "audio", "codec": codec}, {"stream_type": "video", "params": {"vn": None}}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
timeout = 15
finished = False
while not finished and timeout:
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
if result[index]['status'] == 'finished':
assert result[index]['command_retcode'] == 0
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -acodec {codec} -vn {outputs[index]}"
finished = True
timeout -= 1
if not finished:
assert False
def test_pipeline_supported_gpu_codecs(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_empty.copy()
for extension, codec in supported_gpu_codecs.items():
json_data["outputs"][0]["container"] = extension
json_data["outputs"][0]["params"] = {"vaapi_device": "/dev/dri/renderD128"}
json_data["outputs"][0]["channels"] = [{"stream_type": "video", "codec": codec, "params": {"vf":"format=nv12,hwupload"}}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
timeout = 15
finished = False
while not finished or timeout == 0:
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
if result[index]['status'] == 'finished':
assert result[index]['command_retcode'] == 0
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -vaapi_device /dev/dri/renderD128 -vcodec {codec} -vf format=nv12,hwupload {outputs[index]}"
finished = True
timeout -= 1
if not finished:
assert False
def test_pipeline_ttl(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_copy.copy()
json_data["ttl"] = 5
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
result = response.json()
time.sleep(6)
response = client.get(f"/media/{svc_id}/pipeline/{result["id"]}?sync=true")
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get("description") == f"Pipeline {result["id"]} doesn't exist"
def test_pipeline_azure_upload(self):
ks = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
bucket = os.getenv("CLOUD_STORAGE_BUCKET")
if ks and bucket:
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_copy.copy()
json_data["outputs"][0]["storage"] = [{
"name": "azure",
"bucket": bucket,
"env": {
"AZURE_STORAGE_CONNECTION_STRING": ks
}
}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
# response = client.get(f"/media/{svc_id}/pipeline/{result["id"]}?sync=true")
def test_pipeline_mpegts(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mpegts)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(30)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'running'
assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f mpegts -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
time.sleep(15)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
def test_pipeline_stop(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mpegts)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(2)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'running'
assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f mpegts -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
time.sleep(2)
response = client.delete(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
def test_pipeline_rtmp(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_rtmp)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(30)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert outputs[index] == f"rtmp://{rtmp_ip}/live"
assert result[index]['status'] == 'running'
assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f flv -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
time.sleep(15)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished' | # SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2020 Intel Corporation
import os
import time
from fastapi.testclient import TestClient
from onecontainer_api import models, schemas, config, startup_svc
from onecontainer_api.frontend import app
web_server_port = 80
rtmp_server_port = 1935
for svc in config.INITIAL_SERVICES:
if svc["image"] == "web-rtmp":
web_server_port = svc["port"]["80/tcp"]
rtmp_server_port = svc["port"]["1935/tcp"]
break
video_0 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/fruit-and-vegetable-detection.mp4"
video_1 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/bottle-detection.mp4"
video_2 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/face-demographics-walking.mp4"
rtmp_ip = f"{config.BACKEND_NETWORK_GATEWAY}:{rtmp_server_port}"
input_data = {
"source": video_0
}
probe_input = {'streams': [{'index': 0, 'codec_name': 'h264', 'codec_long_name': 'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10', 'profile': 'High', 'codec_type': 'video', 'codec_time_base': '1001/120000', 'codec_tag_string': 'avc1', 'codec_tag': '0x31637661', 'width': 960, 'height': 540, 'coded_width': 960, 'coded_height': 544, 'closed_captions': 0, 'has_b_frames': 0, 'sample_aspect_ratio': '1:1', 'display_aspect_ratio': '16:9', 'pix_fmt': 'yuv420p', 'level': 32, 'color_range': 'tv', 'color_space': 'bt709', 'color_transfer': 'bt709', 'color_primaries': 'bt709', 'chroma_location': 'left', 'field_order': 'progressive', 'refs': 1, 'is_avc': 'true', 'nal_length_size': '4', 'r_frame_rate': '60000/1001', 'avg_frame_rate': '60000/1001', 'time_base': '1/60000', 'start_pts': 0, 'start_time': '0.000000', 'duration_ts': 3636633, 'duration': '60.610550', 'bit_rate': '2335818', 'bits_per_raw_sample': '8', 'nb_frames': '3633', 'disposition': {'default': 1, 'dub': 0, 'original': 0, 'comment': 0, 'lyrics': 0, 'karaoke': 0, 'forced': 0, 'hearing_impaired': 0, 'visual_impaired': 0, 'clean_effects': 0, 'attached_pic': 0, 'timed_thumbnails': 0}, 'tags': {'creation_time': '2018-06-15T21:05:12.000000Z', 'language': 'und', 'handler_name': 'Core Media Video'}}], 'format': {'filename': 'http://172.17.0.1:5553/sample-videos/fruit-and-vegetable-detection.mp4', 'nb_streams': 1, 'nb_programs': 0, 'format_name': 'mov,mp4,m4a,3gp,3g2,mj2', 'format_long_name': 'QuickTime / MOV', 'start_time': '0.000000', 'duration': '60.610550', 'size': '17760065', 'bit_rate': '2344154', 'probe_score': 100, 'tags': {'major_brand': 'mp42', 'minor_version': '1', 'compatible_brands': 'mp41mp42isom', 'creation_time': '2018-06-15T21:05:12.000000Z'}}}
supported_containers = ["mkv", "mp4", "mov", "m4a", "avi", "webm", "wmv", "vob"]
supported_audio_codecs = {
"aac": "aac",
"ogg": "libvorbis",
"wav": "pcm_s16le",
"flac": "flac",
"ac3": "ac3",
"wma": "wmav2",
}
supported_gpu_codecs = {
"mp4": "h264_vaapi",
"mkv": "hevc_vaapi",
"mov": "mjpeg_vaapi",
"webm": "vp8_vaapi"
}
pipeline_codecs = {
"input_file": {
"source": video_1
},
"outputs": [
{
"container": "mp4",
"channels": [
{
"stream_type": "video",
"codec": "libx264"
}
]
}
]
}
pipeline_h264 = {
"input_file": {
"source": video_1
},
"outputs": [
{
"container": "mkv",
"channels": [
{
"stream_type": "video",
"codec": "libx264",
"codec_params": {
"preset": "ultrafast",
"tune": "film",
"crf": "30"
}
}
]
}
]
}
pipeline_mpegts = {
"input_file": {
"source": video_1,
"params": {
"re": None
}
},
"outputs": [
{
"container": "mpegts",
"channels": [
{
"stream_type": "video",
"codec": "libx264",
"codec_params": {
"preset": "fast",
"crf": "30"
}
}
]
}
]
}
pipeline_rtmp = {
"input_file": {
"source": video_1,
"params": {
"re": None
}
},
"outputs": [
{
"container": "flv",
"rtmp_ip": rtmp_ip,
"rtmp_path": "live",
"channels": [
{
"stream_type": "video",
"codec": "libx264",
"codec_params": {
"preset": "fast",
"crf": "30"
}
}
]
}
]
}
pipeline_filters = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "mkv",
"channels": [
{
"stream_type": "video",
"filters": {
"scale": {
"w": "iw/2",
"h": -1
},
"deflicker": {
"mode": "pm",
"size": 10
},
"reverse": {},
"hue": {
"s": 0
}
}
},
{
"stream_type": "audio",
"filters": {
"atrim": {
"start": 1
},
"asetpts": "PTS-STARTPTS",
"volume": {
"volume": 0.8
},
"areverse": {},
"aphaser": {}
}
}
]
}
]
}
pipeline_copy = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "mp4",
"channels": [
{
"stream_type": "video",
"codec": "copy"
},
{
"stream_type": "audio",
"codec": "copy"
}
]
}
]
}
pipeline_empty = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "mp4"
}
]
}
pipeline_mkv = {
"input_file": {
"source": video_1
},
"outputs": [
{
"container": "mkv",
"params": {
"metadata": "stereo_mode=left_right",
"default_mode": "infer_no_subs"
}
}
]
}
pipeline_mp4 = {
"input_file": {
"source":video_1
},
"outputs": [
{
"container": "mp4",
"params": {
"movflags": "isml+frag_keyframe"
}
}
]
}
pipeline_aac = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "aac",
"channels": [
{
"stream_type": "audio",
"codec": "aac",
"codec_params": {
"ab": 192000,
"profile": "aac_ltp",
"strict": "-2",
}
},
{
"stream_type": "video",
"params": {
"vn": None
}
}
]
}
]
}
class TestMedia:
    """End-to-end tests for the mers-ffmpeg media service REST API.

    Every test recreates the sqlite schema, resolves the id of the
    registered ``mers-ffmpeg`` service and drives the ``/probe`` and
    ``/pipeline`` endpoints using the module-level pipeline fixtures.
    """

    def setup_method(self):
        # Recreate all tables so each test starts from an empty database.
        models.Base.metadata.create_all(bind=models.engine)

    def teardown_method(self):
        # DATABASE_URL has the form "sqlite:///<path>"; remove the db file.
        os.remove(config.DATABASE_URL.split("///")[1])

    @staticmethod
    def _svc_id(client):
        """Return the id of the registered mers-ffmpeg service."""
        response = client.get("/service")
        data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
        return data.pop("id")

    @staticmethod
    def _payload(pipeline):
        """Deep-copy a shared pipeline fixture.

        The fixtures are module-level dicts; a shallow ``.copy()`` lets a
        test mutate *nested* dicts/lists of the shared object and leak
        state into later tests, so always hand out a deep copy.
        """
        import copy
        return copy.deepcopy(pipeline)

    def test_probe(self):
        """Probing a valid input file returns the expected ffprobe data."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            response = client.post(f"/media/{svc_id}/probe?sync=true", json=input_data)
            assert response.status_code == 200
            assert response.json() == probe_input

    def test_probe_missing_fields(self):
        """Probe without a source field is rejected with a 400."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            response = client.post(f"/media/{svc_id}/probe?sync=true", json={})
            assert response.status_code == 400
            assert response.json().get("status") == "InputFile field required: source"

    def test_probe_wrong_data(self):
        """Probe with unusable source values reports a useful error."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            # Nonexistent path
            response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": "wrong"})
            assert response.status_code == 400
            assert response.json().get("status", {}).get('detail', {}).get('description') == ["wrong: No such file or directory"]
            # Empty path
            response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": ""})
            assert response.status_code == 400
            assert response.json().get("status", {}).get('detail', {}).get("description") == [": No such file or directory"]
            # Null is rejected by validation before reaching ffprobe
            response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": None})
            assert response.status_code == 400
            assert response.json().get("status") == "InputFile none is not an allowed value: source"
            # Non-string source is coerced, then fails as a missing file
            response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": 1})
            assert response.status_code == 400
            assert response.json().get("status", {}).get('detail', {}).get("description") == ["1: No such file or directory"]

    def test_pipeline_missing_fields(self):
        """Pipeline validation reports each missing required field."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            json_data = self._payload(pipeline_codecs)
            json_data["outputs"] = [{}]
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
            assert response.status_code == 400
            assert response.json().get("status") == "Pipeline field required: outputs,0,container"
            json_data["outputs"][0] = {"container": "test", "channels": [{}]}
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
            assert response.status_code == 400
            assert response.json().get("status") == "Pipeline field required: outputs,0,channels,0,stream_type"
            json_data["outputs"] = []
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
            assert response.status_code == 400
            assert response.json().get("status", {}).get('detail', {}).get('description') == "No outputs specified"
            json_data.pop("input_file")
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
            assert response.status_code == 400
            assert response.json().get("status") == "Pipeline field required: input_file"

    def test_pipeline_unsupported_data(self):
        """Unknown container/codec fail at run time; unknown stream_type is ignored."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            json_data = self._payload(pipeline_codecs)
            # Unknown container: pipeline is accepted but ffmpeg errors out.
            json_data["outputs"][0]["container"] = "wrong"
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            time.sleep(3)
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            for output in response.json():
                assert output['status'] == 'error'
                assert output['command_output'][-1].strip() == f"{output.get('id')}.wrong: Invalid argument"
            # Unknown encoder name
            json_data["outputs"][0]["container"] = "mkv"
            json_data["outputs"][0]["channels"][0]["codec"] = "wrong"
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            time.sleep(2)
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            for output in response.json():
                assert output['status'] == 'error'
                assert output['command_output'][-1].strip() == "Unknown encoder 'wrong'"
            # Unknown stream_type: the channel is dropped from the command.
            json_data["outputs"][0]["channels"][0]["codec"] = "libx264"
            json_data["outputs"][0]["channels"][0]["stream_type"] = "wrong"
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            outputs = response.json().get("outputs", [])
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert result[index]['status'] != 'error'
                assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v {outputs[index]}"

    def test_pipeline_copy(self):
        """Stream-copy pipeline builds the expected remux command."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_copy)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            outputs = response.json().get("outputs", [])
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert result[index]['status'] != 'error'
                assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -acodec copy -vcodec copy {outputs[index]}"

    def test_pipeline_empty(self):
        """Container-only pipeline maps both streams with default codecs."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_empty)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            outputs = response.json().get("outputs", [])
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert result[index]['status'] != 'error'
                assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a {outputs[index]}"

    def test_pipeline_mkv(self):
        """Muxer-level params appear in the generated command."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mkv)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            outputs = response.json().get("outputs", [])
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert result[index]['status'] != 'error'
                assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -default_mode infer_no_subs -metadata stereo_mode=left_right {outputs[index]}"

    def test_pipeline_mp4(self):
        """mp4 movflags are passed through to ffmpeg."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mp4)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            outputs = response.json().get("outputs", [])
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert result[index]['status'] != 'error'
                assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -movflags isml+frag_keyframe {outputs[index]}"

    def test_pipeline_aac(self):
        """Audio-only pipeline encodes AAC and drops video via -vn."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_aac)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            outputs = response.json().get("outputs", [])
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert result[index]['status'] != 'error'
                assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -ab 192000 -acodec aac -profile:a aac_ltp -strict -2 -vn {outputs[index]}"

    def test_pipeline_h264(self):
        """h264 transcode finishes and uses the configured codec params."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_h264)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            outputs = response.json().get("outputs", [])
            time.sleep(2)
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert result[index]['status'] == 'finished'
                assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -crf 30 -preset ultrafast -tune film -vcodec libx264 {outputs[index]}"

    def test_pipeline_filters(self):
        """Filter chains are rendered into a single -filter_complex graph."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_filters)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            outputs = response.json().get("outputs", [])
            time.sleep(5)
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert result[index]['status'] == 'finished'
                assert result[index]['command'] == f"ffmpeg -i {video_2} -filter_complex [0:v]scale=h=-1:w=iw/2[s0];[s0]deflicker=mode=pm:size=10[s1];[s1]reverse[s2];[s2]hue=s=0[s3];[0:a]atrim=start=1[s4];[s4]asetpts=PTS-STARTPTS[s5];[s5]volume=volume=0.8[s6];[s6]areverse[s7];[s7]aphaser[s8] -map [s3] -map [s8] {outputs[index]}"

    def test_pipeline_supported_containers(self):
        """Every supported container muxes successfully."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            json_data = self._payload(pipeline_empty)
            for container in supported_containers:
                json_data["outputs"][0]["container"] = container
                response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
                assert response.status_code == 200
                pipeline_id = response.json()['id']
                outputs = response.json().get("outputs", [])
                timeout = 15
                finished = False
                # Poll until the pipeline finishes or the retry budget runs out.
                while not finished and timeout:
                    time.sleep(3)
                    response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
                    assert response.status_code == 200
                    result = response.json()
                    for index in range(len(result)):
                        assert result[index]['status'] != 'error'
                        if result[index]['status'] == 'finished':
                            assert result[index]['command_retcode'] == 0
                            assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a {outputs[index]}"
                            finished = True
                    timeout -= 1
                assert finished, f"pipeline for container {container!r} did not finish in time"

    def test_pipeline_supported_audio_codecs(self):
        """Every supported audio codec encodes successfully (video dropped)."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            json_data = self._payload(pipeline_empty)
            for extension, codec in supported_audio_codecs.items():
                json_data["outputs"][0]["container"] = extension
                json_data["outputs"][0]["channels"] = [{"stream_type": "audio", "codec": codec}, {"stream_type": "video", "params": {"vn": None}}]
                response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
                assert response.status_code == 200
                pipeline_id = response.json()['id']
                outputs = response.json().get("outputs", [])
                timeout = 15
                finished = False
                # Poll until the pipeline finishes or the retry budget runs out.
                while not finished and timeout:
                    time.sleep(3)
                    response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
                    assert response.status_code == 200
                    result = response.json()
                    for index in range(len(result)):
                        assert result[index]['status'] != 'error'
                        if result[index]['status'] == 'finished':
                            assert result[index]['command_retcode'] == 0
                            assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -acodec {codec} -vn {outputs[index]}"
                            finished = True
                    timeout -= 1
                assert finished, f"pipeline for audio codec {codec!r} did not finish in time"

    def test_pipeline_supported_gpu_codecs(self):
        """Every supported VAAPI codec encodes successfully."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            json_data = self._payload(pipeline_empty)
            for extension, codec in supported_gpu_codecs.items():
                json_data["outputs"][0]["container"] = extension
                json_data["outputs"][0]["params"] = {"vaapi_device": "/dev/dri/renderD128"}
                json_data["outputs"][0]["channels"] = [{"stream_type": "video", "codec": codec, "params": {"vf":"format=nv12,hwupload"}}]
                response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
                assert response.status_code == 200
                pipeline_id = response.json()['id']
                outputs = response.json().get("outputs", [])
                timeout = 15
                finished = False
                # BUGFIX: was "while not finished or timeout == 0", which
                # spins forever once the timeout budget is exhausted without
                # finishing. Match the polling loop used by the sibling tests.
                while not finished and timeout:
                    time.sleep(3)
                    response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
                    assert response.status_code == 200
                    result = response.json()
                    for index in range(len(result)):
                        assert result[index]['status'] != 'error'
                        if result[index]['status'] == 'finished':
                            assert result[index]['command_retcode'] == 0
                            assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -vaapi_device /dev/dri/renderD128 -vcodec {codec} -vf format=nv12,hwupload {outputs[index]}"
                            finished = True
                    timeout -= 1
                assert finished, f"pipeline for gpu codec {codec!r} did not finish in time"

    def test_pipeline_ttl(self):
        """A pipeline expires and becomes unknown after its ttl elapses."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            json_data = self._payload(pipeline_copy)
            json_data["ttl"] = 5
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
            assert response.status_code == 200
            result = response.json()
            time.sleep(6)
            response = client.get(f"/media/{svc_id}/pipeline/{result['id']}?sync=true")
            assert response.status_code == 400
            assert response.json().get("status", {}).get('detail', {}).get("description") == f"Pipeline {result['id']} doesn't exist"

    def test_pipeline_azure_upload(self):
        """Output upload to Azure blob storage; skipped without credentials."""
        ks = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
        bucket = os.getenv("CLOUD_STORAGE_BUCKET")
        if ks and bucket:
            with TestClient(app) as client:
                svc_id = self._svc_id(client)
                json_data = self._payload(pipeline_copy)
                json_data["outputs"][0]["storage"] = [{
                    "name": "azure",
                    "bucket": bucket,
                    "env": {
                        "AZURE_STORAGE_CONNECTION_STRING": ks
                    }
                }]
                response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
                assert response.status_code == 200
                # response = client.get(f"/media/{svc_id}/pipeline/{result['id']}?sync=true")

    def test_pipeline_mpegts(self):
        """Real-time ('-re') transcode is 'running' mid-way, then finishes."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mpegts)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            outputs = response.json().get("outputs", [])
            time.sleep(30)
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert result[index]['status'] == 'running'
                assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f mpegts -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
            time.sleep(15)
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert result[index]['status'] == 'finished'

    def test_pipeline_stop(self):
        """DELETE stops a running pipeline and reports it as finished."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mpegts)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            outputs = response.json().get("outputs", [])
            time.sleep(2)
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert result[index]['status'] == 'running'
                assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f mpegts -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
            time.sleep(2)
            response = client.delete(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert result[index]['status'] == 'finished'

    def test_pipeline_rtmp(self):
        """RTMP pipeline streams to rtmp://<ip>/live, then finishes."""
        with TestClient(app) as client:
            svc_id = self._svc_id(client)
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_rtmp)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            outputs = response.json().get("outputs", [])
            time.sleep(30)
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert outputs[index] == f"rtmp://{rtmp_ip}/live"
                assert result[index]['status'] == 'running'
                assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f flv -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
            time.sleep(15)
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert result[index]['status'] == 'finished'
import datetime
import math
import sys
from functools import partial
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from pandas.util.testing import assert_series_equal
from sklearn.pipeline import Pipeline
from greykite.common import constants as cst
from greykite.common.evaluation import ElementwiseEvaluationMetricEnum
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.common.python_utils import assert_equal
from greykite.common.testing_utils import gen_sliced_df
from greykite.framework.input.univariate_time_series import UnivariateTimeSeries
from greykite.framework.output.univariate_forecast import UnivariateForecast
from greykite.framework.pipeline.utils import get_forecast
from greykite.sklearn.estimator.prophet_estimator import ProphetEstimator
from greykite.sklearn.estimator.silverkite_estimator import SilverkiteEstimator
try:
import fbprophet # noqa
except ModuleNotFoundError:
pass
@pytest.fixture
def df():
    """4-day frame with actual, predicted, interval and null-model columns."""
    return pd.DataFrame({
        cst.TIME_COL: pd.date_range(start="2018-01-01", periods=4),
        cst.ACTUAL_COL: [1, 2, 3, 4],
        cst.PREDICTED_COL: [1, 4, 1, 2],
        cst.PREDICTED_LOWER_COL: [1, 1, 1, 1],
        cst.PREDICTED_UPPER_COL: [4, 5, 4, 4],
        cst.NULL_PREDICTED_COL: [1.5] * 4,
    })
@pytest.fixture
def df2():
    """7-day frame with actual, predicted, interval and null-model columns."""
    actuals = [float(value) for value in range(1, 8)]
    return pd.DataFrame({
        cst.TIME_COL: pd.date_range(start="2018-01-01", periods=7),
        cst.ACTUAL_COL: actuals,
        cst.PREDICTED_COL: [1.0, 4.0, 3.0, 2.0, 3.0, 4.0, 8.0],
        cst.PREDICTED_LOWER_COL: [1.0] * 7,
        cst.PREDICTED_UPPER_COL: [4.0, 5.0, 4.0, 4.0, 5.0, 6.0, 9.0],
        cst.NULL_PREDICTED_COL: [1.5] * 7,
    })
def test_univariate_forecast(df):
    """Checks UnivariateForecast evaluation metrics for both constructor
    modes: with an implicit test split (test_start_date=None) and with an
    explicit test_start_date plus relative_error_tolerance."""
    # Without test_start_date: test set starts right after train_end_date.
    forecast = UnivariateForecast(
        df,
        train_end_date=datetime.datetime(2018, 1, 2),
        test_start_date=None,
        forecast_horizon=2)
    assert forecast.forecast_horizon == 2
    assert forecast.df_train.shape == (2, 6)
    assert forecast.df_test.shape == (2, 6)
    assert forecast.relative_error_tolerance is None
    # evaluation metrics
    enum = EvaluationMetricEnum.Correlation
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 1.0
    enum = EvaluationMetricEnum.MeanAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.RootMeanSquaredError
    assert forecast.train_evaluation[enum.get_metric_name()] == math.sqrt(2)
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MedianAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MeanAbsolutePercentError
    assert forecast.train_evaluation[enum.get_metric_name()] == 50.0
    assert forecast.test_evaluation[enum.get_metric_name()] == pytest.approx(58.33333, 1e-4)
    assert forecast.train_evaluation[cst.R2_null_model_score] == -7.0
    assert forecast.test_evaluation[cst.R2_null_model_score] == pytest.approx(0.058824, 1e-4)
    # No tolerance was given, so fraction-outside-tolerance is undefined.
    assert forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] is None
    assert forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] is None
    # validation metrics (prediction-interval width/coverage)
    assert forecast.train_evaluation[cst.PREDICTION_BAND_WIDTH] == 250.0
    assert forecast.test_evaluation[cst.PREDICTION_BAND_WIDTH] == 87.5
    assert forecast.train_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5
    assert forecast.train_evaluation[cst.LOWER_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.LOWER_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0
    assert forecast.test_evaluation[cst.UPPER_BAND_COVERAGE] == 0.5
    assert forecast.train_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)
    assert forecast.test_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)
    # With test_start_date, relative_error_tolerance. A gap between
    # train_end_date and test_start_date triggers a UserWarning.
    with pytest.warns(UserWarning):
        forecast = UnivariateForecast(
            df,
            train_end_date=datetime.datetime(2018, 1, 2),
            test_start_date=datetime.datetime(2018, 1, 4),
            relative_error_tolerance=0.05)
    assert forecast.forecast_horizon is None
    assert forecast.df_train.shape == (2, 6)
    assert forecast.df_test.shape == (1, 6)
    assert forecast.relative_error_tolerance == 0.05
    # evaluation metrics (train_metrics remain the same, test_metrics change)
    enum = EvaluationMetricEnum.Correlation
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] is None
    enum = EvaluationMetricEnum.MeanAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.RootMeanSquaredError
    assert forecast.train_evaluation[enum.get_metric_name()] == math.sqrt(2)
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MedianAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MeanAbsolutePercentError
    assert forecast.train_evaluation[enum.get_metric_name()] == 50.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 50.0
    assert forecast.train_evaluation[cst.R2_null_model_score] == -7.0
    assert forecast.test_evaluation[cst.R2_null_model_score] == 0.36
    assert forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.5
    assert forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 1.0
    # validation metrics
    assert forecast.train_evaluation[cst.PREDICTION_BAND_WIDTH] == 250.0
    assert forecast.test_evaluation[cst.PREDICTION_BAND_WIDTH] == 75.0
    assert forecast.train_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.LOWER_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.LOWER_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0
    assert forecast.test_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)
    assert forecast.test_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.95)
def test_subset_columns(df):
    """Intervals and the null-model column are optional; also checks the
    relative_error_tolerance parameter."""
    minimal_df = df[[cst.TIME_COL, cst.ACTUAL_COL, cst.PREDICTED_COL]]
    split_date = datetime.datetime(2018, 1, 2)
    subset_forecast = UnivariateForecast(
        minimal_df,
        predicted_lower_col=None,
        predicted_upper_col=None,
        null_model_predicted_col=None,
        train_end_date=split_date,
        relative_error_tolerance=0.7)
    full_forecast = UnivariateForecast(df, train_end_date=split_date)
    # Point-forecast metrics must agree with the full-column run.
    for enum in EvaluationMetricEnum:
        metric_name = enum.get_metric_name()
        assert subset_forecast.train_evaluation[metric_name] == full_forecast.train_evaluation[metric_name]
        assert subset_forecast.test_evaluation[metric_name] == full_forecast.test_evaluation[metric_name]
    # Interval/null-model metrics cannot be computed without those columns.
    optional_metrics = (
        cst.R2_null_model_score,
        cst.PREDICTION_BAND_WIDTH,
        cst.PREDICTION_BAND_COVERAGE,
        cst.LOWER_BAND_COVERAGE,
        cst.UPPER_BAND_COVERAGE,
        cst.COVERAGE_VS_INTENDED_DIFF)
    for metric in optional_metrics:
        assert subset_forecast.train_evaluation[metric] is None
        assert subset_forecast.test_evaluation[metric] is None
    assert subset_forecast.relative_error_tolerance == 0.7
    assert subset_forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.5
    assert subset_forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.0
def test_input_validation(df):
    """Constructor rejects invalid coverage, unknown dates and bad columns."""
    bad_inputs = [
        (dict(train_end_date=datetime.datetime(2018, 1, 2), coverage=None),
         "`coverage` must be provided"),
        (dict(train_end_date=datetime.datetime(2018, 1, 2), coverage=80.0),
         "`coverage` must be between 0.0 and 1.0"),
        (dict(train_end_date="2018-01-05"),
         "2018-01-05 is not found in time column"),
        (dict(actual_col="not_a_column"),
         "Column not found in data frame"),
    ]
    for kwargs, expected_message in bad_inputs:
        with pytest.raises(ValueError, match=expected_message):
            UnivariateForecast(df, **kwargs)
def test_no_train_end_date(df):
    """train_end_date=None behaves like passing the last timestamp."""
    implicit = UnivariateForecast(df, train_end_date=None)
    explicit = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 4))
    assert_equal(implicit.train_evaluation, explicit.train_evaluation)
    # Every row is training data, so there is no test set to evaluate.
    assert implicit.test_evaluation is None
def test_partial_test_data():
    """NA actuals in the test period are dropped from the error calculation."""
    frame = pd.DataFrame({
        cst.TIME_COL: ["2018-01-01", datetime.datetime(2018, 1, 2), "2018-01-03", "2018-01-04", "2018-01-05"],
        cst.ACTUAL_COL: [1, 2, 3, 2, np.nan],
        cst.PREDICTED_COL: [1, 4, 1, 2, 4],
        cst.PREDICTED_LOWER_COL: [1, 1, 1, 1, 2],
        cst.PREDICTED_UPPER_COL: [4, 5, 4, 4, 6],
        cst.NULL_PREDICTED_COL: [1.5] * 5,
    })
    split_date = datetime.datetime(2018, 1, 2)
    with pytest.warns(UserWarning) as record:
        with_na = UnivariateForecast(frame, train_end_date=split_date)
        without_na = UnivariateForecast(frame.iloc[:4, ], train_end_date=split_date)
    assert with_na.test_na_count == 1
    assert "1 value(s) in y_true were NA or infinite and are omitted in error calc." in record[0].message.args[0:2]
    # Dropping the NA row must give the same result as never having it.
    assert_equal(with_na.train_evaluation, without_na.train_evaluation)
    assert_equal(with_na.test_evaluation, without_na.test_evaluation)
def test_no_test_data():
    """Test-set evaluation is skipped when every test actual is NA."""
    frame = pd.DataFrame({
        cst.TIME_COL: ["2018-01-01", datetime.datetime(2018, 1, 2), "2018-01-03", "2018-01-04"],
        cst.ACTUAL_COL: [1, 2, np.nan, np.nan],
        cst.PREDICTED_COL: [1, 4, 1, 2],
        cst.PREDICTED_LOWER_COL: [1, 1, 1, 1],
        cst.PREDICTED_UPPER_COL: [4, 5, 4, 4],
        cst.NULL_PREDICTED_COL: [1.5] * 4,
    })
    forecast = UnivariateForecast(frame, train_end_date=datetime.datetime(2018, 1, 2))
    assert forecast.test_na_count == 2
    assert forecast.train_evaluation is not None
    assert forecast.test_evaluation is None
def test_custom_loss_function(df):
    """A user-supplied loss is used for the R2 null-model score."""
    def root_sum_abs_error(y_pred, y_true):
        """Square root of the summed absolute errors."""
        return np.sqrt(np.sum(np.abs(np.array(y_pred) - np.array(y_true))))

    forecast = UnivariateForecast(
        df,
        train_end_date=datetime.datetime(2018, 1, 2),
        r2_loss_function=root_sum_abs_error)
    assert forecast.train_evaluation[cst.R2_null_model_score] == 1 - math.sqrt(2)
    assert forecast.test_evaluation[cst.R2_null_model_score] == 0
def test_plot(df):
    """plot() returns a figure with and without custom styling options."""
    with_test_split = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
    assert with_test_split.plot() is not None
    train_only = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 4))
    assert train_only.plot(vertical_line_color="green") is not None
def test_get_grouping_evaluation(df2):
    """Tests get_grouping_evaluation function with each of the three
    grouping modes: time feature, sliding window and custom column."""
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))
    # MAPE, groupby_time_feature, train set
    metric = EvaluationMetricEnum.MeanAbsolutePercentError
    metric_name = metric.get_metric_name()
    grouped_df = forecast.get_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_time_feature="dow")
    expected = pd.DataFrame({
        "dow": [1, 2, 3, 4, 5],  # Monday, Tuesday, etc. Time feature is used as column name
        f"train {metric_name}": [0.0, 100.0, 0.0, 50.0, 40.0]
    })
    assert_equal(grouped_df, expected)
    # MSE, groupby_sliding_window_size
    metric = EvaluationMetricEnum.MeanSquaredError
    metric_name = metric.get_metric_name()
    grouped_df = forecast.get_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_sliding_window_size=2)
    expected = pd.DataFrame({
        # Window starts become the downsampled time column.
        f"{cst.TIME_COL}_downsample": [
            datetime.datetime(2018, 1, 1),
            datetime.datetime(2018, 1, 3),
            datetime.datetime(2018, 1, 5)],
        f"train {metric_name}": [0.0, 2.0, 4.0]
    })
    assert_equal(grouped_df, expected)
    # MAE, groupby_custom_column, test set
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 2))
    metric = EvaluationMetricEnum.MeanAbsoluteError
    custom_groups = pd.Series(["g1", "g2", "g1", "g3", "g2"], name="custom_groups")
    grouped_df = forecast.get_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=None,
        which="test",
        groupby_custom_column=custom_groups)
    expected = pd.DataFrame({
        "custom_groups": ["g1", "g2", "g3"],
        # With score_func_name=None the column falls back to "test metric".
        "test metric": [1.0, 1.5, 2.0]
    })
    assert_equal(grouped_df, expected)
def test_plot_grouping_evaluation(df2):
    """Tests plot_grouping_evaluation function.

    Checks trace names, axis titles, figure titles, and trace sizes for
    the three grouping modes, plus custom label/title overrides.
    """
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))
    # MAPE, groupby_time_feature, train set
    metric = EvaluationMetricEnum.MeanAbsolutePercentError
    metric_name = metric.get_metric_name()
    fig = forecast.plot_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_time_feature="dow")
    assert fig.data[0].name == f"train {metric_name}"
    assert fig.layout.xaxis.title.text == "dow"
    assert fig.layout.yaxis.title.text == f"train {metric_name}"
    assert fig.layout.title.text == f"train {metric_name} vs dow"
    assert fig.data[0].x.shape[0] == 5
    # MSE, groupby_sliding_window_size, train set
    metric = EvaluationMetricEnum.MeanSquaredError
    metric_name = metric.get_metric_name()
    fig = forecast.plot_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_sliding_window_size=2)  # there are 5 training points, so this creates groups of size (1, 2, 2)
    assert fig.data[0].name == f"train {metric_name}"
    assert fig.layout.xaxis.title.text == f"{cst.TIME_COL}_downsample"
    assert fig.layout.yaxis.title.text == f"train {metric_name}"
    assert fig.layout.title.text == f"train {metric_name} vs {cst.TIME_COL}_downsample"
    assert fig.data[0].x.shape[0] == 3
    # MAE, groupby_custom_column, test set
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 2))
    metric = EvaluationMetricEnum.MeanAbsoluteError
    metric_name = metric.get_metric_name()
    custom_groups = pd.Series(["g1", "g2", "g1", "g3", "g2"], name="custom_groups")
    fig = forecast.plot_grouping_evaluation(
        groupby_custom_column=custom_groups,
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="test",
        title=None)
    assert fig.data[0].name == f"test {metric_name}"
    assert fig.layout.xaxis.title.text == "custom_groups"
    assert fig.layout.yaxis.title.text == f"test {metric_name}"
    assert fig.layout.title.text == f"test {metric_name} vs custom_groups"
    assert fig.data[0].x.shape[0] == 3
    # custom xlabel, ylabel, title
    fig = forecast.plot_grouping_evaluation(
        groupby_custom_column=custom_groups,
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="test",
        xlabel="Custom labels",
        ylabel="Mean Absolute Error of y",
        title="Mean Absolute Error of y by Custom labels")
    assert fig.layout.xaxis.title.text == "Custom labels"
    assert fig.layout.yaxis.title.text == "Mean Absolute Error of y"
    assert fig.layout.title.text == "Mean Absolute Error of y by Custom labels"
def test_autocomplete_map_func_dict(df2):
    """Tests autocomplete_map_func_dict function.

    Enum-name strings should be expanded to row-wise callables bound to
    the forecast's column names; existing callables pass through
    unchanged; unknown names raise ValueError.
    """
    map_func_dict = {
        "residual": ElementwiseEvaluationMetricEnum.Residual.name,
        "squared_error": ElementwiseEvaluationMetricEnum.SquaredError.name,
        "coverage": ElementwiseEvaluationMetricEnum.Coverage.name,
        "custom_metric": lambda row: (row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL])**4
    }
    # NOTE(review): `rename` without `columns=`/`axis=1` maps the *index*,
    # so the column labels here are actually unchanged -- which is why the
    # default column names still work below. TODO confirm this is intended.
    df_renamed = df2.rename({
        cst.TIME_COL: "custom_time_col",
        cst.ACTUAL_COL: "custom_actual_col",
        cst.PREDICTED_COL: "custom_predicted_col",
        cst.PREDICTED_LOWER_COL: "custom_predicted_lower_col",
        cst.PREDICTED_UPPER_COL: "custom_predicted_upper_col",
        cst.NULL_PREDICTED_COL: "custom_null_predicted_col",
    })
    forecast = UnivariateForecast(df_renamed, train_end_date=datetime.datetime(2018, 1, 5))
    map_func_dict = forecast.autocomplete_map_func_dict(map_func_dict)
    actual = df2.apply(map_func_dict["residual"], axis=1)
    expected = (df2[cst.ACTUAL_COL] - df2[cst.PREDICTED_COL])
    assert_series_equal(actual, expected)
    actual = df2.apply(map_func_dict["squared_error"], axis=1)
    expected = (df2[cst.ACTUAL_COL] - df2[cst.PREDICTED_COL]).pow(2)
    assert_series_equal(actual, expected)
    actual = df2.apply(map_func_dict["coverage"], axis=1)
    expected = ((df2[cst.ACTUAL_COL] > df2[cst.PREDICTED_LOWER_COL]) & (df2[cst.ACTUAL_COL] < df2[cst.PREDICTED_UPPER_COL])).astype('float')
    assert_series_equal(actual, expected)
    actual = df2.apply(map_func_dict["custom_metric"], axis=1)
    expected = (df2[cst.ACTUAL_COL] - df2[cst.PREDICTED_COL]).pow(4)
    assert_series_equal(actual, expected)
    # None passes through untouched
    assert forecast.autocomplete_map_func_dict(None) is None
    valid_names = ", ".join(ElementwiseEvaluationMetricEnum.__dict__["_member_names_"])
    with pytest.raises(ValueError, match=f"unknown_func is not a recognized elementwise "
                                         f"evaluation metric. Must be one of: {valid_names}"):
        map_func_dict = {"unknown_func": "unknown_func"}
        forecast.autocomplete_map_func_dict(map_func_dict)
def test_get_flexible_grouping_evaluation(df2):
    """Tests get_flexible_grouping_evaluation function.

    Verifies grouped aggregation of row-wise metrics (residual quantiles,
    MSE/median squared error, coverage) for enum-name and lambda
    `map_func_dict` specifications, across all three grouping modes.
    """
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))
    # Checks residual quantiles, MSE/median squared error, and coverage
    map_func_dict = {
        "residual": ElementwiseEvaluationMetricEnum.Residual.name,
        "squared_error": ElementwiseEvaluationMetricEnum.SquaredError.name,
        "coverage": ElementwiseEvaluationMetricEnum.Coverage.name
    }
    agg_kwargs = {
        "residual_mean": pd.NamedAgg(column="residual", aggfunc=np.nanmean),
        "residual_q05": pd.NamedAgg(column="residual", aggfunc=partial(np.nanquantile, q=0.05)),
        "residual_q95": pd.NamedAgg(column="residual", aggfunc=partial(np.nanquantile, q=0.95)),
        "MSE": pd.NamedAgg(column="squared_error", aggfunc=np.nanmean),
        "median_squared_error": pd.NamedAgg(column="squared_error", aggfunc=np.nanmedian),
        "coverage": pd.NamedAgg(column="coverage", aggfunc=np.nanmean),
    }
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    expected = pd.DataFrame({
        # Only one value per group, so the mean/median/quantiles are the same
        "residual_mean": [0.0, -2.0, 0.0, 2.0, 2.0],
        "residual_q05": [0.0, -2.0, 0.0, 2.0, 2.0],
        "residual_q95": [0.0, -2.0, 0.0, 2.0, 2.0],
        "MSE": [0.0, 4.0, 0.0, 4.0, 4.0],
        "median_squared_error": [0.0, 4.0, 0.0, 4.0, 4.0],
        "coverage": [0.0, 1.0, 1.0, 0.0, 0.0],
    }, index=pd.Series([1, 2, 3, 4, 5], name="dow"))
    assert_frame_equal(result, expected)
    # Equivalent way to specify `map_func_dict` (without autocomplete)
    map_func_dict = {
        "residual": lambda row: ElementwiseEvaluationMetricEnum.Residual.get_metric_func()(
            row[forecast.actual_col],
            row[forecast.predicted_col]),
        "squared_error": lambda row: ElementwiseEvaluationMetricEnum.SquaredError.get_metric_func()(
            row[forecast.actual_col],
            row[forecast.predicted_col]),
        "coverage": lambda row: ElementwiseEvaluationMetricEnum.Coverage.get_metric_func()(
            row[forecast.actual_col],
            row[forecast.predicted_lower_col],
            row[forecast.predicted_upper_col]),
    }
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    assert_frame_equal(result, expected)
    # Equivalent way to specify `map_func_dict` (without autocomplete)
    map_func_dict = {
        "residual": lambda row: row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL],
        "squared_error": lambda row: (row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL])**2,
        "coverage": lambda row: 1.0 if row[cst.PREDICTED_LOWER_COL] < row[cst.ACTUAL_COL] < row[cst.PREDICTED_UPPER_COL] else 0.0
    }
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    assert_frame_equal(result, expected)
    # Groupby sliding window
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature=None,
        groupby_sliding_window_size=3,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    expected = pd.DataFrame({
        "residual_mean": [-1.0, 4/3],
        "residual_q05": [-1.9, 0.2],
        "residual_q95": [-0.1, 2.0],
        "MSE": [2.0, 2.0 + 2/3],
        "median_squared_error": [2.0, 4.0],
        "coverage": [0.5, 1/3],
    }, index=pd.DatetimeIndex(["2018-01-01", "2018-01-04"], name="ts_downsample"))
    assert_frame_equal(result, expected)
    # On test set with custom groupby column
    custom_groups = pd.Series(["val1"], name="value_group").repeat(forecast.df_test.shape[0])
    result = forecast.get_flexible_grouping_evaluation(
        which="test",
        groupby_time_feature=None,
        groupby_sliding_window_size=None,
        groupby_custom_column=custom_groups,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs)
    colindex = pd.Index(
        ["residual_mean", "residual_q05", "residual_q95",
         "MSE", "median_squared_error", "coverage"])
    expected = pd.DataFrame(
        [[0.5, -0.85, 1.85, 2.5, 2.5, 0.5]],
        columns=colindex,
        index=pd.Series(["val1"], name=custom_groups.name))
    assert_frame_equal(result, expected)
def test_plot_flexible_grouping_evaluation():
    """Tests plot_flexible_grouping_evaluation function.

    Exercises the styling options ("auto-fill", "auto", "plotly", and a
    custom `y_col_style_dict`) across grouping modes, checking trace
    names, colors, fills, axis titles, and legend visibility.
    """
    df = gen_sliced_df(sample_size_dict={"a": 300, "b": 200, "c": 300, "d": 80, "e": 300})
    actual_col = "y"
    predicted_col = "y_hat"
    groupby_col = "x"
    groupby_col2 = "z"
    df = df[[actual_col, predicted_col, groupby_col, groupby_col2]]
    df[cst.TIME_COL] = pd.date_range(start="2020-01-01", periods=df.shape[0], freq="D")
    end_index = math.floor(df.shape[0] * 0.8)  # 80/20 train/test split
    forecast = UnivariateForecast(
        df,
        train_end_date=df[cst.TIME_COL][end_index],
        time_col=cst.TIME_COL,
        actual_col=actual_col,
        predicted_col=predicted_col,
        predicted_lower_col=None,
        predicted_upper_col=None,
        null_model_predicted_col=None)
    # MSE and quantiles of squared error
    metric_col = "squared_err"
    map_func_dict = {metric_col: ElementwiseEvaluationMetricEnum.SquaredError.name}
    agg_kwargs = {f"Q{quantile}": pd.NamedAgg(column=metric_col, aggfunc=partial(np.nanquantile, q=quantile)) for quantile in [0.1, 0.9]}
    agg_kwargs.update({"mean": pd.NamedAgg(column=metric_col, aggfunc=np.nanmean)})
    # group by "dom", "auto-fill" styling
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dom",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False,
        y_col_style_dict="auto-fill",
        default_color="rgba(0, 145, 202, 1.0)",
        xlabel=None,
        ylabel=metric_col,
        title=None,
        showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["Q0.1", "mean", "Q0.9"]
    assert fig.layout.xaxis.title.text == "dom"
    assert fig.layout.yaxis.title.text == metric_col
    assert fig.layout.title.text == f"{metric_col} vs dom"
    assert fig.data[0].x.shape[0] == 31  # 31 unique days in month
    assert fig.data[1].line["color"] == "rgba(0, 145, 202, 1.0)"
    assert fig.data[1].fill == "tonexty"  # from auto-fill
    assert fig.layout.showlegend
    # group by sliding window, "auto" styling
    # provide default color, xlabel, hide legend
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature=None,
        groupby_sliding_window_size=7,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False,
        y_col_style_dict="auto",
        default_color="rgba(145, 0, 202, 1.0)",
        xlabel="ts",
        ylabel=None,
        title=None,
        showlegend=False)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["Q0.1", "mean", "Q0.9"]
    assert fig.layout.xaxis.title.text == "ts"
    assert fig.layout.yaxis.title.text is None
    assert fig.layout.title.text is None
    assert fig.data[0].x[0] == datetime.datetime(2020, 1, 1, 0, 0)
    assert fig.data[1].line["color"] == "rgba(145, 0, 202, 1.0)"
    assert fig.data[1].fill is None
    assert not fig.layout.showlegend
    # custom groups, "plotly" styling, provide ylabel, title
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature=None,
        groupby_sliding_window_size=None,
        groupby_custom_column=forecast.df_train["x"],
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False,
        y_col_style_dict="plotly",
        default_color=None,
        xlabel=None,
        ylabel=metric_col,
        title="custom title",
        showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["Q0.1", "Q0.9", "mean"]  # not sorted
    assert fig.layout.xaxis.title.text == "x"
    assert fig.layout.yaxis.title.text == metric_col
    assert fig.layout.title.text == "custom title"
    assert list(fig.data[0].x) == list("abcde")
    assert fig.data[0].line["color"] is None  # color is up to plotly
    assert fig.data[1].fill is None
    assert fig.layout.showlegend
    # test set, absolute percent error, custom `y_col_style_dict` styling
    # NOTE(review): the name "squared_error" is misleading here -- the mapped
    # metric is AbsolutePercentError; the column name is only a label.
    metric_col = "squared_error"
    map_func_dict = {
        metric_col: ElementwiseEvaluationMetricEnum.AbsolutePercentError.name
    }
    agg_kwargs = {
        "median": pd.NamedAgg(column=metric_col, aggfunc=np.nanmedian),
        "mean": pd.NamedAgg(column=metric_col, aggfunc=np.nanmean),
    }
    y_col_style_dict = {
        "median": {
            "mode": "lines+markers",
            "line": {
                "color": "rgba(202, 145, 0, 0.5)"
            }
        },
        "mean": {
            "mode": "lines+markers",
            "line": {
                "color": "rgba(0, 145, 202, 1.0)"
            }
        },
    }
    with pytest.warns(UserWarning, match="true_val is less than 1e-8"):
        fig = forecast.plot_flexible_grouping_evaluation(
            which="test",
            groupby_time_feature="dow",
            groupby_sliding_window_size=None,
            groupby_custom_column=None,
            map_func_dict=map_func_dict,
            agg_kwargs=agg_kwargs,
            extend_col_names=False,
            y_col_style_dict=y_col_style_dict,
            xlabel="x value",
            ylabel="y value",
            title="error plot",
            showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["median", "mean"]  # not sorted
    assert fig.layout.xaxis.title.text == "x value"
    assert fig.layout.yaxis.title.text == "y value"
    assert fig.layout.title.text == "error plot"
    assert len(fig.data[0].x) == 7
    assert fig.data[0].mode == "lines+markers"
    assert fig.data[1].mode == "lines+markers"
    assert fig.data[0].line["color"] == y_col_style_dict["median"]["line"]["color"]
    assert fig.data[1].line["color"] == y_col_style_dict["mean"]["line"]["color"]
    assert fig.data[1].fill is None
    assert fig.layout.showlegend
    # median actual vs forecast value by group
    agg_kwargs = {
        "y_median": pd.NamedAgg(column="y", aggfunc=np.nanmedian),
        "y_hat_median": pd.NamedAgg(column="y_hat", aggfunc=np.nanmedian),
    }
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=None,
        agg_kwargs=agg_kwargs,
        extend_col_names=True,
        y_col_style_dict="plotly",
        xlabel=None,
        ylabel=forecast.ylabel,
        title="true vs actual by dow",
        showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["y_median", "y_hat_median"]
    assert fig.layout.xaxis.title.text == "dow"
    assert fig.layout.yaxis.title.text == "y"
    assert fig.layout.title.text == "true vs actual by dow"
    assert len(fig.data[0].x) == 7
    assert fig.layout.showlegend
def test_make_univariate_time_series(df):
    """Checks that make_univariate_time_series returns a time series
    built from the forecast's predicted values."""
    # Build the reference time series directly from the predictions.
    expected_df = pd.DataFrame({
        cst.TIME_COL: df[cst.TIME_COL],
        cst.VALUE_COL: df[cst.PREDICTED_COL]
    })
    reference_ts = UnivariateTimeSeries()
    reference_ts.load_data(expected_df, cst.TIME_COL, cst.VALUE_COL)
    forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
    result_ts = forecast.make_univariate_time_series()
    assert result_ts.df.equals(reference_ts.df)
def test_plot_components():
    """Test plot_components of UnivariateForecast class.

    Fits a Silverkite model on a short daily series, then checks that the
    component plot contains the requested components and that an unknown
    component name ("DUMMY") is skipped with a warning rather than an error.
    """
    X = pd.DataFrame({
        cst.TIME_COL: pd.date_range("2018-01-01", periods=10, freq="D"),
        cst.VALUE_COL: np.arange(1, 11)
    })
    coverage = 0.95
    # Test Silverkite
    trained_model = Pipeline([("estimator", SilverkiteEstimator(coverage=coverage))])
    with pytest.warns(Warning) as record:
        trained_model.fit(X, X[cst.VALUE_COL])
        assert "No slice had sufficient sample size" in record[0].message.args[0]
    forecast = get_forecast(X, trained_model)
    with pytest.warns(Warning) as record:
        title = "Custom component plot"
        fig = forecast.plot_components(names=["trend", "YEARLY_SEASONALITY", "DUMMY"], title=title)
        expected_rows = 3
        assert len(fig.data) == expected_rows
        assert [fig.data[i].name for i in range(expected_rows)] == \
            [cst.VALUE_COL, "trend", "YEARLY_SEASONALITY"]
        assert fig.layout.xaxis.title["text"] == cst.TIME_COL
        assert fig.layout.xaxis2.title["text"] == cst.TIME_COL
        assert fig.layout.xaxis3.title["text"] == "Time of year"
        assert fig.layout.yaxis.title["text"] == cst.VALUE_COL
        assert fig.layout.yaxis2.title["text"] == "trend"
        assert fig.layout.yaxis3.title["text"] == "yearly"
        assert fig.layout.title["text"] == title
        # Single quotes inside the f-string: nested double quotes are a
        # SyntaxError before Python 3.12 (PEP 701), and the warning message
        # contains the set repr {'DUMMY'}, which uses single quotes.
        assert f"The following components have not been specified in the model: " \
               f"{{'DUMMY'}}, plotting the rest." in record[0].message.args[0]
@pytest.mark.skipif("fbprophet" not in sys.modules,
                    reason="Module 'fbprophet' not installed, pytest for 'ProphetTemplate' skipped.")
def test_plot_components_prophet():
    """Checks that plot_components works with a fitted Prophet estimator."""
    coverage = 0.95
    X = pd.DataFrame({
        cst.TIME_COL: pd.date_range("2018-01-01", periods=10, freq="D"),
        cst.VALUE_COL: np.arange(1, 11)
    })
    # Test Prophet
    estimator = ProphetEstimator(coverage=coverage)
    model = Pipeline([("estimator", estimator)])
    model.fit(X, X[cst.VALUE_COL])
    forecast = get_forecast(X, model)
    assert forecast.plot_components() is not None
| import datetime
import math
import sys
from functools import partial
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from pandas.util.testing import assert_series_equal
from sklearn.pipeline import Pipeline
from greykite.common import constants as cst
from greykite.common.evaluation import ElementwiseEvaluationMetricEnum
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.common.python_utils import assert_equal
from greykite.common.testing_utils import gen_sliced_df
from greykite.framework.input.univariate_time_series import UnivariateTimeSeries
from greykite.framework.output.univariate_forecast import UnivariateForecast
from greykite.framework.pipeline.utils import get_forecast
from greykite.sklearn.estimator.prophet_estimator import ProphetEstimator
from greykite.sklearn.estimator.silverkite_estimator import SilverkiteEstimator
try:
import fbprophet # noqa
except ModuleNotFoundError:
pass
@pytest.fixture
def df():
    """4-day forecast frame with actuals, predictions, bands, and a null model."""
    timestamps = [datetime.datetime(2018, 1, day) for day in (1, 2, 3, 4)]
    data = {
        cst.TIME_COL: timestamps,
        cst.ACTUAL_COL: [1, 2, 3, 4],
        cst.PREDICTED_COL: [1, 4, 1, 2],
        cst.PREDICTED_LOWER_COL: [1, 1, 1, 1],
        cst.PREDICTED_UPPER_COL: [4, 5, 4, 4],
        cst.NULL_PREDICTED_COL: [1.5, 1.5, 1.5, 1.5],
    }
    return pd.DataFrame(data)
@pytest.fixture
def df2():
    """One week of daily forecast data with constant lower band and null model."""
    return pd.DataFrame({
        cst.TIME_COL: pd.date_range(start="2018-01-01", periods=7),
        cst.ACTUAL_COL: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
        cst.PREDICTED_COL: [1.0, 4.0, 3.0, 2.0, 3.0, 4.0, 8.0],
        cst.PREDICTED_LOWER_COL: [1.0] * 7,
        cst.PREDICTED_UPPER_COL: [4.0, 5.0, 4.0, 4.0, 5.0, 6.0, 9.0],
        cst.NULL_PREDICTED_COL: [1.5] * 7,
    })
def test_univariate_forecast(df):
    """Checks univariate forecast class.

    Pins exact evaluation and validation metric values for two
    configurations: default test split, and an explicit `test_start_date`
    with a `relative_error_tolerance`.
    """
    # Without test_start_date
    forecast = UnivariateForecast(
        df,
        train_end_date=datetime.datetime(2018, 1, 2),
        test_start_date=None,
        forecast_horizon=2)
    assert forecast.forecast_horizon == 2
    assert forecast.df_train.shape == (2, 6)
    assert forecast.df_test.shape == (2, 6)
    assert forecast.relative_error_tolerance is None
    # evaluation metrics
    enum = EvaluationMetricEnum.Correlation
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 1.0
    enum = EvaluationMetricEnum.MeanAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.RootMeanSquaredError
    assert forecast.train_evaluation[enum.get_metric_name()] == math.sqrt(2)
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MedianAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MeanAbsolutePercentError
    assert forecast.train_evaluation[enum.get_metric_name()] == 50.0
    assert forecast.test_evaluation[enum.get_metric_name()] == pytest.approx(58.33333, 1e-4)
    assert forecast.train_evaluation[cst.R2_null_model_score] == -7.0
    assert forecast.test_evaluation[cst.R2_null_model_score] == pytest.approx(0.058824, 1e-4)
    # No tolerance was provided, so this metric is not computed
    assert forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] is None
    assert forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] is None
    # validation metrics
    assert forecast.train_evaluation[cst.PREDICTION_BAND_WIDTH] == 250.0
    assert forecast.test_evaluation[cst.PREDICTION_BAND_WIDTH] == 87.5
    assert forecast.train_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5
    assert forecast.train_evaluation[cst.LOWER_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.LOWER_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0
    assert forecast.test_evaluation[cst.UPPER_BAND_COVERAGE] == 0.5
    assert forecast.train_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)
    assert forecast.test_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)
    # With test_start_date, relative_error_tolerance
    with pytest.warns(UserWarning):
        forecast = UnivariateForecast(
            df,
            train_end_date=datetime.datetime(2018, 1, 2),
            test_start_date=datetime.datetime(2018, 1, 4),
            relative_error_tolerance=0.05)
    assert forecast.forecast_horizon is None
    assert forecast.df_train.shape == (2, 6)
    assert forecast.df_test.shape == (1, 6)
    assert forecast.relative_error_tolerance == 0.05
    # evaluation metrics (train_metrics remain the same, test_metrics change)
    enum = EvaluationMetricEnum.Correlation
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] is None
    enum = EvaluationMetricEnum.MeanAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.RootMeanSquaredError
    assert forecast.train_evaluation[enum.get_metric_name()] == math.sqrt(2)
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MedianAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MeanAbsolutePercentError
    assert forecast.train_evaluation[enum.get_metric_name()] == 50.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 50.0
    assert forecast.train_evaluation[cst.R2_null_model_score] == -7.0
    assert forecast.test_evaluation[cst.R2_null_model_score] == 0.36
    assert forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.5
    assert forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 1.0
    # validation metrics
    assert forecast.train_evaluation[cst.PREDICTION_BAND_WIDTH] == 250.0
    assert forecast.test_evaluation[cst.PREDICTION_BAND_WIDTH] == 75.0
    assert forecast.train_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.LOWER_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.LOWER_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0
    assert forecast.test_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)
    assert forecast.test_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.95)
def test_subset_columns(df):
    """Tests if intervals and null prediction are truly optional,
    and relative_error_tolerance parameter.

    With band/null columns omitted, point-forecast metrics must match the
    full configuration, while band-dependent metrics are None.
    """
    forecast = UnivariateForecast(df[[cst.TIME_COL, cst.ACTUAL_COL, cst.PREDICTED_COL]],
                                  predicted_lower_col=None,
                                  predicted_upper_col=None,
                                  null_model_predicted_col=None,
                                  train_end_date=datetime.datetime(2018, 1, 2),
                                  relative_error_tolerance=0.7)
    forecast_full = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
    # Point-forecast metrics do not depend on the optional columns
    for enum in EvaluationMetricEnum:
        assert forecast.train_evaluation[enum.get_metric_name()] == forecast_full.train_evaluation[enum.get_metric_name()]
        assert forecast.test_evaluation[enum.get_metric_name()] == forecast_full.test_evaluation[enum.get_metric_name()]
    # Band/null-model metrics are skipped when the columns are missing
    for metric in [cst.R2_null_model_score, cst.PREDICTION_BAND_WIDTH, cst.PREDICTION_BAND_COVERAGE, cst.LOWER_BAND_COVERAGE,
                   cst.UPPER_BAND_COVERAGE, cst.COVERAGE_VS_INTENDED_DIFF]:
        assert forecast.train_evaluation[metric] is None
        assert forecast.test_evaluation[metric] is None
    assert forecast.relative_error_tolerance == 0.7
    assert forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.5
    assert forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.0
def test_input_validation(df):
    """Checks that invalid constructor arguments raise ValueError."""
    end_date = datetime.datetime(2018, 1, 2)
    # Coverage is required when prediction bands are provided
    with pytest.raises(ValueError, match="`coverage` must be provided"):
        UnivariateForecast(df, train_end_date=end_date, coverage=None)
    # Coverage must lie in the unit interval
    with pytest.raises(ValueError, match="`coverage` must be between 0.0 and 1.0"):
        UnivariateForecast(df, train_end_date=end_date, coverage=80.0)
    # Train end date must exist in the time column
    with pytest.raises(ValueError, match="2018-01-05 is not found in time column"):
        UnivariateForecast(df, train_end_date="2018-01-05")
    # Column names must exist in the frame
    with pytest.raises(ValueError, match="Column not found in data frame"):
        UnivariateForecast(df, actual_col="not_a_column")
def test_no_train_end_date(df):
    """Tests if train end date can be None."""
    # None should behave like passing the last timestamp explicitly.
    forecast_default = UnivariateForecast(df, train_end_date=None)
    forecast_explicit = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 4))
    assert_equal(forecast_default.train_evaluation, forecast_explicit.train_evaluation)
    # With no holdout period, there is nothing to evaluate on the test side.
    assert forecast_default.test_evaluation is None
def test_partial_test_data():
    """Tests if forecast evaluation can handle partially missing data.

    The last actual is NaN; metrics should match a forecast built from
    only the first four (complete) rows, and a warning should report the
    omitted value.
    """
    df = pd.DataFrame({
        cst.TIME_COL: ["2018-01-01", datetime.datetime(2018, 1, 2), "2018-01-03", "2018-01-04", "2018-01-05"],
        cst.ACTUAL_COL: [1, 2, 3, 2, np.nan],
        cst.PREDICTED_COL: [1, 4, 1, 2, 4],
        cst.PREDICTED_LOWER_COL: [1, 1, 1, 1, 2],
        cst.PREDICTED_UPPER_COL: [4, 5, 4, 4, 6],
        cst.NULL_PREDICTED_COL: [1.5, 1.5, 1.5, 1.5, 1.5]
    })
    with pytest.warns(UserWarning) as record:
        forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
        forecast2 = UnivariateForecast(df.iloc[:4, ], train_end_date=datetime.datetime(2018, 1, 2))
        assert forecast.test_na_count == 1
        # NOTE(review): `in ...args[0:2]` tests tuple membership (exact
        # equality with one of the first two args), not substring match --
        # presumably intended; confirm against the warning's args.
        assert "1 value(s) in y_true were NA or infinite and are omitted in error calc." in record[0].message.args[0:2]
        assert_equal(forecast.train_evaluation, forecast2.train_evaluation)
        assert_equal(forecast.test_evaluation, forecast2.test_evaluation)
def test_no_test_data():
    """Tests if test evaluation is skipped when there are no test data."""
    # Every row after the train end date has an NA actual value.
    data = pd.DataFrame({
        cst.TIME_COL: ["2018-01-01", datetime.datetime(2018, 1, 2), "2018-01-03", "2018-01-04"],
        cst.ACTUAL_COL: [1, 2, np.nan, np.nan],
        cst.PREDICTED_COL: [1, 4, 1, 2],
        cst.PREDICTED_LOWER_COL: [1, 1, 1, 1],
        cst.PREDICTED_UPPER_COL: [4, 5, 4, 4],
        cst.NULL_PREDICTED_COL: [1.5, 1.5, 1.5, 1.5]
    })
    forecast = UnivariateForecast(data, train_end_date=datetime.datetime(2018, 1, 2))
    assert forecast.test_na_count == 2
    assert forecast.train_evaluation is not None
    assert forecast.test_evaluation is None
def test_custom_loss_function(df):
    """Tests the custom loss function argument."""
    def root_sum_abs_error(y_pred, y_true):
        """Square root of the summed absolute errors."""
        abs_diff = np.abs(np.array(y_pred) - np.array(y_true))
        return np.sqrt(np.sum(abs_diff))
    forecast = UnivariateForecast(
        df,
        train_end_date=datetime.datetime(2018, 1, 2),
        r2_loss_function=root_sum_abs_error)
    assert forecast.train_evaluation[cst.R2_null_model_score] == 1 - math.sqrt(2)
    assert forecast.test_evaluation[cst.R2_null_model_score] == 0
def test_plot(df):
    """Tests plot function."""
    # Default styling
    fig = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2)).plot()
    assert fig is not None
    # Custom vertical line color
    forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 4))
    assert forecast.plot(vertical_line_color="green") is not None
def test_get_grouping_evaluation(df2):
    """Tests get_grouping_evaluation function.

    Covers all three grouping modes (time feature, sliding window,
    custom column) and both the train and test splits, pinning exact
    grouped metric values.
    """
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))
    # MAPE, groupby_time_feature, train set
    metric = EvaluationMetricEnum.MeanAbsolutePercentError
    metric_name = metric.get_metric_name()
    grouped_df = forecast.get_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_time_feature="dow")
    expected = pd.DataFrame({
        "dow": [1, 2, 3, 4, 5],  # Monday, Tuesday, etc. Time feature is used as column name
        f"train {metric_name}": [0.0, 100.0, 0.0, 50.0, 40.0]
    })
    assert_equal(grouped_df, expected)
    # MSE, groupby_sliding_window_size
    metric = EvaluationMetricEnum.MeanSquaredError
    metric_name = metric.get_metric_name()
    grouped_df = forecast.get_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_sliding_window_size=2)
    # Window start timestamps become the group labels
    expected = pd.DataFrame({
        f"{cst.TIME_COL}_downsample": [
            datetime.datetime(2018, 1, 1),
            datetime.datetime(2018, 1, 3),
            datetime.datetime(2018, 1, 5)],
        f"train {metric_name}": [0.0, 2.0, 4.0]
    })
    assert_equal(grouped_df, expected)
    # MAE, groupby_custom_column, test set
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 2))
    metric = EvaluationMetricEnum.MeanAbsoluteError
    custom_groups = pd.Series(["g1", "g2", "g1", "g3", "g2"], name="custom_groups")
    grouped_df = forecast.get_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=None,  # None yields the generic "metric" column label below
        which="test",
        groupby_custom_column=custom_groups)
    expected = pd.DataFrame({
        "custom_groups": ["g1", "g2", "g3"],
        "test metric": [1.0, 1.5, 2.0]
    })
    assert_equal(grouped_df, expected)
def test_plot_grouping_evaluation(df2):
    """Tests plot_grouping_evaluation function.

    Checks trace names, axis titles, figure titles, and trace sizes for
    the three grouping modes, plus custom label/title overrides.
    """
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))
    # MAPE, groupby_time_feature, train set
    metric = EvaluationMetricEnum.MeanAbsolutePercentError
    metric_name = metric.get_metric_name()
    fig = forecast.plot_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_time_feature="dow")
    assert fig.data[0].name == f"train {metric_name}"
    assert fig.layout.xaxis.title.text == "dow"
    assert fig.layout.yaxis.title.text == f"train {metric_name}"
    assert fig.layout.title.text == f"train {metric_name} vs dow"
    assert fig.data[0].x.shape[0] == 5
    # MSE, groupby_sliding_window_size, train set
    metric = EvaluationMetricEnum.MeanSquaredError
    metric_name = metric.get_metric_name()
    fig = forecast.plot_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_sliding_window_size=2)  # there are 5 training points, so this creates groups of size (1, 2, 2)
    assert fig.data[0].name == f"train {metric_name}"
    assert fig.layout.xaxis.title.text == f"{cst.TIME_COL}_downsample"
    assert fig.layout.yaxis.title.text == f"train {metric_name}"
    assert fig.layout.title.text == f"train {metric_name} vs {cst.TIME_COL}_downsample"
    assert fig.data[0].x.shape[0] == 3
    # MAE, groupby_custom_column, test set
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 2))
    metric = EvaluationMetricEnum.MeanAbsoluteError
    metric_name = metric.get_metric_name()
    custom_groups = pd.Series(["g1", "g2", "g1", "g3", "g2"], name="custom_groups")
    fig = forecast.plot_grouping_evaluation(
        groupby_custom_column=custom_groups,
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="test",
        title=None)
    assert fig.data[0].name == f"test {metric_name}"
    assert fig.layout.xaxis.title.text == "custom_groups"
    assert fig.layout.yaxis.title.text == f"test {metric_name}"
    assert fig.layout.title.text == f"test {metric_name} vs custom_groups"
    assert fig.data[0].x.shape[0] == 3
    # custom xlabel, ylabel, title
    fig = forecast.plot_grouping_evaluation(
        groupby_custom_column=custom_groups,
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="test",
        xlabel="Custom labels",
        ylabel="Mean Absolute Error of y",
        title="Mean Absolute Error of y by Custom labels")
    assert fig.layout.xaxis.title.text == "Custom labels"
    assert fig.layout.yaxis.title.text == "Mean Absolute Error of y"
    assert fig.layout.title.text == "Mean Absolute Error of y by Custom labels"
def test_autocomplete_map_func_dict(df2):
    """Tests autocomplete_map_func_dict function.

    Verifies that string metric names from `ElementwiseEvaluationMetricEnum`
    are expanded into row-wise callables, that a user-supplied callable is
    passed through unchanged, that `None` maps to `None`, and that an
    unrecognized metric name raises `ValueError`.
    """
    # Mix of enum metric names (to be autocompleted) and a raw callable.
    map_func_dict = {
        "residual": ElementwiseEvaluationMetricEnum.Residual.name,
        "squared_error": ElementwiseEvaluationMetricEnum.SquaredError.name,
        "coverage": ElementwiseEvaluationMetricEnum.Coverage.name,
        "custom_metric": lambda row: (row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL])**4
    }
    # NOTE(review): `rename()` without `columns=`/`axis=` maps the *index*,
    # not the columns — confirm whether renaming the columns was intended.
    df_renamed = df2.rename({
        cst.TIME_COL: "custom_time_col",
        cst.ACTUAL_COL: "custom_actual_col",
        cst.PREDICTED_COL: "custom_predicted_col",
        cst.PREDICTED_LOWER_COL: "custom_predicted_lower_col",
        cst.PREDICTED_UPPER_COL: "custom_predicted_upper_col",
        cst.NULL_PREDICTED_COL: "custom_null_predicted_col",
    })
    forecast = UnivariateForecast(df_renamed, train_end_date=datetime.datetime(2018, 1, 5))
    map_func_dict = forecast.autocomplete_map_func_dict(map_func_dict)
    # Each autocompleted function is applied row-wise and compared against
    # the directly computed column arithmetic.
    actual = df2.apply(map_func_dict["residual"], axis=1)
    expected = (df2[cst.ACTUAL_COL] - df2[cst.PREDICTED_COL])
    assert_series_equal(actual, expected)
    actual = df2.apply(map_func_dict["squared_error"], axis=1)
    expected = (df2[cst.ACTUAL_COL] - df2[cst.PREDICTED_COL]).pow(2)
    assert_series_equal(actual, expected)
    actual = df2.apply(map_func_dict["coverage"], axis=1)
    expected = ((df2[cst.ACTUAL_COL] > df2[cst.PREDICTED_LOWER_COL]) & (df2[cst.ACTUAL_COL] < df2[cst.PREDICTED_UPPER_COL])).astype('float')
    assert_series_equal(actual, expected)
    # The plain lambda must be returned untouched.
    actual = df2.apply(map_func_dict["custom_metric"], axis=1)
    expected = (df2[cst.ACTUAL_COL] - df2[cst.PREDICTED_COL]).pow(4)
    assert_series_equal(actual, expected)
    assert forecast.autocomplete_map_func_dict(None) is None
    # Unknown names must raise with a message listing the valid enum members.
    valid_names = ", ".join(ElementwiseEvaluationMetricEnum.__dict__["_member_names_"])
    with pytest.raises(ValueError, match=f"unknown_func is not a recognized elementwise "
                                         f"evaluation metric. Must be one of: {valid_names}"):
        map_func_dict = {"unknown_func": "unknown_func"}
        forecast.autocomplete_map_func_dict(map_func_dict)
def test_get_flexible_grouping_evaluation(df2):
    """Tests get_flexible_grouping_evaluation function.

    Covers three equivalent ways to define `map_func_dict` (enum names,
    enum metric functions, plain lambdas), then grouping by a time feature,
    by a sliding window, and by a custom column on the test set.
    """
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))
    # Checks residual quantiles, MSE/median squared error, and coverage
    map_func_dict = {
        "residual": ElementwiseEvaluationMetricEnum.Residual.name,
        "squared_error": ElementwiseEvaluationMetricEnum.SquaredError.name,
        "coverage": ElementwiseEvaluationMetricEnum.Coverage.name
    }
    # Aggregations run on the columns produced by `map_func_dict`.
    agg_kwargs = {
        "residual_mean": pd.NamedAgg(column="residual", aggfunc=np.nanmean),
        "residual_q05": pd.NamedAgg(column="residual", aggfunc=partial(np.nanquantile, q=0.05)),
        "residual_q95": pd.NamedAgg(column="residual", aggfunc=partial(np.nanquantile, q=0.95)),
        "MSE": pd.NamedAgg(column="squared_error", aggfunc=np.nanmean),
        "median_squared_error": pd.NamedAgg(column="squared_error", aggfunc=np.nanmedian),
        "coverage": pd.NamedAgg(column="coverage", aggfunc=np.nanmean),
    }
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    expected = pd.DataFrame({
        # Only one value per group, so the mean/median/quantiles are the same
        "residual_mean": [0.0, -2.0, 0.0, 2.0, 2.0],
        "residual_q05": [0.0, -2.0, 0.0, 2.0, 2.0],
        "residual_q95": [0.0, -2.0, 0.0, 2.0, 2.0],
        "MSE": [0.0, 4.0, 0.0, 4.0, 4.0],
        "median_squared_error": [0.0, 4.0, 0.0, 4.0, 4.0],
        "coverage": [0.0, 1.0, 1.0, 0.0, 0.0],
    }, index=pd.Series([1, 2, 3, 4, 5], name="dow"))
    assert_frame_equal(result, expected)
    # Equivalent way to specify `map_func_dict` (without autocomplete)
    map_func_dict = {
        "residual": lambda row: ElementwiseEvaluationMetricEnum.Residual.get_metric_func()(
            row[forecast.actual_col],
            row[forecast.predicted_col]),
        "squared_error": lambda row: ElementwiseEvaluationMetricEnum.SquaredError.get_metric_func()(
            row[forecast.actual_col],
            row[forecast.predicted_col]),
        "coverage": lambda row: ElementwiseEvaluationMetricEnum.Coverage.get_metric_func()(
            row[forecast.actual_col],
            row[forecast.predicted_lower_col],
            row[forecast.predicted_upper_col]),
    }
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    assert_frame_equal(result, expected)
    # Equivalent way to specify `map_func_dict` (without autocomplete)
    map_func_dict = {
        "residual": lambda row: row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL],
        "squared_error": lambda row: (row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL])**2,
        "coverage": lambda row: 1.0 if row[cst.PREDICTED_LOWER_COL] < row[cst.ACTUAL_COL] < row[cst.PREDICTED_UPPER_COL] else 0.0
    }
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    assert_frame_equal(result, expected)
    # Groupby sliding window
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature=None,
        groupby_sliding_window_size=3,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    # Windows of size 3 over 5 training points start at 2018-01-01 and 2018-01-04.
    expected = pd.DataFrame({
        "residual_mean": [-1.0, 4/3],
        "residual_q05": [-1.9, 0.2],
        "residual_q95": [-0.1, 2.0],
        "MSE": [2.0, 2.0 + 2/3],
        "median_squared_error": [2.0, 4.0],
        "coverage": [0.5, 1/3],
    }, index=pd.DatetimeIndex(["2018-01-01", "2018-01-04"], name="ts_downsample"))
    assert_frame_equal(result, expected)
    # On test set with custom groupby column
    custom_groups = pd.Series(["val1"], name="value_group").repeat(forecast.df_test.shape[0])
    result = forecast.get_flexible_grouping_evaluation(
        which="test",
        groupby_time_feature=None,
        groupby_sliding_window_size=None,
        groupby_custom_column=custom_groups,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs)
    colindex = pd.Index(
        ["residual_mean", "residual_q05", "residual_q95",
         "MSE", "median_squared_error", "coverage"])
    expected = pd.DataFrame(
        [[0.5, -0.85, 1.85, 2.5, 2.5, 0.5]],
        columns=colindex,
        index=pd.Series(["val1"], name=custom_groups.name))
    assert_frame_equal(result, expected)
def test_plot_flexible_grouping_evaluation():
    """Tests plot_flexible_grouping_evaluation function.

    Exercises the three built-in `y_col_style_dict` presets ("auto-fill",
    "auto", "plotly") plus a fully custom style dict, across grouping by a
    time feature, a sliding window, a custom column, and the test set.
    """
    df = gen_sliced_df(sample_size_dict={"a": 300, "b": 200, "c": 300, "d": 80, "e": 300})
    actual_col = "y"
    predicted_col = "y_hat"
    groupby_col = "x"
    groupby_col2 = "z"
    df = df[[actual_col, predicted_col, groupby_col, groupby_col2]]
    df[cst.TIME_COL] = pd.date_range(start="2020-01-01", periods=df.shape[0], freq="D")
    # 80/20 train/test split by date.
    end_index = math.floor(df.shape[0] * 0.8)
    forecast = UnivariateForecast(
        df,
        train_end_date=df[cst.TIME_COL][end_index],
        time_col=cst.TIME_COL,
        actual_col=actual_col,
        predicted_col=predicted_col,
        predicted_lower_col=None,
        predicted_upper_col=None,
        null_model_predicted_col=None)
    # MSE and quantiles of squared error
    metric_col = "squared_err"
    map_func_dict = {metric_col: ElementwiseEvaluationMetricEnum.SquaredError.name}
    agg_kwargs = {f"Q{quantile}": pd.NamedAgg(column=metric_col, aggfunc=partial(np.nanquantile, q=quantile)) for quantile in [0.1, 0.9]}
    agg_kwargs.update({"mean": pd.NamedAgg(column=metric_col, aggfunc=np.nanmean)})
    # group by "dom", "auto-fill" styling
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dom",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False,
        y_col_style_dict="auto-fill",
        default_color="rgba(0, 145, 202, 1.0)",
        xlabel=None,
        ylabel=metric_col,
        title=None,
        showlegend=True)
    # "auto-fill" sorts the quantile bands around the mean and fills between them.
    assert [fig.data[i].name for i in range(len(fig.data))] == ["Q0.1", "mean", "Q0.9"]
    assert fig.layout.xaxis.title.text == "dom"
    assert fig.layout.yaxis.title.text == metric_col
    assert fig.layout.title.text == f"{metric_col} vs dom"
    assert fig.data[0].x.shape[0] == 31  # 31 unique days in month
    assert fig.data[1].line["color"] == "rgba(0, 145, 202, 1.0)"
    assert fig.data[1].fill == "tonexty"  # from auto-fill
    assert fig.layout.showlegend
    # group by sliding window, "auto" styling
    # provide default color, xlabel, hide legend
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature=None,
        groupby_sliding_window_size=7,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False,
        y_col_style_dict="auto",
        default_color="rgba(145, 0, 202, 1.0)",
        xlabel="ts",
        ylabel=None,
        title=None,
        showlegend=False)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["Q0.1", "mean", "Q0.9"]
    assert fig.layout.xaxis.title.text == "ts"
    assert fig.layout.yaxis.title.text is None
    assert fig.layout.title.text is None
    assert fig.data[0].x[0] == datetime.datetime(2020, 1, 1, 0, 0)
    assert fig.data[1].line["color"] == "rgba(145, 0, 202, 1.0)"
    assert fig.data[1].fill is None
    assert not fig.layout.showlegend
    # custom groups, "plotly" styling, provide ylabel, title
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature=None,
        groupby_sliding_window_size=None,
        groupby_custom_column=forecast.df_train["x"],
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False,
        y_col_style_dict="plotly",
        default_color=None,
        xlabel=None,
        ylabel=metric_col,
        title="custom title",
        showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["Q0.1", "Q0.9", "mean"]  # not sorted
    assert fig.layout.xaxis.title.text == "x"
    assert fig.layout.yaxis.title.text == metric_col
    assert fig.layout.title.text == "custom title"
    assert list(fig.data[0].x) == list("abcde")
    assert fig.data[0].line["color"] is None  # color is up to plotly
    assert fig.data[1].fill is None
    assert fig.layout.showlegend
    # test set, absolute percent error, custom `y_col_style_dict` styling
    metric_col = "squared_error"
    map_func_dict = {
        metric_col: ElementwiseEvaluationMetricEnum.AbsolutePercentError.name
    }
    agg_kwargs = {
        "median": pd.NamedAgg(column=metric_col, aggfunc=np.nanmedian),
        "mean": pd.NamedAgg(column=metric_col, aggfunc=np.nanmean),
    }
    y_col_style_dict = {
        "median": {
            "mode": "lines+markers",
            "line": {
                "color": "rgba(202, 145, 0, 0.5)"
            }
        },
        "mean": {
            "mode": "lines+markers",
            "line": {
                "color": "rgba(0, 145, 202, 1.0)"
            }
        },
    }
    # Percent error warns when an actual value is (near) zero.
    with pytest.warns(UserWarning, match="true_val is less than 1e-8"):
        fig = forecast.plot_flexible_grouping_evaluation(
            which="test",
            groupby_time_feature="dow",
            groupby_sliding_window_size=None,
            groupby_custom_column=None,
            map_func_dict=map_func_dict,
            agg_kwargs=agg_kwargs,
            extend_col_names=False,
            y_col_style_dict=y_col_style_dict,
            xlabel="x value",
            ylabel="y value",
            title="error plot",
            showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["median", "mean"]  # not sorted
    assert fig.layout.xaxis.title.text == "x value"
    assert fig.layout.yaxis.title.text == "y value"
    assert fig.layout.title.text == "error plot"
    assert len(fig.data[0].x) == 7
    assert fig.data[0].mode == "lines+markers"
    assert fig.data[1].mode == "lines+markers"
    assert fig.data[0].line["color"] == y_col_style_dict["median"]["line"]["color"]
    assert fig.data[1].line["color"] == y_col_style_dict["mean"]["line"]["color"]
    assert fig.data[1].fill is None
    assert fig.layout.showlegend
    # median actual vs forecast value by group
    agg_kwargs = {
        "y_median": pd.NamedAgg(column="y", aggfunc=np.nanmedian),
        "y_hat_median": pd.NamedAgg(column="y_hat", aggfunc=np.nanmedian),
    }
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=None,
        agg_kwargs=agg_kwargs,
        extend_col_names=True,
        y_col_style_dict="plotly",
        xlabel=None,
        ylabel=forecast.ylabel,
        title="true vs actual by dow",
        showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["y_median", "y_hat_median"]
    assert fig.layout.xaxis.title.text == "dow"
    assert fig.layout.yaxis.title.text == "y"
    assert fig.layout.title.text == "true vs actual by dow"
    assert len(fig.data[0].x) == 7
    assert fig.layout.showlegend
def test_make_univariate_time_series(df):
    """Checks that `make_univariate_time_series` converts the forecast's
    predicted values into an equivalent `UnivariateTimeSeries`."""
    forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
    # Build the expected time series directly from the predicted column.
    expected_ts = UnivariateTimeSeries()
    expected_df = pd.DataFrame({
        cst.TIME_COL: df[cst.TIME_COL],
        cst.VALUE_COL: df[cst.PREDICTED_COL]
    })
    expected_ts.load_data(expected_df, cst.TIME_COL, cst.VALUE_COL)
    result = forecast.make_univariate_time_series()
    assert result.df.equals(expected_ts.df)
def test_plot_components():
    """Test plot_components of UnivariateForecast class.

    Fits a Silverkite model on a short series, then checks that requesting
    a known, an uppercase, and an unknown component produces the expected
    subplot layout plus a warning naming the missing component.
    """
    X = pd.DataFrame({
        cst.TIME_COL: pd.date_range("2018-01-01", periods=10, freq="D"),
        cst.VALUE_COL: np.arange(1, 11)
    })
    coverage = 0.95
    # Test Silverkite
    trained_model = Pipeline([("estimator", SilverkiteEstimator(coverage=coverage))])
    # 10 daily points are too few for uncertainty slices, hence the warning.
    with pytest.warns(Warning) as record:
        trained_model.fit(X, X[cst.VALUE_COL])
        assert "No slice had sufficient sample size" in record[0].message.args[0]
    forecast = get_forecast(X, trained_model)
    with pytest.warns(Warning) as record:
        title = "Custom component plot"
        # "DUMMY" is not a model component and should be skipped with a warning.
        fig = forecast.plot_components(names=["trend", "YEARLY_SEASONALITY", "DUMMY"], title=title)
        expected_rows = 3
        assert len(fig.data) == expected_rows
        assert [fig.data[i].name for i in range(expected_rows)] == \
            [cst.VALUE_COL, "trend", "YEARLY_SEASONALITY"]
        assert fig.layout.xaxis.title["text"] == cst.TIME_COL
        assert fig.layout.xaxis2.title["text"] == cst.TIME_COL
        assert fig.layout.xaxis3.title["text"] == "Time of year"
        assert fig.layout.yaxis.title["text"] == cst.VALUE_COL
        assert fig.layout.yaxis2.title["text"] == "trend"
        assert fig.layout.yaxis3.title["text"] == "yearly"
        assert fig.layout.title["text"] == title
        assert f"The following components have not been specified in the model: " \
               f"{{'DUMMY'}}, plotting the rest." in record[0].message.args[0]
@pytest.mark.skipif("fbprophet" not in sys.modules,
                    reason="Module 'fbprophet' not installed, pytest for 'ProphetTemplate' skipped.")
def test_plot_components_prophet():
    """Smoke test: `plot_components` returns a figure for a fitted Prophet model."""
    X = pd.DataFrame({
        cst.TIME_COL: pd.date_range("2018-01-01", periods=10, freq="D"),
        cst.VALUE_COL: np.arange(1, 11)
    })
    coverage = 0.95
    # Test Prophet
    trained_model = Pipeline([("estimator", ProphetEstimator(coverage=coverage))])
    trained_model.fit(X, X[cst.VALUE_COL])
    forecast = get_forecast(X, trained_model)
    # Only checks that a figure is produced; detailed layout is not asserted.
    fig = forecast.plot_components()
    assert fig is not None
|
import argparse
import base64
import json
import random
from datetime import datetime, timezone, timedelta
from functools import partial
from typing import List, Dict, Union, NamedTuple, Optional, Callable, Set
import requests
from uqcsbot import bot, Command
from uqcsbot.api import Channel
from uqcsbot.utils.command_utils import loading_status, UsageSyntaxException
# Open Trivia DB endpoints: questions and the category listing.
API_URL = "https://opentdb.com/api.php"
CATEGORIES_URL = "https://opentdb.com/api_category.php"
# NamedTuple for use with the data returned from the api
QuestionData = NamedTuple('QuestionData',
                          [('type', str), ('question', str), ('correct_answer', str),
                           ('answers', List[str]), ('is_boolean', bool)])
# Contains information about a reaction and the list of users who used said reaction
ReactionUsers = NamedTuple('ReactionUsers', [('name', str), ('users', Set[str])])
# Customisation options
# The interval between reactions being made for the possible answers (prevents order changing)
REACT_INTERVAL = 1
# Bounds applied to the user-supplied -s/--seconds argument.
MIN_SECONDS = 5
MAX_SECONDS = 300
# The channels where multiple trivia questions can be asked (prevent spam)
# NOTE(review): "SEQUETIAL" is a typo for "SEQUENTIAL"; renaming would touch
# every call site, so it is only flagged here.
VALID_SEQUETIAL_CHANNELS = ['trivia', 'bot-testing']
MAX_SEQUENTIAL_QUESTIONS = 30
BOOLEAN_REACTS = ['this', 'not-this']  # Format of [ <True>, <False> ]
# Colours should match CHOICE_COLORS
MULTIPLE_CHOICE_REACTS = ['green_heart', 'yellow_heart', 'heart', 'blue_heart']
CHOICE_COLORS = ['#6C9935', '#F3C200', '#B6281E', '#3176EF']
# What arguments to use for the cron job version
CRON_CHANNEL = 'trivia'
# (One day - 15 seconds) Overrides any -s argument below and ignores MAX_SECONDS rule
CRON_SECONDS = 86385
CRON_ARGUMENTS = ''
@bot.on_command('trivia')
@loading_status
def handle_trivia(command: Command):
    """
    `!trivia [-d <easy|medium|hard>] [-c <CATEGORY>]
    [-t <multiple|tf>] [-s <N>] [-n <N>] [--cats]`
    - Asks a new trivia question
    """
    args = parse_arguments(command.channel_id, command.arg if command.has_arg() else '')
    # End early if the help option was used
    # (parse_arguments has already posted the help text to the channel)
    if args.help:
        return
    # Send the possible categories
    if args.cats:
        bot.post_message(command.channel_id, get_categories())
        return
    # Check if the channel is valid for sequential questions
    # Sequential runs are allowed in DMs and in the whitelisted channels only.
    current_channel = bot.channels.get(command.channel_id)
    if all([args.count > 1, not current_channel.is_im,
            current_channel.name not in VALID_SEQUETIAL_CHANNELS]):
        # If no valid channels are specified
        if len(VALID_SEQUETIAL_CHANNELS) == 0:
            bot.post_message(command.channel_id,
                             'This command can only be used in private messages with the bot')
            return
        first_valid = bot.channels.get(VALID_SEQUETIAL_CHANNELS[0])
        channel_message = ''
        if first_valid:
            channel_message = f'Try <#{first_valid.id}|{VALID_SEQUETIAL_CHANNELS[0]}>.'
        bot.post_message(command.channel_id, f'You cannot use the sequential questions '
                         + f'feature in this channel. {channel_message}')
        return
    handle_question(command.channel_id, args)
def parse_arguments(channel: Channel, arg_string: str) -> argparse.Namespace:
    """
    Parses the arguments for the command.

    :param channel: The channel to post the help message to (if requested)
    :param arg_string: The raw argument string following `!trivia`
    :return: An argparse Namespace object with the parsed arguments
        (with `seconds` and `count` clamped, and `original_count` added)
    """
    parser = argparse.ArgumentParser(prog='!trivia', add_help=False)

    # argparse calls parser.error() on bad input, which would normally print
    # to stderr and exit the process; raise the bot's usage exception instead.
    def usage_error(*args, **kwargs):
        raise UsageSyntaxException()

    parser.error = usage_error  # type: ignore
    parser.add_argument('-d', '--difficulty', choices=['easy', 'medium', 'hard'],
                        default='random', type=str.lower,
                        help='The difficulty of the question. (default: %(default)s)')
    parser.add_argument('-c', '--category', default=-1, type=int,
                        help='Specifies a category (default: any)')
    parser.add_argument('-t', '--type', choices=['boolean', 'multiple'],
                        default="random", type=str.lower,
                        help='The type of question. (default: %(default)s)')
    parser.add_argument('-s', '--seconds', default=30, type=int,
                        help='Number of seconds before posting answer (default: %(default)s)')
    parser.add_argument('-n', '--count', default=1, type=int, help=f"Do 'n' trivia questions in "
                        f"quick succession (max : {MAX_SEQUENTIAL_QUESTIONS})")
    parser.add_argument('--cats', action='store_true',
                        help='Sends a list of valid categories to the user')
    parser.add_argument('-h', '--help', action='store_true', help='Prints this help message')
    args = parser.parse_args(arg_string.split())
    # If the help option was used print the help message to
    # the channel (needs access to the parser to do this)
    if args.help:
        bot.post_message(channel, parser.format_help())
    # Constrain the number of seconds to a reasonable frame
    args.seconds = max(MIN_SECONDS, args.seconds)
    args.seconds = min(args.seconds, MAX_SECONDS)
    # Constrain the number of sequential questions
    args.count = max(args.count, 1)
    args.count = min(args.count, MAX_SEQUENTIAL_QUESTIONS)
    # Add an original count to keep track
    # (handle_question decrements args.count; original_count lets it number questions)
    args.original_count = args.count
    return args
def get_categories() -> str:
    """
    Gets the message to send if the user wants a list of the available categories.

    :return: A code-block formatted table of category ids and names, or an
        error message if the categories endpoint could not be reached.
    """
    http_response = requests.get(CATEGORIES_URL)
    if http_response.status_code != requests.codes.ok:
        return "There was a problem getting the response"
    categories = json.loads(http_response.content)['trivia_categories']
    # Construct pretty results to print in a code block to avoid a large spammy message
    pretty_results = '```Use the id to specify a specific category \n\nID  Name\n'
    for category in categories:
        # Bug fix: use double quotes for the f-string so the single-quoted
        # subscripts inside are valid syntax on Python < 3.12 (reusing the
        # outer quote inside an f-string requires PEP 701, Python 3.12+).
        # `:<4d` left-aligns the id in a 4-character column to match "ID  ".
        pretty_results += f"{category['id']:<4d}{category['name']}\n"
    pretty_results += '```'
    return pretty_results
def handle_question(channel: Channel, args: argparse.Namespace):
    """
    Handles getting a question and posting it to the channel as well as scheduling the answer.

    :param channel: The channel to post the question (and later the answer) to
    :param args: Parsed `!trivia` arguments; `args.count` is decremented here
        so remaining questions in a sequence can be scheduled recursively.
    """
    question_data = get_question_data(channel, args)
    # On failure an error message has already been posted to the channel.
    if question_data is None:
        return
    # Number the questions (Q1:, Q2:, ...) only when running a sequence.
    question_number = args.original_count - args.count + 1
    prefix = f'Q{question_number}:' if args.original_count > 1 else ''
    post_question(channel, question_data, prefix)
    # Get the answer message
    if question_data.is_boolean:
        # Boolean answers are shown as the matching reaction emoji.
        if question_data.correct_answer == 'True':
            answer_text = f':{BOOLEAN_REACTS[0]}:'
        else:
            answer_text = f':{BOOLEAN_REACTS[1]}:'
    else:
        answer_text = question_data.correct_answer
    answer_message = f'The answer to the question *{question_data.question}* is: *{answer_text}*'
    # Schedule the answer to be posted after the specified number of seconds has passed
    post_answer = partial(bot.post_message, channel, answer_message)
    schedule_action(post_answer, args.seconds)
    # If more questions are to be asked schedule the question for 5 seconds after the current answer
    if args.count > 1:
        args.count -= 1
        # Re-entrant call via the scheduler; the mutated `args` carries the state.
        schedule_action(partial(handle_question, channel, args), args.seconds + 5)
def get_question_data(channel: Channel, args: argparse.Namespace) -> Optional[QuestionData]:
    """
    Attempts to get a question from the api using the specified arguments.
    Returns a QuestionData tuple for the question on success
    and None on failure (after posting an error message).

    :param channel: The channel to post error messages to
    :param args: Parsed `!trivia` arguments (category/difficulty/type filters)
    """
    # Base64 to help with encoding the message for slack
    params: Dict[str, Union[int, str]] = {'amount': 1, 'encode': 'base64'}
    # Add in any explicitly specified arguments
    # (the defaults -1/'random' mean "no filter" and are omitted)
    if args.category != -1:
        params['category'] = args.category
    if args.difficulty != 'random':
        params['difficulty'] = args.difficulty
    if args.type != 'random':
        params['type'] = args.type
    # Get the response and check that it is valid
    http_response = requests.get(API_URL, params=params)
    if http_response.status_code != requests.codes.ok:
        bot.post_message(channel, "There was a problem getting the response")
        return None
    # Check the response codes and post a useful message in the case of an error
    # (Open Trivia DB: 0 = success, 2 = invalid parameter such as a bad category id)
    response_content = json.loads(http_response.content)
    if response_content['response_code'] == 2:
        bot.post_message(channel, "Invalid category id. "
                         + "Try !trivia --cats for a list of valid categories.")
        return None
    elif response_content['response_code'] != 0:
        bot.post_message(channel, "No results were returned")
        return None
    question_data = response_content['results'][0]
    # Get the type of question and make the NamedTuple container for the data
    # (true/false questions have exactly one incorrect answer)
    is_boolean = len(question_data['incorrect_answers']) == 1
    answers = [question_data['correct_answer']] + question_data['incorrect_answers']
    # Delete the ones we don't need
    # (so the remaining keys line up with the QuestionData fields below)
    del question_data['category']
    del question_data['difficulty']
    del question_data['incorrect_answers']
    # Decode the ones we want. The base 64 decoding ensures
    # that the formatting works properly with slack.
    question_data['question'] = decode_b64(question_data['question'])
    question_data['correct_answer'] = decode_b64(question_data['correct_answer'])
    answers = [decode_b64(ans) for ans in answers]
    question_data = QuestionData(is_boolean=is_boolean, answers=answers, **question_data)
    # Shuffle the answers
    # (in-place shuffle of the list held by the tuple field)
    random.shuffle(question_data.answers)
    return question_data
def post_question(channel: Channel, question_data: QuestionData, prefix: str = '') -> float:
    """
    Posts the question from the given QuestionData along with
    the possible answers list if applicable.
    Also creates the answer reacts.
    Returns the timestamp of the message carrying the reactions (the answers
    message for multiple choice, otherwise the question message itself).

    NOTE(review): Slack message timestamps are strings; the `-> float`
    annotation looks inaccurate — confirm before relying on it.
    """
    # Post the question and get the timestamp for the reactions (asterisks bold it)
    message_ts = bot.post_message(channel, f'*{prefix} {question_data.question}*')['ts']
    # Print the questions (if multiple choice) and add the answer reactions
    reactions = BOOLEAN_REACTS if question_data.is_boolean else MULTIPLE_CHOICE_REACTS
    if not question_data.is_boolean:
        # For multiple choice the reactions go on the answers message instead.
        message_ts = post_possible_answers(channel, question_data.answers)
    add_reactions_interval(reactions, channel, message_ts, REACT_INTERVAL)
    return message_ts
def add_reactions_interval(reactions: List[str], channel: Channel,
                           msg_timestamp: str, interval: float = 1):
    """
    Adds the given reactions with "interval" seconds between in order
    to prevent them from changing order in slack (as slack uses the
    timestamp of when the reaction was added to determine the order).

    :param reactions: The reactions to add
    :param channel: The channel containing the desired message to react to
    :param msg_timestamp: The timestamp of the required message
    :param interval: The interval between posting each reaction (defaults to 1 second)
    """
    # Guard: nothing to do for an empty reaction list (avoids IndexError below).
    if not reactions:
        return
    # If the react interval is 0 don't do any of the scheduling stuff.
    # Bug fix: check the `interval` parameter rather than the module-level
    # REACT_INTERVAL constant, which made the parameter silently ignored.
    if interval == 0:
        for reaction in reactions:
            bot.api.reactions.add(name=reaction, channel=channel, timestamp=msg_timestamp)
        return
    # Do the first one immediately
    bot.api.reactions.add(name=reactions[0], channel=channel, timestamp=msg_timestamp)

    # Binding the reaction through `partial` below fixes each scheduled call's
    # argument at schedule time; a plain closure over the loop variable would
    # late-bind and post the same react every time.
    def add_reaction(reaction: str):
        bot.api.reactions.add(name=reaction, channel=channel, timestamp=msg_timestamp)

    for index, reaction in enumerate(reactions[1:]):
        delay = (index + 1) * interval
        schedule_action(partial(add_reaction, reaction), delay)
def decode_b64(encoded: str) -> str:
    """Decode a base64 string and return its contents as utf-8 text."""
    raw_bytes = base64.b64decode(encoded)
    return str(raw_bytes, 'utf-8')
def get_correct_reaction(question_data: QuestionData):
    """
    Returns the reaction emoji name that marks the correct answer.
    """
    if question_data.is_boolean:
        # 'True' maps to the first boolean react, anything else to the second.
        return BOOLEAN_REACTS[0 if question_data.correct_answer == 'True' else 1]
    # Multiple choice: the react at the correct answer's position in the list.
    answer_position = question_data.answers.index(question_data.correct_answer)
    return MULTIPLE_CHOICE_REACTS[answer_position]
def post_possible_answers(channel: Channel, answers: List[str]) -> float:
    """
    Posts the possible answers for a multiple choice question in a nice way.
    Returns the timestamp of the message to allow reacting to it.
    """
    # Pair each answer with its display colour; zip stops at the shorter list.
    attachments = [{'text': answer, 'color': colour}
                   for colour, answer in zip(CHOICE_COLORS, answers)]
    return bot.post_message(channel, '', attachments=attachments)['ts']
def schedule_action(action: Callable, secs: Union[int, float]):
    """
    Schedules the supplied action to be called once in the given number of seconds.

    :param action: A zero-argument callable to invoke
    :param secs: Delay in seconds before the action runs
    """
    # NOTE(review): the offset is hard-coded to UTC+10 (Brisbane, no DST) —
    # confirm this matches the scheduler's configured timezone.
    run_date = datetime.now(timezone(timedelta(hours=10))) + timedelta(seconds=secs)
    # Uses the bot's private scheduler; there is no public one-shot API here.
    bot._scheduler.add_job(action, 'date', run_date=run_date)
@bot.on_schedule('cron', hour=12, timezone='Australia/Brisbane')
def daily_trivia():
    """
    Adds a job that displays a random question to the specified channel at lunch time
    """
    channel = bot.channels.get(CRON_CHANNEL).id
    # Get arguments and update the seconds
    # (seconds is set *after* parsing so CRON_SECONDS bypasses the MAX_SECONDS clamp)
    args = parse_arguments(channel, CRON_ARGUMENTS)
    args.seconds = CRON_SECONDS
    # Get and post the actual question
    handle_question(channel, args)
    # Format a nice message to tell when the answer will be
    hours = CRON_SECONDS // 3600
    minutes = (CRON_SECONDS - (hours * 3600)) // 60
    # Round up to the next full hour when within 5 minutes of it.
    if minutes > 55:
        hours += 1
        minutes = 0
    time_until_answer = 'Answer in '
    if hours > 0:
        time_until_answer += f'{hours} hours'
    if minutes > 0:
        time_until_answer += f' and {minutes} minutes' if hours > 0 else f'{minutes} minutes'
    bot.post_message(channel, time_until_answer)
| import argparse
import base64
import json
import random
from datetime import datetime, timezone, timedelta
from functools import partial
from typing import List, Dict, Union, NamedTuple, Optional, Callable, Set
import requests
from uqcsbot import bot, Command
from uqcsbot.api import Channel
from uqcsbot.utils.command_utils import loading_status, UsageSyntaxException
# Open Trivia DB endpoints: questions and the category listing.
API_URL = "https://opentdb.com/api.php"
CATEGORIES_URL = "https://opentdb.com/api_category.php"
# NamedTuple for use with the data returned from the api
QuestionData = NamedTuple('QuestionData',
                          [('type', str), ('question', str), ('correct_answer', str),
                           ('answers', List[str]), ('is_boolean', bool)])
# Contains information about a reaction and the list of users who used said reaction
ReactionUsers = NamedTuple('ReactionUsers', [('name', str), ('users', Set[str])])
# Customisation options
# The interval between reactions being made for the possible answers (prevents order changing)
REACT_INTERVAL = 1
# Bounds applied to the user-supplied -s/--seconds argument.
MIN_SECONDS = 5
MAX_SECONDS = 300
# The channels where multiple trivia questions can be asked (prevent spam)
# NOTE(review): "SEQUETIAL" is a typo for "SEQUENTIAL"; renaming would touch
# every call site, so it is only flagged here.
VALID_SEQUETIAL_CHANNELS = ['trivia', 'bot-testing']
MAX_SEQUENTIAL_QUESTIONS = 30
BOOLEAN_REACTS = ['this', 'not-this']  # Format of [ <True>, <False> ]
# Colours should match CHOICE_COLORS
MULTIPLE_CHOICE_REACTS = ['green_heart', 'yellow_heart', 'heart', 'blue_heart']
CHOICE_COLORS = ['#6C9935', '#F3C200', '#B6281E', '#3176EF']
# What arguments to use for the cron job version
CRON_CHANNEL = 'trivia'
# (One day - 15 seconds) Overrides any -s argument below and ignores MAX_SECONDS rule
CRON_SECONDS = 86385
CRON_ARGUMENTS = ''
@bot.on_command('trivia')
@loading_status
def handle_trivia(command: Command):
    """
    `!trivia [-d <easy|medium|hard>] [-c <CATEGORY>]
    [-t <multiple|tf>] [-s <N>] [-n <N>] [--cats]`
    - Asks a new trivia question
    """
    args = parse_arguments(command.channel_id, command.arg if command.has_arg() else '')
    # End early if the help option was used
    # (parse_arguments has already posted the help text to the channel)
    if args.help:
        return
    # Send the possible categories
    if args.cats:
        bot.post_message(command.channel_id, get_categories())
        return
    # Check if the channel is valid for sequential questions
    # Sequential runs are allowed in DMs and in the whitelisted channels only.
    current_channel = bot.channels.get(command.channel_id)
    if all([args.count > 1, not current_channel.is_im,
            current_channel.name not in VALID_SEQUETIAL_CHANNELS]):
        # If no valid channels are specified
        if len(VALID_SEQUETIAL_CHANNELS) == 0:
            bot.post_message(command.channel_id,
                             'This command can only be used in private messages with the bot')
            return
        first_valid = bot.channels.get(VALID_SEQUETIAL_CHANNELS[0])
        channel_message = ''
        if first_valid:
            channel_message = f'Try <#{first_valid.id}|{VALID_SEQUETIAL_CHANNELS[0]}>.'
        bot.post_message(command.channel_id, f'You cannot use the sequential questions '
                         + f'feature in this channel. {channel_message}')
        return
    handle_question(command.channel_id, args)
def parse_arguments(channel: Channel, arg_string: str) -> argparse.Namespace:
    """
    Parses the arguments for the command.

    :param channel: The channel to post the help message to (if requested)
    :param arg_string: The raw argument string following `!trivia`
    :return: An argparse Namespace object with the parsed arguments
        (with `seconds` and `count` clamped, and `original_count` added)
    """
    parser = argparse.ArgumentParser(prog='!trivia', add_help=False)

    # argparse calls parser.error() on bad input, which would normally print
    # to stderr and exit the process; raise the bot's usage exception instead.
    def usage_error(*args, **kwargs):
        raise UsageSyntaxException()

    parser.error = usage_error  # type: ignore
    parser.add_argument('-d', '--difficulty', choices=['easy', 'medium', 'hard'],
                        default='random', type=str.lower,
                        help='The difficulty of the question. (default: %(default)s)')
    parser.add_argument('-c', '--category', default=-1, type=int,
                        help='Specifies a category (default: any)')
    parser.add_argument('-t', '--type', choices=['boolean', 'multiple'],
                        default="random", type=str.lower,
                        help='The type of question. (default: %(default)s)')
    parser.add_argument('-s', '--seconds', default=30, type=int,
                        help='Number of seconds before posting answer (default: %(default)s)')
    parser.add_argument('-n', '--count', default=1, type=int, help=f"Do 'n' trivia questions in "
                        f"quick succession (max : {MAX_SEQUENTIAL_QUESTIONS})")
    parser.add_argument('--cats', action='store_true',
                        help='Sends a list of valid categories to the user')
    parser.add_argument('-h', '--help', action='store_true', help='Prints this help message')
    args = parser.parse_args(arg_string.split())
    # If the help option was used print the help message to
    # the channel (needs access to the parser to do this)
    if args.help:
        bot.post_message(channel, parser.format_help())
    # Constrain the number of seconds to a reasonable frame
    args.seconds = max(MIN_SECONDS, args.seconds)
    args.seconds = min(args.seconds, MAX_SECONDS)
    # Constrain the number of sequential questions
    args.count = max(args.count, 1)
    args.count = min(args.count, MAX_SEQUENTIAL_QUESTIONS)
    # Add an original count to keep track
    # (handle_question decrements args.count; original_count lets it number questions)
    args.original_count = args.count
    return args
def get_categories() -> str:
    """
    Gets the message to send if the user wants a list of the available categories.

    Returns a Slack code block listing category ids and names, or a short
    error string when the API request fails.
    """
    http_response = requests.get(CATEGORIES_URL)
    if http_response.status_code != requests.codes.ok:
        return "There was a problem getting the response"
    categories = json.loads(http_response.content)['trivia_categories']
    # Build the rows with join instead of quadratic += concatenation.
    # A code block keeps the output compact to avoid a large spammy message.
    rows = ''.join(f'{category["id"]:<4d}{category["name"]}\n' for category in categories)
    return '```Use the id to specify a specific category \n\nID  Name\n' + rows + '```'
def handle_question(channel: Channel, args: argparse.Namespace):
    """
    Handles getting a question and posting it to the channel as well as
    scheduling the answer (and the next question in a multi-question run).
    """
    question_data = get_question_data(channel, args)
    if question_data is None:
        # get_question_data has already posted an error message.
        return
    # Label questions "Q1:", "Q2:", ... only in a multi-question session.
    question_number = args.original_count - args.count + 1
    prefix = f'Q{question_number}:' if args.original_count > 1 else ''
    post_question(channel, question_data, prefix)
    # Get the answer message: boolean answers are shown as the reaction
    # emoji, multiple-choice answers as the answer text itself.
    if question_data.is_boolean:
        if question_data.correct_answer == 'True':
            answer_text = f':{BOOLEAN_REACTS[0]}:'
        else:
            answer_text = f':{BOOLEAN_REACTS[1]}:'
    else:
        answer_text = question_data.correct_answer
    answer_message = f'The answer to the question *{question_data.question}* is: *{answer_text}*'
    # Schedule the answer to be posted after the specified number of seconds has passed
    post_answer = partial(bot.post_message, channel, answer_message)
    schedule_action(post_answer, args.seconds)
    # If more questions are to be asked schedule the question for 5 seconds after the current answer
    if args.count > 1:
        args.count -= 1
        schedule_action(partial(handle_question, channel, args), args.seconds + 5)
def get_question_data(channel: Channel, args: argparse.Namespace) -> Optional[QuestionData]:
    """
    Attempts to get a question from the api using the specified arguments.
    Returns the QuestionData object for the question on success
    and None on failure (after posting an error message).
    """
    # Base64 to help with encoding the message for slack
    params: Dict[str, Union[int, str]] = {'amount': 1, 'encode': 'base64'}
    # Add in any explicitly specified arguments; the defaults (-1 /
    # 'random') mean "unspecified" and are omitted from the request.
    if args.category != -1:
        params['category'] = args.category
    if args.difficulty != 'random':
        params['difficulty'] = args.difficulty
    if args.type != 'random':
        params['type'] = args.type
    # Get the response and check that it is valid
    http_response = requests.get(API_URL, params=params)
    if http_response.status_code != requests.codes.ok:
        bot.post_message(channel, "There was a problem getting the response")
        return None
    # Check the response codes and post a useful message in the case of an
    # error (code 2 corresponds to an invalid category parameter here).
    response_content = json.loads(http_response.content)
    if response_content['response_code'] == 2:
        bot.post_message(channel, "Invalid category id. "
                         + "Try !trivia --cats for a list of valid categories.")
        return None
    elif response_content['response_code'] != 0:
        bot.post_message(channel, "No results were returned")
        return None
    question_data = response_content['results'][0]
    # Get the type of question and make the NamedTuple container for the data.
    # Exactly one incorrect answer implies a True/False question.
    is_boolean = len(question_data['incorrect_answers']) == 1
    answers = [question_data['correct_answer']] + question_data['incorrect_answers']
    # Delete the keys QuestionData does not accept before the ** expansion.
    del question_data['category']
    del question_data['difficulty']
    del question_data['incorrect_answers']
    # Decode the ones we want. The base 64 decoding ensures
    # that the formatting works properly with slack.
    question_data['question'] = decode_b64(question_data['question'])
    question_data['correct_answer'] = decode_b64(question_data['correct_answer'])
    answers = [decode_b64(ans) for ans in answers]
    question_data = QuestionData(is_boolean=is_boolean, answers=answers, **question_data)
    # Shuffle the answers (in place) so the correct one isn't always first.
    random.shuffle(question_data.answers)
    return question_data
def post_question(channel: Channel, question_data: QuestionData, prefix: str = '') -> float:
    """
    Posts the question from the given QuestionData along with
    the possible answers list if applicable.
    Also creates the answer reacts.
    Returns the timestamp of the message the reactions were added to.
    """
    # Post the question and get the timestamp for the reactions (asterisks bold it)
    message_ts = bot.post_message(channel, f'*{prefix} {question_data.question}*')['ts']
    # For multiple choice questions the reactions go on the separate
    # answers message rather than on the question itself.
    reactions = BOOLEAN_REACTS if question_data.is_boolean else MULTIPLE_CHOICE_REACTS
    if not question_data.is_boolean:
        message_ts = post_possible_answers(channel, question_data.answers)
    add_reactions_interval(reactions, channel, message_ts, REACT_INTERVAL)
    return message_ts
def add_reactions_interval(reactions: List[str], channel: Channel,
                           msg_timestamp: str, interval: float = 1):
    """
    Adds the given reactions with "interval" seconds between in order
    to prevent them from changing order in slack (as slack uses the
    timestamp of when the reaction was added to determine the order).
    :param reactions: The reactions to add
    :param channel: The channel containing the desired message to react to
    :param msg_timestamp: The timestamp of the required message
    :param interval: The interval between posting each reaction (defaults to 1 second)
    """
    # Nothing to do for an empty reaction list (avoids IndexError below).
    if not reactions:
        return
    # If the react interval is 0 don't do any of the scheduling stuff.
    # BUG FIX: test the `interval` parameter, not the module-level
    # REACT_INTERVAL constant, which silently ignored the caller's argument.
    if interval == 0:
        for reaction in reactions:
            bot.api.reactions.add(name=reaction, channel=channel, timestamp=msg_timestamp)
        return
    # Do the first one immediately
    bot.api.reactions.add(name=reactions[0], channel=channel, timestamp=msg_timestamp)
    # Binding `reaction` as an argument via partial avoids the late-binding
    # closure problem where every scheduled call would see the last reaction.
    def add_reaction(reaction: str):
        bot.api.reactions.add(name=reaction, channel=channel, timestamp=msg_timestamp)
    for index, reaction in enumerate(reactions[1:]):
        delay = (index + 1) * interval
        schedule_action(partial(add_reaction, reaction), delay)
def decode_b64(encoded: str) -> str:
    """
    Decode a base64-encoded string and return it as UTF-8 text.
    """
    raw_bytes = base64.b64decode(encoded)
    return raw_bytes.decode('utf-8')
def get_correct_reaction(question_data: QuestionData):
    """
    Returns the reaction that matches with the correct answer.
    """
    if not question_data.is_boolean:
        # Reaction position mirrors the answer's position in the shuffled list.
        answer_index = question_data.answers.index(question_data.correct_answer)
        return MULTIPLE_CHOICE_REACTS[answer_index]
    return BOOLEAN_REACTS[0] if question_data.correct_answer == 'True' else BOOLEAN_REACTS[1]
def post_possible_answers(channel: Channel, answers: List[str]) -> float:
    """
    Posts the possible answers for a multiple choice question in a nice way
    (one colored attachment per answer).
    Returns the timestamp of the message to allow reacting to it.
    """
    # Comprehension instead of an append loop; zip also caps the answers at
    # the number of available colors, exactly as before.
    attachments = [
        {'text': answer, 'color': col}
        for col, answer in zip(CHOICE_COLORS, answers)
    ]
    return bot.post_message(channel, '', attachments=attachments)['ts']
def schedule_action(action: Callable, secs: Union[int, float]):
    """
    Schedules the supplied action to be called once in the given number of seconds.
    """
    # The scheduler wants an aware datetime; the offset is hard-coded to
    # UTC+10 (Brisbane, no DST).  NOTE(review): this reaches into the bot's
    # private `_scheduler` attribute -- confirm there is no public API.
    run_date = datetime.now(timezone(timedelta(hours=10))) + timedelta(seconds=secs)
    bot._scheduler.add_job(action, 'date', run_date=run_date)
@bot.on_schedule('cron', hour=12, timezone='Australia/Brisbane')
def daily_trivia():
    """
    Adds a job that displays a random question to the specified channel at lunch time
    """
    # NOTE(review): this passes the channel *id* where the other helpers
    # receive a Channel object -- presumably bot.post_message accepts both;
    # confirm against the bot framework.
    channel = bot.channels.get(CRON_CHANNEL).id
    # Get arguments and update the seconds
    args = parse_arguments(channel, CRON_ARGUMENTS)
    args.seconds = CRON_SECONDS
    # Get and post the actual question
    handle_question(channel, args)
    # Format a nice message to tell when the answer will be
    hours = CRON_SECONDS // 3600
    minutes = (CRON_SECONDS - (hours * 3600)) // 60
    # Round 56+ minutes up to the next whole hour for a cleaner message.
    if minutes > 55:
        hours += 1
        minutes = 0
    time_until_answer = 'Answer in '
    if hours > 0:
        time_until_answer += f'{hours} hours'
    if minutes > 0:
        time_until_answer += f' and {minutes} minutes' if hours > 0 else f'{minutes} minutes'
    bot.post_message(channel, time_until_answer)
|
from datetime import datetime
import click
from tools import background, nasa_api
from tools.utils import parse_str_to_date
@click.group()
def nasa_background():
    # Root Click command group; the `update` command registers onto it below.
    # (Kept as a comment: a docstring here would change the CLI help output.)
    pass
@nasa_background.command()
@click.option("--date",
              default=None,
              help="Enter the date as a single string in YYYYMMDD or YYYY-MM-DD format." )
@click.option("--auto",
              is_flag=True,
              help="Disables prompts and sets the background automatically if this can successfully be completed." )
def update(date, auto):
    '''Get the newest NASA Picture of the Day and set it as background'''
    # Check if date is passed as argument, set to default (today) otherwise
    if date is None:
        date = datetime.now()
    else:
        date = parse_str_to_date(date)
    try:
        # Fetch and print the picture's metadata.
        # Use single-quoted keys inside the double-quoted f-string: nested
        # same-type quotes are only legal from Python 3.12 (PEP 701).
        meta_info = nasa_api.get_info(date)
        click.echo(f"Title: {meta_info['title']}\n")
        click.echo(meta_info['explanation'] + "\n")
        # Check if auto is selected, otherwise prompt user to set it as background
        if auto or click.confirm("Do you wish to download this image and set it as background?"):
            # Download and set the background
            file_path = nasa_api.download_image(date)
            background.change_background(file_path, auto)
    except KeyError:
        # The API response lacks the expected keys when no image exists.
        click.echo(f"Image not found for the selected date {date}. ")
    except Exception as e:
        # Broad catch at the CLI boundary: report and fall through cleanly.
        click.echo("Fatal error encountered, exiting program.")
        click.echo(e)
if __name__ == '__main__':
    # Invoke the Click command group when run as a script.
    nasa_background()
| from datetime import datetime
import click
from tools import background, nasa_api
from tools.utils import parse_str_to_date
@click.group()
def nasa_background():
pass
@nasa_background.command()
@click.option("--date",
default=None,
help="Enter the date as a single string in YYYYMMDD or YYYY-MM-DD format." )
@click.option("--auto",
is_flag=True,
help="Disables prompts and sets the background automatically if this can successfully be completed." )
def update(date, auto):
'''Get the newest NASA Picture of the Day and set it as background'''
# Check if date is passed as argument, set to default (today) otherwise
if date is None:
date = datetime.now()
else:
date = parse_str_to_date(date)
try:
# Download and print information about
meta_info = nasa_api.get_info(date)
click.echo(f"Title: {meta_info['title']}\n")
click.echo(meta_info['explanation'] + "\n")
# Check if auto is selected, otherwise prompt user to set it as background
if auto or click.confirm("Do you wish to download this image and set it as background?"):
# Download and set the background
file_path = nasa_api.download_image(date)
background.change_background(file_path, auto)
except KeyError:
click.echo(f"Image not found for the selected date {date}. ")
except Exception as e:
click.echo("Fatal error encountered, exiting program.")
click.echo(e)
if __name__ == '__main__':
nasa_background()
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.http import HttpResponseRedirect
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
# from django.contrib.auth.models import User
from .models import Student
from blog.views import get_college_ranking, get_student_ranking
from django.db import connection
from django.views.generic import (
DetailView,
CreateView,
UpdateView,
ListView,
)
class UserRegistration(CreateView):
    """Sign-up view for Students; authenticated users are sent to blog home."""
    model = Student
    form_class = UserRegisterForm

    def dispatch(self, request, *args, **kwargs):
        # A logged-in user has no reason to register again.
        if self.request.user.is_authenticated:
            return redirect('blog-home')
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['registration_form'] = UserRegisterForm()
        # Ranking helpers mutate the context dict in place.
        get_college_ranking(context)
        get_student_ranking(context)
        return context

    def post(self, request, *args, **kwargs):
        form = self.form_class(request.POST)
        if form.is_valid():
            form.save()
            return redirect('login')
        else:
            # Re-render with the bound form so field errors are displayed.
            messages.error(request, "There are some errors with your registration, please check below: ")
            return render(request, 'users/register.html', {'registration_form': form})
@method_decorator(login_required, name='dispatch')
class UserProfile(DetailView):
    """Profile page for a Student; POST toggles follow/unfollow."""
    model = Student
    context_object_name = 'user_object'

    def post(self, request, *args, **kwargs):
        # `user` is the profile being viewed; the requester (un)follows it.
        user = self.get_object()
        user_following = self.request.user.profile
        if request.POST.get('follow'):
            # Keep both sides of the follower/following relation in sync.
            user.profile.follower.add(user_following)
            user_following.following.add(user.profile)
            user_following.save()
            user.save()
        elif request.POST.get('unfollow'):
            user.profile.follower.remove(user_following)
            user_following.following.remove(user.profile)
            user.save()
            user_following.save()
        return HttpResponseRedirect(user.profile.get_absolute_url())

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(**kwargs)
        following = self.get_object().profile.following.all()
        followers = self.get_object().profile.follower.all()
        context['following'] = following
        context['followers'] = followers
        # Ranking helpers mutate the context dict in place.
        get_college_ranking(context)
        get_student_ranking(context)
        return context
class UserUpdateProfile(UserPassesTestMixin, UpdateView):
    """Let a student edit their own name fields and profile image/details."""
    model = Student
    user_details_form = UserUpdateForm
    context_object_name = 'user_object'
    fields = ['first_name', 'last_name']
    success_url = '/'

    def post(self, request, *args, **kwargs):
        # Call the parent before overriding to save the UserUpdateForm and the ProfileUpdate.
        # BUG FIX: super().post is already bound -- passing `self` explicitly
        # shifted every argument by one (request ended up inside *args).
        super().post(request, *args, **kwargs)
        p_form = ProfileUpdateForm(self.request.POST, self.request.FILES, instance=self.request.user.profile)
        if p_form.is_valid():
            p_form.save()
        # Single-quoted keys: nested double quotes inside a double-quoted
        # f-string require Python 3.12+ (PEP 701).
        return redirect(f"/users/{self.kwargs.get('pk')}/{self.kwargs.get('username')}")

    def get_context_data(self, **kwargs):
        data = super().get_context_data(**kwargs)
        p_form = ProfileUpdateForm(instance=self.request.user.profile)
        data['relevant_post'] = None
        data['p_form'] = p_form
        # Ranking helpers mutate the context dict in place.
        get_college_ranking(data)
        get_student_ranking(data)
        return data

    def test_func(self):
        # Only the profile's owner may edit it.
        return self.request.user == self.get_object()
class UserDetailView(DetailView):
    """Plain read-only detail page for a single Student."""
    model = Student
    template_name = 'users/user_detail.html'
class UserProfileFollowing(ListView):
    """List the profiles a given student is following."""
    model = Student
    template_name = 'users/users_following.html'

    def get_context_data(self, *, object_list=None, **kwargs):
        context = super().get_context_data(**kwargs)
        # ROBUSTNESS: .filter(...).first() returned None for an unknown pk,
        # crashing with AttributeError below; return a 404 instead.
        user = get_object_or_404(Student, id=self.kwargs['pk'])
        context['following_list'] = user.profile.following.all()
        # Ranking helpers mutate the context dict in place.
        get_college_ranking(context)
        get_student_ranking(context)
        return context
class UserProfileFollowers(ListView):
    """List the profiles that follow a given student."""
    model = Student
    template_name = 'users/user_followers.html'

    def get_context_data(self, *, object_list=None, **kwargs):
        context = super().get_context_data(**kwargs)
        # ROBUSTNESS: .filter(...).first() returned None for an unknown pk,
        # crashing with AttributeError below; return a 404 instead.
        user = get_object_or_404(Student, id=self.kwargs['pk'])
        context['followers_list'] = user.profile.follower.all()
        # Ranking helpers mutate the context dict in place.
        get_college_ranking(context)
        get_student_ranking(context)
        return context
| from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.http import HttpResponseRedirect
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
# from django.contrib.auth.models import User
from .models import Student
from blog.views import get_college_ranking, get_student_ranking
from django.db import connection
from django.views.generic import (
DetailView,
CreateView,
UpdateView,
ListView,
)
class UserRegistration(CreateView):
model = Student
form_class = UserRegisterForm
def dispatch(self, request, *args, **kwargs):
if self.request.user.is_authenticated:
return redirect('blog-home')
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['registration_form'] = UserRegisterForm()
get_college_ranking(context)
get_student_ranking(context)
return context
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
form.save()
return redirect('login')
else:
messages.error(request, "There are some errors with your registration, please check below: ")
return render(request, 'users/register.html', {'registration_form': form})
@method_decorator(login_required, name='dispatch')
class UserProfile(DetailView):
model = Student
context_object_name = 'user_object'
def post(self, request, *args, **kwargs):
user = self.get_object()
user_following = self.request.user.profile
if request.POST.get('follow'):
user.profile.follower.add(user_following)
user_following.following.add(user.profile)
user_following.save()
user.save()
elif request.POST.get('unfollow'):
user.profile.follower.remove(user_following)
user_following.following.remove(user.profile)
user.save()
user_following.save()
return HttpResponseRedirect(user.profile.get_absolute_url())
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(**kwargs)
following = self.get_object().profile.following.all()
followers = self.get_object().profile.follower.all()
context['following'] = following
context['followers'] = followers
get_college_ranking(context)
get_student_ranking(context)
return context
class UserUpdateProfile(UserPassesTestMixin, UpdateView):
    """Let a student edit their own name fields and profile image/details."""
    model = Student
    user_details_form = UserUpdateForm
    context_object_name = 'user_object'
    fields = ['first_name', 'last_name']
    success_url = '/'

    def post(self, request, *args, **kwargs):
        # Call the parent before overriding to save the UserUpdateForm and the ProfileUpdate.
        # BUG FIX: super().post is already bound -- passing `self` explicitly
        # shifted every argument by one (request ended up inside *args).
        super().post(request, *args, **kwargs)
        p_form = ProfileUpdateForm(self.request.POST, self.request.FILES, instance=self.request.user.profile)
        if p_form.is_valid():
            p_form.save()
        return redirect(f"/users/{self.kwargs.get('pk')}/{self.kwargs.get('username')}")

    def get_context_data(self, **kwargs):
        data = super().get_context_data(**kwargs)
        p_form = ProfileUpdateForm(instance=self.request.user.profile)
        data['relevant_post'] = None
        data['p_form'] = p_form
        # Ranking helpers mutate the context dict in place.
        get_college_ranking(data)
        get_student_ranking(data)
        return data

    def test_func(self):
        # Only the profile's owner may edit it.
        return self.request.user == self.get_object()
class UserDetailView(DetailView):
model = Student
template_name = 'users/user_detail.html'
class UserProfileFollowing(ListView):
model = Student
template_name = 'users/users_following.html'
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(**kwargs)
user = Student.objects.filter(id=self.kwargs['pk']).first()
following = user.profile.following.all()
context['following_list'] = following
get_college_ranking(context)
get_student_ranking(context)
return context
class UserProfileFollowers(ListView):
model = Student
template_name = 'users/user_followers.html'
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(**kwargs)
user = Student.objects.filter(id=self.kwargs['pk']).first()
followers = user.profile.follower.all()
context['followers_list'] = followers
get_college_ranking(context)
get_student_ranking(context)
return context
|
"""
Command-line user interface.
"""
import argparse
import sys
from map_machine import __version__
from map_machine.map_configuration import BuildingMode, DrawingMode, LabelMode
from map_machine.osm.osm_reader import STAGES_OF_DECAY
__author__ = "Sergey Vartanov"
__email__ = "me@enzet.ru"
BOXES: str = " ▏▎▍▌▋▊▉"
BOXES_LENGTH: int = len(BOXES)
COMMAND_LINES: dict[str, list[str]] = {
"render": ["render", "-b", "10.000,20.000,10.001,20.001"],
"render_with_tooltips": [
"render",
"-b",
"10.000,20.000,10.001,20.001",
"--tooltips",
],
"icons": ["icons"],
"mapcss": ["mapcss"],
"element": ["element", "--node", "amenity=bench,material=wood"],
"tile": ["tile", "--coordinates", "50.000,40.000"],
}
COMMANDS: list[str] = [
"render",
"server",
"tile",
"element",
"mapcss",
"icons",
"taginfo",
]
def parse_arguments(args: list[str]) -> argparse.Namespace:
    """Parse Map Machine command-line arguments.

    :param args: full argv-style list; args[0] is the program name and is
        skipped when parsing.
    """
    parser: argparse.ArgumentParser = argparse.ArgumentParser(
        description="Map Machine. OpenStreetMap renderer with custom icon set"
    )
    parser.add_argument(
        "-v",
        "--version",
        action="version",
        version="Map Machine " + __version__,
    )
    # One subcommand per entry in COMMANDS; each add_*_arguments helper
    # registers that subcommand's options.
    subparser = parser.add_subparsers(dest="command")
    render_parser = subparser.add_parser(
        "render",
        description="Render SVG map. Use --boundary-box to specify geo "
        "boundaries, --input to specify OSM XML or JSON input file, or "
        "--coordinates and --size to specify central point and resulting image "
        "size.",
        help="draw SVG map",
    )
    add_render_arguments(render_parser)
    add_map_arguments(render_parser)
    tile_parser = subparser.add_parser(
        "tile",
        description="Generate SVG and PNG 256 × 256 px tiles for slippy maps. "
        "You can use server command to run server in order to display "
        "generated tiles as a map (e.g. with Leaflet).",
        help="generate SVG and PNG tiles for slippy maps",
    )
    add_tile_arguments(tile_parser)
    add_map_arguments(tile_parser)
    add_server_arguments(
        subparser.add_parser(
            "server",
            description="Run in order to display generated tiles as a map "
            "(e.g. with Leaflet).",
            help="run tile server",
        )
    )
    add_element_arguments(
        subparser.add_parser(
            "element",
            description="Draw map element separately.",
            help="draw OSM element: node, way, relation",
        )
    )
    add_mapcss_arguments(
        subparser.add_parser(
            "mapcss",
            description="Write directory with MapCSS file and generated "
            "Röntgen icons.",
            help="write MapCSS file",
        )
    )
    # `icons` and `taginfo` take no extra options.
    subparser.add_parser(
        "icons",
        description="Generate Röntgen icons as a grid and as separate SVG "
        "icons",
        help="draw Röntgen icons",
    )
    subparser.add_parser(
        "taginfo",
        description="Generate JSON file for Taginfo project.",
        help="write Taginfo JSON file",
    )
    arguments: argparse.Namespace = parser.parse_args(args[1:])
    return arguments
def add_map_arguments(parser: argparse.ArgumentParser) -> None:
    """Register the options shared by the map-drawing commands."""
    building_modes: str = ", ".join(mode.value for mode in BuildingMode)
    parser.add_argument(
        "--buildings", metavar="<mode>", default="flat",
        choices=(mode.value for mode in BuildingMode),
        help="building drawing mode: " + building_modes,
    )
    drawing_modes: str = ", ".join(mode.value for mode in DrawingMode)
    parser.add_argument(
        "--mode", default="normal", metavar="<string>",
        choices=(mode.value for mode in DrawingMode),
        help="map drawing mode: " + drawing_modes,
    )
    parser.add_argument(
        "--overlap", dest="overlap", default=12, type=int, metavar="<integer>",
        help="how many pixels should be left around icons and text",
    )
    label_modes: str = ", ".join(mode.value for mode in LabelMode)
    parser.add_argument(
        "--labels", dest="label_mode", default="main", metavar="<string>",
        choices=(mode.value for mode in LabelMode),
        help="label drawing mode: " + label_modes,
    )
    parser.add_argument(
        "--level", default="overground",
        help="display only this floor level",
    )
    parser.add_argument(
        "--seed", default="", metavar="<string>",
        help="seed for random",
    )
    parser.add_argument(
        "--tooltips", action=argparse.BooleanOptionalAction, default=False,
        help="add tooltips with tags for icons in SVG files",
    )
    parser.add_argument(
        "--country", default="world",
        help="two-letter code (ISO 3166-1 alpha-2) of country, that should be "
        "used for location restrictions",
    )
    parser.add_argument(
        "--ignore-level-matching", action=argparse.BooleanOptionalAction,
        default=False,
        help="draw all map features ignoring the current level",
    )
    parser.add_argument(
        "--roofs", action=argparse.BooleanOptionalAction, default=True,
        help="draw building roofs",
    )
def add_tile_arguments(parser: argparse.ArgumentParser) -> None:
    """Register command-line options for the `tile` subcommand."""
    parser.add_argument(
        "-c", "--coordinates", metavar="<latitude>,<longitude>",
        help="coordinates of any location inside the tile",
    )
    parser.add_argument(
        "-t", "--tile", metavar="<zoom level>/<x>/<y>",
        help="tile specification",
    )
    parser.add_argument(
        "--cache", default="cache", metavar="<path>",
        help="path for temporary OSM files",
    )
    parser.add_argument(
        "-b", "--boundary-box", metavar="<lon1>,<lat1>,<lon2>,<lat2>",
        help="construct the minimum amount of tiles that cover the requested "
        "boundary box",
    )
    parser.add_argument(
        "-z", "--zoom", type=str, metavar="<range>", default="18",
        help="OSM zoom levels; can be list of numbers or ranges, e.g. `16-18`, "
        "`16,17,18`, or `16,18-20`",
    )
    parser.add_argument(
        "-i", "--input", dest="input_file_name", metavar="<path>",
        help="input OSM XML file name (if not specified, the file will be "
        "downloaded using OpenStreetMap API)",
    )
def add_server_arguments(parser: argparse.ArgumentParser) -> None:
    """Register command-line options for the tile `server` subcommand."""
    option_table = (
        ("--cache", dict(help="path for temporary OSM files",
                         default="cache", metavar="<path>")),
        ("--port", dict(help="port number", default=8080, type=int,
                        metavar="<integer>")),
    )
    for flag, options in option_table:
        parser.add_argument(flag, **options)
def add_element_arguments(parser: argparse.ArgumentParser) -> None:
    """Register the OSM element selectors for the `element` subcommand."""
    for short_flag, long_flag in (("-n", "--node"), ("-w", "--way"), ("-r", "--relation")):
        parser.add_argument(short_flag, long_flag)
def add_render_arguments(parser: argparse.ArgumentParser) -> None:
    """Register command-line options for the `render` subcommand."""
    parser.add_argument(
        "-i", "--input", dest="input_file_names", metavar="<path>", nargs="*",
        help="input XML file name or names (if not specified, file will be "
        "downloaded using OpenStreetMap API)",
    )
    parser.add_argument(
        "-o", "--output", dest="output_file_name", metavar="<path>",
        default="out/map.svg",
        help="output SVG file name",
    )
    parser.add_argument(
        "-b", "--boundary-box", metavar="<lon1>,<lat1>,<lon2>,<lat2>",
        help="geo boundary box; if the first value is negative, enclose the "
        "value with quotes and use space before `-`",
    )
    parser.add_argument(
        "--cache", default="cache", metavar="<path>",
        help="path for temporary OSM files",
    )
    parser.add_argument(
        "-z", "--zoom", type=float, metavar="<float>", default=18.0,
        help="OSM zoom level",
    )
    parser.add_argument(
        "-c", "--coordinates", metavar="<latitude>,<longitude>",
        help="coordinates of any location inside the tile",
    )
    parser.add_argument(
        "-s", "--size", metavar="<width>,<height>",
        help="resulted image size",
    )
def add_mapcss_arguments(parser: argparse.ArgumentParser) -> None:
    """Register command-line options for the `mapcss` subcommand."""
    parser.add_argument(
        "--icons", action=argparse.BooleanOptionalAction, default=True,
        help="add icons for nodes and areas",
    )
    parser.add_argument(
        "--ways", action=argparse.BooleanOptionalAction, default=False,
        help="add style for ways and relations",
    )
    selector_factor: int = len(STAGES_OF_DECAY) + 1
    parser.add_argument(
        "--lifecycle", action=argparse.BooleanOptionalAction, default=True,
        help="add icons for lifecycle tags; be careful: this will increase the "
        f"number of node and area selectors by {selector_factor} "
        f"times",
    )
def progress_bar(
    number: int, total: int, length: int = 20, step: int = 1000, text: str = ""
) -> None:
    """
    Draw progress bar using Unicode symbols.
    :param number: current value (-1 draws the final 100 % bar)
    :param total: maximum value
    :param length: progress bar length.
    :param step: frequency of progress bar updating (assuming that numbers go
        subsequently)
    :param text: short description
    """
    # Single quotes inside the double-quoted f-strings: nested same-type
    # quotes are only legal from Python 3.12 (PEP 701), so avoid them for
    # compatibility with earlier interpreters.
    if number == -1:
        sys.stdout.write(f"100 % {length * '█'}▏{text}\n")
    elif number % step == 0:
        ratio: float = number / total
        # Progress in eighth-of-a-cell units; the partial cell is drawn with
        # the corresponding character from BOXES.
        parts: int = int(ratio * length * BOXES_LENGTH)
        fill_length: int = int(parts / BOXES_LENGTH)
        box: str = BOXES[int(parts - fill_length * BOXES_LENGTH)]
        # "\033[F" moves the cursor back up so the next call overwrites.
        sys.stdout.write(
            f"{str(int(int(ratio * 1000.0) / 10.0)):>3} % "
            f"{fill_length * '█'}{box}"
            f"{int(length - fill_length - 1) * ' '}▏{text}\n\033[F"
        )
| """
Command-line user interface.
"""
import argparse
import sys
from map_machine import __version__
from map_machine.map_configuration import BuildingMode, DrawingMode, LabelMode
from map_machine.osm.osm_reader import STAGES_OF_DECAY
__author__ = "Sergey Vartanov"
__email__ = "me@enzet.ru"
BOXES: str = " ▏▎▍▌▋▊▉"
BOXES_LENGTH: int = len(BOXES)
COMMAND_LINES: dict[str, list[str]] = {
"render": ["render", "-b", "10.000,20.000,10.001,20.001"],
"render_with_tooltips": [
"render",
"-b",
"10.000,20.000,10.001,20.001",
"--tooltips",
],
"icons": ["icons"],
"mapcss": ["mapcss"],
"element": ["element", "--node", "amenity=bench,material=wood"],
"tile": ["tile", "--coordinates", "50.000,40.000"],
}
COMMANDS: list[str] = [
"render",
"server",
"tile",
"element",
"mapcss",
"icons",
"taginfo",
]
def parse_arguments(args: list[str]) -> argparse.Namespace:
"""Parse Map Machine command-line arguments."""
parser: argparse.ArgumentParser = argparse.ArgumentParser(
description="Map Machine. OpenStreetMap renderer with custom icon set"
)
parser.add_argument(
"-v",
"--version",
action="version",
version="Map Machine " + __version__,
)
subparser = parser.add_subparsers(dest="command")
render_parser = subparser.add_parser(
"render",
description="Render SVG map. Use --boundary-box to specify geo "
"boundaries, --input to specify OSM XML or JSON input file, or "
"--coordinates and --size to specify central point and resulting image "
"size.",
help="draw SVG map",
)
add_render_arguments(render_parser)
add_map_arguments(render_parser)
tile_parser = subparser.add_parser(
"tile",
description="Generate SVG and PNG 256 × 256 px tiles for slippy maps. "
"You can use server command to run server in order to display "
"generated tiles as a map (e.g. with Leaflet).",
help="generate SVG and PNG tiles for slippy maps",
)
add_tile_arguments(tile_parser)
add_map_arguments(tile_parser)
add_server_arguments(
subparser.add_parser(
"server",
description="Run in order to display generated tiles as a map "
"(e.g. with Leaflet).",
help="run tile server",
)
)
add_element_arguments(
subparser.add_parser(
"element",
description="Draw map element separately.",
help="draw OSM element: node, way, relation",
)
)
add_mapcss_arguments(
subparser.add_parser(
"mapcss",
description="Write directory with MapCSS file and generated "
"Röntgen icons.",
help="write MapCSS file",
)
)
subparser.add_parser(
"icons",
description="Generate Röntgen icons as a grid and as separate SVG "
"icons",
help="draw Röntgen icons",
)
subparser.add_parser(
"taginfo",
description="Generate JSON file for Taginfo project.",
help="write Taginfo JSON file",
)
arguments: argparse.Namespace = parser.parse_args(args[1:])
return arguments
def add_map_arguments(parser: argparse.ArgumentParser) -> None:
"""Add map-specific arguments."""
parser.add_argument(
"--buildings",
metavar="<mode>",
default="flat",
choices=(mode.value for mode in BuildingMode),
help="building drawing mode: "
+ ", ".join(mode.value for mode in BuildingMode),
)
parser.add_argument(
"--mode",
default="normal",
metavar="<string>",
choices=(mode.value for mode in DrawingMode),
help="map drawing mode: "
+ ", ".join(mode.value for mode in DrawingMode),
)
parser.add_argument(
"--overlap",
dest="overlap",
default=12,
type=int,
help="how many pixels should be left around icons and text",
metavar="<integer>",
)
parser.add_argument(
"--labels",
dest="label_mode",
default="main",
metavar="<string>",
choices=(mode.value for mode in LabelMode),
help="label drawing mode: "
+ ", ".join(mode.value for mode in LabelMode),
)
parser.add_argument(
"--level",
default="overground",
help="display only this floor level",
)
parser.add_argument(
"--seed",
default="",
help="seed for random",
metavar="<string>",
)
parser.add_argument(
"--tooltips",
help="add tooltips with tags for icons in SVG files",
action=argparse.BooleanOptionalAction,
default=False,
)
parser.add_argument(
"--country",
help="two-letter code (ISO 3166-1 alpha-2) of country, that should be "
"used for location restrictions",
default="world",
)
parser.add_argument(
"--ignore-level-matching",
help="draw all map features ignoring the current level",
action=argparse.BooleanOptionalAction,
default=False,
)
parser.add_argument(
"--roofs",
help="draw building roofs",
action=argparse.BooleanOptionalAction,
default=True,
)
def add_tile_arguments(parser: argparse.ArgumentParser) -> None:
    """Register command-line options of the tile command on *parser*."""
    add = parser.add_argument

    # Tile selection: either a point inside it, or an explicit z/x/y triple.
    add(
        "-c",
        "--coordinates",
        metavar="<latitude>,<longitude>",
        help="coordinates of any location inside the tile",
    )
    add(
        "-t",
        "--tile",
        metavar="<zoom level>/<x>/<y>",
        help="tile specification",
    )
    add(
        "--cache",
        default="cache",
        metavar="<path>",
        help="path for temporary OSM files",
    )
    add(
        "-b",
        "--boundary-box",
        metavar="<lon1>,<lat1>,<lon2>,<lat2>",
        help="construct the minimum amount of tiles that cover the requested "
        "boundary box",
    )
    add(
        "-z",
        "--zoom",
        type=str,
        default="18",
        metavar="<range>",
        help="OSM zoom levels; can be list of numbers or ranges, e.g. `16-18`, "
        "`16,17,18`, or `16,18-20`",
    )
    add(
        "-i",
        "--input",
        dest="input_file_name",
        metavar="<path>",
        help="input OSM XML file name (if not specified, the file will be "
        "downloaded using OpenStreetMap API)",
    )
def add_server_arguments(parser: argparse.ArgumentParser) -> None:
    """Register command-line options of the tile server command on *parser*."""
    parser.add_argument(
        "--cache",
        default="cache",
        metavar="<path>",
        help="path for temporary OSM files",
    )
    parser.add_argument(
        "--port",
        type=int,
        default=8080,
        metavar="<integer>",
        help="port number",
    )
def add_element_arguments(parser: argparse.ArgumentParser) -> None:
    """Register node/way/relation selectors for the element command."""
    for short_flag, long_flag in (
        ("-n", "--node"),
        ("-w", "--way"),
        ("-r", "--relation"),
    ):
        parser.add_argument(short_flag, long_flag)
def add_render_arguments(parser: argparse.ArgumentParser) -> None:
    """Register command-line options of the render command on *parser*."""
    add = parser.add_argument

    add(
        "-i",
        "--input",
        dest="input_file_names",
        nargs="*",
        metavar="<path>",
        help="input XML file name or names (if not specified, file will be "
        "downloaded using OpenStreetMap API)",
    )
    add(
        "-o",
        "--output",
        dest="output_file_name",
        default="out/map.svg",
        metavar="<path>",
        help="output SVG file name",
    )
    add(
        "-b",
        "--boundary-box",
        metavar="<lon1>,<lat1>,<lon2>,<lat2>",
        help="geo boundary box; if the first value is negative, enclose the "
        "value with quotes and use space before `-`",
    )
    add(
        "--cache",
        default="cache",
        metavar="<path>",
        help="path for temporary OSM files",
    )
    add(
        "-z",
        "--zoom",
        type=float,
        default=18.0,
        metavar="<float>",
        help="OSM zoom level",
    )
    add(
        "-c",
        "--coordinates",
        metavar="<latitude>,<longitude>",
        help="coordinates of any location inside the tile",
    )
    add(
        "-s",
        "--size",
        metavar="<width>,<height>",
        help="resulted image size",
    )
def add_mapcss_arguments(parser: argparse.ArgumentParser) -> None:
    """Register command-line options of the MapCSS generation command."""
    add = parser.add_argument

    add(
        "--icons",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="add icons for nodes and areas",
    )
    add(
        "--ways",
        action=argparse.BooleanOptionalAction,
        default=False,
        help="add style for ways and relations",
    )
    # Lifecycle selectors multiply the rule count by every decay stage.
    add(
        "--lifecycle",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="add icons for lifecycle tags; be careful: this will increase the "
        f"number of node and area selectors by {len(STAGES_OF_DECAY) + 1} "
        f"times",
    )
def progress_bar(
    number: int, total: int, length: int = 20, step: int = 1000, text: str = ""
) -> None:
    """
    Draw a progress bar using Unicode block symbols.

    :param number: current value; pass ``-1`` to print the final full bar
    :param total: maximum value
    :param length: progress bar length.
    :param step: frequency of progress bar updating (assuming that numbers go
        subsequently)
    :param text: short description
    """
    if number == -1:
        # Final call: emit a 100 % bar and keep the line (no cursor return).
        sys.stdout.write(f"100 % {length * '█'}▏{text}\n")
        return
    if number % step != 0:
        # Only redraw every `step` calls to keep terminal output cheap.
        return
    ratio: float = number / total
    # Sub-character resolution: each cell is split into BOXES_LENGTH eighths.
    parts: int = int(ratio * length * BOXES_LENGTH)
    whole_cells: int = int(parts / BOXES_LENGTH)
    partial_box: str = BOXES[int(parts - whole_cells * BOXES_LENGTH)]
    percent: str = str(int(int(ratio * 1000.0) / 10.0))
    # `\033[F` moves the cursor back up so the next call overwrites this line.
    sys.stdout.write(
        f"{percent:>3} % "
        f"{whole_cells * '█'}{partial_box}"
        f"{int(length - whole_cells - 1) * ' '}▏{text}\n\033[F"
    )
|
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.
"""
import collections
import gc
import inspect
import math
import os
import re
import shutil
import time
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_distributed_available,
is_torch_tpu_available,
)
from .modeling_utils import PreTrainedModel, unwrap_model
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
from .utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
# Feature-detection flags and conditional imports: each optional backend is
# imported only when its availability check passes, so the module loads on
# minimal installs.
_is_native_amp_available = False

DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback

if is_in_notebook():
    # Use the notebook-friendly progress widget instead of tqdm output.
    from .utils.notebook import NotebookProgressCallback

    DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback

if is_apex_available():
    from apex import amp

# Native AMP (torch.cuda.amp) exists from torch 1.6 onwards.
if version.parse(torch.__version__) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

if is_datasets_available():
    import datasets

if is_torch_tpu_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.parallel_loader as pl

if is_fairscale_available():
    import fairscale
    from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
    from fairscale.optim import OSS
    from fairscale.optim.grad_scaler import ShardedGradScaler

    # FullyShardedDataParallel requires fairscale >= 0.3; None marks "unavailable".
    if version.parse(fairscale.__version__) >= version.parse("0.3"):
        from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
    else:
        FullyShardedDDP = None

# `dist` resolves to SageMaker's distributed backend when present, otherwise
# to vanilla torch.distributed — the rest of the module uses it uniformly.
if is_sagemaker_distributed_available():
    import smdistributed.dataparallel.torch.distributed as dist
    from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
    import torch.distributed as dist

if TYPE_CHECKING:
    # Only needed for type annotations; avoids a hard optuna dependency.
    import optuna

logger = logging.get_logger(__name__)
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the
maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyper parameters (such as layer count, sizes of inner
layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False` .
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
"""
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
    self,
    model: Union[PreTrainedModel, torch.nn.Module] = None,
    args: TrainingArguments = None,
    data_collator: Optional[DataCollator] = None,
    train_dataset: Optional[Dataset] = None,
    eval_dataset: Optional[Dataset] = None,
    tokenizer: Optional["PreTrainedTokenizerBase"] = None,
    model_init: Callable[[], PreTrainedModel] = None,
    compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
    callbacks: Optional[List[TrainerCallback]] = None,
    optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
    """
    Initialize all trainer state: model placement, sharded/distributed setup,
    data handling, mixed-precision configuration, callbacks and bookkeeping.
    See the class docstring for the meaning of each argument.  The statement
    order below matters (seeding before model creation, memory tracking first,
    device setup before model placement).
    """
    if args is None:
        # No arguments given: fall back to defaults writing into ./tmp_trainer.
        output_dir = "tmp_trainer"
        logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
        args = TrainingArguments(output_dir=output_dir)
    self.args = args
    # Seed must be set before instantiating the model when using model
    set_seed(self.args.seed)
    self.hp_name = None
    self.deepspeed = None
    self.is_in_train = False

    # memory metrics - must set up as early as possible
    self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
    self._memory_tracker.start()

    # force device and distributed setup init explicitly
    args._setup_devices

    if model is None:
        if model_init is not None:
            self.model_init = model_init
            model = self.call_model_init()
        else:
            raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
    else:
        if model_init is not None:
            # Both given: `model_init` wins on train(); warn about the conflict.
            warnings.warn(
                "`Trainer` requires either a `model` or `model_init` argument, but not both. "
                "`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
                FutureWarning,
            )
        self.model_init = model_init

    # Model parallelism: the model's layers already live on several devices.
    if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
        self.is_model_parallel = True
    else:
        self.is_model_parallel = False

    # Setup Sharded DDP training
    self.sharded_ddp = None
    if len(args.sharded_ddp) > 0:
        if args.deepspeed:
            raise ValueError(
                "Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
            )

        if args.local_rank == -1:
            raise ValueError("Using sharded DDP only works in distributed training.")
        elif not is_fairscale_available():
            raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
        elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
            raise ImportError(
                "Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
                f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
            )
        elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
            self.sharded_ddp = ShardedDDPOption.SIMPLE
        elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
            self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
        elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
            self.sharded_ddp = ShardedDDPOption.ZERO_DP_3

    # one place to sort out whether to place the model on device or not
    self.place_model_on_device = args.place_model_on_device
    if (
        self.is_model_parallel
        or (args.deepspeed and args.do_train)
        or (args.fp16_full_eval and not args.do_train)
        or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
    ):
        self.place_model_on_device = False

    default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
    self.data_collator = data_collator if data_collator is not None else default_collator
    self.train_dataset = train_dataset
    self.eval_dataset = eval_dataset
    self.tokenizer = tokenizer

    # postpone switching model to cuda when:
    # 1. MP - since we are trying to fit a much bigger than 1 gpu model
    # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
    #    and we only use deepspeed for training at the moment
    if self.place_model_on_device:
        model = model.to(args.device)

    # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
    if self.is_model_parallel:
        self.args._n_gpu = 1

    # later use `self.model is self.model_wrapped` to check if it's wrapped or not
    self.model_wrapped = model
    self.model = model

    self.compute_metrics = compute_metrics
    self.optimizer, self.lr_scheduler = optimizers
    if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
        raise RuntimeError(
            "Passing a `model_init` is incompatible with providing the `optimizers` argument."
            "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
        )
    default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
    callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
    self.callback_handler = CallbackHandler(
        callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
    )
    self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)

    # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
    self._loggers_initialized = False
    # Create output directory if needed
    if self.is_world_process_zero():
        os.makedirs(self.args.output_dir, exist_ok=True)
    if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
        raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")

    if args.max_steps > 0:
        logger.info("max_steps is given, it will override any value given in num_train_epochs")

    # Enforce rules on using datasets with no __len__
    if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
        raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
    if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
        raise ValueError("eval_dataset must implement __len__")

    self._signature_columns = None
    if is_datasets_available():
        # Drop dataset columns not accepted by the model's forward() signature.
        if isinstance(train_dataset, datasets.Dataset):
            self._remove_unused_columns(self.train_dataset, description="training")
        if isinstance(eval_dataset, datasets.Dataset):
            self._remove_unused_columns(self.eval_dataset, description="evaluation")

    # Mixed precision setup
    self.use_apex = False
    self.use_amp = False
    self.fp16_backend = None

    if args.fp16:
        if args.fp16_backend == "auto":
            # Prefer native AMP (torch >= 1.6) over apex when available.
            self.fp16_backend = "amp" if _is_native_amp_available else "apex"
        else:
            self.fp16_backend = args.fp16_backend
        logger.info(f"Using {self.fp16_backend} fp16 backend")

    if args.fp16 and not args.deepspeed:  # deepspeed manages its own fp16
        if self.fp16_backend == "amp":
            self.use_amp = True
            self.scaler = ShardedGradScaler() if self.sharded_ddp is not None else torch.cuda.amp.GradScaler()
        else:
            if not is_apex_available():
                raise ImportError(
                    "Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
                )
            self.use_apex = True

    # Label smoothing
    if self.args.label_smoothing_factor != 0:
        self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
    else:
        self.label_smoother = None

    self.state = TrainerState()
    self.control = TrainerControl()
    # Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
    # state at each call to self.log.
    self._total_flos = None
    self.hp_search_backend = None
    self.use_tune_checkpoints = False
    default_label_names = (
        ["start_positions", "end_positions"]
        if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
        else ["labels"]
    )
    self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
    self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)

    # very last
    self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
    """
    Register *callback* with the trainer's callback handler.

    Args:
        callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
            Either a :class:`~transformer.TrainerCallback` class (the handler
            will instantiate it) or an already-built instance.
    """
    self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
    """
    Remove *callback* from the handler's list and return it.

    Args:
        callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
            A :class:`~transformer.TrainerCallback` class (pops the first
            instance of that class found) or an instance.

    Returns:
        :class:`~transformer.TrainerCallback`: the removed callback, or
        :obj:`None` when no match is found (no error is raised).
    """
    return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
    """
    Remove *callback* from the handler's list without returning it.

    Args:
        callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
            A :class:`~transformer.TrainerCallback` class (removes the first
            instance of that class found) or an instance.
    """
    self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
    """
    Restrict *dataset* to the columns accepted by ``model.forward`` (plus label
    columns), so the data collator never sees extraneous fields.

    Args:
        dataset: the :obj:`datasets.Dataset` whose format is updated in place.
        description: optional human-readable set name ("training",
            "evaluation", ...) used only in the log message.

    Fix: the log message nested double quotes inside a double-quoted f-string
    (``{", ".join(...)}``), which is a syntax error on every Python release
    before 3.12; single quotes produce identical output everywhere.
    """
    if not self.args.remove_unused_columns:
        return
    if self._signature_columns is None:
        # Inspect model forward signature to keep only the arguments it accepts.
        signature = inspect.signature(self.model.forward)
        self._signature_columns = list(signature.parameters.keys())
        # Labels may be named label or label_ids, the default data collator handles that.
        self._signature_columns += ["label", "label_ids"]
    columns = [k for k in self._signature_columns if k in dataset.column_names]
    ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
    if len(ignored_columns) > 0:
        dset_description = "" if description is None else f"in the {description} set "
        logger.info(
            f"The following columns {dset_description} don't have a corresponding argument in "
            f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
        )
    dataset.set_format(type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"])
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
    """Build the sampler for the training dataloader, or ``None`` for
    iterable / unsized datasets (which cannot be sampled by index)."""
    dataset = self.train_dataset
    if isinstance(dataset, torch.utils.data.IterableDataset) or not isinstance(
        dataset, collections.abc.Sized
    ):
        return None

    # Gather the number of processes and this process index.
    if self.args.parallel_mode == ParallelMode.TPU:
        world_size, rank = xm.xrt_world_size(), xm.get_ordinal()
    elif self.args.parallel_mode in (
        ParallelMode.DISTRIBUTED,
        ParallelMode.SAGEMAKER_DISTRIBUTED,
    ):
        world_size, rank = dist.get_world_size(), dist.get_rank()
    else:
        world_size, rank = 1, 0

    # Build the sampler.
    if self.args.group_by_length:
        # Batch together samples of similar length to limit padding waste.
        model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
        if world_size <= 1:
            return LengthGroupedSampler(
                dataset, self.args.train_batch_size, model_input_name=model_input_name
            )
        return DistributedLengthGroupedSampler(
            dataset,
            self.args.train_batch_size,
            num_replicas=world_size,
            rank=rank,
            model_input_name=model_input_name,
        )
    if world_size <= 1:
        return RandomSampler(dataset)
    return DistributedSampler(dataset, num_replicas=world_size, rank=rank)
def get_train_dataloader(self) -> DataLoader:
    """
    Returns the training :class:`~torch.utils.data.DataLoader`.

    Uses no sampler when :obj:`self.train_dataset` does not implement
    :obj:`__len__`, otherwise a (possibly distributed) random sampler.

    Subclass and override this method if you want to inject some custom behavior.
    """
    if self.train_dataset is None:
        raise ValueError("Trainer: training requires a train_dataset.")
    sampler = self._get_train_sampler()

    return DataLoader(
        self.train_dataset,
        sampler=sampler,
        batch_size=self.args.train_batch_size,
        collate_fn=self.data_collator,
        drop_last=self.args.dataloader_drop_last,
        num_workers=self.args.dataloader_num_workers,
        pin_memory=self.args.dataloader_pin_memory,
    )
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
    """Pick the sequential sampler matching the current distributed setup."""
    if is_torch_tpu_available():
        # Each TPU core evaluates its own contiguous shard.
        return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
    if self.args.local_rank != -1:
        return SequentialDistributedSampler(eval_dataset)
    return SequentialSampler(eval_dataset)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
    """
    Returns the evaluation :class:`~torch.utils.data.DataLoader`.
    Subclass and override this method if you want to inject some custom behavior.
    Args:
        eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
            If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
            accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
    """
    if eval_dataset is None and self.eval_dataset is None:
        raise ValueError("Trainer: evaluation requires an eval_dataset.")
    elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
        raise ValueError("eval_dataset must implement __len__")
    elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
        # Only fires for an explicitly passed eval_dataset; when falling back
        # to self.eval_dataset below, its columns were already removed in
        # __init__.
        self._remove_unused_columns(eval_dataset, description="evaluation")
    eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
    eval_sampler = self._get_eval_sampler(eval_dataset)
    return DataLoader(
        eval_dataset,
        sampler=eval_sampler,
        batch_size=self.args.eval_batch_size,
        collate_fn=self.data_collator,
        drop_last=self.args.dataloader_drop_last,
        num_workers=self.args.dataloader_num_workers,
        pin_memory=self.args.dataloader_pin_memory,
    )
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
    """
    Returns the test :class:`~torch.utils.data.DataLoader`.

    Subclass and override this method if you want to inject some custom behavior.

    Args:
        test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
            The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
            ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
    """
    if not isinstance(test_dataset, collections.abc.Sized):
        raise ValueError("test_dataset must implement __len__")
    if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
        self._remove_unused_columns(test_dataset, description="test")
    sampler = self._get_eval_sampler(test_dataset)

    # We use the same batch_size as for eval.
    return DataLoader(
        test_dataset,
        sampler=sampler,
        batch_size=self.args.eval_batch_size,
        collate_fn=self.data_collator,
        drop_last=self.args.dataloader_drop_last,
        pin_memory=self.args.dataloader_pin_memory,
    )
def create_optimizer_and_scheduler(self, num_training_steps: int):
    """
    Setup the optimizer and the learning rate scheduler.

    We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
    Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.

    Args:
        num_training_steps (:obj:`int`): total number of optimizer steps; used
            to size the warmup and the decay portion of the schedule.

    Fix: removed a dead ``optimizer_cls = Adafactor if self.args.adafactor
    else AdamW`` assignment that was immediately overwritten by both branches
    of the following ``if``/``else``.
    """
    if self.optimizer is None:
        # No weight decay on biases and LayerNorm weights.
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                "weight_decay": self.args.weight_decay,
            },
            {
                "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.args.adafactor:
            optimizer_cls = Adafactor
            optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
        else:
            optimizer_cls = AdamW
            optimizer_kwargs = {
                "betas": (self.args.adam_beta1, self.args.adam_beta2),
                "eps": self.args.adam_epsilon,
            }
        optimizer_kwargs["lr"] = self.args.learning_rate
        if self.sharded_ddp == ShardedDDPOption.SIMPLE:
            # OSS shards optimizer state across data-parallel workers.
            self.optimizer = OSS(
                params=optimizer_grouped_parameters,
                optim=optimizer_cls,
                **optimizer_kwargs,
            )
        else:
            self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

    if self.lr_scheduler is None:
        # Explicit warmup_steps wins; otherwise derive it from warmup_ratio.
        warmup_steps = (
            self.args.warmup_steps
            if self.args.warmup_steps > 0
            else math.ceil(num_training_steps * self.args.warmup_ratio)
        )
        self.lr_scheduler = get_scheduler(
            self.args.lr_scheduler_type,
            self.optimizer,
            num_warmup_steps=warmup_steps,
            num_training_steps=num_training_steps,
        )
def num_examples(self, dataloader: DataLoader) -> int:
    """
    Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.

    Will raise an exception if the underlying dataset does not implement method :obj:`__len__`
    """
    return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
    """
    HP search setup code: copy the hyperparameters suggested by *trial* onto
    ``self.args``, casting each value to the type of the existing field.

    Args:
        trial: an optuna :obj:`Trial` (run through ``self.hp_space``) or a
            plain parameter dict for the Ray backend.

    Raises:
        AttributeError: if a suggested key has no matching
            :obj:`TrainingArguments` field.

    Fix: ``logger.info("Trial:", trial.params)`` passed a lazy %%-style
    argument to a message with no placeholder, which makes the logging module
    raise an internal formatting error instead of printing the params.
    """
    self._trial = trial
    if self.hp_search_backend is None or trial is None:
        return
    params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
    for key, value in params.items():
        if not hasattr(self.args, key):
            raise AttributeError(
                f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
            )
        old_attr = getattr(self.args, key, None)
        # Casting value to the proper type
        if old_attr is not None:
            value = type(old_attr)(value)
        setattr(self.args, key, value)
    if self.hp_search_backend == HPSearchBackend.OPTUNA:
        logger.info("Trial: %s", trial.params)
def _report_to_hp_search(
    self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
    """Report the current objective value to the active hyperparameter-search
    backend (optuna pruning or Ray Tune), if any."""
    if self.hp_search_backend is None or trial is None:
        return
    # compute_objective may mutate its input, hence the copy.
    self.objective = self.compute_objective(metrics.copy())
    if self.hp_search_backend == HPSearchBackend.OPTUNA:
        import optuna

        trial.report(self.objective, epoch)
        if trial.should_prune():
            # Let optuna abort an unpromising trial early.
            raise optuna.TrialPruned()
    elif self.hp_search_backend == HPSearchBackend.RAY:
        from ray import tune

        if self.control.should_save:
            self._tune_save_checkpoint()
        tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
    """Persist model, trainer state, optimizer and scheduler inside a Ray Tune
    checkpoint directory (no-op unless ``use_tune_checkpoints`` is set)."""
    from ray import tune

    if not self.use_tune_checkpoints:
        return
    with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
        target_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
        self.save_model(target_dir)
        if self.is_world_process_zero():
            # Only the main process writes the auxiliary state files.
            self.state.save_to_json(os.path.join(target_dir, "trainer_state.json"))
            torch.save(self.optimizer.state_dict(), os.path.join(target_dir, "optimizer.pt"))
            torch.save(self.lr_scheduler.state_dict(), os.path.join(target_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
    """Instantiate a fresh model via ``self.model_init``, forwarding *trial*
    when the factory declares exactly one parameter."""
    argcount = len(inspect.signature(self.model_init).parameters)
    if argcount == 0:
        model = self.model_init()
    elif argcount == 1:
        model = self.model_init(trial)
    else:
        raise RuntimeError("model_init should have 0 or 1 argument.")

    if model is None:
        raise RuntimeError("model_init should not return None.")
    return model
def _wrap_model(self, model, training=True):
    """Wrap ``model`` in the distributed / mixed-precision container matching the run config.

    Ordering matters: apex AMP initialization happens before DataParallel, and both happen
    before any distributed wrapper. When deepspeed is active the already-prepared engine is
    returned unchanged. When ``training`` is False only apex/DataParallel wrapping applies.

    Args:
        model: the model to wrap (typically ``self.model_wrapped``).
        training: whether the wrapped model will be used for training (enables DDP wrapping).

    Returns:
        The wrapped model, or ``self.deepspeed`` when deepspeed handles wrapping itself.
    """
    # already initialized its own DDP and AMP
    if self.deepspeed:
        return self.deepspeed
    # Mixed precision training with apex (torch < 1.6)
    if self.use_apex and training:
        model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
    # Multi-gpu training (should be after apex fp16 initialization)
    if self.args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Note: in torch.distributed mode, there's no point in wrapping the model
    # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
    if not training:
        return model
    # Distributed training (should be after apex fp16 initialization)
    if self.sharded_ddp is not None:
        # Sharded DDP!
        if self.sharded_ddp == ShardedDDPOption.SIMPLE:
            model = ShardedDDP(model, self.optimizer)
        else:
            mixed_precision = self.args.fp16
            cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
            zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
            # XXX: Breaking the self.model convention but I see no way around it for now.
            self.model = model = FullyShardedDDP(
                model, mixed_precision=mixed_precision, reshard_after_forward=zero_3, cpu_offload=cpu_offload
            ).to(self.args.device)
    elif is_sagemaker_distributed_available():
        model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
    elif self.args.local_rank != -1:
        if self.args.ddp_find_unused_parameters is not None:
            find_unused_parameters = self.args.ddp_find_unused_parameters
        elif isinstance(model, PreTrainedModel):
            # find_unused_parameters breaks checkpointing as per
            # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
            find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
        else:
            find_unused_parameters = True
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[self.args.local_rank],
            output_device=self.args.local_rank,
            find_unused_parameters=find_unused_parameters,
        )
    return model
def train(
    self,
    resume_from_checkpoint: Optional[Union[str, bool]] = None,
    trial: Union["optuna.Trial", Dict[str, Any]] = None,
    **kwargs,
):
    """
    Main training entry point.

    Args:
        resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
            If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
            :class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
            `args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
            training will resume from the model/optimizer/scheduler states loaded here.
        trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
            The trial run or the hyperparameter dictionary for hyperparameter search.
        kwargs:
            Additional keyword arguments used to hide deprecated arguments
    """
    # memory metrics - must set up as early as possible
    self._memory_tracker.start()

    self.is_in_train = True

    if "model_path" in kwargs:
        resume_from_checkpoint = kwargs.pop("model_path")
        warnings.warn(
            "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
            "instead.",
            FutureWarning,
        )
    if len(kwargs) > 0:
        # Fix: the message previously read "received got", and nested double quotes inside a
        # double-quoted f-string (a syntax error before Python 3.12).
        raise TypeError(f"train() received unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
    # This might change the seed so needs to run first.
    self._hp_search_setup(trial)

    # Model re-init
    model_reloaded = False
    if self.model_init is not None:
        # Seed must be set before instantiating the model when using model_init.
        set_seed(self.args.seed)
        self.model = self.call_model_init(trial)
        model_reloaded = True
        # Reinitializes optimizer and scheduler
        self.optimizer, self.lr_scheduler = None, None

    # Load potential model checkpoint
    if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
        resume_from_checkpoint = get_last_checkpoint(self.args.output_dir)
        if resume_from_checkpoint is None:
            raise ValueError(f"No valid checkpoint found in output directory ({self.args.output_dir})")

    if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
        # Fix: the log message previously contained a stray closing parenthesis.
        logger.info(f"Loading model from {resume_from_checkpoint}.")
        if isinstance(self.model, PreTrainedModel):
            self.model = self.model.from_pretrained(resume_from_checkpoint)
            model_reloaded = True
        else:
            state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))
            self.model.load_state_dict(state_dict)

    # If model was re-initialized, put it on the right device and update self.model_wrapped
    if model_reloaded:
        if self.place_model_on_device:
            self.model = self.model.to(self.args.device)
        self.model_wrapped = self.model

    # Keeping track whether we can can len() on the dataset or not
    train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)

    # Data loader and number of training steps
    train_dataloader = self.get_train_dataloader()

    # Setting up training control variables:
    # number of training epochs: num_train_epochs
    # number of training steps per epoch: num_update_steps_per_epoch
    # total number of training steps to execute: max_steps
    if train_dataset_is_sized:
        num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
        num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
        if self.args.max_steps > 0:
            max_steps = self.args.max_steps
            num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
                self.args.max_steps % num_update_steps_per_epoch > 0
            )
        else:
            max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
            num_train_epochs = math.ceil(self.args.num_train_epochs)
    else:
        # see __init__. max_steps is set when the dataset has no __len__
        max_steps = self.args.max_steps
        num_train_epochs = 1
        num_update_steps_per_epoch = max_steps

    delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
    if self.args.deepspeed:
        model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
        self.model = model.module
        self.model_wrapped = model  # will get further wrapped in DDP
        self.deepspeed = model  # DeepSpeedEngine object
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
    elif not delay_optimizer_creation:
        self.create_optimizer_and_scheduler(num_training_steps=max_steps)

    self.state = TrainerState()
    self.state.is_hyper_param_search = trial is not None

    # Check if saved optimizer or scheduler states exist
    self._load_optimizer_and_scheduler(resume_from_checkpoint)

    model = self._wrap_model(self.model_wrapped)

    # for the rest of this function `model` is the outside model, whether it was wrapped or not
    if model is not self.model:
        self.model_wrapped = model

    if delay_optimizer_creation:
        self.create_optimizer_and_scheduler(num_training_steps=max_steps)

    # important: at this point:
    # self.model is the Transformers Model
    # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.

    # Train!
    if is_torch_tpu_available():
        world_size = xm.xrt_world_size()
    elif self.args.local_rank != -1:
        world_size = dist.get_world_size()
    else:
        world_size = 1

    total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size
    num_examples = (
        self.num_examples(train_dataloader)
        if train_dataset_is_sized
        else total_train_batch_size * self.args.max_steps
    )

    logger.info("***** Running training *****")
    logger.info(f" Num examples = {num_examples}")
    logger.info(f" Num Epochs = {num_train_epochs}")
    logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
    logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
    logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
    logger.info(f" Total optimization steps = {max_steps}")

    self.state.epoch = 0
    start_time = time.time()
    epochs_trained = 0
    steps_trained_in_current_epoch = 0

    # Check if continuing training from a checkpoint
    if resume_from_checkpoint is not None and os.path.isfile(
        os.path.join(resume_from_checkpoint, "trainer_state.json")
    ):
        self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
        epochs_trained = self.state.global_step // num_update_steps_per_epoch
        if not self.args.ignore_data_skip:
            steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
            steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
        else:
            steps_trained_in_current_epoch = 0

        logger.info(" Continuing training from checkpoint, will skip to saved global_step")
        logger.info(f" Continuing training from epoch {epochs_trained}")
        logger.info(f" Continuing training from global step {self.state.global_step}")
        if not self.args.ignore_data_skip:
            logger.info(
                f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
                "batches in the first epoch."
            )

    # Update the references
    self.callback_handler.model = self.model
    self.callback_handler.optimizer = self.optimizer
    self.callback_handler.lr_scheduler = self.lr_scheduler
    self.callback_handler.train_dataloader = train_dataloader
    self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
    self.state.trial_params = hp_params(trial) if trial is not None else None
    # This should be the same if the state has been saved but in case the training arguments changed, it's safer
    # to set this after the load.
    self.state.max_steps = max_steps
    self.state.num_train_epochs = num_train_epochs
    self.state.is_local_process_zero = self.is_local_process_zero()
    self.state.is_world_process_zero = self.is_world_process_zero()

    # tr_loss is a tensor to avoid synchronization of TPUs through .item()
    tr_loss = torch.tensor(0.0).to(self.args.device)
    # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
    self._total_loss_scalar = 0.0
    self._globalstep_last_logged = self.state.global_step
    self._total_flos = self.state.total_flos
    model.zero_grad()

    self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)

    # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
    if not self.args.ignore_data_skip:
        for epoch in range(epochs_trained):
            # We just need to begin an iteration to create the randomization of the sampler.
            for _ in train_dataloader:
                break

    for epoch in range(epochs_trained, num_train_epochs):
        if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
            train_dataloader.sampler.set_epoch(epoch)

        if is_torch_tpu_available():
            parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
                self.args.device
            )
            epoch_iterator = parallel_loader
        else:
            epoch_iterator = train_dataloader

        # Reset the past mems state at the beginning of each epoch if necessary.
        if self.args.past_index >= 0:
            self._past = None

        steps_in_epoch = (
            len(epoch_iterator)
            if train_dataset_is_sized
            else self.args.max_steps * self.args.gradient_accumulation_steps
        )
        self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)

        for step, inputs in enumerate(epoch_iterator):

            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue

            if (step + 1) % self.args.gradient_accumulation_steps == 0:
                self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)

            if (
                ((step + 1) % self.args.gradient_accumulation_steps != 0)
                and self.args.local_rank != -1
                and self.args._no_sync_in_gradient_accumulation
            ):
                # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
                with model.no_sync():
                    tr_loss += self.training_step(model, inputs)
            else:
                tr_loss += self.training_step(model, inputs)
            self._total_flos += float(self.floating_point_ops(inputs))

            # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
            if self.deepspeed:
                self.deepspeed.step()

            if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
                # last step in epoch but step is always smaller than gradient_accumulation_steps
                steps_in_epoch <= self.args.gradient_accumulation_steps
                and (step + 1) == steps_in_epoch
            ):
                # Gradient clipping
                if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
                    # deepspeed does its own clipping

                    if self.use_amp:
                        # AMP: gradients need unscaling
                        self.scaler.unscale_(self.optimizer)

                    if hasattr(self.optimizer, "clip_grad_norm"):
                        # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
                        self.optimizer.clip_grad_norm(self.args.max_grad_norm)
                    elif hasattr(model, "clip_grad_norm_"):
                        # Some models (like FullyShardedDDP) have a specific way to do gradient clipping
                        model.clip_grad_norm_(self.args.max_grad_norm)
                    else:
                        # Revert to normal clipping otherwise, handling Apex or full precision
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
                            self.args.max_grad_norm,
                        )

                # Optimizer step
                if self.deepspeed:
                    pass  # called outside the loop
                elif is_torch_tpu_available():
                    xm.optimizer_step(self.optimizer)
                elif self.use_amp:
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
                else:
                    self.optimizer.step()

                if not self.deepspeed:
                    self.lr_scheduler.step()
                model.zero_grad()
                self.state.global_step += 1
                self.state.epoch = epoch + (step + 1) / steps_in_epoch
                self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)

                self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)

            if self.control.should_epoch_stop or self.control.should_training_stop:
                break

        self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
        self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)

        if self.args.tpu_metrics_debug or self.args.debug:
            if is_torch_tpu_available():
                # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                xm.master_print(met.metrics_report())
            else:
                logger.warning(
                    "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
                    "configured. Check your training configuration if this is unexpected."
                )
        if self.control.should_training_stop:
            break

    if self.args.past_index and hasattr(self, "_past"):
        # Clean the state at the end of training
        delattr(self, "_past")

    logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
    if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
        logger.info(
            f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
        )
        if isinstance(self.model, PreTrainedModel):
            self.model = self.model.from_pretrained(self.state.best_model_checkpoint)
            if self.place_model_on_device:
                self.model = self.model.to(self.args.device)
        else:
            state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
            self.model.load_state_dict(state_dict)

        if self.deepspeed:
            self.deepspeed.load_checkpoint(
                self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
            )

    metrics = speed_metrics("train", start_time, self.state.max_steps)
    if self._total_flos is not None:
        self.store_flos()
        metrics["total_flos"] = self.state.total_flos
    self.log(metrics)

    self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
    # add remaining tr_loss
    self._total_loss_scalar += tr_loss.item()

    if self.deepspeed:
        # free up any memory that might be useful for eval
        self.deepspeed = None
        self.optimizer = None
        self.lr_scheduler = None
        self.model_wrapped = self.model
        gc.collect()  # force memory release
        # to restore normal behavior outside of train replay the place_model_on_device logic w/o deepspeed
        self.place_model_on_device = self.args.place_model_on_device
        if self.is_model_parallel:
            self.place_model_on_device = False

    self.is_in_train = False

    self._memory_tracker.stop_and_update_metrics(metrics)

    return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
logs["learning_rate"] = self._get_learning_rate()
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _save_checkpoint(self, model, trial, metrics=None):
    """Write a full checkpoint: model weights, optimizer/scheduler state and trainer state.

    During a hyperparameter search, checkpoints are nested under a per-run directory.
    Also updates ``state.best_metric`` / ``state.best_model_checkpoint`` from ``metrics``
    when ``args.metric_for_best_model`` is set, and rotates old checkpoints afterwards.

    Args:
        model: the (possibly wrapped) model being trained; saving goes through ``self.save_model``.
        trial: active HP-search trial (optuna trial or param dict), or ``None``.
        metrics: latest evaluation metrics used for best-checkpoint tracking, or ``None``.
    """
    # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
    # want to save except FullyShardedDDP.
    # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"

    # Save model checkpoint
    checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"

    if self.hp_search_backend is not None and trial is not None:
        # Derive a run identifier from the search backend to nest this run's checkpoints.
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            run_id = trial.number
        else:
            from ray import tune

            run_id = tune.get_trial_id()
        run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
        run_dir = os.path.join(self.args.output_dir, run_name)
    else:
        run_dir = self.args.output_dir
        self.store_flos()

    output_dir = os.path.join(run_dir, checkpoint_folder)
    self.save_model(output_dir)
    if self.deepspeed:
        self.deepspeed.save_checkpoint(output_dir)

    # Save optimizer and scheduler
    if self.sharded_ddp == ShardedDDPOption.SIMPLE:
        self.optimizer.consolidate_state_dict()

    if is_torch_tpu_available():
        xm.rendezvous("saving_optimizer_states")
        xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
        # Warnings raised while saving the scheduler are captured and re-emitted.
        with warnings.catch_warnings(record=True) as caught_warnings:
            xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            reissue_pt_warnings(caught_warnings)
    elif self.is_world_process_zero() and not self.deepspeed:
        # deepspeed.save_checkpoint above saves model/optim/sched
        torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
        with warnings.catch_warnings(record=True) as caught_warnings:
            torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
        reissue_pt_warnings(caught_warnings)

    # Determine the new best metric / best model checkpoint
    if metrics is not None and self.args.metric_for_best_model is not None:
        metric_to_check = self.args.metric_for_best_model
        if not metric_to_check.startswith("eval_"):
            metric_to_check = f"eval_{metric_to_check}"
        metric_value = metrics[metric_to_check]

        operator = np.greater if self.args.greater_is_better else np.less
        if (
            self.state.best_metric is None
            or self.state.best_model_checkpoint is None
            or operator(metric_value, self.state.best_metric)
        ):
            self.state.best_metric = metric_value
            self.state.best_model_checkpoint = output_dir

    # Save the Trainer state
    if self.is_world_process_zero():
        self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))

    # Maybe delete some older checkpoints.
    if self.is_world_process_zero():
        self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
def _load_optimizer_and_scheduler(self, checkpoint):
    """If optimizer and scheduler states exist, load them.

    Args:
        checkpoint: directory expected to contain ``optimizer.pt`` and ``scheduler.pt``
            (and possibly a deepspeed checkpoint); ``None`` disables loading entirely.
    """
    if checkpoint is None:
        return

    # Both files must be present; otherwise the optimizer/scheduler keep their fresh state.
    if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
        os.path.join(checkpoint, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        if is_torch_tpu_available():
            # On TPU we have to take some extra precautions to properly load the states on the right device.
            optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
            # Warnings raised while loading the scheduler state are captured and re-emitted.
            with warnings.catch_warnings(record=True) as caught_warnings:
                lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
            reissue_pt_warnings(caught_warnings)

            xm.send_cpu_data_to_device(optimizer_state, self.args.device)
            xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)

            self.optimizer.load_state_dict(optimizer_state)
            self.lr_scheduler.load_state_dict(lr_scheduler_state)
        else:
            self.optimizer.load_state_dict(
                torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=self.args.device)
            )
            with warnings.catch_warnings(record=True) as caught_warnings:
                self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
            reissue_pt_warnings(caught_warnings)

    if self.deepspeed:
        # Not sure how to check if there is a saved deepspeed checkpoint, but since it just return None if it fails to find a deepspeed checkpoint this is sort of a check-n-load function
        self.deepspeed.load_checkpoint(checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)
def hyperparameter_search(
    self,
    hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
    compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
    n_trials: int = 20,
    direction: str = "minimize",
    backend: Optional[Union["str", HPSearchBackend]] = None,
    hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
    **kwargs,
) -> BestRun:
    """
    Launch a hyperparameter search with ``optuna`` or ``Ray Tune``. The optimized quantity is the
    value returned by :obj:`compute_objective` (by default the evaluation loss when no other metric
    is provided, the sum of all metrics otherwise).

    .. warning::

        Requires the :class:`~transformers.Trainer` to have been created with a ``model_init`` so
        the model can be re-instantiated for each run. This is incompatible with the
        ``optimizers`` argument; subclass and override
        :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for a custom
        optimizer/scheduler.

    Args:
        hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
            Function defining the search space; defaults to the chosen backend's default space.
        compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
            Maps the metrics dict to the value being optimized; defaults to
            :func:`~transformers.trainer_utils.default_compute_objective`.
        n_trials (:obj:`int`, `optional`, defaults to 20):
            The number of trial runs to test.
        direction (:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
            :obj:`"minimize"` (e.g. for a validation loss) or :obj:`"maximize"` (for metrics).
        backend (:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
            Search backend; defaults to whichever of optuna/Ray Tune is installed (optuna wins
            when both are).
        kwargs:
            Forwarded to :obj:`optuna.create_study` or :obj:`ray.tune.run`.

    Returns:
        :class:`transformers.trainer_utils.BestRun`: All the information about the best run.
    """
    if backend is None:
        backend = default_hp_search_backend()
        if backend is None:
            raise RuntimeError(
                "At least one of optuna or ray should be installed. "
                "To install optuna run `pip install optuna`."
                "To install ray run `pip install ray[tune]`."
            )
    backend = HPSearchBackend(backend)
    if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
        raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
    if backend == HPSearchBackend.RAY and not is_ray_tune_available():
        raise RuntimeError(
            "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
        )
    self.hp_search_backend = backend

    if self.model_init is None:
        raise RuntimeError(
            "To use hyperparameter search, you need to pass your model through a model_init function."
        )

    # Fill in backend defaults for anything the caller did not supply.
    self.hp_space = hp_space if hp_space is not None else default_hp_space[backend]
    self.hp_name = hp_name
    self.compute_objective = compute_objective if compute_objective is not None else default_compute_objective

    search_fn = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
    best_run = search_fn(self, n_trials, direction, **kwargs)

    self.hp_search_backend = None
    return best_run
def log(self, logs: Dict[str, float]) -> None:
    """
    Record :obj:`logs` in the trainer state history and notify the registered callbacks.

    Subclass and override this method to inject custom behavior. Note that ``logs`` is
    mutated in place: the current (rounded) epoch is added under ``"epoch"``.

    Args:
        logs (:obj:`Dict[str, float]`):
            The values to log.
    """
    epoch = self.state.epoch
    if epoch is not None:
        logs["epoch"] = round(epoch, 2)
    history_entry = dict(logs, step=self.state.global_step)
    self.state.log_history.append(history_entry)
    self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
    """
    Perform a training step on a batch of inputs: forward pass, loss scaling for multi-gpu /
    gradient accumulation, and the backward pass appropriate to the mixed-precision setup.

    Subclass and override to inject custom behavior.

    Args:
        model (:obj:`nn.Module`):
            The model to train.
        inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
            The inputs and targets of the model, unpacked as keyword arguments. Most models
            expect the targets under :obj:`labels`.

    Return:
        :obj:`torch.Tensor`: The (detached) training loss on this batch.
    """
    model.train()
    inputs = self._prepare_inputs(inputs)

    # Forward pass, under autocast when native AMP is enabled.
    if self.use_amp:
        with autocast():
            loss = self.compute_loss(model, inputs)
    else:
        loss = self.compute_loss(model, inputs)

    if self.args.n_gpu > 1:
        loss = loss.mean()  # mean() to average on multi-gpu parallel training

    accumulation = self.args.gradient_accumulation_steps
    if accumulation > 1 and not self.deepspeed:
        # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
        loss = loss / accumulation

    # Backward pass, dispatched on the AMP / apex / deepspeed configuration.
    if self.use_amp:
        self.scaler.scale(loss).backward()
    elif self.use_apex:
        with amp.scale_loss(loss, self.optimizer) as scaled_loss:
            scaled_loss.backward()
    elif self.deepspeed:
        # loss gets scaled under gradient_accumulation_steps in deepspeed
        loss = self.deepspeed.backward(loss)
    else:
        loss.backward()

    return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
    """
    How the loss is computed by Trainer. By default, all models return the loss in the first
    element (or under the ``"loss"`` key for dict outputs). When a label smoother is configured
    and ``inputs`` contains ``"labels"``, the labels are popped and the smoothed loss is used.

    Subclass and override for custom behavior.
    """
    smoothing = self.label_smoother is not None and "labels" in inputs
    labels = inputs.pop("labels") if smoothing else None

    outputs = model(**inputs)
    # Save past state if it exists
    # TODO: this needs to be fixed and made cleaner later.
    if self.args.past_index >= 0:
        self._past = outputs[self.args.past_index]

    if labels is None:
        # We don't use .loss here since the model may return tuples instead of ModelOutput.
        loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
    else:
        loss = self.label_smoother(outputs, labels)

    if return_outputs:
        return loss, outputs
    return loss
def is_local_process_zero(self) -> bool:
    """
    Whether or not this process is the main process on its machine (e.g., on one machine if
    training in a distributed fashion on several machines).
    """
    if is_torch_tpu_available():
        return xm.is_master_ordinal(local=True)
    return self.args.local_rank in [-1, 0]
def is_world_process_zero(self) -> bool:
    """
    Whether or not this process is the global main process (only :obj:`True` for one process
    when training in a distributed fashion on several machines).
    """
    if is_torch_tpu_available():
        return xm.is_master_ordinal(local=False)
    return self.args.local_rank == -1 or dist.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
    """
    Will save the model, so you can reload it using :obj:`from_pretrained()`.

    Will only save from the main process (TPU saving goes through :meth:`_save_tpu`).
    """
    if is_torch_tpu_available():
        self._save_tpu(output_dir)
    elif self.is_world_process_zero():
        self._save(output_dir)

    # In distributed mode, wait until the save is done before any process moves on.
    if self.args.local_rank != -1:
        dist.barrier()
def _save_tpu(self, output_dir: Optional[str] = None):
    """Save model, tokenizer and training args when running on TPU via ``xm.save``.

    Only the master ordinal creates the directory and writes ``training_args.bin``;
    processes meet at ``xm.rendezvous`` before the model weights are written.
    """
    output_dir = output_dir if output_dir is not None else self.args.output_dir
    logger.info("Saving model checkpoint to %s", output_dir)

    if xm.is_master_ordinal():
        os.makedirs(output_dir, exist_ok=True)
        torch.save(self.args, os.path.join(output_dir, "training_args.bin"))

    # Save a trained model and configuration using `save_pretrained()`.
    # They can then be reloaded using `from_pretrained()`
    xm.rendezvous("saving_checkpoint")
    if not isinstance(self.model, PreTrainedModel):
        # The model may be wrapped; try saving through the unwrapped pretrained model first.
        if isinstance(unwrap_model(self.model), PreTrainedModel):
            unwrap_model(self.model).save_pretrained(
                output_dir,
                save_config=self.is_world_process_zero(),
                state_dict=self.model.state_dict(),
                save_function=xm.save,
            )
        else:
            logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
            state_dict = self.model.state_dict()
            xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
    else:
        self.model.save_pretrained(output_dir, save_config=self.is_world_process_zero(), save_function=xm.save)
    if self.tokenizer is not None and self.is_world_process_zero():
        self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None):
    """Write model weights, tokenizer files and training args to ``output_dir``.

    The caller guarantees this only runs on process zero, so no rank check is done here.
    """
    if output_dir is None:
        output_dir = self.args.output_dir
    os.makedirs(output_dir, exist_ok=True)
    logger.info("Saving model checkpoint to %s", output_dir)

    # Save a trained model and configuration using `save_pretrained()` so it can be
    # reloaded later with `from_pretrained()`.
    if isinstance(self.model, PreTrainedModel):
        self.model.save_pretrained(output_dir)
    elif isinstance(unwrap_model(self.model), PreTrainedModel):
        # Not a PreTrainedModel itself, but wraps one: save via the unwrapped model.
        unwrap_model(self.model).save_pretrained(output_dir, state_dict=self.model.state_dict())
    else:
        logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
        torch.save(self.model.state_dict(), os.path.join(output_dir, WEIGHTS_NAME))

    if self.tokenizer is not None:
        self.tokenizer.save_pretrained(output_dir)

    # Good practice: save your training arguments together with the trained model
    torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
    """Persist the accumulated floating-point-operation count into the trainer state.

    In distributed runs the per-process counts are aggregated via
    ``distributed_broadcast_scalars`` before being summed; otherwise the local count is
    stored directly. No-op when no count has been accumulated yet.
    """
    if self._total_flos is None:
        return
    if self.args.local_rank != -1:
        self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
    else:
        self.state.total_flos = self._total_flos
def _sorted_checkpoints(
    self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
    """Return checkpoint directories under ``output_dir`` sorted oldest-first.

    The sort key is file mtime when ``use_mtime`` is True, otherwise the step number parsed
    from the directory name. If the current best model checkpoint is among them, it is moved
    to the end of the list so checkpoint rotation never deletes it.

    Args:
        output_dir: directory to scan for ``{checkpoint_prefix}-*`` entries.
        checkpoint_prefix: checkpoint directory name prefix.
        use_mtime: sort by modification time instead of parsed step number.

    Returns:
        List of checkpoint paths, oldest first (best checkpoint last when tracked).
    """
    ordering_and_checkpoint_path = []

    glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]

    for path in glob_checkpoints:
        if use_mtime:
            ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
        else:
            regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
            if regex_match and regex_match.groups():
                ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))

    checkpoints_sorted = sorted(ordering_and_checkpoint_path)
    checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
    # Make sure we don't delete the best model.
    if self.state.best_model_checkpoint is not None:
        best_model_path = str(Path(self.state.best_model_checkpoint))
        # Fix: guard against a best checkpoint that is no longer on disk (e.g. removed
        # manually) — the unguarded `.index()` call would raise ValueError in that case.
        if best_model_path in checkpoints_sorted:
            best_model_index = checkpoints_sorted.index(best_model_path)
            checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
                checkpoints_sorted[-1],
                checkpoints_sorted[best_model_index],
            )
    return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
ignore_keys (:obj:`Lst[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
output = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(output.metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
ignore_keys (:obj:`Lst[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
.. note::
If your predictions or labels have different sequence length (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
output = self.prediction_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output
    def prediction_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> PredictionOutput:
        """
        Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
        Works both with or without labels.

        Args:
            dataloader (:obj:`DataLoader`): Loader over a sized dataset (must implement ``__len__``).
            description (:obj:`str`): Label used in log messages (e.g. "Evaluation").
            prediction_loss_only (:obj:`bool`, `optional`): When :obj:`True`, only the loss is gathered;
                defaults to :obj:`self.args.prediction_loss_only`.
            ignore_keys (:obj:`List[str]`, `optional`): Model-output keys to drop when gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`): Prefix applied to metric names.

        Returns:
            :obj:`PredictionOutput` with predictions, label ids and metrics (entries may be :obj:`None`
            depending on ``prediction_loss_only`` and whether labels/metrics are available).
        """
        if not isinstance(dataloader.dataset, collections.abc.Sized):
            raise ValueError("dataset must implement __len__")
        prediction_loss_only = (
            prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
        )
        if self.args.deepspeed and not self.args.do_train:
            # no harm, but flagging to the user that deepspeed config is ignored for eval
            # flagging only for when --do_train wasn't passed as only then it's redundant
            logger.info("Detected the deepspeed argument but it will not be used for evaluation")
        model = self._wrap_model(self.model, training=False)
        # if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
        # ``train`` is running, half it first and then put on device
        if not self.is_in_train and self.args.fp16_full_eval:
            model = model.half().to(self.args.device)
        batch_size = dataloader.batch_size
        num_examples = self.num_examples(dataloader)
        logger.info("***** Running %s *****", description)
        logger.info("  Num examples = %d", num_examples)
        logger.info("  Batch size = %d", batch_size)
        # *_host tensors buffer per-step results on device until they are gathered/numpified below.
        losses_host: torch.Tensor = None
        preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
        labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
        world_size = 1
        if is_torch_tpu_available():
            world_size = xm.xrt_world_size()
        elif self.args.local_rank != -1:
            world_size = dist.get_world_size()
        world_size = max(1, world_size)
        # Gatherers reassemble per-process shards into full arrays in dataset order.
        eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
        if not prediction_loss_only:
            preds_gatherer = DistributedTensorGatherer(world_size, num_examples)
            labels_gatherer = DistributedTensorGatherer(world_size, num_examples)
        model.eval()
        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
        if self.args.past_index >= 0:
            self._past = None
        self.callback_handler.eval_dataloader = dataloader
        for step, inputs in enumerate(dataloader):
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
            if loss is not None:
                # Repeat the scalar loss once per example in the batch so gathering stays aligned.
                losses = loss.repeat(batch_size)
                losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
            if logits is not None:
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
            if labels is not None:
                labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
                eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
                if not prediction_loss_only:
                    preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
                    labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
                # Set back to None to begin a new accumulation
                losses_host, preds_host, labels_host = None, None, None
        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of the evaluation loop
            delattr(self, "_past")
        # Gather all remaining tensors and put them back on the CPU
        eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
        if not prediction_loss_only:
            preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
            labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
        eval_loss = eval_losses_gatherer.finalize()
        preds = preds_gatherer.finalize() if not prediction_loss_only else None
        label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
        if self.compute_metrics is not None and preds is not None and label_ids is not None:
            metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
        else:
            metrics = {}
        if eval_loss is not None:
            metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """
        Perform an evaluation step on :obj:`model` using obj:`inputs`.

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to evaluate.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (:obj:`bool`):
                Whether or not to return the loss only.
            ignore_keys (:obj:`Lst[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.

        Return:
            Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
            labels (each being optional).
        """
        # An example "has labels" only when every configured label column is present and non-None.
        has_labels = all(inputs.get(k) is not None for k in self.label_names)
        inputs = self._prepare_inputs(inputs)
        if ignore_keys is None:
            # Default the ignored keys from the model config when available.
            if hasattr(self.model, "config"):
                ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
            else:
                ignore_keys = []
        # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
        if has_labels:
            labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
            if len(labels) == 1:
                labels = labels[0]
        else:
            labels = None
        with torch.no_grad():
            if has_labels:
                # With labels we can compute the loss; logits are everything else the model returned.
                loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
                loss = loss.mean().detach()
                if isinstance(outputs, dict):
                    logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
                else:
                    logits = outputs[1:]
            else:
                loss = None
                if self.use_amp:
                    with autocast():
                        outputs = model(**inputs)
                else:
                    outputs = model(**inputs)
                if isinstance(outputs, dict):
                    logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
                else:
                    logits = outputs
                # TODO: this needs to be fixed and made cleaner later.
                if self.args.past_index >= 0:
                    self._past = outputs[self.args.past_index - 1]
        if prediction_loss_only:
            return (loss, None, None)
        logits = nested_detach(logits)
        if len(logits) == 1:
            logits = logits[0]
        return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.
"""
import collections
import gc
import inspect
import math
import os
import re
import shutil
import time
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_distributed_available,
is_torch_tpu_available,
)
from .modeling_utils import PreTrainedModel, unwrap_model
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
from .utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
# Flipped to True below when this torch build (>= 1.6) ships native AMP (torch.cuda.amp).
_is_native_amp_available = False

# Callbacks every Trainer gets by default; the progress callback is swapped for a
# notebook-friendly variant when running inside Jupyter.
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
    from .utils.notebook import NotebookProgressCallback
    DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
# Optional backends: imported only when installed so the module stays importable without them.
if is_apex_available():
    from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast
if is_datasets_available():
    import datasets
if is_torch_tpu_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
    import fairscale
    from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
    from fairscale.optim import OSS
    from fairscale.optim.grad_scaler import ShardedGradScaler
    # FullyShardedDataParallel only exists from fairscale 0.3 on; None marks it unavailable.
    if version.parse(fairscale.__version__) >= version.parse("0.3"):
        from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
    else:
        FullyShardedDDP = None
# `dist` points at the SageMaker distributed backend when present, plain torch.distributed otherwise.
if is_sagemaker_distributed_available():
    import smdistributed.dataparallel.torch.distributed as dist
    from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
    import torch.distributed as dist
if TYPE_CHECKING:
    import optuna

logger = logging.get_logger(__name__)
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the
maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyper parameters (such as layer count, sizes of inner
layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False` .
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
"""
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
    def __init__(
        self,
        model: Union[PreTrainedModel, torch.nn.Module] = None,
        args: TrainingArguments = None,
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Dataset] = None,
        tokenizer: Optional["PreTrainedTokenizerBase"] = None,
        model_init: Callable[[], PreTrainedModel] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
        callbacks: Optional[List[TrainerCallback]] = None,
        optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
    ):
        """
        Set up the trainer: training arguments, model placement, sharded-DDP / mixed-precision
        configuration, data handling, callbacks and internal state. See the class docstring for
        the meaning of each argument. Raises :obj:`RuntimeError` when neither (or both) of
        ``model`` / ``model_init`` with ``optimizers`` are usable together, and :obj:`ValueError` /
        :obj:`ImportError` for invalid sharded-DDP or fp16 setups.
        """
        if args is None:
            output_dir = "tmp_trainer"
            logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
            args = TrainingArguments(output_dir=output_dir)
        self.args = args
        # Seed must be set before instantiating the model when using model_init
        set_seed(self.args.seed)
        self.hp_name = None
        self.deepspeed = None
        self.is_in_train = False
        # memory metrics - must set up as early as possible
        self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
        self._memory_tracker.start()
        # force device and distributed setup init explicitly
        args._setup_devices
        if model is None:
            if model_init is not None:
                self.model_init = model_init
                model = self.call_model_init()
            else:
                raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
        else:
            if model_init is not None:
                warnings.warn(
                    "`Trainer` requires either a `model` or `model_init` argument, but not both. "
                    "`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
                    FutureWarning,
                )
                self.model_init = model_init
        # Model-parallel models manage their own GPUs, so the trainer must not move them around.
        if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
            self.is_model_parallel = True
        else:
            self.is_model_parallel = False
        # Setup Sharded DDP training
        self.sharded_ddp = None
        if len(args.sharded_ddp) > 0:
            if args.deepspeed:
                raise ValueError(
                    "Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
                )
            if args.local_rank == -1:
                raise ValueError("Using sharded DDP only works in distributed training.")
            elif not is_fairscale_available():
                raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
            elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
                raise ImportError(
                    "Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
                    f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
                )
            elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
                self.sharded_ddp = ShardedDDPOption.SIMPLE
            elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
                self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
            elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
                self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
        # one place to sort out whether to place the model on device or not
        self.place_model_on_device = args.place_model_on_device
        if (
            self.is_model_parallel
            or (args.deepspeed and args.do_train)
            or (args.fp16_full_eval and not args.do_train)
            or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
        ):
            self.place_model_on_device = False
        default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
        self.data_collator = data_collator if data_collator is not None else default_collator
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
        self.tokenizer = tokenizer
        # postpone switching model to cuda when:
        # 1. MP - since we are trying to fit a much bigger than 1 gpu model
        # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
        #    and we only use deepspeed for training at the moment
        if self.place_model_on_device:
            model = model.to(args.device)
        # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
        if self.is_model_parallel:
            self.args._n_gpu = 1
        # later use `self.model is self.model_wrapped` to check if it's wrapped or not
        self.model_wrapped = model
        self.model = model
        self.compute_metrics = compute_metrics
        self.optimizer, self.lr_scheduler = optimizers
        if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
            raise RuntimeError(
                "Passing a `model_init` is incompatible with providing the `optimizers` argument."
                "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
            )
        default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
        callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
        self.callback_handler = CallbackHandler(
            callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
        )
        self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
        # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
        self._loggers_initialized = False
        # Create output directory if needed
        if self.is_world_process_zero():
            os.makedirs(self.args.output_dir, exist_ok=True)
        if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
            raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
        if args.max_steps > 0:
            logger.info("max_steps is given, it will override any value given in num_train_epochs")
        # Enforce rules on using datasets with no __len__
        if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
            raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
        if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
            raise ValueError("eval_dataset must implement __len__")
        self._signature_columns = None
        # Drop dataset columns the model's forward() does not accept (datasets.Dataset only).
        if is_datasets_available():
            if isinstance(train_dataset, datasets.Dataset):
                self._remove_unused_columns(self.train_dataset, description="training")
            if isinstance(eval_dataset, datasets.Dataset):
                self._remove_unused_columns(self.eval_dataset, description="evaluation")
        # Mixed precision setup
        self.use_apex = False
        self.use_amp = False
        self.fp16_backend = None
        if args.fp16:
            if args.fp16_backend == "auto":
                self.fp16_backend = "amp" if _is_native_amp_available else "apex"
            else:
                self.fp16_backend = args.fp16_backend
            logger.info(f"Using {self.fp16_backend} fp16 backend")
        if args.fp16 and not args.deepspeed:  # deepspeed manages its own fp16
            if self.fp16_backend == "amp":
                self.use_amp = True
                self.scaler = ShardedGradScaler() if self.sharded_ddp is not None else torch.cuda.amp.GradScaler()
            else:
                if not is_apex_available():
                    raise ImportError(
                        "Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
                    )
                self.use_apex = True
        # Label smoothing
        if self.args.label_smoothing_factor != 0:
            self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
        else:
            self.label_smoother = None
        self.state = TrainerState()
        self.control = TrainerControl()
        # Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
        # state at each call to self.log.
        self._total_flos = None
        self.hp_search_backend = None
        self.use_tune_checkpoints = False
        # Question-answering models predict span boundaries rather than a `labels` column.
        default_label_names = (
            ["start_positions", "end_positions"]
            if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
            else ["labels"]
        )
        self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
        self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
        # very last
        self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
"""
Add a callback to the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.
If the callback is not found, returns :obj:`None` (and no error is raised).
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will pop the first member of that class found in the list of callbacks.
Returns:
:class:`~transformer.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
self._signature_columns += ["label", "label_ids"]
columns = [k for k in self._signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
if len(ignored_columns) > 0:
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description} don't have a corresponding argument in "
f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
)
dataset.set_format(type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"])
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Gather the number of processes and this process index.
if self.args.parallel_mode == ParallelMode.TPU:
num_processes = xm.xrt_world_size()
process_index = xm.get_ordinal()
elif (
self.args.parallel_mode == ParallelMode.DISTRIBUTED
or self.args.parallel_mode == ParallelMode.SAGEMAKER_DISTRIBUTED
):
num_processes = dist.get_world_size()
process_index = dist.get_rank()
else:
num_processes = 1
process_index = 0
# Build the sampler.
if self.args.group_by_length:
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if num_processes <= 1:
return LengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, model_input_name=model_input_name
)
else:
return DistributedLengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
num_replicas=num_processes,
rank=process_index,
model_input_name=model_input_name,
)
else:
if num_processes <= 1:
return RandomSampler(self.train_dataset)
else:
return DistributedSampler(self.train_dataset, num_replicas=num_processes, rank=process_index)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = self._get_train_sampler()
return DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
if is_torch_tpu_available():
return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(eval_dataset, description="evaluation")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
self._remove_unused_columns(test_dataset, description="test")
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
"""
if self.optimizer is None:
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer_cls = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if self.lr_scheduler is None:
warmup_steps = (
self.args.warmup_steps
if self.args.warmup_steps > 0
else math.ceil(num_training_steps * self.args.warmup_ratio)
)
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_training_steps,
)
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
Will raise an exception if the underlying dataset dese not implement method :obj:`__len__`
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
""" HP search setup code """
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
    def _report_to_hp_search(
        self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
    ):
        """
        Report the objective computed from ``metrics`` to the active hyperparameter-search backend.

        For optuna, reports to the trial and raises :obj:`optuna.TrialPruned` when the trial should be pruned.
        For Ray Tune, optionally saves a checkpoint first, then reports the objective and metrics.
        No-op when no search backend/trial is active.
        """
        if self.hp_search_backend is None or trial is None:
            return
        # compute_objective may pop keys from its input, hence the copy.
        self.objective = self.compute_objective(metrics.copy())
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            import optuna
            trial.report(self.objective, epoch)
            if trial.should_prune():
                raise optuna.TrialPruned()
        elif self.hp_search_backend == HPSearchBackend.RAY:
            from ray import tune
            if self.control.should_save:
                self._tune_save_checkpoint()
            tune.report(objective=self.objective, **metrics)
    def _tune_save_checkpoint(self):
        """
        Save a checkpoint inside the directory managed by Ray Tune for the current step.

        No-op unless ``self.use_tune_checkpoints`` is set. Optimizer/scheduler/trainer state are only written by
        the world process zero.
        """
        from ray import tune
        if not self.use_tune_checkpoints:
            return
        # tune.checkpoint_dir provides the per-step directory Ray associates with this trial.
        with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
            output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
            self.save_model(output_dir)
            if self.is_world_process_zero():
                self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
                torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
    def _wrap_model(self, model, training=True):
        """
        Wrap ``model`` for the current hardware/parallelism setup (apex AMP, DataParallel, sharded DDP or
        DistributedDataParallel) and return the wrapped module.

        For evaluation (``training=False``) only AMP/DataParallel wrapping is applied — no DDP. The wrapping
        order below is deliberate: apex initialization must happen before any multi-GPU wrapper.
        """
        # already initialized its own DDP and AMP
        if self.deepspeed:
            return self.deepspeed
        # Mixed precision training with apex (torch < 1.6)
        if self.use_apex and training:
            model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
        # Multi-gpu training (should be after apex fp16 initialization)
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
        if not training:
            return model
        # Distributed training (should be after apex fp16 initialization)
        if self.sharded_ddp is not None:
            # Sharded DDP!
            if self.sharded_ddp == ShardedDDPOption.SIMPLE:
                model = ShardedDDP(model, self.optimizer)
            else:
                mixed_precision = self.args.fp16
                cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
                zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
                # XXX: Breaking the self.model convention but I see no way around it for now.
                self.model = model = FullyShardedDDP(
                    model, mixed_precision=mixed_precision, reshard_after_forward=zero_3, cpu_offload=cpu_offload
                ).to(self.args.device)
        elif is_sagemaker_distributed_available():
            model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
        elif self.args.local_rank != -1:
            if self.args.ddp_find_unused_parameters is not None:
                find_unused_parameters = self.args.ddp_find_unused_parameters
            elif isinstance(model, PreTrainedModel):
                # find_unused_parameters breaks checkpointing as per
                # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
                find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
            else:
                find_unused_parameters = True
            model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.args.local_rank],
                output_device=self.args.local_rank,
                find_unused_parameters=find_unused_parameters,
            )
        return model
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
:class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
`args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
kwargs:
Additional keyword arguments used to hide deprecated arguments
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
self.is_in_train = True
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(self.args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({self.args.output_dir})")
if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
logger.info(f"Loading model from {resume_from_checkpoint}).")
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(resume_from_checkpoint)
model_reloaded = True
else:
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
self.model_wrapped = self.model
# Keeping track whether we can can len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if self.args.max_steps > 0:
max_steps = self.args.max_steps
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
self.args.max_steps % num_update_steps_per_epoch > 0
)
else:
max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(self.args.num_train_epochs)
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = self.args.max_steps
num_train_epochs = 1
num_update_steps_per_epoch = max_steps
delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
if self.args.deepspeed:
model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
self.model = model.module
self.model_wrapped = model # will get further wrapped in DDP
self.deepspeed = model # DeepSpeedEngine object
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
model = self._wrap_model(self.model_wrapped)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
else:
world_size = 1
total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size
num_examples = (
self.num_examples(train_dataloader)
if train_dataset_is_sized
else total_train_batch_size * self.args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, "trainer_state.json")
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not self.args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not self.args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch."
)
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(self.args.device)
# _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
self._total_flos = self.state.total_flos
model.zero_grad()
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not self.args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
self.args.device
)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator)
if train_dataset_is_sized
else self.args.max_steps * self.args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if (step + 1) % self.args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
if (
((step + 1) % self.args.gradient_accumulation_steps != 0)
and self.args.local_rank != -1
and self.args._no_sync_in_gradient_accumulation
):
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self._total_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
self.args.max_grad_norm,
)
# Optimizer step
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
if not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(self.state.best_model_checkpoint)
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
else:
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
if self.deepspeed:
# free up any memory that might be useful for eval
self.deepspeed = None
self.optimizer = None
self.lr_scheduler = None
self.model_wrapped = self.model
gc.collect() # force memory release
# to restore normal behavior outside of train replay the place_model_on_device logic w/o deepspeed
self.place_model_on_device = self.args.place_model_on_device
if self.is_model_parallel:
self.place_model_on_device = False
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
    def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
        """Honor the current `self.control` flags: emit logs, run evaluation and/or save a checkpoint."""
        if self.control.should_log:
            logs: Dict[str, float] = {}
            tr_loss_scalar = tr_loss.item()
            # reset tr_loss to zero
            # (subtracting in place keeps the very same tensor object the training loop accumulates into)
            tr_loss -= tr_loss
            # Average loss over the steps elapsed since the last logging point.
            logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
            logs["learning_rate"] = self._get_learning_rate()
            self._total_loss_scalar += tr_loss_scalar
            self._globalstep_last_logged = self.state.global_step
            self.log(logs)
        metrics = None
        if self.control.should_evaluate:
            metrics = self.evaluate()
            # Forward eval metrics to an active hyperparameter search, if any.
            self._report_to_hp_search(trial, epoch, metrics)
        if self.control.should_save:
            self._save_checkpoint(model, trial, metrics=metrics)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)
    def _save_checkpoint(self, model, trial, metrics=None):
        """
        Save a full training checkpoint (model, optimizer, scheduler, trainer state) in a
        ``checkpoint-<global_step>`` folder, update the best-checkpoint tracking and rotate old checkpoints.
        """
        # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
        # want to save except FullyShardedDDP.
        # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
        # Save model checkpoint
        checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
        if self.hp_search_backend is not None and trial is not None:
            # During a hyperparameter search, checkpoints go into a per-run subfolder of output_dir.
            if self.hp_search_backend == HPSearchBackend.OPTUNA:
                run_id = trial.number
            else:
                from ray import tune
                run_id = tune.get_trial_id()
            run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
            run_dir = os.path.join(self.args.output_dir, run_name)
        else:
            run_dir = self.args.output_dir
        self.store_flos()
        output_dir = os.path.join(run_dir, checkpoint_folder)
        self.save_model(output_dir)
        if self.deepspeed:
            self.deepspeed.save_checkpoint(output_dir)
        # Save optimizer and scheduler
        if self.sharded_ddp == ShardedDDPOption.SIMPLE:
            # Gather the sharded optimizer state before saving.
            self.optimizer.consolidate_state_dict()
        if is_torch_tpu_available():
            xm.rendezvous("saving_optimizer_states")
            xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            with warnings.catch_warnings(record=True) as caught_warnings:
                xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                reissue_pt_warnings(caught_warnings)
        elif self.is_world_process_zero() and not self.deepspeed:
            # deepspeed.save_checkpoint above saves model/optim/sched
            torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            with warnings.catch_warnings(record=True) as caught_warnings:
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            reissue_pt_warnings(caught_warnings)
        # Determine the new best metric / best model checkpoint
        if metrics is not None and self.args.metric_for_best_model is not None:
            metric_to_check = self.args.metric_for_best_model
            if not metric_to_check.startswith("eval_"):
                metric_to_check = f"eval_{metric_to_check}"
            metric_value = metrics[metric_to_check]
            operator = np.greater if self.args.greater_is_better else np.less
            if (
                self.state.best_metric is None
                or self.state.best_model_checkpoint is None
                or operator(metric_value, self.state.best_metric)
            ):
                self.state.best_metric = metric_value
                self.state.best_model_checkpoint = output_dir
        # Save the Trainer state
        if self.is_world_process_zero():
            self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
        # Maybe delete some older checkpoints.
        if self.is_world_process_zero():
            self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
    def _load_optimizer_and_scheduler(self, checkpoint):
        """
        If optimizer and scheduler states exist in ``checkpoint``, load them into ``self.optimizer`` /
        ``self.lr_scheduler``. Also re-loads the deepspeed engine state when deepspeed is active.
        No-op when ``checkpoint`` is :obj:`None` or the state files are absent.
        """
        if checkpoint is None:
            return
        if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
            os.path.join(checkpoint, "scheduler.pt")
        ):
            # Load in optimizer and scheduler states
            if is_torch_tpu_available():
                # On TPU we have to take some extra precautions to properly load the states on the right device.
                # Load to CPU first, then explicitly push the tensors to the XLA device.
                optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
                with warnings.catch_warnings(record=True) as caught_warnings:
                    lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
                reissue_pt_warnings(caught_warnings)
                xm.send_cpu_data_to_device(optimizer_state, self.args.device)
                xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
                self.optimizer.load_state_dict(optimizer_state)
                self.lr_scheduler.load_state_dict(lr_scheduler_state)
            else:
                self.optimizer.load_state_dict(
                    torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=self.args.device)
                )
                with warnings.catch_warnings(record=True) as caught_warnings:
                    self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
                reissue_pt_warnings(caught_warnings)
        if self.deepspeed:
            # Not sure how to check if there is a saved deepspeed checkpoint, but since it just return None if it fails to find a deepspeed checkpoint this is sort of a check-n-load function
            self.deepspeed.load_checkpoint(checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)
    def hyperparameter_search(
        self,
        hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
        compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
        n_trials: int = 20,
        direction: str = "minimize",
        backend: Optional[Union["str", HPSearchBackend]] = None,
        hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
        **kwargs,
    ) -> BestRun:
        """
        Launch an hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
        :obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
        provided, the sum of all metrics otherwise.
        .. warning::
            To use this method, you need to have provided a ``model_init`` when initializing your
            :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
            with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
            method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
        Args:
            hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
                A function that defines the hyperparameter search space. Will default to
                :func:`~transformers.trainer_utils.default_hp_space_optuna` or
                :func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
            compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
                A function computing the objective to minimize or maximize from the metrics returned by the
                :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
            n_trials (:obj:`int`, `optional`, defaults to 20):
                The number of trial runs to test.
            direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
                Whether to optimize greater or lower objects. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should
                pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
                several metrics.
            backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
                The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
                one is installed. If both are installed, will default to optuna.
            hp_name (:obj:`Callable[["optuna.Trial"], str]`, `optional`):
                A function that defines the trial/run name. Will default to :obj:`None` (auto-generated names).
            kwargs:
                Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
                more information see:
                - the documentation of `optuna.create_study
                  <https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
                - the documentation of `tune.run
                  <https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
        Returns:
            :class:`transformers.trainer_utils.BestRun`: All the information about the best run.
        """
        if backend is None:
            backend = default_hp_search_backend()
            if backend is None:
                raise RuntimeError(
                    "At least one of optuna or ray should be installed. "
                    "To install optuna run `pip install optuna`."
                    "To install ray run `pip install ray[tune]`."
                )
        backend = HPSearchBackend(backend)
        if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
            raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
        if backend == HPSearchBackend.RAY and not is_ray_tune_available():
            raise RuntimeError(
                "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
            )
        self.hp_search_backend = backend
        if self.model_init is None:
            raise RuntimeError(
                "To use hyperparameter search, you need to pass your model through a model_init function."
            )
        self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
        self.hp_name = hp_name
        self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
        run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
        best_run = run_hp_search(self, n_trials, direction, **kwargs)
        # Reset the backend so a later plain .train() is not treated as part of a search.
        self.hp_search_backend = None
        return best_run
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        Subclass and override to inject custom behavior.
        Args:
            model (:obj:`nn.Module`):
                The model to train.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.
                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
        Return:
            :obj:`torch.Tensor`: The tensor with training loss on this batch.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)
        # Forward pass; run under `autocast` when native mixed precision (AMP) is enabled.
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1:
            loss = loss.mean() # mean() to average on multi-gpu parallel training
        if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
            # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
            loss = loss / self.args.gradient_accumulation_steps
        # Backward pass: each mixed-precision / parallelism backend has its own entry point.
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            # loss gets scaled under gradient_accumulation_steps in deepspeed
            loss = self.deepspeed.backward(loss)
        else:
            loss.backward()
        # Detach so callers can accumulate/log the value without keeping the autograd graph alive.
        return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
else:
return self.args.local_rank in [-1, 0]
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
else:
return self.args.local_rank == -1 or dist.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the main process.
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
else:
if self.is_world_process_zero():
self._save(output_dir)
if self.args.local_rank != -1:
dist.barrier()
    def _save_tpu(self, output_dir: Optional[str] = None):
        """Save model, tokenizer and training args from a TPU process, using ``xm.save`` for XLA tensors."""
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        logger.info("Saving model checkpoint to %s", output_dir)
        # Only the master ordinal creates the directory and persists the training arguments.
        if xm.is_master_ordinal():
            os.makedirs(output_dir, exist_ok=True)
            torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        # All ordinals must meet at this rendezvous before the checkpoint is written.
        xm.rendezvous("saving_checkpoint")
        if not isinstance(self.model, PreTrainedModel):
            # The model may be wrapped; unwrap it to reach `save_pretrained` when possible.
            if isinstance(unwrap_model(self.model), PreTrainedModel):
                unwrap_model(self.model).save_pretrained(
                    output_dir,
                    save_config=self.is_world_process_zero(),
                    state_dict=self.model.state_dict(),
                    save_function=xm.save,
                )
            else:
                logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                state_dict = self.model.state_dict()
                xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir, save_config=self.is_world_process_zero(), save_function=xm.save)
        if self.tokenizer is not None and self.is_world_process_zero():
            self.tokenizer.save_pretrained(output_dir)
    def _save(self, output_dir: Optional[str] = None):
        """Save model, tokenizer and training args to ``output_dir`` (non-TPU path, caller is process zero)."""
        # If we are executing this function, we are the process zero, so we don't check for that.
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        logger.info("Saving model checkpoint to %s", output_dir)
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        if not isinstance(self.model, PreTrainedModel):
            # The model may be wrapped; unwrap it to reach `save_pretrained` when possible.
            if isinstance(unwrap_model(self.model), PreTrainedModel):
                unwrap_model(self.model).save_pretrained(output_dir, state_dict=self.model.state_dict())
            else:
                logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                state_dict = self.model.state_dict()
                torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir)
        if self.tokenizer is not None:
            self.tokenizer.save_pretrained(output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self._total_flos is not None:
if self.args.local_rank != -1:
self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
else:
self.state.total_flos = self._total_flos
def _sorted_checkpoints(
self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
checkpoints_sorted[-1],
checkpoints_sorted[best_model_index],
)
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> Dict[str, float]:
        """
        Run evaluation and returns metrics.
        The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
        (pass it to the init :obj:`compute_metrics` argument).
        You can also subclass and override this method to inject custom behavior.
        Args:
            eval_dataset (:obj:`Dataset`, `optional`):
                Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
                columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
                :obj:`__len__` method.
            ignore_keys (:obj:`List[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default)
        Returns:
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
            dictionary also contains the epoch number which comes from the training state.
        """
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
            raise ValueError("eval_dataset must implement __len__")
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        start_time = time.time()
        output = self.prediction_loop(
            eval_dataloader,
            description="Evaluation",
            # No point gathering the predictions if there are no metrics, otherwise we defer to
            # self.args.prediction_loss_only
            prediction_loss_only=True if self.compute_metrics is None else None,
            ignore_keys=ignore_keys,
            metric_key_prefix=metric_key_prefix,
        )
        # Add runtime / samples-per-second to the metric dict before logging and callbacks fire.
        n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
        output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
        self.log(output.metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
        self._memory_tracker.stop_and_update_metrics(output.metrics)
        return output.metrics
    def predict(
        self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval"
    ) -> PredictionOutput:
        """
        Run prediction and returns predictions and potential metrics.
        Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
        will also return metrics, like in :obj:`evaluate()`.
        Args:
            test_dataset (:obj:`Dataset`):
                Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
                ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
            ignore_keys (:obj:`List[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default)
        .. note::
            If your predictions or labels have different sequence length (for instance because you're doing dynamic
            padding in a token classification task) the predictions will be padded (on the right) to allow for
            concatenation into one array. The padding index is -100.
        Returns: `NamedTuple` A namedtuple with the following keys:
            - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
            - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
            - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
              contained labels).
        """
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
            raise ValueError("test_dataset must implement __len__")
        test_dataloader = self.get_test_dataloader(test_dataset)
        start_time = time.time()
        output = self.prediction_loop(
            test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
        )
        # Record throughput (runtime / samples-per-second) alongside any task metrics.
        output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
        self._memory_tracker.stop_and_update_metrics(output.metrics)
        return output
    def prediction_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> PredictionOutput:
        """
        Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
        Works both with or without labels.
        """
        if not isinstance(dataloader.dataset, collections.abc.Sized):
            raise ValueError("dataset must implement __len__")
        prediction_loss_only = (
            prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
        )
        if self.args.deepspeed and not self.args.do_train:
            # no harm, but flagging to the user that deepspeed config is ignored for eval
            # flagging only for when --do_train wasn't passed as only then it's redundant
            logger.info("Detected the deepspeed argument but it will not be used for evaluation")
        model = self._wrap_model(self.model, training=False)
        # if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
        # ``train`` is running, half it first and then put on device
        if not self.is_in_train and self.args.fp16_full_eval:
            model = model.half().to(self.args.device)
        batch_size = dataloader.batch_size
        num_examples = self.num_examples(dataloader)
        logger.info("***** Running %s *****", description)
        logger.info(" Num examples = %d", num_examples)
        logger.info(" Batch size = %d", batch_size)
        # Host-side accumulators, periodically flushed into the distributed gatherers below.
        losses_host: torch.Tensor = None
        preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
        labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
        world_size = 1
        if is_torch_tpu_available():
            world_size = xm.xrt_world_size()
        elif self.args.local_rank != -1:
            world_size = dist.get_world_size()
        world_size = max(1, world_size)
        eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
        if not prediction_loss_only:
            preds_gatherer = DistributedTensorGatherer(world_size, num_examples)
            labels_gatherer = DistributedTensorGatherer(world_size, num_examples)
        model.eval()
        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
        if self.args.past_index >= 0:
            self._past = None
        self.callback_handler.eval_dataloader = dataloader
        for step, inputs in enumerate(dataloader):
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
            if loss is not None:
                # One loss value per example so the gatherer can truncate any padding correctly.
                losses = loss.repeat(batch_size)
                losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
            if logits is not None:
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
            if labels is not None:
                labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
                eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
                if not prediction_loss_only:
                    preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
                    labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
                # Set back to None to begin a new accumulation
                losses_host, preds_host, labels_host = None, None, None
        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of the evaluation loop
            delattr(self, "_past")
        # Gather all remaining tensors and put them back on the CPU
        eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
        if not prediction_loss_only:
            preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
            labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
        eval_loss = eval_losses_gatherer.finalize()
        preds = preds_gatherer.finalize() if not prediction_loss_only else None
        label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
        if self.compute_metrics is not None and preds is not None and label_ids is not None:
            metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
        else:
            metrics = {}
        if eval_loss is not None:
            metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """
        Perform an evaluation step on :obj:`model` using obj:`inputs`.
        Subclass and override to inject custom behavior.
        Args:
            model (:obj:`nn.Module`):
                The model to evaluate.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.
                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (:obj:`bool`):
                Whether or not to return the loss only.
            ignore_keys (:obj:`List[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
        Return:
            Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
            labels (each being optional).
        """
        # A batch "has labels" only when every configured label column is present and non-None.
        has_labels = all(inputs.get(k) is not None for k in self.label_names)
        inputs = self._prepare_inputs(inputs)
        if ignore_keys is None:
            if hasattr(self.model, "config"):
                ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
            else:
                ignore_keys = []
        # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
        if has_labels:
            labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
            if len(labels) == 1:
                labels = labels[0]
        else:
            labels = None
        with torch.no_grad():
            if has_labels:
                loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
                loss = loss.mean().detach()
                # Drop the loss and any ignored keys from the outputs so only logits remain.
                if isinstance(outputs, dict):
                    logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
                else:
                    logits = outputs[1:]
            else:
                loss = None
                if self.use_amp:
                    with autocast():
                        outputs = model(**inputs)
                else:
                    outputs = model(**inputs)
                if isinstance(outputs, dict):
                    logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
                else:
                    logits = outputs
                # TODO: this needs to be fixed and made cleaner later.
                if self.args.past_index >= 0:
                    self._past = outputs[self.args.past_index - 1]
        if prediction_loss_only:
            return (loss, None, None)
        logits = nested_detach(logits)
        if len(logits) == 1:
            logits = logits[0]
        return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__copyright__ = """
MIT License
Copyright (c) 2021 Samapriya Roy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__license__ = "MIT License"
import requests
import json
import sys
import pkg_resources
import argparse
import time
import csv
import getpass
import os
import pytz
from itertools import groupby
from dateutil import parser
from os.path import expanduser
from bs4 import BeautifulSoup
from timezonefinder import TimezoneFinder
class Solution:
    """Comparison helper for dotted numeric version strings (e.g. "1.2.10")."""

    def compareVersion(self, version1, version2):
        """Return 1 if version1 > version2, -1 if smaller, 0 if equal."""
        left = [int(part) for part in version1.split(".")]
        right = [int(part) for part in version2.split(".")]
        # Pad the shorter list with zeros so "1.0" and "1" compare equal.
        width = max(len(left), len(right))
        left += [0] * (width - len(left))
        right += [0] * (width - len(right))
        if left > right:
            return 1
        if left < right:
            return -1
        return 0
# Shared comparator instance used by the version check below.
ob1 = Solution()
# Get package version
def pyspotter_version():
    """Best-effort check of the installed pyspotter version against the latest PyPI release.

    Prints an upgrade (or staging-code) notice when the versions differ. Any network or
    parsing failure is swallowed deliberately: this banner runs at import time and must
    never prevent the CLI from working offline.
    """
    try:
        url = "https://pypi.org/project/pyspotter/"
        # Bounded timeout so an unreachable PyPI cannot hang the CLI at startup.
        source = requests.get(url, timeout=30)
        html_content = source.text
        soup = BeautifulSoup(html_content, "html.parser")
        company = soup.find("h1")
        latest = company.string.strip().split(" ")[-1]
        installed = pkg_resources.get_distribution("pyspotter").version
        vcheck = ob1.compareVersion(latest, installed)
    except Exception:
        # Offline, PyPI layout changed, or package metadata missing: skip the banner.
        return
    if vcheck == 1:
        print(
            "\n"
            + "========================================================================="
        )
        print(
            "Current version of pyspotter is {} upgrade to lastest version: {}".format(
                installed,
                latest,
            )
        )
        print(
            "========================================================================="
        )
    elif vcheck == -1:
        print(
            "\n"
            + "========================================================================="
        )
        print(
            "Possibly running staging code {} compared to pypi release {}".format(
                installed,
                latest,
            )
        )
        print(
            "========================================================================="
        )
pyspotter_version()
# set credentials
def auth(usr):
    """Prompt for Sofar Ocean credentials, log in, and cache the API token in ~/sofarocean.json.

    Args:
        usr: Account email; when None the user is prompted interactively.
    """
    # Browser-like headers mirroring what weather.sofarocean.com sends to the login endpoint.
    headers = {
        "authority": "api.sofarocean.com",
        "sec-ch-ua": '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
        "accept": "application/json, text/plain, */*",
        "sec-ch-ua-mobile": "?0",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "content-type": "application/x-www-form-urlencoded",
        "origin": "https://weather.sofarocean.com",
        "sec-fetch-site": "same-site",
        "sec-fetch-mode": "cors",
        "sec-fetch-dest": "empty",
        "referer": "https://weather.sofarocean.com/",
        "accept-language": "en-US,en;q=0.9",
    }
    home = expanduser("~/sofarocean.json")
    if usr is None:
        usr = input("Enter email: ")
    pwd = getpass.getpass("Enter password: ")
    data = {"username": usr, "password": pwd, "skipRedirect": "true"}
    response = requests.post(
        "https://api.sofarocean.com/login/", headers=headers, data=data
    )
    if response.status_code == 200:
        print("Authentication successful")
        # Persist only the token, never the credentials themselves.
        data = {"token": response.json()["token"]}
        with open(home, "w") as outfile:
            json.dump(data, outfile)
    else:
        print(f"Authentication failed with error {response.status_code}")
def auth_from_parser(args):
    """argparse adapter: forward the parsed --username option to `auth`."""
    auth(usr=args.username)
def reset():
    """Regenerate the account's API token and overwrite the cached copy in ~/sofarocean.json."""
    home = expanduser("~/sofarocean.json")
    usr = input("Enter email: ")
    if not os.path.exists(home):
        # No cached token yet: authenticate first so the reset call has a token to present.
        auth(usr)
        with open(home) as json_file:
            data = json.load(json_file)
            token = data.get("token")
    else:
        with open(home) as json_file:
            data = json.load(json_file)
            token = data.get("token")
    headers = {
        "token": token,
    }
    response = requests.post(
        f"https://api.sofarocean.com/users/{usr}/tokens/", headers=headers
    )
    if response.status_code == 200:
        print("Token reset successful")
        # Replace the cached token with the freshly issued one.
        data = {"token": response.json()["token"]}
        with open(home, "w") as outfile:
            json.dump(data, outfile)
    else:
        print("Token reset failed")
def reset_from_parser(args):
    """argparse adapter: run the interactive token reset (takes no options)."""
    reset()
def tokenize():
    """Return the cached Sofar API token, prompting for authentication first if none is stored.

    `auth` writes the token to ``~/sofarocean.json``; the previous implementation duplicated
    the file-reading code in both the fresh-auth and cached branches, which is now shared.
    """
    home = expanduser("~/sofarocean.json")
    if not os.path.exists(home):
        # First run: interactive login creates the cache file.
        auth(usr=None)
    with open(home) as json_file:
        data = json.load(json_file)
    return data.get("token")
def devlist():
    """Print the API's device-count message and every spotter ID registered to the account."""
    headers = {
        "token": tokenize(),
    }
    response = requests.get("https://api.sofarocean.com/api/devices", headers=headers)
    payload = response.json()
    # Single-quoted subscript: reusing double quotes inside a double-quoted f-string is a
    # SyntaxError before Python 3.12 (PEP 701).
    print(f"Total of {payload['message']}" + "\n")
    for device in payload["data"]["devices"]:
        print(device["spotterId"])
def devlist_from_parser(args):
    """argparse adapter: list the account's devices (takes no options)."""
    devlist()
def spot_check(spot_id):
    """Fetch and pretty-print the latest readings for a single Spotter buoy.

    Accepts the ID with or without the "SPOT-" prefix. Prints every scalar field from the
    latest-data endpoint plus the last reported position and timestamp (UTC and buoy-local).
    """
    if not spot_id.startswith("SPOT-"):
        spot_id = f"SPOT-{spot_id}"
    dic = {}
    obj = TimezoneFinder()
    headers = {
        "token": tokenize(),
    }
    response = requests.get(
        f"https://api.sofarocean.com/api/latest-data?spotterId={spot_id}",
        headers=headers,
    )
    if response.status_code == 200:
        spotter = response.json()
        print(f"Fetching info for Spotter {spot_id}" + "\n")
        # Copy scalar fields only; the bulky series are handled separately below.
        for key, value in spotter["data"].items():
            if key != "frequencyData" and key != "track" and key != "waves":
                dic[key] = value
        # The most recent wave record carries the position/time of the latest report.
        latest = spotter["data"]["waves"][-1]
        latitude = latest["latitude"]
        longitude = latest["longitude"]
        time_zone = obj.timezone_at(lat=float(latitude), lng=float(longitude))
        tz = pytz.timezone(time_zone)
        now_utc = parser.parse(latest["timestamp"])
        now_kl = now_utc.replace(tzinfo=pytz.utc).astimezone(tz)
        dic["last updated (UTC time)"] = str(now_utc)
        dic["last updated (spotter local time)"] = str(now_kl)
        dic["latitude"] = latitude
        dic["longitude"] = longitude
        print(json.dumps(dic, indent=2, sort_keys=False))
    else:
        # Single-quoted subscript keeps the f-string valid on Python < 3.12 (PEP 701).
        print(
            f"Spot check failed with error code {response.status_code}: {response.json()['message']}"
        )
def spotcheck_from_parser(args):
    """argparse adapter: forward the --sid option to `spot_check`."""
    spot_check(spot_id=args.sid)
def spot_data(spot_id, dtype, folder):  # 'SPOT-0222'
    """Export Spotter wave/wind/sst records to one CSV per calendar day in ``folder``.

    Args:
        spot_id: Spotter ID, with or without the "SPOT-" prefix.
        dtype: Which record stream to export: "wave", "wind" or "sst".
        folder: Destination directory for the ``{spot_id}_{date}_{dtype}.csv`` files.

    Exits with a message when the API call fails, when ``dtype`` is unknown (previously a
    NameError), or when the *requested* stream has no data. The original
    ``not A in d or empty and dtype == X`` checks bound as ``not A or (empty and ...)``
    due to operator precedence, so a missing stream aborted the export even when a
    different stream was requested — only the requested stream is checked now.
    """
    if not spot_id.startswith("SPOT-"):
        spot_id = f"SPOT-{spot_id}"
    params = {
        "spotterId": [spot_id],
        "includeSurfaceTempData": True,
        "includeWindData": True,
    }
    headers = {
        "token": tokenize(),
    }
    response = requests.get(
        "https://api.sofarocean.com/api/wave-data", headers=headers, params=params
    )
    if response.status_code != 200:
        # Single-quoted subscript keeps the f-string valid on Python < 3.12 (PEP 701).
        sys.exit(
            f"Failed with status_code: {response.status_code}: {response.json()['message']}"
        )
    spotter = response.json()
    print("\n" + f"Fetching info for Spotter {spot_id}" + "\n")
    # API key and CSV layout for each supported stream.
    key_by_dtype = {"sst": "surfaceTemp", "wave": "waves", "wind": "wind"}
    columns_by_dtype = {
        "wave": [
            "significantWaveHeight",
            "peakPeriod",
            "meanPeriod",
            "peakDirection",
            "peakDirectionalSpread",
            "meanDirection",
            "meanDirectionalSpread",
            "timestamp",
            "latitude",
            "longitude",
            "date",
            "spotter_id",
        ],
        "wind": [
            "speed",
            "direction",
            "seasurfaceId",
            "latitude",
            "longitude",
            "timestamp",
            "date",
            "spotter_id",
        ],
        "sst": [
            "degrees",
            "latitude",
            "longitude",
            "timestamp",
            "date",
            "spotter_id",
        ],
    }
    if dtype not in key_by_dtype:
        sys.exit(f"Unknown data type {dtype}: choose from wind/wave/sst")
    stream_key = key_by_dtype[dtype]
    records = spotter["data"].get(stream_key) or []
    if not records:
        sys.exit(f"No {stream_key} data found")
    # Tag each reading with its calendar date and the spotter it came from.
    for readings in records:
        readings["date"] = readings["timestamp"].split("T")[0]
        readings["spotter_id"] = spot_id
    csv_columns = columns_by_dtype[dtype]

    # define a function for key: records are grouped into one CSV per calendar day
    def key_func(k):
        return k["date"]

    # groupby only merges adjacent items, so sort by the same key first.
    INFO = sorted(records, key=key_func)
    for key, value in groupby(INFO, key_func):
        print(f"Processing {spot_id}_{key}_{dtype}.csv")
        dict_data = list(value)
        try:
            with open(
                os.path.join(folder, f"{spot_id}_{key}_{dtype}.csv"), "w"
            ) as csvfile:
                writer = csv.DictWriter(
                    csvfile, fieldnames=csv_columns, delimiter=",", lineterminator="\n"
                )
                writer.writeheader()
                for data in dict_data:
                    writer.writerow(data)
        except IOError:
            print("I/O error")
def spot_data_from_parser(args):
    """argparse adapter: forward --sid/--dtype/--folder to `spot_data`."""
    spot_data(spot_id=args.sid, dtype=args.dtype, folder=args.folder)
def main(args=None):
    """Build the CLI (auth/reset/devlist/spot-check/spot-data) and dispatch to the chosen subcommand."""
    parser = argparse.ArgumentParser(description="Simple CLI for Sofarocean API")
    subparsers = parser.add_subparsers()
    # auth: interactive login that stores the API token locally.
    parser_auth = subparsers.add_parser(
        "auth", help="Authenticates and saves your API token"
    )
    optional_named = parser_auth.add_argument_group("Optional named arguments")
    optional_named.add_argument("--username", help="Username", default=None)
    parser_auth.set_defaults(func=auth_from_parser)
    # reset: rotate the stored API token.
    parser_reset = subparsers.add_parser("reset", help="Regenerates your API token")
    parser_reset.set_defaults(func=reset_from_parser)
    parser_devlist = subparsers.add_parser(
        "devlist", help="Print lists of devices available under your account"
    )
    parser_devlist.set_defaults(func=devlist_from_parser)
    parser_spotcheck = subparsers.add_parser(
        "spot-check", help="Spot check a Spotter location and time"
    )
    required_named = parser_spotcheck.add_argument_group("Required named arguments.")
    required_named.add_argument("--sid", help="Spotter ID", required=True)
    parser_spotcheck.set_defaults(func=spotcheck_from_parser)
    parser_spot_data = subparsers.add_parser(
        "spot-data", help="Export Spotter Data based on Spotter ID & grouped by date"
    )
    required_named = parser_spot_data.add_argument_group("Required named arguments.")
    required_named.add_argument("--sid", help="Spotter ID", required=True)
    required_named.add_argument(
        "--dtype", help="Data type: wind/wave/sst", required=True
    )
    required_named.add_argument(
        "--folder", help="Folder to export CSV data", required=True
    )
    parser_spot_data.set_defaults(func=spot_data_from_parser)
    args = parser.parse_args()
    try:
        func = args.func
    except AttributeError:
        # No subcommand given: argparse did not set `func`, so show usage and exit.
        parser.error("too few arguments")
    func(args)
if __name__ == "__main__":
main()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
__copyright__ = """
MIT License
Copyright (c) 2021 Samapriya Roy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__license__ = "MIT License"
import requests
import json
import sys
import pkg_resources
import argparse
import time
import csv
import getpass
import os
import pytz
from itertools import groupby
from dateutil import parser
from os.path import expanduser
from bs4 import BeautifulSoup
from timezonefinder import TimezoneFinder
class Solution:
def compareVersion(self, version1, version2):
versions1 = [int(v) for v in version1.split(".")]
versions2 = [int(v) for v in version2.split(".")]
for i in range(max(len(versions1), len(versions2))):
v1 = versions1[i] if i < len(versions1) else 0
v2 = versions2[i] if i < len(versions2) else 0
if v1 > v2:
return 1
elif v1 < v2:
return -1
return 0
ob1 = Solution()
# Get package version
def pyspotter_version():
url = "https://pypi.org/project/pyspotter/"
source = requests.get(url)
html_content = source.text
soup = BeautifulSoup(html_content, "html.parser")
company = soup.find("h1")
vcheck = ob1.compareVersion(
company.string.strip().split(" ")[-1],
pkg_resources.get_distribution("pyspotter").version,
)
if vcheck == 1:
print(
"\n"
+ "========================================================================="
)
print(
"Current version of pyspotter is {} upgrade to lastest version: {}".format(
pkg_resources.get_distribution("pyspotter").version,
company.string.strip().split(" ")[-1],
)
)
print(
"========================================================================="
)
elif vcheck == -1:
print(
"\n"
+ "========================================================================="
)
print(
"Possibly running staging code {} compared to pypi release {}".format(
pkg_resources.get_distribution("pyspotter").version,
company.string.strip().split(" ")[-1],
)
)
print(
"========================================================================="
)
pyspotter_version()
# set credentials
def auth(usr):
headers = {
"authority": "api.sofarocean.com",
"sec-ch-ua": '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
"accept": "application/json, text/plain, */*",
"sec-ch-ua-mobile": "?0",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
"content-type": "application/x-www-form-urlencoded",
"origin": "https://weather.sofarocean.com",
"sec-fetch-site": "same-site",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "https://weather.sofarocean.com/",
"accept-language": "en-US,en;q=0.9",
}
home = expanduser("~/sofarocean.json")
if usr is None:
usr = input("Enter email: ")
pwd = getpass.getpass("Enter password: ")
data = {"username": usr, "password": pwd, "skipRedirect": "true"}
response = requests.post(
"https://api.sofarocean.com/login/", headers=headers, data=data
)
if response.status_code == 200:
print("Authentication successful")
data = {"token": response.json()["token"]}
with open(home, "w") as outfile:
json.dump(data, outfile)
else:
print(f"Authentication failed with error {response.status_code}")
def auth_from_parser(args):
auth(usr=args.username)
def reset():
home = expanduser("~/sofarocean.json")
usr = input("Enter email: ")
if not os.path.exists(home):
auth(usr)
with open(home) as json_file:
data = json.load(json_file)
token = data.get("token")
else:
with open(home) as json_file:
data = json.load(json_file)
token = data.get("token")
headers = {
"token": token,
}
response = requests.post(
f"https://api.sofarocean.com/users/{usr}/tokens/", headers=headers
)
if response.status_code == 200:
print("Token reset successful")
data = {"token": response.json()["token"]}
with open(home, "w") as outfile:
json.dump(data, outfile)
else:
print("Token reset failed")
def reset_from_parser(args):
reset()
def tokenize():
home = expanduser("~/sofarocean.json")
if not os.path.exists(home):
auth(usr=None)
with open(home) as json_file:
data = json.load(json_file)
token = data.get("token")
else:
with open(home) as json_file:
data = json.load(json_file)
token = data.get("token")
return token
def devlist():
headers = {
"token": tokenize(),
}
response = requests.get("https://api.sofarocean.com/api/devices", headers=headers)
response = response.json()
print(f"Total of {response['message']}" + "\n")
for device in response["data"]["devices"]:
print(device["spotterId"])
def devlist_from_parser(args):
devlist()
def spot_check(spot_id):
if not spot_id.startswith("SPOT-"):
spot_id = f"SPOT-{spot_id}"
dic = {}
obj = TimezoneFinder()
headers = {
"token": tokenize(),
}
response = requests.get(
f"https://api.sofarocean.com/api/latest-data?spotterId={spot_id}",
headers=headers,
)
if response.status_code == 200:
spotter = response.json()
print(f"Fetching info for Spotter {spot_id}" + "\n")
for key, value in spotter["data"].items():
if key != "frequencyData" and key != "track" and key != "waves":
dic[key] = value
# print(key,value)
latitude = spotter["data"]["waves"][-1]["latitude"]
longitude = spotter["data"]["waves"][-1]["longitude"]
time_zone = obj.timezone_at(lat=float(latitude), lng=float(longitude))
tz = pytz.timezone(time_zone)
now_utc = parser.parse(spotter["data"]["waves"][-1]["timestamp"])
now_kl = now_utc.replace(tzinfo=pytz.utc).astimezone(tz)
dic["last updated (UTC time)"] = str(now_utc)
dic["last updated (spotter local time)"] = str(now_kl)
dic["latitude"] = spotter["data"]["waves"][-1]["latitude"]
dic["longitude"] = spotter["data"]["waves"][-1]["longitude"]
print(json.dumps(dic, indent=2, sort_keys=False))
else:
print(
f"Spot check failed with error code {response.status_code}: {response.json()['message']}"
)
def spotcheck_from_parser(args):
spot_check(spot_id=args.sid)
def spot_data(spot_id, dtype, folder): #'SPOT-0222'
waves_list = []
wind_list = []
sst_list = []
if not spot_id.startswith("SPOT-"):
spot_id = f"SPOT-{spot_id}"
obj = TimezoneFinder()
params = {
"spotterId": [spot_id],
"includeSurfaceTempData": True,
"includeWindData": True,
}
headers = {
"token": tokenize(),
}
response = requests.get(
"https://api.sofarocean.com/api/wave-data", headers=headers, params=params
)
if response.status_code == 200:
spotter = response.json()
print("\n" + f"Fetching info for Spotter {spot_id}" + "\n")
if (
not "surfaceTemp" in spotter["data"]
or len(spotter["data"]["surfaceTemp"]) == 0
and dtype == "sst"
):
sys.exit("No surfaceTemp data found")
else:
for readings in spotter["data"]["surfaceTemp"]:
readings["date"] = readings["timestamp"].split("T")[0]
readings["spotter_id"] = spot_id
sst_list.append(readings)
if (
not "waves" in spotter["data"]
or len(spotter["data"]["waves"]) == 0
and dtype == "wave"
):
sys.exit("No waves data found")
else:
for readings in spotter["data"]["waves"]:
readings["date"] = readings["timestamp"].split("T")[0]
readings["spotter_id"] = spot_id
waves_list.append(readings)
if (
not "wind" in spotter["data"]
or len(spotter["data"]["wind"]) == 0
and dtype == "wind"
):
sys.exit("No wind data found")
else:
for readings in spotter["data"]["wind"]:
readings["date"] = readings["timestamp"].split("T")[0]
readings["spotter_id"] = spot_id
wind_list.append(readings)
else:
sys.exit(
f"Failed with status_code: {response.status_code}: {response.json()['message']}"
)
if dtype == "wave":
csv_columns = [
"significantWaveHeight",
"peakPeriod",
"meanPeriod",
"peakDirection",
"peakDirectionalSpread",
"meanDirection",
"meanDirectionalSpread",
"timestamp",
"latitude",
"longitude",
"date",
"spotter_id",
]
main_list = waves_list
elif dtype == "wind":
csv_columns = [
"speed",
"direction",
"seasurfaceId",
"latitude",
"longitude",
"timestamp",
"date",
"spotter_id",
]
main_list = wind_list
elif dtype == "sst":
csv_columns = [
"degrees",
"latitude",
"longitude",
"timestamp",
"date",
"spotter_id",
]
main_list = sst_list
# define a fuction for key
def key_func(k):
return k["date"]
# sort INFO data by 'company' key.
INFO = sorted(main_list, key=key_func)
for key, value in groupby(INFO, key_func):
print(f"Processing {spot_id}_{key}_{dtype}.csv")
dict_data = list(value)
try:
with open(
os.path.join(folder, f"{spot_id}_{key}_{dtype}.csv"), "w"
) as csvfile:
writer = csv.DictWriter(
csvfile, fieldnames=csv_columns, delimiter=",", lineterminator="\n"
)
writer.writeheader()
for data in dict_data:
writer.writerow(data)
except IOError:
print("I/O error")
def spot_data_from_parser(args):
spot_data(spot_id=args.sid, dtype=args.dtype, folder=args.folder)
def main(args=None):
parser = argparse.ArgumentParser(description="Simple CLI for Sofarocean API")
subparsers = parser.add_subparsers()
parser_auth = subparsers.add_parser(
"auth", help="Authenticates and saves your API token"
)
optional_named = parser_auth.add_argument_group("Optional named arguments")
optional_named.add_argument("--username", help="Username", default=None)
parser_auth.set_defaults(func=auth_from_parser)
parser_reset = subparsers.add_parser("reset", help="Regenerates your API token")
parser_reset.set_defaults(func=reset_from_parser)
parser_devlist = subparsers.add_parser(
"devlist", help="Print lists of devices available under your account"
)
parser_devlist.set_defaults(func=devlist_from_parser)
parser_spotcheck = subparsers.add_parser(
"spot-check", help="Spot check a Spotter location and time"
)
required_named = parser_spotcheck.add_argument_group("Required named arguments.")
required_named.add_argument("--sid", help="Spotter ID", required=True)
parser_spotcheck.set_defaults(func=spotcheck_from_parser)
parser_spot_data = subparsers.add_parser(
"spot-data", help="Export Spotter Data based on Spotter ID & grouped by date"
)
required_named = parser_spot_data.add_argument_group("Required named arguments.")
required_named.add_argument("--sid", help="Spotter ID", required=True)
required_named.add_argument(
"--dtype", help="Data type: wind/wave/sst", required=True
)
required_named.add_argument(
"--folder", help="Folder to export CSV data", required=True
)
parser_spot_data.set_defaults(func=spot_data_from_parser)
args = parser.parse_args()
try:
func = args.func
except AttributeError:
parser.error("too few arguments")
func(args)
if __name__ == "__main__":
main()
|
"""
Meteostat JSON API Server
The code is licensed under the MIT license.
"""
from datetime import datetime
import json
from flask import abort
from meteostat import Point, Monthly, units
from server import app, utils
"""
Meteostat configuration
"""
Point.radius = 120000
Monthly.threads = 4
Monthly.autoclean = False
"""
Endpoint configuration
"""
# Query parameters
parameters = [
('lat', float, None),
('lon', float, None),
('alt', int, None),
('start', str, None),
('end', str, None),
('model', bool, True),
('freq', str, None),
('units', str, None)
]
@app.route('/point/monthly')
def point_monthly():
"""
Return monthly point data in JSON format
"""
# Get query parameters
args = utils.get_parameters(parameters)
# Check if required parameters are set
if args['lat'] and args['lon'] and len(
args['start']) == 10 and len(args['end']) == 10:
try:
# Convert start & end date strings to datetime
start = datetime.strptime(args['start'], '%Y-%m-%d')
end = datetime.strptime(f'{args['end']} 23:59:59', '%Y-%m-%d %H:%M:%S')
# Get number of days between start and end date
date_diff = (end - start).days
# Check date range
if date_diff < 0:
# Bad request
abort(400)
# Caching
now_diff = (datetime.now() - end).days
if now_diff < 90:
cache_time = 60 * 60 * 24 * 7
else:
cache_time = 60 * 60 * 24 * 30
Monthly.max_age = cache_time
# Create a point
location = Point(args['lat'], args['lon'], args['alt'])
# Get data
data = Monthly(location, start, end, model=args['model'])
# Check if any data
if data.count() > 0:
# Normalize data
data = data.normalize()
# Aggregate
if args['freq']:
data = data.aggregate(args['freq'])
# Unit conversion
if args['units'] == 'imperial':
data = data.convert(units.imperial)
elif args['units'] == 'scientific':
data = data.convert(units.scientific)
# Fetch DataFrame
data = data.fetch()
# Convert to integer
data['tsun'] = data['tsun'].astype('Int64')
# DateTime Index to String
data.index = data.index.strftime('%Y-%m-%d')
data.index.rename('date', inplace=True)
data = data.reset_index().to_json(orient="records")
else:
# No data
data = '[]'
# Inject meta data
meta = {}
meta['generated'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
meta['stations'] = location.stations.to_list()
# Generate output string
output = f'''{{'meta':{json.dumps(meta)},"data":{data}}}'''
# Return
return utils.send_response(output, cache_time)
except BaseException:
# Bad request
abort(400)
else:
# Bad request
abort(400)
| """
Meteostat JSON API Server
The code is licensed under the MIT license.
"""
from datetime import datetime
import json
from flask import abort
from meteostat import Point, Monthly, units
from server import app, utils
"""
Meteostat configuration
"""
Point.radius = 120000
Monthly.threads = 4
Monthly.autoclean = False
"""
Endpoint configuration
"""
# Query parameters
parameters = [
('lat', float, None),
('lon', float, None),
('alt', int, None),
('start', str, None),
('end', str, None),
('model', bool, True),
('freq', str, None),
('units', str, None)
]
@app.route('/point/monthly')
def point_monthly():
"""
Return monthly point data in JSON format
"""
# Get query parameters
args = utils.get_parameters(parameters)
# Check if required parameters are set
if args['lat'] and args['lon'] and len(
args['start']) == 10 and len(args['end']) == 10:
try:
# Convert start & end date strings to datetime
start = datetime.strptime(args['start'], '%Y-%m-%d')
end = datetime.strptime(f'{args["end"]} 23:59:59', '%Y-%m-%d %H:%M:%S')
# Get number of days between start and end date
date_diff = (end - start).days
# Check date range
if date_diff < 0:
# Bad request
abort(400)
# Caching
now_diff = (datetime.now() - end).days
if now_diff < 90:
cache_time = 60 * 60 * 24 * 7
else:
cache_time = 60 * 60 * 24 * 30
Monthly.max_age = cache_time
# Create a point
location = Point(args['lat'], args['lon'], args['alt'])
# Get data
data = Monthly(location, start, end, model=args['model'])
# Check if any data
if data.count() > 0:
# Normalize data
data = data.normalize()
# Aggregate
if args['freq']:
data = data.aggregate(args['freq'])
# Unit conversion
if args['units'] == 'imperial':
data = data.convert(units.imperial)
elif args['units'] == 'scientific':
data = data.convert(units.scientific)
# Fetch DataFrame
data = data.fetch()
# Convert to integer
data['tsun'] = data['tsun'].astype('Int64')
# DateTime Index to String
data.index = data.index.strftime('%Y-%m-%d')
data.index.rename('date', inplace=True)
data = data.reset_index().to_json(orient="records")
else:
# No data
data = '[]'
# Inject meta data
meta = {}
meta['generated'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
meta['stations'] = location.stations.to_list()
# Generate output string
output = f'''{{"meta":{json.dumps(meta)},"data":{data}}}'''
# Return
return utils.send_response(output, cache_time)
except BaseException:
# Bad request
abort(400)
else:
# Bad request
abort(400)
|
MENU = {
"espresso": {
"ingredients": {
"water": 50,
"coffee": 18,
},
"cost": 15.5,
},
"latte": {
"ingredients": {
"water": 200,
"milk": 150,
"coffee": 24,
},
"cost": 20.0,
},
"cappuccino": {
"ingredients": {
"water": 250,
"milk": 100,
"coffee": 24,
},
"cost": 25.0,
}
}
resources = {
"water": 900,
"milk": 700,
"coffee": 300,
}
profit = 0
password = "Admin"
# TODO: 1. Print the report of the coffee machine resources
# TODO: 2 coin processing system
def coin_processing():
"""Process the coins and returns the total calculation"""
print("Please insert coins.")
total = int(input("How many tens? (N$10): ")) * 10
total += int(input("How manny fives? (N$5): ")) * 5
total += int(input("How many ones? (N$1): ")) * 1
total += int(input("How many 50 cents? (N$0.5): ")) * 0.5
return total
def coffee(drink_name, order_ingredients):
for item in order_ingredients:
resources[item] -= order_ingredients[item]
print(f"Here is your {drink_name}, Enjoy.")
def successful_transaction(money_payed, drink_cost):
if money_payed >= drink_cost:
change = round(money_payed - drink_cost, 2)
print(f"Here is N${change} change")
global profit
profit += drink_cost
return True
else:
print("Sorry that's not enough money. Money returned.")
return False
def sufficient_resources(order_ingredients):
"""Returns true if there is enough resources and false if there isn't"""
for item in order_ingredients:
if order_ingredients[item] > resources[item]:
print(f"Sorry there is not enough {item}.")
return False
return True
is_machine_on = True
while is_machine_on:
order = input("What would you like? (espresso/latte/cappuccino): ").lower()
if order == "off":
ps = input("Enter Password: ")
if ps == password:
is_machine_on = False
else:
print("Wrong Password")
is_machine_on = True
elif order == "report":
ps = input("Enter Password: ")
if ps == password:
print(f"Water: {resources["water"]}ml")
print(f"Milk: {resources["milk"]}ml")
print(f"Coffee: {resources["coffee"]}ml")
print(f"Money: N${profit}")
else:
print("Wrong Password")
else:
drink = MENU[order]
if sufficient_resources(drink['ingredients']):
payment = coin_processing()
if successful_transaction(payment, drink['cost']):
coffee(order, drink['ingredients'])
| MENU = {
"espresso": {
"ingredients": {
"water": 50,
"coffee": 18,
},
"cost": 15.5,
},
"latte": {
"ingredients": {
"water": 200,
"milk": 150,
"coffee": 24,
},
"cost": 20.0,
},
"cappuccino": {
"ingredients": {
"water": 250,
"milk": 100,
"coffee": 24,
},
"cost": 25.0,
}
}
resources = {
"water": 900,
"milk": 700,
"coffee": 300,
}
profit = 0
password = "Admin"
# TODO: 1. Print the report of the coffee machine resources
# TODO: 2 coin processing system
def coin_processing():
"""Process the coins and returns the total calculation"""
print("Please insert coins.")
total = int(input("How many tens? (N$10): ")) * 10
total += int(input("How manny fives? (N$5): ")) * 5
total += int(input("How many ones? (N$1): ")) * 1
total += int(input("How many 50 cents? (N$0.5): ")) * 0.5
return total
def coffee(drink_name, order_ingredients):
for item in order_ingredients:
resources[item] -= order_ingredients[item]
print(f"Here is your {drink_name}, Enjoy.")
def successful_transaction(money_payed, drink_cost):
if money_payed >= drink_cost:
change = round(money_payed - drink_cost, 2)
print(f"Here is N${change} change")
global profit
profit += drink_cost
return True
else:
print("Sorry that's not enough money. Money returned.")
return False
def sufficient_resources(order_ingredients):
"""Returns true if there is enough resources and false if there isn't"""
for item in order_ingredients:
if order_ingredients[item] > resources[item]:
print(f"Sorry there is not enough {item}.")
return False
return True
is_machine_on = True
while is_machine_on:
order = input("What would you like? (espresso/latte/cappuccino): ").lower()
if order == "off":
ps = input("Enter Password: ")
if ps == password:
is_machine_on = False
else:
print("Wrong Password")
is_machine_on = True
elif order == "report":
ps = input("Enter Password: ")
if ps == password:
print(f"Water: {resources['water']}ml")
print(f"Milk: {resources['milk']}ml")
print(f"Coffee: {resources['coffee']}ml")
print(f"Money: N${profit}")
else:
print("Wrong Password")
else:
drink = MENU[order]
if sufficient_resources(drink['ingredients']):
payment = coin_processing()
if successful_transaction(payment, drink['cost']):
coffee(order, drink['ingredients'])
|
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit Tests for CloudantV1
"""
from datetime import datetime, timezone
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
from ibm_cloud_sdk_core.utils import datetime_to_string, string_to_datetime
import base64
import inspect
import io
import json
import os
import pytest
import re
import requests
import requests.models
import responses
import tempfile
import urllib
import gzip
from ibmcloudant.cloudant_v1 import *
_service = CloudantV1(
authenticator=NoAuthAuthenticator()
)
_base_url = 'http://localhost:5984'
_service.set_service_url(_base_url)
##############################################################################
# Start of Service: Server
##############################################################################
# region
class TestNewInstance():
"""
Test Class for new_instance
"""
def test_new_instance(self):
"""
new_instance()
"""
os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
service = CloudantV1.new_instance(
service_name='TEST_SERVICE',
)
assert service is not None
assert isinstance(service, CloudantV1)
def test_new_instance_without_authenticator(self):
"""
new_instance_without_authenticator()
"""
with pytest.raises(ValueError, match='authenticator must be provided'):
service = CloudantV1.new_instance(
)
class TestGetServerInformation():
"""
Test Class for get_server_information
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
@responses.activate
def test_get_server_information_all_params(self):
"""
get_server_information()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/')
mock_response = '{"couchdb": "couchdb", "features": ["features"], "vendor": {"name": "name", "variant": "variant", "version": "version"}, "version": "version", "features_flags": ["features_flags"]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = _service.get_server_information()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
def test_get_server_information_all_params_with_retries(self):
# Enable retries and run test_get_server_information_all_params.
_service.enable_retries()
self.test_get_server_information_all_params()
# Disable retries and run test_get_server_information_all_params.
_service.disable_retries()
self.test_get_server_information_all_params()
class TestGetMembershipInformation():
"""
Test Class for get_membership_information
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
@responses.activate
def test_get_membership_information_all_params(self):
"""
get_membership_information()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/_membership')
mock_response = '{"all_nodes": ["all_nodes"], "cluster_nodes": ["cluster_nodes"]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = _service.get_membership_information()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
def test_get_membership_information_all_params_with_retries(self):
# Enable retries and run test_get_membership_information_all_params.
_service.enable_retries()
self.test_get_membership_information_all_params()
# Disable retries and run test_get_membership_information_all_params.
_service.disable_retries()
self.test_get_membership_information_all_params()
class TestGetUuids():
"""
Test Class for get_uuids
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
@responses.activate
def test_get_uuids_all_params(self):
"""
get_uuids()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/_uuids')
mock_response = '{"uuids": ["uuids"]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
count = 1
# Invoke method
response = _service.get_uuids(
count=count,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate query params
query_string = responses.calls[0].request.url.split('?',1)[1]
query_string = urllib.parse.unquote_plus(query_string)
assert 'count={}'.format(count) in query_string
def test_get_uuids_all_params_with_retries(self):
# Enable retries and run test_get_uuids_all_params.
_service.enable_retries()
self.test_get_uuids_all_params()
# Disable retries and run test_get_uuids_all_params.
_service.disable_retries()
self.test_get_uuids_all_params()
@responses.activate
def test_get_uuids_required_params(self):
"""
test_get_uuids_required_params()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/_uuids')
mock_response = '{"uuids": ["uuids"]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = _service.get_uuids()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
def test_get_uuids_required_params_with_retries(self):
# Enable retries and run test_get_uuids_required_params.
_service.enable_retries()
self.test_get_uuids_required_params()
# Disable retries and run test_get_uuids_required_params.
_service.disable_retries()
self.test_get_uuids_required_params()
class TestGetCapacityThroughputInformation():
"""
Test Class for get_capacity_throughput_information
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
@responses.activate
def test_get_capacity_throughput_information_all_params(self):
"""
get_capacity_throughput_information()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/_api/v2/user/capacity/throughput')
mock_response = '{"current": {"throughput": {"blocks": 0, "query": 0, "read": 0, "write": 0}}, "target": {"throughput": {"blocks": 0, "query": 0, "read": 0, "write": 0}}}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = _service.get_capacity_throughput_information()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
def test_get_capacity_throughput_information_all_params_with_retries(self):
# Enable retries and run test_get_capacity_throughput_information_all_params.
_service.enable_retries()
self.test_get_capacity_throughput_information_all_params()
# Disable retries and run test_get_capacity_throughput_information_all_params.
_service.disable_retries()
self.test_get_capacity_throughput_information_all_params()
class TestPutCapacityThroughputConfiguration():
"""
Test Class for put_capacity_throughput_configuration
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
@responses.activate
def test_put_capacity_throughput_configuration_all_params(self):
"""
put_capacity_throughput_configuration()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/_api/v2/user/capacity/throughput')
mock_response = '{"current": {"throughput": {"blocks": 0, "query": 0, "read": 0, "write": 0}}, "target": {"throughput": {"blocks": 0, "query": 0, "read": 0, "write": 0}}}'
responses.add(responses.PUT,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
blocks = 0
# Invoke method
response = _service.put_capacity_throughput_configuration(
blocks,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# decompress gzip compressed request body
responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['blocks'] == 0
def test_put_capacity_throughput_configuration_all_params_with_retries(self):
# Enable retries and run test_put_capacity_throughput_configuration_all_params.
_service.enable_retries()
self.test_put_capacity_throughput_configuration_all_params()
# Disable retries and run test_put_capacity_throughput_configuration_all_params.
_service.disable_retries()
self.test_put_capacity_throughput_configuration_all_params()
@responses.activate
def test_put_capacity_throughput_configuration_value_error(self):
"""
test_put_capacity_throughput_configuration_value_error()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/_api/v2/user/capacity/throughput')
mock_response = '{"current": {"throughput": {"blocks": 0, "query": 0, "read": 0, "write": 0}}, "target": {"throughput": {"blocks": 0, "query": 0, "read": 0, "write": 0}}}'
responses.add(responses.PUT,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
blocks = 0
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"blocks": blocks,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
_service.put_capacity_throughput_configuration(**req_copy)
def test_put_capacity_throughput_configuration_value_error_with_retries(self):
    """Repeat the value-error test under both retry configurations."""
    # Retries enabled.
    _service.enable_retries()
    self.test_put_capacity_throughput_configuration_value_error()
    # Retries disabled again.
    _service.disable_retries()
    self.test_put_capacity_throughput_configuration_value_error()
# endregion
##############################################################################
# End of Service: Server
##############################################################################
##############################################################################
# Start of Service: Changes
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # With a noAuth authenticator configured via the environment,
        # the factory should return a usable client instance.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        instance = CloudantV1.new_instance(
            service_name='TEST_SERVICE',
        )
        assert instance is not None
        assert isinstance(instance, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # With no service name there is no way to resolve an authenticator,
        # so construction must fail.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance(
            )
class TestGetDbUpdates():
    """
    Test Class for get_db_updates
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Normalize the encoding exactly once (unquote first so an already
        # encoded URL is not double-encoded).
        request_url = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        # URLs ending in slashes are matched with a tolerant regex instead.
        if re.fullmatch('.*/+', request_url) is not None:
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_get_db_updates_all_params(self):
        """
        get_db_updates()
        """
        # Register the mocked endpoint.
        url = self.preprocess_url(_base_url + '/_db_updates')
        mock_response = '{"last_seq": "last_seq", "results": [{"account": "account", "db_name": "db_name", "seq": "seq", "type": "created"}]}'
        responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200)

        # Parameter values under test.
        feed = 'normal'
        heartbeat = 0
        timeout = 0
        since = '0'

        # Exercise the operation.
        response = _service.get_db_updates(
            feed=feed,
            heartbeat=heartbeat,
            timeout=timeout,
            since=since,
            headers={},
        )

        # Exactly one request was issued and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # Every parameter must appear in the query string.
        query_string = urllib.parse.unquote_plus(responses.calls[0].request.url.split('?', 1)[1])
        for fragment in (
            'feed={}'.format(feed),
            'heartbeat={}'.format(heartbeat),
            'timeout={}'.format(timeout),
            'since={}'.format(since),
        ):
            assert fragment in query_string

    def test_get_db_updates_all_params_with_retries(self):
        """Repeat the all-params test with retries on, then off."""
        _service.enable_retries()
        self.test_get_db_updates_all_params()
        _service.disable_retries()
        self.test_get_db_updates_all_params()

    @responses.activate
    def test_get_db_updates_required_params(self):
        """
        test_get_db_updates_required_params()
        """
        # Register the mocked endpoint.
        url = self.preprocess_url(_base_url + '/_db_updates')
        mock_response = '{"last_seq": "last_seq", "results": [{"account": "account", "db_name": "db_name", "seq": "seq", "type": "created"}]}'
        responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200)

        # Invoke with no optional arguments at all.
        response = _service.get_db_updates()

        # One successful request.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_db_updates_required_params_with_retries(self):
        """Repeat the required-params test with retries on, then off."""
        _service.enable_retries()
        self.test_get_db_updates_required_params()
        _service.disable_retries()
        self.test_get_db_updates_required_params()
class TestPostChanges():
    """
    Test Class for post_changes
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_changes_all_params(self):
        """
        post_changes()

        Exercise every optional parameter and verify both the query string
        and the JSON request body.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_changes')
        mock_response = '{"last_seq": "last_seq", "pending": 7, "results": [{"changes": [{"rev": "rev"}], "deleted": false, "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "seq": "seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_ids = ['testString']
        fields = ['testString']
        selector = {}
        last_event_id = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        feed = 'normal'
        filter_ = 'testString'  # trailing underscore: avoid shadowing builtin filter()
        heartbeat = 0
        include_docs = False
        limit = 0
        seq_interval = 1
        since = '0'
        style = 'main_only'
        timeout = 0
        view = 'testString'
        # Invoke method
        response = _service.post_changes(
            db,
            doc_ids=doc_ids,
            fields=fields,
            selector=selector,
            last_event_id=last_event_id,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            feed=feed,
            filter=filter_,
            heartbeat=heartbeat,
            include_docs=include_docs,
            limit=limit,
            seq_interval=seq_interval,
            since=since,
            style=style,
            timeout=timeout,
            view=view,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'conflicts={}'.format('true' if conflicts else 'false') in query_string
        assert 'descending={}'.format('true' if descending else 'false') in query_string
        assert 'feed={}'.format(feed) in query_string
        assert 'filter={}'.format(filter_) in query_string
        assert 'heartbeat={}'.format(heartbeat) in query_string
        assert 'include_docs={}'.format('true' if include_docs else 'false') in query_string
        assert 'limit={}'.format(limit) in query_string
        assert 'seq_interval={}'.format(seq_interval) in query_string
        assert 'since={}'.format(since) in query_string
        assert 'style={}'.format(style) in query_string
        assert 'timeout={}'.format(timeout) in query_string
        assert 'view={}'.format(view) in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['doc_ids'] == ['testString']
        assert req_body['fields'] == ['testString']
        assert req_body['selector'] == {}

    def test_post_changes_all_params_with_retries(self):
        # Enable retries and run test_post_changes_all_params.
        _service.enable_retries()
        self.test_post_changes_all_params()
        # Disable retries and run test_post_changes_all_params.
        _service.disable_retries()
        self.test_post_changes_all_params()

    @responses.activate
    def test_post_changes_required_params(self):
        """
        test_post_changes_required_params()

        Only the required `db` plus the body parameters are supplied.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_changes')
        mock_response = '{"last_seq": "last_seq", "pending": 7, "results": [{"changes": [{"rev": "rev"}], "deleted": false, "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "seq": "seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_ids = ['testString']
        fields = ['testString']
        selector = {}
        # Invoke method
        response = _service.post_changes(
            db,
            doc_ids=doc_ids,
            fields=fields,
            selector=selector,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['doc_ids'] == ['testString']
        assert req_body['fields'] == ['testString']
        assert req_body['selector'] == {}

    def test_post_changes_required_params_with_retries(self):
        # Enable retries and run test_post_changes_required_params.
        _service.enable_retries()
        self.test_post_changes_required_params()
        # Disable retries and run test_post_changes_required_params.
        _service.disable_retries()
        self.test_post_changes_required_params()

    @responses.activate
    def test_post_changes_value_error(self):
        """
        test_post_changes_value_error()

        Omit each required parameter in turn and expect a ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_changes')
        mock_response = '{"last_seq": "last_seq", "pending": 7, "results": [{"changes": [{"rev": "rev"}], "deleted": false, "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "seq": "seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_ids = ['testString']
        fields = ['testString']
        selector = {}
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict.keys():
            # Fix: compare strings with `!=`, not `is not` — identity
            # comparison relies on CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_changes(**req_copy)

    def test_post_changes_value_error_with_retries(self):
        # Enable retries and run test_post_changes_value_error.
        _service.enable_retries()
        self.test_post_changes_value_error()
        # Disable retries and run test_post_changes_value_error.
        _service.disable_retries()
        self.test_post_changes_value_error()
class TestPostChangesAsStream():
    """
    Test Class for post_changes_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_changes_as_stream_all_params(self):
        """
        post_changes_as_stream()

        Exercise every optional parameter, verify the request, and check the
        streamed response content.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_changes')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_ids = ['0007741142412418284']
        fields = ['testString']
        selector = {}
        last_event_id = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        feed = 'normal'
        filter_ = 'testString'  # trailing underscore: avoid shadowing builtin filter()
        heartbeat = 0
        include_docs = False
        limit = 0
        seq_interval = 1
        since = '0'
        style = 'main_only'
        timeout = 0
        view = 'testString'
        # Invoke method
        response = _service.post_changes_as_stream(
            db,
            doc_ids=doc_ids,
            fields=fields,
            selector=selector,
            last_event_id=last_event_id,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            feed=feed,
            filter=filter_,
            heartbeat=heartbeat,
            include_docs=include_docs,
            limit=limit,
            seq_interval=seq_interval,
            since=since,
            style=style,
            timeout=timeout,
            view=view,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'conflicts={}'.format('true' if conflicts else 'false') in query_string
        assert 'descending={}'.format('true' if descending else 'false') in query_string
        assert 'feed={}'.format(feed) in query_string
        assert 'filter={}'.format(filter_) in query_string
        assert 'heartbeat={}'.format(heartbeat) in query_string
        assert 'include_docs={}'.format('true' if include_docs else 'false') in query_string
        assert 'limit={}'.format(limit) in query_string
        assert 'seq_interval={}'.format(seq_interval) in query_string
        assert 'since={}'.format(since) in query_string
        assert 'style={}'.format(style) in query_string
        assert 'timeout={}'.format(timeout) in query_string
        assert 'view={}'.format(view) in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['doc_ids'] == ['0007741142412418284']
        assert req_body['fields'] == ['testString']
        assert req_body['selector'] == {}
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_changes_as_stream_all_params_with_retries(self):
        # Enable retries and run test_post_changes_as_stream_all_params.
        _service.enable_retries()
        self.test_post_changes_as_stream_all_params()
        # Disable retries and run test_post_changes_as_stream_all_params.
        _service.disable_retries()
        self.test_post_changes_as_stream_all_params()

    @responses.activate
    def test_post_changes_as_stream_required_params(self):
        """
        test_post_changes_as_stream_required_params()

        Only the required `db` plus the body parameters are supplied.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_changes')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_ids = ['0007741142412418284']
        fields = ['testString']
        selector = {}
        # Invoke method
        response = _service.post_changes_as_stream(
            db,
            doc_ids=doc_ids,
            fields=fields,
            selector=selector,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['doc_ids'] == ['0007741142412418284']
        assert req_body['fields'] == ['testString']
        assert req_body['selector'] == {}
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_changes_as_stream_required_params_with_retries(self):
        # Enable retries and run test_post_changes_as_stream_required_params.
        _service.enable_retries()
        self.test_post_changes_as_stream_required_params()
        # Disable retries and run test_post_changes_as_stream_required_params.
        _service.disable_retries()
        self.test_post_changes_as_stream_required_params()

    @responses.activate
    def test_post_changes_as_stream_value_error(self):
        """
        test_post_changes_as_stream_value_error()

        Omit each required parameter in turn and expect a ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_changes')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_ids = ['0007741142412418284']
        fields = ['testString']
        selector = {}
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict.keys():
            # Fix: compare strings with `!=`, not `is not` — identity
            # comparison relies on CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_changes_as_stream(**req_copy)

    def test_post_changes_as_stream_value_error_with_retries(self):
        # Enable retries and run test_post_changes_as_stream_value_error.
        _service.enable_retries()
        self.test_post_changes_as_stream_value_error()
        # Disable retries and run test_post_changes_as_stream_value_error.
        _service.disable_retries()
        self.test_post_changes_as_stream_value_error()
# endregion
##############################################################################
# End of Service: Changes
##############################################################################
##############################################################################
# Start of Service: Databases
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # A noAuth authenticator resolved from the environment should yield
        # a fully constructed client.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        instance = CloudantV1.new_instance(
            service_name='TEST_SERVICE',
        )
        assert instance is not None
        assert isinstance(instance, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # Construction without a resolvable authenticator must fail fast.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance(
            )
class TestHeadDatabase():
    """
    Test Class for head_database
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_head_database_all_params(self):
        """
        head_database()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Invoke method
        response = _service.head_database(
            db,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_database_all_params_with_retries(self):
        # Enable retries and run test_head_database_all_params.
        _service.enable_retries()
        self.test_head_database_all_params()
        # Disable retries and run test_head_database_all_params.
        _service.disable_retries()
        self.test_head_database_all_params()

    @responses.activate
    def test_head_database_value_error(self):
        """
        test_head_database_value_error()

        Omit each required parameter in turn and expect a ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict.keys():
            # Fix: compare strings with `!=`, not `is not` — identity
            # comparison relies on CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_database(**req_copy)

    def test_head_database_value_error_with_retries(self):
        # Enable retries and run test_head_database_value_error.
        _service.enable_retries()
        self.test_head_database_value_error()
        # Disable retries and run test_head_database_value_error.
        _service.disable_retries()
        self.test_head_database_value_error()
class TestGetAllDbs():
    """
    Test Class for get_all_dbs
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Normalize the encoding exactly once (unquote first so an already
        # encoded URL is not double-encoded).
        request_url = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        # URLs ending in slashes are matched with a tolerant regex instead.
        if re.fullmatch('.*/+', request_url) is not None:
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_get_all_dbs_all_params(self):
        """
        get_all_dbs()
        """
        # Register the mocked endpoint.
        url = self.preprocess_url(_base_url + '/_all_dbs')
        mock_response = '["operation_response"]'
        responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200)

        # Parameter values under test.
        descending = False
        endkey = 'testString'
        limit = 0
        skip = 0
        startkey = 'testString'

        # Exercise the operation.
        response = _service.get_all_dbs(
            descending=descending,
            endkey=endkey,
            limit=limit,
            skip=skip,
            startkey=startkey,
            headers={},
        )

        # Exactly one request was issued and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # Every parameter must appear in the query string.
        query_string = urllib.parse.unquote_plus(responses.calls[0].request.url.split('?', 1)[1])
        for fragment in (
            'descending={}'.format('true' if descending else 'false'),
            'endkey={}'.format(endkey),
            'limit={}'.format(limit),
            'skip={}'.format(skip),
            'startkey={}'.format(startkey),
        ):
            assert fragment in query_string

    def test_get_all_dbs_all_params_with_retries(self):
        """Repeat the all-params test with retries on, then off."""
        _service.enable_retries()
        self.test_get_all_dbs_all_params()
        _service.disable_retries()
        self.test_get_all_dbs_all_params()

    @responses.activate
    def test_get_all_dbs_required_params(self):
        """
        test_get_all_dbs_required_params()
        """
        # Register the mocked endpoint.
        url = self.preprocess_url(_base_url + '/_all_dbs')
        mock_response = '["operation_response"]'
        responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200)

        # Invoke with no optional arguments at all.
        response = _service.get_all_dbs()

        # One successful request.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_all_dbs_required_params_with_retries(self):
        """Repeat the required-params test with retries on, then off."""
        _service.enable_retries()
        self.test_get_all_dbs_required_params()
        _service.disable_retries()
        self.test_get_all_dbs_required_params()
class TestPostDbsInfo():
    """
    Test Class for post_dbs_info
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_dbs_info_all_params(self):
        """
        post_dbs_info()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_dbs_info')
        mock_response = '[{"error": "error", "info": {"cluster": {"n": 1, "q": 1, "r": 1, "w": 1}, "committed_update_seq": "committed_update_seq", "compact_running": false, "compacted_seq": "compacted_seq", "db_name": "db_name", "disk_format_version": 19, "doc_count": 0, "doc_del_count": 0, "engine": "engine", "props": {"partitioned": false}, "sizes": {"active": 6, "external": 8, "file": 4}, "update_seq": "update_seq", "uuid": "uuid"}, "key": "key"}]'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        keys = ['testString']
        # Invoke method
        response = _service.post_dbs_info(
            keys,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['keys'] == ['testString']

    def test_post_dbs_info_all_params_with_retries(self):
        # Enable retries and run test_post_dbs_info_all_params.
        _service.enable_retries()
        self.test_post_dbs_info_all_params()
        # Disable retries and run test_post_dbs_info_all_params.
        _service.disable_retries()
        self.test_post_dbs_info_all_params()

    @responses.activate
    def test_post_dbs_info_value_error(self):
        """
        test_post_dbs_info_value_error()

        Omit each required parameter in turn and expect a ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_dbs_info')
        mock_response = '[{"error": "error", "info": {"cluster": {"n": 1, "q": 1, "r": 1, "w": 1}, "committed_update_seq": "committed_update_seq", "compact_running": false, "compacted_seq": "compacted_seq", "db_name": "db_name", "disk_format_version": 19, "doc_count": 0, "doc_del_count": 0, "engine": "engine", "props": {"partitioned": false}, "sizes": {"active": 6, "external": 8, "file": 4}, "update_seq": "update_seq", "uuid": "uuid"}, "key": "key"}]'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        keys = ['testString']
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "keys": keys,
        }
        for param in req_param_dict.keys():
            # Fix: compare strings with `!=`, not `is not` — identity
            # comparison relies on CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_dbs_info(**req_copy)

    def test_post_dbs_info_value_error_with_retries(self):
        # Enable retries and run test_post_dbs_info_value_error.
        _service.enable_retries()
        self.test_post_dbs_info_value_error()
        # Disable retries and run test_post_dbs_info_value_error.
        _service.disable_retries()
        self.test_post_dbs_info_value_error()
class TestDeleteDatabase():
    """
    Test Class for delete_database
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_delete_database_all_params(self):
        """
        delete_database()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"ok": true}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Invoke method
        response = _service.delete_database(
            db,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_delete_database_all_params_with_retries(self):
        # Enable retries and run test_delete_database_all_params.
        _service.enable_retries()
        self.test_delete_database_all_params()
        # Disable retries and run test_delete_database_all_params.
        _service.disable_retries()
        self.test_delete_database_all_params()

    @responses.activate
    def test_delete_database_value_error(self):
        """
        test_delete_database_value_error()

        Omit each required parameter in turn and expect a ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"ok": true}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict.keys():
            # Fix: compare strings with `!=`, not `is not` — identity
            # comparison relies on CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_database(**req_copy)

    def test_delete_database_value_error_with_retries(self):
        # Enable retries and run test_delete_database_value_error.
        _service.enable_retries()
        self.test_delete_database_value_error()
        # Disable retries and run test_delete_database_value_error.
        _service.disable_retries()
        self.test_delete_database_value_error()
class TestGetDatabaseInformation():
    """
    Test Class for get_database_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_database_information_all_params(self):
        """
        get_database_information()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"cluster": {"n": 1, "q": 1, "r": 1, "w": 1}, "committed_update_seq": "committed_update_seq", "compact_running": false, "compacted_seq": "compacted_seq", "db_name": "db_name", "disk_format_version": 19, "doc_count": 0, "doc_del_count": 0, "engine": "engine", "props": {"partitioned": false}, "sizes": {"active": 6, "external": 8, "file": 4}, "update_seq": "update_seq", "uuid": "uuid"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Invoke method
        response = _service.get_database_information(
            db,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_database_information_all_params_with_retries(self):
        # Enable retries and run test_get_database_information_all_params.
        _service.enable_retries()
        self.test_get_database_information_all_params()
        # Disable retries and run test_get_database_information_all_params.
        _service.disable_retries()
        self.test_get_database_information_all_params()

    @responses.activate
    def test_get_database_information_value_error(self):
        """
        test_get_database_information_value_error()

        Omit each required parameter in turn and expect a ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"cluster": {"n": 1, "q": 1, "r": 1, "w": 1}, "committed_update_seq": "committed_update_seq", "compact_running": false, "compacted_seq": "compacted_seq", "db_name": "db_name", "disk_format_version": 19, "doc_count": 0, "doc_del_count": 0, "engine": "engine", "props": {"partitioned": false}, "sizes": {"active": 6, "external": 8, "file": 4}, "update_seq": "update_seq", "uuid": "uuid"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict.keys():
            # Fix: compare strings with `!=`, not `is not` — identity
            # comparison relies on CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_database_information(**req_copy)

    def test_get_database_information_value_error_with_retries(self):
        # Enable retries and run test_get_database_information_value_error.
        _service.enable_retries()
        self.test_get_database_information_value_error()
        # Disable retries and run test_get_database_information_value_error.
        _service.disable_retries()
        self.test_get_database_information_value_error()
class TestPutDatabase():
    """
    Test Class for put_database
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs with a trailing slash become a pattern so the mock matches
        # any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_put_database_all_params(self):
        """
        put_database()

        Create a database passing every supported parameter and validate the
        query string that was sent.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        partitioned = False
        q = 1
        # Invoke method
        response = _service.put_database(
            db,
            partitioned=partitioned,
            q=q,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate query params (booleans serialize as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'partitioned={}'.format('true' if partitioned else 'false') in query_string
        assert 'q={}'.format(q) in query_string

    def test_put_database_all_params_with_retries(self):
        # Enable retries and run test_put_database_all_params.
        _service.enable_retries()
        self.test_put_database_all_params()
        # Disable retries and run test_put_database_all_params.
        _service.disable_retries()
        self.test_put_database_all_params()

    @responses.activate
    def test_put_database_required_params(self):
        """
        test_put_database_required_params()

        Create a database passing only the required parameters.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        # Invoke method
        response = _service.put_database(
            db,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

    def test_put_database_required_params_with_retries(self):
        # Enable retries and run test_put_database_required_params.
        _service.enable_retries()
        self.test_put_database_required_params()
        # Disable retries and run test_put_database_required_params.
        _service.disable_retries()
        self.test_put_database_required_params()

    @responses.activate
    def test_put_database_value_error(self):
        """
        test_put_database_value_error()

        Verify put_database raises ValueError when a required parameter is
        None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Compare by value (!=), not identity (is not): string identity
            # is a CPython implementation detail, not a language guarantee.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.put_database(**req_copy)

    def test_put_database_value_error_with_retries(self):
        # Enable retries and run test_put_database_value_error.
        _service.enable_retries()
        self.test_put_database_value_error()
        # Disable retries and run test_put_database_value_error.
        _service.disable_retries()
        self.test_put_database_value_error()
# endregion
##############################################################################
# End of Service: Databases
##############################################################################
##############################################################################
# Start of Service: Documents
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()

        Construct a service client from environment configuration and check
        that a CloudantV1 instance comes back.
        """
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        service = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()

        With no service name there is no authenticator configuration, so
        construction must fail.
        """
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestHeadDocument():
    """
    Test Class for head_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs with a trailing slash become a pattern so the mock matches
        # any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_head_document_all_params(self):
        """
        head_document()

        Issue a HEAD request with every supported parameter and validate the
        query string that was sent.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        if_none_match = 'testString'
        latest = False
        rev = 'testString'
        # Invoke method
        response = _service.head_document(
            db,
            doc_id,
            if_none_match=if_none_match,
            latest=latest,
            rev=rev,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans serialize as 'true'/'false';
        # if_none_match travels as a request header, not a query param)
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'latest={}'.format('true' if latest else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string

    def test_head_document_all_params_with_retries(self):
        # Enable retries and run test_head_document_all_params.
        _service.enable_retries()
        self.test_head_document_all_params()
        # Disable retries and run test_head_document_all_params.
        _service.disable_retries()
        self.test_head_document_all_params()

    @responses.activate
    def test_head_document_required_params(self):
        """
        test_head_document_required_params()

        Issue a HEAD request passing only the required parameters.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Invoke method
        response = _service.head_document(
            db,
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_document_required_params_with_retries(self):
        # Enable retries and run test_head_document_required_params.
        _service.enable_retries()
        self.test_head_document_required_params()
        # Disable retries and run test_head_document_required_params.
        _service.disable_retries()
        self.test_head_document_required_params()

    @responses.activate
    def test_head_document_value_error(self):
        """
        test_head_document_value_error()

        Verify head_document raises ValueError when any required parameter
        is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Compare by value (!=), not identity (is not): string identity
            # is a CPython implementation detail, not a language guarantee.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_document(**req_copy)

    def test_head_document_value_error_with_retries(self):
        # Enable retries and run test_head_document_value_error.
        _service.enable_retries()
        self.test_head_document_value_error()
        # Disable retries and run test_head_document_value_error.
        _service.disable_retries()
        self.test_head_document_value_error()
class TestPostDocument():
    """
    Test Class for post_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs with a trailing slash become a pattern so the mock matches
        # any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    def _build_document_model(self):
        """Build the Document dict payload shared by the post_document tests.

        The previously generated Attachment model was never referenced
        (the document's '_attachments' field is an empty dict), so it is
        no longer constructed.
        """
        # Construct a dict representation of a Revisions model
        revisions_model = {
            'ids': ['testString'],
            'start': 1,
        }
        # Construct a dict representation of a DocumentRevisionStatus model
        document_revision_status_model = {
            'rev': 'testString',
            'status': 'available',
        }
        # Construct a dict representation of a Document model
        # (note: '_attachments' is deliberately an empty dict)
        document_model = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }
        return document_model

    @responses.activate
    def test_post_document_all_params(self):
        """
        post_document()

        Post a document with every supported parameter and validate the
        query string that was sent.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        document = self._build_document_model()
        content_type = 'application/json'
        batch = 'ok'
        # Invoke method
        response = _service.post_document(
            db,
            document,
            content_type=content_type,
            batch=batch,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'batch={}'.format(batch) in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # NOTE(review): the generator emitted no body-param assertions here;
        # the decompressed body is left in place for future validation.

    def test_post_document_all_params_with_retries(self):
        # Enable retries and run test_post_document_all_params.
        _service.enable_retries()
        self.test_post_document_all_params()
        # Disable retries and run test_post_document_all_params.
        _service.disable_retries()
        self.test_post_document_all_params()

    @responses.activate
    def test_post_document_required_params(self):
        """
        test_post_document_required_params()

        Post a document passing only the required parameters.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        document = self._build_document_model()
        # Invoke method
        response = _service.post_document(
            db,
            document,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # NOTE(review): the generator emitted no body-param assertions here;
        # the decompressed body is left in place for future validation.

    def test_post_document_required_params_with_retries(self):
        # Enable retries and run test_post_document_required_params.
        _service.enable_retries()
        self.test_post_document_required_params()
        # Disable retries and run test_post_document_required_params.
        _service.disable_retries()
        self.test_post_document_required_params()

    @responses.activate
    def test_post_document_value_error(self):
        """
        test_post_document_value_error()

        Verify post_document raises ValueError when any required parameter
        is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        document = self._build_document_model()
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "document": document,
        }
        for param in req_param_dict:
            # Compare by value (!=), not identity (is not): string identity
            # is a CPython implementation detail, not a language guarantee.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_document(**req_copy)

    def test_post_document_value_error_with_retries(self):
        # Enable retries and run test_post_document_value_error.
        _service.enable_retries()
        self.test_post_document_value_error()
        # Disable retries and run test_post_document_value_error.
        _service.disable_retries()
        self.test_post_document_value_error()
class TestPostAllDocs():
    """
    Test Class for post_all_docs
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs with a trailing slash become a pattern so the mock matches
        # any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_all_docs_all_params(self):
        """
        post_all_docs()

        Query _all_docs with every supported parameter and validate the JSON
        request body that was sent.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_all_docs')
        mock_response = '{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 0
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = 'testString'
        # Invoke method
        response = _service.post_all_docs(
            db,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            key=key,
            keys=keys,
            startkey=startkey,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == False
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 0
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['testString']
        assert req_body['startkey'] == 'testString'

    def test_post_all_docs_all_params_with_retries(self):
        # Enable retries and run test_post_all_docs_all_params.
        _service.enable_retries()
        self.test_post_all_docs_all_params()
        # Disable retries and run test_post_all_docs_all_params.
        _service.disable_retries()
        self.test_post_all_docs_all_params()

    @responses.activate
    def test_post_all_docs_value_error(self):
        """
        test_post_all_docs_value_error()

        Verify post_all_docs raises ValueError when a required parameter is
        None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_all_docs')
        mock_response = '{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values (the generated optional query fields were
        # unused in this negative test and have been removed; only the
        # required parameter is exercised)
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Compare by value (!=), not identity (is not): string identity
            # is a CPython implementation detail, not a language guarantee.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_all_docs(**req_copy)

    def test_post_all_docs_value_error_with_retries(self):
        # Enable retries and run test_post_all_docs_value_error.
        _service.enable_retries()
        self.test_post_all_docs_value_error()
        # Disable retries and run test_post_all_docs_value_error.
        _service.disable_retries()
        self.test_post_all_docs_value_error()
class TestPostAllDocsAsStream():
    """
    Test Class for post_all_docs_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs with a trailing slash become a pattern so the mock matches
        # any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_all_docs_as_stream_all_params(self):
        """
        post_all_docs_as_stream()

        Query _all_docs as a stream with every supported parameter, validate
        the JSON request body, and verify the streamed response content.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_all_docs')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = '0007741142412418284'
        # Invoke method
        response = _service.post_all_docs_as_stream(
            db,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            key=key,
            keys=keys,
            startkey=startkey,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == False
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['testString']
        assert req_body['startkey'] == '0007741142412418284'
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_all_docs_as_stream_all_params_with_retries(self):
        # Enable retries and run test_post_all_docs_as_stream_all_params.
        _service.enable_retries()
        self.test_post_all_docs_as_stream_all_params()
        # Disable retries and run test_post_all_docs_as_stream_all_params.
        _service.disable_retries()
        self.test_post_all_docs_as_stream_all_params()

    @responses.activate
    def test_post_all_docs_as_stream_value_error(self):
        """
        test_post_all_docs_as_stream_value_error()

        Verify post_all_docs_as_stream raises ValueError when a required
        parameter is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_all_docs')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values (the generated optional query fields were
        # unused in this negative test and have been removed; only the
        # required parameter is exercised)
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Compare by value (!=), not identity (is not): string identity
            # is a CPython implementation detail, not a language guarantee.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_all_docs_as_stream(**req_copy)

    def test_post_all_docs_as_stream_value_error_with_retries(self):
        # Enable retries and run test_post_all_docs_as_stream_value_error.
        _service.enable_retries()
        self.test_post_all_docs_as_stream_value_error()
        # Disable retries and run test_post_all_docs_as_stream_value_error.
        _service.disable_retries()
        self.test_post_all_docs_as_stream_value_error()
class TestPostAllDocsQueries():
    """
    Test Class for post_all_docs_queries
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs with a trailing slash become a pattern so the mock matches
        # any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    def _build_all_docs_query_model(self):
        """Build the AllDocsQuery dict shared by the post_all_docs_queries tests."""
        # Construct a dict representation of a AllDocsQuery model
        all_docs_query_model = {
            'att_encoding_info': False,
            'attachments': False,
            'conflicts': False,
            'descending': False,
            'include_docs': False,
            'inclusive_end': True,
            'limit': 0,
            'skip': 0,
            'update_seq': False,
            'endkey': 'testString',
            'key': 'testString',
            'keys': ['testString'],
            'startkey': 'testString',
        }
        return all_docs_query_model

    @responses.activate
    def test_post_all_docs_queries_all_params(self):
        """
        post_all_docs_queries()

        Post a list of _all_docs queries and validate the JSON request body
        that was sent.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_all_docs/queries')
        mock_response = '{"results": [{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        all_docs_query_model = self._build_all_docs_query_model()
        db = 'testString'
        queries = [all_docs_query_model]
        # Invoke method
        response = _service.post_all_docs_queries(
            db,
            queries,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['queries'] == [all_docs_query_model]

    def test_post_all_docs_queries_all_params_with_retries(self):
        # Enable retries and run test_post_all_docs_queries_all_params.
        _service.enable_retries()
        self.test_post_all_docs_queries_all_params()
        # Disable retries and run test_post_all_docs_queries_all_params.
        _service.disable_retries()
        self.test_post_all_docs_queries_all_params()

    @responses.activate
    def test_post_all_docs_queries_value_error(self):
        """
        test_post_all_docs_queries_value_error()

        Verify post_all_docs_queries raises ValueError when any required
        parameter is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_all_docs/queries')
        mock_response = '{"results": [{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        all_docs_query_model = self._build_all_docs_query_model()
        db = 'testString'
        queries = [all_docs_query_model]
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "queries": queries,
        }
        for param in req_param_dict:
            # Compare by value (!=), not identity (is not): string identity
            # is a CPython implementation detail, not a language guarantee.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_all_docs_queries(**req_copy)

    def test_post_all_docs_queries_value_error_with_retries(self):
        # Enable retries and run test_post_all_docs_queries_value_error.
        _service.enable_retries()
        self.test_post_all_docs_queries_value_error()
        # Disable retries and run test_post_all_docs_queries_value_error.
        _service.disable_retries()
        self.test_post_all_docs_queries_value_error()
class TestPostAllDocsQueriesAsStream():
"""
Test Class for post_all_docs_queries_as_stream
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
@responses.activate
def test_post_all_docs_queries_as_stream_all_params(self):
"""
post_all_docs_queries_as_stream()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_all_docs/queries')
mock_response = '{"foo": "this is a mock response for JSON streaming"}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Construct a dict representation of a AllDocsQuery model
all_docs_query_model = {}
all_docs_query_model['att_encoding_info'] = False
all_docs_query_model['attachments'] = False
all_docs_query_model['conflicts'] = False
all_docs_query_model['descending'] = False
all_docs_query_model['include_docs'] = False
all_docs_query_model['inclusive_end'] = True
all_docs_query_model['limit'] = 0
all_docs_query_model['skip'] = 0
all_docs_query_model['update_seq'] = False
all_docs_query_model['endkey'] = 'testString'
all_docs_query_model['key'] = 'testString'
all_docs_query_model['keys'] = ['small-appliances:1000042', 'small-appliances:1000043']
all_docs_query_model['startkey'] = 'testString'
# Set up parameter values
db = 'testString'
queries = [all_docs_query_model]
# Invoke method
response = _service.post_all_docs_queries_as_stream(
db,
queries,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# decompress gzip compressed request body
responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['queries'] == [all_docs_query_model]
# Verify streamed JSON response
result = response.get_result()
assert isinstance(result, requests.models.Response)
response_buf = result.iter_content(chunk_size=1024)
assert str(next(response_buf), "utf-8") == mock_response
def test_post_all_docs_queries_as_stream_all_params_with_retries(self):
# Enable retries and run test_post_all_docs_queries_as_stream_all_params.
_service.enable_retries()
self.test_post_all_docs_queries_as_stream_all_params()
# Disable retries and run test_post_all_docs_queries_as_stream_all_params.
_service.disable_retries()
self.test_post_all_docs_queries_as_stream_all_params()
@responses.activate
def test_post_all_docs_queries_as_stream_value_error(self):
"""
test_post_all_docs_queries_as_stream_value_error()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_all_docs/queries')
mock_response = '{"foo": "this is a mock response for JSON streaming"}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Construct a dict representation of a AllDocsQuery model
all_docs_query_model = {}
all_docs_query_model['att_encoding_info'] = False
all_docs_query_model['attachments'] = False
all_docs_query_model['conflicts'] = False
all_docs_query_model['descending'] = False
all_docs_query_model['include_docs'] = False
all_docs_query_model['inclusive_end'] = True
all_docs_query_model['limit'] = 0
all_docs_query_model['skip'] = 0
all_docs_query_model['update_seq'] = False
all_docs_query_model['endkey'] = 'testString'
all_docs_query_model['key'] = 'testString'
all_docs_query_model['keys'] = ['small-appliances:1000042', 'small-appliances:1000043']
all_docs_query_model['startkey'] = 'testString'
# Set up parameter values
db = 'testString'
queries = [all_docs_query_model]
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"db": db,
"queries": queries,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
_service.post_all_docs_queries_as_stream(**req_copy)
def test_post_all_docs_queries_as_stream_value_error_with_retries(self):
# Enable retries and run test_post_all_docs_queries_as_stream_value_error.
_service.enable_retries()
self.test_post_all_docs_queries_as_stream_value_error()
# Disable retries and run test_post_all_docs_queries_as_stream_value_error.
_service.disable_retries()
self.test_post_all_docs_queries_as_stream_value_error()
class TestPostBulkDocs():
    """
    Test Class for post_bulk_docs
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching the mock.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_bulk_docs_all_params(self):
        """
        post_bulk_docs() — happy path with all parameters supplied.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_docs')
        mock_response = '[{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}]'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)

        # Construct a dict representation of a Attachment model
        attachment_model = {}
        attachment_model['content_type'] = 'testString'
        attachment_model['data'] = 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4='
        attachment_model['digest'] = 'testString'
        attachment_model['encoded_length'] = 0
        attachment_model['encoding'] = 'testString'
        attachment_model['follows'] = True
        attachment_model['length'] = 0
        attachment_model['revpos'] = 1
        attachment_model['stub'] = True

        # Construct a dict representation of a Revisions model
        revisions_model = {}
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1

        # Construct a dict representation of a DocumentRevisionStatus model
        document_revision_status_model = {}
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'

        # Construct a dict representation of a Document model
        document_model = {}
        document_model['_attachments'] = {}
        document_model['_conflicts'] = ['testString']
        document_model['_deleted'] = True
        document_model['_deleted_conflicts'] = ['testString']
        document_model['_id'] = 'testString'
        document_model['_local_seq'] = 'testString'
        document_model['_rev'] = 'testString'
        document_model['_revisions'] = revisions_model
        document_model['_revs_info'] = [document_revision_status_model]
        document_model['foo'] = 'testString'

        # Construct a dict representation of a BulkDocs model
        bulk_docs_model = {}
        bulk_docs_model['docs'] = [document_model]
        bulk_docs_model['new_edits'] = True

        # Set up parameter values
        db = 'testString'
        bulk_docs = bulk_docs_model

        # Invoke method
        response = _service.post_bulk_docs(
            db,
            bulk_docs,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body == bulk_docs

    def test_post_bulk_docs_all_params_with_retries(self):
        # Enable retries and run test_post_bulk_docs_all_params.
        _service.enable_retries()
        self.test_post_bulk_docs_all_params()

        # Disable retries and run test_post_bulk_docs_all_params.
        _service.disable_retries()
        self.test_post_bulk_docs_all_params()

    @responses.activate
    def test_post_bulk_docs_value_error(self):
        """
        post_bulk_docs() must raise ValueError when a required param is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_docs')
        mock_response = '[{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}]'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)

        # Construct a dict representation of a Attachment model
        attachment_model = {}
        attachment_model['content_type'] = 'testString'
        attachment_model['data'] = 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4='
        attachment_model['digest'] = 'testString'
        attachment_model['encoded_length'] = 0
        attachment_model['encoding'] = 'testString'
        attachment_model['follows'] = True
        attachment_model['length'] = 0
        attachment_model['revpos'] = 1
        attachment_model['stub'] = True

        # Construct a dict representation of a Revisions model
        revisions_model = {}
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1

        # Construct a dict representation of a DocumentRevisionStatus model
        document_revision_status_model = {}
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'

        # Construct a dict representation of a Document model
        document_model = {}
        document_model['_attachments'] = {}
        document_model['_conflicts'] = ['testString']
        document_model['_deleted'] = True
        document_model['_deleted_conflicts'] = ['testString']
        document_model['_id'] = 'testString'
        document_model['_local_seq'] = 'testString'
        document_model['_rev'] = 'testString'
        document_model['_revisions'] = revisions_model
        document_model['_revs_info'] = [document_revision_status_model]
        document_model['foo'] = 'testString'

        # Construct a dict representation of a BulkDocs model
        bulk_docs_model = {}
        bulk_docs_model['docs'] = [document_model]
        bulk_docs_model['new_edits'] = True

        # Set up parameter values
        db = 'testString'
        bulk_docs = bulk_docs_model

        # Null out each required param in turn and check for a ValueError.
        req_param_dict = {
            "db": db,
            "bulk_docs": bulk_docs,
        }
        for param in req_param_dict.keys():
            # Compare with != (value equality), not 'is not' (identity),
            # which only works for strings via CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_bulk_docs(**req_copy)

    def test_post_bulk_docs_value_error_with_retries(self):
        # Enable retries and run test_post_bulk_docs_value_error.
        _service.enable_retries()
        self.test_post_bulk_docs_value_error()

        # Disable retries and run test_post_bulk_docs_value_error.
        _service.disable_retries()
        self.test_post_bulk_docs_value_error()
class TestPostBulkGet():
    """
    Test Class for post_bulk_get
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching the mock.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_bulk_get_all_params(self):
        """
        post_bulk_get() — happy path with all optional query params supplied.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = '{"results": [{"docs": [{"error": {"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}, "ok": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}}], "id": "id"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {}
        bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
        bulk_get_query_document_model['id'] = 'testString'
        bulk_get_query_document_model['rev'] = 'testString'

        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]
        attachments = False
        att_encoding_info = False
        latest = False
        revs = False

        # Invoke method
        response = _service.post_bulk_get(
            db,
            docs,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            latest=latest,
            revs=revs,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'latest={}'.format('true' if latest else 'false') in query_string
        assert 'revs={}'.format('true' if revs else 'false') in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['docs'] == [bulk_get_query_document_model]

    def test_post_bulk_get_all_params_with_retries(self):
        # Enable retries and run test_post_bulk_get_all_params.
        _service.enable_retries()
        self.test_post_bulk_get_all_params()

        # Disable retries and run test_post_bulk_get_all_params.
        _service.disable_retries()
        self.test_post_bulk_get_all_params()

    @responses.activate
    def test_post_bulk_get_required_params(self):
        """
        post_bulk_get() — required parameters only.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = '{"results": [{"docs": [{"error": {"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}, "ok": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}}], "id": "id"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {}
        bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
        bulk_get_query_document_model['id'] = 'testString'
        bulk_get_query_document_model['rev'] = 'testString'

        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]

        # Invoke method
        response = _service.post_bulk_get(
            db,
            docs,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['docs'] == [bulk_get_query_document_model]

    def test_post_bulk_get_required_params_with_retries(self):
        # Enable retries and run test_post_bulk_get_required_params.
        _service.enable_retries()
        self.test_post_bulk_get_required_params()

        # Disable retries and run test_post_bulk_get_required_params.
        _service.disable_retries()
        self.test_post_bulk_get_required_params()

    @responses.activate
    def test_post_bulk_get_value_error(self):
        """
        post_bulk_get() must raise ValueError when a required param is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = '{"results": [{"docs": [{"error": {"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}, "ok": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}}], "id": "id"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {}
        bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
        bulk_get_query_document_model['id'] = 'testString'
        bulk_get_query_document_model['rev'] = 'testString'

        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]

        # Null out each required param in turn and check for a ValueError.
        req_param_dict = {
            "db": db,
            "docs": docs,
        }
        for param in req_param_dict.keys():
            # Compare with != (value equality), not 'is not' (identity),
            # which only works for strings via CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_bulk_get(**req_copy)

    def test_post_bulk_get_value_error_with_retries(self):
        # Enable retries and run test_post_bulk_get_value_error.
        _service.enable_retries()
        self.test_post_bulk_get_value_error()

        # Disable retries and run test_post_bulk_get_value_error.
        _service.disable_retries()
        self.test_post_bulk_get_value_error()
class TestPostBulkGetAsMixed():
    """
    Test Class for post_bulk_get_as_mixed
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching the mock.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_bulk_get_as_mixed_all_params(self):
        """
        post_bulk_get_as_mixed() — happy path with all optional query params.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='multipart/mixed',
                      status=200)

        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {}
        bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
        bulk_get_query_document_model['id'] = 'order00067'
        bulk_get_query_document_model['rev'] = '3-917fa2381192822767f010b95b45325b'

        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]
        attachments = False
        att_encoding_info = False
        latest = False
        revs = False

        # Invoke method
        response = _service.post_bulk_get_as_mixed(
            db,
            docs,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            latest=latest,
            revs=revs,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'latest={}'.format('true' if latest else 'false') in query_string
        assert 'revs={}'.format('true' if revs else 'false') in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['docs'] == [bulk_get_query_document_model]

    def test_post_bulk_get_as_mixed_all_params_with_retries(self):
        # Enable retries and run test_post_bulk_get_as_mixed_all_params.
        _service.enable_retries()
        self.test_post_bulk_get_as_mixed_all_params()

        # Disable retries and run test_post_bulk_get_as_mixed_all_params.
        _service.disable_retries()
        self.test_post_bulk_get_as_mixed_all_params()

    @responses.activate
    def test_post_bulk_get_as_mixed_required_params(self):
        """
        post_bulk_get_as_mixed() — required parameters only.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='multipart/mixed',
                      status=200)

        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {}
        bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
        bulk_get_query_document_model['id'] = 'order00067'
        bulk_get_query_document_model['rev'] = '3-917fa2381192822767f010b95b45325b'

        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]

        # Invoke method
        response = _service.post_bulk_get_as_mixed(
            db,
            docs,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['docs'] == [bulk_get_query_document_model]

    def test_post_bulk_get_as_mixed_required_params_with_retries(self):
        # Enable retries and run test_post_bulk_get_as_mixed_required_params.
        _service.enable_retries()
        self.test_post_bulk_get_as_mixed_required_params()

        # Disable retries and run test_post_bulk_get_as_mixed_required_params.
        _service.disable_retries()
        self.test_post_bulk_get_as_mixed_required_params()

    @responses.activate
    def test_post_bulk_get_as_mixed_value_error(self):
        """
        post_bulk_get_as_mixed() must raise ValueError when a required
        param is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='multipart/mixed',
                      status=200)

        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {}
        bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
        bulk_get_query_document_model['id'] = 'order00067'
        bulk_get_query_document_model['rev'] = '3-917fa2381192822767f010b95b45325b'

        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]

        # Null out each required param in turn and check for a ValueError.
        req_param_dict = {
            "db": db,
            "docs": docs,
        }
        for param in req_param_dict.keys():
            # Compare with != (value equality), not 'is not' (identity),
            # which only works for strings via CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_bulk_get_as_mixed(**req_copy)

    def test_post_bulk_get_as_mixed_value_error_with_retries(self):
        # Enable retries and run test_post_bulk_get_as_mixed_value_error.
        _service.enable_retries()
        self.test_post_bulk_get_as_mixed_value_error()

        # Disable retries and run test_post_bulk_get_as_mixed_value_error.
        _service.disable_retries()
        self.test_post_bulk_get_as_mixed_value_error()
class TestPostBulkGetAsRelated():
    """
    Test Class for post_bulk_get_as_related
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching the mock.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_bulk_get_as_related_all_params(self):
        """
        post_bulk_get_as_related() — happy path with all optional query params.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='multipart/related',
                      status=200)

        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {}
        bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
        bulk_get_query_document_model['id'] = 'order00067'
        bulk_get_query_document_model['rev'] = '3-917fa2381192822767f010b95b45325b'

        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]
        attachments = False
        att_encoding_info = False
        latest = False
        revs = False

        # Invoke method
        response = _service.post_bulk_get_as_related(
            db,
            docs,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            latest=latest,
            revs=revs,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'latest={}'.format('true' if latest else 'false') in query_string
        assert 'revs={}'.format('true' if revs else 'false') in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['docs'] == [bulk_get_query_document_model]

    def test_post_bulk_get_as_related_all_params_with_retries(self):
        # Enable retries and run test_post_bulk_get_as_related_all_params.
        _service.enable_retries()
        self.test_post_bulk_get_as_related_all_params()

        # Disable retries and run test_post_bulk_get_as_related_all_params.
        _service.disable_retries()
        self.test_post_bulk_get_as_related_all_params()

    @responses.activate
    def test_post_bulk_get_as_related_required_params(self):
        """
        post_bulk_get_as_related() — required parameters only.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='multipart/related',
                      status=200)

        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {}
        bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
        bulk_get_query_document_model['id'] = 'order00067'
        bulk_get_query_document_model['rev'] = '3-917fa2381192822767f010b95b45325b'

        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]

        # Invoke method
        response = _service.post_bulk_get_as_related(
            db,
            docs,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['docs'] == [bulk_get_query_document_model]

    def test_post_bulk_get_as_related_required_params_with_retries(self):
        # Enable retries and run test_post_bulk_get_as_related_required_params.
        _service.enable_retries()
        self.test_post_bulk_get_as_related_required_params()

        # Disable retries and run test_post_bulk_get_as_related_required_params.
        _service.disable_retries()
        self.test_post_bulk_get_as_related_required_params()

    @responses.activate
    def test_post_bulk_get_as_related_value_error(self):
        """
        post_bulk_get_as_related() must raise ValueError when a required
        param is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='multipart/related',
                      status=200)

        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {}
        bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
        bulk_get_query_document_model['id'] = 'order00067'
        bulk_get_query_document_model['rev'] = '3-917fa2381192822767f010b95b45325b'

        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]

        # Null out each required param in turn and check for a ValueError.
        req_param_dict = {
            "db": db,
            "docs": docs,
        }
        for param in req_param_dict.keys():
            # Compare with != (value equality), not 'is not' (identity),
            # which only works for strings via CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_bulk_get_as_related(**req_copy)

    def test_post_bulk_get_as_related_value_error_with_retries(self):
        # Enable retries and run test_post_bulk_get_as_related_value_error.
        _service.enable_retries()
        self.test_post_bulk_get_as_related_value_error()

        # Disable retries and run test_post_bulk_get_as_related_value_error.
        _service.disable_retries()
        self.test_post_bulk_get_as_related_value_error()
class TestPostBulkGetAsStream():
    """
    Test Class for post_bulk_get_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching the mock.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_bulk_get_as_stream_all_params(self):
        """
        post_bulk_get_as_stream() — happy path with all optional query params.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {}
        bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
        bulk_get_query_document_model['id'] = 'order00067'
        bulk_get_query_document_model['rev'] = '3-917fa2381192822767f010b95b45325b'

        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]
        attachments = False
        att_encoding_info = False
        latest = False
        revs = False

        # Invoke method
        response = _service.post_bulk_get_as_stream(
            db,
            docs,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            latest=latest,
            revs=revs,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'latest={}'.format('true' if latest else 'false') in query_string
        assert 'revs={}'.format('true' if revs else 'false') in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['docs'] == [bulk_get_query_document_model]

        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_bulk_get_as_stream_all_params_with_retries(self):
        # Enable retries and run test_post_bulk_get_as_stream_all_params.
        _service.enable_retries()
        self.test_post_bulk_get_as_stream_all_params()

        # Disable retries and run test_post_bulk_get_as_stream_all_params.
        _service.disable_retries()
        self.test_post_bulk_get_as_stream_all_params()

    @responses.activate
    def test_post_bulk_get_as_stream_required_params(self):
        """
        post_bulk_get_as_stream() — required parameters only.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {}
        bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
        bulk_get_query_document_model['id'] = 'order00067'
        bulk_get_query_document_model['rev'] = '3-917fa2381192822767f010b95b45325b'

        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]

        # Invoke method
        response = _service.post_bulk_get_as_stream(
            db,
            docs,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['docs'] == [bulk_get_query_document_model]

        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_bulk_get_as_stream_required_params_with_retries(self):
        # Enable retries and run test_post_bulk_get_as_stream_required_params.
        _service.enable_retries()
        self.test_post_bulk_get_as_stream_required_params()

        # Disable retries and run test_post_bulk_get_as_stream_required_params.
        _service.disable_retries()
        self.test_post_bulk_get_as_stream_required_params()

    @responses.activate
    def test_post_bulk_get_as_stream_value_error(self):
        """
        post_bulk_get_as_stream() must raise ValueError when a required
        param is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {}
        bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
        bulk_get_query_document_model['id'] = 'order00067'
        bulk_get_query_document_model['rev'] = '3-917fa2381192822767f010b95b45325b'

        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]

        # Null out each required param in turn and check for a ValueError.
        req_param_dict = {
            "db": db,
            "docs": docs,
        }
        for param in req_param_dict.keys():
            # Compare with != (value equality), not 'is not' (identity),
            # which only works for strings via CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_bulk_get_as_stream(**req_copy)

    def test_post_bulk_get_as_stream_value_error_with_retries(self):
        # Enable retries and run test_post_bulk_get_as_stream_value_error.
        _service.enable_retries()
        self.test_post_bulk_get_as_stream_value_error()

        # Disable retries and run test_post_bulk_get_as_stream_value_error.
        _service.disable_retries()
        self.test_post_bulk_get_as_stream_value_error()
class TestDeleteDocument():
    """
    Test Class for delete_document
    """

    # Shared mock response body for the delete_document endpoint tests.
    _mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        # URLs with trailing slashes are matched loosely via a regex.
        return re.compile(request_url.rstrip('/') + '/+')

    def _register_mock(self):
        """Register the DELETE endpoint mock used by every test in this class."""
        url = self.preprocess_url(_base_url + '/testString/testString')
        responses.add(responses.DELETE,
                      url,
                      body=self._mock_response,
                      content_type='application/json',
                      status=200)

    @responses.activate
    def test_delete_document_all_params(self):
        """
        delete_document()
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        if_match = 'testString'
        batch = 'ok'
        rev = 'testString'

        # Invoke method
        response = _service.delete_document(
            db,
            doc_id,
            if_match=if_match,
            batch=batch,
            rev=rev,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'batch={}'.format(batch) in query_string
        assert 'rev={}'.format(rev) in query_string

    def test_delete_document_all_params_with_retries(self):
        # Run test_delete_document_all_params with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_delete_document_all_params()

    @responses.activate
    def test_delete_document_required_params(self):
        """
        test_delete_document_required_params()
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'

        # Invoke method
        response = _service.delete_document(
            db,
            doc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_delete_document_required_params_with_retries(self):
        # Run test_delete_document_required_params with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_delete_document_required_params()

    @responses.activate
    def test_delete_document_value_error(self):
        """
        test_delete_document_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Fix: compare keys by equality (!=), not identity (`is not`),
            # which only worked by virtue of CPython string interning.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_document(**req_copy)

    def test_delete_document_value_error_with_retries(self):
        # Run test_delete_document_value_error with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_delete_document_value_error()
class TestGetDocument():
    """
    Test Class for get_document
    """

    # Shared mock response body for the get_document endpoint tests.
    _mock_response = '{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}'

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        # URLs with trailing slashes are matched loosely via a regex.
        return re.compile(request_url.rstrip('/') + '/+')

    def _register_mock(self):
        """Register the GET endpoint mock used by every test in this class."""
        url = self.preprocess_url(_base_url + '/testString/testString')
        responses.add(responses.GET,
                      url,
                      body=self._mock_response,
                      content_type='application/json',
                      status=200)

    @responses.activate
    def test_get_document_all_params(self):
        """
        get_document()
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        if_none_match = 'testString'
        attachments = False
        att_encoding_info = False
        conflicts = False
        deleted_conflicts = False
        latest = False
        local_seq = False
        meta = False
        rev = 'testString'
        revs = False
        revs_info = False

        # Invoke method
        response = _service.get_document(
            db,
            doc_id,
            if_none_match=if_none_match,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            conflicts=conflicts,
            deleted_conflicts=deleted_conflicts,
            latest=latest,
            local_seq=local_seq,
            meta=meta,
            rev=rev,
            revs=revs,
            revs_info=revs_info,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        for name, flag in (
            ('attachments', attachments),
            ('att_encoding_info', att_encoding_info),
            ('conflicts', conflicts),
            ('deleted_conflicts', deleted_conflicts),
            ('latest', latest),
            ('local_seq', local_seq),
            ('meta', meta),
            ('revs', revs),
            ('revs_info', revs_info),
        ):
            assert '{}={}'.format(name, 'true' if flag else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string

    def test_get_document_all_params_with_retries(self):
        # Run test_get_document_all_params with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_document_all_params()

    @responses.activate
    def test_get_document_required_params(self):
        """
        test_get_document_required_params()
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'

        # Invoke method
        response = _service.get_document(
            db,
            doc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_document_required_params_with_retries(self):
        # Run test_get_document_required_params with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_document_required_params()

    @responses.activate
    def test_get_document_value_error(self):
        """
        test_get_document_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Fix: compare keys by equality (!=), not identity (`is not`),
            # which only worked by virtue of CPython string interning.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_document(**req_copy)

    def test_get_document_value_error_with_retries(self):
        # Run test_get_document_value_error with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_document_value_error()
class TestGetDocumentAsMixed():
    """
    Test Class for get_document_as_mixed
    """

    # Shared mock response body for the get_document_as_mixed endpoint tests.
    _mock_response = 'This is a mock binary response.'

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        # URLs with trailing slashes are matched loosely via a regex.
        return re.compile(request_url.rstrip('/') + '/+')

    def _register_mock(self):
        """Register the GET endpoint mock used by every test in this class."""
        url = self.preprocess_url(_base_url + '/testString/testString')
        responses.add(responses.GET,
                      url,
                      body=self._mock_response,
                      content_type='multipart/mixed',
                      status=200)

    @responses.activate
    def test_get_document_as_mixed_all_params(self):
        """
        get_document_as_mixed()
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        if_none_match = 'testString'
        attachments = False
        att_encoding_info = False
        conflicts = False
        deleted_conflicts = False
        latest = False
        local_seq = False
        meta = False
        rev = 'testString'
        revs = False
        revs_info = False

        # Invoke method
        response = _service.get_document_as_mixed(
            db,
            doc_id,
            if_none_match=if_none_match,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            conflicts=conflicts,
            deleted_conflicts=deleted_conflicts,
            latest=latest,
            local_seq=local_seq,
            meta=meta,
            rev=rev,
            revs=revs,
            revs_info=revs_info,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        for name, flag in (
            ('attachments', attachments),
            ('att_encoding_info', att_encoding_info),
            ('conflicts', conflicts),
            ('deleted_conflicts', deleted_conflicts),
            ('latest', latest),
            ('local_seq', local_seq),
            ('meta', meta),
            ('revs', revs),
            ('revs_info', revs_info),
        ):
            assert '{}={}'.format(name, 'true' if flag else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string

    def test_get_document_as_mixed_all_params_with_retries(self):
        # Run test_get_document_as_mixed_all_params with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_document_as_mixed_all_params()

    @responses.activate
    def test_get_document_as_mixed_required_params(self):
        """
        test_get_document_as_mixed_required_params()
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'

        # Invoke method
        response = _service.get_document_as_mixed(
            db,
            doc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_document_as_mixed_required_params_with_retries(self):
        # Run test_get_document_as_mixed_required_params with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_document_as_mixed_required_params()

    @responses.activate
    def test_get_document_as_mixed_value_error(self):
        """
        test_get_document_as_mixed_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Fix: compare keys by equality (!=), not identity (`is not`),
            # which only worked by virtue of CPython string interning.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_document_as_mixed(**req_copy)

    def test_get_document_as_mixed_value_error_with_retries(self):
        # Run test_get_document_as_mixed_value_error with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_document_as_mixed_value_error()
class TestGetDocumentAsRelated():
    """
    Test Class for get_document_as_related
    """

    # Shared mock response body for the get_document_as_related endpoint tests.
    _mock_response = 'This is a mock binary response.'

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        # URLs with trailing slashes are matched loosely via a regex.
        return re.compile(request_url.rstrip('/') + '/+')

    def _register_mock(self):
        """Register the GET endpoint mock used by every test in this class."""
        url = self.preprocess_url(_base_url + '/testString/testString')
        responses.add(responses.GET,
                      url,
                      body=self._mock_response,
                      content_type='multipart/related',
                      status=200)

    @responses.activate
    def test_get_document_as_related_all_params(self):
        """
        get_document_as_related()
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        if_none_match = 'testString'
        attachments = False
        att_encoding_info = False
        conflicts = False
        deleted_conflicts = False
        latest = False
        local_seq = False
        meta = False
        rev = 'testString'
        revs = False
        revs_info = False

        # Invoke method
        response = _service.get_document_as_related(
            db,
            doc_id,
            if_none_match=if_none_match,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            conflicts=conflicts,
            deleted_conflicts=deleted_conflicts,
            latest=latest,
            local_seq=local_seq,
            meta=meta,
            rev=rev,
            revs=revs,
            revs_info=revs_info,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        for name, flag in (
            ('attachments', attachments),
            ('att_encoding_info', att_encoding_info),
            ('conflicts', conflicts),
            ('deleted_conflicts', deleted_conflicts),
            ('latest', latest),
            ('local_seq', local_seq),
            ('meta', meta),
            ('revs', revs),
            ('revs_info', revs_info),
        ):
            assert '{}={}'.format(name, 'true' if flag else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string

    def test_get_document_as_related_all_params_with_retries(self):
        # Run test_get_document_as_related_all_params with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_document_as_related_all_params()

    @responses.activate
    def test_get_document_as_related_required_params(self):
        """
        test_get_document_as_related_required_params()
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'

        # Invoke method
        response = _service.get_document_as_related(
            db,
            doc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_document_as_related_required_params_with_retries(self):
        # Run test_get_document_as_related_required_params with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_document_as_related_required_params()

    @responses.activate
    def test_get_document_as_related_value_error(self):
        """
        test_get_document_as_related_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Fix: compare keys by equality (!=), not identity (`is not`),
            # which only worked by virtue of CPython string interning.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_document_as_related(**req_copy)

    def test_get_document_as_related_value_error_with_retries(self):
        # Run test_get_document_as_related_value_error with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_document_as_related_value_error()
class TestGetDocumentAsStream():
    """
    Test Class for get_document_as_stream
    """

    # Shared mock response body for the get_document_as_stream endpoint tests.
    _mock_response = '{"foo": "this is a mock response for JSON streaming"}'

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        # URLs with trailing slashes are matched loosely via a regex.
        return re.compile(request_url.rstrip('/') + '/+')

    def _register_mock(self):
        """Register the GET endpoint mock used by every test in this class."""
        url = self.preprocess_url(_base_url + '/testString/testString')
        responses.add(responses.GET,
                      url,
                      body=self._mock_response,
                      content_type='application/json',
                      status=200)

    def _verify_stream(self, response):
        """Check that the streamed JSON response replays the mock body."""
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == self._mock_response

    @responses.activate
    def test_get_document_as_stream_all_params(self):
        """
        get_document_as_stream()
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        if_none_match = 'testString'
        attachments = False
        att_encoding_info = False
        conflicts = False
        deleted_conflicts = False
        latest = False
        local_seq = False
        meta = False
        rev = 'testString'
        revs = False
        revs_info = False

        # Invoke method
        response = _service.get_document_as_stream(
            db,
            doc_id,
            if_none_match=if_none_match,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            conflicts=conflicts,
            deleted_conflicts=deleted_conflicts,
            latest=latest,
            local_seq=local_seq,
            meta=meta,
            rev=rev,
            revs=revs,
            revs_info=revs_info,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        for name, flag in (
            ('attachments', attachments),
            ('att_encoding_info', att_encoding_info),
            ('conflicts', conflicts),
            ('deleted_conflicts', deleted_conflicts),
            ('latest', latest),
            ('local_seq', local_seq),
            ('meta', meta),
            ('revs', revs),
            ('revs_info', revs_info),
        ):
            assert '{}={}'.format(name, 'true' if flag else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string
        # Verify streamed JSON response
        self._verify_stream(response)

    def test_get_document_as_stream_all_params_with_retries(self):
        # Run test_get_document_as_stream_all_params with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_document_as_stream_all_params()

    @responses.activate
    def test_get_document_as_stream_required_params(self):
        """
        test_get_document_as_stream_required_params()
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'

        # Invoke method
        response = _service.get_document_as_stream(
            db,
            doc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Verify streamed JSON response
        self._verify_stream(response)

    def test_get_document_as_stream_required_params_with_retries(self):
        # Run test_get_document_as_stream_required_params with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_document_as_stream_required_params()

    @responses.activate
    def test_get_document_as_stream_value_error(self):
        """
        test_get_document_as_stream_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        self._register_mock()

        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Fix: compare keys by equality (!=), not identity (`is not`),
            # which only worked by virtue of CPython string interning.
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_document_as_stream(**req_copy)

    def test_get_document_as_stream_value_error_with_retries(self):
        # Run test_get_document_as_stream_value_error with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_document_as_stream_value_error()
class TestPutDocument():
"""
Test Class for put_document
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
@responses.activate
def test_put_document_all_params(self):
"""
put_document()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/testString')
mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
responses.add(responses.PUT,
url,
body=mock_response,
content_type='application/json',
status=201)
# Construct a dict representation of a Attachment model
attachment_model = {}
attachment_model['content_type'] = 'testString'
attachment_model['data'] = 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4='
attachment_model['digest'] = 'testString'
attachment_model['encoded_length'] = 0
attachment_model['encoding'] = 'testString'
attachment_model['follows'] = True
attachment_model['length'] = 0
attachment_model['revpos'] = 1
attachment_model['stub'] = True
# Construct a dict representation of a Revisions model
revisions_model = {}
revisions_model['ids'] = ['testString']
revisions_model['start'] = 1
# Construct a dict representation of a DocumentRevisionStatus model
document_revision_status_model = {}
document_revision_status_model['rev'] = 'testString'
document_revision_status_model['status'] = 'available'
# Construct a dict representation of a Document model
document_model = {}
document_model['_attachments'] = {}
document_model['_conflicts'] = ['testString']
document_model['_deleted'] = True
document_model['_deleted_conflicts'] = ['testString']
document_model['_id'] = 'exampleid'
document_model['_local_seq'] = 'testString'
document_model['_rev'] = 'testString'
document_model['_revisions'] = revisions_model
document_model['_revs_info'] = [document_revision_status_model]
document_model['brand'] = 'Foo'
document_model['colours'] = '["red","green","black","blue"]'
document_model['description'] = 'Slim Colourful Design Electronic Cooking Appliance for ...'
document_model['image'] = 'assets/img/0gmsnghhew.jpg'
document_model['keywords'] = '["Foo","Scales","Weight","Digital","Kitchen"]'
document_model['name'] = 'Digital Kitchen Scales'
document_model['price'] = '14.99'
document_model['productid'] = '1000042'
document_model['taxonomy'] = '["Home","Kitchen","Small Appliances"]'
document_model['type'] = 'product'
# Set up parameter values
db = 'testString'
doc_id = 'testString'
document = document_model
content_type = 'application/json'
if_match = 'testString'
batch = 'ok'
new_edits = False
rev = 'testString'
# Invoke method
response = _service.put_document(
db,
doc_id,
document,
content_type=content_type,
if_match=if_match,
batch=batch,
new_edits=new_edits,
rev=rev,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 201
# Validate query params
query_string = responses.calls[0].request.url.split('?',1)[1]
query_string = urllib.parse.unquote_plus(query_string)
assert 'batch={}'.format(batch) in query_string
assert 'new_edits={}'.format('true' if new_edits else 'false') in query_string
assert 'rev={}'.format(rev) in query_string
# decompress gzip compressed request body
responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
# Validate body params
def test_put_document_all_params_with_retries(self):
# Enable retries and run test_put_document_all_params.
_service.enable_retries()
self.test_put_document_all_params()
# Disable retries and run test_put_document_all_params.
_service.disable_retries()
self.test_put_document_all_params()
@responses.activate
def test_put_document_required_params(self):
"""
test_put_document_required_params()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/testString')
mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
responses.add(responses.PUT,
url,
body=mock_response,
content_type='application/json',
status=201)
# Construct a dict representation of a Attachment model
attachment_model = {}
attachment_model['content_type'] = 'testString'
attachment_model['data'] = 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4='
attachment_model['digest'] = 'testString'
attachment_model['encoded_length'] = 0
attachment_model['encoding'] = 'testString'
attachment_model['follows'] = True
attachment_model['length'] = 0
attachment_model['revpos'] = 1
attachment_model['stub'] = True
# Construct a dict representation of a Revisions model
revisions_model = {}
revisions_model['ids'] = ['testString']
revisions_model['start'] = 1
# Construct a dict representation of a DocumentRevisionStatus model
document_revision_status_model = {}
document_revision_status_model['rev'] = 'testString'
document_revision_status_model['status'] = 'available'
# Construct a dict representation of a Document model
document_model = {}
document_model['_attachments'] = {}
document_model['_conflicts'] = ['testString']
document_model['_deleted'] = True
document_model['_deleted_conflicts'] = ['testString']
document_model['_id'] = 'exampleid'
document_model['_local_seq'] = 'testString'
document_model['_rev'] = 'testString'
document_model['_revisions'] = revisions_model
document_model['_revs_info'] = [document_revision_status_model]
document_model['brand'] = 'Foo'
document_model['colours'] = '["red","green","black","blue"]'
document_model['description'] = 'Slim Colourful Design Electronic Cooking Appliance for ...'
document_model['image'] = 'assets/img/0gmsnghhew.jpg'
document_model['keywords'] = '["Foo","Scales","Weight","Digital","Kitchen"]'
document_model['name'] = 'Digital Kitchen Scales'
document_model['price'] = '14.99'
document_model['productid'] = '1000042'
document_model['taxonomy'] = '["Home","Kitchen","Small Appliances"]'
document_model['type'] = 'product'
# Set up parameter values
db = 'testString'
doc_id = 'testString'
document = document_model
# Invoke method
response = _service.put_document(
db,
doc_id,
document,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 201
# decompress gzip compressed request body
responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
# Validate body params
def test_put_document_required_params_with_retries(self):
# Enable retries and run test_put_document_required_params.
_service.enable_retries()
self.test_put_document_required_params()
# Disable retries and run test_put_document_required_params.
_service.disable_retries()
self.test_put_document_required_params()
@responses.activate
def test_put_document_value_error(self):
"""
test_put_document_value_error()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/testString')
mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
responses.add(responses.PUT,
url,
body=mock_response,
content_type='application/json',
status=201)
# Construct a dict representation of a Attachment model
attachment_model = {}
attachment_model['content_type'] = 'testString'
attachment_model['data'] = 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4='
attachment_model['digest'] = 'testString'
attachment_model['encoded_length'] = 0
attachment_model['encoding'] = 'testString'
attachment_model['follows'] = True
attachment_model['length'] = 0
attachment_model['revpos'] = 1
attachment_model['stub'] = True
# Construct a dict representation of a Revisions model
revisions_model = {}
revisions_model['ids'] = ['testString']
revisions_model['start'] = 1
# Construct a dict representation of a DocumentRevisionStatus model
document_revision_status_model = {}
document_revision_status_model['rev'] = 'testString'
document_revision_status_model['status'] = 'available'
# Construct a dict representation of a Document model
document_model = {}
document_model['_attachments'] = {}
document_model['_conflicts'] = ['testString']
document_model['_deleted'] = True
document_model['_deleted_conflicts'] = ['testString']
document_model['_id'] = 'exampleid'
document_model['_local_seq'] = 'testString'
document_model['_rev'] = 'testString'
document_model['_revisions'] = revisions_model
document_model['_revs_info'] = [document_revision_status_model]
document_model['brand'] = 'Foo'
document_model['colours'] = '["red","green","black","blue"]'
document_model['description'] = 'Slim Colourful Design Electronic Cooking Appliance for ...'
document_model['image'] = 'assets/img/0gmsnghhew.jpg'
document_model['keywords'] = '["Foo","Scales","Weight","Digital","Kitchen"]'
document_model['name'] = 'Digital Kitchen Scales'
document_model['price'] = '14.99'
document_model['productid'] = '1000042'
document_model['taxonomy'] = '["Home","Kitchen","Small Appliances"]'
document_model['type'] = 'product'
# Set up parameter values
db = 'testString'
doc_id = 'testString'
document = document_model
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"db": db,
"doc_id": doc_id,
"document": document,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
_service.put_document(**req_copy)
def test_put_document_value_error_with_retries(self):
# Enable retries and run test_put_document_value_error.
_service.enable_retries()
self.test_put_document_value_error()
# Disable retries and run test_put_document_value_error.
_service.disable_retries()
self.test_put_document_value_error()
# endregion
##############################################################################
# End of Service: Documents
##############################################################################
##############################################################################
# Start of Service: DesignDocuments
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # Configure authentication for the factory entirely via env vars.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'

        svc = CloudantV1.new_instance(
            service_name='TEST_SERVICE',
        )

        # The factory must return a concrete, correctly-typed client.
        assert svc is not None
        assert isinstance(svc, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # With no service name and no env config, no authenticator can be
        # built and the factory must refuse to construct a client.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestHeadDesignDocument():
    """
    Test Class for head_design_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # URLs ending in '/' are returned as a regex so the responses
            # mock matches any number of trailing slashes.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_head_design_document_all_params(self):
        """
        head_design_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        if_none_match = 'testString'

        # Invoke method
        response = _service.head_design_document(
            db,
            ddoc,
            if_none_match=if_none_match,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_design_document_all_params_with_retries(self):
        # Run test_head_design_document_all_params with retries on, then off.
        _service.enable_retries()
        self.test_head_design_document_all_params()
        _service.disable_retries()
        self.test_head_design_document_all_params()

    @responses.activate
    def test_head_design_document_required_params(self):
        """
        test_head_design_document_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'

        # Invoke method
        response = _service.head_design_document(
            db,
            ddoc,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_design_document_required_params_with_retries(self):
        # Run test_head_design_document_required_params with retries on, then off.
        _service.enable_retries()
        self.test_head_design_document_required_params()
        _service.disable_retries()
        self.test_head_design_document_required_params()

    @responses.activate
    def test_head_design_document_value_error(self):
        """
        test_head_design_document_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
        }
        for param in req_param_dict.keys():
            # Compare keys with != (value equality), not `is not` identity.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_design_document(**req_copy)

    def test_head_design_document_value_error_with_retries(self):
        # Run test_head_design_document_value_error with retries on, then off.
        _service.enable_retries()
        self.test_head_design_document_value_error()
        _service.disable_retries()
        self.test_head_design_document_value_error()
class TestDeleteDesignDocument():
    """
    Test Class for delete_design_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # URLs ending in '/' are returned as a regex so the responses
            # mock matches any number of trailing slashes.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_delete_design_document_all_params(self):
        """
        delete_design_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        if_match = 'testString'
        batch = 'ok'
        rev = 'testString'

        # Invoke method
        response = _service.delete_design_document(
            db,
            ddoc,
            if_match=if_match,
            batch=batch,
            rev=rev,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'batch={}'.format(batch) in query_string
        assert 'rev={}'.format(rev) in query_string

    def test_delete_design_document_all_params_with_retries(self):
        # Run test_delete_design_document_all_params with retries on, then off.
        _service.enable_retries()
        self.test_delete_design_document_all_params()
        _service.disable_retries()
        self.test_delete_design_document_all_params()

    @responses.activate
    def test_delete_design_document_required_params(self):
        """
        test_delete_design_document_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'

        # Invoke method
        response = _service.delete_design_document(
            db,
            ddoc,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_delete_design_document_required_params_with_retries(self):
        # Run test_delete_design_document_required_params with retries on, then off.
        _service.enable_retries()
        self.test_delete_design_document_required_params()
        _service.disable_retries()
        self.test_delete_design_document_required_params()

    @responses.activate
    def test_delete_design_document_value_error(self):
        """
        test_delete_design_document_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
        }
        for param in req_param_dict.keys():
            # Compare keys with != (value equality), not `is not` identity.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_design_document(**req_copy)

    def test_delete_design_document_value_error_with_retries(self):
        # Run test_delete_design_document_value_error with retries on, then off.
        _service.enable_retries()
        self.test_delete_design_document_value_error()
        _service.disable_retries()
        self.test_delete_design_document_value_error()
class TestGetDesignDocument():
    """
    Test Class for get_design_document
    """

    # Mocked DesignDocument JSON response body; identical in every test of
    # this class, so it is defined once instead of triplicated inline.
    _MOCK_RESPONSE = '{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}], "autoupdate": true, "filters": {"mapKey": "inner"}, "indexes": {"mapKey": {"analyzer": {"name": "classic", "stopwords": ["stopwords"], "fields": {"mapKey": {"name": "classic", "stopwords": ["stopwords"]}}}, "index": "index"}}, "language": "javascript", "options": {"partitioned": false}, "validate_doc_update": "validate_doc_update", "views": {"mapKey": {"map": "map", "reduce": "reduce"}}, "st_indexes": {"mapKey": {"index": "index"}}}'

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # URLs ending in '/' are returned as a regex so the responses
            # mock matches any number of trailing slashes.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_design_document_all_params(self):
        """
        get_design_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        mock_response = self._MOCK_RESPONSE
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        if_none_match = 'testString'
        attachments = False
        att_encoding_info = False
        conflicts = False
        deleted_conflicts = False
        latest = False
        local_seq = False
        meta = False
        rev = 'testString'
        revs = False
        revs_info = False

        # Invoke method
        response = _service.get_design_document(
            db,
            ddoc,
            if_none_match=if_none_match,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            conflicts=conflicts,
            deleted_conflicts=deleted_conflicts,
            latest=latest,
            local_seq=local_seq,
            meta=meta,
            rev=rev,
            revs=revs,
            revs_info=revs_info,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'conflicts={}'.format('true' if conflicts else 'false') in query_string
        assert 'deleted_conflicts={}'.format('true' if deleted_conflicts else 'false') in query_string
        assert 'latest={}'.format('true' if latest else 'false') in query_string
        assert 'local_seq={}'.format('true' if local_seq else 'false') in query_string
        assert 'meta={}'.format('true' if meta else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string
        assert 'revs={}'.format('true' if revs else 'false') in query_string
        assert 'revs_info={}'.format('true' if revs_info else 'false') in query_string

    def test_get_design_document_all_params_with_retries(self):
        # Run test_get_design_document_all_params with retries on, then off.
        _service.enable_retries()
        self.test_get_design_document_all_params()
        _service.disable_retries()
        self.test_get_design_document_all_params()

    @responses.activate
    def test_get_design_document_required_params(self):
        """
        test_get_design_document_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        mock_response = self._MOCK_RESPONSE
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'

        # Invoke method
        response = _service.get_design_document(
            db,
            ddoc,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_design_document_required_params_with_retries(self):
        # Run test_get_design_document_required_params with retries on, then off.
        _service.enable_retries()
        self.test_get_design_document_required_params()
        _service.disable_retries()
        self.test_get_design_document_required_params()

    @responses.activate
    def test_get_design_document_value_error(self):
        """
        test_get_design_document_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        mock_response = self._MOCK_RESPONSE
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
        }
        for param in req_param_dict.keys():
            # Compare keys with != (value equality), not `is not` identity.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_design_document(**req_copy)

    def test_get_design_document_value_error_with_retries(self):
        # Run test_get_design_document_value_error with retries on, then off.
        _service.enable_retries()
        self.test_get_design_document_value_error()
        _service.disable_retries()
        self.test_get_design_document_value_error()
class TestPutDesignDocument():
    """
    Test Class for put_design_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # URLs ending in '/' are returned as a regex so the responses
            # mock matches any number of trailing slashes.
            return re.compile(request_url.rstrip('/') + '/+')

    def _sample_design_document(self):
        """
        Build the DesignDocument dict payload shared by every test below.

        The generated Attachment, Analyzer, SearchIndexDefinition,
        DesignDocumentViewsMapReduce and GeoIndexDefinition models were never
        referenced by this payload (its '_attachments', 'indexes', 'views'
        and 'st_indexes' fields are deliberately empty), so they are omitted.
        """
        # Construct a dict representation of a Revisions model
        revisions_model = {
            'ids': ['testString'],
            'start': 1,
        }

        # Construct a dict representation of a DocumentRevisionStatus model
        document_revision_status_model = {
            'rev': 'testString',
            'status': 'available',
        }

        # Construct a dict representation of a DesignDocumentOptions model
        design_document_options_model = {
            'partitioned': True,
        }

        # Construct a dict representation of a DesignDocument model
        return {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'autoupdate': True,
            'filters': {},
            'indexes': {},
            'language': 'javascript',
            'options': design_document_options_model,
            'validate_doc_update': 'testString',
            'views': {},
            'st_indexes': {},
            'foo': 'testString',
        }

    @responses.activate
    def test_put_design_document_all_params(self):
        """
        put_design_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        design_document = self._sample_design_document()
        if_match = 'testString'
        batch = 'ok'
        new_edits = False
        rev = 'testString'

        # Invoke method
        response = _service.put_design_document(
            db,
            ddoc,
            design_document,
            if_match=if_match,
            batch=batch,
            new_edits=new_edits,
            rev=rev,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'batch={}'.format(batch) in query_string
        assert 'new_edits={}'.format('true' if new_edits else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string

        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)

        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body == design_document

    def test_put_design_document_all_params_with_retries(self):
        # Run test_put_design_document_all_params with retries on, then off.
        _service.enable_retries()
        self.test_put_design_document_all_params()
        _service.disable_retries()
        self.test_put_design_document_all_params()

    @responses.activate
    def test_put_design_document_required_params(self):
        """
        test_put_design_document_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        design_document = self._sample_design_document()

        # Invoke method
        response = _service.put_design_document(
            db,
            ddoc,
            design_document,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)

        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body == design_document

    def test_put_design_document_required_params_with_retries(self):
        # Run test_put_design_document_required_params with retries on, then off.
        _service.enable_retries()
        self.test_put_design_document_required_params()
        _service.disable_retries()
        self.test_put_design_document_required_params()

    @responses.activate
    def test_put_design_document_value_error(self):
        """
        test_put_design_document_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        design_document = self._sample_design_document()

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "design_document": design_document,
        }
        for param in req_param_dict.keys():
            # Compare keys with != (value equality); the original `is not`
            # relied on string object identity, an interning accident.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.put_design_document(**req_copy)

    def test_put_design_document_value_error_with_retries(self):
        # Run test_put_design_document_value_error with retries on, then off.
        _service.enable_retries()
        self.test_put_design_document_value_error()
        _service.disable_retries()
        self.test_put_design_document_value_error()
class TestGetDesignDocumentInformation():
    """
    Test Class for get_design_document_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # If the URL ends in slash(es), return a regex tolerating any number of them;
        # otherwise the normalized string itself is the exact match.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_design_document_information_all_params(self):
        """
        get_design_document_information()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_info')
        mock_response = '{"name": "name", "view_index": {"compact_running": false, "language": "language", "signature": "signature", "sizes": {"active": 6, "external": 8, "file": 4}, "updater_running": false, "waiting_clients": 0, "waiting_commit": true}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'

        # Invoke method
        response = _service.get_design_document_information(
            db,
            ddoc,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_design_document_information_all_params_with_retries(self):
        """
        Run test_get_design_document_information_all_params with and without retries.
        """
        # Enable retries and run test_get_design_document_information_all_params.
        _service.enable_retries()
        self.test_get_design_document_information_all_params()

        # Disable retries and run test_get_design_document_information_all_params.
        _service.disable_retries()
        self.test_get_design_document_information_all_params()

    @responses.activate
    def test_get_design_document_information_value_error(self):
        """
        test_get_design_document_information_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_info')
        mock_response = '{"name": "name", "view_index": {"compact_running": false, "language": "language", "signature": "signature", "sizes": {"active": 6, "external": 8, "file": 4}, "updater_running": false, "waiting_clients": 0, "waiting_commit": true}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
        }
        for param in req_param_dict.keys():
            # Compare by value (!=), not identity (is not): string identity is
            # an implementation detail and fragile across interpreters.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_design_document_information(**req_copy)

    def test_get_design_document_information_value_error_with_retries(self):
        """
        Run test_get_design_document_information_value_error with and without retries.
        """
        # Enable retries and run test_get_design_document_information_value_error.
        _service.enable_retries()
        self.test_get_design_document_information_value_error()

        # Disable retries and run test_get_design_document_information_value_error.
        _service.disable_retries()
        self.test_get_design_document_information_value_error()
class TestPostDesignDocs():
    """
    Test Class for post_design_docs
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # If the URL ends in slash(es), return a regex tolerating any number of them;
        # otherwise the normalized string itself is the exact match.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_design_docs_all_params(self):
        """
        post_design_docs()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design_docs')
        mock_response = '{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = '0007741142412418284'
        accept = 'application/json'

        # Invoke method
        response = _service.post_design_docs(
            db,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            key=key,
            keys=keys,
            startkey=startkey,
            accept=accept,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == False
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['testString']
        assert req_body['startkey'] == '0007741142412418284'

    def test_post_design_docs_all_params_with_retries(self):
        """
        Run test_post_design_docs_all_params with and without retries.
        """
        # Enable retries and run test_post_design_docs_all_params.
        _service.enable_retries()
        self.test_post_design_docs_all_params()

        # Disable retries and run test_post_design_docs_all_params.
        _service.disable_retries()
        self.test_post_design_docs_all_params()

    @responses.activate
    def test_post_design_docs_required_params(self):
        """
        test_post_design_docs_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design_docs')
        mock_response = '{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = '0007741142412418284'

        # Invoke method
        response = _service.post_design_docs(
            db,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            key=key,
            keys=keys,
            startkey=startkey,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == False
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['testString']
        assert req_body['startkey'] == '0007741142412418284'

    def test_post_design_docs_required_params_with_retries(self):
        """
        Run test_post_design_docs_required_params with and without retries.
        """
        # Enable retries and run test_post_design_docs_required_params.
        _service.enable_retries()
        self.test_post_design_docs_required_params()

        # Disable retries and run test_post_design_docs_required_params.
        _service.disable_retries()
        self.test_post_design_docs_required_params()

    @responses.activate
    def test_post_design_docs_value_error(self):
        """
        test_post_design_docs_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design_docs')
        mock_response = '{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = '0007741142412418284'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict.keys():
            # Compare by value (!=), not identity (is not): string identity is
            # an implementation detail and fragile across interpreters.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_design_docs(**req_copy)

    def test_post_design_docs_value_error_with_retries(self):
        """
        Run test_post_design_docs_value_error with and without retries.
        """
        # Enable retries and run test_post_design_docs_value_error.
        _service.enable_retries()
        self.test_post_design_docs_value_error()

        # Disable retries and run test_post_design_docs_value_error.
        _service.disable_retries()
        self.test_post_design_docs_value_error()
class TestPostDesignDocsQueries():
    """
    Test Class for post_design_docs_queries
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # If the URL ends in slash(es), return a regex tolerating any number of them;
        # otherwise the normalized string itself is the exact match.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_design_docs_queries_all_params(self):
        """
        post_design_docs_queries()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design_docs/queries')
        mock_response = '{"results": [{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a AllDocsQuery model
        all_docs_query_model = {}
        all_docs_query_model['att_encoding_info'] = False
        all_docs_query_model['attachments'] = False
        all_docs_query_model['conflicts'] = False
        all_docs_query_model['descending'] = False
        all_docs_query_model['include_docs'] = False
        all_docs_query_model['inclusive_end'] = True
        all_docs_query_model['limit'] = 0
        all_docs_query_model['skip'] = 0
        all_docs_query_model['update_seq'] = False
        all_docs_query_model['endkey'] = 'testString'
        all_docs_query_model['key'] = 'testString'
        all_docs_query_model['keys'] = ['small-appliances:1000042', 'small-appliances:1000043']
        all_docs_query_model['startkey'] = 'testString'

        # Set up parameter values
        db = 'testString'
        queries = [all_docs_query_model]
        accept = 'application/json'

        # Invoke method
        response = _service.post_design_docs_queries(
            db,
            queries,
            accept=accept,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['queries'] == [all_docs_query_model]

    def test_post_design_docs_queries_all_params_with_retries(self):
        """
        Run test_post_design_docs_queries_all_params with and without retries.
        """
        # Enable retries and run test_post_design_docs_queries_all_params.
        _service.enable_retries()
        self.test_post_design_docs_queries_all_params()

        # Disable retries and run test_post_design_docs_queries_all_params.
        _service.disable_retries()
        self.test_post_design_docs_queries_all_params()

    @responses.activate
    def test_post_design_docs_queries_required_params(self):
        """
        test_post_design_docs_queries_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design_docs/queries')
        mock_response = '{"results": [{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a AllDocsQuery model
        all_docs_query_model = {}
        all_docs_query_model['att_encoding_info'] = False
        all_docs_query_model['attachments'] = False
        all_docs_query_model['conflicts'] = False
        all_docs_query_model['descending'] = False
        all_docs_query_model['include_docs'] = False
        all_docs_query_model['inclusive_end'] = True
        all_docs_query_model['limit'] = 0
        all_docs_query_model['skip'] = 0
        all_docs_query_model['update_seq'] = False
        all_docs_query_model['endkey'] = 'testString'
        all_docs_query_model['key'] = 'testString'
        all_docs_query_model['keys'] = ['small-appliances:1000042', 'small-appliances:1000043']
        all_docs_query_model['startkey'] = 'testString'

        # Set up parameter values
        db = 'testString'
        queries = [all_docs_query_model]

        # Invoke method
        response = _service.post_design_docs_queries(
            db,
            queries,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['queries'] == [all_docs_query_model]

    def test_post_design_docs_queries_required_params_with_retries(self):
        """
        Run test_post_design_docs_queries_required_params with and without retries.
        """
        # Enable retries and run test_post_design_docs_queries_required_params.
        _service.enable_retries()
        self.test_post_design_docs_queries_required_params()

        # Disable retries and run test_post_design_docs_queries_required_params.
        _service.disable_retries()
        self.test_post_design_docs_queries_required_params()

    @responses.activate
    def test_post_design_docs_queries_value_error(self):
        """
        test_post_design_docs_queries_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design_docs/queries')
        mock_response = '{"results": [{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a AllDocsQuery model
        all_docs_query_model = {}
        all_docs_query_model['att_encoding_info'] = False
        all_docs_query_model['attachments'] = False
        all_docs_query_model['conflicts'] = False
        all_docs_query_model['descending'] = False
        all_docs_query_model['include_docs'] = False
        all_docs_query_model['inclusive_end'] = True
        all_docs_query_model['limit'] = 0
        all_docs_query_model['skip'] = 0
        all_docs_query_model['update_seq'] = False
        all_docs_query_model['endkey'] = 'testString'
        all_docs_query_model['key'] = 'testString'
        all_docs_query_model['keys'] = ['small-appliances:1000042', 'small-appliances:1000043']
        all_docs_query_model['startkey'] = 'testString'

        # Set up parameter values
        db = 'testString'
        queries = [all_docs_query_model]

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "queries": queries,
        }
        for param in req_param_dict.keys():
            # Compare by value (!=), not identity (is not): string identity is
            # an implementation detail and fragile across interpreters.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_design_docs_queries(**req_copy)

    def test_post_design_docs_queries_value_error_with_retries(self):
        """
        Run test_post_design_docs_queries_value_error with and without retries.
        """
        # Enable retries and run test_post_design_docs_queries_value_error.
        _service.enable_retries()
        self.test_post_design_docs_queries_value_error()

        # Disable retries and run test_post_design_docs_queries_value_error.
        _service.disable_retries()
        self.test_post_design_docs_queries_value_error()
# endregion
##############################################################################
# End of Service: DesignDocuments
##############################################################################
##############################################################################
# Start of Service: Views
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # Configure no-auth credentials via the environment, then build the client.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        service = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # Without any configured credentials, construction must fail.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestPostView():
    """
    Test Class for post_view
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # If the URL ends in slash(es), return a regex tolerating any number of them;
        # otherwise the normalized string itself is the exact match.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_view_all_params(self):
        """
        post_view()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_view/testString')
        mock_response = '{"total_rows": 0, "update_seq": "update_seq", "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "anyValue", "value": "anyValue"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        view = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 0
        skip = 0
        update_seq = False
        endkey = 'testString'
        endkey_docid = 'testString'
        group = False
        group_level = 1
        key = 'testString'
        keys = ['testString']
        reduce = True
        stable = False
        startkey = 'testString'
        startkey_docid = 'testString'
        update = 'true'

        # Invoke method
        response = _service.post_view(
            db,
            ddoc,
            view,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            endkey_docid=endkey_docid,
            group=group,
            group_level=group_level,
            key=key,
            keys=keys,
            reduce=reduce,
            stable=stable,
            startkey=startkey,
            startkey_docid=startkey_docid,
            update=update,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == False
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 0
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['endkey_docid'] == 'testString'
        assert req_body['group'] == False
        assert req_body['group_level'] == 1
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['testString']
        assert req_body['reduce'] == True
        assert req_body['stable'] == False
        assert req_body['startkey'] == 'testString'
        assert req_body['startkey_docid'] == 'testString'
        assert req_body['update'] == 'true'

    def test_post_view_all_params_with_retries(self):
        """
        Run test_post_view_all_params with and without retries.
        """
        # Enable retries and run test_post_view_all_params.
        _service.enable_retries()
        self.test_post_view_all_params()

        # Disable retries and run test_post_view_all_params.
        _service.disable_retries()
        self.test_post_view_all_params()

    @responses.activate
    def test_post_view_value_error(self):
        """
        test_post_view_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_view/testString')
        mock_response = '{"total_rows": 0, "update_seq": "update_seq", "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "anyValue", "value": "anyValue"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        view = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 0
        skip = 0
        update_seq = False
        endkey = 'testString'
        endkey_docid = 'testString'
        group = False
        group_level = 1
        key = 'testString'
        keys = ['testString']
        reduce = True
        stable = False
        startkey = 'testString'
        startkey_docid = 'testString'
        update = 'true'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "view": view,
        }
        for param in req_param_dict.keys():
            # Compare by value (!=), not identity (is not): string identity is
            # an implementation detail and fragile across interpreters.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_view(**req_copy)

    def test_post_view_value_error_with_retries(self):
        """
        Run test_post_view_value_error with and without retries.
        """
        # Enable retries and run test_post_view_value_error.
        _service.enable_retries()
        self.test_post_view_value_error()

        # Disable retries and run test_post_view_value_error.
        _service.disable_retries()
        self.test_post_view_value_error()
class TestPostViewAsStream():
    """
    Test Class for post_view_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # If the URL ends in slash(es), return a regex tolerating any number of them;
        # otherwise the normalized string itself is the exact match.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_view_as_stream_all_params(self):
        """
        post_view_as_stream()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_view/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        view = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = True
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        endkey_docid = 'testString'
        group = False
        group_level = 1
        key = 'testString'
        keys = ['examplekey']
        reduce = True
        stable = False
        startkey = 'testString'
        startkey_docid = 'testString'
        update = 'true'

        # Invoke method
        response = _service.post_view_as_stream(
            db,
            ddoc,
            view,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            endkey_docid=endkey_docid,
            group=group,
            group_level=group_level,
            key=key,
            keys=keys,
            reduce=reduce,
            stable=stable,
            startkey=startkey,
            startkey_docid=startkey_docid,
            update=update,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == True
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['endkey_docid'] == 'testString'
        assert req_body['group'] == False
        assert req_body['group_level'] == 1
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['examplekey']
        assert req_body['reduce'] == True
        assert req_body['stable'] == False
        assert req_body['startkey'] == 'testString'
        assert req_body['startkey_docid'] == 'testString'
        assert req_body['update'] == 'true'

        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_view_as_stream_all_params_with_retries(self):
        """
        Run test_post_view_as_stream_all_params with and without retries.
        """
        # Enable retries and run test_post_view_as_stream_all_params.
        _service.enable_retries()
        self.test_post_view_as_stream_all_params()

        # Disable retries and run test_post_view_as_stream_all_params.
        _service.disable_retries()
        self.test_post_view_as_stream_all_params()

    @responses.activate
    def test_post_view_as_stream_value_error(self):
        """
        test_post_view_as_stream_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_view/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        view = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = True
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        endkey_docid = 'testString'
        group = False
        group_level = 1
        key = 'testString'
        keys = ['examplekey']
        reduce = True
        stable = False
        startkey = 'testString'
        startkey_docid = 'testString'
        update = 'true'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "view": view,
        }
        for param in req_param_dict.keys():
            # Compare by value (!=), not identity (is not): string identity is
            # an implementation detail and fragile across interpreters.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_view_as_stream(**req_copy)

    def test_post_view_as_stream_value_error_with_retries(self):
        """
        Run test_post_view_as_stream_value_error with and without retries.
        """
        # Enable retries and run test_post_view_as_stream_value_error.
        _service.enable_retries()
        self.test_post_view_as_stream_value_error()

        # Disable retries and run test_post_view_as_stream_value_error.
        _service.disable_retries()
        self.test_post_view_as_stream_value_error()
class TestPostViewQueries():
"""
Test Class for post_view_queries
"""
def preprocess_url(self, request_url: str):
    """
    Preprocess the request URL to ensure the mock response will be found.
    """
    # Normalize: decode any existing escapes, then re-encode once (keeping ':' and '/').
    normalized = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
    if re.fullmatch('.*/+', normalized) is not None:
        # URL ends in slash(es): return a pattern matching one or more trailing slashes.
        return re.compile(normalized.rstrip('/') + '/+')
    return normalized
@responses.activate
def test_post_view_queries_all_params(self):
"""
post_view_queries()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_design/testString/_view/testString/queries')
mock_response = '{"results": [{"total_rows": 0, "update_seq": "update_seq", "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "anyValue", "value": "anyValue"}]}]}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Construct a dict representation of a ViewQuery model
view_query_model = {}
view_query_model['att_encoding_info'] = False
view_query_model['attachments'] = False
view_query_model['conflicts'] = False
view_query_model['descending'] = False
view_query_model['include_docs'] = False
view_query_model['inclusive_end'] = True
view_query_model['limit'] = 0
view_query_model['skip'] = 0
view_query_model['update_seq'] = False
view_query_model['endkey'] = 'testString'
view_query_model['endkey_docid'] = 'testString'
view_query_model['group'] = False
view_query_model['group_level'] = 1
view_query_model['key'] = 'testString'
view_query_model['keys'] = ['testString']
view_query_model['reduce'] = True
view_query_model['stable'] = False
view_query_model['startkey'] = 'testString'
view_query_model['startkey_docid'] = 'testString'
view_query_model['update'] = 'true'
# Set up parameter values
db = 'testString'
ddoc = 'testString'
view = 'testString'
queries = [view_query_model]
# Invoke method
response = _service.post_view_queries(
db,
ddoc,
view,
queries,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# decompress gzip compressed request body
responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['queries'] == [view_query_model]
def test_post_view_queries_all_params_with_retries(self):
# Enable retries and run test_post_view_queries_all_params.
_service.enable_retries()
self.test_post_view_queries_all_params()
# Disable retries and run test_post_view_queries_all_params.
_service.disable_retries()
self.test_post_view_queries_all_params()
@responses.activate
def test_post_view_queries_value_error(self):
"""
test_post_view_queries_value_error()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_design/testString/_view/testString/queries')
mock_response = '{"results": [{"total_rows": 0, "update_seq": "update_seq", "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "anyValue", "value": "anyValue"}]}]}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Construct a dict representation of a ViewQuery model
view_query_model = {}
view_query_model['att_encoding_info'] = False
view_query_model['attachments'] = False
view_query_model['conflicts'] = False
view_query_model['descending'] = False
view_query_model['include_docs'] = False
view_query_model['inclusive_end'] = True
view_query_model['limit'] = 0
view_query_model['skip'] = 0
view_query_model['update_seq'] = False
view_query_model['endkey'] = 'testString'
view_query_model['endkey_docid'] = 'testString'
view_query_model['group'] = False
view_query_model['group_level'] = 1
view_query_model['key'] = 'testString'
view_query_model['keys'] = ['testString']
view_query_model['reduce'] = True
view_query_model['stable'] = False
view_query_model['startkey'] = 'testString'
view_query_model['startkey_docid'] = 'testString'
view_query_model['update'] = 'true'
# Set up parameter values
db = 'testString'
ddoc = 'testString'
view = 'testString'
queries = [view_query_model]
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"db": db,
"ddoc": ddoc,
"view": view,
"queries": queries,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
_service.post_view_queries(**req_copy)
def test_post_view_queries_value_error_with_retries(self):
# Enable retries and run test_post_view_queries_value_error.
_service.enable_retries()
self.test_post_view_queries_value_error()
# Disable retries and run test_post_view_queries_value_error.
_service.disable_retries()
self.test_post_view_queries_value_error()
class TestPostViewQueriesAsStream():
    """
    Test Class for post_view_queries_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes on the matched URL.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_view_queries_as_stream_all_params(self):
        """
        post_view_queries_as_stream()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_view/testString/queries')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a ViewQuery model
        view_query_model = {}
        view_query_model['att_encoding_info'] = False
        view_query_model['attachments'] = False
        view_query_model['conflicts'] = False
        view_query_model['descending'] = False
        view_query_model['include_docs'] = True
        view_query_model['inclusive_end'] = True
        view_query_model['limit'] = 5
        view_query_model['skip'] = 0
        view_query_model['update_seq'] = False
        view_query_model['endkey'] = 'testString'
        view_query_model['endkey_docid'] = 'testString'
        view_query_model['group'] = False
        view_query_model['group_level'] = 1
        view_query_model['key'] = 'testString'
        view_query_model['keys'] = ['testString']
        view_query_model['reduce'] = True
        view_query_model['stable'] = False
        view_query_model['startkey'] = 'testString'
        view_query_model['startkey_docid'] = 'testString'
        view_query_model['update'] = 'true'
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        view = 'testString'
        queries = [view_query_model]
        # Invoke method
        response = _service.post_view_queries_as_stream(
            db,
            ddoc,
            view,
            queries,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['queries'] == [view_query_model]
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_view_queries_as_stream_all_params_with_retries(self):
        # Enable retries and run test_post_view_queries_as_stream_all_params.
        _service.enable_retries()
        self.test_post_view_queries_as_stream_all_params()
        # Disable retries and run test_post_view_queries_as_stream_all_params.
        _service.disable_retries()
        self.test_post_view_queries_as_stream_all_params()

    @responses.activate
    def test_post_view_queries_as_stream_value_error(self):
        """
        test_post_view_queries_as_stream_value_error()

        Verify that omitting any one required parameter raises a ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_view/testString/queries')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a ViewQuery model
        view_query_model = {}
        view_query_model['att_encoding_info'] = False
        view_query_model['attachments'] = False
        view_query_model['conflicts'] = False
        view_query_model['descending'] = False
        view_query_model['include_docs'] = True
        view_query_model['inclusive_end'] = True
        view_query_model['limit'] = 5
        view_query_model['skip'] = 0
        view_query_model['update_seq'] = False
        view_query_model['endkey'] = 'testString'
        view_query_model['endkey_docid'] = 'testString'
        view_query_model['group'] = False
        view_query_model['group_level'] = 1
        view_query_model['key'] = 'testString'
        view_query_model['keys'] = ['testString']
        view_query_model['reduce'] = True
        view_query_model['stable'] = False
        view_query_model['startkey'] = 'testString'
        view_query_model['startkey_docid'] = 'testString'
        view_query_model['update'] = 'true'
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        view = 'testString'
        queries = [view_query_model]
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "view": view,
            "queries": queries,
        }
        for param in req_param_dict:
            # Null out one required parameter at a time; compare by equality
            # (==), not identity (is), since string interning is an
            # implementation detail.
            req_copy = {k: None if k == param else v for (k, v) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_view_queries_as_stream(**req_copy)

    def test_post_view_queries_as_stream_value_error_with_retries(self):
        # Enable retries and run test_post_view_queries_as_stream_value_error.
        _service.enable_retries()
        self.test_post_view_queries_as_stream_value_error()
        # Disable retries and run test_post_view_queries_as_stream_value_error.
        _service.disable_retries()
        self.test_post_view_queries_as_stream_value_error()
# endregion
##############################################################################
# End of Service: Views
##############################################################################
##############################################################################
# Start of Service: PartitionedDatabases
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()

        A service can be constructed from environment configuration alone.
        """
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'

        service = CloudantV1.new_instance(service_name='TEST_SERVICE')

        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()

        Constructing without any authenticator configuration must fail.
        """
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestGetPartitionInformation():
    """
    Test Class for get_partition_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes on the matched URL.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_partition_information_all_params(self):
        """
        get_partition_information()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString')
        mock_response = '{"db_name": "db_name", "doc_count": 0, "doc_del_count": 0, "partition": "partition", "partitioned_indexes": {"count": 0, "indexes": {"search": 0, "view": 0}, "limit": 0}, "sizes": {"active": 0, "external": 0}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        # Invoke method
        response = _service.get_partition_information(
            db,
            partition_key,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_partition_information_all_params_with_retries(self):
        # Enable retries and run test_get_partition_information_all_params.
        _service.enable_retries()
        self.test_get_partition_information_all_params()
        # Disable retries and run test_get_partition_information_all_params.
        _service.disable_retries()
        self.test_get_partition_information_all_params()

    @responses.activate
    def test_get_partition_information_value_error(self):
        """
        test_get_partition_information_value_error()

        Verify that omitting any one required parameter raises a ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString')
        mock_response = '{"db_name": "db_name", "doc_count": 0, "doc_del_count": 0, "partition": "partition", "partitioned_indexes": {"count": 0, "indexes": {"search": 0, "view": 0}, "limit": 0}, "sizes": {"active": 0, "external": 0}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "partition_key": partition_key,
        }
        for param in req_param_dict:
            # Null out one required parameter at a time; compare by equality
            # (==), not identity (is), since string interning is an
            # implementation detail.
            req_copy = {k: None if k == param else v for (k, v) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_partition_information(**req_copy)

    def test_get_partition_information_value_error_with_retries(self):
        # Enable retries and run test_get_partition_information_value_error.
        _service.enable_retries()
        self.test_get_partition_information_value_error()
        # Disable retries and run test_get_partition_information_value_error.
        _service.disable_retries()
        self.test_get_partition_information_value_error()
class TestPostPartitionAllDocs():
    """
    Test Class for post_partition_all_docs
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes on the matched URL.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_partition_all_docs_all_params(self):
        """
        post_partition_all_docs()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_all_docs')
        mock_response = '{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = '0007741142412418284'
        # Invoke method
        response = _service.post_partition_all_docs(
            db,
            partition_key,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            key=key,
            keys=keys,
            startkey=startkey,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == False
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['testString']
        assert req_body['startkey'] == '0007741142412418284'

    def test_post_partition_all_docs_all_params_with_retries(self):
        # Enable retries and run test_post_partition_all_docs_all_params.
        _service.enable_retries()
        self.test_post_partition_all_docs_all_params()
        # Disable retries and run test_post_partition_all_docs_all_params.
        _service.disable_retries()
        self.test_post_partition_all_docs_all_params()

    @responses.activate
    def test_post_partition_all_docs_value_error(self):
        """
        test_post_partition_all_docs_value_error()

        Verify that omitting any one required parameter raises a ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_all_docs')
        mock_response = '{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up required parameter values only; optional query parameters
        # are irrelevant to the required-parameter validation exercised here.
        db = 'testString'
        partition_key = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "partition_key": partition_key,
        }
        for param in req_param_dict:
            # Null out one required parameter at a time; compare by equality
            # (==), not identity (is), since string interning is an
            # implementation detail.
            req_copy = {k: None if k == param else v for (k, v) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_all_docs(**req_copy)

    def test_post_partition_all_docs_value_error_with_retries(self):
        # Enable retries and run test_post_partition_all_docs_value_error.
        _service.enable_retries()
        self.test_post_partition_all_docs_value_error()
        # Disable retries and run test_post_partition_all_docs_value_error.
        _service.disable_retries()
        self.test_post_partition_all_docs_value_error()
class TestPostPartitionAllDocsAsStream():
    """
    Test Class for post_partition_all_docs_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes on the matched URL.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_partition_all_docs_as_stream_all_params(self):
        """
        post_partition_all_docs_as_stream()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_all_docs')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = '0007741142412418284'
        # Invoke method
        response = _service.post_partition_all_docs_as_stream(
            db,
            partition_key,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            key=key,
            keys=keys,
            startkey=startkey,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == False
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['testString']
        assert req_body['startkey'] == '0007741142412418284'
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_partition_all_docs_as_stream_all_params_with_retries(self):
        # Enable retries and run test_post_partition_all_docs_as_stream_all_params.
        _service.enable_retries()
        self.test_post_partition_all_docs_as_stream_all_params()
        # Disable retries and run test_post_partition_all_docs_as_stream_all_params.
        _service.disable_retries()
        self.test_post_partition_all_docs_as_stream_all_params()

    @responses.activate
    def test_post_partition_all_docs_as_stream_value_error(self):
        """
        test_post_partition_all_docs_as_stream_value_error()

        Verify that omitting any one required parameter raises a ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_all_docs')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up required parameter values only; optional query parameters
        # are irrelevant to the required-parameter validation exercised here.
        db = 'testString'
        partition_key = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "partition_key": partition_key,
        }
        for param in req_param_dict:
            # Null out one required parameter at a time; compare by equality
            # (==), not identity (is), since string interning is an
            # implementation detail.
            req_copy = {k: None if k == param else v for (k, v) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_all_docs_as_stream(**req_copy)

    def test_post_partition_all_docs_as_stream_value_error_with_retries(self):
        # Enable retries and run test_post_partition_all_docs_as_stream_value_error.
        _service.enable_retries()
        self.test_post_partition_all_docs_as_stream_value_error()
        # Disable retries and run test_post_partition_all_docs_as_stream_value_error.
        _service.disable_retries()
        self.test_post_partition_all_docs_as_stream_value_error()
class TestPostPartitionSearch():
    """
    Test Class for post_partition_search
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes on the matched URL.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_partition_search_all_params(self):
        """
        post_partition_search()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_search/testString')
        mock_response = '{"total_rows": 0, "bookmark": "bookmark", "by": "by", "counts": {"mapKey": {"mapKey": 0}}, "ranges": {"mapKey": {"mapKey": 0}}, "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "fields": {"mapKey": "anyValue"}, "highlights": {"mapKey": ["inner"]}, "id": "id"}], "groups": [{"total_rows": 0, "bookmark": "bookmark", "by": "by", "counts": {"mapKey": {"mapKey": 0}}, "ranges": {"mapKey": {"mapKey": 0}}, "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "fields": {"mapKey": "anyValue"}, "highlights": {"mapKey": ["inner"]}, "id": "id"}]}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        ddoc = 'testString'
        index = 'testString'
        query = 'testString'
        bookmark = 'testString'
        highlight_fields = ['testString']
        highlight_number = 1
        highlight_post_tag = '</em>'
        highlight_pre_tag = '<em>'
        highlight_size = 1
        include_docs = False
        include_fields = ['testString']
        limit = 0
        sort = ['testString']
        stale = 'ok'
        # Invoke method
        response = _service.post_partition_search(
            db,
            partition_key,
            ddoc,
            index,
            query,
            bookmark=bookmark,
            highlight_fields=highlight_fields,
            highlight_number=highlight_number,
            highlight_post_tag=highlight_post_tag,
            highlight_pre_tag=highlight_pre_tag,
            highlight_size=highlight_size,
            include_docs=include_docs,
            include_fields=include_fields,
            limit=limit,
            sort=sort,
            stale=stale,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['query'] == 'testString'
        assert req_body['bookmark'] == 'testString'
        assert req_body['highlight_fields'] == ['testString']
        assert req_body['highlight_number'] == 1
        assert req_body['highlight_post_tag'] == '</em>'
        assert req_body['highlight_pre_tag'] == '<em>'
        assert req_body['highlight_size'] == 1
        assert req_body['include_docs'] == False
        assert req_body['include_fields'] == ['testString']
        assert req_body['limit'] == 0
        assert req_body['sort'] == ['testString']
        assert req_body['stale'] == 'ok'

    def test_post_partition_search_all_params_with_retries(self):
        # Enable retries and run test_post_partition_search_all_params.
        _service.enable_retries()
        self.test_post_partition_search_all_params()
        # Disable retries and run test_post_partition_search_all_params.
        _service.disable_retries()
        self.test_post_partition_search_all_params()

    @responses.activate
    def test_post_partition_search_value_error(self):
        """
        test_post_partition_search_value_error()

        Verify that omitting any one required parameter raises a ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_search/testString')
        mock_response = '{"total_rows": 0, "bookmark": "bookmark", "by": "by", "counts": {"mapKey": {"mapKey": 0}}, "ranges": {"mapKey": {"mapKey": 0}}, "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "fields": {"mapKey": "anyValue"}, "highlights": {"mapKey": ["inner"]}, "id": "id"}], "groups": [{"total_rows": 0, "bookmark": "bookmark", "by": "by", "counts": {"mapKey": {"mapKey": 0}}, "ranges": {"mapKey": {"mapKey": 0}}, "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "fields": {"mapKey": "anyValue"}, "highlights": {"mapKey": ["inner"]}, "id": "id"}]}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up required parameter values only; optional search parameters
        # are irrelevant to the required-parameter validation exercised here.
        db = 'testString'
        partition_key = 'testString'
        ddoc = 'testString'
        index = 'testString'
        query = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "partition_key": partition_key,
            "ddoc": ddoc,
            "index": index,
            "query": query,
        }
        for param in req_param_dict:
            # Null out one required parameter at a time; compare by equality
            # (==), not identity (is), since string interning is an
            # implementation detail.
            req_copy = {k: None if k == param else v for (k, v) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_search(**req_copy)

    def test_post_partition_search_value_error_with_retries(self):
        # Enable retries and run test_post_partition_search_value_error.
        _service.enable_retries()
        self.test_post_partition_search_value_error()
        # Disable retries and run test_post_partition_search_value_error.
        _service.disable_retries()
        self.test_post_partition_search_value_error()
class TestPostPartitionSearchAsStream():
"""
Test Class for post_partition_search_as_stream
"""
def preprocess_url(self, request_url: str):
    """
    Preprocess the request URL to ensure the mock response will be found.
    """
    # Normalize percent-encoding in one pass: decode first so an
    # already-encoded URL is not double-encoded, then re-encode.
    normalized = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
    if re.fullmatch('.*/+', normalized):
        # URL ends with slash(es): match any number of trailing slashes.
        return re.compile(normalized.rstrip('/') + '/+')
    return normalized
@responses.activate
def test_post_partition_search_as_stream_all_params(self):
"""
post_partition_search_as_stream()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_search/testString')
mock_response = '{"foo": "this is a mock response for JSON streaming"}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
db = 'testString'
partition_key = 'testString'
ddoc = 'testString'
index = 'testString'
query = 'testString'
bookmark = 'testString'
highlight_fields = ['testString']
highlight_number = 1
highlight_post_tag = '</em>'
highlight_pre_tag = '<em>'
highlight_size = 1
include_docs = False
include_fields = ['testString']
limit = 3
sort = ['testString']
stale = 'ok'
# Invoke method
response = _service.post_partition_search_as_stream(
db,
partition_key,
ddoc,
index,
query,
bookmark=bookmark,
highlight_fields=highlight_fields,
highlight_number=highlight_number,
highlight_post_tag=highlight_post_tag,
highlight_pre_tag=highlight_pre_tag,
highlight_size=highlight_size,
include_docs=include_docs,
include_fields=include_fields,
limit=limit,
sort=sort,
stale=stale,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# decompress gzip compressed request body
responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['query'] == 'testString'
assert req_body['bookmark'] == 'testString'
assert req_body['highlight_fields'] == ['testString']
assert req_body['highlight_number'] == 1
assert req_body['highlight_post_tag'] == '</em>'
assert req_body['highlight_pre_tag'] == '<em>'
assert req_body['highlight_size'] == 1
assert req_body['include_docs'] == False
assert req_body['include_fields'] == ['testString']
assert req_body['limit'] == 3
assert req_body['sort'] == ['testString']
assert req_body['stale'] == 'ok'
# Verify streamed JSON response
result = response.get_result()
assert isinstance(result, requests.models.Response)
response_buf = result.iter_content(chunk_size=1024)
assert str(next(response_buf), "utf-8") == mock_response
def test_post_partition_search_as_stream_all_params_with_retries(self):
# Enable retries and run test_post_partition_search_as_stream_all_params.
_service.enable_retries()
self.test_post_partition_search_as_stream_all_params()
# Disable retries and run test_post_partition_search_as_stream_all_params.
_service.disable_retries()
self.test_post_partition_search_as_stream_all_params()
@responses.activate
def test_post_partition_search_as_stream_value_error(self):
"""
test_post_partition_search_as_stream_value_error()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_search/testString')
mock_response = '{"foo": "this is a mock response for JSON streaming"}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
db = 'testString'
partition_key = 'testString'
ddoc = 'testString'
index = 'testString'
query = 'testString'
bookmark = 'testString'
highlight_fields = ['testString']
highlight_number = 1
highlight_post_tag = '</em>'
highlight_pre_tag = '<em>'
highlight_size = 1
include_docs = False
include_fields = ['testString']
limit = 3
sort = ['testString']
stale = 'ok'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"db": db,
"partition_key": partition_key,
"ddoc": ddoc,
"index": index,
"query": query,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
_service.post_partition_search_as_stream(**req_copy)
def test_post_partition_search_as_stream_value_error_with_retries(self):
# Enable retries and run test_post_partition_search_as_stream_value_error.
_service.enable_retries()
self.test_post_partition_search_as_stream_value_error()
# Disable retries and run test_post_partition_search_as_stream_value_error.
_service.disable_retries()
self.test_post_partition_search_as_stream_value_error()
class TestPostPartitionView():
    """
    Test Class for post_partition_view
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # A URL with trailing slashes is registered as a regex tolerant of any
        # number of trailing slashes; otherwise the literal URL is used.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_partition_view_all_params(self):
        """
        post_partition_view()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_view/testString')
        mock_response = '{"total_rows": 0, "update_seq": "update_seq", "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "anyValue", "value": "anyValue"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        ddoc = 'testString'
        view = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = True
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        endkey_docid = 'testString'
        group = False
        group_level = 1
        key = 'testString'
        keys = ['examplekey']
        reduce = True
        stable = False
        startkey = 'testString'
        startkey_docid = 'testString'
        update = 'true'

        # Invoke method
        response = _service.post_partition_view(
            db,
            partition_key,
            ddoc,
            view,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            endkey_docid=endkey_docid,
            group=group,
            group_level=group_level,
            key=key,
            keys=keys,
            reduce=reduce,
            stable=stable,
            startkey=startkey,
            startkey_docid=startkey_docid,
            update=update,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == True
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['endkey_docid'] == 'testString'
        assert req_body['group'] == False
        assert req_body['group_level'] == 1
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['examplekey']
        assert req_body['reduce'] == True
        assert req_body['stable'] == False
        assert req_body['startkey'] == 'testString'
        assert req_body['startkey_docid'] == 'testString'
        assert req_body['update'] == 'true'

    def test_post_partition_view_all_params_with_retries(self):
        """Re-run the all-params test with the retry feature toggled on and off."""
        # Enable retries and run test_post_partition_view_all_params.
        _service.enable_retries()
        self.test_post_partition_view_all_params()

        # Disable retries and run test_post_partition_view_all_params.
        _service.disable_retries()
        self.test_post_partition_view_all_params()

    @responses.activate
    def test_post_partition_view_value_error(self):
        """
        test_post_partition_view_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_view/testString')
        mock_response = '{"total_rows": 0, "update_seq": "update_seq", "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "anyValue", "value": "anyValue"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up required parameter values (optional params are irrelevant here)
        db = 'testString'
        partition_key = 'testString'
        ddoc = 'testString'
        view = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "partition_key": partition_key,
            "ddoc": ddoc,
            "view": view,
        }
        for param in req_param_dict:
            # Null out one required parameter at a time; compare keys by value
            # (==) rather than identity (is) so the check does not depend on
            # string interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_view(**req_copy)

    def test_post_partition_view_value_error_with_retries(self):
        """Re-run the value-error test with the retry feature toggled on and off."""
        # Enable retries and run test_post_partition_view_value_error.
        _service.enable_retries()
        self.test_post_partition_view_value_error()

        # Disable retries and run test_post_partition_view_value_error.
        _service.disable_retries()
        self.test_post_partition_view_value_error()
class TestPostPartitionViewAsStream():
    """
    Test Class for post_partition_view_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # A URL with trailing slashes is registered as a regex tolerant of any
        # number of trailing slashes; otherwise the literal URL is used.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_partition_view_as_stream_all_params(self):
        """
        post_partition_view_as_stream()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_view/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        ddoc = 'testString'
        view = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = True
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        endkey_docid = 'testString'
        group = False
        group_level = 1
        key = 'testString'
        keys = ['examplekey']
        reduce = True
        stable = False
        startkey = 'testString'
        startkey_docid = 'testString'
        update = 'true'

        # Invoke method
        response = _service.post_partition_view_as_stream(
            db,
            partition_key,
            ddoc,
            view,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            endkey_docid=endkey_docid,
            group=group,
            group_level=group_level,
            key=key,
            keys=keys,
            reduce=reduce,
            stable=stable,
            startkey=startkey,
            startkey_docid=startkey_docid,
            update=update,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == True
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['endkey_docid'] == 'testString'
        assert req_body['group'] == False
        assert req_body['group_level'] == 1
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['examplekey']
        assert req_body['reduce'] == True
        assert req_body['stable'] == False
        assert req_body['startkey'] == 'testString'
        assert req_body['startkey_docid'] == 'testString'
        assert req_body['update'] == 'true'

        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_partition_view_as_stream_all_params_with_retries(self):
        """Re-run the all-params test with the retry feature toggled on and off."""
        # Enable retries and run test_post_partition_view_as_stream_all_params.
        _service.enable_retries()
        self.test_post_partition_view_as_stream_all_params()

        # Disable retries and run test_post_partition_view_as_stream_all_params.
        _service.disable_retries()
        self.test_post_partition_view_as_stream_all_params()

    @responses.activate
    def test_post_partition_view_as_stream_value_error(self):
        """
        test_post_partition_view_as_stream_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_view/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up required parameter values (optional params are irrelevant here)
        db = 'testString'
        partition_key = 'testString'
        ddoc = 'testString'
        view = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "partition_key": partition_key,
            "ddoc": ddoc,
            "view": view,
        }
        for param in req_param_dict:
            # Null out one required parameter at a time; compare keys by value
            # (==) rather than identity (is) so the check does not depend on
            # string interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_view_as_stream(**req_copy)

    def test_post_partition_view_as_stream_value_error_with_retries(self):
        """Re-run the value-error test with the retry feature toggled on and off."""
        # Enable retries and run test_post_partition_view_as_stream_value_error.
        _service.enable_retries()
        self.test_post_partition_view_as_stream_value_error()

        # Disable retries and run test_post_partition_view_as_stream_value_error.
        _service.disable_retries()
        self.test_post_partition_view_as_stream_value_error()
class TestPostPartitionFind():
    """
    Test Class for post_partition_find
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # A URL with trailing slashes is registered as a regex tolerant of any
        # number of trailing slashes; otherwise the literal URL is used.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_partition_find_all_params(self):
        """
        post_partition_find()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_find')
        mock_response = '{"bookmark": "bookmark", "docs": [{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}], "execution_stats": {"execution_time_ms": 17, "results_returned": 0, "total_docs_examined": 0, "total_keys_examined": 0, "total_quorum_docs_examined": 0}, "warning": "warning"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['testString']
        limit = 0
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']

        # Invoke method
        response = _service.post_partition_find(
            db,
            partition_key,
            selector,
            bookmark=bookmark,
            conflicts=conflicts,
            execution_stats=execution_stats,
            fields=fields,
            limit=limit,
            skip=skip,
            sort=sort,
            stable=stable,
            update=update,
            use_index=use_index,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['selector'] == {}
        assert req_body['bookmark'] == 'testString'
        assert req_body['conflicts'] == True
        assert req_body['execution_stats'] == True
        assert req_body['fields'] == ['testString']
        assert req_body['limit'] == 0
        assert req_body['skip'] == 0
        assert req_body['sort'] == [{}]
        assert req_body['stable'] == True
        assert req_body['update'] == 'true'
        assert req_body['use_index'] == ['testString']

    def test_post_partition_find_all_params_with_retries(self):
        """Re-run the all-params test with the retry feature toggled on and off."""
        # Enable retries and run test_post_partition_find_all_params.
        _service.enable_retries()
        self.test_post_partition_find_all_params()

        # Disable retries and run test_post_partition_find_all_params.
        _service.disable_retries()
        self.test_post_partition_find_all_params()

    @responses.activate
    def test_post_partition_find_value_error(self):
        """
        test_post_partition_find_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_find')
        mock_response = '{"bookmark": "bookmark", "docs": [{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}], "execution_stats": {"execution_time_ms": 17, "results_returned": 0, "total_docs_examined": 0, "total_keys_examined": 0, "total_quorum_docs_examined": 0}, "warning": "warning"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up required parameter values (optional params are irrelevant here)
        db = 'testString'
        partition_key = 'testString'
        selector = {}

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "partition_key": partition_key,
            "selector": selector,
        }
        for param in req_param_dict:
            # Null out one required parameter at a time; compare keys by value
            # (==) rather than identity (is) so the check does not depend on
            # string interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_find(**req_copy)

    def test_post_partition_find_value_error_with_retries(self):
        """Re-run the value-error test with the retry feature toggled on and off."""
        # Enable retries and run test_post_partition_find_value_error.
        _service.enable_retries()
        self.test_post_partition_find_value_error()

        # Disable retries and run test_post_partition_find_value_error.
        _service.disable_retries()
        self.test_post_partition_find_value_error()
class TestPostPartitionFindAsStream():
    """
    Test Class for post_partition_find_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # A URL with trailing slashes is registered as a regex tolerant of any
        # number of trailing slashes; otherwise the literal URL is used.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_partition_find_as_stream_all_params(self):
        """
        post_partition_find_as_stream()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_find')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['productid', 'name', 'description']
        limit = 0
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']

        # Invoke method
        response = _service.post_partition_find_as_stream(
            db,
            partition_key,
            selector,
            bookmark=bookmark,
            conflicts=conflicts,
            execution_stats=execution_stats,
            fields=fields,
            limit=limit,
            skip=skip,
            sort=sort,
            stable=stable,
            update=update,
            use_index=use_index,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['selector'] == {}
        assert req_body['bookmark'] == 'testString'
        assert req_body['conflicts'] == True
        assert req_body['execution_stats'] == True
        assert req_body['fields'] == ['productid', 'name', 'description']
        assert req_body['limit'] == 0
        assert req_body['skip'] == 0
        assert req_body['sort'] == [{}]
        assert req_body['stable'] == True
        assert req_body['update'] == 'true'
        assert req_body['use_index'] == ['testString']

        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_partition_find_as_stream_all_params_with_retries(self):
        """Re-run the all-params test with the retry feature toggled on and off."""
        # Enable retries and run test_post_partition_find_as_stream_all_params.
        _service.enable_retries()
        self.test_post_partition_find_as_stream_all_params()

        # Disable retries and run test_post_partition_find_as_stream_all_params.
        _service.disable_retries()
        self.test_post_partition_find_as_stream_all_params()

    @responses.activate
    def test_post_partition_find_as_stream_value_error(self):
        """
        test_post_partition_find_as_stream_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_find')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up required parameter values (optional params are irrelevant here)
        db = 'testString'
        partition_key = 'testString'
        selector = {}

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "partition_key": partition_key,
            "selector": selector,
        }
        for param in req_param_dict:
            # Null out one required parameter at a time; compare keys by value
            # (==) rather than identity (is) so the check does not depend on
            # string interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_find_as_stream(**req_copy)

    def test_post_partition_find_as_stream_value_error_with_retries(self):
        """Re-run the value-error test with the retry feature toggled on and off."""
        # Enable retries and run test_post_partition_find_as_stream_value_error.
        _service.enable_retries()
        self.test_post_partition_find_as_stream_value_error()

        # Disable retries and run test_post_partition_find_as_stream_value_error.
        _service.disable_retries()
        self.test_post_partition_find_as_stream_value_error()
# endregion
##############################################################################
# End of Service: PartitionedDatabases
##############################################################################
##############################################################################
# Start of Service: Queries
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # Configure auth through the environment so the factory needs no
        # explicit authenticator argument.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'

        service = CloudantV1.new_instance(
            service_name='TEST_SERVICE',
        )

        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # With no service name (and thus no external config), the factory must
        # raise because no authenticator can be resolved. The return value is
        # irrelevant, so it is deliberately not bound to a variable.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance(
            )
class TestPostExplain():
    """
    Test Class for post_explain
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # A URL with trailing slashes is registered as a regex tolerant of any
        # number of trailing slashes; otherwise the literal URL is used.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_explain_all_params(self):
        """
        post_explain()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_explain')
        mock_response = '{"dbname": "dbname", "fields": ["fields"], "index": {"ddoc": "ddoc", "def": {"default_analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "default_field": {"analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "enabled": true}, "fields": [{"name": "name", "type": "boolean"}], "index_array_lengths": true, "partial_filter_selector": {"mapKey": "anyValue"}}, "name": "name", "type": "json"}, "limit": 0, "opts": {"mapKey": "anyValue"}, "range": {"end_key": ["anyValue"], "start_key": ["anyValue"]}, "selector": {"mapKey": "anyValue"}, "skip": 0}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['testString']
        limit = 0
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']
        r = 1

        # Invoke method
        response = _service.post_explain(
            db,
            selector,
            bookmark=bookmark,
            conflicts=conflicts,
            execution_stats=execution_stats,
            fields=fields,
            limit=limit,
            skip=skip,
            sort=sort,
            stable=stable,
            update=update,
            use_index=use_index,
            r=r,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['selector'] == {}
        assert req_body['bookmark'] == 'testString'
        assert req_body['conflicts'] == True
        assert req_body['execution_stats'] == True
        assert req_body['fields'] == ['testString']
        assert req_body['limit'] == 0
        assert req_body['skip'] == 0
        assert req_body['sort'] == [{}]
        assert req_body['stable'] == True
        assert req_body['update'] == 'true'
        assert req_body['use_index'] == ['testString']
        assert req_body['r'] == 1

    def test_post_explain_all_params_with_retries(self):
        """Re-run the all-params test with the retry feature toggled on and off."""
        # Enable retries and run test_post_explain_all_params.
        _service.enable_retries()
        self.test_post_explain_all_params()

        # Disable retries and run test_post_explain_all_params.
        _service.disable_retries()
        self.test_post_explain_all_params()

    @responses.activate
    def test_post_explain_value_error(self):
        """
        test_post_explain_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_explain')
        mock_response = '{"dbname": "dbname", "fields": ["fields"], "index": {"ddoc": "ddoc", "def": {"default_analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "default_field": {"analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "enabled": true}, "fields": [{"name": "name", "type": "boolean"}], "index_array_lengths": true, "partial_filter_selector": {"mapKey": "anyValue"}}, "name": "name", "type": "json"}, "limit": 0, "opts": {"mapKey": "anyValue"}, "range": {"end_key": ["anyValue"], "start_key": ["anyValue"]}, "selector": {"mapKey": "anyValue"}, "skip": 0}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up required parameter values (optional params are irrelevant here)
        db = 'testString'
        selector = {}

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "selector": selector,
        }
        for param in req_param_dict:
            # Null out one required parameter at a time; compare keys by value
            # (==) rather than identity (is) so the check does not depend on
            # string interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_explain(**req_copy)

    def test_post_explain_value_error_with_retries(self):
        """Re-run the value-error test with the retry feature toggled on and off."""
        # Enable retries and run test_post_explain_value_error.
        _service.enable_retries()
        self.test_post_explain_value_error()

        # Disable retries and run test_post_explain_value_error.
        _service.disable_retries()
        self.test_post_explain_value_error()
class TestPostFind():
    """
    Test Class for post_find
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs with trailing slashes are matched via a regex accepting any number
        # of trailing slashes; all other URLs are matched literally.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_post_find_all_params(self):
        """
        post_find()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_find')
        mock_response = '{"bookmark": "bookmark", "docs": [{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}], "execution_stats": {"execution_time_ms": 17, "results_returned": 0, "total_docs_examined": 0, "total_keys_examined": 0, "total_quorum_docs_examined": 0}, "warning": "warning"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['_id', 'type', 'name', 'email']
        limit = 3
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']
        r = 1
        # Invoke method
        response = _service.post_find(
            db,
            selector,
            bookmark=bookmark,
            conflicts=conflicts,
            execution_stats=execution_stats,
            fields=fields,
            limit=limit,
            skip=skip,
            sort=sort,
            stable=stable,
            update=update,
            use_index=use_index,
            r=r,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['selector'] == {}
        assert req_body['bookmark'] == 'testString'
        assert req_body['conflicts'] == True
        assert req_body['execution_stats'] == True
        assert req_body['fields'] == ['_id', 'type', 'name', 'email']
        assert req_body['limit'] == 3
        assert req_body['skip'] == 0
        assert req_body['sort'] == [{}]
        assert req_body['stable'] == True
        assert req_body['update'] == 'true'
        assert req_body['use_index'] == ['testString']
        assert req_body['r'] == 1
    def test_post_find_all_params_with_retries(self):
        # Enable retries and run test_post_find_all_params.
        _service.enable_retries()
        self.test_post_find_all_params()
        # Disable retries and run test_post_find_all_params.
        _service.disable_retries()
        self.test_post_find_all_params()
    @responses.activate
    def test_post_find_value_error(self):
        """
        test_post_find_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_find')
        mock_response = '{"bookmark": "bookmark", "docs": [{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}], "execution_stats": {"execution_time_ms": 17, "results_returned": 0, "total_docs_examined": 0, "total_keys_examined": 0, "total_quorum_docs_examined": 0}, "warning": "warning"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up required parameter values only; optional params are irrelevant
        # to the ValueError checks below.
        db = 'testString'
        selector = {}
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "selector": selector,
        }
        for param in req_param_dict:
            # Compare by equality, not identity: `is not` on strings only works
            # by accident of CPython key-object reuse/interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_find(**req_copy)
    def test_post_find_value_error_with_retries(self):
        # Enable retries and run test_post_find_value_error.
        _service.enable_retries()
        self.test_post_find_value_error()
        # Disable retries and run test_post_find_value_error.
        _service.disable_retries()
        self.test_post_find_value_error()
class TestPostFindAsStream():
    """
    Test Class for post_find_as_stream
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs with trailing slashes are matched via a regex accepting any number
        # of trailing slashes; all other URLs are matched literally.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_post_find_as_stream_all_params(self):
        """
        post_find_as_stream()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_find')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['_id', 'type', 'name', 'email']
        limit = 3
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']
        r = 1
        # Invoke method
        response = _service.post_find_as_stream(
            db,
            selector,
            bookmark=bookmark,
            conflicts=conflicts,
            execution_stats=execution_stats,
            fields=fields,
            limit=limit,
            skip=skip,
            sort=sort,
            stable=stable,
            update=update,
            use_index=use_index,
            r=r,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['selector'] == {}
        assert req_body['bookmark'] == 'testString'
        assert req_body['conflicts'] == True
        assert req_body['execution_stats'] == True
        assert req_body['fields'] == ['_id', 'type', 'name', 'email']
        assert req_body['limit'] == 3
        assert req_body['skip'] == 0
        assert req_body['sort'] == [{}]
        assert req_body['stable'] == True
        assert req_body['update'] == 'true'
        assert req_body['use_index'] == ['testString']
        assert req_body['r'] == 1
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response
    def test_post_find_as_stream_all_params_with_retries(self):
        # Enable retries and run test_post_find_as_stream_all_params.
        _service.enable_retries()
        self.test_post_find_as_stream_all_params()
        # Disable retries and run test_post_find_as_stream_all_params.
        _service.disable_retries()
        self.test_post_find_as_stream_all_params()
    @responses.activate
    def test_post_find_as_stream_value_error(self):
        """
        test_post_find_as_stream_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_find')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up required parameter values only; optional params are irrelevant
        # to the ValueError checks below.
        db = 'testString'
        selector = {}
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "selector": selector,
        }
        for param in req_param_dict:
            # Compare by equality, not identity: `is not` on strings only works
            # by accident of CPython key-object reuse/interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_find_as_stream(**req_copy)
    def test_post_find_as_stream_value_error_with_retries(self):
        # Enable retries and run test_post_find_as_stream_value_error.
        _service.enable_retries()
        self.test_post_find_as_stream_value_error()
        # Disable retries and run test_post_find_as_stream_value_error.
        _service.disable_retries()
        self.test_post_find_as_stream_value_error()
class TestGetIndexesInformation():
    """
    Test Class for get_indexes_information
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs with trailing slashes are matched via a regex accepting any number
        # of trailing slashes; all other URLs are matched literally.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_get_indexes_information_all_params(self):
        """
        get_indexes_information()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_index')
        mock_response = '{"total_rows": 0, "indexes": [{"ddoc": "ddoc", "def": {"default_analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "default_field": {"analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "enabled": true}, "fields": [{"name": "name", "type": "boolean"}], "index_array_lengths": true, "partial_filter_selector": {"mapKey": "anyValue"}}, "name": "name", "type": "json"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Invoke method
        response = _service.get_indexes_information(
            db,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_get_indexes_information_all_params_with_retries(self):
        # Enable retries and run test_get_indexes_information_all_params.
        _service.enable_retries()
        self.test_get_indexes_information_all_params()
        # Disable retries and run test_get_indexes_information_all_params.
        _service.disable_retries()
        self.test_get_indexes_information_all_params()
    @responses.activate
    def test_get_indexes_information_value_error(self):
        """
        test_get_indexes_information_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_index')
        mock_response = '{"total_rows": 0, "indexes": [{"ddoc": "ddoc", "def": {"default_analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "default_field": {"analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "enabled": true}, "fields": [{"name": "name", "type": "boolean"}], "index_array_lengths": true, "partial_filter_selector": {"mapKey": "anyValue"}}, "name": "name", "type": "json"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Compare by equality, not identity: `is not` on strings only works
            # by accident of CPython key-object reuse/interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_indexes_information(**req_copy)
    def test_get_indexes_information_value_error_with_retries(self):
        # Enable retries and run test_get_indexes_information_value_error.
        _service.enable_retries()
        self.test_get_indexes_information_value_error()
        # Disable retries and run test_get_indexes_information_value_error.
        _service.disable_retries()
        self.test_get_indexes_information_value_error()
class TestPostIndex():
    """
    Test Class for post_index
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs with trailing slashes are matched via a regex accepting any number
        # of trailing slashes; all other URLs are matched literally.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_post_index_all_params(self):
        """
        post_index()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_index')
        mock_response = '{"id": "id", "name": "name", "result": "created"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a Analyzer model
        analyzer_model = {}
        analyzer_model['name'] = 'classic'
        analyzer_model['stopwords'] = ['testString']
        # Construct a dict representation of a IndexTextOperatorDefaultField model
        index_text_operator_default_field_model = {}
        index_text_operator_default_field_model['analyzer'] = analyzer_model
        index_text_operator_default_field_model['enabled'] = True
        # Construct a dict representation of a IndexField model
        index_field_model = {}
        index_field_model['name'] = 'testString'
        index_field_model['type'] = 'boolean'
        index_field_model['foo'] = 'asc'
        # Construct a dict representation of a IndexDefinition model
        index_definition_model = {}
        index_definition_model['default_analyzer'] = analyzer_model
        index_definition_model['default_field'] = index_text_operator_default_field_model
        index_definition_model['fields'] = [index_field_model]
        index_definition_model['index_array_lengths'] = True
        index_definition_model['partial_filter_selector'] = {}
        # Set up parameter values
        db = 'testString'
        index = index_definition_model
        ddoc = 'testString'
        def_ = index_definition_model
        name = 'testString'
        partitioned = True
        type = 'json'
        # Invoke method
        response = _service.post_index(
            db,
            index,
            ddoc=ddoc,
            def_=def_,
            name=name,
            partitioned=partitioned,
            type=type,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['index'] == index_definition_model
        assert req_body['ddoc'] == 'testString'
        assert req_body['def'] == index_definition_model
        assert req_body['name'] == 'testString'
        assert req_body['partitioned'] == True
        assert req_body['type'] == 'json'
    def test_post_index_all_params_with_retries(self):
        # Enable retries and run test_post_index_all_params.
        _service.enable_retries()
        self.test_post_index_all_params()
        # Disable retries and run test_post_index_all_params.
        _service.disable_retries()
        self.test_post_index_all_params()
    @responses.activate
    def test_post_index_value_error(self):
        """
        test_post_index_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_index')
        mock_response = '{"id": "id", "name": "name", "result": "created"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a Analyzer model
        analyzer_model = {}
        analyzer_model['name'] = 'classic'
        analyzer_model['stopwords'] = ['testString']
        # Construct a dict representation of a IndexTextOperatorDefaultField model
        index_text_operator_default_field_model = {}
        index_text_operator_default_field_model['analyzer'] = analyzer_model
        index_text_operator_default_field_model['enabled'] = True
        # Construct a dict representation of a IndexField model
        index_field_model = {}
        index_field_model['name'] = 'testString'
        index_field_model['type'] = 'boolean'
        index_field_model['foo'] = 'asc'
        # Construct a dict representation of a IndexDefinition model
        index_definition_model = {}
        index_definition_model['default_analyzer'] = analyzer_model
        index_definition_model['default_field'] = index_text_operator_default_field_model
        index_definition_model['fields'] = [index_field_model]
        index_definition_model['index_array_lengths'] = True
        index_definition_model['partial_filter_selector'] = {}
        # Set up required parameter values only; optional params are irrelevant
        # to the ValueError checks below.
        db = 'testString'
        index = index_definition_model
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "index": index,
        }
        for param in req_param_dict:
            # Compare by equality, not identity: `is not` on strings only works
            # by accident of CPython key-object reuse/interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_index(**req_copy)
    def test_post_index_value_error_with_retries(self):
        # Enable retries and run test_post_index_value_error.
        _service.enable_retries()
        self.test_post_index_value_error()
        # Disable retries and run test_post_index_value_error.
        _service.disable_retries()
        self.test_post_index_value_error()
class TestDeleteIndex():
    """
    Test Class for delete_index
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs with trailing slashes are matched via a regex accepting any number
        # of trailing slashes; all other URLs are matched literally.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_delete_index_all_params(self):
        """
        delete_index()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_index/_design/testString/json/testString')
        mock_response = '{"ok": true}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        type = 'json'
        index = 'testString'
        # Invoke method
        response = _service.delete_index(
            db,
            ddoc,
            type,
            index,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_delete_index_all_params_with_retries(self):
        # Enable retries and run test_delete_index_all_params.
        _service.enable_retries()
        self.test_delete_index_all_params()
        # Disable retries and run test_delete_index_all_params.
        _service.disable_retries()
        self.test_delete_index_all_params()
    @responses.activate
    def test_delete_index_value_error(self):
        """
        test_delete_index_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_index/_design/testString/json/testString')
        mock_response = '{"ok": true}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        type = 'json'
        index = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "type": type,
            "index": index,
        }
        for param in req_param_dict:
            # Compare by equality, not identity: `is not` on strings only works
            # by accident of CPython key-object reuse/interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_index(**req_copy)
    def test_delete_index_value_error_with_retries(self):
        # Enable retries and run test_delete_index_value_error.
        _service.enable_retries()
        self.test_delete_index_value_error()
        # Disable retries and run test_delete_index_value_error.
        _service.disable_retries()
        self.test_delete_index_value_error()
# endregion
##############################################################################
# End of Service: Queries
##############################################################################
##############################################################################
# Start of Service: Searches
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """
    def test_new_instance(self):
        """
        new_instance()
        """
        # Force no-auth so construction does not require real credentials.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        service = CloudantV1.new_instance(
            service_name='TEST_SERVICE',
        )
        assert service is not None
        assert isinstance(service, CloudantV1)
    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # The constructor must raise, so there is nothing to assign: the
        # original `service = ...` assignment inside pytest.raises was dead code.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance(
            )
class TestPostSearchAnalyze():
    """
    Test Class for post_search_analyze
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs with trailing slashes are matched via a regex accepting any number
        # of trailing slashes; all other URLs are matched literally.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_post_search_analyze_all_params(self):
        """
        post_search_analyze()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_search_analyze')
        mock_response = '{"tokens": ["tokens"]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        analyzer = 'arabic'
        text = 'testString'
        # Invoke method
        response = _service.post_search_analyze(
            analyzer,
            text,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['analyzer'] == 'arabic'
        assert req_body['text'] == 'testString'
    def test_post_search_analyze_all_params_with_retries(self):
        # Enable retries and run test_post_search_analyze_all_params.
        _service.enable_retries()
        self.test_post_search_analyze_all_params()
        # Disable retries and run test_post_search_analyze_all_params.
        _service.disable_retries()
        self.test_post_search_analyze_all_params()
    @responses.activate
    def test_post_search_analyze_value_error(self):
        """
        test_post_search_analyze_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_search_analyze')
        mock_response = '{"tokens": ["tokens"]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        analyzer = 'arabic'
        text = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "analyzer": analyzer,
            "text": text,
        }
        for param in req_param_dict:
            # Compare by equality, not identity: `is not` on strings only works
            # by accident of CPython key-object reuse/interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_search_analyze(**req_copy)
    def test_post_search_analyze_value_error_with_retries(self):
        # Enable retries and run test_post_search_analyze_value_error.
        _service.enable_retries()
        self.test_post_search_analyze_value_error()
        # Disable retries and run test_post_search_analyze_value_error.
        _service.disable_retries()
        self.test_post_search_analyze_value_error()
class TestPostSearch():
    """
    Test Class for post_search
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs with trailing slashes are matched via a regex accepting any number
        # of trailing slashes; all other URLs are matched literally.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_post_search_all_params(self):
        """
        post_search()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_search/testString')
        mock_response = '{"total_rows": 0, "bookmark": "bookmark", "by": "by", "counts": {"mapKey": {"mapKey": 0}}, "ranges": {"mapKey": {"mapKey": 0}}, "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "fields": {"mapKey": "anyValue"}, "highlights": {"mapKey": ["inner"]}, "id": "id"}], "groups": [{"total_rows": 0, "bookmark": "bookmark", "by": "by", "counts": {"mapKey": {"mapKey": 0}}, "ranges": {"mapKey": {"mapKey": 0}}, "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "fields": {"mapKey": "anyValue"}, "highlights": {"mapKey": ["inner"]}, "id": "id"}]}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        query = 'testString'
        bookmark = 'testString'
        highlight_fields = ['testString']
        highlight_number = 1
        highlight_post_tag = '</em>'
        highlight_pre_tag = '<em>'
        highlight_size = 1
        include_docs = False
        include_fields = ['testString']
        limit = 0
        sort = ['testString']
        stale = 'ok'
        counts = ['testString']
        drilldown = [['testString']]
        group_field = 'testString'
        group_limit = 1
        group_sort = ['testString']
        ranges = {}
        # Invoke method
        response = _service.post_search(
            db,
            ddoc,
            index,
            query,
            bookmark=bookmark,
            highlight_fields=highlight_fields,
            highlight_number=highlight_number,
            highlight_post_tag=highlight_post_tag,
            highlight_pre_tag=highlight_pre_tag,
            highlight_size=highlight_size,
            include_docs=include_docs,
            include_fields=include_fields,
            limit=limit,
            sort=sort,
            stale=stale,
            counts=counts,
            drilldown=drilldown,
            group_field=group_field,
            group_limit=group_limit,
            group_sort=group_sort,
            ranges=ranges,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['query'] == 'testString'
        assert req_body['bookmark'] == 'testString'
        assert req_body['highlight_fields'] == ['testString']
        assert req_body['highlight_number'] == 1
        assert req_body['highlight_post_tag'] == '</em>'
        assert req_body['highlight_pre_tag'] == '<em>'
        assert req_body['highlight_size'] == 1
        assert req_body['include_docs'] == False
        assert req_body['include_fields'] == ['testString']
        assert req_body['limit'] == 0
        assert req_body['sort'] == ['testString']
        assert req_body['stale'] == 'ok'
        assert req_body['counts'] == ['testString']
        assert req_body['drilldown'] == [['testString']]
        assert req_body['group_field'] == 'testString'
        assert req_body['group_limit'] == 1
        assert req_body['group_sort'] == ['testString']
        assert req_body['ranges'] == {}
    def test_post_search_all_params_with_retries(self):
        # Enable retries and run test_post_search_all_params.
        _service.enable_retries()
        self.test_post_search_all_params()
        # Disable retries and run test_post_search_all_params.
        _service.disable_retries()
        self.test_post_search_all_params()
    @responses.activate
    def test_post_search_value_error(self):
        """
        test_post_search_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_search/testString')
        mock_response = '{"total_rows": 0, "bookmark": "bookmark", "by": "by", "counts": {"mapKey": {"mapKey": 0}}, "ranges": {"mapKey": {"mapKey": 0}}, "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "fields": {"mapKey": "anyValue"}, "highlights": {"mapKey": ["inner"]}, "id": "id"}], "groups": [{"total_rows": 0, "bookmark": "bookmark", "by": "by", "counts": {"mapKey": {"mapKey": 0}}, "ranges": {"mapKey": {"mapKey": 0}}, "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "fields": {"mapKey": "anyValue"}, "highlights": {"mapKey": ["inner"]}, "id": "id"}]}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up required parameter values only; optional params are irrelevant
        # to the ValueError checks below.
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        query = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "index": index,
            "query": query,
        }
        for param in req_param_dict:
            # Compare by equality, not identity: `is not` on strings only works
            # by accident of CPython key-object reuse/interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_search(**req_copy)
    def test_post_search_value_error_with_retries(self):
        # Enable retries and run test_post_search_value_error.
        _service.enable_retries()
        self.test_post_search_value_error()
        # Disable retries and run test_post_search_value_error.
        _service.disable_retries()
        self.test_post_search_value_error()
class TestPostSearchAsStream():
    """
    Test Class for post_search_as_stream
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs with trailing slashes are matched via a regex accepting any number
        # of trailing slashes; all other URLs are matched literally.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_post_search_as_stream_all_params(self):
        """
        post_search_as_stream()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_search/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        query = 'testString'
        bookmark = 'testString'
        highlight_fields = ['testString']
        highlight_number = 1
        highlight_post_tag = '</em>'
        highlight_pre_tag = '<em>'
        highlight_size = 1
        include_docs = False
        include_fields = ['testString']
        limit = 3
        sort = ['testString']
        stale = 'ok'
        counts = ['testString']
        drilldown = [['testString']]
        group_field = 'testString'
        group_limit = 1
        group_sort = ['testString']
        ranges = {}
        # Invoke method
        response = _service.post_search_as_stream(
            db,
            ddoc,
            index,
            query,
            bookmark=bookmark,
            highlight_fields=highlight_fields,
            highlight_number=highlight_number,
            highlight_post_tag=highlight_post_tag,
            highlight_pre_tag=highlight_pre_tag,
            highlight_size=highlight_size,
            include_docs=include_docs,
            include_fields=include_fields,
            limit=limit,
            sort=sort,
            stale=stale,
            counts=counts,
            drilldown=drilldown,
            group_field=group_field,
            group_limit=group_limit,
            group_sort=group_sort,
            ranges=ranges,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['query'] == 'testString'
        assert req_body['bookmark'] == 'testString'
        assert req_body['highlight_fields'] == ['testString']
        assert req_body['highlight_number'] == 1
        assert req_body['highlight_post_tag'] == '</em>'
        assert req_body['highlight_pre_tag'] == '<em>'
        assert req_body['highlight_size'] == 1
        assert req_body['include_docs'] == False
        assert req_body['include_fields'] == ['testString']
        assert req_body['limit'] == 3
        assert req_body['sort'] == ['testString']
        assert req_body['stale'] == 'ok'
        assert req_body['counts'] == ['testString']
        assert req_body['drilldown'] == [['testString']]
        assert req_body['group_field'] == 'testString'
        assert req_body['group_limit'] == 1
        assert req_body['group_sort'] == ['testString']
        assert req_body['ranges'] == {}
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response
    def test_post_search_as_stream_all_params_with_retries(self):
        # Enable retries and run test_post_search_as_stream_all_params.
        _service.enable_retries()
        self.test_post_search_as_stream_all_params()
        # Disable retries and run test_post_search_as_stream_all_params.
        _service.disable_retries()
        self.test_post_search_as_stream_all_params()
    @responses.activate
    def test_post_search_as_stream_value_error(self):
        """
        test_post_search_as_stream_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_search/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up required parameter values only; optional params are irrelevant
        # to the ValueError checks below.
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        query = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "index": index,
            "query": query,
        }
        for param in req_param_dict:
            # Compare by equality, not identity: `is not` on strings only works
            # by accident of CPython key-object reuse/interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_search_as_stream(**req_copy)
    def test_post_search_as_stream_value_error_with_retries(self):
        # Enable retries and run test_post_search_as_stream_value_error.
        _service.enable_retries()
        self.test_post_search_as_stream_value_error()
        # Disable retries and run test_post_search_as_stream_value_error.
        _service.disable_retries()
        self.test_post_search_as_stream_value_error()
class TestGetSearchInfo():
    """
    Test Class for get_search_info
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Trailing-slash URLs are matched with a regex so the mock still fires
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_search_info_all_params(self):
        """
        get_search_info()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_search_info/testString')
        mock_response = '{"name": "name", "search_index": {"committed_seq": 13, "disk_size": 0, "doc_count": 0, "doc_del_count": 0, "pending_seq": 11}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Invoke method
        response = _service.get_search_info(
            db,
            ddoc,
            index,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_search_info_all_params_with_retries(self):
        """Run test_get_search_info_all_params with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_get_search_info_all_params()

    @responses.activate
    def test_get_search_info_value_error(self):
        """
        test_get_search_info_value_error()

        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_search_info/testString')
        mock_response = '{"name": "name", "search_index": {"committed_seq": 13, "disk_size": 0, "doc_count": 0, "doc_del_count": 0, "pending_seq": 11}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "index": index,
        }
        for param in req_param_dict:
            # Compare strings by value (!=), not identity (is not)
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_search_info(**req_copy)

    def test_get_search_info_value_error_with_retries(self):
        """Run test_get_search_info_value_error with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_get_search_info_value_error()
# endregion
##############################################################################
# End of Service: Searches
##############################################################################
##############################################################################
# Start of Service: Geospatial
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        svc = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert svc is not None
        assert isinstance(svc, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestGetGeo():
    """
    Test Class for get_geo
    """

    # Large mock payload shared by every test in this class; hoisted to a
    # class attribute so the literal is defined exactly once.
    _MOCK_RESPONSE = '{"bookmark": "bookmark", "features": [{"_id": "id", "_rev": "rev", "bbox": [4], "geometry": {"type": "Point", "coordinates": ["anyValue"]}, "properties": {"mapKey": "anyValue"}, "type": "Feature"}], "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "geometry": {"type": "Point", "coordinates": ["anyValue"]}, "id": "id", "rev": "rev"}], "type": "FeatureCollection"}'

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Trailing-slash URLs are matched with a regex so the mock still fires
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_geo_all_params(self):
        """
        get_geo()

        Exercise get_geo with every optional parameter and validate the query
        string sent to the server.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo/testString')
        mock_response = self._MOCK_RESPONSE
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        bbox = 'testString'
        bookmark = 'testString'
        format = 'view'  # shadows the builtin; name kept to mirror the API parameter
        g = 'testString'
        include_docs = False
        lat = -90
        limit = 0
        lon = -180
        nearest = False
        radius = 0
        rangex = 0
        rangey = 0
        relation = 'intersects'
        skip = 0
        stale = 'ok'
        # Invoke method
        response = _service.get_geo(
            db,
            ddoc,
            index,
            bbox=bbox,
            bookmark=bookmark,
            format=format,
            g=g,
            include_docs=include_docs,
            lat=lat,
            limit=limit,
            lon=lon,
            nearest=nearest,
            radius=radius,
            rangex=rangex,
            rangey=rangey,
            relation=relation,
            skip=skip,
            stale=stale,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'bbox={}'.format(bbox) in query_string
        assert 'bookmark={}'.format(bookmark) in query_string
        assert 'format={}'.format(format) in query_string
        assert 'g={}'.format(g) in query_string
        assert 'include_docs={}'.format('true' if include_docs else 'false') in query_string
        assert 'lat={}'.format(lat) in query_string
        assert 'limit={}'.format(limit) in query_string
        assert 'lon={}'.format(lon) in query_string
        assert 'nearest={}'.format('true' if nearest else 'false') in query_string
        assert 'radius={}'.format(radius) in query_string
        assert 'rangex={}'.format(rangex) in query_string
        assert 'rangey={}'.format(rangey) in query_string
        assert 'relation={}'.format(relation) in query_string
        assert 'skip={}'.format(skip) in query_string
        assert 'stale={}'.format(stale) in query_string

    def test_get_geo_all_params_with_retries(self):
        """Run test_get_geo_all_params with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_get_geo_all_params()

    @responses.activate
    def test_get_geo_required_params(self):
        """
        test_get_geo_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo/testString')
        mock_response = self._MOCK_RESPONSE
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Invoke method
        response = _service.get_geo(
            db,
            ddoc,
            index,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_geo_required_params_with_retries(self):
        """Run test_get_geo_required_params with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_get_geo_required_params()

    @responses.activate
    def test_get_geo_value_error(self):
        """
        test_get_geo_value_error()

        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo/testString')
        mock_response = self._MOCK_RESPONSE
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "index": index,
        }
        for param in req_param_dict:
            # Compare strings by value (!=), not identity (is not)
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_geo(**req_copy)

    def test_get_geo_value_error_with_retries(self):
        """Run test_get_geo_value_error with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_get_geo_value_error()
class TestGetGeoAsStream():
    """
    Test Class for get_geo_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Trailing-slash URLs are matched with a regex so the mock still fires
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_geo_as_stream_all_params(self):
        """
        get_geo_as_stream()

        Exercise get_geo_as_stream with every optional parameter, validate the
        query string, and verify the streamed JSON response.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        bbox = 'testString'
        bookmark = 'testString'
        format = 'view'  # shadows the builtin; name kept to mirror the API parameter
        g = 'testString'
        include_docs = False
        lat = -90
        limit = 0
        lon = -180
        nearest = False
        radius = 0
        rangex = 0
        rangey = 0
        relation = 'intersects'
        skip = 0
        stale = 'ok'
        # Invoke method
        response = _service.get_geo_as_stream(
            db,
            ddoc,
            index,
            bbox=bbox,
            bookmark=bookmark,
            format=format,
            g=g,
            include_docs=include_docs,
            lat=lat,
            limit=limit,
            lon=lon,
            nearest=nearest,
            radius=radius,
            rangex=rangex,
            rangey=rangey,
            relation=relation,
            skip=skip,
            stale=stale,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'bbox={}'.format(bbox) in query_string
        assert 'bookmark={}'.format(bookmark) in query_string
        assert 'format={}'.format(format) in query_string
        assert 'g={}'.format(g) in query_string
        assert 'include_docs={}'.format('true' if include_docs else 'false') in query_string
        assert 'lat={}'.format(lat) in query_string
        assert 'limit={}'.format(limit) in query_string
        assert 'lon={}'.format(lon) in query_string
        assert 'nearest={}'.format('true' if nearest else 'false') in query_string
        assert 'radius={}'.format(radius) in query_string
        assert 'rangex={}'.format(rangex) in query_string
        assert 'rangey={}'.format(rangey) in query_string
        assert 'relation={}'.format(relation) in query_string
        assert 'skip={}'.format(skip) in query_string
        assert 'stale={}'.format(stale) in query_string
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_get_geo_as_stream_all_params_with_retries(self):
        """Run test_get_geo_as_stream_all_params with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_get_geo_as_stream_all_params()

    @responses.activate
    def test_get_geo_as_stream_required_params(self):
        """
        test_get_geo_as_stream_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Invoke method
        response = _service.get_geo_as_stream(
            db,
            ddoc,
            index,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_get_geo_as_stream_required_params_with_retries(self):
        """Run test_get_geo_as_stream_required_params with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_get_geo_as_stream_required_params()

    @responses.activate
    def test_get_geo_as_stream_value_error(self):
        """
        test_get_geo_as_stream_value_error()

        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "index": index,
        }
        for param in req_param_dict:
            # Compare strings by value (!=), not identity (is not)
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_geo_as_stream(**req_copy)

    def test_get_geo_as_stream_value_error_with_retries(self):
        """Run test_get_geo_as_stream_value_error with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_get_geo_as_stream_value_error()
class TestPostGeoCleanup():
    """
    Test Class for post_geo_cleanup
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Trailing-slash URLs are matched with a regex so the mock still fires
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_geo_cleanup_all_params(self):
        """
        post_geo_cleanup()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_geo_cleanup')
        mock_response = '{"ok": true}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=202)
        # Set up parameter values
        db = 'testString'
        # Invoke method
        response = _service.post_geo_cleanup(
            db,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202

    def test_post_geo_cleanup_all_params_with_retries(self):
        """Run test_post_geo_cleanup_all_params with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_post_geo_cleanup_all_params()

    @responses.activate
    def test_post_geo_cleanup_value_error(self):
        """
        test_post_geo_cleanup_value_error()

        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_geo_cleanup')
        mock_response = '{"ok": true}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=202)
        # Set up parameter values
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Compare strings by value (!=), not identity (is not)
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_geo_cleanup(**req_copy)

    def test_post_geo_cleanup_value_error_with_retries(self):
        """Run test_post_geo_cleanup_value_error with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_post_geo_cleanup_value_error()
class TestGetGeoIndexInformation():
    """
    Test Class for get_geo_index_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Trailing-slash URLs are matched with a regex so the mock still fires
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_geo_index_information_all_params(self):
        """
        get_geo_index_information()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo_info/testString')
        mock_response = '{"geo_index": {"data_size": 0, "disk_size": 0, "doc_count": 0}, "name": "name"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Invoke method
        response = _service.get_geo_index_information(
            db,
            ddoc,
            index,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_geo_index_information_all_params_with_retries(self):
        """Run test_get_geo_index_information_all_params with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_get_geo_index_information_all_params()

    @responses.activate
    def test_get_geo_index_information_value_error(self):
        """
        test_get_geo_index_information_value_error()

        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo_info/testString')
        mock_response = '{"geo_index": {"data_size": 0, "disk_size": 0, "doc_count": 0}, "name": "name"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "index": index,
        }
        for param in req_param_dict:
            # Compare strings by value (!=), not identity (is not)
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_geo_index_information(**req_copy)

    def test_get_geo_index_information_value_error_with_retries(self):
        """Run test_get_geo_index_information_value_error with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_get_geo_index_information_value_error()
# endregion
##############################################################################
# End of Service: Geospatial
##############################################################################
##############################################################################
# Start of Service: Replication
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        svc = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert svc is not None
        assert isinstance(svc, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestHeadReplicationDocument():
    """
    Test Class for head_replication_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Trailing-slash URLs are matched with a regex so the mock still fires
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_head_replication_document_all_params(self):
        """
        head_replication_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        doc_id = 'testString'
        if_none_match = 'testString'
        # Invoke method
        response = _service.head_replication_document(
            doc_id,
            if_none_match=if_none_match,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_replication_document_all_params_with_retries(self):
        """Run test_head_replication_document_all_params with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_head_replication_document_all_params()

    @responses.activate
    def test_head_replication_document_required_params(self):
        """
        test_head_replication_document_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        doc_id = 'testString'
        # Invoke method
        response = _service.head_replication_document(
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_replication_document_required_params_with_retries(self):
        """Run test_head_replication_document_required_params with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_head_replication_document_required_params()

    @responses.activate
    def test_head_replication_document_value_error(self):
        """
        test_head_replication_document_value_error()

        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Compare strings by value (!=), not identity (is not)
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_replication_document(**req_copy)

    def test_head_replication_document_value_error_with_retries(self):
        """Run test_head_replication_document_value_error with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_head_replication_document_value_error()
class TestHeadSchedulerDocument():
    """
    Test Class for head_scheduler_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Trailing-slash URLs are matched with a regex so the mock still fires
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_head_scheduler_document_all_params(self):
        """
        head_scheduler_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/docs/_replicator/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        doc_id = 'testString'
        # Invoke method
        response = _service.head_scheduler_document(
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_scheduler_document_all_params_with_retries(self):
        """Run test_head_scheduler_document_all_params with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_head_scheduler_document_all_params()

    @responses.activate
    def test_head_scheduler_document_value_error(self):
        """
        test_head_scheduler_document_value_error()

        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/docs/_replicator/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Compare strings by value (!=), not identity (is not)
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_scheduler_document(**req_copy)

    def test_head_scheduler_document_value_error_with_retries(self):
        """Run test_head_scheduler_document_value_error with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_head_scheduler_document_value_error()
class TestHeadSchedulerJob():
    """
    Test Class for head_scheduler_job
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Trailing-slash URLs are matched with a regex so the mock still fires
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_head_scheduler_job_all_params(self):
        """
        head_scheduler_job()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/jobs/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        job_id = 'testString'
        # Invoke method
        response = _service.head_scheduler_job(
            job_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_scheduler_job_all_params_with_retries(self):
        """Run test_head_scheduler_job_all_params with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_head_scheduler_job_all_params()

    @responses.activate
    def test_head_scheduler_job_value_error(self):
        """
        test_head_scheduler_job_value_error()

        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/jobs/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        job_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "job_id": job_id,
        }
        for param in req_param_dict:
            # Compare strings by value (!=), not identity (is not)
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_scheduler_job(**req_copy)

    def test_head_scheduler_job_value_error_with_retries(self):
        """Run test_head_scheduler_job_value_error with retries enabled, then disabled."""
        for configure in (_service.enable_retries, _service.disable_retries):
            configure()
            self.test_head_scheduler_job_value_error()
class TestDeleteReplicationDocument():
    """
    Test Class for delete_replication_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in slash(es) are matched with a regex tolerant of extra slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_delete_replication_document_all_params(self):
        """
        delete_replication_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)

        # Set up parameter values
        doc_id = 'testString'
        if_match = 'testString'
        batch = 'ok'
        rev = 'testString'

        # Invoke method
        response = _service.delete_replication_document(
            doc_id,
            if_match=if_match,
            batch=batch,
            rev=rev,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'batch={}'.format(batch) in query_string
        assert 'rev={}'.format(rev) in query_string

    def test_delete_replication_document_all_params_with_retries(self):
        # Enable retries and run test_delete_replication_document_all_params.
        _service.enable_retries()
        self.test_delete_replication_document_all_params()

        # Disable retries and run test_delete_replication_document_all_params.
        _service.disable_retries()
        self.test_delete_replication_document_all_params()

    @responses.activate
    def test_delete_replication_document_required_params(self):
        """
        test_delete_replication_document_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)

        # Set up parameter values
        doc_id = 'testString'

        # Invoke method
        response = _service.delete_replication_document(
            doc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

    def test_delete_replication_document_required_params_with_retries(self):
        # Enable retries and run test_delete_replication_document_required_params.
        _service.enable_retries()
        self.test_delete_replication_document_required_params()

        # Disable retries and run test_delete_replication_document_required_params.
        _service.disable_retries()
        self.test_delete_replication_document_required_params()

    @responses.activate
    def test_delete_replication_document_value_error(self):
        """
        test_delete_replication_document_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)

        # Set up parameter values
        doc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "doc_id": doc_id,
        }
        for param in req_param_dict.keys():
            # Fix: compare strings by value ('!='), not identity ('is not'),
            # which only worked by accident via CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_replication_document(**req_copy)

    def test_delete_replication_document_value_error_with_retries(self):
        # Enable retries and run test_delete_replication_document_value_error.
        _service.enable_retries()
        self.test_delete_replication_document_value_error()

        # Disable retries and run test_delete_replication_document_value_error.
        _service.disable_retries()
        self.test_delete_replication_document_value_error()
class TestGetReplicationDocument():
    """
    Test Class for get_replication_document
    """

    # Mock ReplicationDocument JSON returned by every test in this class.
    # Hoisted to a class attribute so the large literal is defined exactly once.
    mock_response = '{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}], "cancel": true, "checkpoint_interval": 0, "connection_timeout": 0, "continuous": false, "create_target": false, "create_target_params": {"n": 1, "partitioned": false, "q": 1}, "doc_ids": ["doc_ids"], "filter": "filter", "http_connections": 1, "query_params": {"mapKey": "inner"}, "retries_per_request": 0, "selector": {"mapKey": "anyValue"}, "since_seq": "since_seq", "socket_options": "socket_options", "source": {"auth": {"basic": {"password": "password", "username": "username"}, "iam": {"api_key": "api_key"}}, "headers": {"mapKey": "inner"}, "url": "url"}, "source_proxy": "source_proxy", "target": {"auth": {"basic": {"password": "password", "username": "username"}, "iam": {"api_key": "api_key"}}, "headers": {"mapKey": "inner"}, "url": "url"}, "target_proxy": "target_proxy", "use_checkpoints": true, "user_ctx": {"db": "db", "name": "name", "roles": ["_reader"]}, "worker_batch_size": 1, "worker_processes": 1}'

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in slash(es) are matched with a regex tolerant of extra slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_replication_document_all_params(self):
        """
        get_replication_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        responses.add(responses.GET,
                      url,
                      body=self.mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        doc_id = 'testString'
        if_none_match = 'testString'
        attachments = False
        att_encoding_info = False
        conflicts = False
        deleted_conflicts = False
        latest = False
        local_seq = False
        meta = False
        rev = 'testString'
        revs = False
        revs_info = False

        # Invoke method
        response = _service.get_replication_document(
            doc_id,
            if_none_match=if_none_match,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            conflicts=conflicts,
            deleted_conflicts=deleted_conflicts,
            latest=latest,
            local_seq=local_seq,
            meta=meta,
            rev=rev,
            revs=revs,
            revs_info=revs_info,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'conflicts={}'.format('true' if conflicts else 'false') in query_string
        assert 'deleted_conflicts={}'.format('true' if deleted_conflicts else 'false') in query_string
        assert 'latest={}'.format('true' if latest else 'false') in query_string
        assert 'local_seq={}'.format('true' if local_seq else 'false') in query_string
        assert 'meta={}'.format('true' if meta else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string
        assert 'revs={}'.format('true' if revs else 'false') in query_string
        assert 'revs_info={}'.format('true' if revs_info else 'false') in query_string

    def test_get_replication_document_all_params_with_retries(self):
        # Enable retries and run test_get_replication_document_all_params.
        _service.enable_retries()
        self.test_get_replication_document_all_params()

        # Disable retries and run test_get_replication_document_all_params.
        _service.disable_retries()
        self.test_get_replication_document_all_params()

    @responses.activate
    def test_get_replication_document_required_params(self):
        """
        test_get_replication_document_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        responses.add(responses.GET,
                      url,
                      body=self.mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        doc_id = 'testString'

        # Invoke method
        response = _service.get_replication_document(
            doc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_replication_document_required_params_with_retries(self):
        # Enable retries and run test_get_replication_document_required_params.
        _service.enable_retries()
        self.test_get_replication_document_required_params()

        # Disable retries and run test_get_replication_document_required_params.
        _service.disable_retries()
        self.test_get_replication_document_required_params()

    @responses.activate
    def test_get_replication_document_value_error(self):
        """
        test_get_replication_document_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        responses.add(responses.GET,
                      url,
                      body=self.mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        doc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "doc_id": doc_id,
        }
        for param in req_param_dict.keys():
            # Fix: compare strings by value ('!='), not identity ('is not'),
            # which only worked by accident via CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_replication_document(**req_copy)

    def test_get_replication_document_value_error_with_retries(self):
        # Enable retries and run test_get_replication_document_value_error.
        _service.enable_retries()
        self.test_get_replication_document_value_error()

        # Disable retries and run test_get_replication_document_value_error.
        _service.disable_retries()
        self.test_get_replication_document_value_error()
class TestPutReplicationDocument():
    """
    Test Class for put_replication_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in slash(es) are matched with a regex tolerant of extra slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    def _build_replication_document(self):
        """Construct the dict representation of a ReplicationDocument model.

        Shared by all tests in this class; extracting it removes ~80 duplicated
        lines per test. (The previously-built Attachment model dict was never
        referenced — '_attachments' is set to {} — so that dead code is dropped.)
        """
        # Construct a dict representation of a Revisions model
        revisions_model = {}
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1

        # Construct a dict representation of a DocumentRevisionStatus model
        document_revision_status_model = {}
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'

        # Construct a dict representation of a ReplicationCreateTargetParameters model
        replication_create_target_parameters_model = {}
        replication_create_target_parameters_model['n'] = 1
        replication_create_target_parameters_model['partitioned'] = False
        replication_create_target_parameters_model['q'] = 1

        # Construct a dict representation of a ReplicationDatabaseAuthBasic model
        replication_database_auth_basic_model = {}
        replication_database_auth_basic_model['password'] = 'testString'
        replication_database_auth_basic_model['username'] = 'testString'

        # Construct a dict representation of a ReplicationDatabaseAuthIam model
        replication_database_auth_iam_model = {}
        replication_database_auth_iam_model['api_key'] = 'testString'

        # Construct a dict representation of a ReplicationDatabaseAuth model
        replication_database_auth_model = {}
        replication_database_auth_model['basic'] = replication_database_auth_basic_model
        replication_database_auth_model['iam'] = replication_database_auth_iam_model

        # Construct a dict representation of a ReplicationDatabase model
        replication_database_model = {}
        replication_database_model['auth'] = replication_database_auth_model
        replication_database_model['headers'] = {}
        replication_database_model['url'] = 'testString'

        # Construct a dict representation of a UserContext model
        user_context_model = {}
        user_context_model['db'] = 'testString'
        user_context_model['name'] = 'testString'
        user_context_model['roles'] = ['_reader']

        # Construct a dict representation of a ReplicationDocument model
        replication_document_model = {}
        replication_document_model['_attachments'] = {}
        replication_document_model['_conflicts'] = ['testString']
        replication_document_model['_deleted'] = True
        replication_document_model['_deleted_conflicts'] = ['testString']
        replication_document_model['_id'] = 'testString'
        replication_document_model['_local_seq'] = 'testString'
        replication_document_model['_rev'] = 'testString'
        replication_document_model['_revisions'] = revisions_model
        replication_document_model['_revs_info'] = [document_revision_status_model]
        replication_document_model['cancel'] = True
        replication_document_model['checkpoint_interval'] = 0
        replication_document_model['connection_timeout'] = 0
        replication_document_model['continuous'] = False
        replication_document_model['create_target'] = False
        replication_document_model['create_target_params'] = replication_create_target_parameters_model
        replication_document_model['doc_ids'] = ['testString']
        replication_document_model['filter'] = 'testString'
        replication_document_model['http_connections'] = 1
        replication_document_model['query_params'] = {}
        replication_document_model['retries_per_request'] = 0
        replication_document_model['selector'] = {}
        replication_document_model['since_seq'] = 'testString'
        replication_document_model['socket_options'] = 'testString'
        replication_document_model['source'] = replication_database_model
        replication_document_model['source_proxy'] = 'testString'
        replication_document_model['target'] = replication_database_model
        replication_document_model['target_proxy'] = 'testString'
        replication_document_model['use_checkpoints'] = True
        replication_document_model['user_ctx'] = user_context_model
        replication_document_model['worker_batch_size'] = 1
        replication_document_model['worker_processes'] = 1
        replication_document_model['foo'] = 'testString'
        return replication_document_model

    @responses.activate
    def test_put_replication_document_all_params(self):
        """
        put_replication_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)

        # Set up parameter values
        doc_id = 'testString'
        replication_document = self._build_replication_document()
        if_match = 'testString'
        batch = 'ok'
        new_edits = False
        rev = 'testString'

        # Invoke method
        response = _service.put_replication_document(
            doc_id,
            replication_document,
            if_match=if_match,
            batch=batch,
            new_edits=new_edits,
            rev=rev,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'batch={}'.format(batch) in query_string
        assert 'new_edits={}'.format('true' if new_edits else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body == replication_document

    def test_put_replication_document_all_params_with_retries(self):
        # Enable retries and run test_put_replication_document_all_params.
        _service.enable_retries()
        self.test_put_replication_document_all_params()

        # Disable retries and run test_put_replication_document_all_params.
        _service.disable_retries()
        self.test_put_replication_document_all_params()

    @responses.activate
    def test_put_replication_document_required_params(self):
        """
        test_put_replication_document_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)

        # Set up parameter values
        doc_id = 'testString'
        replication_document = self._build_replication_document()

        # Invoke method
        response = _service.put_replication_document(
            doc_id,
            replication_document,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body == replication_document

    def test_put_replication_document_required_params_with_retries(self):
        # Enable retries and run test_put_replication_document_required_params.
        _service.enable_retries()
        self.test_put_replication_document_required_params()

        # Disable retries and run test_put_replication_document_required_params.
        _service.disable_retries()
        self.test_put_replication_document_required_params()

    @responses.activate
    def test_put_replication_document_value_error(self):
        """
        test_put_replication_document_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)

        # Set up parameter values
        doc_id = 'testString'
        replication_document = self._build_replication_document()

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "doc_id": doc_id,
            "replication_document": replication_document,
        }
        for param in req_param_dict.keys():
            # Fix: compare strings by value ('!='), not identity ('is not'),
            # which only worked by accident via CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.put_replication_document(**req_copy)

    def test_put_replication_document_value_error_with_retries(self):
        # Enable retries and run test_put_replication_document_value_error.
        _service.enable_retries()
        self.test_put_replication_document_value_error()

        # Disable retries and run test_put_replication_document_value_error.
        _service.disable_retries()
        self.test_put_replication_document_value_error()
class TestGetSchedulerDocs():
    """
    Test Class for get_scheduler_docs
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Normalise percent-encoding first (avoid double-encoding), then quote once.
        request_url = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        # URLs ending in slash(es) are matched via a regex tolerant of extra slashes.
        if re.fullmatch('.*/+', request_url):
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_get_scheduler_docs_all_params(self):
        """
        get_scheduler_docs()
        """
        # Register the mocked endpoint.
        mock_url = self.preprocess_url(_base_url + '/_scheduler/docs')
        scheduler_docs_json = '{"total_rows": 0, "docs": [{"database": "database", "doc_id": "doc_id", "error_count": 0, "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "last_updated": "2019-01-01T12:00:00.000Z", "node": "node", "source": "source", "source_proxy": "source_proxy", "start_time": "2019-01-01T12:00:00.000Z", "state": "initializing", "target": "target", "target_proxy": "target_proxy"}]}'
        responses.add(
            responses.GET,
            mock_url,
            body=scheduler_docs_json,
            content_type='application/json',
            status=200,
        )

        # Parameter values under test.
        limit = 0
        skip = 0
        states = ['initializing']

        # Call the service method.
        response = _service.get_scheduler_docs(
            limit=limit,
            skip=skip,
            states=states,
            headers={},
        )

        # Exactly one HTTP call with a 200 response is expected.
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # Confirm every optional parameter made it into the query string.
        sent_query = urllib.parse.unquote_plus(
            responses.calls[0].request.url.split('?', 1)[1])
        assert f'limit={limit}' in sent_query
        assert f'skip={skip}' in sent_query
        assert 'states={}'.format(','.join(states)) in sent_query

    def test_get_scheduler_docs_all_params_with_retries(self):
        # Exercise test_get_scheduler_docs_all_params with retries on, then off.
        for switch in (_service.enable_retries, _service.disable_retries):
            switch()
            self.test_get_scheduler_docs_all_params()

    @responses.activate
    def test_get_scheduler_docs_required_params(self):
        """
        test_get_scheduler_docs_required_params()
        """
        # Register the mocked endpoint.
        mock_url = self.preprocess_url(_base_url + '/_scheduler/docs')
        scheduler_docs_json = '{"total_rows": 0, "docs": [{"database": "database", "doc_id": "doc_id", "error_count": 0, "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "last_updated": "2019-01-01T12:00:00.000Z", "node": "node", "source": "source", "source_proxy": "source_proxy", "start_time": "2019-01-01T12:00:00.000Z", "state": "initializing", "target": "target", "target_proxy": "target_proxy"}]}'
        responses.add(
            responses.GET,
            mock_url,
            body=scheduler_docs_json,
            content_type='application/json',
            status=200,
        )

        # Call the service method with no optional parameters.
        response = _service.get_scheduler_docs()

        # Exactly one HTTP call with a 200 response is expected.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_scheduler_docs_required_params_with_retries(self):
        # Exercise test_get_scheduler_docs_required_params with retries on, then off.
        for switch in (_service.enable_retries, _service.disable_retries):
            switch()
            self.test_get_scheduler_docs_required_params()
class TestGetSchedulerDocument():
    """
    Test Class for get_scheduler_document
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.

        Returns the normalized URL, or a compiled pattern tolerating any run
        of trailing slashes when the URL ends with '/'.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_get_scheduler_document_all_params(self):
        """
        get_scheduler_document() fetches GET /_scheduler/docs/_replicator/{doc_id}.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/docs/_replicator/testString')
        mock_response = '{"database": "database", "doc_id": "doc_id", "error_count": 0, "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "last_updated": "2019-01-01T12:00:00.000Z", "node": "node", "source": "source", "source_proxy": "source_proxy", "start_time": "2019-01-01T12:00:00.000Z", "state": "initializing", "target": "target", "target_proxy": "target_proxy"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        doc_id = 'testString'
        # Invoke method
        response = _service.get_scheduler_document(
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_get_scheduler_document_all_params_with_retries(self):
        # Exercise the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_get_scheduler_document_all_params()
        _service.disable_retries()
        self.test_get_scheduler_document_all_params()
    @responses.activate
    def test_get_scheduler_document_value_error(self):
        """
        Omitting each required parameter in turn must raise ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/docs/_replicator/testString')
        mock_response = '{"database": "database", "doc_id": "doc_id", "error_count": 0, "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "last_updated": "2019-01-01T12:00:00.000Z", "node": "node", "source": "source", "source_proxy": "source_proxy", "start_time": "2019-01-01T12:00:00.000Z", "state": "initializing", "target": "target", "target_proxy": "target_proxy"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Compare keys with == (value equality) rather than `is`:
            # identity comparison of strings only works by accident of
            # CPython interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_scheduler_document(**req_copy)
    def test_get_scheduler_document_value_error_with_retries(self):
        # Exercise the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_get_scheduler_document_value_error()
        _service.disable_retries()
        self.test_get_scheduler_document_value_error()
class TestGetSchedulerJobs():
    """
    Test Class for get_scheduler_jobs
    """
    def preprocess_url(self, request_url: str):
        """Normalize *request_url* so the registered mock matches the request.

        Decodes then re-encodes the URL (avoiding double-encoding); a URL
        ending in '/' becomes a regex tolerating any run of trailing slashes.
        """
        decoded = urllib.parse.unquote(request_url)
        normalized = urllib.parse.quote(decoded, safe=':/')
        if re.fullmatch('.*/+', normalized) is not None:
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized
    @responses.activate
    def test_get_scheduler_jobs_all_params(self):
        """Exercise get_scheduler_jobs() with every optional parameter supplied."""
        # Register the mocked GET /_scheduler/jobs endpoint.
        mock_url = self.preprocess_url(_base_url + '/_scheduler/jobs')
        response_body = '{"total_rows": 0, "jobs": [{"database": "database", "doc_id": "doc_id", "history": [{"reason": "reason", "timestamp": "2019-01-01T12:00:00.000Z", "type": "type"}], "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "node": "node", "pid": "pid", "source": "source", "start_time": "2019-01-01T12:00:00.000Z", "target": "target", "user": "user"}]}'
        responses.add(responses.GET, mock_url, body=response_body,
                      content_type='application/json', status=200)
        # Parameter values under test.
        limit = 0
        skip = 0
        # Invoke the service operation.
        response = _service.get_scheduler_jobs(limit=limit, skip=skip, headers={})
        # One request, successful.
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Each parameter must appear in the request query string.
        raw_query = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(raw_query)
        assert 'limit={}'.format(limit) in query_string
        assert 'skip={}'.format(skip) in query_string
    def test_get_scheduler_jobs_all_params_with_retries(self):
        """Re-run the all-params test with retries enabled, then disabled."""
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_scheduler_jobs_all_params()
    @responses.activate
    def test_get_scheduler_jobs_required_params(self):
        """Exercise get_scheduler_jobs() with defaults only."""
        # Register the mocked GET /_scheduler/jobs endpoint.
        mock_url = self.preprocess_url(_base_url + '/_scheduler/jobs')
        response_body = '{"total_rows": 0, "jobs": [{"database": "database", "doc_id": "doc_id", "history": [{"reason": "reason", "timestamp": "2019-01-01T12:00:00.000Z", "type": "type"}], "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "node": "node", "pid": "pid", "source": "source", "start_time": "2019-01-01T12:00:00.000Z", "target": "target", "user": "user"}]}'
        responses.add(responses.GET, mock_url, body=response_body,
                      content_type='application/json', status=200)
        # Invoke the service operation.
        response = _service.get_scheduler_jobs()
        # One request, successful.
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_get_scheduler_jobs_required_params_with_retries(self):
        """Re-run the required-params test with retries enabled, then disabled."""
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_scheduler_jobs_required_params()
class TestGetSchedulerJob():
    """
    Test Class for get_scheduler_job
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.

        Returns the normalized URL, or a compiled pattern tolerating any run
        of trailing slashes when the URL ends with '/'.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_get_scheduler_job_all_params(self):
        """
        get_scheduler_job() fetches GET /_scheduler/jobs/{job_id}.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/jobs/testString')
        mock_response = '{"database": "database", "doc_id": "doc_id", "history": [{"reason": "reason", "timestamp": "2019-01-01T12:00:00.000Z", "type": "type"}], "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "node": "node", "pid": "pid", "source": "source", "start_time": "2019-01-01T12:00:00.000Z", "target": "target", "user": "user"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        job_id = 'testString'
        # Invoke method
        response = _service.get_scheduler_job(
            job_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_get_scheduler_job_all_params_with_retries(self):
        # Exercise the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_get_scheduler_job_all_params()
        _service.disable_retries()
        self.test_get_scheduler_job_all_params()
    @responses.activate
    def test_get_scheduler_job_value_error(self):
        """
        Omitting each required parameter in turn must raise ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/jobs/testString')
        mock_response = '{"database": "database", "doc_id": "doc_id", "history": [{"reason": "reason", "timestamp": "2019-01-01T12:00:00.000Z", "type": "type"}], "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "node": "node", "pid": "pid", "source": "source", "start_time": "2019-01-01T12:00:00.000Z", "target": "target", "user": "user"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        job_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "job_id": job_id,
        }
        for param in req_param_dict:
            # Compare keys with == (value equality) rather than `is`:
            # identity comparison of strings only works by accident of
            # CPython interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_scheduler_job(**req_copy)
    def test_get_scheduler_job_value_error_with_retries(self):
        # Exercise the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_get_scheduler_job_value_error()
        _service.disable_retries()
        self.test_get_scheduler_job_value_error()
# endregion
##############################################################################
# End of Service: Replication
##############################################################################
##############################################################################
# Start of Service: Authentication
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """
    # NOTE(review): several classes in this module are named TestNewInstance;
    # later definitions shadow earlier ones at module scope, so pytest can only
    # collect the last one. Consider unique names per service region — confirm
    # against the code generator before renaming.
    def test_new_instance(self):
        """
        new_instance() builds a CloudantV1 client from environment config.
        """
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        service = CloudantV1.new_instance(
            service_name='TEST_SERVICE',
        )
        assert service is not None
        assert isinstance(service, CloudantV1)
    def test_new_instance_without_authenticator(self):
        """
        new_instance() without an authenticator must raise ValueError.
        """
        with pytest.raises(ValueError, match='authenticator must be provided'):
            # The result is never bound: the constructor raises before any
            # assignment could matter (the original bound an unused local).
            CloudantV1.new_instance(
            )
class TestGetSessionInformation():
    """
    Test Class for get_session_information
    """
    def preprocess_url(self, request_url: str):
        """Normalize *request_url* so the registered mock matches the request.

        Decodes then re-encodes the URL (avoiding double-encoding); a URL
        ending in '/' becomes a regex tolerating any run of trailing slashes.
        """
        decoded = urllib.parse.unquote(request_url)
        normalized = urllib.parse.quote(decoded, safe=':/')
        if re.fullmatch('.*/+', normalized) is not None:
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized
    @responses.activate
    def test_get_session_information_all_params(self):
        """Exercise get_session_information() against a mocked GET /_session."""
        # Register the mocked endpoint.
        mock_url = self.preprocess_url(_base_url + '/_session')
        response_body = '{"ok": true, "info": {"authenticated": "authenticated", "authentication_db": "authentication_db", "authentication_handlers": ["authentication_handlers"]}, "userCtx": {"db": "db", "name": "name", "roles": ["_reader"]}}'
        responses.add(responses.GET, mock_url, body=response_body,
                      content_type='application/json', status=200)
        # Invoke the service operation.
        response = _service.get_session_information()
        # One request, successful.
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_get_session_information_all_params_with_retries(self):
        """Re-run the all-params test with retries enabled, then disabled."""
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_session_information_all_params()
# endregion
##############################################################################
# End of Service: Authentication
##############################################################################
##############################################################################
# Start of Service: Authorization
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """
    def test_new_instance(self):
        """Constructing a client via new_instance() with noAuth env config."""
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        client = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert client is not None
        assert isinstance(client, CloudantV1)
    def test_new_instance_without_authenticator(self):
        """new_instance() must reject a configuration lacking an authenticator."""
        with pytest.raises(ValueError, match='authenticator must be provided'):
            service = CloudantV1.new_instance()
class TestGetSecurity():
    """
    Test Class for get_security
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.

        Returns the normalized URL, or a compiled pattern tolerating any run
        of trailing slashes when the URL ends with '/'.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_get_security_all_params(self):
        """
        get_security() fetches GET /{db}/_security.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_security')
        mock_response = '{"admins": {"names": ["names"], "roles": ["roles"]}, "members": {"names": ["names"], "roles": ["roles"]}, "cloudant": {"mapKey": ["_reader"]}, "couchdb_auth_only": false}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Invoke method
        response = _service.get_security(
            db,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_get_security_all_params_with_retries(self):
        # Exercise the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_get_security_all_params()
        _service.disable_retries()
        self.test_get_security_all_params()
    @responses.activate
    def test_get_security_value_error(self):
        """
        Omitting each required parameter in turn must raise ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_security')
        mock_response = '{"admins": {"names": ["names"], "roles": ["roles"]}, "members": {"names": ["names"], "roles": ["roles"]}, "cloudant": {"mapKey": ["_reader"]}, "couchdb_auth_only": false}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Compare keys with == (value equality) rather than `is`:
            # identity comparison of strings only works by accident of
            # CPython interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_security(**req_copy)
    def test_get_security_value_error_with_retries(self):
        # Exercise the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_get_security_value_error()
        _service.disable_retries()
        self.test_get_security_value_error()
class TestPutSecurity():
    """
    Test Class for put_security
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.

        Returns the normalized URL, or a compiled pattern tolerating any run
        of trailing slashes when the URL ends with '/'.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_put_security_all_params(self):
        """
        put_security() sends PUT /{db}/_security with the full security object.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_security')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a SecurityObject model
        security_object_model = {}
        security_object_model['names'] = ['testString']
        security_object_model['roles'] = ['testString']
        # Set up parameter values
        db = 'testString'
        admins = security_object_model
        members = security_object_model
        cloudant = {}
        couchdb_auth_only = True
        # Invoke method
        response = _service.put_security(
            db,
            admins=admins,
            members=members,
            cloudant=cloudant,
            couchdb_auth_only=couchdb_auth_only,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['admins'] == security_object_model
        assert req_body['members'] == security_object_model
        assert req_body['cloudant'] == {}
        assert req_body['couchdb_auth_only'] == True
    def test_put_security_all_params_with_retries(self):
        # Exercise the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_put_security_all_params()
        _service.disable_retries()
        self.test_put_security_all_params()
    @responses.activate
    def test_put_security_value_error(self):
        """
        Omitting each required parameter in turn must raise ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_security')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a SecurityObject model
        security_object_model = {}
        security_object_model['names'] = ['testString']
        security_object_model['roles'] = ['testString']
        # Set up parameter values
        db = 'testString'
        admins = security_object_model
        members = security_object_model
        cloudant = {}
        couchdb_auth_only = True
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Compare keys with == (value equality) rather than `is`:
            # identity comparison of strings only works by accident of
            # CPython interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.put_security(**req_copy)
    def test_put_security_value_error_with_retries(self):
        # Exercise the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_put_security_value_error()
        _service.disable_retries()
        self.test_put_security_value_error()
class TestPostApiKeys():
    """
    Test Class for post_api_keys
    """
    def preprocess_url(self, request_url: str):
        """Normalize *request_url* so the registered mock matches the request.

        Decodes then re-encodes the URL (avoiding double-encoding); a URL
        ending in '/' becomes a regex tolerating any run of trailing slashes.
        """
        decoded = urllib.parse.unquote(request_url)
        normalized = urllib.parse.quote(decoded, safe=':/')
        if re.fullmatch('.*/+', normalized) is not None:
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized
    @responses.activate
    def test_post_api_keys_all_params(self):
        """Exercise post_api_keys() against a mocked POST /_api/v2/api_keys."""
        # Register the mocked endpoint.
        mock_url = self.preprocess_url(_base_url + '/_api/v2/api_keys')
        response_body = '{"ok": true, "key": "key", "password": "password"}'
        responses.add(responses.POST, mock_url, body=response_body,
                      content_type='application/json', status=201)
        # Invoke the service operation.
        response = _service.post_api_keys()
        # One request, created.
        assert len(responses.calls) == 1
        assert response.status_code == 201
    def test_post_api_keys_all_params_with_retries(self):
        """Re-run the all-params test with retries enabled, then disabled."""
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_post_api_keys_all_params()
class TestPutCloudantSecurityConfiguration():
    """
    Test Class for put_cloudant_security_configuration
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.

        Returns the normalized URL, or a compiled pattern tolerating any run
        of trailing slashes when the URL ends with '/'.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_put_cloudant_security_configuration_all_params(self):
        """
        put_cloudant_security_configuration() sends PUT /_api/v2/db/{db}/_security.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_api/v2/db/testString/_security')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a SecurityObject model
        security_object_model = {}
        security_object_model['names'] = ['testString']
        security_object_model['roles'] = ['testString']
        # Set up parameter values
        db = 'testString'
        cloudant = {}
        admins = security_object_model
        members = security_object_model
        couchdb_auth_only = True
        # Invoke method
        response = _service.put_cloudant_security_configuration(
            db,
            cloudant,
            admins=admins,
            members=members,
            couchdb_auth_only=couchdb_auth_only,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['cloudant'] == {}
        assert req_body['admins'] == security_object_model
        assert req_body['members'] == security_object_model
        assert req_body['couchdb_auth_only'] == True
    def test_put_cloudant_security_configuration_all_params_with_retries(self):
        # Exercise the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_put_cloudant_security_configuration_all_params()
        _service.disable_retries()
        self.test_put_cloudant_security_configuration_all_params()
    @responses.activate
    def test_put_cloudant_security_configuration_value_error(self):
        """
        Omitting each required parameter in turn must raise ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_api/v2/db/testString/_security')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a SecurityObject model
        security_object_model = {}
        security_object_model['names'] = ['testString']
        security_object_model['roles'] = ['testString']
        # Set up parameter values
        db = 'testString'
        cloudant = {}
        admins = security_object_model
        members = security_object_model
        couchdb_auth_only = True
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "cloudant": cloudant,
        }
        for param in req_param_dict:
            # Compare keys with == (value equality) rather than `is`:
            # identity comparison of strings only works by accident of
            # CPython interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.put_cloudant_security_configuration(**req_copy)
    def test_put_cloudant_security_configuration_value_error_with_retries(self):
        # Exercise the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_put_cloudant_security_configuration_value_error()
        _service.disable_retries()
        self.test_put_cloudant_security_configuration_value_error()
# endregion
##############################################################################
# End of Service: Authorization
##############################################################################
##############################################################################
# Start of Service: CORS
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """
    def test_new_instance(self):
        """Constructing a client via new_instance() with noAuth env config."""
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        client = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert client is not None
        assert isinstance(client, CloudantV1)
    def test_new_instance_without_authenticator(self):
        """new_instance() must reject a configuration lacking an authenticator."""
        with pytest.raises(ValueError, match='authenticator must be provided'):
            service = CloudantV1.new_instance()
class TestGetCorsInformation():
    """
    Test Class for get_cors_information
    """
    def preprocess_url(self, request_url: str):
        """Normalize *request_url* so the registered mock matches the request.

        Decodes then re-encodes the URL (avoiding double-encoding); a URL
        ending in '/' becomes a regex tolerating any run of trailing slashes.
        """
        decoded = urllib.parse.unquote(request_url)
        normalized = urllib.parse.quote(decoded, safe=':/')
        if re.fullmatch('.*/+', normalized) is not None:
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized
    @responses.activate
    def test_get_cors_information_all_params(self):
        """Exercise get_cors_information() against GET /_api/v2/user/config/cors."""
        # Register the mocked endpoint.
        mock_url = self.preprocess_url(_base_url + '/_api/v2/user/config/cors')
        response_body = '{"allow_credentials": true, "enable_cors": true, "origins": ["origins"]}'
        responses.add(responses.GET, mock_url, body=response_body,
                      content_type='application/json', status=200)
        # Invoke the service operation.
        response = _service.get_cors_information()
        # One request, successful.
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_get_cors_information_all_params_with_retries(self):
        """Re-run the all-params test with retries enabled, then disabled."""
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_cors_information_all_params()
class TestPutCorsConfiguration():
    """
    Test Class for put_cors_configuration
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.

        Returns the normalized URL, or a compiled pattern tolerating any run
        of trailing slashes when the URL ends with '/'.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_put_cors_configuration_all_params(self):
        """
        put_cors_configuration() sends PUT /_api/v2/user/config/cors.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_api/v2/user/config/cors')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        origins = ['testString']
        allow_credentials = True
        enable_cors = True
        # Invoke method
        response = _service.put_cors_configuration(
            origins,
            allow_credentials=allow_credentials,
            enable_cors=enable_cors,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['origins'] == ['testString']
        assert req_body['allow_credentials'] == True
        assert req_body['enable_cors'] == True
    def test_put_cors_configuration_all_params_with_retries(self):
        # Exercise the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_put_cors_configuration_all_params()
        _service.disable_retries()
        self.test_put_cors_configuration_all_params()
    @responses.activate
    def test_put_cors_configuration_value_error(self):
        """
        Omitting each required parameter in turn must raise ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_api/v2/user/config/cors')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        origins = ['testString']
        allow_credentials = True
        enable_cors = True
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "origins": origins,
        }
        for param in req_param_dict:
            # Compare keys with == (value equality) rather than `is`:
            # identity comparison of strings only works by accident of
            # CPython interning.
            req_copy = {key: (None if key == param else val) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.put_cors_configuration(**req_copy)
    def test_put_cors_configuration_value_error_with_retries(self):
        # Exercise the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_put_cors_configuration_value_error()
        _service.disable_retries()
        self.test_put_cors_configuration_value_error()
# endregion
##############################################################################
# End of Service: CORS
##############################################################################
##############################################################################
# Start of Service: Attachments
##############################################################################
# region
class TestNewInstance():
"""
Test Class for new_instance
"""
def test_new_instance(self):
"""
new_instance()
"""
os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
service = CloudantV1.new_instance(
service_name='TEST_SERVICE',
)
assert service is not None
assert isinstance(service, CloudantV1)
def test_new_instance_without_authenticator(self):
"""
new_instance_without_authenticator()
"""
with pytest.raises(ValueError, match='authenticator must be provided'):
service = CloudantV1.new_instance(
)
class TestHeadAttachment():
    """
    Test Class for head_attachment
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs without a trailing slash are matched literally; URLs with one
        # are matched by a regex tolerant of any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_head_attachment_all_params(self):
        """
        head_attachment()
        Invoke head_attachment() with every optional parameter and verify the
        mocked endpoint is hit exactly once with the expected query string.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        if_match = 'testString'
        if_none_match = 'testString'
        rev = 'testString'
        # Invoke method
        response = _service.head_attachment(
            db,
            doc_id,
            attachment_name,
            if_match=if_match,
            if_none_match=if_none_match,
            rev=rev,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'rev={}'.format(rev) in query_string
    def test_head_attachment_all_params_with_retries(self):
        # Enable retries and run test_head_attachment_all_params.
        _service.enable_retries()
        self.test_head_attachment_all_params()
        # Disable retries and run test_head_attachment_all_params.
        _service.disable_retries()
        self.test_head_attachment_all_params()
    @responses.activate
    def test_head_attachment_required_params(self):
        """
        test_head_attachment_required_params()
        Invoke head_attachment() with only the required parameters.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        # Invoke method
        response = _service.head_attachment(
            db,
            doc_id,
            attachment_name,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_head_attachment_required_params_with_retries(self):
        # Enable retries and run test_head_attachment_required_params.
        _service.enable_retries()
        self.test_head_attachment_required_params()
        # Disable retries and run test_head_attachment_required_params.
        _service.disable_retries()
        self.test_head_attachment_required_params()
    @responses.activate
    def test_head_attachment_value_error(self):
        """
        test_head_attachment_value_error()
        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
            "attachment_name": attachment_name,
        }
        for param in req_param_dict:
            # Blank out one required parameter at a time; compare keys with ==
            # (the original `is not` relied on string identity/interning).
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_attachment(**req_copy)
    def test_head_attachment_value_error_with_retries(self):
        # Enable retries and run test_head_attachment_value_error.
        _service.enable_retries()
        self.test_head_attachment_value_error()
        # Disable retries and run test_head_attachment_value_error.
        _service.disable_retries()
        self.test_head_attachment_value_error()
class TestDeleteAttachment():
    """
    Test Class for delete_attachment
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs without a trailing slash are matched literally; URLs with one
        # are matched by a regex tolerant of any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_delete_attachment_all_params(self):
        """
        delete_attachment()
        Invoke delete_attachment() with every optional parameter and verify
        the mocked endpoint is hit once with the expected query string.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        if_match = 'testString'
        rev = 'testString'
        batch = 'ok'
        # Invoke method
        response = _service.delete_attachment(
            db,
            doc_id,
            attachment_name,
            if_match=if_match,
            rev=rev,
            batch=batch,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'rev={}'.format(rev) in query_string
        assert 'batch={}'.format(batch) in query_string
    def test_delete_attachment_all_params_with_retries(self):
        # Enable retries and run test_delete_attachment_all_params.
        _service.enable_retries()
        self.test_delete_attachment_all_params()
        # Disable retries and run test_delete_attachment_all_params.
        _service.disable_retries()
        self.test_delete_attachment_all_params()
    @responses.activate
    def test_delete_attachment_required_params(self):
        """
        test_delete_attachment_required_params()
        Invoke delete_attachment() with only the required parameters.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        # Invoke method
        response = _service.delete_attachment(
            db,
            doc_id,
            attachment_name,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
    def test_delete_attachment_required_params_with_retries(self):
        # Enable retries and run test_delete_attachment_required_params.
        _service.enable_retries()
        self.test_delete_attachment_required_params()
        # Disable retries and run test_delete_attachment_required_params.
        _service.disable_retries()
        self.test_delete_attachment_required_params()
    @responses.activate
    def test_delete_attachment_value_error(self):
        """
        test_delete_attachment_value_error()
        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
            "attachment_name": attachment_name,
        }
        for param in req_param_dict:
            # Blank out one required parameter at a time; compare keys with ==
            # (the original `is not` relied on string identity/interning).
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_attachment(**req_copy)
    def test_delete_attachment_value_error_with_retries(self):
        # Enable retries and run test_delete_attachment_value_error.
        _service.enable_retries()
        self.test_delete_attachment_value_error()
        # Disable retries and run test_delete_attachment_value_error.
        _service.disable_retries()
        self.test_delete_attachment_value_error()
class TestGetAttachment():
    """
    Test Class for get_attachment
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs without a trailing slash are matched literally; URLs with one
        # are matched by a regex tolerant of any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_get_attachment_all_params(self):
        """
        get_attachment()
        Invoke get_attachment() with every optional parameter and verify the
        mocked endpoint is hit once with the expected query string.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='*/*',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        if_match = 'testString'
        if_none_match = 'testString'
        range_ = 'testString'  # renamed local to avoid shadowing builtin range()
        rev = 'testString'
        # Invoke method
        response = _service.get_attachment(
            db,
            doc_id,
            attachment_name,
            if_match=if_match,
            if_none_match=if_none_match,
            range=range_,
            rev=rev,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'rev={}'.format(rev) in query_string
    def test_get_attachment_all_params_with_retries(self):
        # Enable retries and run test_get_attachment_all_params.
        _service.enable_retries()
        self.test_get_attachment_all_params()
        # Disable retries and run test_get_attachment_all_params.
        _service.disable_retries()
        self.test_get_attachment_all_params()
    @responses.activate
    def test_get_attachment_required_params(self):
        """
        test_get_attachment_required_params()
        Invoke get_attachment() with only the required parameters.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='*/*',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        # Invoke method
        response = _service.get_attachment(
            db,
            doc_id,
            attachment_name,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_get_attachment_required_params_with_retries(self):
        # Enable retries and run test_get_attachment_required_params.
        _service.enable_retries()
        self.test_get_attachment_required_params()
        # Disable retries and run test_get_attachment_required_params.
        _service.disable_retries()
        self.test_get_attachment_required_params()
    @responses.activate
    def test_get_attachment_value_error(self):
        """
        test_get_attachment_value_error()
        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='*/*',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
            "attachment_name": attachment_name,
        }
        for param in req_param_dict:
            # Blank out one required parameter at a time; compare keys with ==
            # (the original `is not` relied on string identity/interning).
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_attachment(**req_copy)
    def test_get_attachment_value_error_with_retries(self):
        # Enable retries and run test_get_attachment_value_error.
        _service.enable_retries()
        self.test_get_attachment_value_error()
        # Disable retries and run test_get_attachment_value_error.
        _service.disable_retries()
        self.test_get_attachment_value_error()
class TestPutAttachment():
    """
    Test Class for put_attachment
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs without a trailing slash are matched literally; URLs with one
        # are matched by a regex tolerant of any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_put_attachment_all_params(self):
        """
        put_attachment()
        Invoke put_attachment() with every optional parameter and verify the
        mocked endpoint is hit once with the expected query string.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        attachment = io.BytesIO(b'This is a mock file.').getvalue()
        content_type = 'application/octet-stream'
        if_match = 'testString'
        rev = 'testString'
        # Invoke method
        response = _service.put_attachment(
            db,
            doc_id,
            attachment_name,
            attachment,
            content_type,
            if_match=if_match,
            rev=rev,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'rev={}'.format(rev) in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
    def test_put_attachment_all_params_with_retries(self):
        # Enable retries and run test_put_attachment_all_params.
        _service.enable_retries()
        self.test_put_attachment_all_params()
        # Disable retries and run test_put_attachment_all_params.
        _service.disable_retries()
        self.test_put_attachment_all_params()
    @responses.activate
    def test_put_attachment_required_params(self):
        """
        test_put_attachment_required_params()
        Invoke put_attachment() with only the required parameters.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        attachment = io.BytesIO(b'This is a mock file.').getvalue()
        content_type = 'application/octet-stream'
        # Invoke method
        response = _service.put_attachment(
            db,
            doc_id,
            attachment_name,
            attachment,
            content_type,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
    def test_put_attachment_required_params_with_retries(self):
        # Enable retries and run test_put_attachment_required_params.
        _service.enable_retries()
        self.test_put_attachment_required_params()
        # Disable retries and run test_put_attachment_required_params.
        _service.disable_retries()
        self.test_put_attachment_required_params()
    @responses.activate
    def test_put_attachment_value_error(self):
        """
        test_put_attachment_value_error()
        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        attachment = io.BytesIO(b'This is a mock file.').getvalue()
        content_type = 'application/octet-stream'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
            "attachment_name": attachment_name,
            "attachment": attachment,
            "content_type": content_type,
        }
        for param in req_param_dict:
            # Blank out one required parameter at a time; compare keys with ==
            # (the original `is not` relied on string identity/interning).
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.put_attachment(**req_copy)
    def test_put_attachment_value_error_with_retries(self):
        # Enable retries and run test_put_attachment_value_error.
        _service.enable_retries()
        self.test_put_attachment_value_error()
        # Disable retries and run test_put_attachment_value_error.
        _service.disable_retries()
        self.test_put_attachment_value_error()
# endregion
##############################################################################
# End of Service: Attachments
##############################################################################
##############################################################################
# Start of Service: LocalDocuments
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """
    def test_new_instance(self):
        """
        new_instance()
        """
        # With noAuth configured via the environment, construction succeeds.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        instance = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert instance is not None
        assert isinstance(instance, CloudantV1)
    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # Without a service name there is no way to resolve an authenticator.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestHeadLocalDocument():
    """
    Test Class for head_local_document
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs without a trailing slash are matched literally; URLs with one
        # are matched by a regex tolerant of any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_head_local_document_all_params(self):
        """
        head_local_document()
        Invoke head_local_document() with every optional parameter and verify
        the mocked endpoint is hit exactly once.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        if_none_match = 'testString'
        # Invoke method
        response = _service.head_local_document(
            db,
            doc_id,
            if_none_match=if_none_match,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_head_local_document_all_params_with_retries(self):
        # Enable retries and run test_head_local_document_all_params.
        _service.enable_retries()
        self.test_head_local_document_all_params()
        # Disable retries and run test_head_local_document_all_params.
        _service.disable_retries()
        self.test_head_local_document_all_params()
    @responses.activate
    def test_head_local_document_required_params(self):
        """
        test_head_local_document_required_params()
        Invoke head_local_document() with only the required parameters.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Invoke method
        response = _service.head_local_document(
            db,
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_head_local_document_required_params_with_retries(self):
        # Enable retries and run test_head_local_document_required_params.
        _service.enable_retries()
        self.test_head_local_document_required_params()
        # Disable retries and run test_head_local_document_required_params.
        _service.disable_retries()
        self.test_head_local_document_required_params()
    @responses.activate
    def test_head_local_document_value_error(self):
        """
        test_head_local_document_value_error()
        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Blank out one required parameter at a time; compare keys with ==
            # (the original `is not` relied on string identity/interning).
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_local_document(**req_copy)
    def test_head_local_document_value_error_with_retries(self):
        # Enable retries and run test_head_local_document_value_error.
        _service.enable_retries()
        self.test_head_local_document_value_error()
        # Disable retries and run test_head_local_document_value_error.
        _service.disable_retries()
        self.test_head_local_document_value_error()
class TestDeleteLocalDocument():
    """
    Test Class for delete_local_document
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs without a trailing slash are matched literally; URLs with one
        # are matched by a regex tolerant of any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_delete_local_document_all_params(self):
        """
        delete_local_document()
        Invoke delete_local_document() with every optional parameter and
        verify the mocked endpoint is hit once with the expected query string.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        batch = 'ok'
        # Invoke method
        response = _service.delete_local_document(
            db,
            doc_id,
            batch=batch,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'batch={}'.format(batch) in query_string
    def test_delete_local_document_all_params_with_retries(self):
        # Enable retries and run test_delete_local_document_all_params.
        _service.enable_retries()
        self.test_delete_local_document_all_params()
        # Disable retries and run test_delete_local_document_all_params.
        _service.disable_retries()
        self.test_delete_local_document_all_params()
    @responses.activate
    def test_delete_local_document_required_params(self):
        """
        test_delete_local_document_required_params()
        Invoke delete_local_document() with only the required parameters.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Invoke method
        response = _service.delete_local_document(
            db,
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_delete_local_document_required_params_with_retries(self):
        # Enable retries and run test_delete_local_document_required_params.
        _service.enable_retries()
        self.test_delete_local_document_required_params()
        # Disable retries and run test_delete_local_document_required_params.
        _service.disable_retries()
        self.test_delete_local_document_required_params()
    @responses.activate
    def test_delete_local_document_value_error(self):
        """
        test_delete_local_document_value_error()
        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Blank out one required parameter at a time; compare keys with ==
            # (the original `is not` relied on string identity/interning).
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_local_document(**req_copy)
    def test_delete_local_document_value_error_with_retries(self):
        # Enable retries and run test_delete_local_document_value_error.
        _service.enable_retries()
        self.test_delete_local_document_value_error()
        # Disable retries and run test_delete_local_document_value_error.
        _service.disable_retries()
        self.test_delete_local_document_value_error()
class TestGetLocalDocument():
    """
    Test Class for get_local_document
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs without a trailing slash are matched literally; URLs with one
        # are matched by a regex tolerant of any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_get_local_document_all_params(self):
        """
        get_local_document()
        Invoke get_local_document() with every optional parameter and verify
        the mocked endpoint is hit once with the expected query string.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        mock_response = '{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        accept = 'application/json'
        if_none_match = 'testString'
        attachments = False
        att_encoding_info = False
        local_seq = False
        # Invoke method
        response = _service.get_local_document(
            db,
            doc_id,
            accept=accept,
            if_none_match=if_none_match,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            local_seq=local_seq,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'local_seq={}'.format('true' if local_seq else 'false') in query_string
    def test_get_local_document_all_params_with_retries(self):
        # Enable retries and run test_get_local_document_all_params.
        _service.enable_retries()
        self.test_get_local_document_all_params()
        # Disable retries and run test_get_local_document_all_params.
        _service.disable_retries()
        self.test_get_local_document_all_params()
    @responses.activate
    def test_get_local_document_required_params(self):
        """
        test_get_local_document_required_params()
        Invoke get_local_document() with only the required parameters.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        mock_response = '{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Invoke method
        response = _service.get_local_document(
            db,
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_get_local_document_required_params_with_retries(self):
        # Enable retries and run test_get_local_document_required_params.
        _service.enable_retries()
        self.test_get_local_document_required_params()
        # Disable retries and run test_get_local_document_required_params.
        _service.disable_retries()
        self.test_get_local_document_required_params()
    @responses.activate
    def test_get_local_document_value_error(self):
        """
        test_get_local_document_value_error()
        Verify that omitting any single required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        mock_response = '{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Blank out one required parameter at a time; compare keys with ==
            # (the original `is not` relied on string identity/interning).
            req_copy = {key: (val if key != param else None) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_local_document(**req_copy)
    def test_get_local_document_value_error_with_retries(self):
        # Enable retries and run test_get_local_document_value_error.
        _service.enable_retries()
        self.test_get_local_document_value_error()
        # Disable retries and run test_get_local_document_value_error.
        _service.disable_retries()
        self.test_get_local_document_value_error()
class TestPutLocalDocument():
"""
Test Class for put_local_document
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
@responses.activate
def test_put_local_document_all_params(self):
"""
put_local_document()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_local/testString')
mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
responses.add(responses.PUT,
url,
body=mock_response,
content_type='application/json',
status=201)
# Construct a dict representation of a Attachment model
attachment_model = {}
attachment_model['content_type'] = 'testString'
attachment_model['data'] = 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4='
attachment_model['digest'] = 'testString'
attachment_model['encoded_length'] = 0
attachment_model['encoding'] = 'testString'
attachment_model['follows'] = True
attachment_model['length'] = 0
attachment_model['revpos'] = 1
attachment_model['stub'] = True
# Construct a dict representation of a Revisions model
revisions_model = {}
revisions_model['ids'] = ['testString']
revisions_model['start'] = 1
# Construct a dict representation of a DocumentRevisionStatus model
document_revision_status_model = {}
document_revision_status_model['rev'] = 'testString'
document_revision_status_model['status'] = 'available'
# Construct a dict representation of a Document model
document_model = {}
document_model['_attachments'] = {}
document_model['_conflicts'] = ['testString']
document_model['_deleted'] = True
document_model['_deleted_conflicts'] = ['testString']
document_model['_id'] = 'exampleid'
document_model['_local_seq'] = 'testString'
document_model['_rev'] = 'testString'
document_model['_revisions'] = revisions_model
document_model['_revs_info'] = [document_revision_status_model]
document_model['brand'] = 'Foo'
document_model['colours'] = '["red","green","black","blue"]'
document_model['description'] = 'Slim Colourful Design Electronic Cooking Appliance for ...'
document_model['image'] = 'assets/img/0gmsnghhew.jpg'
document_model['keywords'] = '["Foo","Scales","Weight","Digital","Kitchen"]'
document_model['name'] = 'Digital Kitchen Scales'
document_model['price'] = '14.99'
document_model['productid'] = '1000042'
document_model['taxonomy'] = '["Home","Kitchen","Small Appliances"]'
document_model['type'] = 'product'
# Set up parameter values
db = 'testString'
doc_id = 'testString'
document = document_model
content_type = 'application/json'
batch = 'ok'
# Invoke method
response = _service.put_local_document(
db,
doc_id,
document,
content_type=content_type,
batch=batch,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 201
# Validate query params
query_string = responses.calls[0].request.url.split('?',1)[1]
query_string = urllib.parse.unquote_plus(query_string)
assert 'batch={}'.format(batch) in query_string
# decompress gzip compressed request body
responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
# Validate body params
def test_put_local_document_all_params_with_retries(self):
# Enable retries and run test_put_local_document_all_params.
_service.enable_retries()
self.test_put_local_document_all_params()
# Disable retries and run test_put_local_document_all_params.
_service.disable_retries()
self.test_put_local_document_all_params()
@responses.activate
def test_put_local_document_required_params(self):
"""
test_put_local_document_required_params()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_local/testString')
mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
responses.add(responses.PUT,
url,
body=mock_response,
content_type='application/json',
status=201)
# Construct a dict representation of a Attachment model
attachment_model = {}
attachment_model['content_type'] = 'testString'
attachment_model['data'] = 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4='
attachment_model['digest'] = 'testString'
attachment_model['encoded_length'] = 0
attachment_model['encoding'] = 'testString'
attachment_model['follows'] = True
attachment_model['length'] = 0
attachment_model['revpos'] = 1
attachment_model['stub'] = True
# Construct a dict representation of a Revisions model
revisions_model = {}
revisions_model['ids'] = ['testString']
revisions_model['start'] = 1
# Construct a dict representation of a DocumentRevisionStatus model
document_revision_status_model = {}
document_revision_status_model['rev'] = 'testString'
document_revision_status_model['status'] = 'available'
# Construct a dict representation of a Document model
document_model = {}
document_model['_attachments'] = {}
document_model['_conflicts'] = ['testString']
document_model['_deleted'] = True
document_model['_deleted_conflicts'] = ['testString']
document_model['_id'] = 'exampleid'
document_model['_local_seq'] = 'testString'
document_model['_rev'] = 'testString'
document_model['_revisions'] = revisions_model
document_model['_revs_info'] = [document_revision_status_model]
document_model['brand'] = 'Foo'
document_model['colours'] = '["red","green","black","blue"]'
document_model['description'] = 'Slim Colourful Design Electronic Cooking Appliance for ...'
document_model['image'] = 'assets/img/0gmsnghhew.jpg'
document_model['keywords'] = '["Foo","Scales","Weight","Digital","Kitchen"]'
document_model['name'] = 'Digital Kitchen Scales'
document_model['price'] = '14.99'
document_model['productid'] = '1000042'
document_model['taxonomy'] = '["Home","Kitchen","Small Appliances"]'
document_model['type'] = 'product'
# Set up parameter values
db = 'testString'
doc_id = 'testString'
document = document_model
# Invoke method
response = _service.put_local_document(
db,
doc_id,
document,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 201
# decompress gzip compressed request body
responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
# Validate body params
def test_put_local_document_required_params_with_retries(self):
# Enable retries and run test_put_local_document_required_params.
_service.enable_retries()
self.test_put_local_document_required_params()
# Disable retries and run test_put_local_document_required_params.
_service.disable_retries()
self.test_put_local_document_required_params()
@responses.activate
def test_put_local_document_value_error(self):
"""
test_put_local_document_value_error()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_local/testString')
mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
responses.add(responses.PUT,
url,
body=mock_response,
content_type='application/json',
status=201)
# Construct a dict representation of a Attachment model
attachment_model = {}
attachment_model['content_type'] = 'testString'
attachment_model['data'] = 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4='
attachment_model['digest'] = 'testString'
attachment_model['encoded_length'] = 0
attachment_model['encoding'] = 'testString'
attachment_model['follows'] = True
attachment_model['length'] = 0
attachment_model['revpos'] = 1
attachment_model['stub'] = True
# Construct a dict representation of a Revisions model
revisions_model = {}
revisions_model['ids'] = ['testString']
revisions_model['start'] = 1
# Construct a dict representation of a DocumentRevisionStatus model
document_revision_status_model = {}
document_revision_status_model['rev'] = 'testString'
document_revision_status_model['status'] = 'available'
# Construct a dict representation of a Document model
document_model = {}
document_model['_attachments'] = {}
document_model['_conflicts'] = ['testString']
document_model['_deleted'] = True
document_model['_deleted_conflicts'] = ['testString']
document_model['_id'] = 'exampleid'
document_model['_local_seq'] = 'testString'
document_model['_rev'] = 'testString'
document_model['_revisions'] = revisions_model
document_model['_revs_info'] = [document_revision_status_model]
document_model['brand'] = 'Foo'
document_model['colours'] = '["red","green","black","blue"]'
document_model['description'] = 'Slim Colourful Design Electronic Cooking Appliance for ...'
document_model['image'] = 'assets/img/0gmsnghhew.jpg'
document_model['keywords'] = '["Foo","Scales","Weight","Digital","Kitchen"]'
document_model['name'] = 'Digital Kitchen Scales'
document_model['price'] = '14.99'
document_model['productid'] = '1000042'
document_model['taxonomy'] = '["Home","Kitchen","Small Appliances"]'
document_model['type'] = 'product'
# Set up parameter values
db = 'testString'
doc_id = 'testString'
document = document_model
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"db": db,
"doc_id": doc_id,
"document": document,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
_service.put_local_document(**req_copy)
def test_put_local_document_value_error_with_retries(self):
# Enable retries and run test_put_local_document_value_error.
_service.enable_retries()
self.test_put_local_document_value_error()
# Disable retries and run test_put_local_document_value_error.
_service.disable_retries()
self.test_put_local_document_value_error()
# endregion
##############################################################################
# End of Service: LocalDocuments
##############################################################################
##############################################################################
# Start of Service: DatabaseDetails
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # Force no-auth so new_instance needs no credentials from the env.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'

        service = CloudantV1.new_instance(service_name='TEST_SERVICE')

        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # Without a service name there is no configured authenticator.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestPostRevsDiff():
    """
    Test Class for post_revs_diff
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        request_url = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        if re.fullmatch('.*/+', request_url):
            # Trailing-slash URLs are matched via a pattern instead.
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_post_revs_diff_all_params(self):
        """
        post_revs_diff()
        """
        # Register the POST mock.
        url = self.preprocess_url(_base_url + '/testString/_revs_diff')
        mock_response = '{"mapKey": {"missing": ["missing"], "possible_ancestors": ["possible_ancestors"]}}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        document_revisions = {}

        response = _service.post_revs_diff(
            'testString',
            document_revisions,
            headers={},
        )

        # Exactly one request was made and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # The gzip-compressed request body must round-trip the payload.
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body == document_revisions

    def test_post_revs_diff_all_params_with_retries(self):
        # Run the happy-path test with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_post_revs_diff_all_params()

    @responses.activate
    def test_post_revs_diff_value_error(self):
        """
        test_post_revs_diff_value_error()
        """
        # Mock is registered but never reached: validation fails first.
        url = self.preprocess_url(_base_url + '/testString/_revs_diff')
        mock_response = '{"mapKey": {"missing": ["missing"], "possible_ancestors": ["possible_ancestors"]}}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Null out each required argument in turn and expect a ValueError.
        required = {
            'db': 'testString',
            'document_revisions': {},
        }
        for missing in required:
            kwargs = {k: (None if k == missing else v) for k, v in required.items()}
            with pytest.raises(ValueError):
                _service.post_revs_diff(**kwargs)

    def test_post_revs_diff_value_error_with_retries(self):
        # Run the validation test with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_post_revs_diff_value_error()
class TestGetShardsInformation():
    """
    Test Class for get_shards_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        request_url = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        if re.fullmatch('.*/+', request_url):
            # Trailing-slash URLs are matched via a pattern instead.
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_get_shards_information_all_params(self):
        """
        get_shards_information()
        """
        # Register the GET mock.
        url = self.preprocess_url(_base_url + '/testString/_shards')
        mock_response = '{"shards": {"mapKey": ["inner"]}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        response = _service.get_shards_information(
            'testString',
            headers={},
        )

        # Exactly one request was made and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_shards_information_all_params_with_retries(self):
        # Run the happy-path test with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_shards_information_all_params()

    @responses.activate
    def test_get_shards_information_value_error(self):
        """
        test_get_shards_information_value_error()
        """
        # Mock is registered but never reached: validation fails first.
        url = self.preprocess_url(_base_url + '/testString/_shards')
        mock_response = '{"shards": {"mapKey": ["inner"]}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Null out each required argument in turn and expect a ValueError.
        required = {'db': 'testString'}
        for missing in required:
            kwargs = {k: (None if k == missing else v) for k, v in required.items()}
            with pytest.raises(ValueError):
                _service.get_shards_information(**kwargs)

    def test_get_shards_information_value_error_with_retries(self):
        # Run the validation test with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_shards_information_value_error()
class TestGetDocumentShardsInfo():
    """
    Test Class for get_document_shards_info
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        request_url = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        if re.fullmatch('.*/+', request_url):
            # Trailing-slash URLs are matched via a pattern instead.
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_get_document_shards_info_all_params(self):
        """
        get_document_shards_info()
        """
        # Register the GET mock.
        url = self.preprocess_url(_base_url + '/testString/_shards/testString')
        mock_response = '{"nodes": ["nodes"], "range": "range"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        response = _service.get_document_shards_info(
            'testString',
            'testString',
            headers={},
        )

        # Exactly one request was made and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_document_shards_info_all_params_with_retries(self):
        # Run the happy-path test with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_document_shards_info_all_params()

    @responses.activate
    def test_get_document_shards_info_value_error(self):
        """
        test_get_document_shards_info_value_error()
        """
        # Mock is registered but never reached: validation fails first.
        url = self.preprocess_url(_base_url + '/testString/_shards/testString')
        mock_response = '{"nodes": ["nodes"], "range": "range"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Null out each required argument in turn and expect a ValueError.
        required = {
            'db': 'testString',
            'doc_id': 'testString',
        }
        for missing in required:
            kwargs = {k: (None if k == missing else v) for k, v in required.items()}
            with pytest.raises(ValueError):
                _service.get_document_shards_info(**kwargs)

    def test_get_document_shards_info_value_error_with_retries(self):
        # Run the validation test with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_document_shards_info_value_error()
# endregion
##############################################################################
# End of Service: DatabaseDetails
##############################################################################
##############################################################################
# Start of Service: Monitoring
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # Force no-auth so new_instance needs no credentials from the env.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'

        service = CloudantV1.new_instance(service_name='TEST_SERVICE')

        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # Without a service name there is no configured authenticator.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestHeadUpInformation():
    """
    Test Class for head_up_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        request_url = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        if re.fullmatch('.*/+', request_url):
            # Trailing-slash URLs are matched via a pattern instead.
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_head_up_information_all_params(self):
        """
        head_up_information()
        """
        # Register the HEAD mock (no response body).
        url = self.preprocess_url(_base_url + '/_up')
        responses.add(responses.HEAD, url, status=200)

        response = _service.head_up_information()

        # Exactly one request was made and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_up_information_all_params_with_retries(self):
        # Run the happy-path test with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_head_up_information_all_params()
class TestGetActiveTasks():
    """
    Test Class for get_active_tasks
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        request_url = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        if re.fullmatch('.*/+', request_url):
            # Trailing-slash URLs are matched via a pattern instead.
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_get_active_tasks_all_params(self):
        """
        get_active_tasks()
        """
        # Register the GET mock.
        url = self.preprocess_url(_base_url + '/_active_tasks')
        mock_response = '[{"changes_done": 0, "database": "database", "node": "node", "pid": "pid", "progress": 0, "started_on": 0, "status": "status", "task": "task", "total_changes": 0, "type": "type", "updated_on": 0}]'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        response = _service.get_active_tasks()

        # Exactly one request was made and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_active_tasks_all_params_with_retries(self):
        # Run the happy-path test with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_active_tasks_all_params()
class TestGetUpInformation():
    """
    Test Class for get_up_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        request_url = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        if re.fullmatch('.*/+', request_url):
            # Trailing-slash URLs are matched via a pattern instead.
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_get_up_information_all_params(self):
        """
        get_up_information()
        """
        # Register the GET mock.
        url = self.preprocess_url(_base_url + '/_up')
        mock_response = '{"seeds": {"anyKey": "anyValue"}, "status": "maintenance_mode"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        response = _service.get_up_information()

        # Exactly one request was made and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_up_information_all_params_with_retries(self):
        # Run the happy-path test with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_up_information_all_params()
class TestGetActivityTrackerEvents():
    """
    Test Class for get_activity_tracker_events
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        request_url = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        if re.fullmatch('.*/+', request_url):
            # Trailing-slash URLs are matched via a pattern instead.
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_get_activity_tracker_events_all_params(self):
        """
        get_activity_tracker_events()
        """
        # Register the GET mock.
        url = self.preprocess_url(_base_url + '/_api/v2/user/activity_tracker/events')
        mock_response = '{"types": ["management"]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        response = _service.get_activity_tracker_events()

        # Exactly one request was made and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_activity_tracker_events_all_params_with_retries(self):
        # Run the happy-path test with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_activity_tracker_events_all_params()
class TestPostActivityTrackerEvents():
    """
    Test Class for post_activity_tracker_events
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        request_url = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        if re.fullmatch('.*/+', request_url):
            # Trailing-slash URLs are matched via a pattern instead.
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_post_activity_tracker_events_all_params(self):
        """
        post_activity_tracker_events()
        """
        # Register the POST mock.
        url = self.preprocess_url(_base_url + '/_api/v2/user/activity_tracker/events')
        mock_response = '{"ok": true}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        types = ['management']

        response = _service.post_activity_tracker_events(
            types,
            headers={},
        )

        # Exactly one request was made and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # The gzip-compressed request body must carry the types list.
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['types'] == ['management']

    def test_post_activity_tracker_events_all_params_with_retries(self):
        # Run the happy-path test with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_post_activity_tracker_events_all_params()

    @responses.activate
    def test_post_activity_tracker_events_value_error(self):
        """
        test_post_activity_tracker_events_value_error()
        """
        # Mock is registered but never reached: validation fails first.
        url = self.preprocess_url(_base_url + '/_api/v2/user/activity_tracker/events')
        mock_response = '{"ok": true}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Null out each required argument in turn and expect a ValueError.
        required = {'types': ['management']}
        for missing in required:
            kwargs = {k: (None if k == missing else v) for k, v in required.items()}
            with pytest.raises(ValueError):
                _service.post_activity_tracker_events(**kwargs)

    def test_post_activity_tracker_events_value_error_with_retries(self):
        # Run the validation test with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_post_activity_tracker_events_value_error()
class TestGetCurrentThroughputInformation():
    """
    Test Class for get_current_throughput_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        request_url = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        if re.fullmatch('.*/+', request_url):
            # Trailing-slash URLs are matched via a pattern instead.
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_get_current_throughput_information_all_params(self):
        """
        get_current_throughput_information()
        """
        # Register the GET mock.
        url = self.preprocess_url(_base_url + '/_api/v2/user/current/throughput')
        mock_response = '{"throughput": {"query": 0, "read": 0, "write": 0}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        response = _service.get_current_throughput_information()

        # Exactly one request was made and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_current_throughput_information_all_params_with_retries(self):
        # Run the happy-path test with retries enabled, then disabled.
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_current_throughput_information_all_params()
# endregion
##############################################################################
# End of Service: Monitoring
##############################################################################
##############################################################################
# Start of Model Tests
##############################################################################
# region
class TestModel_ActiveTask():
    """
    Test Class for ActiveTask
    """

    def test_active_task_serialization(self):
        """
        Test serialization/deserialization for ActiveTask
        """
        # JSON form of an ActiveTask model.
        active_task_model_json = {
            'changes_done': 0,
            'database': 'testString',
            'node': 'testString',
            'pid': 'testString',
            'progress': 0,
            'started_on': 0,
            'status': 'testString',
            'task': 'testString',
            'total_changes': 0,
            'type': 'testString',
            'updated_on': 0,
        }

        # from_dict must produce a truthy model instance.
        active_task_model = ActiveTask.from_dict(active_task_model_json)
        assert active_task_model != False

        # Rebuilding the model from the instance attributes yields an equal model.
        active_task_model2 = ActiveTask(**ActiveTask.from_dict(active_task_model_json).__dict__)
        assert active_task_model == active_task_model2

        # to_dict round-trips without data loss.
        assert active_task_model.to_dict() == active_task_model_json
class TestModel_ActivityTrackerEvents():
    """
    Test Class for ActivityTrackerEvents
    """

    def test_activity_tracker_events_serialization(self):
        """
        Test serialization/deserialization for ActivityTrackerEvents
        """
        # JSON form of an ActivityTrackerEvents model.
        activity_tracker_events_model_json = {'types': ['management']}

        # from_dict must produce a truthy model instance.
        activity_tracker_events_model = ActivityTrackerEvents.from_dict(activity_tracker_events_model_json)
        assert activity_tracker_events_model != False

        # Rebuilding the model from the instance attributes yields an equal model.
        activity_tracker_events_model2 = ActivityTrackerEvents(
            **ActivityTrackerEvents.from_dict(activity_tracker_events_model_json).__dict__
        )
        assert activity_tracker_events_model == activity_tracker_events_model2

        # to_dict round-trips without data loss.
        assert activity_tracker_events_model.to_dict() == activity_tracker_events_model_json
class TestModel_AllDocsQueriesResult():
    """
    Test Class for AllDocsQueriesResult
    """

    def test_all_docs_queries_result_serialization(self):
        """
        Test serialization/deserialization for AllDocsQueriesResult
        """
        # Dict forms of the nested models used to build this model.
        attachment_model = {  # Attachment (kept for parity with sibling tests)
            'content_type': 'testString',
            'data': 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4=',
            'digest': 'testString',
            'encoded_length': 0,
            'encoding': 'testString',
            'follows': True,
            'length': 0,
            'revpos': 1,
            'stub': True,
        }

        revisions_model = {'ids': ['testString'], 'start': 1}  # Revisions

        document_revision_status_model = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }

        document_model = {  # Document
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }

        docs_result_row_value_model = {'rev': 'testString'}  # DocsResultRowValue

        docs_result_row_model = {  # DocsResultRow
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
            'doc': document_model,
            'id': 'testString',
            'key': 'testString',
            'value': docs_result_row_value_model,
        }

        all_docs_result_model = {  # AllDocsResult
            'total_rows': 0,
            'rows': [docs_result_row_model],
            'update_seq': 'testString',
        }

        # JSON form of an AllDocsQueriesResult model.
        all_docs_queries_result_model_json = {'results': [all_docs_result_model]}

        # from_dict must produce a truthy model instance.
        all_docs_queries_result_model = AllDocsQueriesResult.from_dict(all_docs_queries_result_model_json)
        assert all_docs_queries_result_model != False

        # Rebuilding the model from the instance attributes yields an equal model.
        all_docs_queries_result_model2 = AllDocsQueriesResult(
            **AllDocsQueriesResult.from_dict(all_docs_queries_result_model_json).__dict__
        )
        assert all_docs_queries_result_model == all_docs_queries_result_model2

        # to_dict round-trips without data loss.
        assert all_docs_queries_result_model.to_dict() == all_docs_queries_result_model_json
class TestModel_AllDocsQuery():
    """
    Test Class for AllDocsQuery
    """

    def test_all_docs_query_serialization(self):
        """
        Test serialization/deserialization for AllDocsQuery
        """
        # JSON form of an AllDocsQuery model.
        all_docs_query_model_json = {
            'att_encoding_info': False,
            'attachments': False,
            'conflicts': False,
            'descending': False,
            'include_docs': False,
            'inclusive_end': True,
            'limit': 0,
            'skip': 0,
            'update_seq': False,
            'endkey': 'testString',
            'key': 'testString',
            'keys': ['testString'],
            'startkey': 'testString',
        }

        # from_dict must produce a truthy model instance.
        all_docs_query_model = AllDocsQuery.from_dict(all_docs_query_model_json)
        assert all_docs_query_model != False

        # Rebuilding the model from the instance attributes yields an equal model.
        all_docs_query_model2 = AllDocsQuery(**AllDocsQuery.from_dict(all_docs_query_model_json).__dict__)
        assert all_docs_query_model == all_docs_query_model2

        # to_dict round-trips without data loss.
        assert all_docs_query_model.to_dict() == all_docs_query_model_json
class TestModel_AllDocsResult():
    """
    Test Class for AllDocsResult
    """

    def test_all_docs_result_serialization(self):
        """
        Test serialization/deserialization for AllDocsResult.

        Round-trips an AllDocsResult payload through from_dict / to_dict and
        verifies that equivalent instances compare equal and no data is lost.
        """
        # Dict forms of the nested models needed to build the payload.
        # NOTE: an Attachment dict was previously built here but never
        # referenced (the document's '_attachments' is empty), so it is omitted.
        revisions = {  # Revisions
            'ids': ['testString'],
            'start': 1,
        }
        doc_rev_status = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }
        document = {  # Document
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions,
            '_revs_info': [doc_rev_status],
            'foo': 'testString',
        }
        row_value = {  # DocsResultRowValue
            'rev': 'testString',
        }
        row = {  # DocsResultRow
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
            'doc': document,
            'id': 'testString',
            'key': 'testString',
            'value': row_value,
        }

        # JSON representation of an AllDocsResult model.
        payload = {
            'total_rows': 0,
            'rows': [row],
            'update_seq': 'testString',
        }

        # dict -> model must yield a usable instance.
        model = AllDocsResult.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = AllDocsResult(**AllDocsResult.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_Analyzer():
    """
    Test Class for Analyzer
    """

    def test_analyzer_serialization(self):
        """
        Test serialization/deserialization for Analyzer
        """
        # JSON representation of an Analyzer model.
        payload = {
            'name': 'classic',
            'stopwords': ['testString'],
        }

        # dict -> model must yield a usable instance.
        model = Analyzer.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = Analyzer(**Analyzer.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_AnalyzerConfiguration():
    """
    Test Class for AnalyzerConfiguration
    """

    def test_analyzer_configuration_serialization(self):
        """
        Test serialization/deserialization for AnalyzerConfiguration.

        Round-trips an AnalyzerConfiguration payload through from_dict /
        to_dict and verifies that equivalent instances compare equal and no
        data is lost.
        """
        # JSON representation of an AnalyzerConfiguration model.
        # NOTE: an Analyzer dict was previously built here but never
        # referenced (the 'fields' mapping is empty), so it is omitted.
        payload = {
            'name': 'classic',
            'stopwords': ['testString'],
            'fields': {},
        }

        # dict -> model must yield a usable instance.
        model = AnalyzerConfiguration.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = AnalyzerConfiguration(**AnalyzerConfiguration.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_ApiKeysResult():
    """
    Test Class for ApiKeysResult
    """

    def test_api_keys_result_serialization(self):
        """
        Test serialization/deserialization for ApiKeysResult
        """
        # JSON representation of an ApiKeysResult model.
        payload = {
            'ok': True,
            'key': 'testString',
            'password': 'testString',
        }

        # dict -> model must yield a usable instance.
        model = ApiKeysResult.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = ApiKeysResult(**ApiKeysResult.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_Attachment():
    """
    Test Class for Attachment
    """

    def test_attachment_serialization(self):
        """
        Test serialization/deserialization for Attachment
        """
        # JSON representation of an Attachment model.
        payload = {
            'content_type': 'testString',
            'data': 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4=',
            'digest': 'testString',
            'encoded_length': 0,
            'encoding': 'testString',
            'follows': True,
            'length': 0,
            'revpos': 1,
            'stub': True,
        }

        # dict -> model must yield a usable instance.
        model = Attachment.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = Attachment(**Attachment.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_BulkDocs():
    """
    Test Class for BulkDocs
    """

    def test_bulk_docs_serialization(self):
        """
        Test serialization/deserialization for BulkDocs.

        Round-trips a BulkDocs payload through from_dict / to_dict and
        verifies that equivalent instances compare equal and no data is lost.
        """
        # Dict forms of the nested models needed to build the payload.
        # NOTE: an Attachment dict was previously built here but never
        # referenced (the document's '_attachments' is empty), so it is omitted.
        revisions = {  # Revisions
            'ids': ['testString'],
            'start': 1,
        }
        doc_rev_status = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }
        document = {  # Document
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions,
            '_revs_info': [doc_rev_status],
            'foo': 'testString',
        }

        # JSON representation of a BulkDocs model.
        payload = {
            'docs': [document],
            'new_edits': True,
        }

        # dict -> model must yield a usable instance.
        model = BulkDocs.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = BulkDocs(**BulkDocs.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_BulkGetQueryDocument():
    """
    Test Class for BulkGetQueryDocument
    """

    def test_bulk_get_query_document_serialization(self):
        """
        Test serialization/deserialization for BulkGetQueryDocument
        """
        # JSON representation of a BulkGetQueryDocument model.
        payload = {
            'atts_since': ['1-99b02e08da151943c2dcb40090160bb8'],
            'id': 'testString',
            'rev': 'testString',
        }

        # dict -> model must yield a usable instance.
        model = BulkGetQueryDocument.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = BulkGetQueryDocument(**BulkGetQueryDocument.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_BulkGetResult():
    """
    Test Class for BulkGetResult
    """

    def test_bulk_get_result_serialization(self):
        """
        Test serialization/deserialization for BulkGetResult.

        Round-trips a BulkGetResult payload through from_dict / to_dict and
        verifies that equivalent instances compare equal and no data is lost.
        """
        # Dict forms of the nested models needed to build the payload.
        # NOTE: an Attachment dict was previously built here but never
        # referenced (the document's '_attachments' is empty), so it is omitted.
        document_result = {  # DocumentResult
            'id': 'testString',
            'rev': 'testString',
            'ok': True,
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
        }
        revisions = {  # Revisions
            'ids': ['testString'],
            'start': 1,
        }
        doc_rev_status = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }
        document = {  # Document
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions,
            '_revs_info': [doc_rev_status],
            'foo': 'testString',
        }
        result_document = {  # BulkGetResultDocument
            'error': document_result,
            'ok': document,
        }
        result_item = {  # BulkGetResultItem
            'docs': [result_document],
            'id': 'testString',
        }

        # JSON representation of a BulkGetResult model.
        payload = {
            'results': [result_item],
        }

        # dict -> model must yield a usable instance.
        model = BulkGetResult.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = BulkGetResult(**BulkGetResult.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_BulkGetResultDocument():
    """
    Test Class for BulkGetResultDocument
    """

    def test_bulk_get_result_document_serialization(self):
        """
        Test serialization/deserialization for BulkGetResultDocument.

        Round-trips a BulkGetResultDocument payload through from_dict /
        to_dict and verifies that equivalent instances compare equal and no
        data is lost.
        """
        # Dict forms of the nested models needed to build the payload.
        # NOTE: an Attachment dict was previously built here but never
        # referenced (the document's '_attachments' is empty), so it is omitted.
        document_result = {  # DocumentResult
            'id': 'testString',
            'rev': 'testString',
            'ok': True,
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
        }
        revisions = {  # Revisions
            'ids': ['testString'],
            'start': 1,
        }
        doc_rev_status = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }
        document = {  # Document
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions,
            '_revs_info': [doc_rev_status],
            'foo': 'testString',
        }

        # JSON representation of a BulkGetResultDocument model.
        payload = {
            'error': document_result,
            'ok': document,
        }

        # dict -> model must yield a usable instance.
        model = BulkGetResultDocument.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = BulkGetResultDocument(**BulkGetResultDocument.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_BulkGetResultItem():
    """
    Test Class for BulkGetResultItem
    """

    def test_bulk_get_result_item_serialization(self):
        """
        Test serialization/deserialization for BulkGetResultItem.

        Round-trips a BulkGetResultItem payload through from_dict / to_dict
        and verifies that equivalent instances compare equal and no data is
        lost.
        """
        # Dict forms of the nested models needed to build the payload.
        # NOTE: an Attachment dict was previously built here but never
        # referenced (the document's '_attachments' is empty), so it is omitted.
        document_result = {  # DocumentResult
            'id': 'testString',
            'rev': 'testString',
            'ok': True,
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
        }
        revisions = {  # Revisions
            'ids': ['testString'],
            'start': 1,
        }
        doc_rev_status = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }
        document = {  # Document
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions,
            '_revs_info': [doc_rev_status],
            'foo': 'testString',
        }
        result_document = {  # BulkGetResultDocument
            'error': document_result,
            'ok': document,
        }

        # JSON representation of a BulkGetResultItem model.
        payload = {
            'docs': [result_document],
            'id': 'testString',
        }

        # dict -> model must yield a usable instance.
        model = BulkGetResultItem.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = BulkGetResultItem(**BulkGetResultItem.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_CapacityThroughputInformation():
    """
    Test Class for CapacityThroughputInformation
    """

    def test_capacity_throughput_information_serialization(self):
        """
        Test serialization/deserialization for CapacityThroughputInformation
        """
        # Dict forms of the nested models needed to build the payload.
        throughput = {  # ThroughputInformation
            'blocks': 0,
            'query': 0,
            'read': 0,
            'write': 0,
        }
        current = {  # CapacityThroughputInformationCurrent
            'throughput': throughput,
        }
        target = {  # CapacityThroughputInformationTarget
            'throughput': throughput,
        }

        # JSON representation of a CapacityThroughputInformation model.
        payload = {
            'current': current,
            'target': target,
        }

        # dict -> model must yield a usable instance.
        model = CapacityThroughputInformation.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = CapacityThroughputInformation(**CapacityThroughputInformation.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_CapacityThroughputInformationCurrent():
    """
    Test Class for CapacityThroughputInformationCurrent
    """

    def test_capacity_throughput_information_current_serialization(self):
        """
        Test serialization/deserialization for CapacityThroughputInformationCurrent
        """
        # Dict form of the nested ThroughputInformation model.
        throughput = {
            'blocks': 0,
            'query': 0,
            'read': 0,
            'write': 0,
        }

        # JSON representation of a CapacityThroughputInformationCurrent model.
        payload = {
            'throughput': throughput,
        }

        # dict -> model must yield a usable instance.
        model = CapacityThroughputInformationCurrent.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = CapacityThroughputInformationCurrent(**CapacityThroughputInformationCurrent.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_CapacityThroughputInformationTarget():
    """
    Test Class for CapacityThroughputInformationTarget
    """

    def test_capacity_throughput_information_target_serialization(self):
        """
        Test serialization/deserialization for CapacityThroughputInformationTarget
        """
        # Dict form of the nested ThroughputInformation model.
        throughput = {
            'blocks': 0,
            'query': 0,
            'read': 0,
            'write': 0,
        }

        # JSON representation of a CapacityThroughputInformationTarget model.
        payload = {
            'throughput': throughput,
        }

        # dict -> model must yield a usable instance.
        model = CapacityThroughputInformationTarget.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = CapacityThroughputInformationTarget(**CapacityThroughputInformationTarget.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_Change():
    """
    Test Class for Change
    """

    def test_change_serialization(self):
        """
        Test serialization/deserialization for Change
        """
        # JSON representation of a Change model.
        payload = {
            'rev': 'testString',
        }

        # dict -> model must yield a usable instance.
        model = Change.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = Change(**Change.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_ChangesResult():
    """
    Test Class for ChangesResult
    """

    def test_changes_result_serialization(self):
        """
        Test serialization/deserialization for ChangesResult.

        Round-trips a ChangesResult payload through from_dict / to_dict and
        verifies that equivalent instances compare equal and no data is lost.
        """
        # Dict forms of the nested models needed to build the payload.
        # NOTE: an Attachment dict was previously built here but never
        # referenced (the document's '_attachments' is empty), so it is omitted.
        change = {  # Change
            'rev': 'testString',
        }
        revisions = {  # Revisions
            'ids': ['testString'],
            'start': 1,
        }
        doc_rev_status = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }
        document = {  # Document
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions,
            '_revs_info': [doc_rev_status],
            'foo': 'testString',
        }
        result_item = {  # ChangesResultItem
            'changes': [change],
            'deleted': True,
            'doc': document,
            'id': 'testString',
            'seq': 'testString',
        }

        # JSON representation of a ChangesResult model.
        payload = {
            'last_seq': 'testString',
            'pending': 26,
            'results': [result_item],
        }

        # dict -> model must yield a usable instance.
        model = ChangesResult.from_dict(payload)
        assert model != False

        # An instance rebuilt from the first one's attributes must compare equal.
        rebuilt = ChangesResult(**ChangesResult.from_dict(payload).__dict__)
        assert model == rebuilt

        # model -> dict must reproduce the original payload without data loss.
        assert model.to_dict() == payload
class TestModel_ChangesResultItem():
    """
    Test Class for ChangesResultItem
    """

    def test_changes_result_item_serialization(self):
        """
        Test serialization/deserialization for ChangesResultItem
        """

        # Construct dict forms of the nested models this payload references.
        # NOTE(review): the previously-built Attachment fixture was never used
        # ('_attachments' below is deliberately the empty dict), so it has been removed.
        change_model = {} # Change
        change_model['rev'] = 'testString'

        revisions_model = {} # Revisions
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1

        document_revision_status_model = {} # DocumentRevisionStatus
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'

        document_model = {} # Document
        document_model['_attachments'] = {}
        document_model['_conflicts'] = ['testString']
        document_model['_deleted'] = True
        document_model['_deleted_conflicts'] = ['testString']
        document_model['_id'] = 'testString'
        document_model['_local_seq'] = 'testString'
        document_model['_rev'] = 'testString'
        document_model['_revisions'] = revisions_model
        document_model['_revs_info'] = [document_revision_status_model]
        document_model['foo'] = 'testString'

        # Construct a json representation of a ChangesResultItem model
        changes_result_item_model_json = {}
        changes_result_item_model_json['changes'] = [change_model]
        changes_result_item_model_json['deleted'] = True
        changes_result_item_model_json['doc'] = document_model
        changes_result_item_model_json['id'] = 'testString'
        changes_result_item_model_json['seq'] = 'testString'

        # Construct a model instance of ChangesResultItem by calling from_dict on the json representation
        changes_result_item_model = ChangesResultItem.from_dict(changes_result_item_model_json)
        assert changes_result_item_model != False

        # Construct a second instance from the first instance's attribute dict;
        # both construction paths must produce equivalent models.
        changes_result_item_model_dict = ChangesResultItem.from_dict(changes_result_item_model_json).__dict__
        changes_result_item_model2 = ChangesResultItem(**changes_result_item_model_dict)

        # Verify the model instances are equivalent
        assert changes_result_item_model == changes_result_item_model2

        # Convert model instance back to dict and verify no loss of data
        changes_result_item_model_json2 = changes_result_item_model.to_dict()
        assert changes_result_item_model_json2 == changes_result_item_model_json
class TestModel_ContentInformationSizes():
    """
    Test Class for ContentInformationSizes
    """

    def test_content_information_sizes_serialization(self):
        """
        Test serialization/deserialization for ContentInformationSizes
        """

        # Build the JSON payload for a ContentInformationSizes model.
        content_information_sizes_model_json = {
            'active': 26,
            'external': 26,
            'file': 26,
        }

        # Deserialize the payload into a model instance.
        content_information_sizes_model = ContentInformationSizes.from_dict(content_information_sizes_model_json)
        assert content_information_sizes_model != False

        # Rebuild a second instance from the first one's attribute dict.
        content_information_sizes_model_dict = ContentInformationSizes.from_dict(content_information_sizes_model_json).__dict__
        content_information_sizes_model2 = ContentInformationSizes(**content_information_sizes_model_dict)

        # Both construction paths must yield equivalent models.
        assert content_information_sizes_model == content_information_sizes_model2

        # Serializing back to a dict must round-trip without data loss.
        content_information_sizes_model_json2 = content_information_sizes_model.to_dict()
        assert content_information_sizes_model_json2 == content_information_sizes_model_json
class TestModel_CorsInformation():
    """
    Test Class for CorsInformation
    """

    def test_cors_information_serialization(self):
        """
        Test serialization/deserialization for CorsInformation
        """

        # Build the JSON payload for a CorsInformation model.
        cors_information_model_json = {
            'allow_credentials': True,
            'enable_cors': True,
            'origins': ['testString'],
        }

        # Deserialize the payload into a model instance.
        cors_information_model = CorsInformation.from_dict(cors_information_model_json)
        assert cors_information_model != False

        # Rebuild a second instance from the first one's attribute dict.
        cors_information_model_dict = CorsInformation.from_dict(cors_information_model_json).__dict__
        cors_information_model2 = CorsInformation(**cors_information_model_dict)

        # Both construction paths must yield equivalent models.
        assert cors_information_model == cors_information_model2

        # Serializing back to a dict must round-trip without data loss.
        cors_information_model_json2 = cors_information_model.to_dict()
        assert cors_information_model_json2 == cors_information_model_json
class TestModel_CurrentThroughputInformation():
    """
    Test Class for CurrentThroughputInformation
    """

    def test_current_throughput_information_serialization(self):
        """
        Test serialization/deserialization for CurrentThroughputInformation
        """

        # Nested CurrentThroughputInformationThroughput fixture.
        current_throughput_information_throughput_model = {
            'query': 0,
            'read': 0,
            'write': 0,
        }

        # Build the JSON payload for a CurrentThroughputInformation model.
        current_throughput_information_model_json = {
            'throughput': current_throughput_information_throughput_model,
        }

        # Deserialize the payload into a model instance.
        current_throughput_information_model = CurrentThroughputInformation.from_dict(current_throughput_information_model_json)
        assert current_throughput_information_model != False

        # Rebuild a second instance from the first one's attribute dict.
        current_throughput_information_model_dict = CurrentThroughputInformation.from_dict(current_throughput_information_model_json).__dict__
        current_throughput_information_model2 = CurrentThroughputInformation(**current_throughput_information_model_dict)

        # Both construction paths must yield equivalent models.
        assert current_throughput_information_model == current_throughput_information_model2

        # Serializing back to a dict must round-trip without data loss.
        current_throughput_information_model_json2 = current_throughput_information_model.to_dict()
        assert current_throughput_information_model_json2 == current_throughput_information_model_json
class TestModel_CurrentThroughputInformationThroughput():
    """
    Test Class for CurrentThroughputInformationThroughput
    """

    def test_current_throughput_information_throughput_serialization(self):
        """
        Test serialization/deserialization for CurrentThroughputInformationThroughput
        """

        # Build the JSON payload for a CurrentThroughputInformationThroughput model.
        current_throughput_information_throughput_model_json = {
            'query': 0,
            'read': 0,
            'write': 0,
        }

        # Deserialize the payload into a model instance.
        current_throughput_information_throughput_model = CurrentThroughputInformationThroughput.from_dict(current_throughput_information_throughput_model_json)
        assert current_throughput_information_throughput_model != False

        # Rebuild a second instance from the first one's attribute dict.
        current_throughput_information_throughput_model_dict = CurrentThroughputInformationThroughput.from_dict(current_throughput_information_throughput_model_json).__dict__
        current_throughput_information_throughput_model2 = CurrentThroughputInformationThroughput(**current_throughput_information_throughput_model_dict)

        # Both construction paths must yield equivalent models.
        assert current_throughput_information_throughput_model == current_throughput_information_throughput_model2

        # Serializing back to a dict must round-trip without data loss.
        current_throughput_information_throughput_model_json2 = current_throughput_information_throughput_model.to_dict()
        assert current_throughput_information_throughput_model_json2 == current_throughput_information_throughput_model_json
class TestModel_DatabaseInformation():
    """
    Test Class for DatabaseInformation
    """

    def test_database_information_serialization(self):
        """
        Test serialization/deserialization for DatabaseInformation
        """

        # Nested fixtures required by the DatabaseInformation payload.
        database_information_cluster_model = {
            'n': 1,
            'q': 1,
            'r': 1,
            'w': 1,
        } # DatabaseInformationCluster

        database_information_props_model = {
            'partitioned': True,
        } # DatabaseInformationProps

        content_information_sizes_model = {
            'active': 26,
            'external': 26,
            'file': 26,
        } # ContentInformationSizes

        # Build the JSON payload for a DatabaseInformation model.
        database_information_model_json = {
            'cluster': database_information_cluster_model,
            'committed_update_seq': 'testString',
            'compact_running': True,
            'compacted_seq': 'testString',
            'db_name': 'testString',
            'disk_format_version': 26,
            'doc_count': 0,
            'doc_del_count': 0,
            'engine': 'testString',
            'props': database_information_props_model,
            'sizes': content_information_sizes_model,
            'update_seq': 'testString',
            'uuid': 'testString',
        }

        # Deserialize the payload into a model instance.
        database_information_model = DatabaseInformation.from_dict(database_information_model_json)
        assert database_information_model != False

        # Rebuild a second instance from the first one's attribute dict.
        database_information_model_dict = DatabaseInformation.from_dict(database_information_model_json).__dict__
        database_information_model2 = DatabaseInformation(**database_information_model_dict)

        # Both construction paths must yield equivalent models.
        assert database_information_model == database_information_model2

        # Serializing back to a dict must round-trip without data loss.
        database_information_model_json2 = database_information_model.to_dict()
        assert database_information_model_json2 == database_information_model_json
class TestModel_DatabaseInformationCluster():
    """
    Test Class for DatabaseInformationCluster
    """

    def test_database_information_cluster_serialization(self):
        """
        Test serialization/deserialization for DatabaseInformationCluster
        """

        # Build the JSON payload for a DatabaseInformationCluster model.
        database_information_cluster_model_json = {
            'n': 1,
            'q': 1,
            'r': 1,
            'w': 1,
        }

        # Deserialize the payload into a model instance.
        database_information_cluster_model = DatabaseInformationCluster.from_dict(database_information_cluster_model_json)
        assert database_information_cluster_model != False

        # Rebuild a second instance from the first one's attribute dict.
        database_information_cluster_model_dict = DatabaseInformationCluster.from_dict(database_information_cluster_model_json).__dict__
        database_information_cluster_model2 = DatabaseInformationCluster(**database_information_cluster_model_dict)

        # Both construction paths must yield equivalent models.
        assert database_information_cluster_model == database_information_cluster_model2

        # Serializing back to a dict must round-trip without data loss.
        database_information_cluster_model_json2 = database_information_cluster_model.to_dict()
        assert database_information_cluster_model_json2 == database_information_cluster_model_json
class TestModel_DatabaseInformationProps():
    """
    Test Class for DatabaseInformationProps
    """

    def test_database_information_props_serialization(self):
        """
        Test serialization/deserialization for DatabaseInformationProps
        """

        # Build the JSON payload for a DatabaseInformationProps model.
        database_information_props_model_json = {
            'partitioned': True,
        }

        # Deserialize the payload into a model instance.
        database_information_props_model = DatabaseInformationProps.from_dict(database_information_props_model_json)
        assert database_information_props_model != False

        # Rebuild a second instance from the first one's attribute dict.
        database_information_props_model_dict = DatabaseInformationProps.from_dict(database_information_props_model_json).__dict__
        database_information_props_model2 = DatabaseInformationProps(**database_information_props_model_dict)

        # Both construction paths must yield equivalent models.
        assert database_information_props_model == database_information_props_model2

        # Serializing back to a dict must round-trip without data loss.
        database_information_props_model_json2 = database_information_props_model.to_dict()
        assert database_information_props_model_json2 == database_information_props_model_json
class TestModel_DbEvent():
    """
    Test Class for DbEvent
    """

    def test_db_event_serialization(self):
        """
        Test serialization/deserialization for DbEvent
        """

        # Build the JSON payload for a DbEvent model.
        db_event_model_json = {
            'account': 'testString',
            'db_name': 'testString',
            'seq': 'testString',
            'type': 'created',
        }

        # Deserialize the payload into a model instance.
        db_event_model = DbEvent.from_dict(db_event_model_json)
        assert db_event_model != False

        # Rebuild a second instance from the first one's attribute dict.
        db_event_model_dict = DbEvent.from_dict(db_event_model_json).__dict__
        db_event_model2 = DbEvent(**db_event_model_dict)

        # Both construction paths must yield equivalent models.
        assert db_event_model == db_event_model2

        # Serializing back to a dict must round-trip without data loss.
        db_event_model_json2 = db_event_model.to_dict()
        assert db_event_model_json2 == db_event_model_json
class TestModel_DbUpdates():
    """
    Test Class for DbUpdates
    """

    def test_db_updates_serialization(self):
        """
        Test serialization/deserialization for DbUpdates
        """

        # Nested DbEvent fixture referenced by the results list.
        db_event_model = {
            'account': 'testString',
            'db_name': 'testString',
            'seq': 'testString',
            'type': 'created',
        } # DbEvent

        # Build the JSON payload for a DbUpdates model.
        db_updates_model_json = {
            'last_seq': 'testString',
            'results': [db_event_model],
        }

        # Deserialize the payload into a model instance.
        db_updates_model = DbUpdates.from_dict(db_updates_model_json)
        assert db_updates_model != False

        # Rebuild a second instance from the first one's attribute dict.
        db_updates_model_dict = DbUpdates.from_dict(db_updates_model_json).__dict__
        db_updates_model2 = DbUpdates(**db_updates_model_dict)

        # Both construction paths must yield equivalent models.
        assert db_updates_model == db_updates_model2

        # Serializing back to a dict must round-trip without data loss.
        db_updates_model_json2 = db_updates_model.to_dict()
        assert db_updates_model_json2 == db_updates_model_json
class TestModel_DbsInfoResult():
    """
    Test Class for DbsInfoResult
    """

    def test_dbs_info_result_serialization(self):
        """
        Test serialization/deserialization for DbsInfoResult
        """

        # Nested fixtures required by the DatabaseInformation payload below.
        database_information_cluster_model = {
            'n': 1,
            'q': 1,
            'r': 1,
            'w': 1,
        } # DatabaseInformationCluster

        database_information_props_model = {
            'partitioned': True,
        } # DatabaseInformationProps

        content_information_sizes_model = {
            'active': 26,
            'external': 26,
            'file': 26,
        } # ContentInformationSizes

        database_information_model = {
            'cluster': database_information_cluster_model,
            'committed_update_seq': 'testString',
            'compact_running': True,
            'compacted_seq': 'testString',
            'db_name': 'testString',
            'disk_format_version': 26,
            'doc_count': 0,
            'doc_del_count': 0,
            'engine': 'testString',
            'props': database_information_props_model,
            'sizes': content_information_sizes_model,
            'update_seq': 'testString',
            'uuid': 'testString',
        } # DatabaseInformation

        # Build the JSON payload for a DbsInfoResult model.
        dbs_info_result_model_json = {
            'error': 'testString',
            'info': database_information_model,
            'key': 'testString',
        }

        # Deserialize the payload into a model instance.
        dbs_info_result_model = DbsInfoResult.from_dict(dbs_info_result_model_json)
        assert dbs_info_result_model != False

        # Rebuild a second instance from the first one's attribute dict.
        dbs_info_result_model_dict = DbsInfoResult.from_dict(dbs_info_result_model_json).__dict__
        dbs_info_result_model2 = DbsInfoResult(**dbs_info_result_model_dict)

        # Both construction paths must yield equivalent models.
        assert dbs_info_result_model == dbs_info_result_model2

        # Serializing back to a dict must round-trip without data loss.
        dbs_info_result_model_json2 = dbs_info_result_model.to_dict()
        assert dbs_info_result_model_json2 == dbs_info_result_model_json
class TestModel_DesignDocument():
    """
    Test Class for DesignDocument
    """

    def test_design_document_serialization(self):
        """
        Test serialization/deserialization for DesignDocument
        """

        # Construct dict forms of the nested models this payload references.
        # NOTE(review): the previously-built Attachment, Analyzer,
        # AnalyzerConfiguration, SearchIndexDefinition,
        # DesignDocumentViewsMapReduce and GeoIndexDefinition fixtures were
        # never used — the payload below sets '_attachments', 'indexes',
        # 'views' and 'st_indexes' to empty dicts — so they have been removed.
        revisions_model = {} # Revisions
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1

        document_revision_status_model = {} # DocumentRevisionStatus
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'

        design_document_options_model = {} # DesignDocumentOptions
        design_document_options_model['partitioned'] = True

        # Construct a json representation of a DesignDocument model
        design_document_model_json = {}
        design_document_model_json['_attachments'] = {}
        design_document_model_json['_conflicts'] = ['testString']
        design_document_model_json['_deleted'] = True
        design_document_model_json['_deleted_conflicts'] = ['testString']
        design_document_model_json['_id'] = 'testString'
        design_document_model_json['_local_seq'] = 'testString'
        design_document_model_json['_rev'] = 'testString'
        design_document_model_json['_revisions'] = revisions_model
        design_document_model_json['_revs_info'] = [document_revision_status_model]
        design_document_model_json['autoupdate'] = True
        design_document_model_json['filters'] = {}
        design_document_model_json['indexes'] = {}
        design_document_model_json['language'] = 'javascript'
        design_document_model_json['options'] = design_document_options_model
        design_document_model_json['validate_doc_update'] = 'testString'
        design_document_model_json['views'] = {}
        design_document_model_json['st_indexes'] = {}
        design_document_model_json['foo'] = 'testString'

        # Construct a model instance of DesignDocument by calling from_dict on the json representation
        design_document_model = DesignDocument.from_dict(design_document_model_json)
        assert design_document_model != False

        # Construct a second instance from the first instance's attribute dict;
        # both construction paths must produce equivalent models.
        design_document_model_dict = DesignDocument.from_dict(design_document_model_json).__dict__
        design_document_model2 = DesignDocument(**design_document_model_dict)

        # Verify the model instances are equivalent
        assert design_document_model == design_document_model2

        # Convert model instance back to dict and verify no loss of data
        design_document_model_json2 = design_document_model.to_dict()
        assert design_document_model_json2 == design_document_model_json

        # Test get_properties and set_properties methods
        # (dynamic properties for the open-ended 'foo' field).
        design_document_model.set_properties({})
        actual_dict = design_document_model.get_properties()
        assert actual_dict == {}

        expected_dict = {'foo': 'testString'}
        design_document_model.set_properties(expected_dict)
        actual_dict = design_document_model.get_properties()
        assert actual_dict == expected_dict
class TestModel_DesignDocumentInformation():
    """
    Test Class for DesignDocumentInformation
    """

    def test_design_document_information_serialization(self):
        """
        Test serialization/deserialization for DesignDocumentInformation
        """

        # Nested fixtures required by the view_index field.
        content_information_sizes_model = {
            'active': 26,
            'external': 26,
            'file': 26,
        } # ContentInformationSizes

        design_document_view_index_model = {
            'compact_running': True,
            'language': 'testString',
            'signature': 'testString',
            'sizes': content_information_sizes_model,
            'updater_running': True,
            'waiting_clients': 0,
            'waiting_commit': True,
        } # DesignDocumentViewIndex

        # Build the JSON payload for a DesignDocumentInformation model.
        design_document_information_model_json = {
            'name': 'testString',
            'view_index': design_document_view_index_model,
        }

        # Deserialize the payload into a model instance.
        design_document_information_model = DesignDocumentInformation.from_dict(design_document_information_model_json)
        assert design_document_information_model != False

        # Rebuild a second instance from the first one's attribute dict.
        design_document_information_model_dict = DesignDocumentInformation.from_dict(design_document_information_model_json).__dict__
        design_document_information_model2 = DesignDocumentInformation(**design_document_information_model_dict)

        # Both construction paths must yield equivalent models.
        assert design_document_information_model == design_document_information_model2

        # Serializing back to a dict must round-trip without data loss.
        design_document_information_model_json2 = design_document_information_model.to_dict()
        assert design_document_information_model_json2 == design_document_information_model_json
class TestModel_DesignDocumentOptions():
    """
    Test Class for DesignDocumentOptions
    """

    def test_design_document_options_serialization(self):
        """
        Test serialization/deserialization for DesignDocumentOptions
        """

        # Build the JSON payload for a DesignDocumentOptions model.
        design_document_options_model_json = {
            'partitioned': True,
        }

        # Deserialize the payload into a model instance.
        design_document_options_model = DesignDocumentOptions.from_dict(design_document_options_model_json)
        assert design_document_options_model != False

        # Rebuild a second instance from the first one's attribute dict.
        design_document_options_model_dict = DesignDocumentOptions.from_dict(design_document_options_model_json).__dict__
        design_document_options_model2 = DesignDocumentOptions(**design_document_options_model_dict)

        # Both construction paths must yield equivalent models.
        assert design_document_options_model == design_document_options_model2

        # Serializing back to a dict must round-trip without data loss.
        design_document_options_model_json2 = design_document_options_model.to_dict()
        assert design_document_options_model_json2 == design_document_options_model_json
class TestModel_DesignDocumentViewIndex():
    """
    Test Class for DesignDocumentViewIndex
    """

    def test_design_document_view_index_serialization(self):
        """
        Test serialization/deserialization for DesignDocumentViewIndex
        """

        # Nested ContentInformationSizes fixture for the sizes field.
        content_information_sizes_model = {
            'active': 26,
            'external': 26,
            'file': 26,
        } # ContentInformationSizes

        # Build the JSON payload for a DesignDocumentViewIndex model.
        design_document_view_index_model_json = {
            'compact_running': True,
            'language': 'testString',
            'signature': 'testString',
            'sizes': content_information_sizes_model,
            'updater_running': True,
            'waiting_clients': 0,
            'waiting_commit': True,
        }

        # Deserialize the payload into a model instance.
        design_document_view_index_model = DesignDocumentViewIndex.from_dict(design_document_view_index_model_json)
        assert design_document_view_index_model != False

        # Rebuild a second instance from the first one's attribute dict.
        design_document_view_index_model_dict = DesignDocumentViewIndex.from_dict(design_document_view_index_model_json).__dict__
        design_document_view_index_model2 = DesignDocumentViewIndex(**design_document_view_index_model_dict)

        # Both construction paths must yield equivalent models.
        assert design_document_view_index_model == design_document_view_index_model2

        # Serializing back to a dict must round-trip without data loss.
        design_document_view_index_model_json2 = design_document_view_index_model.to_dict()
        assert design_document_view_index_model_json2 == design_document_view_index_model_json
class TestModel_DesignDocumentViewsMapReduce():
    """
    Test Class for DesignDocumentViewsMapReduce
    """

    def test_design_document_views_map_reduce_serialization(self):
        """
        Test serialization/deserialization for DesignDocumentViewsMapReduce
        """

        # Build the JSON payload for a DesignDocumentViewsMapReduce model.
        design_document_views_map_reduce_model_json = {
            'map': 'testString',
            'reduce': 'testString',
        }

        # Deserialize the payload into a model instance.
        design_document_views_map_reduce_model = DesignDocumentViewsMapReduce.from_dict(design_document_views_map_reduce_model_json)
        assert design_document_views_map_reduce_model != False

        # Rebuild a second instance from the first one's attribute dict.
        design_document_views_map_reduce_model_dict = DesignDocumentViewsMapReduce.from_dict(design_document_views_map_reduce_model_json).__dict__
        design_document_views_map_reduce_model2 = DesignDocumentViewsMapReduce(**design_document_views_map_reduce_model_dict)

        # Both construction paths must yield equivalent models.
        assert design_document_views_map_reduce_model == design_document_views_map_reduce_model2

        # Serializing back to a dict must round-trip without data loss.
        design_document_views_map_reduce_model_json2 = design_document_views_map_reduce_model.to_dict()
        assert design_document_views_map_reduce_model_json2 == design_document_views_map_reduce_model_json
class TestModel_DocsResultRow():
    """
    Test Class for DocsResultRow
    """

    def test_docs_result_row_serialization(self):
        """
        Test serialization/deserialization for DocsResultRow
        """

        # Construct dict forms of the nested models this payload references.
        # NOTE(review): the previously-built Attachment fixture was never used
        # ('_attachments' below is deliberately the empty dict), so it has been removed.
        revisions_model = {} # Revisions
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1

        document_revision_status_model = {} # DocumentRevisionStatus
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'

        document_model = {} # Document
        document_model['_attachments'] = {}
        document_model['_conflicts'] = ['testString']
        document_model['_deleted'] = True
        document_model['_deleted_conflicts'] = ['testString']
        document_model['_id'] = 'testString'
        document_model['_local_seq'] = 'testString'
        document_model['_rev'] = 'testString'
        document_model['_revisions'] = revisions_model
        document_model['_revs_info'] = [document_revision_status_model]
        document_model['foo'] = 'testString'

        docs_result_row_value_model = {} # DocsResultRowValue
        docs_result_row_value_model['rev'] = 'testString'

        # Construct a json representation of a DocsResultRow model
        docs_result_row_model_json = {}
        docs_result_row_model_json['caused_by'] = 'testString'
        docs_result_row_model_json['error'] = 'testString'
        docs_result_row_model_json['reason'] = 'testString'
        docs_result_row_model_json['doc'] = document_model
        docs_result_row_model_json['id'] = 'testString'
        docs_result_row_model_json['key'] = 'testString'
        docs_result_row_model_json['value'] = docs_result_row_value_model

        # Construct a model instance of DocsResultRow by calling from_dict on the json representation
        docs_result_row_model = DocsResultRow.from_dict(docs_result_row_model_json)
        assert docs_result_row_model != False

        # Construct a second instance from the first instance's attribute dict;
        # both construction paths must produce equivalent models.
        docs_result_row_model_dict = DocsResultRow.from_dict(docs_result_row_model_json).__dict__
        docs_result_row_model2 = DocsResultRow(**docs_result_row_model_dict)

        # Verify the model instances are equivalent
        assert docs_result_row_model == docs_result_row_model2

        # Convert model instance back to dict and verify no loss of data
        docs_result_row_model_json2 = docs_result_row_model.to_dict()
        assert docs_result_row_model_json2 == docs_result_row_model_json
class TestModel_DocsResultRowValue():
    """
    Test Class for DocsResultRowValue
    """

    def test_docs_result_row_value_serialization(self):
        """
        Test serialization/deserialization for DocsResultRowValue
        """
        # JSON payload for a DocsResultRowValue model.
        payload = {
            'rev': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = DocsResultRowValue.from_dict(payload)
        assert model != False

        # Rebuild an equivalent instance from the first one's attributes.
        clone = DocsResultRowValue(**DocsResultRowValue.from_dict(payload).__dict__)
        assert model == clone

        # Serialize back to a dict; the round trip must be lossless.
        assert model.to_dict() == payload
class TestModel_Document():
    """
    Test Class for Document
    """

    def test_document_serialization(self):
        """
        Test serialization/deserialization for Document
        """
        # Nested model dicts referenced by the Document payload.
        # NOTE: the generated test previously also built an Attachment dict that
        # was never referenced ('_attachments' is an empty mapping below); that
        # dead local has been removed.
        revisions_model = {
            'ids': ['testString'],
            'start': 1,
        }
        document_revision_status_model = {
            'rev': 'testString',
            'status': 'available',
        }

        # JSON payload for a Document model.
        document_model_json = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }

        # Deserialize the payload into a model instance.
        document_model = Document.from_dict(document_model_json)
        assert document_model != False

        # Rebuild an equivalent instance from the first one's attributes.
        document_model2 = Document(**Document.from_dict(document_model_json).__dict__)
        assert document_model == document_model2

        # Serialize back to a dict; the round trip must be lossless.
        assert document_model.to_dict() == document_model_json

        # Exercise the dynamic-property accessors (get_properties/set_properties).
        document_model.set_properties({})
        assert document_model.get_properties() == {}
        expected_dict = {'foo': 'testString'}
        document_model.set_properties(expected_dict)
        assert document_model.get_properties() == expected_dict
class TestModel_DocumentResult():
    """
    Test Class for DocumentResult
    """

    def test_document_result_serialization(self):
        """
        Test serialization/deserialization for DocumentResult
        """
        # JSON payload for a DocumentResult model.
        payload = {
            'id': 'testString',
            'rev': 'testString',
            'ok': True,
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = DocumentResult.from_dict(payload)
        assert model != False

        # Rebuild an equivalent instance from the first one's attributes.
        clone = DocumentResult(**DocumentResult.from_dict(payload).__dict__)
        assert model == clone

        # Serialize back to a dict; the round trip must be lossless.
        assert model.to_dict() == payload
class TestModel_DocumentRevisionStatus():
    """
    Test Class for DocumentRevisionStatus
    """

    def test_document_revision_status_serialization(self):
        """
        Test serialization/deserialization for DocumentRevisionStatus
        """
        # JSON payload for a DocumentRevisionStatus model.
        payload = {
            'rev': 'testString',
            'status': 'available',
        }

        # Deserialize the payload into a model instance.
        model = DocumentRevisionStatus.from_dict(payload)
        assert model != False

        # Rebuild an equivalent instance from the first one's attributes.
        clone = DocumentRevisionStatus(**DocumentRevisionStatus.from_dict(payload).__dict__)
        assert model == clone

        # Serialize back to a dict; the round trip must be lossless.
        assert model.to_dict() == payload
class TestModel_DocumentShardInfo():
    """
    Test Class for DocumentShardInfo
    """

    def test_document_shard_info_serialization(self):
        """
        Test serialization/deserialization for DocumentShardInfo
        """
        # JSON payload for a DocumentShardInfo model.
        payload = {
            'nodes': ['testString'],
            'range': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = DocumentShardInfo.from_dict(payload)
        assert model != False

        # Rebuild an equivalent instance from the first one's attributes.
        clone = DocumentShardInfo(**DocumentShardInfo.from_dict(payload).__dict__)
        assert model == clone

        # Serialize back to a dict; the round trip must be lossless.
        assert model.to_dict() == payload
class TestModel_ExecutionStats():
    """
    Test Class for ExecutionStats
    """

    def test_execution_stats_serialization(self):
        """
        Test serialization/deserialization for ExecutionStats
        """
        # JSON payload for an ExecutionStats model.
        payload = {
            'execution_time_ms': 72.5,
            'results_returned': 0,
            'total_docs_examined': 0,
            'total_keys_examined': 0,
            'total_quorum_docs_examined': 0,
        }

        # Deserialize the payload into a model instance.
        model = ExecutionStats.from_dict(payload)
        assert model != False

        # Rebuild an equivalent instance from the first one's attributes.
        clone = ExecutionStats(**ExecutionStats.from_dict(payload).__dict__)
        assert model == clone

        # Serialize back to a dict; the round trip must be lossless.
        assert model.to_dict() == payload
class TestModel_ExplainResult():
    """
    Test Class for ExplainResult
    """

    def test_explain_result_serialization(self):
        """
        Test serialization/deserialization for ExplainResult
        """
        # Nested model dicts referenced by the ExplainResult payload.
        analyzer_model = {
            'name': 'classic',
            'stopwords': ['testString'],
        }
        index_text_operator_default_field_model = {
            'analyzer': analyzer_model,
            'enabled': True,
        }
        index_field_model = {
            'name': 'testString',
            'type': 'boolean',
            'foo': 'asc',
        }
        index_definition_model = {
            'default_analyzer': analyzer_model,
            'default_field': index_text_operator_default_field_model,
            'fields': [index_field_model],
            'index_array_lengths': True,
            'partial_filter_selector': {},
        }
        index_information_model = {
            'ddoc': 'testString',
            'def': index_definition_model,
            'name': 'testString',
            'type': 'json',
        }
        explain_result_range_model = {
            'end_key': ['testString'],
            'start_key': ['testString'],
        }

        # JSON payload for an ExplainResult model.
        payload = {
            'dbname': 'testString',
            'fields': ['testString'],
            'index': index_information_model,
            'limit': 0,
            'opts': {},
            'range': explain_result_range_model,
            'selector': {},
            'skip': 0,
        }

        # Deserialize the payload into a model instance.
        model = ExplainResult.from_dict(payload)
        assert model != False

        # Rebuild an equivalent instance from the first one's attributes.
        clone = ExplainResult(**ExplainResult.from_dict(payload).__dict__)
        assert model == clone

        # Serialize back to a dict; the round trip must be lossless.
        assert model.to_dict() == payload
class TestModel_ExplainResultRange():
    """
    Test Class for ExplainResultRange
    """

    def test_explain_result_range_serialization(self):
        """
        Test serialization/deserialization for ExplainResultRange
        """
        # JSON payload for an ExplainResultRange model.
        payload = {
            'end_key': ['testString'],
            'start_key': ['testString'],
        }

        # Deserialize the payload into a model instance.
        model = ExplainResultRange.from_dict(payload)
        assert model != False

        # Rebuild an equivalent instance from the first one's attributes.
        clone = ExplainResultRange(**ExplainResultRange.from_dict(payload).__dict__)
        assert model == clone

        # Serialize back to a dict; the round trip must be lossless.
        assert model.to_dict() == payload
class TestModel_FindResult():
    """
    Test Class for FindResult
    """

    def test_find_result_serialization(self):
        """
        Test serialization/deserialization for FindResult
        """
        # Nested model dicts referenced by the FindResult payload.
        # NOTE: the generated test previously also built an Attachment dict that
        # was never referenced ('_attachments' is an empty mapping below); that
        # dead local has been removed.
        revisions_model = {
            'ids': ['testString'],
            'start': 1,
        }
        document_revision_status_model = {
            'rev': 'testString',
            'status': 'available',
        }
        document_model = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }
        execution_stats_model = {
            'execution_time_ms': 72.5,
            'results_returned': 0,
            'total_docs_examined': 0,
            'total_keys_examined': 0,
            'total_quorum_docs_examined': 0,
        }

        # JSON payload for a FindResult model.
        find_result_model_json = {
            'bookmark': 'testString',
            'docs': [document_model],
            'execution_stats': execution_stats_model,
            'warning': 'testString',
        }

        # Deserialize the payload into a model instance.
        find_result_model = FindResult.from_dict(find_result_model_json)
        assert find_result_model != False

        # Rebuild an equivalent instance from the first one's attributes.
        find_result_model2 = FindResult(**FindResult.from_dict(find_result_model_json).__dict__)
        assert find_result_model == find_result_model2

        # Serialize back to a dict; the round trip must be lossless.
        assert find_result_model.to_dict() == find_result_model_json
class TestModel_GeoIndexDefinition():
    """
    Test Class for GeoIndexDefinition
    """

    def test_geo_index_definition_serialization(self):
        """
        Test serialization/deserialization for GeoIndexDefinition
        """
        # JSON payload for a GeoIndexDefinition model.
        payload = {
            'index': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = GeoIndexDefinition.from_dict(payload)
        assert model != False

        # Rebuild an equivalent instance from the first one's attributes.
        clone = GeoIndexDefinition(**GeoIndexDefinition.from_dict(payload).__dict__)
        assert model == clone

        # Serialize back to a dict; the round trip must be lossless.
        assert model.to_dict() == payload
class TestModel_GeoIndexInformation():
    """
    Test Class for GeoIndexInformation
    """

    def test_geo_index_information_serialization(self):
        """
        Test serialization/deserialization for GeoIndexInformation
        """
        # Nested model dict referenced by the GeoIndexInformation payload.
        geo_index_stats_model = {
            'data_size': 0,
            'disk_size': 0,
            'doc_count': 0,
        }

        # JSON payload for a GeoIndexInformation model.
        payload = {
            'geo_index': geo_index_stats_model,
            'name': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = GeoIndexInformation.from_dict(payload)
        assert model != False

        # Rebuild an equivalent instance from the first one's attributes.
        clone = GeoIndexInformation(**GeoIndexInformation.from_dict(payload).__dict__)
        assert model == clone

        # Serialize back to a dict; the round trip must be lossless.
        assert model.to_dict() == payload
class TestModel_GeoIndexStats():
    """
    Test Class for GeoIndexStats
    """

    def test_geo_index_stats_serialization(self):
        """
        Test serialization/deserialization for GeoIndexStats
        """
        # JSON payload for a GeoIndexStats model.
        payload = {
            'data_size': 0,
            'disk_size': 0,
            'doc_count': 0,
        }

        # Deserialize the payload into a model instance.
        model = GeoIndexStats.from_dict(payload)
        assert model != False

        # Rebuild an equivalent instance from the first one's attributes.
        clone = GeoIndexStats(**GeoIndexStats.from_dict(payload).__dict__)
        assert model == clone

        # Serialize back to a dict; the round trip must be lossless.
        assert model.to_dict() == payload
class TestModel_GeoJsonFeature():
    """
    Test Class for GeoJsonFeature
    """

    def test_geo_json_feature_serialization(self):
        """
        Test serialization/deserialization for GeoJsonFeature
        """
        # Nested model dict referenced by the GeoJsonFeature payload.
        geo_json_geometry_object_model = {
            'type': 'Point',
            'coordinates': ['testString'],
        }

        # JSON payload for a GeoJsonFeature model.
        payload = {
            '_id': 'testString',
            '_rev': 'testString',
            'bbox': [72.5],
            'geometry': geo_json_geometry_object_model,
            'properties': {},
            'type': 'Feature',
            'foo': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = GeoJsonFeature.from_dict(payload)
        assert model != False

        # Rebuild an equivalent instance from the first one's attributes.
        clone = GeoJsonFeature(**GeoJsonFeature.from_dict(payload).__dict__)
        assert model == clone

        # Serialize back to a dict; the round trip must be lossless.
        assert model.to_dict() == payload

        # Exercise the dynamic-property accessors (get_properties/set_properties).
        model.set_properties({})
        assert model.get_properties() == {}
        expected_dict = {'foo': 'testString'}
        model.set_properties(expected_dict)
        assert model.get_properties() == expected_dict
class TestModel_GeoResult():
    """
    Test Class for GeoResult
    """

    def test_geo_result_serialization(self):
        """
        Test serialization/deserialization for GeoResult
        """
        # Nested model dicts referenced by the GeoResult payload.
        # NOTE: the generated test previously also built an Attachment dict that
        # was never referenced ('_attachments' is an empty mapping below); that
        # dead local has been removed.
        geo_json_geometry_object_model = {
            'type': 'Point',
            'coordinates': ['testString'],
        }
        geo_json_feature_model = {
            '_id': 'testString',
            '_rev': 'testString',
            'bbox': [72.5],
            'geometry': geo_json_geometry_object_model,
            'properties': {},
            'type': 'Feature',
            'foo': 'testString',
        }
        revisions_model = {
            'ids': ['testString'],
            'start': 1,
        }
        document_revision_status_model = {
            'rev': 'testString',
            'status': 'available',
        }
        document_model = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }
        geo_json_geometry_model = {
            'type': 'Point',
            'coordinates': ['testString'],
        }
        geo_result_row_model = {
            'doc': document_model,
            'geometry': geo_json_geometry_model,
            'id': 'testString',
            'rev': 'testString',
        }

        # JSON payload for a GeoResult model.
        geo_result_model_json = {
            'bookmark': 'testString',
            'features': [geo_json_feature_model],
            'rows': [geo_result_row_model],
            'type': 'FeatureCollection',
        }

        # Deserialize the payload into a model instance.
        geo_result_model = GeoResult.from_dict(geo_result_model_json)
        assert geo_result_model != False

        # Rebuild an equivalent instance from the first one's attributes.
        geo_result_model2 = GeoResult(**GeoResult.from_dict(geo_result_model_json).__dict__)
        assert geo_result_model == geo_result_model2

        # Serialize back to a dict; the round trip must be lossless.
        assert geo_result_model.to_dict() == geo_result_model_json
class TestModel_GeoResultRow():
    """
    Test Class for GeoResultRow
    """

    def test_geo_result_row_serialization(self):
        """
        Test serialization/deserialization for GeoResultRow
        """
        # Nested model dicts referenced by the GeoResultRow payload.
        # NOTE: the generated test previously also built an Attachment dict that
        # was never referenced ('_attachments' is an empty mapping below); that
        # dead local has been removed.
        revisions_model = {
            'ids': ['testString'],
            'start': 1,
        }
        document_revision_status_model = {
            'rev': 'testString',
            'status': 'available',
        }
        document_model = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }
        geo_json_geometry_model = {
            'type': 'Point',
            'coordinates': ['testString'],
        }

        # JSON payload for a GeoResultRow model.
        geo_result_row_model_json = {
            'doc': document_model,
            'geometry': geo_json_geometry_model,
            'id': 'testString',
            'rev': 'testString',
        }

        # Deserialize the payload into a model instance.
        geo_result_row_model = GeoResultRow.from_dict(geo_result_row_model_json)
        assert geo_result_row_model != False

        # Rebuild an equivalent instance from the first one's attributes.
        geo_result_row_model2 = GeoResultRow(**GeoResultRow.from_dict(geo_result_row_model_json).__dict__)
        assert geo_result_row_model == geo_result_row_model2

        # Serialize back to a dict; the round trip must be lossless.
        assert geo_result_row_model.to_dict() == geo_result_row_model_json
class TestModel_IndexDefinition():
    """
    Test Class for IndexDefinition
    """

    def test_index_definition_serialization(self):
        """
        Test serialization/deserialization for IndexDefinition
        """
        # Nested model dicts referenced by the IndexDefinition payload.
        analyzer_model = {
            'name': 'classic',
            'stopwords': ['testString'],
        }
        index_text_operator_default_field_model = {
            'analyzer': analyzer_model,
            'enabled': True,
        }
        index_field_model = {
            'name': 'testString',
            'type': 'boolean',
            'foo': 'asc',
        }

        # JSON payload for an IndexDefinition model.
        payload = {
            'default_analyzer': analyzer_model,
            'default_field': index_text_operator_default_field_model,
            'fields': [index_field_model],
            'index_array_lengths': True,
            'partial_filter_selector': {},
        }

        # Deserialize the payload into a model instance.
        model = IndexDefinition.from_dict(payload)
        assert model != False

        # Rebuild an equivalent instance from the first one's attributes.
        clone = IndexDefinition(**IndexDefinition.from_dict(payload).__dict__)
        assert model == clone

        # Serialize back to a dict; the round trip must be lossless.
        assert model.to_dict() == payload
class TestModel_IndexField():
    """
    Test Class for IndexField
    """

    def test_index_field_serialization(self):
        """
        Test serialization/deserialization for IndexField
        """
        # JSON payload for an IndexField model.
        payload = {
            'name': 'testString',
            'type': 'boolean',
            'foo': 'asc',
        }

        # Deserialize the payload into a model instance.
        model = IndexField.from_dict(payload)
        assert model != False

        # Rebuild an equivalent instance from the first one's attributes.
        clone = IndexField(**IndexField.from_dict(payload).__dict__)
        assert model == clone

        # Serialize back to a dict; the round trip must be lossless.
        assert model.to_dict() == payload

        # Exercise the dynamic-property accessors (get_properties/set_properties).
        model.set_properties({})
        assert model.get_properties() == {}
        expected_dict = {'foo': 'asc'}
        model.set_properties(expected_dict)
        assert model.get_properties() == expected_dict
class TestModel_IndexInformation():
    """
    Test Class for IndexInformation
    """

    def test_index_information_serialization(self):
        """
        Test serialization/deserialization for IndexInformation
        """
        # Nested model dicts referenced by the IndexInformation payload.
        analyzer_model = {
            'name': 'classic',
            'stopwords': ['testString'],
        }
        index_text_operator_default_field_model = {
            'analyzer': analyzer_model,
            'enabled': True,
        }
        index_field_model = {
            'name': 'testString',
            'type': 'boolean',
            'foo': 'asc',
        }
        index_definition_model = {
            'default_analyzer': analyzer_model,
            'default_field': index_text_operator_default_field_model,
            'fields': [index_field_model],
            'index_array_lengths': True,
            'partial_filter_selector': {},
        }

        # JSON payload for an IndexInformation model.
        payload = {
            'ddoc': 'testString',
            'def': index_definition_model,
            'name': 'testString',
            'type': 'json',
        }

        # Deserialize the payload into a model instance.
        model = IndexInformation.from_dict(payload)
        assert model != False

        # Rebuild an equivalent instance from the first one's attributes.
        clone = IndexInformation(**IndexInformation.from_dict(payload).__dict__)
        assert model == clone

        # Serialize back to a dict; the round trip must be lossless.
        assert model.to_dict() == payload
class TestModel_IndexResult():
    """
    Test Class for IndexResult
    """

    def test_index_result_serialization(self):
        """
        Test serialization/deserialization for IndexResult
        """

        # Dict (JSON) representation of an IndexResult model.
        index_result_model_json = {
            'id': 'testString',
            'name': 'testString',
            'result': 'created',
        }

        # Deserialize the dict into a model instance.
        index_result_model = IndexResult.from_dict(index_result_model_json)
        assert index_result_model != False

        # Build a second instance from the deserialized attributes.
        index_result_model_dict = IndexResult.from_dict(index_result_model_json).__dict__
        index_result_model2 = IndexResult(**index_result_model_dict)

        # Both construction paths must yield equal instances.
        assert index_result_model == index_result_model2

        # Round-trip back to a dict and verify no data is lost.
        index_result_model_json2 = index_result_model.to_dict()
        assert index_result_model_json2 == index_result_model_json
class TestModel_IndexTextOperatorDefaultField():
    """
    Test Class for IndexTextOperatorDefaultField
    """

    def test_index_text_operator_default_field_serialization(self):
        """
        Test serialization/deserialization for IndexTextOperatorDefaultField
        """

        # Nested model dict needed to build the top-level model.
        analyzer_model = {  # Analyzer
            'name': 'classic',
            'stopwords': ['testString'],
        }

        # Dict (JSON) representation of an IndexTextOperatorDefaultField model.
        index_text_operator_default_field_model_json = {
            'analyzer': analyzer_model,
            'enabled': True,
        }

        # Deserialize the dict into a model instance.
        index_text_operator_default_field_model = IndexTextOperatorDefaultField.from_dict(index_text_operator_default_field_model_json)
        assert index_text_operator_default_field_model != False

        # Build a second instance from the deserialized attributes.
        index_text_operator_default_field_model_dict = IndexTextOperatorDefaultField.from_dict(index_text_operator_default_field_model_json).__dict__
        index_text_operator_default_field_model2 = IndexTextOperatorDefaultField(**index_text_operator_default_field_model_dict)

        # Both construction paths must yield equal instances.
        assert index_text_operator_default_field_model == index_text_operator_default_field_model2

        # Round-trip back to a dict and verify no data is lost.
        index_text_operator_default_field_model_json2 = index_text_operator_default_field_model.to_dict()
        assert index_text_operator_default_field_model_json2 == index_text_operator_default_field_model_json
class TestModel_IndexesInformation():
    """
    Test Class for IndexesInformation
    """

    def test_indexes_information_serialization(self):
        """
        Test serialization/deserialization for IndexesInformation
        """

        # Nested model dicts needed to build the top-level model.
        analyzer_model = {  # Analyzer
            'name': 'classic',
            'stopwords': ['testString'],
        }

        index_text_operator_default_field_model = {  # IndexTextOperatorDefaultField
            'analyzer': analyzer_model,
            'enabled': True,
        }

        index_field_model = {  # IndexField
            'name': 'testString',
            'type': 'boolean',
            'foo': 'asc',
        }

        index_definition_model = {  # IndexDefinition
            'default_analyzer': analyzer_model,
            'default_field': index_text_operator_default_field_model,
            'fields': [index_field_model],
            'index_array_lengths': True,
            'partial_filter_selector': {},
        }

        index_information_model = {  # IndexInformation
            'ddoc': 'testString',
            'def': index_definition_model,
            'name': 'testString',
            'type': 'json',
        }

        # Dict (JSON) representation of an IndexesInformation model.
        indexes_information_model_json = {
            'total_rows': 0,
            'indexes': [index_information_model],
        }

        # Deserialize the dict into a model instance.
        indexes_information_model = IndexesInformation.from_dict(indexes_information_model_json)
        assert indexes_information_model != False

        # Build a second instance from the deserialized attributes.
        indexes_information_model_dict = IndexesInformation.from_dict(indexes_information_model_json).__dict__
        indexes_information_model2 = IndexesInformation(**indexes_information_model_dict)

        # Both construction paths must yield equal instances.
        assert indexes_information_model == indexes_information_model2

        # Round-trip back to a dict and verify no data is lost.
        indexes_information_model_json2 = indexes_information_model.to_dict()
        assert indexes_information_model_json2 == indexes_information_model_json
class TestModel_MembershipInformation():
    """
    Test Class for MembershipInformation
    """

    def test_membership_information_serialization(self):
        """
        Test serialization/deserialization for MembershipInformation
        """

        # Dict (JSON) representation of a MembershipInformation model.
        membership_information_model_json = {
            'all_nodes': ['testString'],
            'cluster_nodes': ['testString'],
        }

        # Deserialize the dict into a model instance.
        membership_information_model = MembershipInformation.from_dict(membership_information_model_json)
        assert membership_information_model != False

        # Build a second instance from the deserialized attributes.
        membership_information_model_dict = MembershipInformation.from_dict(membership_information_model_json).__dict__
        membership_information_model2 = MembershipInformation(**membership_information_model_dict)

        # Both construction paths must yield equal instances.
        assert membership_information_model == membership_information_model2

        # Round-trip back to a dict and verify no data is lost.
        membership_information_model_json2 = membership_information_model.to_dict()
        assert membership_information_model_json2 == membership_information_model_json
class TestModel_Ok():
    """
    Test Class for Ok
    """

    def test_ok_serialization(self):
        """
        Test serialization/deserialization for Ok
        """

        # Dict (JSON) representation of an Ok model.
        ok_model_json = {'ok': True}

        # Deserialize the dict into a model instance.
        ok_model = Ok.from_dict(ok_model_json)
        assert ok_model != False

        # Build a second instance from the deserialized attributes.
        ok_model_dict = Ok.from_dict(ok_model_json).__dict__
        ok_model2 = Ok(**ok_model_dict)

        # Both construction paths must yield equal instances.
        assert ok_model == ok_model2

        # Round-trip back to a dict and verify no data is lost.
        ok_model_json2 = ok_model.to_dict()
        assert ok_model_json2 == ok_model_json
class TestModel_PartitionInformation():
    """
    Test Class for PartitionInformation
    """

    def test_partition_information_serialization(self):
        """
        Test serialization/deserialization for PartitionInformation
        """

        # Nested model dicts needed to build the top-level model.
        partition_information_indexes_indexes_model = {  # PartitionInformationIndexesIndexes
            'search': 0,
            'view': 0,
        }

        partition_information_indexes_model = {  # PartitionInformationIndexes
            'count': 0,
            'indexes': partition_information_indexes_indexes_model,
            'limit': 0,
        }

        partition_information_sizes_model = {  # PartitionInformationSizes
            'active': 0,
            'external': 0,
        }

        # Dict (JSON) representation of a PartitionInformation model.
        partition_information_model_json = {
            'db_name': 'testString',
            'doc_count': 0,
            'doc_del_count': 0,
            'partition': 'testString',
            'partitioned_indexes': partition_information_indexes_model,
            'sizes': partition_information_sizes_model,
        }

        # Deserialize the dict into a model instance.
        partition_information_model = PartitionInformation.from_dict(partition_information_model_json)
        assert partition_information_model != False

        # Build a second instance from the deserialized attributes.
        partition_information_model_dict = PartitionInformation.from_dict(partition_information_model_json).__dict__
        partition_information_model2 = PartitionInformation(**partition_information_model_dict)

        # Both construction paths must yield equal instances.
        assert partition_information_model == partition_information_model2

        # Round-trip back to a dict and verify no data is lost.
        partition_information_model_json2 = partition_information_model.to_dict()
        assert partition_information_model_json2 == partition_information_model_json
class TestModel_PartitionInformationIndexes():
    """
    Test Class for PartitionInformationIndexes
    """

    def test_partition_information_indexes_serialization(self):
        """
        Test serialization/deserialization for PartitionInformationIndexes
        """

        # Nested model dict needed to build the top-level model.
        partition_information_indexes_indexes_model = {  # PartitionInformationIndexesIndexes
            'search': 0,
            'view': 0,
        }

        # Dict (JSON) representation of a PartitionInformationIndexes model.
        partition_information_indexes_model_json = {
            'count': 0,
            'indexes': partition_information_indexes_indexes_model,
            'limit': 0,
        }

        # Deserialize the dict into a model instance.
        partition_information_indexes_model = PartitionInformationIndexes.from_dict(partition_information_indexes_model_json)
        assert partition_information_indexes_model != False

        # Build a second instance from the deserialized attributes.
        partition_information_indexes_model_dict = PartitionInformationIndexes.from_dict(partition_information_indexes_model_json).__dict__
        partition_information_indexes_model2 = PartitionInformationIndexes(**partition_information_indexes_model_dict)

        # Both construction paths must yield equal instances.
        assert partition_information_indexes_model == partition_information_indexes_model2

        # Round-trip back to a dict and verify no data is lost.
        partition_information_indexes_model_json2 = partition_information_indexes_model.to_dict()
        assert partition_information_indexes_model_json2 == partition_information_indexes_model_json
class TestModel_PartitionInformationIndexesIndexes():
    """
    Test Class for PartitionInformationIndexesIndexes
    """

    def test_partition_information_indexes_indexes_serialization(self):
        """
        Test serialization/deserialization for PartitionInformationIndexesIndexes
        """

        # Dict (JSON) representation of a PartitionInformationIndexesIndexes model.
        partition_information_indexes_indexes_model_json = {
            'search': 0,
            'view': 0,
        }

        # Deserialize the dict into a model instance.
        partition_information_indexes_indexes_model = PartitionInformationIndexesIndexes.from_dict(partition_information_indexes_indexes_model_json)
        assert partition_information_indexes_indexes_model != False

        # Build a second instance from the deserialized attributes.
        partition_information_indexes_indexes_model_dict = PartitionInformationIndexesIndexes.from_dict(partition_information_indexes_indexes_model_json).__dict__
        partition_information_indexes_indexes_model2 = PartitionInformationIndexesIndexes(**partition_information_indexes_indexes_model_dict)

        # Both construction paths must yield equal instances.
        assert partition_information_indexes_indexes_model == partition_information_indexes_indexes_model2

        # Round-trip back to a dict and verify no data is lost.
        partition_information_indexes_indexes_model_json2 = partition_information_indexes_indexes_model.to_dict()
        assert partition_information_indexes_indexes_model_json2 == partition_information_indexes_indexes_model_json
class TestModel_PartitionInformationSizes():
    """
    Test Class for PartitionInformationSizes
    """

    def test_partition_information_sizes_serialization(self):
        """
        Test serialization/deserialization for PartitionInformationSizes
        """

        # Dict (JSON) representation of a PartitionInformationSizes model.
        partition_information_sizes_model_json = {
            'active': 0,
            'external': 0,
        }

        # Deserialize the dict into a model instance.
        partition_information_sizes_model = PartitionInformationSizes.from_dict(partition_information_sizes_model_json)
        assert partition_information_sizes_model != False

        # Build a second instance from the deserialized attributes.
        partition_information_sizes_model_dict = PartitionInformationSizes.from_dict(partition_information_sizes_model_json).__dict__
        partition_information_sizes_model2 = PartitionInformationSizes(**partition_information_sizes_model_dict)

        # Both construction paths must yield equal instances.
        assert partition_information_sizes_model == partition_information_sizes_model2

        # Round-trip back to a dict and verify no data is lost.
        partition_information_sizes_model_json2 = partition_information_sizes_model.to_dict()
        assert partition_information_sizes_model_json2 == partition_information_sizes_model_json
class TestModel_ReplicationCreateTargetParameters():
    """
    Test Class for ReplicationCreateTargetParameters
    """

    def test_replication_create_target_parameters_serialization(self):
        """
        Test serialization/deserialization for ReplicationCreateTargetParameters
        """

        # Dict (JSON) representation of a ReplicationCreateTargetParameters model.
        replication_create_target_parameters_model_json = {
            'n': 1,
            'partitioned': False,
            'q': 1,
        }

        # Deserialize the dict into a model instance.
        replication_create_target_parameters_model = ReplicationCreateTargetParameters.from_dict(replication_create_target_parameters_model_json)
        assert replication_create_target_parameters_model != False

        # Build a second instance from the deserialized attributes.
        replication_create_target_parameters_model_dict = ReplicationCreateTargetParameters.from_dict(replication_create_target_parameters_model_json).__dict__
        replication_create_target_parameters_model2 = ReplicationCreateTargetParameters(**replication_create_target_parameters_model_dict)

        # Both construction paths must yield equal instances.
        assert replication_create_target_parameters_model == replication_create_target_parameters_model2

        # Round-trip back to a dict and verify no data is lost.
        replication_create_target_parameters_model_json2 = replication_create_target_parameters_model.to_dict()
        assert replication_create_target_parameters_model_json2 == replication_create_target_parameters_model_json
class TestModel_ReplicationDatabase():
    """
    Test Class for ReplicationDatabase
    """

    def test_replication_database_serialization(self):
        """
        Test serialization/deserialization for ReplicationDatabase
        """

        # Nested model dicts needed to build the top-level model.
        replication_database_auth_basic_model = {  # ReplicationDatabaseAuthBasic
            'password': 'testString',
            'username': 'testString',
        }

        replication_database_auth_iam_model = {  # ReplicationDatabaseAuthIam
            'api_key': 'testString',
        }

        replication_database_auth_model = {  # ReplicationDatabaseAuth
            'basic': replication_database_auth_basic_model,
            'iam': replication_database_auth_iam_model,
        }

        # Dict (JSON) representation of a ReplicationDatabase model.
        replication_database_model_json = {
            'auth': replication_database_auth_model,
            'headers': {},
            'url': 'testString',
        }

        # Deserialize the dict into a model instance.
        replication_database_model = ReplicationDatabase.from_dict(replication_database_model_json)
        assert replication_database_model != False

        # Build a second instance from the deserialized attributes.
        replication_database_model_dict = ReplicationDatabase.from_dict(replication_database_model_json).__dict__
        replication_database_model2 = ReplicationDatabase(**replication_database_model_dict)

        # Both construction paths must yield equal instances.
        assert replication_database_model == replication_database_model2

        # Round-trip back to a dict and verify no data is lost.
        replication_database_model_json2 = replication_database_model.to_dict()
        assert replication_database_model_json2 == replication_database_model_json
class TestModel_ReplicationDatabaseAuth():
    """
    Test Class for ReplicationDatabaseAuth
    """

    def test_replication_database_auth_serialization(self):
        """
        Test serialization/deserialization for ReplicationDatabaseAuth
        """

        # Nested model dicts needed to build the top-level model.
        replication_database_auth_basic_model = {  # ReplicationDatabaseAuthBasic
            'password': 'testString',
            'username': 'testString',
        }

        replication_database_auth_iam_model = {  # ReplicationDatabaseAuthIam
            'api_key': 'testString',
        }

        # Dict (JSON) representation of a ReplicationDatabaseAuth model.
        replication_database_auth_model_json = {
            'basic': replication_database_auth_basic_model,
            'iam': replication_database_auth_iam_model,
        }

        # Deserialize the dict into a model instance.
        replication_database_auth_model = ReplicationDatabaseAuth.from_dict(replication_database_auth_model_json)
        assert replication_database_auth_model != False

        # Build a second instance from the deserialized attributes.
        replication_database_auth_model_dict = ReplicationDatabaseAuth.from_dict(replication_database_auth_model_json).__dict__
        replication_database_auth_model2 = ReplicationDatabaseAuth(**replication_database_auth_model_dict)

        # Both construction paths must yield equal instances.
        assert replication_database_auth_model == replication_database_auth_model2

        # Round-trip back to a dict and verify no data is lost.
        replication_database_auth_model_json2 = replication_database_auth_model.to_dict()
        assert replication_database_auth_model_json2 == replication_database_auth_model_json
class TestModel_ReplicationDatabaseAuthBasic():
    """
    Test Class for ReplicationDatabaseAuthBasic
    """

    def test_replication_database_auth_basic_serialization(self):
        """
        Test serialization/deserialization for ReplicationDatabaseAuthBasic
        """

        # Dict (JSON) representation of a ReplicationDatabaseAuthBasic model.
        replication_database_auth_basic_model_json = {
            'password': 'testString',
            'username': 'testString',
        }

        # Deserialize the dict into a model instance.
        replication_database_auth_basic_model = ReplicationDatabaseAuthBasic.from_dict(replication_database_auth_basic_model_json)
        assert replication_database_auth_basic_model != False

        # Build a second instance from the deserialized attributes.
        replication_database_auth_basic_model_dict = ReplicationDatabaseAuthBasic.from_dict(replication_database_auth_basic_model_json).__dict__
        replication_database_auth_basic_model2 = ReplicationDatabaseAuthBasic(**replication_database_auth_basic_model_dict)

        # Both construction paths must yield equal instances.
        assert replication_database_auth_basic_model == replication_database_auth_basic_model2

        # Round-trip back to a dict and verify no data is lost.
        replication_database_auth_basic_model_json2 = replication_database_auth_basic_model.to_dict()
        assert replication_database_auth_basic_model_json2 == replication_database_auth_basic_model_json
class TestModel_ReplicationDatabaseAuthIam():
    """
    Test Class for ReplicationDatabaseAuthIam
    """

    def test_replication_database_auth_iam_serialization(self):
        """
        Test serialization/deserialization for ReplicationDatabaseAuthIam
        """

        # Dict (JSON) representation of a ReplicationDatabaseAuthIam model.
        replication_database_auth_iam_model_json = {'api_key': 'testString'}

        # Deserialize the dict into a model instance.
        replication_database_auth_iam_model = ReplicationDatabaseAuthIam.from_dict(replication_database_auth_iam_model_json)
        assert replication_database_auth_iam_model != False

        # Build a second instance from the deserialized attributes.
        replication_database_auth_iam_model_dict = ReplicationDatabaseAuthIam.from_dict(replication_database_auth_iam_model_json).__dict__
        replication_database_auth_iam_model2 = ReplicationDatabaseAuthIam(**replication_database_auth_iam_model_dict)

        # Both construction paths must yield equal instances.
        assert replication_database_auth_iam_model == replication_database_auth_iam_model2

        # Round-trip back to a dict and verify no data is lost.
        replication_database_auth_iam_model_json2 = replication_database_auth_iam_model.to_dict()
        assert replication_database_auth_iam_model_json2 == replication_database_auth_iam_model_json
class TestModel_ReplicationDocument():
    """
    Test Class for ReplicationDocument
    """

    def test_replication_document_serialization(self):
        """
        Test serialization/deserialization for ReplicationDocument
        """

        # Nested model dicts needed to build the top-level model.
        attachment_model = {  # Attachment
            'content_type': 'testString',
            'data': 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4=',
            'digest': 'testString',
            'encoded_length': 0,
            'encoding': 'testString',
            'follows': True,
            'length': 0,
            'revpos': 1,
            'stub': True,
        }

        revisions_model = {  # Revisions
            'ids': ['testString'],
            'start': 1,
        }

        document_revision_status_model = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }

        replication_create_target_parameters_model = {  # ReplicationCreateTargetParameters
            'n': 1,
            'partitioned': False,
            'q': 1,
        }

        replication_database_auth_basic_model = {  # ReplicationDatabaseAuthBasic
            'password': 'testString',
            'username': 'testString',
        }

        replication_database_auth_iam_model = {  # ReplicationDatabaseAuthIam
            'api_key': 'testString',
        }

        replication_database_auth_model = {  # ReplicationDatabaseAuth
            'basic': replication_database_auth_basic_model,
            'iam': replication_database_auth_iam_model,
        }

        replication_database_model = {  # ReplicationDatabase
            'auth': replication_database_auth_model,
            'headers': {},
            'url': 'testString',
        }

        user_context_model = {  # UserContext
            'db': 'testString',
            'name': 'testString',
            'roles': ['_reader'],
        }

        # Dict (JSON) representation of a ReplicationDocument model.
        replication_document_model_json = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'cancel': True,
            'checkpoint_interval': 0,
            'connection_timeout': 0,
            'continuous': False,
            'create_target': False,
            'create_target_params': replication_create_target_parameters_model,
            'doc_ids': ['testString'],
            'filter': 'testString',
            'http_connections': 1,
            'query_params': {},
            'retries_per_request': 0,
            'selector': {},
            'since_seq': 'testString',
            'socket_options': 'testString',
            'source': replication_database_model,
            'source_proxy': 'testString',
            'target': replication_database_model,
            'target_proxy': 'testString',
            'use_checkpoints': True,
            'user_ctx': user_context_model,
            'worker_batch_size': 1,
            'worker_processes': 1,
            'foo': 'testString',
        }

        # Deserialize the dict into a model instance.
        replication_document_model = ReplicationDocument.from_dict(replication_document_model_json)
        assert replication_document_model != False

        # Build a second instance from the deserialized attributes.
        replication_document_model_dict = ReplicationDocument.from_dict(replication_document_model_json).__dict__
        replication_document_model2 = ReplicationDocument(**replication_document_model_dict)

        # Both construction paths must yield equal instances.
        assert replication_document_model == replication_document_model2

        # Round-trip back to a dict and verify no data is lost.
        replication_document_model_json2 = replication_document_model.to_dict()
        assert replication_document_model_json2 == replication_document_model_json

        # Exercise get_properties / set_properties for arbitrary properties:
        # clearing drops the extra 'foo' property, then setting restores it.
        replication_document_model.set_properties({})
        actual_dict = replication_document_model.get_properties()
        assert actual_dict == {}

        expected_dict = {'foo': 'testString'}
        replication_document_model.set_properties(expected_dict)
        actual_dict = replication_document_model.get_properties()
        assert actual_dict == expected_dict
class TestModel_Revisions():
    """
    Test Class for Revisions
    """

    def test_revisions_serialization(self):
        """
        Test serialization/deserialization for Revisions
        """

        # Dict (JSON) representation of a Revisions model.
        revisions_model_json = {
            'ids': ['testString'],
            'start': 1,
        }

        # Deserialize the dict into a model instance.
        revisions_model = Revisions.from_dict(revisions_model_json)
        assert revisions_model != False

        # Build a second instance from the deserialized attributes.
        revisions_model_dict = Revisions.from_dict(revisions_model_json).__dict__
        revisions_model2 = Revisions(**revisions_model_dict)

        # Both construction paths must yield equal instances.
        assert revisions_model == revisions_model2

        # Round-trip back to a dict and verify no data is lost.
        revisions_model_json2 = revisions_model.to_dict()
        assert revisions_model_json2 == revisions_model_json
class TestModel_RevsDiff():
    """
    Test Class for RevsDiff
    """

    def test_revs_diff_serialization(self):
        """
        Test serialization/deserialization for RevsDiff
        """

        # Dict (JSON) representation of a RevsDiff model.
        revs_diff_model_json = {
            'missing': ['testString'],
            'possible_ancestors': ['testString'],
        }

        # Deserialize the dict into a model instance.
        revs_diff_model = RevsDiff.from_dict(revs_diff_model_json)
        assert revs_diff_model != False

        # Build a second instance from the deserialized attributes.
        revs_diff_model_dict = RevsDiff.from_dict(revs_diff_model_json).__dict__
        revs_diff_model2 = RevsDiff(**revs_diff_model_dict)

        # Both construction paths must yield equal instances.
        assert revs_diff_model == revs_diff_model2

        # Round-trip back to a dict and verify no data is lost.
        revs_diff_model_json2 = revs_diff_model.to_dict()
        assert revs_diff_model_json2 == revs_diff_model_json
class TestModel_SchedulerDocsResult():
    """
    Test Class for SchedulerDocsResult
    """

    def test_scheduler_docs_result_serialization(self):
        """
        Test serialization/deserialization for SchedulerDocsResult
        """

        # Nested model dicts needed to build the top-level model.
        scheduler_info_model = {  # SchedulerInfo
            'changes_pending': 0,
            'checkpointed_source_seq': 'testString',
            'doc_write_failures': 0,
            'docs_read': 0,
            'docs_written': 0,
            'error': 'testString',
            'missing_revisions_found': 0,
            'revisions_checked': 0,
            'source_seq': 'testString',
            'through_seq': 'testString',
        }

        scheduler_document_model = {  # SchedulerDocument
            'database': 'testString',
            'doc_id': 'testString',
            'error_count': 0,
            'id': 'testString',
            'info': scheduler_info_model,
            'last_updated': "2019-01-01T12:00:00Z",
            'node': 'testString',
            'source': 'testString',
            'source_proxy': 'testString',
            'start_time': "2019-01-01T12:00:00Z",
            'state': 'initializing',
            'target': 'testString',
            'target_proxy': 'testString',
        }

        # Dict (JSON) representation of a SchedulerDocsResult model.
        scheduler_docs_result_model_json = {
            'total_rows': 0,
            'docs': [scheduler_document_model],
        }

        # Deserialize the dict into a model instance.
        scheduler_docs_result_model = SchedulerDocsResult.from_dict(scheduler_docs_result_model_json)
        assert scheduler_docs_result_model != False

        # Build a second instance from the deserialized attributes.
        scheduler_docs_result_model_dict = SchedulerDocsResult.from_dict(scheduler_docs_result_model_json).__dict__
        scheduler_docs_result_model2 = SchedulerDocsResult(**scheduler_docs_result_model_dict)

        # Both construction paths must yield equal instances.
        assert scheduler_docs_result_model == scheduler_docs_result_model2

        # Round-trip back to a dict and verify no data is lost.
        scheduler_docs_result_model_json2 = scheduler_docs_result_model.to_dict()
        assert scheduler_docs_result_model_json2 == scheduler_docs_result_model_json
class TestModel_SchedulerDocument():
    """
    Test Class for SchedulerDocument
    """

    def test_scheduler_document_serialization(self):
        """
        Test serialization/deserialization for SchedulerDocument
        """
        # Nested SchedulerInfo payload required by the 'info' field.
        info_payload = {
            'changes_pending': 0,
            'checkpointed_source_seq': 'testString',
            'doc_write_failures': 0,
            'docs_read': 0,
            'docs_written': 0,
            'error': 'testString',
            'missing_revisions_found': 0,
            'revisions_checked': 0,
            'source_seq': 'testString',
            'through_seq': 'testString',
        }

        # JSON payload covering every SchedulerDocument field.
        document_json = {
            'database': 'testString',
            'doc_id': 'testString',
            'error_count': 0,
            'id': 'testString',
            'info': info_payload,
            'last_updated': '2019-01-01T12:00:00Z',
            'node': 'testString',
            'source': 'testString',
            'source_proxy': 'testString',
            'start_time': '2019-01-01T12:00:00Z',
            'state': 'initializing',
            'target': 'testString',
            'target_proxy': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = SchedulerDocument.from_dict(document_json)
        assert model != False

        # Build a second instance from the first one's attribute dict and
        # verify the two compare equal.
        model_twin = SchedulerDocument(**SchedulerDocument.from_dict(document_json).__dict__)
        assert model == model_twin

        # Serializing back must reproduce the original payload exactly.
        assert model.to_dict() == document_json
class TestModel_SchedulerInfo():
    """
    Test Class for SchedulerInfo
    """

    def test_scheduler_info_serialization(self):
        """
        Test serialization/deserialization for SchedulerInfo
        """
        # JSON payload covering every SchedulerInfo field.
        info_json = {
            'changes_pending': 0,
            'checkpointed_source_seq': 'testString',
            'doc_write_failures': 0,
            'docs_read': 0,
            'docs_written': 0,
            'error': 'testString',
            'missing_revisions_found': 0,
            'revisions_checked': 0,
            'source_seq': 'testString',
            'through_seq': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = SchedulerInfo.from_dict(info_json)
        assert model != False

        # Build a second instance from the first one's attribute dict and
        # verify the two compare equal.
        model_twin = SchedulerInfo(**SchedulerInfo.from_dict(info_json).__dict__)
        assert model == model_twin

        # Serializing back must reproduce the original payload exactly.
        assert model.to_dict() == info_json
class TestModel_SchedulerJob():
    """
    Test Class for SchedulerJob
    """

    def test_scheduler_job_serialization(self):
        """
        Test serialization/deserialization for SchedulerJob
        """
        # Nested SchedulerJobEvent payload for the 'history' list.
        job_event = {
            'reason': 'testString',
            'timestamp': '2019-01-01T12:00:00Z',
            'type': 'testString',
        }

        # Nested SchedulerInfo payload for the 'info' field.
        info_payload = {
            'changes_pending': 0,
            'checkpointed_source_seq': 'testString',
            'doc_write_failures': 0,
            'docs_read': 0,
            'docs_written': 0,
            'error': 'testString',
            'missing_revisions_found': 0,
            'revisions_checked': 0,
            'source_seq': 'testString',
            'through_seq': 'testString',
        }

        # JSON payload covering every SchedulerJob field.
        job_json = {
            'database': 'testString',
            'doc_id': 'testString',
            'history': [job_event],
            'id': 'testString',
            'info': info_payload,
            'node': 'testString',
            'pid': 'testString',
            'source': 'testString',
            'start_time': '2019-01-01T12:00:00Z',
            'target': 'testString',
            'user': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = SchedulerJob.from_dict(job_json)
        assert model != False

        # Build a second instance from the first one's attribute dict and
        # verify the two compare equal.
        model_twin = SchedulerJob(**SchedulerJob.from_dict(job_json).__dict__)
        assert model == model_twin

        # Serializing back must reproduce the original payload exactly.
        assert model.to_dict() == job_json
class TestModel_SchedulerJobEvent():
    """
    Test Class for SchedulerJobEvent
    """

    def test_scheduler_job_event_serialization(self):
        """
        Test serialization/deserialization for SchedulerJobEvent
        """
        # JSON payload covering every SchedulerJobEvent field.
        event_json = {
            'reason': 'testString',
            'timestamp': '2019-01-01T12:00:00Z',
            'type': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = SchedulerJobEvent.from_dict(event_json)
        assert model != False

        # Build a second instance from the first one's attribute dict and
        # verify the two compare equal.
        model_twin = SchedulerJobEvent(**SchedulerJobEvent.from_dict(event_json).__dict__)
        assert model == model_twin

        # Serializing back must reproduce the original payload exactly.
        assert model.to_dict() == event_json
class TestModel_SchedulerJobsResult():
    """
    Test Class for SchedulerJobsResult
    """

    def test_scheduler_jobs_result_serialization(self):
        """
        Test serialization/deserialization for SchedulerJobsResult
        """
        # Nested SchedulerJobEvent payload for the job's 'history' list.
        job_event = {
            'reason': 'testString',
            'timestamp': '2019-01-01T12:00:00Z',
            'type': 'testString',
        }

        # Nested SchedulerInfo payload for the job's 'info' field.
        info_payload = {
            'changes_pending': 0,
            'checkpointed_source_seq': 'testString',
            'doc_write_failures': 0,
            'docs_read': 0,
            'docs_written': 0,
            'error': 'testString',
            'missing_revisions_found': 0,
            'revisions_checked': 0,
            'source_seq': 'testString',
            'through_seq': 'testString',
        }

        # Nested SchedulerJob payload for the 'jobs' list.
        job_payload = {
            'database': 'testString',
            'doc_id': 'testString',
            'history': [job_event],
            'id': 'testString',
            'info': info_payload,
            'node': 'testString',
            'pid': 'testString',
            'source': 'testString',
            'start_time': '2019-01-01T12:00:00Z',
            'target': 'testString',
            'user': 'testString',
        }

        # JSON payload covering every SchedulerJobsResult field.
        result_json = {
            'total_rows': 0,
            'jobs': [job_payload],
        }

        # Deserialize the payload into a model instance.
        model = SchedulerJobsResult.from_dict(result_json)
        assert model != False

        # Build a second instance from the first one's attribute dict and
        # verify the two compare equal.
        model_twin = SchedulerJobsResult(**SchedulerJobsResult.from_dict(result_json).__dict__)
        assert model == model_twin

        # Serializing back must reproduce the original payload exactly.
        assert model.to_dict() == result_json
class TestModel_SearchAnalyzeResult():
    """
    Test Class for SearchAnalyzeResult
    """

    def test_search_analyze_result_serialization(self):
        """
        Test serialization/deserialization for SearchAnalyzeResult
        """
        # JSON payload covering every SearchAnalyzeResult field.
        result_json = {
            'tokens': ['testString'],
        }

        # Deserialize the payload into a model instance.
        model = SearchAnalyzeResult.from_dict(result_json)
        assert model != False

        # Build a second instance from the first one's attribute dict and
        # verify the two compare equal.
        model_twin = SearchAnalyzeResult(**SearchAnalyzeResult.from_dict(result_json).__dict__)
        assert model == model_twin

        # Serializing back must reproduce the original payload exactly.
        assert model.to_dict() == result_json
class TestModel_SearchIndexDefinition():
    """
    Test Class for SearchIndexDefinition
    """

    def test_search_index_definition_serialization(self):
        """
        Test serialization/deserialization for SearchIndexDefinition
        """
        # Nested AnalyzerConfiguration dict required by the 'analyzer' field.
        # NOTE: a plain-Analyzer dict previously built here was never used
        # (the model's 'analyzer' field takes an AnalyzerConfiguration), so
        # that dead local has been removed.
        analyzer_configuration_model = {}  # AnalyzerConfiguration
        analyzer_configuration_model['name'] = 'classic'
        analyzer_configuration_model['stopwords'] = ['testString']
        analyzer_configuration_model['fields'] = {}

        # Construct a json representation of a SearchIndexDefinition model
        search_index_definition_model_json = {}
        search_index_definition_model_json['analyzer'] = analyzer_configuration_model
        search_index_definition_model_json['index'] = 'testString'

        # Construct a model instance of SearchIndexDefinition by calling from_dict on the json representation
        search_index_definition_model = SearchIndexDefinition.from_dict(search_index_definition_model_json)
        assert search_index_definition_model != False

        # Construct a second instance from the first instance's attribute dict
        search_index_definition_model_dict = SearchIndexDefinition.from_dict(search_index_definition_model_json).__dict__
        search_index_definition_model2 = SearchIndexDefinition(**search_index_definition_model_dict)

        # Verify the model instances are equivalent
        assert search_index_definition_model == search_index_definition_model2

        # Convert model instance back to dict and verify no loss of data
        search_index_definition_model_json2 = search_index_definition_model.to_dict()
        assert search_index_definition_model_json2 == search_index_definition_model_json
class TestModel_SearchIndexInfo():
    """
    Test Class for SearchIndexInfo
    """

    def test_search_index_info_serialization(self):
        """
        Test serialization/deserialization for SearchIndexInfo
        """
        # JSON payload covering every SearchIndexInfo field.
        index_info_json = {
            'committed_seq': 26,
            'disk_size': 0,
            'doc_count': 0,
            'doc_del_count': 0,
            'pending_seq': 26,
        }

        # Deserialize the payload into a model instance.
        model = SearchIndexInfo.from_dict(index_info_json)
        assert model != False

        # Build a second instance from the first one's attribute dict and
        # verify the two compare equal.
        model_twin = SearchIndexInfo(**SearchIndexInfo.from_dict(index_info_json).__dict__)
        assert model == model_twin

        # Serializing back must reproduce the original payload exactly.
        assert model.to_dict() == index_info_json
class TestModel_SearchInfoResult():
    """
    Test Class for SearchInfoResult
    """

    def test_search_info_result_serialization(self):
        """
        Test serialization/deserialization for SearchInfoResult
        """
        # Nested SearchIndexInfo payload for the 'search_index' field.
        index_info = {
            'committed_seq': 26,
            'disk_size': 0,
            'doc_count': 0,
            'doc_del_count': 0,
            'pending_seq': 26,
        }

        # JSON payload covering every SearchInfoResult field.
        result_json = {
            'name': 'testString',
            'search_index': index_info,
        }

        # Deserialize the payload into a model instance.
        model = SearchInfoResult.from_dict(result_json)
        assert model != False

        # Build a second instance from the first one's attribute dict and
        # verify the two compare equal.
        model_twin = SearchInfoResult(**SearchInfoResult.from_dict(result_json).__dict__)
        assert model == model_twin

        # Serializing back must reproduce the original payload exactly.
        assert model.to_dict() == result_json
class TestModel_SearchResult():
    """
    Test Class for SearchResult
    """

    def test_search_result_serialization(self):
        """
        Test serialization/deserialization for SearchResult
        """
        # Construct dict forms of any model objects needed in order to build this model.
        # NOTE: an Attachment dict previously built here was never used — the
        # Document below carries an empty '_attachments' mapping — so that
        # dead local has been removed.
        revisions_model = {}  # Revisions
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1

        document_revision_status_model = {}  # DocumentRevisionStatus
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'

        document_model = {}  # Document
        document_model['_attachments'] = {}
        document_model['_conflicts'] = ['testString']
        document_model['_deleted'] = True
        document_model['_deleted_conflicts'] = ['testString']
        document_model['_id'] = 'testString'
        document_model['_local_seq'] = 'testString'
        document_model['_rev'] = 'testString'
        document_model['_revisions'] = revisions_model
        document_model['_revs_info'] = [document_revision_status_model]
        document_model['foo'] = 'testString'

        search_result_row_model = {}  # SearchResultRow
        search_result_row_model['doc'] = document_model
        search_result_row_model['fields'] = {}
        search_result_row_model['highlights'] = {}
        search_result_row_model['id'] = 'testString'

        search_result_properties_model = {}  # SearchResultProperties
        search_result_properties_model['total_rows'] = 0
        search_result_properties_model['bookmark'] = 'testString'
        search_result_properties_model['by'] = 'testString'
        search_result_properties_model['counts'] = {}
        search_result_properties_model['ranges'] = {}
        search_result_properties_model['rows'] = [search_result_row_model]

        # Construct a json representation of a SearchResult model
        search_result_model_json = {}
        search_result_model_json['total_rows'] = 0
        search_result_model_json['bookmark'] = 'testString'
        search_result_model_json['by'] = 'testString'
        search_result_model_json['counts'] = {}
        search_result_model_json['ranges'] = {}
        search_result_model_json['rows'] = [search_result_row_model]
        search_result_model_json['groups'] = [search_result_properties_model]

        # Construct a model instance of SearchResult by calling from_dict on the json representation
        search_result_model = SearchResult.from_dict(search_result_model_json)
        assert search_result_model != False

        # Construct a second instance from the first instance's attribute dict
        search_result_model_dict = SearchResult.from_dict(search_result_model_json).__dict__
        search_result_model2 = SearchResult(**search_result_model_dict)

        # Verify the model instances are equivalent
        assert search_result_model == search_result_model2

        # Convert model instance back to dict and verify no loss of data
        search_result_model_json2 = search_result_model.to_dict()
        assert search_result_model_json2 == search_result_model_json
class TestModel_SearchResultProperties():
    """
    Test Class for SearchResultProperties
    """

    def test_search_result_properties_serialization(self):
        """
        Test serialization/deserialization for SearchResultProperties
        """
        # Construct dict forms of any model objects needed in order to build this model.
        # NOTE: an Attachment dict previously built here was never used — the
        # Document below carries an empty '_attachments' mapping — so that
        # dead local has been removed.
        revisions_model = {}  # Revisions
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1

        document_revision_status_model = {}  # DocumentRevisionStatus
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'

        document_model = {}  # Document
        document_model['_attachments'] = {}
        document_model['_conflicts'] = ['testString']
        document_model['_deleted'] = True
        document_model['_deleted_conflicts'] = ['testString']
        document_model['_id'] = 'testString'
        document_model['_local_seq'] = 'testString'
        document_model['_rev'] = 'testString'
        document_model['_revisions'] = revisions_model
        document_model['_revs_info'] = [document_revision_status_model]
        document_model['foo'] = 'testString'

        search_result_row_model = {}  # SearchResultRow
        search_result_row_model['doc'] = document_model
        search_result_row_model['fields'] = {}
        search_result_row_model['highlights'] = {}
        search_result_row_model['id'] = 'testString'

        # Construct a json representation of a SearchResultProperties model
        search_result_properties_model_json = {}
        search_result_properties_model_json['total_rows'] = 0
        search_result_properties_model_json['bookmark'] = 'testString'
        search_result_properties_model_json['by'] = 'testString'
        search_result_properties_model_json['counts'] = {}
        search_result_properties_model_json['ranges'] = {}
        search_result_properties_model_json['rows'] = [search_result_row_model]

        # Construct a model instance of SearchResultProperties by calling from_dict on the json representation
        search_result_properties_model = SearchResultProperties.from_dict(search_result_properties_model_json)
        assert search_result_properties_model != False

        # Construct a second instance from the first instance's attribute dict
        search_result_properties_model_dict = SearchResultProperties.from_dict(search_result_properties_model_json).__dict__
        search_result_properties_model2 = SearchResultProperties(**search_result_properties_model_dict)

        # Verify the model instances are equivalent
        assert search_result_properties_model == search_result_properties_model2

        # Convert model instance back to dict and verify no loss of data
        search_result_properties_model_json2 = search_result_properties_model.to_dict()
        assert search_result_properties_model_json2 == search_result_properties_model_json
class TestModel_SearchResultRow():
    """
    Test Class for SearchResultRow
    """

    def test_search_result_row_serialization(self):
        """
        Test serialization/deserialization for SearchResultRow
        """
        # Construct dict forms of any model objects needed in order to build this model.
        # NOTE: an Attachment dict previously built here was never used — the
        # Document below carries an empty '_attachments' mapping — so that
        # dead local has been removed.
        revisions_model = {}  # Revisions
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1

        document_revision_status_model = {}  # DocumentRevisionStatus
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'

        document_model = {}  # Document
        document_model['_attachments'] = {}
        document_model['_conflicts'] = ['testString']
        document_model['_deleted'] = True
        document_model['_deleted_conflicts'] = ['testString']
        document_model['_id'] = 'testString'
        document_model['_local_seq'] = 'testString'
        document_model['_rev'] = 'testString'
        document_model['_revisions'] = revisions_model
        document_model['_revs_info'] = [document_revision_status_model]
        document_model['foo'] = 'testString'

        # Construct a json representation of a SearchResultRow model
        search_result_row_model_json = {}
        search_result_row_model_json['doc'] = document_model
        search_result_row_model_json['fields'] = {}
        search_result_row_model_json['highlights'] = {}
        search_result_row_model_json['id'] = 'testString'

        # Construct a model instance of SearchResultRow by calling from_dict on the json representation
        search_result_row_model = SearchResultRow.from_dict(search_result_row_model_json)
        assert search_result_row_model != False

        # Construct a second instance from the first instance's attribute dict
        search_result_row_model_dict = SearchResultRow.from_dict(search_result_row_model_json).__dict__
        search_result_row_model2 = SearchResultRow(**search_result_row_model_dict)

        # Verify the model instances are equivalent
        assert search_result_row_model == search_result_row_model2

        # Convert model instance back to dict and verify no loss of data
        search_result_row_model_json2 = search_result_row_model.to_dict()
        assert search_result_row_model_json2 == search_result_row_model_json
class TestModel_Security():
    """
    Test Class for Security
    """

    def test_security_serialization(self):
        """
        Test serialization/deserialization for Security
        """
        # Nested SecurityObject payload shared by 'admins' and 'members'.
        security_object = {
            'names': ['testString'],
            'roles': ['testString'],
        }

        # JSON payload covering every Security field.
        security_json = {
            'admins': security_object,
            'members': security_object,
            'cloudant': {},
            'couchdb_auth_only': True,
        }

        # Deserialize the payload into a model instance.
        model = Security.from_dict(security_json)
        assert model != False

        # Build a second instance from the first one's attribute dict and
        # verify the two compare equal.
        model_twin = Security(**Security.from_dict(security_json).__dict__)
        assert model == model_twin

        # Serializing back must reproduce the original payload exactly.
        assert model.to_dict() == security_json
class TestModel_SecurityObject():
    """
    Test Class for SecurityObject
    """

    def test_security_object_serialization(self):
        """
        Test serialization/deserialization for SecurityObject
        """
        # JSON payload covering every SecurityObject field.
        security_object_json = {
            'names': ['testString'],
            'roles': ['testString'],
        }

        # Deserialize the payload into a model instance.
        model = SecurityObject.from_dict(security_object_json)
        assert model != False

        # Build a second instance from the first one's attribute dict and
        # verify the two compare equal.
        model_twin = SecurityObject(**SecurityObject.from_dict(security_object_json).__dict__)
        assert model == model_twin

        # Serializing back must reproduce the original payload exactly.
        assert model.to_dict() == security_object_json
class TestModel_ServerInformation():
    """
    Test Class for ServerInformation
    """

    def test_server_information_serialization(self):
        """
        Test serialization/deserialization for ServerInformation
        """
        # Nested ServerVendor payload for the 'vendor' field.
        vendor_payload = {
            'name': 'testString',
            'variant': 'testString',
            'version': 'testString',
        }

        # JSON payload covering every ServerInformation field.
        server_info_json = {
            'couchdb': 'testString',
            'features': ['testString'],
            'vendor': vendor_payload,
            'version': 'testString',
            'features_flags': ['testString'],
        }

        # Deserialize the payload into a model instance.
        model = ServerInformation.from_dict(server_info_json)
        assert model != False

        # Build a second instance from the first one's attribute dict and
        # verify the two compare equal.
        model_twin = ServerInformation(**ServerInformation.from_dict(server_info_json).__dict__)
        assert model == model_twin

        # Serializing back must reproduce the original payload exactly.
        assert model.to_dict() == server_info_json
class TestModel_ServerVendor():
    """
    Test Class for ServerVendor
    """

    def test_server_vendor_serialization(self):
        """
        Test serialization/deserialization for ServerVendor
        """
        # JSON payload covering every ServerVendor field.
        vendor_json = {
            'name': 'testString',
            'variant': 'testString',
            'version': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = ServerVendor.from_dict(vendor_json)
        assert model != False

        # Build a second instance from the first one's attribute dict and
        # verify the two compare equal.
        model_twin = ServerVendor(**ServerVendor.from_dict(vendor_json).__dict__)
        assert model == model_twin

        # Serializing back must reproduce the original payload exactly.
        assert model.to_dict() == vendor_json
class TestModel_SessionAuthentication():
    """
    Test Class for SessionAuthentication
    """

    def test_session_authentication_serialization(self):
        """
        Test serialization/deserialization for SessionAuthentication
        """
        # JSON payload covering every SessionAuthentication field.
        auth_json = {
            'authenticated': 'testString',
            'authentication_db': 'testString',
            'authentication_handlers': ['testString'],
        }

        # Deserialize the payload into a model instance.
        model = SessionAuthentication.from_dict(auth_json)
        assert model != False

        # Build a second instance from the first one's attribute dict and
        # verify the two compare equal.
        model_twin = SessionAuthentication(**SessionAuthentication.from_dict(auth_json).__dict__)
        assert model == model_twin

        # Serializing back must reproduce the original payload exactly.
        assert model.to_dict() == auth_json
class TestModel_SessionInformation():
    """
    Test Class for SessionInformation
    """

    def test_session_information_serialization(self):
        """
        Test serialization/deserialization for SessionInformation
        """
        # Nested SessionAuthentication payload for the 'info' field.
        auth_payload = {
            'authenticated': 'testString',
            'authentication_db': 'testString',
            'authentication_handlers': ['testString'],
        }

        # Nested UserContext payload for the 'userCtx' field.
        user_context_payload = {
            'db': 'testString',
            'name': 'testString',
            'roles': ['_reader'],
        }

        # JSON payload covering every SessionInformation field.
        session_json = {
            'ok': True,
            'info': auth_payload,
            'userCtx': user_context_payload,
        }

        # Deserialize the payload into a model instance.
        model = SessionInformation.from_dict(session_json)
        assert model != False

        # Build a second instance from the first one's attribute dict and
        # verify the two compare equal.
        model_twin = SessionInformation(**SessionInformation.from_dict(session_json).__dict__)
        assert model == model_twin

        # Serializing back must reproduce the original payload exactly.
        assert model.to_dict() == session_json
class TestModel_ShardsInformation():
    """
    Test Class for ShardsInformation
    """

    def test_shards_information_serialization(self):
        """
        Test serialization/deserialization for ShardsInformation
        """
        # JSON form of a ShardsInformation model.
        json_repr = {
            'shards': {},
        }

        # Deserialize the JSON into a model instance.
        model = ShardsInformation.from_dict(json_repr)
        assert model != False

        # Rebuild a second instance via the constructor and compare.
        model2 = ShardsInformation(**ShardsInformation.from_dict(json_repr).__dict__)
        assert model == model2

        # Serialize back to a dict and verify no loss of data.
        assert model.to_dict() == json_repr
class TestModel_ThroughputInformation():
    """
    Test Class for ThroughputInformation
    """

    def test_throughput_information_serialization(self):
        """
        Test serialization/deserialization for ThroughputInformation
        """
        # JSON form of a ThroughputInformation model.
        json_repr = {
            'blocks': 0,
            'query': 0,
            'read': 0,
            'write': 0,
        }

        # Deserialize the JSON into a model instance.
        model = ThroughputInformation.from_dict(json_repr)
        assert model != False

        # Rebuild a second instance via the constructor and compare.
        model2 = ThroughputInformation(**ThroughputInformation.from_dict(json_repr).__dict__)
        assert model == model2

        # Serialize back to a dict and verify no loss of data.
        assert model.to_dict() == json_repr
class TestModel_UpInformation():
    """
    Test Class for UpInformation
    """

    def test_up_information_serialization(self):
        """
        Test serialization/deserialization for UpInformation
        """
        # JSON form of an UpInformation model.
        json_repr = {
            'seeds': {'foo': 'bar'},
            'status': 'maintenance_mode',
        }

        # Deserialize the JSON into a model instance.
        model = UpInformation.from_dict(json_repr)
        assert model != False

        # Rebuild a second instance via the constructor and compare.
        model2 = UpInformation(**UpInformation.from_dict(json_repr).__dict__)
        assert model == model2

        # Serialize back to a dict and verify no loss of data.
        assert model.to_dict() == json_repr
class TestModel_UserContext():
    """
    Test Class for UserContext
    """

    def test_user_context_serialization(self):
        """
        Test serialization/deserialization for UserContext
        """
        # JSON form of a UserContext model.
        json_repr = {
            'db': 'testString',
            'name': 'testString',
            'roles': ['_reader'],
        }

        # Deserialize the JSON into a model instance.
        model = UserContext.from_dict(json_repr)
        assert model != False

        # Rebuild a second instance via the constructor and compare.
        model2 = UserContext(**UserContext.from_dict(json_repr).__dict__)
        assert model == model2

        # Serialize back to a dict and verify no loss of data.
        assert model.to_dict() == json_repr
class TestModel_UuidsResult():
    """
    Test Class for UuidsResult
    """

    def test_uuids_result_serialization(self):
        """
        Test serialization/deserialization for UuidsResult
        """
        # JSON form of a UuidsResult model.
        json_repr = {
            'uuids': ['testString'],
        }

        # Deserialize the JSON into a model instance.
        model = UuidsResult.from_dict(json_repr)
        assert model != False

        # Rebuild a second instance via the constructor and compare.
        model2 = UuidsResult(**UuidsResult.from_dict(json_repr).__dict__)
        assert model == model2

        # Serialize back to a dict and verify no loss of data.
        assert model.to_dict() == json_repr
class TestModel_ViewQueriesResult():
    """
    Test Class for ViewQueriesResult
    """

    def test_view_queries_result_serialization(self):
        """
        Test serialization/deserialization for ViewQueriesResult
        """
        # Dict forms of the nested models needed to build this model.
        # NOTE: the generated `attachment_model` fixture was never referenced
        # (the document's `_attachments` is an empty dict), so it was removed.
        revisions_model = {  # Revisions
            'ids': ['testString'],
            'start': 1,
        }
        document_revision_status_model = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }
        document_model = {  # Document
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }
        view_result_row_model = {  # ViewResultRow
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
            'doc': document_model,
            'id': 'testString',
            'key': 'testString',
            'value': 'testString',
        }
        view_result_model = {  # ViewResult
            'total_rows': 0,
            'update_seq': 'testString',
            'rows': [view_result_row_model],
        }

        # JSON form of a ViewQueriesResult model.
        view_queries_result_model_json = {
            'results': [view_result_model],
        }

        # Deserialize the JSON into a model instance.
        view_queries_result_model = ViewQueriesResult.from_dict(view_queries_result_model_json)
        assert view_queries_result_model != False

        # Rebuild a second instance via the constructor and compare.
        view_queries_result_model2 = ViewQueriesResult(
            **ViewQueriesResult.from_dict(view_queries_result_model_json).__dict__)
        assert view_queries_result_model == view_queries_result_model2

        # Serialize back to a dict and verify no loss of data.
        assert view_queries_result_model.to_dict() == view_queries_result_model_json
class TestModel_ViewQuery():
    """
    Test Class for ViewQuery
    """

    def test_view_query_serialization(self):
        """
        Test serialization/deserialization for ViewQuery
        """
        # JSON form of a ViewQuery model.
        json_repr = {
            'att_encoding_info': False,
            'attachments': False,
            'conflicts': False,
            'descending': False,
            'include_docs': False,
            'inclusive_end': True,
            'limit': 0,
            'skip': 0,
            'update_seq': False,
            'endkey': 'testString',
            'endkey_docid': 'testString',
            'group': False,
            'group_level': 1,
            'key': 'testString',
            'keys': ['testString'],
            'reduce': True,
            'stable': False,
            'startkey': 'testString',
            'startkey_docid': 'testString',
            'update': 'true',
        }

        # Deserialize the JSON into a model instance.
        model = ViewQuery.from_dict(json_repr)
        assert model != False

        # Rebuild a second instance via the constructor and compare.
        model2 = ViewQuery(**ViewQuery.from_dict(json_repr).__dict__)
        assert model == model2

        # Serialize back to a dict and verify no loss of data.
        assert model.to_dict() == json_repr
class TestModel_ViewResult():
    """
    Test Class for ViewResult
    """

    def test_view_result_serialization(self):
        """
        Test serialization/deserialization for ViewResult
        """
        # Dict forms of the nested models needed to build this model.
        # NOTE: the generated `attachment_model` fixture was never referenced
        # (the document's `_attachments` is an empty dict), so it was removed.
        revisions_model = {  # Revisions
            'ids': ['testString'],
            'start': 1,
        }
        document_revision_status_model = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }
        document_model = {  # Document
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }
        view_result_row_model = {  # ViewResultRow
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
            'doc': document_model,
            'id': 'testString',
            'key': 'testString',
            'value': 'testString',
        }

        # JSON form of a ViewResult model.
        view_result_model_json = {
            'total_rows': 0,
            'update_seq': 'testString',
            'rows': [view_result_row_model],
        }

        # Deserialize the JSON into a model instance.
        view_result_model = ViewResult.from_dict(view_result_model_json)
        assert view_result_model != False

        # Rebuild a second instance via the constructor and compare.
        view_result_model2 = ViewResult(
            **ViewResult.from_dict(view_result_model_json).__dict__)
        assert view_result_model == view_result_model2

        # Serialize back to a dict and verify no loss of data.
        assert view_result_model.to_dict() == view_result_model_json
class TestModel_ViewResultRow():
    """
    Test Class for ViewResultRow
    """

    def test_view_result_row_serialization(self):
        """
        Test serialization/deserialization for ViewResultRow
        """
        # Dict forms of the nested models needed to build this model.
        # NOTE: the generated `attachment_model` fixture was never referenced
        # (the document's `_attachments` is an empty dict), so it was removed.
        revisions_model = {  # Revisions
            'ids': ['testString'],
            'start': 1,
        }
        document_revision_status_model = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }
        document_model = {  # Document
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }

        # JSON form of a ViewResultRow model.
        view_result_row_model_json = {
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
            'doc': document_model,
            'id': 'testString',
            'key': 'testString',
            'value': 'testString',
        }

        # Deserialize the JSON into a model instance.
        view_result_row_model = ViewResultRow.from_dict(view_result_row_model_json)
        assert view_result_row_model != False

        # Rebuild a second instance via the constructor and compare.
        view_result_row_model2 = ViewResultRow(
            **ViewResultRow.from_dict(view_result_row_model_json).__dict__)
        assert view_result_row_model == view_result_row_model2

        # Serialize back to a dict and verify no loss of data.
        assert view_result_row_model.to_dict() == view_result_row_model_json
class TestModel_GeoJsonGeometry():
    """
    Test Class for GeoJsonGeometry
    """

    def test_geo_json_geometry_serialization(self):
        """
        Test serialization/deserialization for GeoJsonGeometry
        """
        # JSON form of a GeoJsonGeometry model.
        json_repr = {
            'type': 'Point',
            'coordinates': ['testString'],
        }

        # Deserialize the JSON into a model instance.
        model = GeoJsonGeometry.from_dict(json_repr)
        assert model != False

        # Rebuild a second instance via the constructor and compare.
        model2 = GeoJsonGeometry(**GeoJsonGeometry.from_dict(json_repr).__dict__)
        assert model == model2

        # Serialize back to a dict and verify no loss of data.
        assert model.to_dict() == json_repr
class TestModel_GeoJsonGeometryCollection():
    """
    Test Class for GeoJsonGeometryCollection
    """

    def test_geo_json_geometry_collection_serialization(self):
        """
        Test serialization/deserialization for GeoJsonGeometryCollection
        """
        # Dict form of the nested geometry this model embeds.
        geometry_fixture = {  # GeoJsonGeometry
            'type': 'Point',
            'coordinates': ['testString'],
        }

        # JSON form of a GeoJsonGeometryCollection model.
        json_repr = {
            'type': 'Point',
            'geometries': [geometry_fixture],
        }

        # Deserialize the JSON into a model instance.
        model = GeoJsonGeometryCollection.from_dict(json_repr)
        assert model != False

        # Rebuild a second instance via the constructor and compare.
        model2 = GeoJsonGeometryCollection(**GeoJsonGeometryCollection.from_dict(json_repr).__dict__)
        assert model == model2

        # Serialize back to a dict and verify no loss of data.
        assert model.to_dict() == json_repr
# endregion
##############################################################################
# End of Model Tests
##############################################################################
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit Tests for CloudantV1
"""
from datetime import datetime, timezone
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
from ibm_cloud_sdk_core.utils import datetime_to_string, string_to_datetime
import base64
import inspect
import io
import json
import os
import pytest
import re
import requests
import requests.models
import responses
import tempfile
import urllib
import gzip
from ibmcloudant.cloudant_v1 import *
# Shared service instance for every test below; NoAuthAuthenticator is
# sufficient because all HTTP calls are intercepted by the `responses` mocks.
_service = CloudantV1(
    authenticator=NoAuthAuthenticator()
)

# Base URL the mock responses are registered against.
_base_url = 'http://localhost:5984'
_service.set_service_url(_base_url)
##############################################################################
# Start of Service: Server
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # Configure the service by environment so no credentials are needed.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'

        service = CloudantV1.new_instance(service_name='TEST_SERVICE')

        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # With no service name or external config, construction must fail.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestGetServerInformation():
    """
    Test Class for get_server_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        normalized = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        # A URL ending in '/' must match any number of trailing slashes.
        if re.fullmatch('.*/+', normalized):
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized

    @responses.activate
    def test_get_server_information_all_params(self):
        """
        get_server_information()
        """
        # Register the mock response.
        url = self.preprocess_url(_base_url + '/')
        mock_body = '{"couchdb": "couchdb", "features": ["features"], "vendor": {"name": "name", "variant": "variant", "version": "version"}, "version": "version", "features_flags": ["features_flags"]}'
        responses.add(
            responses.GET,
            url,
            body=mock_body,
            content_type='application/json',
            status=200,
        )

        # Invoke the operation.
        response = _service.get_server_information()

        # Exactly one HTTP call was made and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_server_information_all_params_with_retries(self):
        # Re-run the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_get_server_information_all_params()
        _service.disable_retries()
        self.test_get_server_information_all_params()
class TestGetMembershipInformation():
    """
    Test Class for get_membership_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        normalized = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        # A URL ending in '/' must match any number of trailing slashes.
        if re.fullmatch('.*/+', normalized):
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized

    @responses.activate
    def test_get_membership_information_all_params(self):
        """
        get_membership_information()
        """
        # Register the mock response.
        url = self.preprocess_url(_base_url + '/_membership')
        mock_body = '{"all_nodes": ["all_nodes"], "cluster_nodes": ["cluster_nodes"]}'
        responses.add(
            responses.GET,
            url,
            body=mock_body,
            content_type='application/json',
            status=200,
        )

        # Invoke the operation.
        response = _service.get_membership_information()

        # Exactly one HTTP call was made and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_membership_information_all_params_with_retries(self):
        # Re-run the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_get_membership_information_all_params()
        _service.disable_retries()
        self.test_get_membership_information_all_params()
class TestGetUuids():
    """
    Test Class for get_uuids
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        normalized = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        # A URL ending in '/' must match any number of trailing slashes.
        if re.fullmatch('.*/+', normalized):
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized

    @responses.activate
    def test_get_uuids_all_params(self):
        """
        get_uuids()
        """
        # Register the mock response.
        url = self.preprocess_url(_base_url + '/_uuids')
        responses.add(
            responses.GET,
            url,
            body='{"uuids": ["uuids"]}',
            content_type='application/json',
            status=200,
        )

        # Invoke the operation with an explicit count.
        count = 1
        response = _service.get_uuids(
            count=count,
            headers={}
        )

        # Exactly one HTTP call was made and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # The count parameter must appear in the query string.
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'count={}'.format(count) in query_string

    def test_get_uuids_all_params_with_retries(self):
        # Re-run the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_get_uuids_all_params()
        _service.disable_retries()
        self.test_get_uuids_all_params()

    @responses.activate
    def test_get_uuids_required_params(self):
        """
        test_get_uuids_required_params()
        """
        # Register the mock response.
        url = self.preprocess_url(_base_url + '/_uuids')
        responses.add(
            responses.GET,
            url,
            body='{"uuids": ["uuids"]}',
            content_type='application/json',
            status=200,
        )

        # Invoke the operation with defaults only.
        response = _service.get_uuids()

        # Exactly one HTTP call was made and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_uuids_required_params_with_retries(self):
        # Re-run the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_get_uuids_required_params()
        _service.disable_retries()
        self.test_get_uuids_required_params()
class TestGetCapacityThroughputInformation():
    """
    Test Class for get_capacity_throughput_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        normalized = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        # A URL ending in '/' must match any number of trailing slashes.
        if re.fullmatch('.*/+', normalized):
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized

    @responses.activate
    def test_get_capacity_throughput_information_all_params(self):
        """
        get_capacity_throughput_information()
        """
        # Register the mock response.
        url = self.preprocess_url(_base_url + '/_api/v2/user/capacity/throughput')
        mock_body = '{"current": {"throughput": {"blocks": 0, "query": 0, "read": 0, "write": 0}}, "target": {"throughput": {"blocks": 0, "query": 0, "read": 0, "write": 0}}}'
        responses.add(
            responses.GET,
            url,
            body=mock_body,
            content_type='application/json',
            status=200,
        )

        # Invoke the operation.
        response = _service.get_capacity_throughput_information()

        # Exactly one HTTP call was made and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_capacity_throughput_information_all_params_with_retries(self):
        # Re-run the test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_get_capacity_throughput_information_all_params()
        _service.disable_retries()
        self.test_get_capacity_throughput_information_all_params()
class TestPutCapacityThroughputConfiguration():
"""
Test Class for put_capacity_throughput_configuration
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
@responses.activate
def test_put_capacity_throughput_configuration_all_params(self):
"""
put_capacity_throughput_configuration()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/_api/v2/user/capacity/throughput')
mock_response = '{"current": {"throughput": {"blocks": 0, "query": 0, "read": 0, "write": 0}}, "target": {"throughput": {"blocks": 0, "query": 0, "read": 0, "write": 0}}}'
responses.add(responses.PUT,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
blocks = 0
# Invoke method
response = _service.put_capacity_throughput_configuration(
blocks,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# decompress gzip compressed request body
responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['blocks'] == 0
def test_put_capacity_throughput_configuration_all_params_with_retries(self):
# Enable retries and run test_put_capacity_throughput_configuration_all_params.
_service.enable_retries()
self.test_put_capacity_throughput_configuration_all_params()
# Disable retries and run test_put_capacity_throughput_configuration_all_params.
_service.disable_retries()
self.test_put_capacity_throughput_configuration_all_params()
@responses.activate
def test_put_capacity_throughput_configuration_value_error(self):
"""
test_put_capacity_throughput_configuration_value_error()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/_api/v2/user/capacity/throughput')
mock_response = '{"current": {"throughput": {"blocks": 0, "query": 0, "read": 0, "write": 0}}, "target": {"throughput": {"blocks": 0, "query": 0, "read": 0, "write": 0}}}'
responses.add(responses.PUT,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
blocks = 0
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"blocks": blocks,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
_service.put_capacity_throughput_configuration(**req_copy)
def test_put_capacity_throughput_configuration_value_error_with_retries(self):
    """Run the value-error test with retries enabled, then disabled."""
    for toggle in (_service.enable_retries, _service.disable_retries):
        toggle()
        self.test_put_capacity_throughput_configuration_value_error()
# endregion
##############################################################################
# End of Service: Server
##############################################################################
##############################################################################
# Start of Service: Changes
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # Force the no-auth authenticator via the environment, then build
        # the service from external configuration.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        svc = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert svc is not None
        assert isinstance(svc, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # Constructing a service with no authenticator configured must fail.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestGetDbUpdates():
    """
    Test Class for get_db_updates
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded,
        # then re-quote everything except ':' and '/'.
        request_url = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        # URLs ending in one or more slashes are matched with a regex that
        # tolerates any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is not None:
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_get_db_updates_all_params(self):
        """
        get_db_updates()
        """
        # Register the mocked endpoint.
        mock_url = self.preprocess_url(_base_url + '/_db_updates')
        mock_response = '{"last_seq": "last_seq", "results": [{"account": "account", "db_name": "db_name", "seq": "seq", "type": "created"}]}'
        responses.add(
            responses.GET,
            mock_url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )
        # Parameter values under test.
        feed = 'normal'
        heartbeat = 0
        timeout = 0
        since = '0'
        # Invoke the operation.
        resp = _service.get_db_updates(
            feed=feed,
            heartbeat=heartbeat,
            timeout=timeout,
            since=since,
            headers={},
        )
        # The mock must have been hit exactly once and returned 200.
        assert len(responses.calls) == 1
        assert resp.status_code == 200
        # Every parameter must appear in the request query string.
        qs = urllib.parse.unquote_plus(responses.calls[0].request.url.split('?', 1)[1])
        assert f'feed={feed}' in qs
        assert f'heartbeat={heartbeat}' in qs
        assert f'timeout={timeout}' in qs
        assert f'since={since}' in qs

    def test_get_db_updates_all_params_with_retries(self):
        """Run the all-params test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_db_updates_all_params()

    @responses.activate
    def test_get_db_updates_required_params(self):
        """
        test_get_db_updates_required_params()
        """
        mock_url = self.preprocess_url(_base_url + '/_db_updates')
        mock_response = '{"last_seq": "last_seq", "results": [{"account": "account", "db_name": "db_name", "seq": "seq", "type": "created"}]}'
        responses.add(
            responses.GET,
            mock_url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )
        # Invoke with no optional parameters at all.
        resp = _service.get_db_updates()
        assert len(responses.calls) == 1
        assert resp.status_code == 200

    def test_get_db_updates_required_params_with_retries(self):
        """Run the required-params test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_db_updates_required_params()
class TestPostChanges():
    """
    Test Class for post_changes
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_changes_all_params(self):
        """
        post_changes()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_changes')
        mock_response = '{"last_seq": "last_seq", "pending": 7, "results": [{"changes": [{"rev": "rev"}], "deleted": false, "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "seq": "seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_ids = ['testString']
        fields = ['testString']
        selector = {}
        last_event_id = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        feed = 'normal'
        filter = 'testString'
        heartbeat = 0
        include_docs = False
        limit = 0
        seq_interval = 1
        since = '0'
        style = 'main_only'
        timeout = 0
        view = 'testString'
        # Invoke method
        response = _service.post_changes(
            db,
            doc_ids=doc_ids,
            fields=fields,
            selector=selector,
            last_event_id=last_event_id,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            feed=feed,
            filter=filter,
            heartbeat=heartbeat,
            include_docs=include_docs,
            limit=limit,
            seq_interval=seq_interval,
            since=since,
            style=style,
            timeout=timeout,
            view=view,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'conflicts={}'.format('true' if conflicts else 'false') in query_string
        assert 'descending={}'.format('true' if descending else 'false') in query_string
        assert 'feed={}'.format(feed) in query_string
        assert 'filter={}'.format(filter) in query_string
        assert 'heartbeat={}'.format(heartbeat) in query_string
        assert 'include_docs={}'.format('true' if include_docs else 'false') in query_string
        assert 'limit={}'.format(limit) in query_string
        assert 'seq_interval={}'.format(seq_interval) in query_string
        assert 'since={}'.format(since) in query_string
        assert 'style={}'.format(style) in query_string
        assert 'timeout={}'.format(timeout) in query_string
        assert 'view={}'.format(view) in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['doc_ids'] == ['testString']
        assert req_body['fields'] == ['testString']
        assert req_body['selector'] == {}

    def test_post_changes_all_params_with_retries(self):
        # Enable retries and run test_post_changes_all_params.
        _service.enable_retries()
        self.test_post_changes_all_params()
        # Disable retries and run test_post_changes_all_params.
        _service.disable_retries()
        self.test_post_changes_all_params()

    @responses.activate
    def test_post_changes_required_params(self):
        """
        test_post_changes_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_changes')
        mock_response = '{"last_seq": "last_seq", "pending": 7, "results": [{"changes": [{"rev": "rev"}], "deleted": false, "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "seq": "seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_ids = ['testString']
        fields = ['testString']
        selector = {}
        # Invoke method
        response = _service.post_changes(
            db,
            doc_ids=doc_ids,
            fields=fields,
            selector=selector,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['doc_ids'] == ['testString']
        assert req_body['fields'] == ['testString']
        assert req_body['selector'] == {}

    def test_post_changes_required_params_with_retries(self):
        # Enable retries and run test_post_changes_required_params.
        _service.enable_retries()
        self.test_post_changes_required_params()
        # Disable retries and run test_post_changes_required_params.
        _service.disable_retries()
        self.test_post_changes_required_params()

    @responses.activate
    def test_post_changes_value_error(self):
        """
        test_post_changes_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_changes')
        mock_response = '{"last_seq": "last_seq", "pending": 7, "results": [{"changes": [{"rev": "rev"}], "deleted": false, "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "seq": "seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Only 'db' is required; optional body params are irrelevant here.
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Use '!=' (value equality) rather than 'is not': identity of
            # equal strings is a CPython interning detail, not a guarantee.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_changes(**req_copy)

    def test_post_changes_value_error_with_retries(self):
        # Enable retries and run test_post_changes_value_error.
        _service.enable_retries()
        self.test_post_changes_value_error()
        # Disable retries and run test_post_changes_value_error.
        _service.disable_retries()
        self.test_post_changes_value_error()
class TestPostChangesAsStream():
    """
    Test Class for post_changes_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_changes_as_stream_all_params(self):
        """
        post_changes_as_stream()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_changes')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_ids = ['0007741142412418284']
        fields = ['testString']
        selector = {}
        last_event_id = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        feed = 'normal'
        filter = 'testString'
        heartbeat = 0
        include_docs = False
        limit = 0
        seq_interval = 1
        since = '0'
        style = 'main_only'
        timeout = 0
        view = 'testString'
        # Invoke method
        response = _service.post_changes_as_stream(
            db,
            doc_ids=doc_ids,
            fields=fields,
            selector=selector,
            last_event_id=last_event_id,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            feed=feed,
            filter=filter,
            heartbeat=heartbeat,
            include_docs=include_docs,
            limit=limit,
            seq_interval=seq_interval,
            since=since,
            style=style,
            timeout=timeout,
            view=view,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'conflicts={}'.format('true' if conflicts else 'false') in query_string
        assert 'descending={}'.format('true' if descending else 'false') in query_string
        assert 'feed={}'.format(feed) in query_string
        assert 'filter={}'.format(filter) in query_string
        assert 'heartbeat={}'.format(heartbeat) in query_string
        assert 'include_docs={}'.format('true' if include_docs else 'false') in query_string
        assert 'limit={}'.format(limit) in query_string
        assert 'seq_interval={}'.format(seq_interval) in query_string
        assert 'since={}'.format(since) in query_string
        assert 'style={}'.format(style) in query_string
        assert 'timeout={}'.format(timeout) in query_string
        assert 'view={}'.format(view) in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['doc_ids'] == ['0007741142412418284']
        assert req_body['fields'] == ['testString']
        assert req_body['selector'] == {}
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_changes_as_stream_all_params_with_retries(self):
        # Enable retries and run test_post_changes_as_stream_all_params.
        _service.enable_retries()
        self.test_post_changes_as_stream_all_params()
        # Disable retries and run test_post_changes_as_stream_all_params.
        _service.disable_retries()
        self.test_post_changes_as_stream_all_params()

    @responses.activate
    def test_post_changes_as_stream_required_params(self):
        """
        test_post_changes_as_stream_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_changes')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_ids = ['0007741142412418284']
        fields = ['testString']
        selector = {}
        # Invoke method
        response = _service.post_changes_as_stream(
            db,
            doc_ids=doc_ids,
            fields=fields,
            selector=selector,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['doc_ids'] == ['0007741142412418284']
        assert req_body['fields'] == ['testString']
        assert req_body['selector'] == {}
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_changes_as_stream_required_params_with_retries(self):
        # Enable retries and run test_post_changes_as_stream_required_params.
        _service.enable_retries()
        self.test_post_changes_as_stream_required_params()
        # Disable retries and run test_post_changes_as_stream_required_params.
        _service.disable_retries()
        self.test_post_changes_as_stream_required_params()

    @responses.activate
    def test_post_changes_as_stream_value_error(self):
        """
        test_post_changes_as_stream_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_changes')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Only 'db' is required; optional body params are irrelevant here.
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Use '!=' (value equality) rather than 'is not': identity of
            # equal strings is a CPython interning detail, not a guarantee.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_changes_as_stream(**req_copy)

    def test_post_changes_as_stream_value_error_with_retries(self):
        # Enable retries and run test_post_changes_as_stream_value_error.
        _service.enable_retries()
        self.test_post_changes_as_stream_value_error()
        # Disable retries and run test_post_changes_as_stream_value_error.
        _service.disable_retries()
        self.test_post_changes_as_stream_value_error()
# endregion
##############################################################################
# End of Service: Changes
##############################################################################
##############################################################################
# Start of Service: Databases
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # Force the no-auth authenticator via the environment, then build
        # the service from external configuration.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        svc = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert svc is not None
        assert isinstance(svc, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # Constructing a service with no authenticator configured must fail.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestHeadDatabase():
    """
    Test Class for head_database
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_head_database_all_params(self):
        """
        head_database()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Invoke method
        response = _service.head_database(
            db,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_database_all_params_with_retries(self):
        # Enable retries and run test_head_database_all_params.
        _service.enable_retries()
        self.test_head_database_all_params()
        # Disable retries and run test_head_database_all_params.
        _service.disable_retries()
        self.test_head_database_all_params()

    @responses.activate
    def test_head_database_value_error(self):
        """
        test_head_database_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Use '!=' (value equality) rather than 'is not': identity of
            # equal strings is a CPython interning detail, not a guarantee.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_database(**req_copy)

    def test_head_database_value_error_with_retries(self):
        # Enable retries and run test_head_database_value_error.
        _service.enable_retries()
        self.test_head_database_value_error()
        # Disable retries and run test_head_database_value_error.
        _service.disable_retries()
        self.test_head_database_value_error()
class TestGetAllDbs():
    """
    Test Class for get_all_dbs
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded,
        # then re-quote everything except ':' and '/'.
        request_url = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        # URLs ending in one or more slashes are matched with a regex that
        # tolerates any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is not None:
            return re.compile(request_url.rstrip('/') + '/+')
        return request_url

    @responses.activate
    def test_get_all_dbs_all_params(self):
        """
        get_all_dbs()
        """
        # Register the mocked endpoint.
        mock_url = self.preprocess_url(_base_url + '/_all_dbs')
        responses.add(
            responses.GET,
            mock_url,
            body='["operation_response"]',
            content_type='application/json',
            status=200,
        )
        # Parameter values under test.
        descending = False
        endkey = 'testString'
        limit = 0
        skip = 0
        startkey = 'testString'
        # Invoke the operation.
        resp = _service.get_all_dbs(
            descending=descending,
            endkey=endkey,
            limit=limit,
            skip=skip,
            startkey=startkey,
            headers={},
        )
        # The mock must have been hit exactly once and returned 200.
        assert len(responses.calls) == 1
        assert resp.status_code == 200
        # Every parameter must appear in the request query string.
        qs = urllib.parse.unquote_plus(responses.calls[0].request.url.split('?', 1)[1])
        assert f'descending={str(descending).lower()}' in qs
        assert f'endkey={endkey}' in qs
        assert f'limit={limit}' in qs
        assert f'skip={skip}' in qs
        assert f'startkey={startkey}' in qs

    def test_get_all_dbs_all_params_with_retries(self):
        """Run the all-params test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_all_dbs_all_params()

    @responses.activate
    def test_get_all_dbs_required_params(self):
        """
        test_get_all_dbs_required_params()
        """
        mock_url = self.preprocess_url(_base_url + '/_all_dbs')
        responses.add(
            responses.GET,
            mock_url,
            body='["operation_response"]',
            content_type='application/json',
            status=200,
        )
        # Invoke with no optional parameters at all.
        resp = _service.get_all_dbs()
        assert len(responses.calls) == 1
        assert resp.status_code == 200

    def test_get_all_dbs_required_params_with_retries(self):
        """Run the required-params test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_all_dbs_required_params()
class TestPostDbsInfo():
    """
    Test Class for post_dbs_info
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_dbs_info_all_params(self):
        """
        post_dbs_info()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_dbs_info')
        mock_response = '[{"error": "error", "info": {"cluster": {"n": 1, "q": 1, "r": 1, "w": 1}, "committed_update_seq": "committed_update_seq", "compact_running": false, "compacted_seq": "compacted_seq", "db_name": "db_name", "disk_format_version": 19, "doc_count": 0, "doc_del_count": 0, "engine": "engine", "props": {"partitioned": false}, "sizes": {"active": 6, "external": 8, "file": 4}, "update_seq": "update_seq", "uuid": "uuid"}, "key": "key"}]'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        keys = ['testString']
        # Invoke method
        response = _service.post_dbs_info(
            keys,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['keys'] == ['testString']

    def test_post_dbs_info_all_params_with_retries(self):
        # Enable retries and run test_post_dbs_info_all_params.
        _service.enable_retries()
        self.test_post_dbs_info_all_params()
        # Disable retries and run test_post_dbs_info_all_params.
        _service.disable_retries()
        self.test_post_dbs_info_all_params()

    @responses.activate
    def test_post_dbs_info_value_error(self):
        """
        test_post_dbs_info_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_dbs_info')
        mock_response = '[{"error": "error", "info": {"cluster": {"n": 1, "q": 1, "r": 1, "w": 1}, "committed_update_seq": "committed_update_seq", "compact_running": false, "compacted_seq": "compacted_seq", "db_name": "db_name", "disk_format_version": 19, "doc_count": 0, "doc_del_count": 0, "engine": "engine", "props": {"partitioned": false}, "sizes": {"active": 6, "external": 8, "file": 4}, "update_seq": "update_seq", "uuid": "uuid"}, "key": "key"}]'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        keys = ['testString']
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "keys": keys,
        }
        for param in req_param_dict:
            # Use '!=' (value equality) rather than 'is not': identity of
            # equal strings is a CPython interning detail, not a guarantee.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_dbs_info(**req_copy)

    def test_post_dbs_info_value_error_with_retries(self):
        # Enable retries and run test_post_dbs_info_value_error.
        _service.enable_retries()
        self.test_post_dbs_info_value_error()
        # Disable retries and run test_post_dbs_info_value_error.
        _service.disable_retries()
        self.test_post_dbs_info_value_error()
class TestDeleteDatabase():
    """
    Test Class for delete_database
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_delete_database_all_params(self):
        """
        delete_database()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"ok": true}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Invoke method
        response = _service.delete_database(
            db,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_delete_database_all_params_with_retries(self):
        # Enable retries and run test_delete_database_all_params.
        _service.enable_retries()
        self.test_delete_database_all_params()
        # Disable retries and run test_delete_database_all_params.
        _service.disable_retries()
        self.test_delete_database_all_params()

    @responses.activate
    def test_delete_database_value_error(self):
        """
        test_delete_database_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"ok": true}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Use '!=' (value equality) rather than 'is not': identity of
            # equal strings is a CPython interning detail, not a guarantee.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_database(**req_copy)

    def test_delete_database_value_error_with_retries(self):
        # Enable retries and run test_delete_database_value_error.
        _service.enable_retries()
        self.test_delete_database_value_error()
        # Disable retries and run test_delete_database_value_error.
        _service.disable_retries()
        self.test_delete_database_value_error()
class TestGetDatabaseInformation():
    """
    Test Class for get_database_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Tolerate any number of trailing slashes when matching.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_database_information_all_params(self):
        """
        get_database_information()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"cluster": {"n": 1, "q": 1, "r": 1, "w": 1}, "committed_update_seq": "committed_update_seq", "compact_running": false, "compacted_seq": "compacted_seq", "db_name": "db_name", "disk_format_version": 19, "doc_count": 0, "doc_del_count": 0, "engine": "engine", "props": {"partitioned": false}, "sizes": {"active": 6, "external": 8, "file": 4}, "update_seq": "update_seq", "uuid": "uuid"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Invoke method
        response = _service.get_database_information(
            db,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_database_information_all_params_with_retries(self):
        # Enable retries and run test_get_database_information_all_params.
        _service.enable_retries()
        self.test_get_database_information_all_params()
        # Disable retries and run test_get_database_information_all_params.
        _service.disable_retries()
        self.test_get_database_information_all_params()

    @responses.activate
    def test_get_database_information_value_error(self):
        """
        test_get_database_information_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"cluster": {"n": 1, "q": 1, "r": 1, "w": 1}, "committed_update_seq": "committed_update_seq", "compact_running": false, "compacted_seq": "compacted_seq", "db_name": "db_name", "disk_format_version": 19, "doc_count": 0, "doc_del_count": 0, "engine": "engine", "props": {"partitioned": false}, "sizes": {"active": 6, "external": 8, "file": 4}, "update_seq": "update_seq", "uuid": "uuid"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Use '!=' (value equality) rather than 'is not': identity of
            # equal strings is a CPython interning detail, not a guarantee.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_database_information(**req_copy)

    def test_get_database_information_value_error_with_retries(self):
        # Enable retries and run test_get_database_information_value_error.
        _service.enable_retries()
        self.test_get_database_information_value_error()
        # Disable retries and run test_get_database_information_value_error.
        _service.disable_retries()
        self.test_get_database_information_value_error()
class TestPutDatabase():
    """
    Test Class for put_database
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Trailing slash: return a pattern matching any run of trailing slashes.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_put_database_all_params(self):
        """
        put_database()

        Invoke put_database with every parameter and validate the query string.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        partitioned = False
        q = 1
        # Invoke method
        response = _service.put_database(
            db,
            partitioned=partitioned,
            q=q,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'partitioned={}'.format('true' if partitioned else 'false') in query_string
        assert 'q={}'.format(q) in query_string

    def test_put_database_all_params_with_retries(self):
        """Run test_put_database_all_params with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_put_database_all_params()
        _service.disable_retries()
        self.test_put_database_all_params()

    @responses.activate
    def test_put_database_required_params(self):
        """
        test_put_database_required_params()

        Invoke put_database with only the required parameters.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        # Invoke method
        response = _service.put_database(
            db,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

    def test_put_database_required_params_with_retries(self):
        """Run test_put_database_required_params with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_put_database_required_params()
        _service.disable_retries()
        self.test_put_database_required_params()

    @responses.activate
    def test_put_database_value_error(self):
        """
        test_put_database_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Compare strings by value (!=), not identity (is not).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.put_database(**req_copy)

    def test_put_database_value_error_with_retries(self):
        """Run test_put_database_value_error with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_put_database_value_error()
        _service.disable_retries()
        self.test_put_database_value_error()
# endregion
##############################################################################
# End of Service: Databases
##############################################################################
##############################################################################
# Start of Service: Documents
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()

        Construct a service client from environment configuration.
        """
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        svc = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert svc is not None
        assert isinstance(svc, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()

        Construction without an authenticator must raise ValueError.
        """
        with pytest.raises(ValueError, match='authenticator must be provided'):
            service = CloudantV1.new_instance(
            )
class TestHeadDocument():
    """
    Test Class for head_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Trailing slash: return a pattern matching any run of trailing slashes.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_head_document_all_params(self):
        """
        head_document()

        Invoke head_document with every parameter and validate the query string.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        if_none_match = 'testString'
        latest = False
        rev = 'testString'
        # Invoke method
        response = _service.head_document(
            db,
            doc_id,
            if_none_match=if_none_match,
            latest=latest,
            rev=rev,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'latest={}'.format('true' if latest else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string

    def test_head_document_all_params_with_retries(self):
        """Run test_head_document_all_params with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_head_document_all_params()
        _service.disable_retries()
        self.test_head_document_all_params()

    @responses.activate
    def test_head_document_required_params(self):
        """
        test_head_document_required_params()

        Invoke head_document with only the required parameters.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Invoke method
        response = _service.head_document(
            db,
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_document_required_params_with_retries(self):
        """Run test_head_document_required_params with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_head_document_required_params()
        _service.disable_retries()
        self.test_head_document_required_params()

    @responses.activate
    def test_head_document_value_error(self):
        """
        test_head_document_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Compare strings by value (!=), not identity (is not).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_document(**req_copy)

    def test_head_document_value_error_with_retries(self):
        """Run test_head_document_value_error with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_head_document_value_error()
        _service.disable_retries()
        self.test_head_document_value_error()
class TestPostDocument():
    """
    Test Class for post_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Trailing slash: return a pattern matching any run of trailing slashes.
            return re.compile(request_url.rstrip('/') + '/+')

    def _build_document_model(self):
        """Build the dict representation of a Document model shared by these tests.

        (The previously constructed Attachment model was never referenced by the
        document and has been dropped as dead code.)
        """
        # Dict representation of a Revisions model
        revisions_model = {
            'ids': ['testString'],
            'start': 1,
        }
        # Dict representation of a DocumentRevisionStatus model
        document_revision_status_model = {
            'rev': 'testString',
            'status': 'available',
        }
        # Dict representation of a Document model
        return {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }

    @responses.activate
    def test_post_document_all_params(self):
        """
        post_document()

        Invoke post_document with every parameter and validate the query string.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        document = self._build_document_model()
        content_type = 'application/json'
        batch = 'ok'
        # Invoke method
        response = _service.post_document(
            db,
            document,
            content_type=content_type,
            batch=batch,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'batch={}'.format(batch) in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)

    def test_post_document_all_params_with_retries(self):
        """Run test_post_document_all_params with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_post_document_all_params()
        _service.disable_retries()
        self.test_post_document_all_params()

    @responses.activate
    def test_post_document_required_params(self):
        """
        test_post_document_required_params()

        Invoke post_document with only the required parameters.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        document = self._build_document_model()
        # Invoke method
        response = _service.post_document(
            db,
            document,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)

    def test_post_document_required_params_with_retries(self):
        """Run test_post_document_required_params with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_post_document_required_params()
        _service.disable_retries()
        self.test_post_document_required_params()

    @responses.activate
    def test_post_document_value_error(self):
        """
        test_post_document_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        document = self._build_document_model()
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "document": document,
        }
        for param in req_param_dict:
            # Compare strings by value (!=), not identity (is not).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_document(**req_copy)

    def test_post_document_value_error_with_retries(self):
        """Run test_post_document_value_error with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_post_document_value_error()
        _service.disable_retries()
        self.test_post_document_value_error()
class TestPostAllDocs():
    """
    Test Class for post_all_docs
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Trailing slash: return a pattern matching any run of trailing slashes.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_all_docs_all_params(self):
        """
        post_all_docs()

        Invoke post_all_docs with every parameter and validate the request body.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_all_docs')
        mock_response = '{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 0
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = 'testString'
        # Invoke method
        response = _service.post_all_docs(
            db,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            key=key,
            keys=keys,
            startkey=startkey,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == False
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 0
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['testString']
        assert req_body['startkey'] == 'testString'

    def test_post_all_docs_all_params_with_retries(self):
        """Run test_post_all_docs_all_params with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_post_all_docs_all_params()
        _service.disable_retries()
        self.test_post_all_docs_all_params()

    @responses.activate
    def test_post_all_docs_value_error(self):
        """
        test_post_all_docs_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_all_docs')
        mock_response = '{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 0
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Compare strings by value (!=), not identity (is not); use k/v so the
            # comprehension does not shadow the local 'key' parameter value above.
            req_copy = {k: v if k != param else None for (k, v) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_all_docs(**req_copy)

    def test_post_all_docs_value_error_with_retries(self):
        """Run test_post_all_docs_value_error with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_post_all_docs_value_error()
        _service.disable_retries()
        self.test_post_all_docs_value_error()
class TestPostAllDocsAsStream():
    """
    Test Class for post_all_docs_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Trailing slash: return a pattern matching any run of trailing slashes.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_all_docs_as_stream_all_params(self):
        """
        post_all_docs_as_stream()

        Invoke post_all_docs_as_stream with every parameter; validate the
        request body and the streamed JSON response.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_all_docs')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = '0007741142412418284'
        # Invoke method
        response = _service.post_all_docs_as_stream(
            db,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            key=key,
            keys=keys,
            startkey=startkey,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == False
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['testString']
        assert req_body['startkey'] == '0007741142412418284'
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_all_docs_as_stream_all_params_with_retries(self):
        """Run test_post_all_docs_as_stream_all_params with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_post_all_docs_as_stream_all_params()
        _service.disable_retries()
        self.test_post_all_docs_as_stream_all_params()

    @responses.activate
    def test_post_all_docs_as_stream_value_error(self):
        """
        test_post_all_docs_as_stream_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_all_docs')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = '0007741142412418284'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Compare strings by value (!=), not identity (is not); use k/v so the
            # comprehension does not shadow the local 'key' parameter value above.
            req_copy = {k: v if k != param else None for (k, v) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_all_docs_as_stream(**req_copy)

    def test_post_all_docs_as_stream_value_error_with_retries(self):
        """Run test_post_all_docs_as_stream_value_error with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_post_all_docs_as_stream_value_error()
        _service.disable_retries()
        self.test_post_all_docs_as_stream_value_error()
class TestPostAllDocsQueries():
    """
    Test Class for post_all_docs_queries
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # Trailing slash: return a pattern matching any run of trailing slashes.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_all_docs_queries_all_params(self):
        """
        post_all_docs_queries()

        Invoke post_all_docs_queries with every parameter and validate the body.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_all_docs/queries')
        mock_response = '{"results": [{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a AllDocsQuery model
        all_docs_query_model = {}
        all_docs_query_model['att_encoding_info'] = False
        all_docs_query_model['attachments'] = False
        all_docs_query_model['conflicts'] = False
        all_docs_query_model['descending'] = False
        all_docs_query_model['include_docs'] = False
        all_docs_query_model['inclusive_end'] = True
        all_docs_query_model['limit'] = 0
        all_docs_query_model['skip'] = 0
        all_docs_query_model['update_seq'] = False
        all_docs_query_model['endkey'] = 'testString'
        all_docs_query_model['key'] = 'testString'
        all_docs_query_model['keys'] = ['testString']
        all_docs_query_model['startkey'] = 'testString'
        # Set up parameter values
        db = 'testString'
        queries = [all_docs_query_model]
        # Invoke method
        response = _service.post_all_docs_queries(
            db,
            queries,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['queries'] == [all_docs_query_model]

    def test_post_all_docs_queries_all_params_with_retries(self):
        """Run test_post_all_docs_queries_all_params with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_post_all_docs_queries_all_params()
        _service.disable_retries()
        self.test_post_all_docs_queries_all_params()

    @responses.activate
    def test_post_all_docs_queries_value_error(self):
        """
        test_post_all_docs_queries_value_error()

        Verify that omitting any required parameter raises ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_all_docs/queries')
        mock_response = '{"results": [{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a AllDocsQuery model
        all_docs_query_model = {}
        all_docs_query_model['att_encoding_info'] = False
        all_docs_query_model['attachments'] = False
        all_docs_query_model['conflicts'] = False
        all_docs_query_model['descending'] = False
        all_docs_query_model['include_docs'] = False
        all_docs_query_model['inclusive_end'] = True
        all_docs_query_model['limit'] = 0
        all_docs_query_model['skip'] = 0
        all_docs_query_model['update_seq'] = False
        all_docs_query_model['endkey'] = 'testString'
        all_docs_query_model['key'] = 'testString'
        all_docs_query_model['keys'] = ['testString']
        all_docs_query_model['startkey'] = 'testString'
        # Set up parameter values
        db = 'testString'
        queries = [all_docs_query_model]
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "queries": queries,
        }
        for param in req_param_dict:
            # Compare strings by value (!=), not identity (is not).
            req_copy = {k: v if k != param else None for (k, v) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_all_docs_queries(**req_copy)

    def test_post_all_docs_queries_value_error_with_retries(self):
        """Run test_post_all_docs_queries_value_error with retries enabled, then disabled."""
        _service.enable_retries()
        self.test_post_all_docs_queries_value_error()
        _service.disable_retries()
        self.test_post_all_docs_queries_value_error()
class TestPostAllDocsQueriesAsStream():
"""
Test Class for post_all_docs_queries_as_stream
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
@responses.activate
def test_post_all_docs_queries_as_stream_all_params(self):
"""
post_all_docs_queries_as_stream()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_all_docs/queries')
mock_response = '{"foo": "this is a mock response for JSON streaming"}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Construct a dict representation of a AllDocsQuery model
all_docs_query_model = {}
all_docs_query_model['att_encoding_info'] = False
all_docs_query_model['attachments'] = False
all_docs_query_model['conflicts'] = False
all_docs_query_model['descending'] = False
all_docs_query_model['include_docs'] = False
all_docs_query_model['inclusive_end'] = True
all_docs_query_model['limit'] = 0
all_docs_query_model['skip'] = 0
all_docs_query_model['update_seq'] = False
all_docs_query_model['endkey'] = 'testString'
all_docs_query_model['key'] = 'testString'
all_docs_query_model['keys'] = ['small-appliances:1000042', 'small-appliances:1000043']
all_docs_query_model['startkey'] = 'testString'
# Set up parameter values
db = 'testString'
queries = [all_docs_query_model]
# Invoke method
response = _service.post_all_docs_queries_as_stream(
db,
queries,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# decompress gzip compressed request body
responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['queries'] == [all_docs_query_model]
# Verify streamed JSON response
result = response.get_result()
assert isinstance(result, requests.models.Response)
response_buf = result.iter_content(chunk_size=1024)
assert str(next(response_buf), "utf-8") == mock_response
def test_post_all_docs_queries_as_stream_all_params_with_retries(self):
# Enable retries and run test_post_all_docs_queries_as_stream_all_params.
_service.enable_retries()
self.test_post_all_docs_queries_as_stream_all_params()
# Disable retries and run test_post_all_docs_queries_as_stream_all_params.
_service.disable_retries()
self.test_post_all_docs_queries_as_stream_all_params()
@responses.activate
def test_post_all_docs_queries_as_stream_value_error(self):
"""
test_post_all_docs_queries_as_stream_value_error()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_all_docs/queries')
mock_response = '{"foo": "this is a mock response for JSON streaming"}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Construct a dict representation of a AllDocsQuery model
all_docs_query_model = {}
all_docs_query_model['att_encoding_info'] = False
all_docs_query_model['attachments'] = False
all_docs_query_model['conflicts'] = False
all_docs_query_model['descending'] = False
all_docs_query_model['include_docs'] = False
all_docs_query_model['inclusive_end'] = True
all_docs_query_model['limit'] = 0
all_docs_query_model['skip'] = 0
all_docs_query_model['update_seq'] = False
all_docs_query_model['endkey'] = 'testString'
all_docs_query_model['key'] = 'testString'
all_docs_query_model['keys'] = ['small-appliances:1000042', 'small-appliances:1000043']
all_docs_query_model['startkey'] = 'testString'
# Set up parameter values
db = 'testString'
queries = [all_docs_query_model]
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"db": db,
"queries": queries,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
_service.post_all_docs_queries_as_stream(**req_copy)
def test_post_all_docs_queries_as_stream_value_error_with_retries(self):
# Enable retries and run test_post_all_docs_queries_as_stream_value_error.
_service.enable_retries()
self.test_post_all_docs_queries_as_stream_value_error()
# Disable retries and run test_post_all_docs_queries_as_stream_value_error.
_service.disable_retries()
self.test_post_all_docs_queries_as_stream_value_error()
class TestPostBulkDocs():
    """
    Test Class for post_bulk_docs
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Decode first so an already-encoded URL is not double-encoded, then
        # re-encode keeping ':' and '/' literal.
        request_url = urllib.parse.unquote(request_url)
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in '/' are matched loosely (any number of slashes).
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    def _build_bulk_docs_model(self):
        """Build the BulkDocs request fixture shared by the tests below."""
        # Construct a dict representation of a Revisions model
        revisions_model = {
            'ids': ['testString'],
            'start': 1,
        }
        # Construct a dict representation of a DocumentRevisionStatus model
        document_revision_status_model = {
            'rev': 'testString',
            'status': 'available',
        }
        # Construct a dict representation of a Document model
        document_model = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }
        # Construct a dict representation of a BulkDocs model
        return {
            'docs': [document_model],
            'new_edits': True,
        }

    @responses.activate
    def test_post_bulk_docs_all_params(self):
        """
        post_bulk_docs()

        Happy path: POST the bulk-docs payload and verify the request body.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_docs')
        mock_response = '[{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}]'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        bulk_docs = self._build_bulk_docs_model()
        # Invoke method
        response = _service.post_bulk_docs(
            db,
            bulk_docs,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # The SDK gzip-compresses request bodies; decompress before inspecting.
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body == bulk_docs

    def test_post_bulk_docs_all_params_with_retries(self):
        # Run the all-params test with retries enabled, then disabled, to
        # confirm identical behaviour in both configurations.
        _service.enable_retries()
        self.test_post_bulk_docs_all_params()
        _service.disable_retries()
        self.test_post_bulk_docs_all_params()

    @responses.activate
    def test_post_bulk_docs_value_error(self):
        """
        test_post_bulk_docs_value_error()

        Replacing any required parameter with None must raise a ValueError
        before any request is sent.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_docs')
        mock_response = '[{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}]'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        bulk_docs = self._build_bulk_docs_model()
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "bulk_docs": bulk_docs,
        }
        for param in req_param_dict.keys():
            # Compare with != rather than `is not`: identity comparison of
            # strings only works here by accident of CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_bulk_docs(**req_copy)

    def test_post_bulk_docs_value_error_with_retries(self):
        # Run the value-error test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_post_bulk_docs_value_error()
        _service.disable_retries()
        self.test_post_bulk_docs_value_error()
class TestPostBulkGet():
    """
    Test Class for post_bulk_get
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Decode first so an already-encoded URL is not double-encoded, then
        # re-encode keeping ':' and '/' literal.
        request_url = urllib.parse.unquote(request_url)
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in '/' are matched loosely (any number of slashes).
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_bulk_get_all_params(self):
        """
        post_bulk_get()

        Happy path with every optional query parameter supplied.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = '{"results": [{"docs": [{"error": {"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}, "ok": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}}], "id": "id"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {
            'atts_since': ['1-99b02e08da151943c2dcb40090160bb8'],
            'id': 'testString',
            'rev': 'testString',
        }
        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]
        attachments = False
        att_encoding_info = False
        latest = False
        revs = False
        # Invoke method
        response = _service.post_bulk_get(
            db,
            docs,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            latest=latest,
            revs=revs,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params: every boolean flag must appear in the URL.
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        for name, flag in (('attachments', attachments),
                           ('att_encoding_info', att_encoding_info),
                           ('latest', latest),
                           ('revs', revs)):
            assert '{}={}'.format(name, 'true' if flag else 'false') in query_string
        # The SDK gzip-compresses request bodies; decompress before inspecting.
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['docs'] == [bulk_get_query_document_model]

    def test_post_bulk_get_all_params_with_retries(self):
        # Run the all-params test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_post_bulk_get_all_params()
        _service.disable_retries()
        self.test_post_bulk_get_all_params()

    @responses.activate
    def test_post_bulk_get_required_params(self):
        """
        test_post_bulk_get_required_params()

        Happy path with only the required parameters supplied.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = '{"results": [{"docs": [{"error": {"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}, "ok": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}}], "id": "id"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {
            'atts_since': ['1-99b02e08da151943c2dcb40090160bb8'],
            'id': 'testString',
            'rev': 'testString',
        }
        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]
        # Invoke method
        response = _service.post_bulk_get(
            db,
            docs,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # The SDK gzip-compresses request bodies; decompress before inspecting.
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['docs'] == [bulk_get_query_document_model]

    def test_post_bulk_get_required_params_with_retries(self):
        # Run the required-params test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_post_bulk_get_required_params()
        _service.disable_retries()
        self.test_post_bulk_get_required_params()

    @responses.activate
    def test_post_bulk_get_value_error(self):
        """
        test_post_bulk_get_value_error()

        Replacing any required parameter with None must raise a ValueError
        before any request is sent.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = '{"results": [{"docs": [{"error": {"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}, "ok": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}}], "id": "id"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {
            'atts_since': ['1-99b02e08da151943c2dcb40090160bb8'],
            'id': 'testString',
            'rev': 'testString',
        }
        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "docs": docs,
        }
        for param in req_param_dict.keys():
            # Compare with != rather than `is not`: identity comparison of
            # strings only works here by accident of CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_bulk_get(**req_copy)

    def test_post_bulk_get_value_error_with_retries(self):
        # Run the value-error test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_post_bulk_get_value_error()
        _service.disable_retries()
        self.test_post_bulk_get_value_error()
class TestPostBulkGetAsMixed():
    """
    Test Class for post_bulk_get_as_mixed
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Decode first so an already-encoded URL is not double-encoded, then
        # re-encode keeping ':' and '/' literal.
        request_url = urllib.parse.unquote(request_url)
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in '/' are matched loosely (any number of slashes).
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_bulk_get_as_mixed_all_params(self):
        """
        post_bulk_get_as_mixed()

        Happy path with every optional query parameter supplied.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='multipart/mixed',
                      status=200)
        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {
            'atts_since': ['1-99b02e08da151943c2dcb40090160bb8'],
            'id': 'order00067',
            'rev': '3-917fa2381192822767f010b95b45325b',
        }
        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]
        attachments = False
        att_encoding_info = False
        latest = False
        revs = False
        # Invoke method
        response = _service.post_bulk_get_as_mixed(
            db,
            docs,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            latest=latest,
            revs=revs,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params: every boolean flag must appear in the URL.
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        for name, flag in (('attachments', attachments),
                           ('att_encoding_info', att_encoding_info),
                           ('latest', latest),
                           ('revs', revs)):
            assert '{}={}'.format(name, 'true' if flag else 'false') in query_string
        # The SDK gzip-compresses request bodies; decompress before inspecting.
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['docs'] == [bulk_get_query_document_model]

    def test_post_bulk_get_as_mixed_all_params_with_retries(self):
        # Run the all-params test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_post_bulk_get_as_mixed_all_params()
        _service.disable_retries()
        self.test_post_bulk_get_as_mixed_all_params()

    @responses.activate
    def test_post_bulk_get_as_mixed_required_params(self):
        """
        test_post_bulk_get_as_mixed_required_params()

        Happy path with only the required parameters supplied.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='multipart/mixed',
                      status=200)
        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {
            'atts_since': ['1-99b02e08da151943c2dcb40090160bb8'],
            'id': 'order00067',
            'rev': '3-917fa2381192822767f010b95b45325b',
        }
        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]
        # Invoke method
        response = _service.post_bulk_get_as_mixed(
            db,
            docs,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # The SDK gzip-compresses request bodies; decompress before inspecting.
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['docs'] == [bulk_get_query_document_model]

    def test_post_bulk_get_as_mixed_required_params_with_retries(self):
        # Run the required-params test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_post_bulk_get_as_mixed_required_params()
        _service.disable_retries()
        self.test_post_bulk_get_as_mixed_required_params()

    @responses.activate
    def test_post_bulk_get_as_mixed_value_error(self):
        """
        test_post_bulk_get_as_mixed_value_error()

        Replacing any required parameter with None must raise a ValueError
        before any request is sent.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='multipart/mixed',
                      status=200)
        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {
            'atts_since': ['1-99b02e08da151943c2dcb40090160bb8'],
            'id': 'order00067',
            'rev': '3-917fa2381192822767f010b95b45325b',
        }
        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "docs": docs,
        }
        for param in req_param_dict.keys():
            # Compare with != rather than `is not`: identity comparison of
            # strings only works here by accident of CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_bulk_get_as_mixed(**req_copy)

    def test_post_bulk_get_as_mixed_value_error_with_retries(self):
        # Run the value-error test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_post_bulk_get_as_mixed_value_error()
        _service.disable_retries()
        self.test_post_bulk_get_as_mixed_value_error()
class TestPostBulkGetAsRelated():
    """
    Test Class for post_bulk_get_as_related
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Decode first so an already-encoded URL is not double-encoded, then
        # re-encode keeping ':' and '/' literal.
        request_url = urllib.parse.unquote(request_url)
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in '/' are matched loosely (any number of slashes).
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_bulk_get_as_related_all_params(self):
        """
        post_bulk_get_as_related()

        Happy path with every optional query parameter supplied.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='multipart/related',
                      status=200)
        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {
            'atts_since': ['1-99b02e08da151943c2dcb40090160bb8'],
            'id': 'order00067',
            'rev': '3-917fa2381192822767f010b95b45325b',
        }
        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]
        attachments = False
        att_encoding_info = False
        latest = False
        revs = False
        # Invoke method
        response = _service.post_bulk_get_as_related(
            db,
            docs,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            latest=latest,
            revs=revs,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params: every boolean flag must appear in the URL.
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        for name, flag in (('attachments', attachments),
                           ('att_encoding_info', att_encoding_info),
                           ('latest', latest),
                           ('revs', revs)):
            assert '{}={}'.format(name, 'true' if flag else 'false') in query_string
        # The SDK gzip-compresses request bodies; decompress before inspecting.
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['docs'] == [bulk_get_query_document_model]

    def test_post_bulk_get_as_related_all_params_with_retries(self):
        # Run the all-params test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_post_bulk_get_as_related_all_params()
        _service.disable_retries()
        self.test_post_bulk_get_as_related_all_params()

    @responses.activate
    def test_post_bulk_get_as_related_required_params(self):
        """
        test_post_bulk_get_as_related_required_params()

        Happy path with only the required parameters supplied.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='multipart/related',
                      status=200)
        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {
            'atts_since': ['1-99b02e08da151943c2dcb40090160bb8'],
            'id': 'order00067',
            'rev': '3-917fa2381192822767f010b95b45325b',
        }
        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]
        # Invoke method
        response = _service.post_bulk_get_as_related(
            db,
            docs,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # The SDK gzip-compresses request bodies; decompress before inspecting.
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['docs'] == [bulk_get_query_document_model]

    def test_post_bulk_get_as_related_required_params_with_retries(self):
        # Run the required-params test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_post_bulk_get_as_related_required_params()
        _service.disable_retries()
        self.test_post_bulk_get_as_related_required_params()

    @responses.activate
    def test_post_bulk_get_as_related_value_error(self):
        """
        test_post_bulk_get_as_related_value_error()

        Replacing any required parameter with None must raise a ValueError
        before any request is sent.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_bulk_get')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='multipart/related',
                      status=200)
        # Construct a dict representation of a BulkGetQueryDocument model
        bulk_get_query_document_model = {
            'atts_since': ['1-99b02e08da151943c2dcb40090160bb8'],
            'id': 'order00067',
            'rev': '3-917fa2381192822767f010b95b45325b',
        }
        # Set up parameter values
        db = 'testString'
        docs = [bulk_get_query_document_model]
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "docs": docs,
        }
        for param in req_param_dict.keys():
            # Compare with != rather than `is not`: identity comparison of
            # strings only works here by accident of CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_bulk_get_as_related(**req_copy)

    def test_post_bulk_get_as_related_value_error_with_retries(self):
        # Run the value-error test with retries enabled, then disabled.
        _service.enable_retries()
        self.test_post_bulk_get_as_related_value_error()
        _service.disable_retries()
        self.test_post_bulk_get_as_related_value_error()
class TestPostBulkGetAsStream():
"""
Test Class for post_bulk_get_as_stream
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
@responses.activate
def test_post_bulk_get_as_stream_all_params(self):
"""
post_bulk_get_as_stream()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_bulk_get')
mock_response = '{"foo": "this is a mock response for JSON streaming"}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Construct a dict representation of a BulkGetQueryDocument model
bulk_get_query_document_model = {}
bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
bulk_get_query_document_model['id'] = 'order00067'
bulk_get_query_document_model['rev'] = '3-917fa2381192822767f010b95b45325b'
# Set up parameter values
db = 'testString'
docs = [bulk_get_query_document_model]
attachments = False
att_encoding_info = False
latest = False
revs = False
# Invoke method
response = _service.post_bulk_get_as_stream(
db,
docs,
attachments=attachments,
att_encoding_info=att_encoding_info,
latest=latest,
revs=revs,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate query params
query_string = responses.calls[0].request.url.split('?',1)[1]
query_string = urllib.parse.unquote_plus(query_string)
assert 'attachments={}'.format('true' if attachments else 'false') in query_string
assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
assert 'latest={}'.format('true' if latest else 'false') in query_string
assert 'revs={}'.format('true' if revs else 'false') in query_string
# decompress gzip compressed request body
responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['docs'] == [bulk_get_query_document_model]
# Verify streamed JSON response
result = response.get_result()
assert isinstance(result, requests.models.Response)
response_buf = result.iter_content(chunk_size=1024)
assert str(next(response_buf), "utf-8") == mock_response
def test_post_bulk_get_as_stream_all_params_with_retries(self):
# Enable retries and run test_post_bulk_get_as_stream_all_params.
_service.enable_retries()
self.test_post_bulk_get_as_stream_all_params()
# Disable retries and run test_post_bulk_get_as_stream_all_params.
_service.disable_retries()
self.test_post_bulk_get_as_stream_all_params()
@responses.activate
def test_post_bulk_get_as_stream_required_params(self):
"""
test_post_bulk_get_as_stream_required_params()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_bulk_get')
mock_response = '{"foo": "this is a mock response for JSON streaming"}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Construct a dict representation of a BulkGetQueryDocument model
bulk_get_query_document_model = {}
bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
bulk_get_query_document_model['id'] = 'order00067'
bulk_get_query_document_model['rev'] = '3-917fa2381192822767f010b95b45325b'
# Set up parameter values
db = 'testString'
docs = [bulk_get_query_document_model]
# Invoke method
response = _service.post_bulk_get_as_stream(
db,
docs,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# decompress gzip compressed request body
responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['docs'] == [bulk_get_query_document_model]
# Verify streamed JSON response
result = response.get_result()
assert isinstance(result, requests.models.Response)
response_buf = result.iter_content(chunk_size=1024)
assert str(next(response_buf), "utf-8") == mock_response
def test_post_bulk_get_as_stream_required_params_with_retries(self):
# Enable retries and run test_post_bulk_get_as_stream_required_params.
_service.enable_retries()
self.test_post_bulk_get_as_stream_required_params()
# Disable retries and run test_post_bulk_get_as_stream_required_params.
_service.disable_retries()
self.test_post_bulk_get_as_stream_required_params()
@responses.activate
def test_post_bulk_get_as_stream_value_error(self):
"""
test_post_bulk_get_as_stream_value_error()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_bulk_get')
mock_response = '{"foo": "this is a mock response for JSON streaming"}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Construct a dict representation of a BulkGetQueryDocument model
bulk_get_query_document_model = {}
bulk_get_query_document_model['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']
bulk_get_query_document_model['id'] = 'order00067'
bulk_get_query_document_model['rev'] = '3-917fa2381192822767f010b95b45325b'
# Set up parameter values
db = 'testString'
docs = [bulk_get_query_document_model]
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"db": db,
"docs": docs,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
_service.post_bulk_get_as_stream(**req_copy)
    def test_post_bulk_get_as_stream_value_error_with_retries(self):
        """Run the value-error stream test with retries enabled, then disabled."""
        # Enable retries and run test_post_bulk_get_as_stream_value_error.
        _service.enable_retries()
        self.test_post_bulk_get_as_stream_value_error()
        # Disable retries and run test_post_bulk_get_as_stream_value_error.
        _service.disable_retries()
        self.test_post_bulk_get_as_stream_value_error()
class TestDeleteDocument():
    """
    Test Class for delete_document

    Exercises the DELETE /{db}/{doc_id} operation with all params, with only
    required params, and with a missing required param (ValueError).
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # Match a slash-free URL literally; otherwise build a regex tolerating
        # any number of trailing slashes so the mock still intercepts the call.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_delete_document_all_params(self):
        """
        delete_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        if_match = 'testString'
        batch = 'ok'
        rev = 'testString'
        # Invoke method
        response = _service.delete_document(
            db,
            doc_id,
            if_match=if_match,
            batch=batch,
            rev=rev,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'batch={}'.format(batch) in query_string
        assert 'rev={}'.format(rev) in query_string
    def test_delete_document_all_params_with_retries(self):
        # Enable retries and run test_delete_document_all_params.
        _service.enable_retries()
        self.test_delete_document_all_params()
        # Disable retries and run test_delete_document_all_params.
        _service.disable_retries()
        self.test_delete_document_all_params()
    @responses.activate
    def test_delete_document_required_params(self):
        """
        test_delete_document_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Invoke method
        response = _service.delete_document(
            db,
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_delete_document_required_params_with_retries(self):
        # Enable retries and run test_delete_document_required_params.
        _service.enable_retries()
        self.test_delete_document_required_params()
        # Disable retries and run test_delete_document_required_params.
        _service.disable_retries()
        self.test_delete_document_required_params()
    @responses.activate
    def test_delete_document_value_error(self):
        """
        test_delete_document_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict.keys():
            # Compare keys with != (value equality), not 'is not' (identity),
            # which only worked by accident of CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_document(**req_copy)
    def test_delete_document_value_error_with_retries(self):
        # Enable retries and run test_delete_document_value_error.
        _service.enable_retries()
        self.test_delete_document_value_error()
        # Disable retries and run test_delete_document_value_error.
        _service.disable_retries()
        self.test_delete_document_value_error()
class TestGetDocument():
    """
    Test Class for get_document

    Exercises the GET /{db}/{doc_id} operation with all params, with only
    required params, and with a missing required param (ValueError).
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # Match a slash-free URL literally; otherwise build a regex tolerating
        # any number of trailing slashes so the mock still intercepts the call.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_get_document_all_params(self):
        """
        get_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = '{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        if_none_match = 'testString'
        attachments = False
        att_encoding_info = False
        conflicts = False
        deleted_conflicts = False
        latest = False
        local_seq = False
        meta = False
        rev = 'testString'
        revs = False
        revs_info = False
        # Invoke method
        response = _service.get_document(
            db,
            doc_id,
            if_none_match=if_none_match,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            conflicts=conflicts,
            deleted_conflicts=deleted_conflicts,
            latest=latest,
            local_seq=local_seq,
            meta=meta,
            rev=rev,
            revs=revs,
            revs_info=revs_info,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'conflicts={}'.format('true' if conflicts else 'false') in query_string
        assert 'deleted_conflicts={}'.format('true' if deleted_conflicts else 'false') in query_string
        assert 'latest={}'.format('true' if latest else 'false') in query_string
        assert 'local_seq={}'.format('true' if local_seq else 'false') in query_string
        assert 'meta={}'.format('true' if meta else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string
        assert 'revs={}'.format('true' if revs else 'false') in query_string
        assert 'revs_info={}'.format('true' if revs_info else 'false') in query_string
    def test_get_document_all_params_with_retries(self):
        # Enable retries and run test_get_document_all_params.
        _service.enable_retries()
        self.test_get_document_all_params()
        # Disable retries and run test_get_document_all_params.
        _service.disable_retries()
        self.test_get_document_all_params()
    @responses.activate
    def test_get_document_required_params(self):
        """
        test_get_document_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = '{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Invoke method
        response = _service.get_document(
            db,
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_get_document_required_params_with_retries(self):
        # Enable retries and run test_get_document_required_params.
        _service.enable_retries()
        self.test_get_document_required_params()
        # Disable retries and run test_get_document_required_params.
        _service.disable_retries()
        self.test_get_document_required_params()
    @responses.activate
    def test_get_document_value_error(self):
        """
        test_get_document_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = '{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict.keys():
            # Compare keys with != (value equality), not 'is not' (identity),
            # which only worked by accident of CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_document(**req_copy)
    def test_get_document_value_error_with_retries(self):
        # Enable retries and run test_get_document_value_error.
        _service.enable_retries()
        self.test_get_document_value_error()
        # Disable retries and run test_get_document_value_error.
        _service.disable_retries()
        self.test_get_document_value_error()
class TestGetDocumentAsMixed():
    """
    Test Class for get_document_as_mixed

    Exercises GET /{db}/{doc_id} returning a multipart/mixed body, with all
    params, with only required params, and with a missing required param.
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # Match a slash-free URL literally; otherwise build a regex tolerating
        # any number of trailing slashes so the mock still intercepts the call.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_get_document_as_mixed_all_params(self):
        """
        get_document_as_mixed()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='multipart/mixed',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        if_none_match = 'testString'
        attachments = False
        att_encoding_info = False
        conflicts = False
        deleted_conflicts = False
        latest = False
        local_seq = False
        meta = False
        rev = 'testString'
        revs = False
        revs_info = False
        # Invoke method
        response = _service.get_document_as_mixed(
            db,
            doc_id,
            if_none_match=if_none_match,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            conflicts=conflicts,
            deleted_conflicts=deleted_conflicts,
            latest=latest,
            local_seq=local_seq,
            meta=meta,
            rev=rev,
            revs=revs,
            revs_info=revs_info,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'conflicts={}'.format('true' if conflicts else 'false') in query_string
        assert 'deleted_conflicts={}'.format('true' if deleted_conflicts else 'false') in query_string
        assert 'latest={}'.format('true' if latest else 'false') in query_string
        assert 'local_seq={}'.format('true' if local_seq else 'false') in query_string
        assert 'meta={}'.format('true' if meta else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string
        assert 'revs={}'.format('true' if revs else 'false') in query_string
        assert 'revs_info={}'.format('true' if revs_info else 'false') in query_string
    def test_get_document_as_mixed_all_params_with_retries(self):
        # Enable retries and run test_get_document_as_mixed_all_params.
        _service.enable_retries()
        self.test_get_document_as_mixed_all_params()
        # Disable retries and run test_get_document_as_mixed_all_params.
        _service.disable_retries()
        self.test_get_document_as_mixed_all_params()
    @responses.activate
    def test_get_document_as_mixed_required_params(self):
        """
        test_get_document_as_mixed_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='multipart/mixed',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Invoke method
        response = _service.get_document_as_mixed(
            db,
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_get_document_as_mixed_required_params_with_retries(self):
        # Enable retries and run test_get_document_as_mixed_required_params.
        _service.enable_retries()
        self.test_get_document_as_mixed_required_params()
        # Disable retries and run test_get_document_as_mixed_required_params.
        _service.disable_retries()
        self.test_get_document_as_mixed_required_params()
    @responses.activate
    def test_get_document_as_mixed_value_error(self):
        """
        test_get_document_as_mixed_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='multipart/mixed',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict.keys():
            # Compare keys with != (value equality), not 'is not' (identity),
            # which only worked by accident of CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_document_as_mixed(**req_copy)
    def test_get_document_as_mixed_value_error_with_retries(self):
        # Enable retries and run test_get_document_as_mixed_value_error.
        _service.enable_retries()
        self.test_get_document_as_mixed_value_error()
        # Disable retries and run test_get_document_as_mixed_value_error.
        _service.disable_retries()
        self.test_get_document_as_mixed_value_error()
class TestGetDocumentAsRelated():
    """
    Test Class for get_document_as_related

    Exercises GET /{db}/{doc_id} returning a multipart/related body, with all
    params, with only required params, and with a missing required param.
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # Match a slash-free URL literally; otherwise build a regex tolerating
        # any number of trailing slashes so the mock still intercepts the call.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_get_document_as_related_all_params(self):
        """
        get_document_as_related()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='multipart/related',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        if_none_match = 'testString'
        attachments = False
        att_encoding_info = False
        conflicts = False
        deleted_conflicts = False
        latest = False
        local_seq = False
        meta = False
        rev = 'testString'
        revs = False
        revs_info = False
        # Invoke method
        response = _service.get_document_as_related(
            db,
            doc_id,
            if_none_match=if_none_match,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            conflicts=conflicts,
            deleted_conflicts=deleted_conflicts,
            latest=latest,
            local_seq=local_seq,
            meta=meta,
            rev=rev,
            revs=revs,
            revs_info=revs_info,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'conflicts={}'.format('true' if conflicts else 'false') in query_string
        assert 'deleted_conflicts={}'.format('true' if deleted_conflicts else 'false') in query_string
        assert 'latest={}'.format('true' if latest else 'false') in query_string
        assert 'local_seq={}'.format('true' if local_seq else 'false') in query_string
        assert 'meta={}'.format('true' if meta else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string
        assert 'revs={}'.format('true' if revs else 'false') in query_string
        assert 'revs_info={}'.format('true' if revs_info else 'false') in query_string
    def test_get_document_as_related_all_params_with_retries(self):
        # Enable retries and run test_get_document_as_related_all_params.
        _service.enable_retries()
        self.test_get_document_as_related_all_params()
        # Disable retries and run test_get_document_as_related_all_params.
        _service.disable_retries()
        self.test_get_document_as_related_all_params()
    @responses.activate
    def test_get_document_as_related_required_params(self):
        """
        test_get_document_as_related_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='multipart/related',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Invoke method
        response = _service.get_document_as_related(
            db,
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
    def test_get_document_as_related_required_params_with_retries(self):
        # Enable retries and run test_get_document_as_related_required_params.
        _service.enable_retries()
        self.test_get_document_as_related_required_params()
        # Disable retries and run test_get_document_as_related_required_params.
        _service.disable_retries()
        self.test_get_document_as_related_required_params()
    @responses.activate
    def test_get_document_as_related_value_error(self):
        """
        test_get_document_as_related_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='multipart/related',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict.keys():
            # Compare keys with != (value equality), not 'is not' (identity),
            # which only worked by accident of CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_document_as_related(**req_copy)
    def test_get_document_as_related_value_error_with_retries(self):
        # Enable retries and run test_get_document_as_related_value_error.
        _service.enable_retries()
        self.test_get_document_as_related_value_error()
        # Disable retries and run test_get_document_as_related_value_error.
        _service.disable_retries()
        self.test_get_document_as_related_value_error()
class TestGetDocumentAsStream():
    """
    Test Class for get_document_as_stream

    Exercises GET /{db}/{doc_id} returning a streamed JSON body, with all
    params, with only required params, and with a missing required param.
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # Match a slash-free URL literally; otherwise build a regex tolerating
        # any number of trailing slashes so the mock still intercepts the call.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_get_document_as_stream_all_params(self):
        """
        get_document_as_stream()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        if_none_match = 'testString'
        attachments = False
        att_encoding_info = False
        conflicts = False
        deleted_conflicts = False
        latest = False
        local_seq = False
        meta = False
        rev = 'testString'
        revs = False
        revs_info = False
        # Invoke method
        response = _service.get_document_as_stream(
            db,
            doc_id,
            if_none_match=if_none_match,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            conflicts=conflicts,
            deleted_conflicts=deleted_conflicts,
            latest=latest,
            local_seq=local_seq,
            meta=meta,
            rev=rev,
            revs=revs,
            revs_info=revs_info,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'conflicts={}'.format('true' if conflicts else 'false') in query_string
        assert 'deleted_conflicts={}'.format('true' if deleted_conflicts else 'false') in query_string
        assert 'latest={}'.format('true' if latest else 'false') in query_string
        assert 'local_seq={}'.format('true' if local_seq else 'false') in query_string
        assert 'meta={}'.format('true' if meta else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string
        assert 'revs={}'.format('true' if revs else 'false') in query_string
        assert 'revs_info={}'.format('true' if revs_info else 'false') in query_string
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response
    def test_get_document_as_stream_all_params_with_retries(self):
        # Enable retries and run test_get_document_as_stream_all_params.
        _service.enable_retries()
        self.test_get_document_as_stream_all_params()
        # Disable retries and run test_get_document_as_stream_all_params.
        _service.disable_retries()
        self.test_get_document_as_stream_all_params()
    @responses.activate
    def test_get_document_as_stream_required_params(self):
        """
        test_get_document_as_stream_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Invoke method
        response = _service.get_document_as_stream(
            db,
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response
    def test_get_document_as_stream_required_params_with_retries(self):
        # Enable retries and run test_get_document_as_stream_required_params.
        _service.enable_retries()
        self.test_get_document_as_stream_required_params()
        # Disable retries and run test_get_document_as_stream_required_params.
        _service.disable_retries()
        self.test_get_document_as_stream_required_params()
    @responses.activate
    def test_get_document_as_stream_value_error(self):
        """
        test_get_document_as_stream_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict.keys():
            # Compare keys with != (value equality), not 'is not' (identity),
            # which only worked by accident of CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_document_as_stream(**req_copy)
    def test_get_document_as_stream_value_error_with_retries(self):
        # Enable retries and run test_get_document_as_stream_value_error.
        _service.enable_retries()
        self.test_get_document_as_stream_value_error()
        # Disable retries and run test_get_document_as_stream_value_error.
        _service.disable_retries()
        self.test_get_document_as_stream_value_error()
class TestPutDocument():
"""
Test Class for put_document
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_put_document_all_params(self):
        """
        put_document()

        Invoke put_document with every optional parameter supplied and verify
        the mocked PUT request and its query string.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Construct a dict representation of a Attachment model
        attachment_model = {}
        attachment_model['content_type'] = 'testString'
        attachment_model['data'] = 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4='
        attachment_model['digest'] = 'testString'
        attachment_model['encoded_length'] = 0
        attachment_model['encoding'] = 'testString'
        attachment_model['follows'] = True
        attachment_model['length'] = 0
        attachment_model['revpos'] = 1
        attachment_model['stub'] = True
        # Construct a dict representation of a Revisions model
        revisions_model = {}
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1
        # Construct a dict representation of a DocumentRevisionStatus model
        document_revision_status_model = {}
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'
        # Construct a dict representation of a Document model
        document_model = {}
        document_model['_attachments'] = {}
        document_model['_conflicts'] = ['testString']
        document_model['_deleted'] = True
        document_model['_deleted_conflicts'] = ['testString']
        document_model['_id'] = 'exampleid'
        document_model['_local_seq'] = 'testString'
        document_model['_rev'] = 'testString'
        document_model['_revisions'] = revisions_model
        document_model['_revs_info'] = [document_revision_status_model]
        document_model['brand'] = 'Foo'
        document_model['colours'] = '["red","green","black","blue"]'
        document_model['description'] = 'Slim Colourful Design Electronic Cooking Appliance for ...'
        document_model['image'] = 'assets/img/0gmsnghhew.jpg'
        document_model['keywords'] = '["Foo","Scales","Weight","Digital","Kitchen"]'
        document_model['name'] = 'Digital Kitchen Scales'
        document_model['price'] = '14.99'
        document_model['productid'] = '1000042'
        document_model['taxonomy'] = '["Home","Kitchen","Small Appliances"]'
        document_model['type'] = 'product'
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        document = document_model
        content_type = 'application/json'
        if_match = 'testString'
        batch = 'ok'
        new_edits = False
        rev = 'testString'
        # Invoke method
        response = _service.put_document(
            db,
            doc_id,
            document,
            content_type=content_type,
            if_match=if_match,
            batch=batch,
            new_edits=new_edits,
            rev=rev,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'batch={}'.format(batch) in query_string
        assert 'new_edits={}'.format('true' if new_edits else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        # NOTE(review): no body assertions follow the comment above — the
        # decompressed request body is never actually validated; presumably
        # the generator omitted them. Consider asserting against document_model.
    def test_put_document_all_params_with_retries(self):
        """Run the all-params put_document test with retries enabled, then disabled."""
        # Enable retries and run test_put_document_all_params.
        _service.enable_retries()
        self.test_put_document_all_params()
        # Disable retries and run test_put_document_all_params.
        _service.disable_retries()
        self.test_put_document_all_params()
    @responses.activate
    def test_put_document_required_params(self):
        """
        test_put_document_required_params()

        Invoke put_document with only the required parameters and verify the
        mocked PUT request succeeds.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Construct a dict representation of a Attachment model
        attachment_model = {}
        attachment_model['content_type'] = 'testString'
        attachment_model['data'] = 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4='
        attachment_model['digest'] = 'testString'
        attachment_model['encoded_length'] = 0
        attachment_model['encoding'] = 'testString'
        attachment_model['follows'] = True
        attachment_model['length'] = 0
        attachment_model['revpos'] = 1
        attachment_model['stub'] = True
        # Construct a dict representation of a Revisions model
        revisions_model = {}
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1
        # Construct a dict representation of a DocumentRevisionStatus model
        document_revision_status_model = {}
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'
        # Construct a dict representation of a Document model
        document_model = {}
        document_model['_attachments'] = {}
        document_model['_conflicts'] = ['testString']
        document_model['_deleted'] = True
        document_model['_deleted_conflicts'] = ['testString']
        document_model['_id'] = 'exampleid'
        document_model['_local_seq'] = 'testString'
        document_model['_rev'] = 'testString'
        document_model['_revisions'] = revisions_model
        document_model['_revs_info'] = [document_revision_status_model]
        document_model['brand'] = 'Foo'
        document_model['colours'] = '["red","green","black","blue"]'
        document_model['description'] = 'Slim Colourful Design Electronic Cooking Appliance for ...'
        document_model['image'] = 'assets/img/0gmsnghhew.jpg'
        document_model['keywords'] = '["Foo","Scales","Weight","Digital","Kitchen"]'
        document_model['name'] = 'Digital Kitchen Scales'
        document_model['price'] = '14.99'
        document_model['productid'] = '1000042'
        document_model['taxonomy'] = '["Home","Kitchen","Small Appliances"]'
        document_model['type'] = 'product'
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        document = document_model
        # Invoke method
        response = _service.put_document(
            db,
            doc_id,
            document,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        # NOTE(review): no body assertions follow the comment above — the
        # decompressed request body is never actually validated; presumably
        # the generator omitted them. Consider asserting against document_model.
    def test_put_document_required_params_with_retries(self):
        """Run the required-params put_document test with retries enabled, then disabled."""
        # Enable retries and run test_put_document_required_params.
        _service.enable_retries()
        self.test_put_document_required_params()
        # Disable retries and run test_put_document_required_params.
        _service.disable_retries()
        self.test_put_document_required_params()
@responses.activate
def test_put_document_value_error(self):
    """
    test_put_document_value_error()
    """
    # Mock the PUT document endpoint.
    url = self.preprocess_url(_base_url + '/testString/testString')
    mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
    responses.add(responses.PUT,
                  url,
                  body=mock_response,
                  content_type='application/json',
                  status=201)
    # Attachment model, built as in the sibling tests (not referenced below).
    attachment_model = {
        'content_type': 'testString',
        'data': 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4=',
        'digest': 'testString',
        'encoded_length': 0,
        'encoding': 'testString',
        'follows': True,
        'length': 0,
        'revpos': 1,
        'stub': True,
    }
    # Revision bookkeeping embedded in the document payload.
    revisions_model = {'ids': ['testString'], 'start': 1}
    document_revision_status_model = {'rev': 'testString', 'status': 'available'}
    # Document payload under test.
    document_model = {
        '_attachments': {},
        '_conflicts': ['testString'],
        '_deleted': True,
        '_deleted_conflicts': ['testString'],
        '_id': 'exampleid',
        '_local_seq': 'testString',
        '_rev': 'testString',
        '_revisions': revisions_model,
        '_revs_info': [document_revision_status_model],
        'brand': 'Foo',
        'colours': '["red","green","black","blue"]',
        'description': 'Slim Colourful Design Electronic Cooking Appliance for ...',
        'image': 'assets/img/0gmsnghhew.jpg',
        'keywords': '["Foo","Scales","Weight","Digital","Kitchen"]',
        'name': 'Digital Kitchen Scales',
        'price': '14.99',
        'productid': '1000042',
        'taxonomy': '["Home","Kitchen","Small Appliances"]',
        'type': 'product',
    }
    # Dropping any single required argument must raise a ValueError.
    req_param_dict = {
        "db": 'testString',
        "doc_id": 'testString',
        "document": document_model,
    }
    for param in req_param_dict:
        req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
        with pytest.raises(ValueError):
            _service.put_document(**req_copy)
def test_put_document_value_error_with_retries(self):
    """Run the value-error PUT document test with retries on, then off."""
    for toggle in (_service.enable_retries, _service.disable_retries):
        toggle()
        self.test_put_document_value_error()
# endregion
##############################################################################
# End of Service: Documents
##############################################################################
##############################################################################
# Start of Service: DesignDocuments
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """A client is built when external config supplies the auth type."""
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        service = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """Omitting the authenticator raises a descriptive ValueError."""
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestHeadDesignDocument():
    """
    Test Class for head_design_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        decoded = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        normalized = urllib.parse.quote(decoded, safe=':/')
        if re.fullmatch('.*/+', normalized) is not None:
            # Trailing slash(es): let the mock match any number of them.
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized

    def _register_mock(self):
        """Register the mocked HEAD endpoint shared by the tests in this class."""
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)

    @responses.activate
    def test_head_design_document_all_params(self):
        """
        head_design_document()
        """
        self._register_mock()

        response = _service.head_design_document(
            'testString',
            'testString',
            if_none_match='testString',
            headers={}
        )

        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_design_document_all_params_with_retries(self):
        """Run the all-params test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_head_design_document_all_params()

    @responses.activate
    def test_head_design_document_required_params(self):
        """
        test_head_design_document_required_params()
        """
        self._register_mock()

        response = _service.head_design_document(
            'testString',
            'testString',
            headers={}
        )

        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_design_document_required_params_with_retries(self):
        """Run the required-params test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_head_design_document_required_params()

    @responses.activate
    def test_head_design_document_value_error(self):
        """
        test_head_design_document_value_error()
        """
        self._register_mock()

        # Dropping any single required argument must raise a ValueError.
        req_param_dict = {
            "db": 'testString',
            "ddoc": 'testString',
        }
        for param in req_param_dict:
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_design_document(**req_copy)

    def test_head_design_document_value_error_with_retries(self):
        """Run the value-error test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_head_design_document_value_error()
class TestDeleteDesignDocument():
    """
    Test Class for delete_design_document
    """

    # Mocked server reply used by every DELETE test in this class.
    _MOCK_RESPONSE = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        decoded = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        normalized = urllib.parse.quote(decoded, safe=':/')
        if re.fullmatch('.*/+', normalized) is not None:
            # Trailing slash(es): let the mock match any number of them.
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized

    def _register_mock(self):
        """Register the mocked DELETE endpoint shared by the tests in this class."""
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        responses.add(responses.DELETE,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)

    @responses.activate
    def test_delete_design_document_all_params(self):
        """
        delete_design_document()
        """
        self._register_mock()

        response = _service.delete_design_document(
            'testString',
            'testString',
            if_match='testString',
            batch='ok',
            rev='testString',
            headers={}
        )

        assert len(responses.calls) == 1
        assert response.status_code == 200
        # The optional batch/rev arguments must land in the query string.
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'batch=ok' in query_string
        assert 'rev=testString' in query_string

    def test_delete_design_document_all_params_with_retries(self):
        """Run the all-params test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_delete_design_document_all_params()

    @responses.activate
    def test_delete_design_document_required_params(self):
        """
        test_delete_design_document_required_params()
        """
        self._register_mock()

        response = _service.delete_design_document(
            'testString',
            'testString',
            headers={}
        )

        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_delete_design_document_required_params_with_retries(self):
        """Run the required-params test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_delete_design_document_required_params()

    @responses.activate
    def test_delete_design_document_value_error(self):
        """
        test_delete_design_document_value_error()
        """
        self._register_mock()

        # Dropping any single required argument must raise a ValueError.
        req_param_dict = {
            "db": 'testString',
            "ddoc": 'testString',
        }
        for param in req_param_dict:
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_design_document(**req_copy)

    def test_delete_design_document_value_error_with_retries(self):
        """Run the value-error test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_delete_design_document_value_error()
class TestGetDesignDocument():
    """
    Test Class for get_design_document
    """

    # Mocked server reply used by every GET test in this class.
    _MOCK_RESPONSE = '{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}], "autoupdate": true, "filters": {"mapKey": "inner"}, "indexes": {"mapKey": {"analyzer": {"name": "classic", "stopwords": ["stopwords"], "fields": {"mapKey": {"name": "classic", "stopwords": ["stopwords"]}}}, "index": "index"}}, "language": "javascript", "options": {"partitioned": false}, "validate_doc_update": "validate_doc_update", "views": {"mapKey": {"map": "map", "reduce": "reduce"}}, "st_indexes": {"mapKey": {"index": "index"}}}'

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        decoded = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        normalized = urllib.parse.quote(decoded, safe=':/')
        if re.fullmatch('.*/+', normalized) is not None:
            # Trailing slash(es): let the mock match any number of them.
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized

    def _register_mock(self):
        """Register the mocked GET endpoint shared by the tests in this class."""
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        responses.add(responses.GET,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)

    @responses.activate
    def test_get_design_document_all_params(self):
        """
        get_design_document()
        """
        self._register_mock()

        # All boolean query flags are exercised with their False value.
        bool_flags = dict(
            attachments=False,
            att_encoding_info=False,
            conflicts=False,
            deleted_conflicts=False,
            latest=False,
            local_seq=False,
            meta=False,
            revs=False,
            revs_info=False,
        )
        response = _service.get_design_document(
            'testString',
            'testString',
            if_none_match='testString',
            rev='testString',
            **bool_flags,
            headers={}
        )

        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Every optional argument must be serialized into the query string.
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'rev=testString' in query_string
        for name, value in bool_flags.items():
            assert '{}={}'.format(name, 'true' if value else 'false') in query_string

    def test_get_design_document_all_params_with_retries(self):
        """Run the all-params test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_design_document_all_params()

    @responses.activate
    def test_get_design_document_required_params(self):
        """
        test_get_design_document_required_params()
        """
        self._register_mock()

        response = _service.get_design_document(
            'testString',
            'testString',
            headers={}
        )

        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_design_document_required_params_with_retries(self):
        """Run the required-params test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_design_document_required_params()

    @responses.activate
    def test_get_design_document_value_error(self):
        """
        test_get_design_document_value_error()
        """
        self._register_mock()

        # Dropping any single required argument must raise a ValueError.
        req_param_dict = {
            "db": 'testString',
            "ddoc": 'testString',
        }
        for param in req_param_dict:
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_design_document(**req_copy)

    def test_get_design_document_value_error_with_retries(self):
        """Run the value-error test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_get_design_document_value_error()
class TestPutDesignDocument():
    """
    Test Class for put_design_document

    The original generated tests rebuilt the same DesignDocument payload
    verbatim in three places and also constructed six model dicts
    (attachment, analyzer, analyzer configuration, search index definition,
    views map/reduce, geo index definition) that were never referenced by
    the request payload.  The dead locals are removed and the shared setup
    is factored into private helpers; the public test methods and their
    behavior are unchanged.
    """

    # Mocked server reply used by every PUT test in this class.
    _MOCK_RESPONSE = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        decoded = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        normalized = urllib.parse.quote(decoded, safe=':/')
        if re.fullmatch('.*/+', normalized) is not None:
            # Trailing slash(es): let the mock match any number of them.
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized

    def _register_mock(self):
        """Register the mocked PUT endpoint shared by the tests in this class."""
        url = self.preprocess_url(_base_url + '/testString/_design/testString')
        responses.add(responses.PUT,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=201)

    def _make_design_document(self):
        """Build the DesignDocument dict payload shared by the tests here."""
        revisions_model = {'ids': ['testString'], 'start': 1}
        document_revision_status_model = {'rev': 'testString', 'status': 'available'}
        design_document_options_model = {'partitioned': True}
        return {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'autoupdate': True,
            'filters': {},
            'indexes': {},
            'language': 'javascript',
            'options': design_document_options_model,
            'validate_doc_update': 'testString',
            'views': {},
            'st_indexes': {},
            'foo': 'testString',
        }

    @responses.activate
    def test_put_design_document_all_params(self):
        """
        put_design_document()
        """
        self._register_mock()
        design_document = self._make_design_document()

        response = _service.put_design_document(
            'testString',
            'testString',
            design_document,
            if_match='testString',
            batch='ok',
            new_edits=False,
            rev='testString',
            headers={}
        )

        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Every optional argument must be serialized into the query string.
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'batch=ok' in query_string
        assert 'new_edits=false' in query_string
        assert 'rev=testString' in query_string
        # The SDK gzip-compresses the request body; decompress before comparing.
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body == design_document

    def test_put_design_document_all_params_with_retries(self):
        """Run the all-params test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_put_design_document_all_params()

    @responses.activate
    def test_put_design_document_required_params(self):
        """
        test_put_design_document_required_params()
        """
        self._register_mock()
        design_document = self._make_design_document()

        response = _service.put_design_document(
            'testString',
            'testString',
            design_document,
            headers={}
        )

        assert len(responses.calls) == 1
        assert response.status_code == 201
        # The SDK gzip-compresses the request body; decompress before comparing.
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body == design_document

    def test_put_design_document_required_params_with_retries(self):
        """Run the required-params test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_put_design_document_required_params()

    @responses.activate
    def test_put_design_document_value_error(self):
        """
        test_put_design_document_value_error()
        """
        self._register_mock()

        # Dropping any single required argument must raise a ValueError.
        req_param_dict = {
            "db": 'testString',
            "ddoc": 'testString',
            "design_document": self._make_design_document(),
        }
        for param in req_param_dict:
            req_copy = {key: (None if key == param else val) for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.put_design_document(**req_copy)

    def test_put_design_document_value_error_with_retries(self):
        """Run the value-error test with retries enabled, then disabled."""
        for toggle in (_service.enable_retries, _service.disable_retries):
            toggle()
            self.test_put_design_document_value_error()
class TestGetDesignDocumentInformation():
    """
    Test Class for get_design_document_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in one or more slashes are registered as a regex so the
        # mock matches any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_design_document_information_all_params(self):
        """
        get_design_document_information()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_info')
        mock_response = '{"name": "name", "view_index": {"compact_running": false, "language": "language", "signature": "signature", "sizes": {"active": 6, "external": 8, "file": 4}, "updater_running": false, "waiting_clients": 0, "waiting_commit": true}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'

        # Invoke method
        response = _service.get_design_document_information(
            db,
            ddoc,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_design_document_information_all_params_with_retries(self):
        # Enable retries and run test_get_design_document_information_all_params.
        _service.enable_retries()
        self.test_get_design_document_information_all_params()

        # Disable retries and run test_get_design_document_information_all_params.
        _service.disable_retries()
        self.test_get_design_document_information_all_params()

    @responses.activate
    def test_get_design_document_information_value_error(self):
        """
        test_get_design_document_information_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_info')
        mock_response = '{"name": "name", "view_index": {"compact_running": false, "language": "language", "signature": "signature", "sizes": {"active": 6, "external": 8, "file": 4}, "updater_running": false, "waiting_clients": 0, "waiting_commit": true}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
        }
        for param in req_param_dict:
            # Null out one required param at a time; compare keys by equality
            # (not identity) so the check does not rely on string interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_design_document_information(**req_copy)

    def test_get_design_document_information_value_error_with_retries(self):
        # Enable retries and run test_get_design_document_information_value_error.
        _service.enable_retries()
        self.test_get_design_document_information_value_error()

        # Disable retries and run test_get_design_document_information_value_error.
        _service.disable_retries()
        self.test_get_design_document_information_value_error()
class TestPostDesignDocs():
    """
    Test Class for post_design_docs
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in one or more slashes are registered as a regex so the
        # mock matches any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_design_docs_all_params(self):
        """
        post_design_docs()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design_docs')
        mock_response = '{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = '0007741142412418284'
        accept = 'application/json'

        # Invoke method
        response = _service.post_design_docs(
            db,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            key=key,
            keys=keys,
            startkey=startkey,
            accept=accept,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == False
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['testString']
        assert req_body['startkey'] == '0007741142412418284'

    def test_post_design_docs_all_params_with_retries(self):
        # Enable retries and run test_post_design_docs_all_params.
        _service.enable_retries()
        self.test_post_design_docs_all_params()

        # Disable retries and run test_post_design_docs_all_params.
        _service.disable_retries()
        self.test_post_design_docs_all_params()

    @responses.activate
    def test_post_design_docs_required_params(self):
        """
        test_post_design_docs_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design_docs')
        mock_response = '{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = '0007741142412418284'

        # Invoke method (no Accept header override)
        response = _service.post_design_docs(
            db,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            key=key,
            keys=keys,
            startkey=startkey,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == False
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['testString']
        assert req_body['startkey'] == '0007741142412418284'

    def test_post_design_docs_required_params_with_retries(self):
        # Enable retries and run test_post_design_docs_required_params.
        _service.enable_retries()
        self.test_post_design_docs_required_params()

        # Disable retries and run test_post_design_docs_required_params.
        _service.disable_retries()
        self.test_post_design_docs_required_params()

    @responses.activate
    def test_post_design_docs_value_error(self):
        """
        test_post_design_docs_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design_docs')
        mock_response = '{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = '0007741142412418284'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Null out one required param at a time; compare keys by equality
            # (not identity) so the check does not rely on string interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_design_docs(**req_copy)

    def test_post_design_docs_value_error_with_retries(self):
        # Enable retries and run test_post_design_docs_value_error.
        _service.enable_retries()
        self.test_post_design_docs_value_error()

        # Disable retries and run test_post_design_docs_value_error.
        _service.disable_retries()
        self.test_post_design_docs_value_error()
class TestPostDesignDocsQueries():
    """
    Test Class for post_design_docs_queries
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in one or more slashes are registered as a regex so the
        # mock matches any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_design_docs_queries_all_params(self):
        """
        post_design_docs_queries()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design_docs/queries')
        mock_response = '{"results": [{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a AllDocsQuery model
        all_docs_query_model = {}
        all_docs_query_model['att_encoding_info'] = False
        all_docs_query_model['attachments'] = False
        all_docs_query_model['conflicts'] = False
        all_docs_query_model['descending'] = False
        all_docs_query_model['include_docs'] = False
        all_docs_query_model['inclusive_end'] = True
        all_docs_query_model['limit'] = 0
        all_docs_query_model['skip'] = 0
        all_docs_query_model['update_seq'] = False
        all_docs_query_model['endkey'] = 'testString'
        all_docs_query_model['key'] = 'testString'
        all_docs_query_model['keys'] = ['small-appliances:1000042', 'small-appliances:1000043']
        all_docs_query_model['startkey'] = 'testString'

        # Set up parameter values
        db = 'testString'
        queries = [all_docs_query_model]
        accept = 'application/json'

        # Invoke method
        response = _service.post_design_docs_queries(
            db,
            queries,
            accept=accept,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['queries'] == [all_docs_query_model]

    def test_post_design_docs_queries_all_params_with_retries(self):
        # Enable retries and run test_post_design_docs_queries_all_params.
        _service.enable_retries()
        self.test_post_design_docs_queries_all_params()

        # Disable retries and run test_post_design_docs_queries_all_params.
        _service.disable_retries()
        self.test_post_design_docs_queries_all_params()

    @responses.activate
    def test_post_design_docs_queries_required_params(self):
        """
        test_post_design_docs_queries_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design_docs/queries')
        mock_response = '{"results": [{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a AllDocsQuery model
        all_docs_query_model = {}
        all_docs_query_model['att_encoding_info'] = False
        all_docs_query_model['attachments'] = False
        all_docs_query_model['conflicts'] = False
        all_docs_query_model['descending'] = False
        all_docs_query_model['include_docs'] = False
        all_docs_query_model['inclusive_end'] = True
        all_docs_query_model['limit'] = 0
        all_docs_query_model['skip'] = 0
        all_docs_query_model['update_seq'] = False
        all_docs_query_model['endkey'] = 'testString'
        all_docs_query_model['key'] = 'testString'
        all_docs_query_model['keys'] = ['small-appliances:1000042', 'small-appliances:1000043']
        all_docs_query_model['startkey'] = 'testString'

        # Set up parameter values
        db = 'testString'
        queries = [all_docs_query_model]

        # Invoke method (no Accept header override)
        response = _service.post_design_docs_queries(
            db,
            queries,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['queries'] == [all_docs_query_model]

    def test_post_design_docs_queries_required_params_with_retries(self):
        # Enable retries and run test_post_design_docs_queries_required_params.
        _service.enable_retries()
        self.test_post_design_docs_queries_required_params()

        # Disable retries and run test_post_design_docs_queries_required_params.
        _service.disable_retries()
        self.test_post_design_docs_queries_required_params()

    @responses.activate
    def test_post_design_docs_queries_value_error(self):
        """
        test_post_design_docs_queries_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design_docs/queries')
        mock_response = '{"results": [{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a AllDocsQuery model
        all_docs_query_model = {}
        all_docs_query_model['att_encoding_info'] = False
        all_docs_query_model['attachments'] = False
        all_docs_query_model['conflicts'] = False
        all_docs_query_model['descending'] = False
        all_docs_query_model['include_docs'] = False
        all_docs_query_model['inclusive_end'] = True
        all_docs_query_model['limit'] = 0
        all_docs_query_model['skip'] = 0
        all_docs_query_model['update_seq'] = False
        all_docs_query_model['endkey'] = 'testString'
        all_docs_query_model['key'] = 'testString'
        all_docs_query_model['keys'] = ['small-appliances:1000042', 'small-appliances:1000043']
        all_docs_query_model['startkey'] = 'testString'

        # Set up parameter values
        db = 'testString'
        queries = [all_docs_query_model]

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "queries": queries,
        }
        for param in req_param_dict:
            # Null out one required param at a time; compare keys by equality
            # (not identity) so the check does not rely on string interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_design_docs_queries(**req_copy)

    def test_post_design_docs_queries_value_error_with_retries(self):
        # Enable retries and run test_post_design_docs_queries_value_error.
        _service.enable_retries()
        self.test_post_design_docs_queries_value_error()

        # Disable retries and run test_post_design_docs_queries_value_error.
        _service.disable_retries()
        self.test_post_design_docs_queries_value_error()
# endregion
##############################################################################
# End of Service: DesignDocuments
##############################################################################
##############################################################################
# Start of Service: Views
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # NOTE(review): this sets a process-wide env var and never removes it —
        # presumably later tests tolerate that; confirm.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'

        service = CloudantV1.new_instance(service_name='TEST_SERVICE')

        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # Construction without any configured authenticator must be rejected.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestPostView():
    """
    Test Class for post_view
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in one or more slashes are registered as a regex so the
        # mock matches any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_view_all_params(self):
        """
        post_view()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_view/testString')
        mock_response = '{"total_rows": 0, "update_seq": "update_seq", "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "anyValue", "value": "anyValue"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        view = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 0
        skip = 0
        update_seq = False
        endkey = 'testString'
        endkey_docid = 'testString'
        group = False
        group_level = 1
        key = 'testString'
        keys = ['testString']
        reduce = True
        stable = False
        startkey = 'testString'
        startkey_docid = 'testString'
        update = 'true'

        # Invoke method
        response = _service.post_view(
            db,
            ddoc,
            view,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            endkey_docid=endkey_docid,
            group=group,
            group_level=group_level,
            key=key,
            keys=keys,
            reduce=reduce,
            stable=stable,
            startkey=startkey,
            startkey_docid=startkey_docid,
            update=update,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == False
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 0
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['endkey_docid'] == 'testString'
        assert req_body['group'] == False
        assert req_body['group_level'] == 1
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['testString']
        assert req_body['reduce'] == True
        assert req_body['stable'] == False
        assert req_body['startkey'] == 'testString'
        assert req_body['startkey_docid'] == 'testString'
        assert req_body['update'] == 'true'

    def test_post_view_all_params_with_retries(self):
        # Enable retries and run test_post_view_all_params.
        _service.enable_retries()
        self.test_post_view_all_params()

        # Disable retries and run test_post_view_all_params.
        _service.disable_retries()
        self.test_post_view_all_params()

    @responses.activate
    def test_post_view_value_error(self):
        """
        test_post_view_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_view/testString')
        mock_response = '{"total_rows": 0, "update_seq": "update_seq", "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "anyValue", "value": "anyValue"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        view = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 0
        skip = 0
        update_seq = False
        endkey = 'testString'
        endkey_docid = 'testString'
        group = False
        group_level = 1
        key = 'testString'
        keys = ['testString']
        reduce = True
        stable = False
        startkey = 'testString'
        startkey_docid = 'testString'
        update = 'true'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "view": view,
        }
        for param in req_param_dict:
            # Null out one required param at a time; compare keys by equality
            # (not identity) so the check does not rely on string interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_view(**req_copy)

    def test_post_view_value_error_with_retries(self):
        # Enable retries and run test_post_view_value_error.
        _service.enable_retries()
        self.test_post_view_value_error()

        # Disable retries and run test_post_view_value_error.
        _service.disable_retries()
        self.test_post_view_value_error()
class TestPostViewAsStream():
    """
    Test Class for post_view_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in one or more slashes are registered as a regex so the
        # mock matches any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_view_as_stream_all_params(self):
        """
        post_view_as_stream()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_view/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        view = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = True
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        endkey_docid = 'testString'
        group = False
        group_level = 1
        key = 'testString'
        keys = ['examplekey']
        reduce = True
        stable = False
        startkey = 'testString'
        startkey_docid = 'testString'
        update = 'true'

        # Invoke method
        response = _service.post_view_as_stream(
            db,
            ddoc,
            view,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            endkey_docid=endkey_docid,
            group=group,
            group_level=group_level,
            key=key,
            keys=keys,
            reduce=reduce,
            stable=stable,
            startkey=startkey,
            startkey_docid=startkey_docid,
            update=update,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == True
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['endkey_docid'] == 'testString'
        assert req_body['group'] == False
        assert req_body['group_level'] == 1
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['examplekey']
        assert req_body['reduce'] == True
        assert req_body['stable'] == False
        assert req_body['startkey'] == 'testString'
        assert req_body['startkey_docid'] == 'testString'
        assert req_body['update'] == 'true'

        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_view_as_stream_all_params_with_retries(self):
        # Enable retries and run test_post_view_as_stream_all_params.
        _service.enable_retries()
        self.test_post_view_as_stream_all_params()

        # Disable retries and run test_post_view_as_stream_all_params.
        _service.disable_retries()
        self.test_post_view_as_stream_all_params()

    @responses.activate
    def test_post_view_as_stream_value_error(self):
        """
        test_post_view_as_stream_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_view/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        view = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = True
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        endkey_docid = 'testString'
        group = False
        group_level = 1
        key = 'testString'
        keys = ['examplekey']
        reduce = True
        stable = False
        startkey = 'testString'
        startkey_docid = 'testString'
        update = 'true'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "view": view,
        }
        for param in req_param_dict:
            # Null out one required param at a time; compare keys by equality
            # (not identity) so the check does not rely on string interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_view_as_stream(**req_copy)

    def test_post_view_as_stream_value_error_with_retries(self):
        # Enable retries and run test_post_view_as_stream_value_error.
        _service.enable_retries()
        self.test_post_view_as_stream_value_error()

        # Disable retries and run test_post_view_as_stream_value_error.
        _service.disable_retries()
        self.test_post_view_as_stream_value_error()
class TestPostViewQueries():
    """
    Test Class for post_view_queries
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs without trailing slashes are matched literally; otherwise use a
        # pattern tolerating any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_view_queries_all_params(self):
        """
        post_view_queries()

        Invoke post_view_queries with every parameter set and validate the
        request body sent to the mocked server.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_view/testString/queries')
        mock_response = '{"results": [{"total_rows": 0, "update_seq": "update_seq", "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "anyValue", "value": "anyValue"}]}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a ViewQuery model
        view_query_model = {
            'att_encoding_info': False,
            'attachments': False,
            'conflicts': False,
            'descending': False,
            'include_docs': False,
            'inclusive_end': True,
            'limit': 0,
            'skip': 0,
            'update_seq': False,
            'endkey': 'testString',
            'endkey_docid': 'testString',
            'group': False,
            'group_level': 1,
            'key': 'testString',
            'keys': ['testString'],
            'reduce': True,
            'stable': False,
            'startkey': 'testString',
            'startkey_docid': 'testString',
            'update': 'true',
        }
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        view = 'testString'
        queries = [view_query_model]
        # Invoke method
        response = _service.post_view_queries(
            db,
            ddoc,
            view,
            queries,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['queries'] == [view_query_model]

    def test_post_view_queries_all_params_with_retries(self):
        # Run the all-params test with retries enabled, then disabled again.
        _service.enable_retries()
        self.test_post_view_queries_all_params()
        _service.disable_retries()
        self.test_post_view_queries_all_params()

    @responses.activate
    def test_post_view_queries_value_error(self):
        """
        test_post_view_queries_value_error()

        Verify a ValueError is raised when any required parameter is None.
        No HTTP mock is registered: validation fails before a request is made.
        """
        # Minimal ViewQuery dict; its contents are never serialized because
        # the call fails validation first.
        view_query_model = {'limit': 0}
        req_param_dict = {
            "db": 'testString',
            "ddoc": 'testString',
            "view": 'testString',
            "queries": [view_query_model],
        }
        # Null out each required param in turn and expect a ValueError.
        for param in req_param_dict.keys():
            req_copy = {k: v if k != param else None for (k, v) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_view_queries(**req_copy)

    def test_post_view_queries_value_error_with_retries(self):
        # Run the value-error test with retries enabled, then disabled again.
        _service.enable_retries()
        self.test_post_view_queries_value_error()
        _service.disable_retries()
        self.test_post_view_queries_value_error()
class TestPostViewQueriesAsStream():
    """
    Test Class for post_view_queries_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs without trailing slashes are matched literally; otherwise use a
        # pattern tolerating any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_view_queries_as_stream_all_params(self):
        """
        post_view_queries_as_stream()

        Invoke post_view_queries_as_stream with every parameter set, validate
        the request body, and verify the streamed response content.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_view/testString/queries')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Construct a dict representation of a ViewQuery model
        view_query_model = {
            'att_encoding_info': False,
            'attachments': False,
            'conflicts': False,
            'descending': False,
            'include_docs': True,
            'inclusive_end': True,
            'limit': 5,
            'skip': 0,
            'update_seq': False,
            'endkey': 'testString',
            'endkey_docid': 'testString',
            'group': False,
            'group_level': 1,
            'key': 'testString',
            'keys': ['testString'],
            'reduce': True,
            'stable': False,
            'startkey': 'testString',
            'startkey_docid': 'testString',
            'update': 'true',
        }
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        view = 'testString'
        queries = [view_query_model]
        # Invoke method
        response = _service.post_view_queries_as_stream(
            db,
            ddoc,
            view,
            queries,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['queries'] == [view_query_model]
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_view_queries_as_stream_all_params_with_retries(self):
        # Run the all-params test with retries enabled, then disabled again.
        _service.enable_retries()
        self.test_post_view_queries_as_stream_all_params()
        _service.disable_retries()
        self.test_post_view_queries_as_stream_all_params()

    @responses.activate
    def test_post_view_queries_as_stream_value_error(self):
        """
        test_post_view_queries_as_stream_value_error()

        Verify a ValueError is raised when any required parameter is None.
        No HTTP mock is registered: validation fails before a request is made.
        """
        # Minimal ViewQuery dict; its contents are never serialized because
        # the call fails validation first.
        view_query_model = {'limit': 5}
        req_param_dict = {
            "db": 'testString',
            "ddoc": 'testString',
            "view": 'testString',
            "queries": [view_query_model],
        }
        # Null out each required param in turn and expect a ValueError.
        for param in req_param_dict.keys():
            req_copy = {k: v if k != param else None for (k, v) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_view_queries_as_stream(**req_copy)

    def test_post_view_queries_as_stream_value_error_with_retries(self):
        # Run the value-error test with retries enabled, then disabled again.
        _service.enable_retries()
        self.test_post_view_queries_as_stream_value_error()
        _service.disable_retries()
        self.test_post_view_queries_as_stream_value_error()
# endregion
##############################################################################
# End of Service: Views
##############################################################################
##############################################################################
# Start of Service: PartitionedDatabases
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # Configure a no-auth service via environment-based configuration.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        client = CloudantV1.new_instance(
            service_name='TEST_SERVICE',
        )
        assert client is not None
        assert isinstance(client, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # With no service name there is no configuration to resolve, so
        # construction must fail with the expected message.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestGetPartitionInformation():
    """
    Test Class for get_partition_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs without trailing slashes are matched literally; otherwise use a
        # pattern tolerating any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_partition_information_all_params(self):
        """
        get_partition_information()

        Invoke get_partition_information and check the mocked response is
        returned successfully.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString')
        mock_response = '{"db_name": "db_name", "doc_count": 0, "doc_del_count": 0, "partition": "partition", "partitioned_indexes": {"count": 0, "indexes": {"search": 0, "view": 0}, "limit": 0}, "sizes": {"active": 0, "external": 0}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        # Invoke method
        response = _service.get_partition_information(
            db,
            partition_key,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_partition_information_all_params_with_retries(self):
        # Run the all-params test with retries enabled, then disabled again.
        _service.enable_retries()
        self.test_get_partition_information_all_params()
        _service.disable_retries()
        self.test_get_partition_information_all_params()

    @responses.activate
    def test_get_partition_information_value_error(self):
        """
        test_get_partition_information_value_error()

        Verify a ValueError is raised when any required parameter is None.
        No HTTP mock is registered: validation fails before a request is made.
        """
        req_param_dict = {
            "db": 'testString',
            "partition_key": 'testString',
        }
        # Null out each required param in turn and expect a ValueError.
        for param in req_param_dict.keys():
            req_copy = {k: v if k != param else None for (k, v) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_partition_information(**req_copy)

    def test_get_partition_information_value_error_with_retries(self):
        # Run the value-error test with retries enabled, then disabled again.
        _service.enable_retries()
        self.test_get_partition_information_value_error()
        _service.disable_retries()
        self.test_get_partition_information_value_error()
class TestPostPartitionAllDocs():
    """
    Test Class for post_partition_all_docs
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs without trailing slashes are matched literally; otherwise use a
        # pattern tolerating any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_partition_all_docs_all_params(self):
        """
        post_partition_all_docs()

        Invoke post_partition_all_docs with every parameter set and validate
        the request body sent to the mocked server.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_all_docs')
        mock_response = '{"total_rows": 0, "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "key", "value": {"rev": "rev"}}], "update_seq": "update_seq"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = '0007741142412418284'
        # Invoke method
        response = _service.post_partition_all_docs(
            db,
            partition_key,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            key=key,
            keys=keys,
            startkey=startkey,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == False
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['testString']
        assert req_body['startkey'] == '0007741142412418284'

    def test_post_partition_all_docs_all_params_with_retries(self):
        # Run the all-params test with retries enabled, then disabled again.
        _service.enable_retries()
        self.test_post_partition_all_docs_all_params()
        _service.disable_retries()
        self.test_post_partition_all_docs_all_params()

    @responses.activate
    def test_post_partition_all_docs_value_error(self):
        """
        test_post_partition_all_docs_value_error()

        Verify a ValueError is raised when any required parameter is None.
        No HTTP mock is registered: validation fails before a request is made.
        """
        # Required parameter values only; the optional body parameters are not
        # needed to exercise required-parameter validation.
        req_param_dict = {
            "db": 'testString',
            "partition_key": 'testString',
        }
        # Null out each required param in turn and expect a ValueError.
        for param in req_param_dict.keys():
            req_copy = {k: v if k != param else None for (k, v) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_all_docs(**req_copy)

    def test_post_partition_all_docs_value_error_with_retries(self):
        # Run the value-error test with retries enabled, then disabled again.
        _service.enable_retries()
        self.test_post_partition_all_docs_value_error()
        _service.disable_retries()
        self.test_post_partition_all_docs_value_error()
class TestPostPartitionAllDocsAsStream():
    """
    Test Class for post_partition_all_docs_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs without trailing slashes are matched literally; otherwise use a
        # pattern tolerating any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_partition_all_docs_as_stream_all_params(self):
        """
        post_partition_all_docs_as_stream()

        Invoke post_partition_all_docs_as_stream with every parameter set,
        validate the request body, and verify the streamed response content.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_all_docs')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = False
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        key = 'testString'
        keys = ['testString']
        startkey = '0007741142412418284'
        # Invoke method
        response = _service.post_partition_all_docs_as_stream(
            db,
            partition_key,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            key=key,
            keys=keys,
            startkey=startkey,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == False
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['testString']
        assert req_body['startkey'] == '0007741142412418284'
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_partition_all_docs_as_stream_all_params_with_retries(self):
        # Run the all-params test with retries enabled, then disabled again.
        _service.enable_retries()
        self.test_post_partition_all_docs_as_stream_all_params()
        _service.disable_retries()
        self.test_post_partition_all_docs_as_stream_all_params()

    @responses.activate
    def test_post_partition_all_docs_as_stream_value_error(self):
        """
        test_post_partition_all_docs_as_stream_value_error()

        Verify a ValueError is raised when any required parameter is None.
        No HTTP mock is registered: validation fails before a request is made.
        """
        # Required parameter values only; the optional body parameters are not
        # needed to exercise required-parameter validation.
        req_param_dict = {
            "db": 'testString',
            "partition_key": 'testString',
        }
        # Null out each required param in turn and expect a ValueError.
        for param in req_param_dict.keys():
            req_copy = {k: v if k != param else None for (k, v) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_all_docs_as_stream(**req_copy)

    def test_post_partition_all_docs_as_stream_value_error_with_retries(self):
        # Run the value-error test with retries enabled, then disabled again.
        _service.enable_retries()
        self.test_post_partition_all_docs_as_stream_value_error()
        _service.disable_retries()
        self.test_post_partition_all_docs_as_stream_value_error()
class TestPostPartitionSearch():
    """
    Test Class for post_partition_search
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs without trailing slashes are matched literally; otherwise use a
        # pattern tolerating any number of trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_partition_search_all_params(self):
        """
        post_partition_search()

        Invoke post_partition_search with every parameter set and validate the
        request body sent to the mocked server.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_search/testString')
        mock_response = '{"total_rows": 0, "bookmark": "bookmark", "by": "by", "counts": {"mapKey": {"mapKey": 0}}, "ranges": {"mapKey": {"mapKey": 0}}, "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "fields": {"mapKey": "anyValue"}, "highlights": {"mapKey": ["inner"]}, "id": "id"}], "groups": [{"total_rows": 0, "bookmark": "bookmark", "by": "by", "counts": {"mapKey": {"mapKey": 0}}, "ranges": {"mapKey": {"mapKey": 0}}, "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "fields": {"mapKey": "anyValue"}, "highlights": {"mapKey": ["inner"]}, "id": "id"}]}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        ddoc = 'testString'
        index = 'testString'
        query = 'testString'
        bookmark = 'testString'
        highlight_fields = ['testString']
        highlight_number = 1
        highlight_post_tag = '</em>'
        highlight_pre_tag = '<em>'
        highlight_size = 1
        include_docs = False
        include_fields = ['testString']
        limit = 0
        sort = ['testString']
        stale = 'ok'
        # Invoke method
        response = _service.post_partition_search(
            db,
            partition_key,
            ddoc,
            index,
            query,
            bookmark=bookmark,
            highlight_fields=highlight_fields,
            highlight_number=highlight_number,
            highlight_post_tag=highlight_post_tag,
            highlight_pre_tag=highlight_pre_tag,
            highlight_size=highlight_size,
            include_docs=include_docs,
            include_fields=include_fields,
            limit=limit,
            sort=sort,
            stale=stale,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['query'] == 'testString'
        assert req_body['bookmark'] == 'testString'
        assert req_body['highlight_fields'] == ['testString']
        assert req_body['highlight_number'] == 1
        assert req_body['highlight_post_tag'] == '</em>'
        assert req_body['highlight_pre_tag'] == '<em>'
        assert req_body['highlight_size'] == 1
        assert req_body['include_docs'] == False
        assert req_body['include_fields'] == ['testString']
        assert req_body['limit'] == 0
        assert req_body['sort'] == ['testString']
        assert req_body['stale'] == 'ok'

    def test_post_partition_search_all_params_with_retries(self):
        # Run the all-params test with retries enabled, then disabled again.
        _service.enable_retries()
        self.test_post_partition_search_all_params()
        _service.disable_retries()
        self.test_post_partition_search_all_params()

    @responses.activate
    def test_post_partition_search_value_error(self):
        """
        test_post_partition_search_value_error()

        Verify a ValueError is raised when any required parameter is None.
        No HTTP mock is registered: validation fails before a request is made.
        """
        # Required parameter values only; the optional search parameters are
        # not needed to exercise required-parameter validation.
        req_param_dict = {
            "db": 'testString',
            "partition_key": 'testString',
            "ddoc": 'testString',
            "index": 'testString',
            "query": 'testString',
        }
        # Null out each required param in turn and expect a ValueError.
        for param in req_param_dict.keys():
            req_copy = {k: v if k != param else None for (k, v) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_search(**req_copy)

    def test_post_partition_search_value_error_with_retries(self):
        # Run the value-error test with retries enabled, then disabled again.
        _service.enable_retries()
        self.test_post_partition_search_value_error()
        _service.disable_retries()
        self.test_post_partition_search_value_error()
class TestPostPartitionSearchAsStream():
    """
    Test Class for post_partition_search_as_stream
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in a slash are registered as a regex so the mock also
        # matches variants with repeated trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_post_partition_search_as_stream_all_params(self):
        """
        post_partition_search_as_stream() with all optional parameters set.

        Verifies the serialized request body and that the response can be
        consumed as a stream.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_search/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        ddoc = 'testString'
        index = 'testString'
        query = 'testString'
        bookmark = 'testString'
        highlight_fields = ['testString']
        highlight_number = 1
        highlight_post_tag = '</em>'
        highlight_pre_tag = '<em>'
        highlight_size = 1
        include_docs = False
        include_fields = ['testString']
        limit = 3
        sort = ['testString']
        stale = 'ok'
        # Invoke method
        response = _service.post_partition_search_as_stream(
            db,
            partition_key,
            ddoc,
            index,
            query,
            bookmark=bookmark,
            highlight_fields=highlight_fields,
            highlight_number=highlight_number,
            highlight_post_tag=highlight_post_tag,
            highlight_pre_tag=highlight_pre_tag,
            highlight_size=highlight_size,
            include_docs=include_docs,
            include_fields=include_fields,
            limit=limit,
            sort=sort,
            stale=stale,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['query'] == 'testString'
        assert req_body['bookmark'] == 'testString'
        assert req_body['highlight_fields'] == ['testString']
        assert req_body['highlight_number'] == 1
        assert req_body['highlight_post_tag'] == '</em>'
        assert req_body['highlight_pre_tag'] == '<em>'
        assert req_body['highlight_size'] == 1
        assert req_body['include_docs'] == False
        assert req_body['include_fields'] == ['testString']
        assert req_body['limit'] == 3
        assert req_body['sort'] == ['testString']
        assert req_body['stale'] == 'ok'
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response
    def test_post_partition_search_as_stream_all_params_with_retries(self):
        # Enable retries and run test_post_partition_search_as_stream_all_params.
        _service.enable_retries()
        self.test_post_partition_search_as_stream_all_params()
        # Disable retries and run test_post_partition_search_as_stream_all_params.
        _service.disable_retries()
        self.test_post_partition_search_as_stream_all_params()
    @responses.activate
    def test_post_partition_search_as_stream_value_error(self):
        """
        post_partition_search_as_stream() must raise ValueError when any
        required parameter is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_search/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        ddoc = 'testString'
        index = 'testString'
        query = 'testString'
        bookmark = 'testString'
        highlight_fields = ['testString']
        highlight_number = 1
        highlight_post_tag = '</em>'
        highlight_pre_tag = '<em>'
        highlight_size = 1
        include_docs = False
        include_fields = ['testString']
        limit = 3
        sort = ['testString']
        stale = 'ok'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "partition_key": partition_key,
            "ddoc": ddoc,
            "index": index,
            "query": query,
        }
        for param in req_param_dict.keys():
            # Use equality, not identity: 'key is not param' only works by
            # accident of string interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_search_as_stream(**req_copy)
    def test_post_partition_search_as_stream_value_error_with_retries(self):
        # Enable retries and run test_post_partition_search_as_stream_value_error.
        _service.enable_retries()
        self.test_post_partition_search_as_stream_value_error()
        # Disable retries and run test_post_partition_search_as_stream_value_error.
        _service.disable_retries()
        self.test_post_partition_search_as_stream_value_error()
class TestPostPartitionView():
    """
    Test Class for post_partition_view
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in a slash are registered as a regex so the mock also
        # matches variants with repeated trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_post_partition_view_all_params(self):
        """
        post_partition_view() with all optional parameters set.

        Verifies that every view option is serialized into the request body.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_view/testString')
        mock_response = '{"total_rows": 0, "update_seq": "update_seq", "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "anyValue", "value": "anyValue"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        ddoc = 'testString'
        view = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = True
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        endkey_docid = 'testString'
        group = False
        group_level = 1
        key = 'testString'
        keys = ['examplekey']
        reduce = True
        stable = False
        startkey = 'testString'
        startkey_docid = 'testString'
        update = 'true'
        # Invoke method
        response = _service.post_partition_view(
            db,
            partition_key,
            ddoc,
            view,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            endkey_docid=endkey_docid,
            group=group,
            group_level=group_level,
            key=key,
            keys=keys,
            reduce=reduce,
            stable=stable,
            startkey=startkey,
            startkey_docid=startkey_docid,
            update=update,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == True
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['endkey_docid'] == 'testString'
        assert req_body['group'] == False
        assert req_body['group_level'] == 1
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['examplekey']
        assert req_body['reduce'] == True
        assert req_body['stable'] == False
        assert req_body['startkey'] == 'testString'
        assert req_body['startkey_docid'] == 'testString'
        assert req_body['update'] == 'true'
    def test_post_partition_view_all_params_with_retries(self):
        # Enable retries and run test_post_partition_view_all_params.
        _service.enable_retries()
        self.test_post_partition_view_all_params()
        # Disable retries and run test_post_partition_view_all_params.
        _service.disable_retries()
        self.test_post_partition_view_all_params()
    @responses.activate
    def test_post_partition_view_value_error(self):
        """
        post_partition_view() must raise ValueError when any required
        parameter is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_view/testString')
        mock_response = '{"total_rows": 0, "update_seq": "update_seq", "rows": [{"caused_by": "caused_by", "error": "error", "reason": "reason", "doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "id": "id", "key": "anyValue", "value": "anyValue"}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        ddoc = 'testString'
        view = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = True
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        endkey_docid = 'testString'
        group = False
        group_level = 1
        key = 'testString'
        keys = ['examplekey']
        reduce = True
        stable = False
        startkey = 'testString'
        startkey_docid = 'testString'
        update = 'true'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "partition_key": partition_key,
            "ddoc": ddoc,
            "view": view,
        }
        for param in req_param_dict.keys():
            # Use equality, not identity: 'key is not param' only works by
            # accident of string interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_view(**req_copy)
    def test_post_partition_view_value_error_with_retries(self):
        # Enable retries and run test_post_partition_view_value_error.
        _service.enable_retries()
        self.test_post_partition_view_value_error()
        # Disable retries and run test_post_partition_view_value_error.
        _service.disable_retries()
        self.test_post_partition_view_value_error()
class TestPostPartitionViewAsStream():
    """
    Test Class for post_partition_view_as_stream
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in a slash are registered as a regex so the mock also
        # matches variants with repeated trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_post_partition_view_as_stream_all_params(self):
        """
        post_partition_view_as_stream() with all optional parameters set.

        Verifies the serialized request body and that the response can be
        consumed as a stream.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_view/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        ddoc = 'testString'
        view = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = True
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        endkey_docid = 'testString'
        group = False
        group_level = 1
        key = 'testString'
        keys = ['examplekey']
        reduce = True
        stable = False
        startkey = 'testString'
        startkey_docid = 'testString'
        update = 'true'
        # Invoke method
        response = _service.post_partition_view_as_stream(
            db,
            partition_key,
            ddoc,
            view,
            att_encoding_info=att_encoding_info,
            attachments=attachments,
            conflicts=conflicts,
            descending=descending,
            include_docs=include_docs,
            inclusive_end=inclusive_end,
            limit=limit,
            skip=skip,
            update_seq=update_seq,
            endkey=endkey,
            endkey_docid=endkey_docid,
            group=group,
            group_level=group_level,
            key=key,
            keys=keys,
            reduce=reduce,
            stable=stable,
            startkey=startkey,
            startkey_docid=startkey_docid,
            update=update,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['att_encoding_info'] == False
        assert req_body['attachments'] == False
        assert req_body['conflicts'] == False
        assert req_body['descending'] == False
        assert req_body['include_docs'] == True
        assert req_body['inclusive_end'] == True
        assert req_body['limit'] == 10
        assert req_body['skip'] == 0
        assert req_body['update_seq'] == False
        assert req_body['endkey'] == 'testString'
        assert req_body['endkey_docid'] == 'testString'
        assert req_body['group'] == False
        assert req_body['group_level'] == 1
        assert req_body['key'] == 'testString'
        assert req_body['keys'] == ['examplekey']
        assert req_body['reduce'] == True
        assert req_body['stable'] == False
        assert req_body['startkey'] == 'testString'
        assert req_body['startkey_docid'] == 'testString'
        assert req_body['update'] == 'true'
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response
    def test_post_partition_view_as_stream_all_params_with_retries(self):
        # Enable retries and run test_post_partition_view_as_stream_all_params.
        _service.enable_retries()
        self.test_post_partition_view_as_stream_all_params()
        # Disable retries and run test_post_partition_view_as_stream_all_params.
        _service.disable_retries()
        self.test_post_partition_view_as_stream_all_params()
    @responses.activate
    def test_post_partition_view_as_stream_value_error(self):
        """
        post_partition_view_as_stream() must raise ValueError when any
        required parameter is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_design/testString/_view/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        ddoc = 'testString'
        view = 'testString'
        att_encoding_info = False
        attachments = False
        conflicts = False
        descending = False
        include_docs = True
        inclusive_end = True
        limit = 10
        skip = 0
        update_seq = False
        endkey = 'testString'
        endkey_docid = 'testString'
        group = False
        group_level = 1
        key = 'testString'
        keys = ['examplekey']
        reduce = True
        stable = False
        startkey = 'testString'
        startkey_docid = 'testString'
        update = 'true'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "partition_key": partition_key,
            "ddoc": ddoc,
            "view": view,
        }
        for param in req_param_dict.keys():
            # Use equality, not identity: 'key is not param' only works by
            # accident of string interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_view_as_stream(**req_copy)
    def test_post_partition_view_as_stream_value_error_with_retries(self):
        # Enable retries and run test_post_partition_view_as_stream_value_error.
        _service.enable_retries()
        self.test_post_partition_view_as_stream_value_error()
        # Disable retries and run test_post_partition_view_as_stream_value_error.
        _service.disable_retries()
        self.test_post_partition_view_as_stream_value_error()
class TestPostPartitionFind():
    """
    Test Class for post_partition_find
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in a slash are registered as a regex so the mock also
        # matches variants with repeated trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_post_partition_find_all_params(self):
        """
        post_partition_find() with all optional parameters set.

        Verifies that every query option is serialized into the request body.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_find')
        mock_response = '{"bookmark": "bookmark", "docs": [{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}], "execution_stats": {"execution_time_ms": 17, "results_returned": 0, "total_docs_examined": 0, "total_keys_examined": 0, "total_quorum_docs_examined": 0}, "warning": "warning"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['testString']
        limit = 0
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']
        # Invoke method
        response = _service.post_partition_find(
            db,
            partition_key,
            selector,
            bookmark=bookmark,
            conflicts=conflicts,
            execution_stats=execution_stats,
            fields=fields,
            limit=limit,
            skip=skip,
            sort=sort,
            stable=stable,
            update=update,
            use_index=use_index,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['selector'] == {}
        assert req_body['bookmark'] == 'testString'
        assert req_body['conflicts'] == True
        assert req_body['execution_stats'] == True
        assert req_body['fields'] == ['testString']
        assert req_body['limit'] == 0
        assert req_body['skip'] == 0
        assert req_body['sort'] == [{}]
        assert req_body['stable'] == True
        assert req_body['update'] == 'true'
        assert req_body['use_index'] == ['testString']
    def test_post_partition_find_all_params_with_retries(self):
        # Enable retries and run test_post_partition_find_all_params.
        _service.enable_retries()
        self.test_post_partition_find_all_params()
        # Disable retries and run test_post_partition_find_all_params.
        _service.disable_retries()
        self.test_post_partition_find_all_params()
    @responses.activate
    def test_post_partition_find_value_error(self):
        """
        post_partition_find() must raise ValueError when any required
        parameter is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_find')
        mock_response = '{"bookmark": "bookmark", "docs": [{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}], "execution_stats": {"execution_time_ms": 17, "results_returned": 0, "total_docs_examined": 0, "total_keys_examined": 0, "total_quorum_docs_examined": 0}, "warning": "warning"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['testString']
        limit = 0
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "partition_key": partition_key,
            "selector": selector,
        }
        for param in req_param_dict.keys():
            # Use equality, not identity: 'key is not param' only works by
            # accident of string interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_find(**req_copy)
    def test_post_partition_find_value_error_with_retries(self):
        # Enable retries and run test_post_partition_find_value_error.
        _service.enable_retries()
        self.test_post_partition_find_value_error()
        # Disable retries and run test_post_partition_find_value_error.
        _service.disable_retries()
        self.test_post_partition_find_value_error()
class TestPostPartitionFindAsStream():
    """
    Test Class for post_partition_find_as_stream
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in a slash are registered as a regex so the mock also
        # matches variants with repeated trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_post_partition_find_as_stream_all_params(self):
        """
        post_partition_find_as_stream() with all optional parameters set.

        Verifies the serialized request body and that the response can be
        consumed as a stream.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_find')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['productid', 'name', 'description']
        limit = 0
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']
        # Invoke method
        response = _service.post_partition_find_as_stream(
            db,
            partition_key,
            selector,
            bookmark=bookmark,
            conflicts=conflicts,
            execution_stats=execution_stats,
            fields=fields,
            limit=limit,
            skip=skip,
            sort=sort,
            stable=stable,
            update=update,
            use_index=use_index,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['selector'] == {}
        assert req_body['bookmark'] == 'testString'
        assert req_body['conflicts'] == True
        assert req_body['execution_stats'] == True
        assert req_body['fields'] == ['productid', 'name', 'description']
        assert req_body['limit'] == 0
        assert req_body['skip'] == 0
        assert req_body['sort'] == [{}]
        assert req_body['stable'] == True
        assert req_body['update'] == 'true'
        assert req_body['use_index'] == ['testString']
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response
    def test_post_partition_find_as_stream_all_params_with_retries(self):
        # Enable retries and run test_post_partition_find_as_stream_all_params.
        _service.enable_retries()
        self.test_post_partition_find_as_stream_all_params()
        # Disable retries and run test_post_partition_find_as_stream_all_params.
        _service.disable_retries()
        self.test_post_partition_find_as_stream_all_params()
    @responses.activate
    def test_post_partition_find_as_stream_value_error(self):
        """
        post_partition_find_as_stream() must raise ValueError when any
        required parameter is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_partition/testString/_find')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        partition_key = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['productid', 'name', 'description']
        limit = 0
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "partition_key": partition_key,
            "selector": selector,
        }
        for param in req_param_dict.keys():
            # Use equality, not identity: 'key is not param' only works by
            # accident of string interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_partition_find_as_stream(**req_copy)
    def test_post_partition_find_as_stream_value_error_with_retries(self):
        # Enable retries and run test_post_partition_find_as_stream_value_error.
        _service.enable_retries()
        self.test_post_partition_find_as_stream_value_error()
        # Disable retries and run test_post_partition_find_as_stream_value_error.
        _service.disable_retries()
        self.test_post_partition_find_as_stream_value_error()
# endregion
##############################################################################
# End of Service: PartitionedDatabases
##############################################################################
##############################################################################
# Start of Service: Queries
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """
    def test_new_instance(self):
        """
        new_instance() builds a CloudantV1 client from external
        configuration (auth type supplied via environment variable).
        """
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        service = CloudantV1.new_instance(
            service_name='TEST_SERVICE',
        )
        assert service is not None
        assert isinstance(service, CloudantV1)
    def test_new_instance_without_authenticator(self):
        """
        new_instance() raises ValueError when no authenticator can be
        resolved from configuration.
        """
        with pytest.raises(ValueError, match='authenticator must be provided'):
            # No assignment: new_instance must raise before a service
            # instance is ever produced (the old 'service =' was unused).
            CloudantV1.new_instance(
            )
class TestPostExplain():
    """
    Test Class for post_explain
    """
    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in a slash are registered as a regex so the mock also
        # matches variants with repeated trailing slashes.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')
    @responses.activate
    def test_post_explain_all_params(self):
        """
        post_explain() with all optional parameters set.

        Verifies that every query option (including read quorum 'r') is
        serialized into the request body.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_explain')
        mock_response = '{"dbname": "dbname", "fields": ["fields"], "index": {"ddoc": "ddoc", "def": {"default_analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "default_field": {"analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "enabled": true}, "fields": [{"name": "name", "type": "boolean"}], "index_array_lengths": true, "partial_filter_selector": {"mapKey": "anyValue"}}, "name": "name", "type": "json"}, "limit": 0, "opts": {"mapKey": "anyValue"}, "range": {"end_key": ["anyValue"], "start_key": ["anyValue"]}, "selector": {"mapKey": "anyValue"}, "skip": 0}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['testString']
        limit = 0
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']
        r = 1
        # Invoke method
        response = _service.post_explain(
            db,
            selector,
            bookmark=bookmark,
            conflicts=conflicts,
            execution_stats=execution_stats,
            fields=fields,
            limit=limit,
            skip=skip,
            sort=sort,
            stable=stable,
            update=update,
            use_index=use_index,
            r=r,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['selector'] == {}
        assert req_body['bookmark'] == 'testString'
        assert req_body['conflicts'] == True
        assert req_body['execution_stats'] == True
        assert req_body['fields'] == ['testString']
        assert req_body['limit'] == 0
        assert req_body['skip'] == 0
        assert req_body['sort'] == [{}]
        assert req_body['stable'] == True
        assert req_body['update'] == 'true'
        assert req_body['use_index'] == ['testString']
        assert req_body['r'] == 1
    def test_post_explain_all_params_with_retries(self):
        # Enable retries and run test_post_explain_all_params.
        _service.enable_retries()
        self.test_post_explain_all_params()
        # Disable retries and run test_post_explain_all_params.
        _service.disable_retries()
        self.test_post_explain_all_params()
    @responses.activate
    def test_post_explain_value_error(self):
        """
        post_explain() must raise ValueError when any required parameter
        is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_explain')
        mock_response = '{"dbname": "dbname", "fields": ["fields"], "index": {"ddoc": "ddoc", "def": {"default_analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "default_field": {"analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "enabled": true}, "fields": [{"name": "name", "type": "boolean"}], "index_array_lengths": true, "partial_filter_selector": {"mapKey": "anyValue"}}, "name": "name", "type": "json"}, "limit": 0, "opts": {"mapKey": "anyValue"}, "range": {"end_key": ["anyValue"], "start_key": ["anyValue"]}, "selector": {"mapKey": "anyValue"}, "skip": 0}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['testString']
        limit = 0
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']
        r = 1
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "selector": selector,
        }
        for param in req_param_dict.keys():
            # Use equality, not identity: 'key is not param' only works by
            # accident of string interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_explain(**req_copy)
    def test_post_explain_value_error_with_retries(self):
        # Enable retries and run test_post_explain_value_error.
        _service.enable_retries()
        self.test_post_explain_value_error()
        # Disable retries and run test_post_explain_value_error.
        _service.disable_retries()
        self.test_post_explain_value_error()
class TestPostFind():
    """
    Test Class for post_find
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # A trailing slash is optional on the wire, so match it with a regex.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_find_all_params(self):
        """
        post_find()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_find')
        mock_response = '{"bookmark": "bookmark", "docs": [{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}], "execution_stats": {"execution_time_ms": 17, "results_returned": 0, "total_docs_examined": 0, "total_keys_examined": 0, "total_quorum_docs_examined": 0}, "warning": "warning"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['_id', 'type', 'name', 'email']
        limit = 3
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']
        r = 1

        # Invoke method
        response = _service.post_find(
            db,
            selector,
            bookmark=bookmark,
            conflicts=conflicts,
            execution_stats=execution_stats,
            fields=fields,
            limit=limit,
            skip=skip,
            sort=sort,
            stable=stable,
            update=update,
            use_index=use_index,
            r=r,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['selector'] == {}
        assert req_body['bookmark'] == 'testString'
        assert req_body['conflicts'] == True
        assert req_body['execution_stats'] == True
        assert req_body['fields'] == ['_id', 'type', 'name', 'email']
        assert req_body['limit'] == 3
        assert req_body['skip'] == 0
        assert req_body['sort'] == [{}]
        assert req_body['stable'] == True
        assert req_body['update'] == 'true'
        assert req_body['use_index'] == ['testString']
        assert req_body['r'] == 1

    def test_post_find_all_params_with_retries(self):
        # Enable retries and run test_post_find_all_params.
        _service.enable_retries()
        self.test_post_find_all_params()

        # Disable retries and run test_post_find_all_params.
        _service.disable_retries()
        self.test_post_find_all_params()

    @responses.activate
    def test_post_find_value_error(self):
        """
        test_post_find_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_find')
        mock_response = '{"bookmark": "bookmark", "docs": [{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}], "execution_stats": {"execution_time_ms": 17, "results_returned": 0, "total_docs_examined": 0, "total_keys_examined": 0, "total_quorum_docs_examined": 0}, "warning": "warning"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['_id', 'type', 'name', 'email']
        limit = 3
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']
        r = 1

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "selector": selector,
        }
        # Null out one required parameter at a time; each call must be rejected.
        # (Compare keys with `==`, not the identity operator `is`.)
        for param in req_param_dict:
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_find(**req_copy)

    def test_post_find_value_error_with_retries(self):
        # Enable retries and run test_post_find_value_error.
        _service.enable_retries()
        self.test_post_find_value_error()

        # Disable retries and run test_post_find_value_error.
        _service.disable_retries()
        self.test_post_find_value_error()
class TestPostFindAsStream():
    """
    Test Class for post_find_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # A trailing slash is optional on the wire, so match it with a regex.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_find_as_stream_all_params(self):
        """
        post_find_as_stream()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_find')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['_id', 'type', 'name', 'email']
        limit = 3
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']
        r = 1

        # Invoke method
        response = _service.post_find_as_stream(
            db,
            selector,
            bookmark=bookmark,
            conflicts=conflicts,
            execution_stats=execution_stats,
            fields=fields,
            limit=limit,
            skip=skip,
            sort=sort,
            stable=stable,
            update=update,
            use_index=use_index,
            r=r,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['selector'] == {}
        assert req_body['bookmark'] == 'testString'
        assert req_body['conflicts'] == True
        assert req_body['execution_stats'] == True
        assert req_body['fields'] == ['_id', 'type', 'name', 'email']
        assert req_body['limit'] == 3
        assert req_body['skip'] == 0
        assert req_body['sort'] == [{}]
        assert req_body['stable'] == True
        assert req_body['update'] == 'true'
        assert req_body['use_index'] == ['testString']
        assert req_body['r'] == 1

        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_post_find_as_stream_all_params_with_retries(self):
        # Enable retries and run test_post_find_as_stream_all_params.
        _service.enable_retries()
        self.test_post_find_as_stream_all_params()

        # Disable retries and run test_post_find_as_stream_all_params.
        _service.disable_retries()
        self.test_post_find_as_stream_all_params()

    @responses.activate
    def test_post_find_as_stream_value_error(self):
        """
        test_post_find_as_stream_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_find')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        selector = {}
        bookmark = 'testString'
        conflicts = True
        execution_stats = True
        fields = ['_id', 'type', 'name', 'email']
        limit = 3
        skip = 0
        sort = [{}]
        stable = True
        update = 'true'
        use_index = ['testString']
        r = 1

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "selector": selector,
        }
        # Null out one required parameter at a time; each call must be rejected.
        # (Compare keys with `==`, not the identity operator `is`.)
        for param in req_param_dict:
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_find_as_stream(**req_copy)

    def test_post_find_as_stream_value_error_with_retries(self):
        # Enable retries and run test_post_find_as_stream_value_error.
        _service.enable_retries()
        self.test_post_find_as_stream_value_error()

        # Disable retries and run test_post_find_as_stream_value_error.
        _service.disable_retries()
        self.test_post_find_as_stream_value_error()
class TestGetIndexesInformation():
    """
    Test Class for get_indexes_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # A trailing slash is optional on the wire, so match it with a regex.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_indexes_information_all_params(self):
        """
        get_indexes_information()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_index')
        mock_response = '{"total_rows": 0, "indexes": [{"ddoc": "ddoc", "def": {"default_analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "default_field": {"analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "enabled": true}, "fields": [{"name": "name", "type": "boolean"}], "index_array_lengths": true, "partial_filter_selector": {"mapKey": "anyValue"}}, "name": "name", "type": "json"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'

        # Invoke method
        response = _service.get_indexes_information(
            db,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_indexes_information_all_params_with_retries(self):
        # Enable retries and run test_get_indexes_information_all_params.
        _service.enable_retries()
        self.test_get_indexes_information_all_params()

        # Disable retries and run test_get_indexes_information_all_params.
        _service.disable_retries()
        self.test_get_indexes_information_all_params()

    @responses.activate
    def test_get_indexes_information_value_error(self):
        """
        test_get_indexes_information_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_index')
        mock_response = '{"total_rows": 0, "indexes": [{"ddoc": "ddoc", "def": {"default_analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "default_field": {"analyzer": {"name": "classic", "stopwords": ["stopwords"]}, "enabled": true}, "fields": [{"name": "name", "type": "boolean"}], "index_array_lengths": true, "partial_filter_selector": {"mapKey": "anyValue"}}, "name": "name", "type": "json"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        # Null out one required parameter at a time; each call must be rejected.
        # (Compare keys with `==`, not the identity operator `is`.)
        for param in req_param_dict:
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_indexes_information(**req_copy)

    def test_get_indexes_information_value_error_with_retries(self):
        # Enable retries and run test_get_indexes_information_value_error.
        _service.enable_retries()
        self.test_get_indexes_information_value_error()

        # Disable retries and run test_get_indexes_information_value_error.
        _service.disable_retries()
        self.test_get_indexes_information_value_error()
class TestPostIndex():
    """
    Test Class for post_index
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # A trailing slash is optional on the wire, so match it with a regex.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_index_all_params(self):
        """
        post_index()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_index')
        mock_response = '{"id": "id", "name": "name", "result": "created"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a Analyzer model
        analyzer_model = {}
        analyzer_model['name'] = 'classic'
        analyzer_model['stopwords'] = ['testString']

        # Construct a dict representation of a IndexTextOperatorDefaultField model
        index_text_operator_default_field_model = {}
        index_text_operator_default_field_model['analyzer'] = analyzer_model
        index_text_operator_default_field_model['enabled'] = True

        # Construct a dict representation of a IndexField model
        index_field_model = {}
        index_field_model['name'] = 'testString'
        index_field_model['type'] = 'boolean'
        index_field_model['foo'] = 'asc'

        # Construct a dict representation of a IndexDefinition model
        index_definition_model = {}
        index_definition_model['default_analyzer'] = analyzer_model
        index_definition_model['default_field'] = index_text_operator_default_field_model
        index_definition_model['fields'] = [index_field_model]
        index_definition_model['index_array_lengths'] = True
        index_definition_model['partial_filter_selector'] = {}

        # Set up parameter values
        db = 'testString'
        index = index_definition_model
        ddoc = 'testString'
        def_ = index_definition_model
        name = 'testString'
        partitioned = True
        index_type = 'json'  # local renamed from `type` to avoid shadowing the builtin

        # Invoke method
        response = _service.post_index(
            db,
            index,
            ddoc=ddoc,
            def_=def_,
            name=name,
            partitioned=partitioned,
            type=index_type,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['index'] == index_definition_model
        assert req_body['ddoc'] == 'testString'
        assert req_body['def'] == index_definition_model
        assert req_body['name'] == 'testString'
        assert req_body['partitioned'] == True
        assert req_body['type'] == 'json'

    def test_post_index_all_params_with_retries(self):
        # Enable retries and run test_post_index_all_params.
        _service.enable_retries()
        self.test_post_index_all_params()

        # Disable retries and run test_post_index_all_params.
        _service.disable_retries()
        self.test_post_index_all_params()

    @responses.activate
    def test_post_index_value_error(self):
        """
        test_post_index_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_index')
        mock_response = '{"id": "id", "name": "name", "result": "created"}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a Analyzer model
        analyzer_model = {}
        analyzer_model['name'] = 'classic'
        analyzer_model['stopwords'] = ['testString']

        # Construct a dict representation of a IndexTextOperatorDefaultField model
        index_text_operator_default_field_model = {}
        index_text_operator_default_field_model['analyzer'] = analyzer_model
        index_text_operator_default_field_model['enabled'] = True

        # Construct a dict representation of a IndexField model
        index_field_model = {}
        index_field_model['name'] = 'testString'
        index_field_model['type'] = 'boolean'
        index_field_model['foo'] = 'asc'

        # Construct a dict representation of a IndexDefinition model
        index_definition_model = {}
        index_definition_model['default_analyzer'] = analyzer_model
        index_definition_model['default_field'] = index_text_operator_default_field_model
        index_definition_model['fields'] = [index_field_model]
        index_definition_model['index_array_lengths'] = True
        index_definition_model['partial_filter_selector'] = {}

        # Set up parameter values
        db = 'testString'
        index = index_definition_model
        ddoc = 'testString'
        def_ = index_definition_model
        name = 'testString'
        partitioned = True
        index_type = 'json'  # local renamed from `type` to avoid shadowing the builtin

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "index": index,
        }
        # Null out one required parameter at a time; each call must be rejected.
        # (Compare keys with `==`, not the identity operator `is`.)
        for param in req_param_dict:
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_index(**req_copy)

    def test_post_index_value_error_with_retries(self):
        # Enable retries and run test_post_index_value_error.
        _service.enable_retries()
        self.test_post_index_value_error()

        # Disable retries and run test_post_index_value_error.
        _service.disable_retries()
        self.test_post_index_value_error()
class TestDeleteIndex():
    """
    Test Class for delete_index
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # A trailing slash is optional on the wire, so match it with a regex.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_delete_index_all_params(self):
        """
        delete_index()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_index/_design/testString/json/testString')
        mock_response = '{"ok": true}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index_type = 'json'  # local renamed from `type` to avoid shadowing the builtin
        index = 'testString'

        # Invoke method
        response = _service.delete_index(
            db,
            ddoc,
            index_type,
            index,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_delete_index_all_params_with_retries(self):
        # Enable retries and run test_delete_index_all_params.
        _service.enable_retries()
        self.test_delete_index_all_params()

        # Disable retries and run test_delete_index_all_params.
        _service.disable_retries()
        self.test_delete_index_all_params()

    @responses.activate
    def test_delete_index_value_error(self):
        """
        test_delete_index_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_index/_design/testString/json/testString')
        mock_response = '{"ok": true}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index_type = 'json'  # local renamed from `type` to avoid shadowing the builtin
        index = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "type": index_type,
            "index": index,
        }
        # Null out one required parameter at a time; each call must be rejected.
        # (Compare keys with `==`, not the identity operator `is`.)
        for param in req_param_dict:
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_index(**req_copy)

    def test_delete_index_value_error_with_retries(self):
        # Enable retries and run test_delete_index_value_error.
        _service.enable_retries()
        self.test_delete_index_value_error()

        # Disable retries and run test_delete_index_value_error.
        _service.disable_retries()
        self.test_delete_index_value_error()
# endregion
##############################################################################
# End of Service: Queries
##############################################################################
##############################################################################
# Start of Service: Searches
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # Configure no-auth credentials through the environment, then
        # build the client via the service-name factory.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        client = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert client is not None
        assert isinstance(client, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # With no credentials configured, construction must fail fast.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestPostSearchAnalyze():
    """
    Test Class for post_search_analyze
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # A trailing slash is optional on the wire, so match it with a regex.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_search_analyze_all_params(self):
        """
        post_search_analyze()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_search_analyze')
        mock_response = '{"tokens": ["tokens"]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        analyzer = 'arabic'
        text = 'testString'

        # Invoke method
        response = _service.post_search_analyze(
            analyzer,
            text,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['analyzer'] == 'arabic'
        assert req_body['text'] == 'testString'

    def test_post_search_analyze_all_params_with_retries(self):
        # Enable retries and run test_post_search_analyze_all_params.
        _service.enable_retries()
        self.test_post_search_analyze_all_params()

        # Disable retries and run test_post_search_analyze_all_params.
        _service.disable_retries()
        self.test_post_search_analyze_all_params()

    @responses.activate
    def test_post_search_analyze_value_error(self):
        """
        test_post_search_analyze_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_search_analyze')
        mock_response = '{"tokens": ["tokens"]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        analyzer = 'arabic'
        text = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "analyzer": analyzer,
            "text": text,
        }
        # Null out one required parameter at a time; each call must be rejected.
        # (Compare keys with `==`, not the identity operator `is`.)
        for param in req_param_dict:
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_search_analyze(**req_copy)

    def test_post_search_analyze_value_error_with_retries(self):
        # Enable retries and run test_post_search_analyze_value_error.
        _service.enable_retries()
        self.test_post_search_analyze_value_error()

        # Disable retries and run test_post_search_analyze_value_error.
        _service.disable_retries()
        self.test_post_search_analyze_value_error()
class TestPostSearch():
    """
    Test Class for post_search
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # A trailing slash is optional on the wire, so match it with a regex.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_search_all_params(self):
        """
        post_search()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_search/testString')
        mock_response = '{"total_rows": 0, "bookmark": "bookmark", "by": "by", "counts": {"mapKey": {"mapKey": 0}}, "ranges": {"mapKey": {"mapKey": 0}}, "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "fields": {"mapKey": "anyValue"}, "highlights": {"mapKey": ["inner"]}, "id": "id"}], "groups": [{"total_rows": 0, "bookmark": "bookmark", "by": "by", "counts": {"mapKey": {"mapKey": 0}}, "ranges": {"mapKey": {"mapKey": 0}}, "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "fields": {"mapKey": "anyValue"}, "highlights": {"mapKey": ["inner"]}, "id": "id"}]}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        query = 'testString'
        bookmark = 'testString'
        highlight_fields = ['testString']
        highlight_number = 1
        highlight_post_tag = '</em>'
        highlight_pre_tag = '<em>'
        highlight_size = 1
        include_docs = False
        include_fields = ['testString']
        limit = 0
        sort = ['testString']
        stale = 'ok'
        counts = ['testString']
        drilldown = [['testString']]
        group_field = 'testString'
        group_limit = 1
        group_sort = ['testString']
        ranges = {}

        # Invoke method
        response = _service.post_search(
            db,
            ddoc,
            index,
            query,
            bookmark=bookmark,
            highlight_fields=highlight_fields,
            highlight_number=highlight_number,
            highlight_post_tag=highlight_post_tag,
            highlight_pre_tag=highlight_pre_tag,
            highlight_size=highlight_size,
            include_docs=include_docs,
            include_fields=include_fields,
            limit=limit,
            sort=sort,
            stale=stale,
            counts=counts,
            drilldown=drilldown,
            group_field=group_field,
            group_limit=group_limit,
            group_sort=group_sort,
            ranges=ranges,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['query'] == 'testString'
        assert req_body['bookmark'] == 'testString'
        assert req_body['highlight_fields'] == ['testString']
        assert req_body['highlight_number'] == 1
        assert req_body['highlight_post_tag'] == '</em>'
        assert req_body['highlight_pre_tag'] == '<em>'
        assert req_body['highlight_size'] == 1
        assert req_body['include_docs'] == False
        assert req_body['include_fields'] == ['testString']
        assert req_body['limit'] == 0
        assert req_body['sort'] == ['testString']
        assert req_body['stale'] == 'ok'
        assert req_body['counts'] == ['testString']
        assert req_body['drilldown'] == [['testString']]
        assert req_body['group_field'] == 'testString'
        assert req_body['group_limit'] == 1
        assert req_body['group_sort'] == ['testString']
        assert req_body['ranges'] == {}

    def test_post_search_all_params_with_retries(self):
        # Enable retries and run test_post_search_all_params.
        _service.enable_retries()
        self.test_post_search_all_params()

        # Disable retries and run test_post_search_all_params.
        _service.disable_retries()
        self.test_post_search_all_params()

    @responses.activate
    def test_post_search_value_error(self):
        """
        test_post_search_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_search/testString')
        mock_response = '{"total_rows": 0, "bookmark": "bookmark", "by": "by", "counts": {"mapKey": {"mapKey": 0}}, "ranges": {"mapKey": {"mapKey": 0}}, "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "fields": {"mapKey": "anyValue"}, "highlights": {"mapKey": ["inner"]}, "id": "id"}], "groups": [{"total_rows": 0, "bookmark": "bookmark", "by": "by", "counts": {"mapKey": {"mapKey": 0}}, "ranges": {"mapKey": {"mapKey": 0}}, "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "fields": {"mapKey": "anyValue"}, "highlights": {"mapKey": ["inner"]}, "id": "id"}]}]}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        query = 'testString'
        bookmark = 'testString'
        highlight_fields = ['testString']
        highlight_number = 1
        highlight_post_tag = '</em>'
        highlight_pre_tag = '<em>'
        highlight_size = 1
        include_docs = False
        include_fields = ['testString']
        limit = 0
        sort = ['testString']
        stale = 'ok'
        counts = ['testString']
        drilldown = [['testString']]
        group_field = 'testString'
        group_limit = 1
        group_sort = ['testString']
        ranges = {}

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "index": index,
            "query": query,
        }
        # Null out one required parameter at a time; each call must be rejected.
        # (Compare keys with `==`, not the identity operator `is`.)
        for param in req_param_dict:
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_search(**req_copy)

    def test_post_search_value_error_with_retries(self):
        # Enable retries and run test_post_search_value_error.
        _service.enable_retries()
        self.test_post_search_value_error()

        # Disable retries and run test_post_search_value_error.
        _service.disable_retries()
        self.test_post_search_value_error()
class TestPostSearchAsStream():
"""
Test Class for post_search_as_stream
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
@responses.activate
def test_post_search_as_stream_all_params(self):
"""
post_search_as_stream()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_design/testString/_search/testString')
mock_response = '{"foo": "this is a mock response for JSON streaming"}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
db = 'testString'
ddoc = 'testString'
index = 'testString'
query = 'testString'
bookmark = 'testString'
highlight_fields = ['testString']
highlight_number = 1
highlight_post_tag = '</em>'
highlight_pre_tag = '<em>'
highlight_size = 1
include_docs = False
include_fields = ['testString']
limit = 3
sort = ['testString']
stale = 'ok'
counts = ['testString']
drilldown = [['testString']]
group_field = 'testString'
group_limit = 1
group_sort = ['testString']
ranges = {}
# Invoke method
response = _service.post_search_as_stream(
db,
ddoc,
index,
query,
bookmark=bookmark,
highlight_fields=highlight_fields,
highlight_number=highlight_number,
highlight_post_tag=highlight_post_tag,
highlight_pre_tag=highlight_pre_tag,
highlight_size=highlight_size,
include_docs=include_docs,
include_fields=include_fields,
limit=limit,
sort=sort,
stale=stale,
counts=counts,
drilldown=drilldown,
group_field=group_field,
group_limit=group_limit,
group_sort=group_sort,
ranges=ranges,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# decompress gzip compressed request body
responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['query'] == 'testString'
assert req_body['bookmark'] == 'testString'
assert req_body['highlight_fields'] == ['testString']
assert req_body['highlight_number'] == 1
assert req_body['highlight_post_tag'] == '</em>'
assert req_body['highlight_pre_tag'] == '<em>'
assert req_body['highlight_size'] == 1
assert req_body['include_docs'] == False
assert req_body['include_fields'] == ['testString']
assert req_body['limit'] == 3
assert req_body['sort'] == ['testString']
assert req_body['stale'] == 'ok'
assert req_body['counts'] == ['testString']
assert req_body['drilldown'] == [['testString']]
assert req_body['group_field'] == 'testString'
assert req_body['group_limit'] == 1
assert req_body['group_sort'] == ['testString']
assert req_body['ranges'] == {}
# Verify streamed JSON response
result = response.get_result()
assert isinstance(result, requests.models.Response)
response_buf = result.iter_content(chunk_size=1024)
assert str(next(response_buf), "utf-8") == mock_response
def test_post_search_as_stream_all_params_with_retries(self):
    """Re-run test_post_search_as_stream_all_params with retries enabled, then disabled."""
    for toggle_retries in (_service.enable_retries, _service.disable_retries):
        toggle_retries()
        self.test_post_search_as_stream_all_params()
@responses.activate
def test_post_search_as_stream_value_error(self):
    """
    test_post_search_as_stream_value_error()

    Verify that post_search_as_stream raises ValueError whenever one of its
    required parameters (db, ddoc, index, query) is None.
    """
    # Set up mock
    url = self.preprocess_url(_base_url + '/testString/_design/testString/_search/testString')
    mock_response = '{"foo": "this is a mock response for JSON streaming"}'
    responses.add(responses.POST,
                  url,
                  body=mock_response,
                  content_type='application/json',
                  status=200)
    # Set up parameter values (only the required ones are exercised here;
    # the optional search parameters are irrelevant to the validation path).
    db = 'testString'
    ddoc = 'testString'
    index = 'testString'
    query = 'testString'
    # Pass in all but one required param and check for a ValueError
    req_param_dict = {
        "db": db,
        "ddoc": ddoc,
        "index": index,
        "query": query,
    }
    for param in req_param_dict:
        # Compare by equality, not identity: `is not` on strings only works by
        # accident of interning and is fragile.
        req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
        with pytest.raises(ValueError):
            _service.post_search_as_stream(**req_copy)
def test_post_search_as_stream_value_error_with_retries(self):
    """Re-run test_post_search_as_stream_value_error with retries enabled, then disabled."""
    for toggle_retries in (_service.enable_retries, _service.disable_retries):
        toggle_retries()
        self.test_post_search_as_stream_value_error()
class TestGetSearchInfo():
    """
    Test Class for get_search_info
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.

        Returns the normalized URL, or a compiled regex matching any number of
        trailing slashes when the URL ends with '/'.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_search_info_all_params(self):
        """
        get_search_info()

        Happy-path call with every required parameter supplied.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_search_info/testString')
        mock_response = '{"name": "name", "search_index": {"committed_seq": 13, "disk_size": 0, "doc_count": 0, "doc_del_count": 0, "pending_seq": 11}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Invoke method
        response = _service.get_search_info(
            db,
            ddoc,
            index,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_search_info_all_params_with_retries(self):
        # Run test_get_search_info_all_params with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_search_info_all_params()

    @responses.activate
    def test_get_search_info_value_error(self):
        """
        test_get_search_info_value_error()

        Each required parameter set to None must raise ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_search_info/testString')
        mock_response = '{"name": "name", "search_index": {"committed_seq": 13, "disk_size": 0, "doc_count": 0, "doc_del_count": 0, "pending_seq": 11}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "index": index,
        }
        for param in req_param_dict:
            # Equality (not `is not`) is the robust way to null one key at a time.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_search_info(**req_copy)

    def test_get_search_info_value_error_with_retries(self):
        # Run test_get_search_info_value_error with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_search_info_value_error()
# endregion
##############################################################################
# End of Service: Searches
##############################################################################
##############################################################################
# Start of Service: Geospatial
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()

        A noAuth-configured service name yields a usable CloudantV1 instance.
        """
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        svc = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert svc is not None
        assert isinstance(svc, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()

        Omitting the authenticator must raise ValueError.
        """
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestGetGeo():
    """
    Test Class for get_geo
    """

    # Shared mock payload; identical across all three tests, so it is defined once.
    _MOCK_RESPONSE = '{"bookmark": "bookmark", "features": [{"_id": "id", "_rev": "rev", "bbox": [4], "geometry": {"type": "Point", "coordinates": ["anyValue"]}, "properties": {"mapKey": "anyValue"}, "type": "Feature"}], "rows": [{"doc": {"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}, "geometry": {"type": "Point", "coordinates": ["anyValue"]}, "id": "id", "rev": "rev"}], "type": "FeatureCollection"}'

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_geo_all_params(self):
        """
        get_geo()

        Exercise get_geo with every optional parameter and verify each is
        serialized onto the query string.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo/testString')
        mock_response = self._MOCK_RESPONSE
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        bbox = 'testString'
        bookmark = 'testString'
        format_param = 'view'  # local renamed to avoid shadowing the builtin `format`
        g = 'testString'
        include_docs = False
        lat = -90
        limit = 0
        lon = -180
        nearest = False
        radius = 0
        rangex = 0
        rangey = 0
        relation = 'intersects'
        skip = 0
        stale = 'ok'
        # Invoke method
        response = _service.get_geo(
            db,
            ddoc,
            index,
            bbox=bbox,
            bookmark=bookmark,
            format=format_param,
            g=g,
            include_docs=include_docs,
            lat=lat,
            limit=limit,
            lon=lon,
            nearest=nearest,
            radius=radius,
            rangex=rangex,
            rangey=rangey,
            relation=relation,
            skip=skip,
            stale=stale,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'bbox={}'.format(bbox) in query_string
        assert 'bookmark={}'.format(bookmark) in query_string
        assert 'format={}'.format(format_param) in query_string
        assert 'g={}'.format(g) in query_string
        assert 'include_docs={}'.format('true' if include_docs else 'false') in query_string
        assert 'lat={}'.format(lat) in query_string
        assert 'limit={}'.format(limit) in query_string
        assert 'lon={}'.format(lon) in query_string
        assert 'nearest={}'.format('true' if nearest else 'false') in query_string
        assert 'radius={}'.format(radius) in query_string
        assert 'rangex={}'.format(rangex) in query_string
        assert 'rangey={}'.format(rangey) in query_string
        assert 'relation={}'.format(relation) in query_string
        assert 'skip={}'.format(skip) in query_string
        assert 'stale={}'.format(stale) in query_string

    def test_get_geo_all_params_with_retries(self):
        # Run test_get_geo_all_params with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_geo_all_params()

    @responses.activate
    def test_get_geo_required_params(self):
        """
        test_get_geo_required_params()

        Call get_geo with only the required positional parameters.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo/testString')
        mock_response = self._MOCK_RESPONSE
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Invoke method
        response = _service.get_geo(
            db,
            ddoc,
            index,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_geo_required_params_with_retries(self):
        # Run test_get_geo_required_params with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_geo_required_params()

    @responses.activate
    def test_get_geo_value_error(self):
        """
        test_get_geo_value_error()

        Each required parameter set to None must raise ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo/testString')
        mock_response = self._MOCK_RESPONSE
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "index": index,
        }
        for param in req_param_dict:
            # Equality (not `is not`) is the robust way to null one key at a time.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_geo(**req_copy)

    def test_get_geo_value_error_with_retries(self):
        # Run test_get_geo_value_error with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_geo_value_error()
class TestGetGeoAsStream():
    """
    Test Class for get_geo_as_stream
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_geo_as_stream_all_params(self):
        """
        get_geo_as_stream()

        Exercise the streaming variant with every optional parameter, check
        query-string serialization and the streamed body.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        bbox = 'testString'
        bookmark = 'testString'
        format_param = 'view'  # local renamed to avoid shadowing the builtin `format`
        g = 'testString'
        include_docs = False
        lat = -90
        limit = 0
        lon = -180
        nearest = False
        radius = 0
        rangex = 0
        rangey = 0
        relation = 'intersects'
        skip = 0
        stale = 'ok'
        # Invoke method
        response = _service.get_geo_as_stream(
            db,
            ddoc,
            index,
            bbox=bbox,
            bookmark=bookmark,
            format=format_param,
            g=g,
            include_docs=include_docs,
            lat=lat,
            limit=limit,
            lon=lon,
            nearest=nearest,
            radius=radius,
            rangex=rangex,
            rangey=rangey,
            relation=relation,
            skip=skip,
            stale=stale,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'bbox={}'.format(bbox) in query_string
        assert 'bookmark={}'.format(bookmark) in query_string
        assert 'format={}'.format(format_param) in query_string
        assert 'g={}'.format(g) in query_string
        assert 'include_docs={}'.format('true' if include_docs else 'false') in query_string
        assert 'lat={}'.format(lat) in query_string
        assert 'limit={}'.format(limit) in query_string
        assert 'lon={}'.format(lon) in query_string
        assert 'nearest={}'.format('true' if nearest else 'false') in query_string
        assert 'radius={}'.format(radius) in query_string
        assert 'rangex={}'.format(rangex) in query_string
        assert 'rangey={}'.format(rangey) in query_string
        assert 'relation={}'.format(relation) in query_string
        assert 'skip={}'.format(skip) in query_string
        assert 'stale={}'.format(stale) in query_string
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_get_geo_as_stream_all_params_with_retries(self):
        # Run test_get_geo_as_stream_all_params with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_geo_as_stream_all_params()

    @responses.activate
    def test_get_geo_as_stream_required_params(self):
        """
        test_get_geo_as_stream_required_params()

        Call with only the required parameters and verify the streamed body.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Invoke method
        response = _service.get_geo_as_stream(
            db,
            ddoc,
            index,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Verify streamed JSON response
        result = response.get_result()
        assert isinstance(result, requests.models.Response)
        response_buf = result.iter_content(chunk_size=1024)
        assert str(next(response_buf), "utf-8") == mock_response

    def test_get_geo_as_stream_required_params_with_retries(self):
        # Run test_get_geo_as_stream_required_params with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_geo_as_stream_required_params()

    @responses.activate
    def test_get_geo_as_stream_value_error(self):
        """
        test_get_geo_as_stream_value_error()

        Each required parameter set to None must raise ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo/testString')
        mock_response = '{"foo": "this is a mock response for JSON streaming"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "index": index,
        }
        for param in req_param_dict:
            # Equality (not `is not`) is the robust way to null one key at a time.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_geo_as_stream(**req_copy)

    def test_get_geo_as_stream_value_error_with_retries(self):
        # Run test_get_geo_as_stream_value_error with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_geo_as_stream_value_error()
class TestPostGeoCleanup():
    """
    Test Class for post_geo_cleanup
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_geo_cleanup_all_params(self):
        """
        post_geo_cleanup()

        The cleanup endpoint returns 202 Accepted.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_geo_cleanup')
        mock_response = '{"ok": true}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=202)
        # Set up parameter values
        db = 'testString'
        # Invoke method
        response = _service.post_geo_cleanup(
            db,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202

    def test_post_geo_cleanup_all_params_with_retries(self):
        # Run test_post_geo_cleanup_all_params with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_post_geo_cleanup_all_params()

    @responses.activate
    def test_post_geo_cleanup_value_error(self):
        """
        test_post_geo_cleanup_value_error()

        A None db must raise ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_geo_cleanup')
        mock_response = '{"ok": true}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=202)
        # Set up parameter values
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict:
            # Equality (not `is not`) is the robust way to null one key at a time.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_geo_cleanup(**req_copy)

    def test_post_geo_cleanup_value_error_with_retries(self):
        # Run test_post_geo_cleanup_value_error with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_post_geo_cleanup_value_error()
class TestGetGeoIndexInformation():
    """
    Test Class for get_geo_index_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_geo_index_information_all_params(self):
        """
        get_geo_index_information()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo_info/testString')
        mock_response = '{"geo_index": {"data_size": 0, "disk_size": 0, "doc_count": 0}, "name": "name"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Invoke method
        response = _service.get_geo_index_information(
            db,
            ddoc,
            index,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_geo_index_information_all_params_with_retries(self):
        # Run test_get_geo_index_information_all_params with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_geo_index_information_all_params()

    @responses.activate
    def test_get_geo_index_information_value_error(self):
        """
        test_get_geo_index_information_value_error()

        Each required parameter set to None must raise ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_design/testString/_geo_info/testString')
        mock_response = '{"geo_index": {"data_size": 0, "disk_size": 0, "doc_count": 0}, "name": "name"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        ddoc = 'testString'
        index = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "ddoc": ddoc,
            "index": index,
        }
        for param in req_param_dict:
            # Equality (not `is not`) is the robust way to null one key at a time.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_geo_index_information(**req_copy)

    def test_get_geo_index_information_value_error_with_retries(self):
        # Run test_get_geo_index_information_value_error with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_geo_index_information_value_error()
# endregion
##############################################################################
# End of Service: Geospatial
##############################################################################
##############################################################################
# Start of Service: Replication
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()

        A noAuth-configured service name yields a usable CloudantV1 instance.
        """
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        svc = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert svc is not None
        assert isinstance(svc, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()

        Omitting the authenticator must raise ValueError.
        """
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestHeadReplicationDocument():
    """
    Test Class for head_replication_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_head_replication_document_all_params(self):
        """
        head_replication_document()

        HEAD request with the optional If-None-Match header supplied.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        doc_id = 'testString'
        if_none_match = 'testString'
        # Invoke method
        response = _service.head_replication_document(
            doc_id,
            if_none_match=if_none_match,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_replication_document_all_params_with_retries(self):
        # Run test_head_replication_document_all_params with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_head_replication_document_all_params()

    @responses.activate
    def test_head_replication_document_required_params(self):
        """
        test_head_replication_document_required_params()

        HEAD request with only the required doc_id.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        doc_id = 'testString'
        # Invoke method
        response = _service.head_replication_document(
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_replication_document_required_params_with_retries(self):
        # Run test_head_replication_document_required_params with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_head_replication_document_required_params()

    @responses.activate
    def test_head_replication_document_value_error(self):
        """
        test_head_replication_document_value_error()

        A None doc_id must raise ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Equality (not `is not`) is the robust way to null one key at a time.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_replication_document(**req_copy)

    def test_head_replication_document_value_error_with_retries(self):
        # Run test_head_replication_document_value_error with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_head_replication_document_value_error()
class TestHeadSchedulerDocument():
    """
    Test Class for head_scheduler_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_head_scheduler_document_all_params(self):
        """
        head_scheduler_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/docs/_replicator/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        doc_id = 'testString'
        # Invoke method
        response = _service.head_scheduler_document(
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_scheduler_document_all_params_with_retries(self):
        # Run test_head_scheduler_document_all_params with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_head_scheduler_document_all_params()

    @responses.activate
    def test_head_scheduler_document_value_error(self):
        """
        test_head_scheduler_document_value_error()

        A None doc_id must raise ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/docs/_replicator/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Equality (not `is not`) is the robust way to null one key at a time.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_scheduler_document(**req_copy)

    def test_head_scheduler_document_value_error_with_retries(self):
        # Run test_head_scheduler_document_value_error with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_head_scheduler_document_value_error()
class TestHeadSchedulerJob():
    """
    Test Class for head_scheduler_job
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_head_scheduler_job_all_params(self):
        """
        head_scheduler_job()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/jobs/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        job_id = 'testString'
        # Invoke method
        response = _service.head_scheduler_job(
            job_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_scheduler_job_all_params_with_retries(self):
        # Run test_head_scheduler_job_all_params with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_head_scheduler_job_all_params()

    @responses.activate
    def test_head_scheduler_job_value_error(self):
        """
        test_head_scheduler_job_value_error()

        A None job_id must raise ValueError.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/jobs/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        job_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "job_id": job_id,
        }
        for param in req_param_dict:
            # Equality (not `is not`) is the robust way to null one key at a time.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_scheduler_job(**req_copy)

    def test_head_scheduler_job_value_error_with_retries(self):
        # Run test_head_scheduler_job_value_error with retries enabled, then disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_head_scheduler_job_value_error()
class TestDeleteReplicationDocument():
"""
Test Class for delete_replication_document
"""
def preprocess_url(self, request_url: str):
    """
    Preprocess the request URL to ensure the mock response will be found.
    """
    # Normalize encoding: decode first so an already-encoded URL is not double-encoded.
    decoded = urllib.parse.unquote(request_url)
    encoded = urllib.parse.quote(decoded, safe=':/')
    if re.fullmatch('.*/+', encoded):
        # URL ends in one or more slashes: match any number of trailing slashes.
        return re.compile(encoded.rstrip('/') + '/+')
    return encoded
@responses.activate
def test_delete_replication_document_all_params(self):
    """
    delete_replication_document()

    DELETE against the replicator database with every optional argument.
    """
    # Register the DELETE mock for the replicator database document.
    url = self.preprocess_url(_base_url + '/_replicator/testString')
    mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
    responses.add(responses.DELETE,
                  url,
                  body=mock_response,
                  content_type='application/json',
                  status=201)
    # Exercise the client with all supported arguments.
    doc_id = 'testString'
    if_match = 'testString'
    batch = 'ok'
    rev = 'testString'
    response = _service.delete_replication_document(
        doc_id,
        if_match=if_match,
        batch=batch,
        rev=rev,
        headers={}
    )
    # Exactly one HTTP call was made and the created status propagated.
    assert len(responses.calls) == 1
    assert response.status_code == 201
    # The optional query parameters must appear on the wire.
    query_string = urllib.parse.unquote_plus(responses.calls[0].request.url.split('?', 1)[1])
    assert 'batch={}'.format(batch) in query_string
    assert 'rev={}'.format(rev) in query_string
def test_delete_replication_document_all_params_with_retries(self):
# Enable retries and run test_delete_replication_document_all_params.
_service.enable_retries()
self.test_delete_replication_document_all_params()
# Disable retries and run test_delete_replication_document_all_params.
_service.disable_retries()
self.test_delete_replication_document_all_params()
@responses.activate
def test_delete_replication_document_required_params(self):
"""
test_delete_replication_document_required_params()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/_replicator/testString')
mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
responses.add(responses.DELETE,
url,
body=mock_response,
content_type='application/json',
status=201)
# Set up parameter values
doc_id = 'testString'
# Invoke method
response = _service.delete_replication_document(
doc_id,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 201
def test_delete_replication_document_required_params_with_retries(self):
# Enable retries and run test_delete_replication_document_required_params.
_service.enable_retries()
self.test_delete_replication_document_required_params()
# Disable retries and run test_delete_replication_document_required_params.
_service.disable_retries()
self.test_delete_replication_document_required_params()
@responses.activate
def test_delete_replication_document_value_error(self):
"""
test_delete_replication_document_value_error()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/_replicator/testString')
mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
responses.add(responses.DELETE,
url,
body=mock_response,
content_type='application/json',
status=201)
# Set up parameter values
doc_id = 'testString'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"doc_id": doc_id,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
_service.delete_replication_document(**req_copy)
def test_delete_replication_document_value_error_with_retries(self):
# Enable retries and run test_delete_replication_document_value_error.
_service.enable_retries()
self.test_delete_replication_document_value_error()
# Disable retries and run test_delete_replication_document_value_error.
_service.disable_retries()
self.test_delete_replication_document_value_error()
class TestGetReplicationDocument():
    """
    Test Class for get_replication_document
    """

    # JSON body returned by every mocked GET in this class (shared, was duplicated three times).
    _MOCK_RESPONSE = '{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}], "cancel": true, "checkpoint_interval": 0, "connection_timeout": 0, "continuous": false, "create_target": false, "create_target_params": {"n": 1, "partitioned": false, "q": 1}, "doc_ids": ["doc_ids"], "filter": "filter", "http_connections": 1, "query_params": {"mapKey": "inner"}, "retries_per_request": 0, "selector": {"mapKey": "anyValue"}, "since_seq": "since_seq", "socket_options": "socket_options", "source": {"auth": {"basic": {"password": "password", "username": "username"}, "iam": {"api_key": "api_key"}}, "headers": {"mapKey": "inner"}, "url": "url"}, "source_proxy": "source_proxy", "target": {"auth": {"basic": {"password": "password", "username": "username"}, "iam": {"api_key": "api_key"}}, "headers": {"mapKey": "inner"}, "url": "url"}, "target_proxy": "target_proxy", "use_checkpoints": true, "user_ctx": {"db": "db", "name": "name", "roles": ["_reader"]}, "worker_batch_size": 1, "worker_processes": 1}'

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # A URL with trailing slash(es) is matched via a regex that tolerates any number of them.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    def _register_mock(self):
        """Register the mocked GET /_replicator/testString endpoint (200 + JSON body)."""
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        responses.add(responses.GET,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=200)

    @responses.activate
    def test_get_replication_document_all_params(self):
        """
        get_replication_document()
        """
        # Set up mock
        self._register_mock()
        # Set up parameter values
        doc_id = 'testString'
        if_none_match = 'testString'
        attachments = False
        att_encoding_info = False
        conflicts = False
        deleted_conflicts = False
        latest = False
        local_seq = False
        meta = False
        rev = 'testString'
        revs = False
        revs_info = False
        # Invoke method
        response = _service.get_replication_document(
            doc_id,
            if_none_match=if_none_match,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            conflicts=conflicts,
            deleted_conflicts=deleted_conflicts,
            latest=latest,
            local_seq=local_seq,
            meta=meta,
            rev=rev,
            revs=revs,
            revs_info=revs_info,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'conflicts={}'.format('true' if conflicts else 'false') in query_string
        assert 'deleted_conflicts={}'.format('true' if deleted_conflicts else 'false') in query_string
        assert 'latest={}'.format('true' if latest else 'false') in query_string
        assert 'local_seq={}'.format('true' if local_seq else 'false') in query_string
        assert 'meta={}'.format('true' if meta else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string
        assert 'revs={}'.format('true' if revs else 'false') in query_string
        assert 'revs_info={}'.format('true' if revs_info else 'false') in query_string

    def test_get_replication_document_all_params_with_retries(self):
        # Run test_get_replication_document_all_params with retries enabled, then disabled.
        _service.enable_retries()
        self.test_get_replication_document_all_params()
        _service.disable_retries()
        self.test_get_replication_document_all_params()

    @responses.activate
    def test_get_replication_document_required_params(self):
        """
        test_get_replication_document_required_params()
        """
        # Set up mock
        self._register_mock()
        # Set up parameter values
        doc_id = 'testString'
        # Invoke method
        response = _service.get_replication_document(
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_replication_document_required_params_with_retries(self):
        # Run test_get_replication_document_required_params with retries enabled, then disabled.
        _service.enable_retries()
        self.test_get_replication_document_required_params()
        _service.disable_retries()
        self.test_get_replication_document_required_params()

    @responses.activate
    def test_get_replication_document_value_error(self):
        """
        test_get_replication_document_value_error()
        """
        # Set up mock
        self._register_mock()
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "doc_id": 'testString',
        }
        for param in req_param_dict.keys():
            # Compare by value (!=), not the fragile `is not` identity check.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_replication_document(**req_copy)

    def test_get_replication_document_value_error_with_retries(self):
        # Run test_get_replication_document_value_error with retries enabled, then disabled.
        _service.enable_retries()
        self.test_get_replication_document_value_error()
        _service.disable_retries()
        self.test_get_replication_document_value_error()
class TestPutReplicationDocument():
    """
    Test Class for put_replication_document
    """

    # JSON body returned by every mocked PUT in this class.
    _MOCK_RESPONSE = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # A URL with trailing slash(es) is matched via a regex that tolerates any number of them.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    def _register_mock(self):
        """Register the mocked PUT /_replicator/testString endpoint (201 + JSON body)."""
        url = self.preprocess_url(_base_url + '/_replicator/testString')
        responses.add(responses.PUT,
                      url,
                      body=self._MOCK_RESPONSE,
                      content_type='application/json',
                      status=201)

    def _build_replication_document(self):
        """
        Build the ReplicationDocument request dict shared by every test in this
        class.  Note: the original generated code also built an Attachment model
        that was never referenced; the '_attachments' map is deliberately empty.
        """
        # 'source' and 'target' intentionally share the same dict object,
        # mirroring the original generated test data.
        replication_database_model = {
            'auth': {
                'basic': {'password': 'testString', 'username': 'testString'},
                'iam': {'api_key': 'testString'},
            },
            'headers': {},
            'url': 'testString',
        }
        return {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': {'ids': ['testString'], 'start': 1},
            '_revs_info': [{'rev': 'testString', 'status': 'available'}],
            'cancel': True,
            'checkpoint_interval': 0,
            'connection_timeout': 0,
            'continuous': False,
            'create_target': False,
            'create_target_params': {'n': 1, 'partitioned': False, 'q': 1},
            'doc_ids': ['testString'],
            'filter': 'testString',
            'http_connections': 1,
            'query_params': {},
            'retries_per_request': 0,
            'selector': {},
            'since_seq': 'testString',
            'socket_options': 'testString',
            'source': replication_database_model,
            'source_proxy': 'testString',
            'target': replication_database_model,
            'target_proxy': 'testString',
            'use_checkpoints': True,
            'user_ctx': {'db': 'testString', 'name': 'testString', 'roles': ['_reader']},
            'worker_batch_size': 1,
            'worker_processes': 1,
            'foo': 'testString',  # additional (pass-through) property
        }

    @responses.activate
    def test_put_replication_document_all_params(self):
        """
        put_replication_document()
        """
        # Set up mock
        self._register_mock()
        # Set up parameter values
        doc_id = 'testString'
        replication_document = self._build_replication_document()
        if_match = 'testString'
        batch = 'ok'
        new_edits = False
        rev = 'testString'
        # Invoke method
        response = _service.put_replication_document(
            doc_id,
            replication_document,
            if_match=if_match,
            batch=batch,
            new_edits=new_edits,
            rev=rev,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'batch={}'.format(batch) in query_string
        assert 'new_edits={}'.format('true' if new_edits else 'false') in query_string
        assert 'rev={}'.format(rev) in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body == replication_document

    def test_put_replication_document_all_params_with_retries(self):
        # Run test_put_replication_document_all_params with retries enabled, then disabled.
        _service.enable_retries()
        self.test_put_replication_document_all_params()
        _service.disable_retries()
        self.test_put_replication_document_all_params()

    @responses.activate
    def test_put_replication_document_required_params(self):
        """
        test_put_replication_document_required_params()
        """
        # Set up mock
        self._register_mock()
        # Set up parameter values
        doc_id = 'testString'
        replication_document = self._build_replication_document()
        # Invoke method
        response = _service.put_replication_document(
            doc_id,
            replication_document,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body == replication_document

    def test_put_replication_document_required_params_with_retries(self):
        # Run test_put_replication_document_required_params with retries enabled, then disabled.
        _service.enable_retries()
        self.test_put_replication_document_required_params()
        _service.disable_retries()
        self.test_put_replication_document_required_params()

    @responses.activate
    def test_put_replication_document_value_error(self):
        """
        test_put_replication_document_value_error()
        """
        # Set up mock
        self._register_mock()
        # Set up parameter values
        doc_id = 'testString'
        replication_document = self._build_replication_document()
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "doc_id": doc_id,
            "replication_document": replication_document,
        }
        for param in req_param_dict.keys():
            # Compare by value (!=), not the fragile `is not` identity check.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.put_replication_document(**req_copy)

    def test_put_replication_document_value_error_with_retries(self):
        # Run test_put_replication_document_value_error with retries enabled, then disabled.
        _service.enable_retries()
        self.test_put_replication_document_value_error()
        _service.disable_retries()
        self.test_put_replication_document_value_error()
class TestGetSchedulerDocs():
"""
Test Class for get_scheduler_docs
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
@responses.activate
def test_get_scheduler_docs_all_params(self):
"""
get_scheduler_docs()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/_scheduler/docs')
mock_response = '{"total_rows": 0, "docs": [{"database": "database", "doc_id": "doc_id", "error_count": 0, "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "last_updated": "2019-01-01T12:00:00.000Z", "node": "node", "source": "source", "source_proxy": "source_proxy", "start_time": "2019-01-01T12:00:00.000Z", "state": "initializing", "target": "target", "target_proxy": "target_proxy"}]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
limit = 0
skip = 0
states = ['initializing']
# Invoke method
response = _service.get_scheduler_docs(
limit=limit,
skip=skip,
states=states,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate query params
query_string = responses.calls[0].request.url.split('?',1)[1]
query_string = urllib.parse.unquote_plus(query_string)
assert 'limit={}'.format(limit) in query_string
assert 'skip={}'.format(skip) in query_string
assert 'states={}'.format(','.join(states)) in query_string
def test_get_scheduler_docs_all_params_with_retries(self):
# Enable retries and run test_get_scheduler_docs_all_params.
_service.enable_retries()
self.test_get_scheduler_docs_all_params()
# Disable retries and run test_get_scheduler_docs_all_params.
_service.disable_retries()
self.test_get_scheduler_docs_all_params()
@responses.activate
def test_get_scheduler_docs_required_params(self):
    """
    test_get_scheduler_docs_required_params()
    """
    # Register the mocked scheduler-docs listing endpoint.
    mock_url = self.preprocess_url(_base_url + '/_scheduler/docs')
    payload = '{"total_rows": 0, "docs": [{"database": "database", "doc_id": "doc_id", "error_count": 0, "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "last_updated": "2019-01-01T12:00:00.000Z", "node": "node", "source": "source", "source_proxy": "source_proxy", "start_time": "2019-01-01T12:00:00.000Z", "state": "initializing", "target": "target", "target_proxy": "target_proxy"}]}'
    responses.add(responses.GET, mock_url, body=payload,
                  content_type='application/json', status=200)

    # Invoke the operation with no optional parameters at all.
    result = _service.get_scheduler_docs()

    # Exactly one HTTP call must have been made and answered with 200.
    assert len(responses.calls) == 1
    assert result.status_code == 200
def test_get_scheduler_docs_required_params_with_retries(self):
    # Re-run the required-params test twice: once with retries on, once off.
    _service.enable_retries()
    self.test_get_scheduler_docs_required_params()

    _service.disable_retries()
    self.test_get_scheduler_docs_required_params()
class TestGetSchedulerDocument():
    """
    Test Class for get_scheduler_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.

        The URL is unquoted first so an already-encoded URL is not
        double-encoded, then re-quoted keeping ':' and '/'. A URL with a
        trailing slash is converted to a regex that matches one or more
        trailing slashes.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_scheduler_document_all_params(self):
        """
        get_scheduler_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/docs/_replicator/testString')
        mock_response = '{"database": "database", "doc_id": "doc_id", "error_count": 0, "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "last_updated": "2019-01-01T12:00:00.000Z", "node": "node", "source": "source", "source_proxy": "source_proxy", "start_time": "2019-01-01T12:00:00.000Z", "state": "initializing", "target": "target", "target_proxy": "target_proxy"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        doc_id = 'testString'

        # Invoke method
        response = _service.get_scheduler_document(
            doc_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_scheduler_document_all_params_with_retries(self):
        # Enable retries and run test_get_scheduler_document_all_params.
        _service.enable_retries()
        self.test_get_scheduler_document_all_params()

        # Disable retries and run test_get_scheduler_document_all_params.
        _service.disable_retries()
        self.test_get_scheduler_document_all_params()

    @responses.activate
    def test_get_scheduler_document_value_error(self):
        """
        test_get_scheduler_document_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/docs/_replicator/testString')
        mock_response = '{"database": "database", "doc_id": "doc_id", "error_count": 0, "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "last_updated": "2019-01-01T12:00:00.000Z", "node": "node", "source": "source", "source_proxy": "source_proxy", "start_time": "2019-01-01T12:00:00.000Z", "state": "initializing", "target": "target", "target_proxy": "target_proxy"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        doc_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "doc_id": doc_id,
        }
        for param in req_param_dict.keys():
            # Blank out the parameter under test; compare by equality, not
            # identity ("is" on strings relies on interning and is fragile).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_scheduler_document(**req_copy)

    def test_get_scheduler_document_value_error_with_retries(self):
        # Enable retries and run test_get_scheduler_document_value_error.
        _service.enable_retries()
        self.test_get_scheduler_document_value_error()

        # Disable retries and run test_get_scheduler_document_value_error.
        _service.disable_retries()
        self.test_get_scheduler_document_value_error()
class TestGetSchedulerJobs():
    """
    Test Class for get_scheduler_jobs
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        normalized = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        if re.fullmatch('.*/+', normalized) is None:
            return normalized
        # Trailing slashes are matched leniently via a compiled regex.
        return re.compile(normalized.rstrip('/') + '/+')

    @responses.activate
    def test_get_scheduler_jobs_all_params(self):
        """
        get_scheduler_jobs()
        """
        # Register the mocked scheduler-jobs listing endpoint.
        mock_url = self.preprocess_url(_base_url + '/_scheduler/jobs')
        payload = '{"total_rows": 0, "jobs": [{"database": "database", "doc_id": "doc_id", "history": [{"reason": "reason", "timestamp": "2019-01-01T12:00:00.000Z", "type": "type"}], "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "node": "node", "pid": "pid", "source": "source", "start_time": "2019-01-01T12:00:00.000Z", "target": "target", "user": "user"}]}'
        responses.add(responses.GET, mock_url, body=payload,
                      content_type='application/json', status=200)

        # Exercise the operation with every optional parameter supplied.
        page_limit = 0
        page_skip = 0
        result = _service.get_scheduler_jobs(
            limit=page_limit,
            skip=page_skip,
            headers={},
        )

        # Exactly one HTTP call must have been made and answered with 200.
        assert len(responses.calls) == 1
        assert result.status_code == 200

        # Every supplied option must appear in the request's query string.
        _, _, raw_query = responses.calls[0].request.url.partition('?')
        decoded_query = urllib.parse.unquote_plus(raw_query)
        assert f'limit={page_limit}' in decoded_query
        assert f'skip={page_skip}' in decoded_query

    def test_get_scheduler_jobs_all_params_with_retries(self):
        # Re-run the all-params test twice: once with retries on, once off.
        _service.enable_retries()
        self.test_get_scheduler_jobs_all_params()

        _service.disable_retries()
        self.test_get_scheduler_jobs_all_params()

    @responses.activate
    def test_get_scheduler_jobs_required_params(self):
        """
        test_get_scheduler_jobs_required_params()
        """
        # Register the mocked scheduler-jobs listing endpoint.
        mock_url = self.preprocess_url(_base_url + '/_scheduler/jobs')
        payload = '{"total_rows": 0, "jobs": [{"database": "database", "doc_id": "doc_id", "history": [{"reason": "reason", "timestamp": "2019-01-01T12:00:00.000Z", "type": "type"}], "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "node": "node", "pid": "pid", "source": "source", "start_time": "2019-01-01T12:00:00.000Z", "target": "target", "user": "user"}]}'
        responses.add(responses.GET, mock_url, body=payload,
                      content_type='application/json', status=200)

        # Invoke the operation with no optional parameters at all.
        result = _service.get_scheduler_jobs()

        # Exactly one HTTP call must have been made and answered with 200.
        assert len(responses.calls) == 1
        assert result.status_code == 200

    def test_get_scheduler_jobs_required_params_with_retries(self):
        # Re-run the required-params test twice: once with retries on, once off.
        _service.enable_retries()
        self.test_get_scheduler_jobs_required_params()

        _service.disable_retries()
        self.test_get_scheduler_jobs_required_params()
class TestGetSchedulerJob():
    """
    Test Class for get_scheduler_job
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.

        Unquotes then re-quotes the URL (keeping ':' and '/') to avoid
        double-encoding; a trailing slash becomes a lenient regex match.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_scheduler_job_all_params(self):
        """
        get_scheduler_job()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/jobs/testString')
        mock_response = '{"database": "database", "doc_id": "doc_id", "history": [{"reason": "reason", "timestamp": "2019-01-01T12:00:00.000Z", "type": "type"}], "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "node": "node", "pid": "pid", "source": "source", "start_time": "2019-01-01T12:00:00.000Z", "target": "target", "user": "user"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        job_id = 'testString'

        # Invoke method
        response = _service.get_scheduler_job(
            job_id,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_scheduler_job_all_params_with_retries(self):
        # Enable retries and run test_get_scheduler_job_all_params.
        _service.enable_retries()
        self.test_get_scheduler_job_all_params()

        # Disable retries and run test_get_scheduler_job_all_params.
        _service.disable_retries()
        self.test_get_scheduler_job_all_params()

    @responses.activate
    def test_get_scheduler_job_value_error(self):
        """
        test_get_scheduler_job_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_scheduler/jobs/testString')
        mock_response = '{"database": "database", "doc_id": "doc_id", "history": [{"reason": "reason", "timestamp": "2019-01-01T12:00:00.000Z", "type": "type"}], "id": "id", "info": {"changes_pending": 0, "checkpointed_source_seq": "checkpointed_source_seq", "doc_write_failures": 0, "docs_read": 0, "docs_written": 0, "error": "error", "missing_revisions_found": 0, "revisions_checked": 0, "source_seq": "source_seq", "through_seq": "through_seq"}, "node": "node", "pid": "pid", "source": "source", "start_time": "2019-01-01T12:00:00.000Z", "target": "target", "user": "user"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        job_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "job_id": job_id,
        }
        for param in req_param_dict.keys():
            # Blank out the parameter under test; compare by equality, not
            # identity ("is" on strings relies on interning and is fragile).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_scheduler_job(**req_copy)

    def test_get_scheduler_job_value_error_with_retries(self):
        # Enable retries and run test_get_scheduler_job_value_error.
        _service.enable_retries()
        self.test_get_scheduler_job_value_error()

        # Disable retries and run test_get_scheduler_job_value_error.
        _service.disable_retries()
        self.test_get_scheduler_job_value_error()
# endregion
##############################################################################
# End of Service: Replication
##############################################################################
##############################################################################
# Start of Service: Authentication
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # With noAuth configured in the environment, construction must succeed.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'

        service = CloudantV1.new_instance(
            service_name='TEST_SERVICE',
        )
        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # No service name means no authenticator can be resolved.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            service = CloudantV1.new_instance(
            )
class TestGetSessionInformation():
    """
    Test Class for get_session_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        normalized = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        if re.fullmatch('.*/+', normalized) is None:
            return normalized
        # Trailing slashes are matched leniently via a compiled regex.
        return re.compile(normalized.rstrip('/') + '/+')

    @responses.activate
    def test_get_session_information_all_params(self):
        """
        get_session_information()
        """
        # Register the mocked _session endpoint.
        mock_url = self.preprocess_url(_base_url + '/_session')
        payload = '{"ok": true, "info": {"authenticated": "authenticated", "authentication_db": "authentication_db", "authentication_handlers": ["authentication_handlers"]}, "userCtx": {"db": "db", "name": "name", "roles": ["_reader"]}}'
        responses.add(responses.GET, mock_url, body=payload,
                      content_type='application/json', status=200)

        # Invoke the operation (it takes no parameters).
        result = _service.get_session_information()

        # Exactly one HTTP call must have been made and answered with 200.
        assert len(responses.calls) == 1
        assert result.status_code == 200

    def test_get_session_information_all_params_with_retries(self):
        # Re-run the all-params test twice: once with retries on, once off.
        _service.enable_retries()
        self.test_get_session_information_all_params()

        _service.disable_retries()
        self.test_get_session_information_all_params()
# endregion
##############################################################################
# End of Service: Authentication
##############################################################################
##############################################################################
# Start of Service: Authorization
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # With noAuth configured in the environment, construction must succeed.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'

        service = CloudantV1.new_instance(
            service_name='TEST_SERVICE',
        )
        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # No service name means no authenticator can be resolved.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            service = CloudantV1.new_instance(
            )
class TestGetSecurity():
    """
    Test Class for get_security
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.

        Unquotes then re-quotes the URL (keeping ':' and '/') to avoid
        double-encoding; a trailing slash becomes a lenient regex match.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_security_all_params(self):
        """
        get_security()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_security')
        mock_response = '{"admins": {"names": ["names"], "roles": ["roles"]}, "members": {"names": ["names"], "roles": ["roles"]}, "cloudant": {"mapKey": ["_reader"]}, "couchdb_auth_only": false}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'

        # Invoke method
        response = _service.get_security(
            db,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_security_all_params_with_retries(self):
        # Enable retries and run test_get_security_all_params.
        _service.enable_retries()
        self.test_get_security_all_params()

        # Disable retries and run test_get_security_all_params.
        _service.disable_retries()
        self.test_get_security_all_params()

    @responses.activate
    def test_get_security_value_error(self):
        """
        test_get_security_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_security')
        mock_response = '{"admins": {"names": ["names"], "roles": ["roles"]}, "members": {"names": ["names"], "roles": ["roles"]}, "cloudant": {"mapKey": ["_reader"]}, "couchdb_auth_only": false}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        db = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict.keys():
            # Blank out the parameter under test; compare by equality, not
            # identity ("is" on strings relies on interning and is fragile).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_security(**req_copy)

    def test_get_security_value_error_with_retries(self):
        # Enable retries and run test_get_security_value_error.
        _service.enable_retries()
        self.test_get_security_value_error()

        # Disable retries and run test_get_security_value_error.
        _service.disable_retries()
        self.test_get_security_value_error()
class TestPutSecurity():
    """
    Test Class for put_security
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.

        Unquotes then re-quotes the URL (keeping ':' and '/') to avoid
        double-encoding; a trailing slash becomes a lenient regex match.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_put_security_all_params(self):
        """
        put_security()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_security')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a SecurityObject model
        security_object_model = {}
        security_object_model['names'] = ['testString']
        security_object_model['roles'] = ['testString']

        # Set up parameter values
        db = 'testString'
        admins = security_object_model
        members = security_object_model
        cloudant = {}
        couchdb_auth_only = True

        # Invoke method
        response = _service.put_security(
            db,
            admins=admins,
            members=members,
            cloudant=cloudant,
            couchdb_auth_only=couchdb_auth_only,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # The SDK gzip-compresses request bodies; decompress before inspecting.
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)

        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['admins'] == security_object_model
        assert req_body['members'] == security_object_model
        assert req_body['cloudant'] == {}
        assert req_body['couchdb_auth_only'] is True

    def test_put_security_all_params_with_retries(self):
        # Enable retries and run test_put_security_all_params.
        _service.enable_retries()
        self.test_put_security_all_params()

        # Disable retries and run test_put_security_all_params.
        _service.disable_retries()
        self.test_put_security_all_params()

    @responses.activate
    def test_put_security_value_error(self):
        """
        test_put_security_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_security')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a SecurityObject model
        security_object_model = {}
        security_object_model['names'] = ['testString']
        security_object_model['roles'] = ['testString']

        # Set up parameter values
        db = 'testString'
        admins = security_object_model
        members = security_object_model
        cloudant = {}
        couchdb_auth_only = True

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict.keys():
            # Blank out the parameter under test; compare by equality, not
            # identity ("is" on strings relies on interning and is fragile).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.put_security(**req_copy)

    def test_put_security_value_error_with_retries(self):
        # Enable retries and run test_put_security_value_error.
        _service.enable_retries()
        self.test_put_security_value_error()

        # Disable retries and run test_put_security_value_error.
        _service.disable_retries()
        self.test_put_security_value_error()
class TestPostApiKeys():
    """
    Test Class for post_api_keys
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        normalized = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        if re.fullmatch('.*/+', normalized) is None:
            return normalized
        # Trailing slashes are matched leniently via a compiled regex.
        return re.compile(normalized.rstrip('/') + '/+')

    @responses.activate
    def test_post_api_keys_all_params(self):
        """
        post_api_keys()
        """
        # Register the mocked api_keys endpoint.
        mock_url = self.preprocess_url(_base_url + '/_api/v2/api_keys')
        payload = '{"ok": true, "key": "key", "password": "password"}'
        responses.add(responses.POST, mock_url, body=payload,
                      content_type='application/json', status=201)

        # Invoke the operation (it takes no parameters).
        result = _service.post_api_keys()

        # Exactly one HTTP call must have been made and answered with 201.
        assert len(responses.calls) == 1
        assert result.status_code == 201

    def test_post_api_keys_all_params_with_retries(self):
        # Re-run the all-params test twice: once with retries on, once off.
        _service.enable_retries()
        self.test_post_api_keys_all_params()

        _service.disable_retries()
        self.test_post_api_keys_all_params()
class TestPutCloudantSecurityConfiguration():
    """
    Test Class for put_cloudant_security_configuration
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.

        Unquotes then re-quotes the URL (keeping ':' and '/') to avoid
        double-encoding; a trailing slash becomes a lenient regex match.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_put_cloudant_security_configuration_all_params(self):
        """
        put_cloudant_security_configuration()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_api/v2/db/testString/_security')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a SecurityObject model
        security_object_model = {}
        security_object_model['names'] = ['testString']
        security_object_model['roles'] = ['testString']

        # Set up parameter values
        db = 'testString'
        cloudant = {}
        admins = security_object_model
        members = security_object_model
        couchdb_auth_only = True

        # Invoke method
        response = _service.put_cloudant_security_configuration(
            db,
            cloudant,
            admins=admins,
            members=members,
            couchdb_auth_only=couchdb_auth_only,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # The SDK gzip-compresses request bodies; decompress before inspecting.
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)

        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['cloudant'] == {}
        assert req_body['admins'] == security_object_model
        assert req_body['members'] == security_object_model
        assert req_body['couchdb_auth_only'] is True

    def test_put_cloudant_security_configuration_all_params_with_retries(self):
        # Enable retries and run test_put_cloudant_security_configuration_all_params.
        _service.enable_retries()
        self.test_put_cloudant_security_configuration_all_params()

        # Disable retries and run test_put_cloudant_security_configuration_all_params.
        _service.disable_retries()
        self.test_put_cloudant_security_configuration_all_params()

    @responses.activate
    def test_put_cloudant_security_configuration_value_error(self):
        """
        test_put_cloudant_security_configuration_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_api/v2/db/testString/_security')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Construct a dict representation of a SecurityObject model
        security_object_model = {}
        security_object_model['names'] = ['testString']
        security_object_model['roles'] = ['testString']

        # Set up parameter values
        db = 'testString'
        cloudant = {}
        admins = security_object_model
        members = security_object_model
        couchdb_auth_only = True

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "cloudant": cloudant,
        }
        for param in req_param_dict.keys():
            # Blank out the parameter under test; compare by equality, not
            # identity ("is" on strings relies on interning and is fragile).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.put_cloudant_security_configuration(**req_copy)

    def test_put_cloudant_security_configuration_value_error_with_retries(self):
        # Enable retries and run test_put_cloudant_security_configuration_value_error.
        _service.enable_retries()
        self.test_put_cloudant_security_configuration_value_error()

        # Disable retries and run test_put_cloudant_security_configuration_value_error.
        _service.disable_retries()
        self.test_put_cloudant_security_configuration_value_error()
# endregion
##############################################################################
# End of Service: Authorization
##############################################################################
##############################################################################
# Start of Service: CORS
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # With noAuth configured in the environment, construction must succeed.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'

        service = CloudantV1.new_instance(
            service_name='TEST_SERVICE',
        )
        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # No service name means no authenticator can be resolved.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            service = CloudantV1.new_instance(
            )
class TestGetCorsInformation():
    """
    Test Class for get_cors_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        # Unquote first so an already-encoded URL is not double-encoded.
        normalized = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
        if re.fullmatch('.*/+', normalized) is None:
            return normalized
        # Trailing slashes are matched leniently via a compiled regex.
        return re.compile(normalized.rstrip('/') + '/+')

    @responses.activate
    def test_get_cors_information_all_params(self):
        """
        get_cors_information()
        """
        # Register the mocked CORS configuration endpoint.
        mock_url = self.preprocess_url(_base_url + '/_api/v2/user/config/cors')
        payload = '{"allow_credentials": true, "enable_cors": true, "origins": ["origins"]}'
        responses.add(responses.GET, mock_url, body=payload,
                      content_type='application/json', status=200)

        # Invoke the operation (it takes no parameters).
        result = _service.get_cors_information()

        # Exactly one HTTP call must have been made and answered with 200.
        assert len(responses.calls) == 1
        assert result.status_code == 200

    def test_get_cors_information_all_params_with_retries(self):
        # Re-run the all-params test twice: once with retries on, once off.
        _service.enable_retries()
        self.test_get_cors_information_all_params()

        _service.disable_retries()
        self.test_get_cors_information_all_params()
class TestPutCorsConfiguration():
    """
    Test Class for put_cors_configuration
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.

        Unquotes then re-quotes the URL (keeping ':' and '/') to avoid
        double-encoding; a trailing slash becomes a lenient regex match.
        """
        request_url = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_put_cors_configuration_all_params(self):
        """
        put_cors_configuration()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_api/v2/user/config/cors')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        origins = ['testString']
        allow_credentials = True
        enable_cors = True

        # Invoke method
        response = _service.put_cors_configuration(
            origins,
            allow_credentials=allow_credentials,
            enable_cors=enable_cors,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

        # The SDK gzip-compresses request bodies; decompress before inspecting.
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)

        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['origins'] == ['testString']
        assert req_body['allow_credentials'] is True
        assert req_body['enable_cors'] is True

    def test_put_cors_configuration_all_params_with_retries(self):
        # Enable retries and run test_put_cors_configuration_all_params.
        _service.enable_retries()
        self.test_put_cors_configuration_all_params()

        # Disable retries and run test_put_cors_configuration_all_params.
        _service.disable_retries()
        self.test_put_cors_configuration_all_params()

    @responses.activate
    def test_put_cors_configuration_value_error(self):
        """
        test_put_cors_configuration_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_api/v2/user/config/cors')
        mock_response = '{"ok": true}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        origins = ['testString']
        allow_credentials = True
        enable_cors = True

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "origins": origins,
        }
        for param in req_param_dict.keys():
            # Blank out the parameter under test; compare by equality, not
            # identity ("is" on strings relies on interning and is fragile).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.put_cors_configuration(**req_copy)

    def test_put_cors_configuration_value_error_with_retries(self):
        # Enable retries and run test_put_cors_configuration_value_error.
        _service.enable_retries()
        self.test_put_cors_configuration_value_error()

        # Disable retries and run test_put_cors_configuration_value_error.
        _service.disable_retries()
        self.test_put_cors_configuration_value_error()
# endregion
##############################################################################
# End of Service: CORS
##############################################################################
##############################################################################
# Start of Service: Attachments
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # With noAuth configured in the environment, construction must succeed.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'

        service = CloudantV1.new_instance(
            service_name='TEST_SERVICE',
        )
        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # No service name means no authenticator can be resolved.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            service = CloudantV1.new_instance(
            )
class TestHeadAttachment():
"""
Test Class for head_attachment
"""
def preprocess_url(self, request_url: str):
    """
    Preprocess the request URL to ensure the mock response will be found.
    """
    # Unquote first so an already-encoded URL is not double-encoded.
    normalized = urllib.parse.quote(urllib.parse.unquote(request_url), safe=':/')
    if re.fullmatch('.*/+', normalized) is None:
        return normalized
    # Trailing slashes are matched leniently via a compiled regex.
    return re.compile(normalized.rstrip('/') + '/+')
@responses.activate
def test_head_attachment_all_params(self):
"""
head_attachment()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/testString/testString')
responses.add(responses.HEAD,
url,
status=200)
# Set up parameter values
db = 'testString'
doc_id = 'testString'
attachment_name = 'testString'
if_match = 'testString'
if_none_match = 'testString'
rev = 'testString'
# Invoke method
response = _service.head_attachment(
db,
doc_id,
attachment_name,
if_match=if_match,
if_none_match=if_none_match,
rev=rev,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate query params
query_string = responses.calls[0].request.url.split('?',1)[1]
query_string = urllib.parse.unquote_plus(query_string)
assert 'rev={}'.format(rev) in query_string
def test_head_attachment_all_params_with_retries(self):
# Enable retries and run test_head_attachment_all_params.
_service.enable_retries()
self.test_head_attachment_all_params()
# Disable retries and run test_head_attachment_all_params.
_service.disable_retries()
self.test_head_attachment_all_params()
@responses.activate
def test_head_attachment_required_params(self):
"""
test_head_attachment_required_params()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/testString/testString')
responses.add(responses.HEAD,
url,
status=200)
# Set up parameter values
db = 'testString'
doc_id = 'testString'
attachment_name = 'testString'
# Invoke method
response = _service.head_attachment(
db,
doc_id,
attachment_name,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
def test_head_attachment_required_params_with_retries(self):
# Enable retries and run test_head_attachment_required_params.
_service.enable_retries()
self.test_head_attachment_required_params()
# Disable retries and run test_head_attachment_required_params.
_service.disable_retries()
self.test_head_attachment_required_params()
@responses.activate
def test_head_attachment_value_error(self):
"""
test_head_attachment_value_error()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/testString/testString')
responses.add(responses.HEAD,
url,
status=200)
# Set up parameter values
db = 'testString'
doc_id = 'testString'
attachment_name = 'testString'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"db": db,
"doc_id": doc_id,
"attachment_name": attachment_name,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
_service.head_attachment(**req_copy)
def test_head_attachment_value_error_with_retries(self):
# Enable retries and run test_head_attachment_value_error.
_service.enable_retries()
self.test_head_attachment_value_error()
# Disable retries and run test_head_attachment_value_error.
_service.disable_retries()
self.test_head_attachment_value_error()
class TestDeleteAttachment():
    """
    Test Class for delete_attachment
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in a slash are matched via a regex so the mock is found
        # whether or not the client collapses/repeats the trailing slash.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_delete_attachment_all_params(self):
        """
        delete_attachment()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        if_match = 'testString'
        rev = 'testString'
        batch = 'ok'
        # Invoke method
        response = _service.delete_attachment(
            db,
            doc_id,
            attachment_name,
            if_match=if_match,
            rev=rev,
            batch=batch,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'rev={}'.format(rev) in query_string
        assert 'batch={}'.format(batch) in query_string

    def test_delete_attachment_all_params_with_retries(self):
        # Enable retries and run test_delete_attachment_all_params.
        _service.enable_retries()
        self.test_delete_attachment_all_params()
        # Disable retries and run test_delete_attachment_all_params.
        _service.disable_retries()
        self.test_delete_attachment_all_params()

    @responses.activate
    def test_delete_attachment_required_params(self):
        """
        test_delete_attachment_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        # Invoke method
        response = _service.delete_attachment(
            db,
            doc_id,
            attachment_name,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

    def test_delete_attachment_required_params_with_retries(self):
        # Enable retries and run test_delete_attachment_required_params.
        _service.enable_retries()
        self.test_delete_attachment_required_params()
        # Disable retries and run test_delete_attachment_required_params.
        _service.disable_retries()
        self.test_delete_attachment_required_params()

    @responses.activate
    def test_delete_attachment_value_error(self):
        """
        test_delete_attachment_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
            "attachment_name": attachment_name,
        }
        for param in req_param_dict:
            # Compare keys with `!=` (equality), not `is not` (identity):
            # string identity only holds by interning accident.
            req_copy = {key: (val if key != param else None) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_attachment(**req_copy)

    def test_delete_attachment_value_error_with_retries(self):
        # Enable retries and run test_delete_attachment_value_error.
        _service.enable_retries()
        self.test_delete_attachment_value_error()
        # Disable retries and run test_delete_attachment_value_error.
        _service.disable_retries()
        self.test_delete_attachment_value_error()
class TestGetAttachment():
    """
    Test Class for get_attachment
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in a slash are matched via a regex so the mock is found
        # whether or not the client collapses/repeats the trailing slash.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_attachment_all_params(self):
        """
        get_attachment()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='*/*',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        if_match = 'testString'
        if_none_match = 'testString'
        range = 'testString'
        rev = 'testString'
        # Invoke method
        response = _service.get_attachment(
            db,
            doc_id,
            attachment_name,
            if_match=if_match,
            if_none_match=if_none_match,
            range=range,
            rev=rev,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'rev={}'.format(rev) in query_string

    def test_get_attachment_all_params_with_retries(self):
        # Enable retries and run test_get_attachment_all_params.
        _service.enable_retries()
        self.test_get_attachment_all_params()
        # Disable retries and run test_get_attachment_all_params.
        _service.disable_retries()
        self.test_get_attachment_all_params()

    @responses.activate
    def test_get_attachment_required_params(self):
        """
        test_get_attachment_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='*/*',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        # Invoke method
        response = _service.get_attachment(
            db,
            doc_id,
            attachment_name,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_attachment_required_params_with_retries(self):
        # Enable retries and run test_get_attachment_required_params.
        _service.enable_retries()
        self.test_get_attachment_required_params()
        # Disable retries and run test_get_attachment_required_params.
        _service.disable_retries()
        self.test_get_attachment_required_params()

    @responses.activate
    def test_get_attachment_value_error(self):
        """
        test_get_attachment_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = 'This is a mock binary response.'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='*/*',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
            "attachment_name": attachment_name,
        }
        for param in req_param_dict:
            # Compare keys with `!=` (equality), not `is not` (identity):
            # string identity only holds by interning accident.
            req_copy = {key: (val if key != param else None) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_attachment(**req_copy)

    def test_get_attachment_value_error_with_retries(self):
        # Enable retries and run test_get_attachment_value_error.
        _service.enable_retries()
        self.test_get_attachment_value_error()
        # Disable retries and run test_get_attachment_value_error.
        _service.disable_retries()
        self.test_get_attachment_value_error()
class TestPutAttachment():
    """
    Test Class for put_attachment
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in a slash are matched via a regex so the mock is found
        # whether or not the client collapses/repeats the trailing slash.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_put_attachment_all_params(self):
        """
        put_attachment()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        attachment = io.BytesIO(b'This is a mock file.').getvalue()
        content_type = 'application/octet-stream'
        if_match = 'testString'
        rev = 'testString'
        # Invoke method
        response = _service.put_attachment(
            db,
            doc_id,
            attachment_name,
            attachment,
            content_type,
            if_match=if_match,
            rev=rev,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'rev={}'.format(rev) in query_string
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params

    def test_put_attachment_all_params_with_retries(self):
        # Enable retries and run test_put_attachment_all_params.
        _service.enable_retries()
        self.test_put_attachment_all_params()
        # Disable retries and run test_put_attachment_all_params.
        _service.disable_retries()
        self.test_put_attachment_all_params()

    @responses.activate
    def test_put_attachment_required_params(self):
        """
        test_put_attachment_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        attachment = io.BytesIO(b'This is a mock file.').getvalue()
        content_type = 'application/octet-stream'
        # Invoke method
        response = _service.put_attachment(
            db,
            doc_id,
            attachment_name,
            attachment,
            content_type,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate body params

    def test_put_attachment_required_params_with_retries(self):
        # Enable retries and run test_put_attachment_required_params.
        _service.enable_retries()
        self.test_put_attachment_required_params()
        # Disable retries and run test_put_attachment_required_params.
        _service.disable_retries()
        self.test_put_attachment_required_params()

    @responses.activate
    def test_put_attachment_value_error(self):
        """
        test_put_attachment_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/testString/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.PUT,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=201)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        attachment_name = 'testString'
        attachment = io.BytesIO(b'This is a mock file.').getvalue()
        content_type = 'application/octet-stream'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
            "attachment_name": attachment_name,
            "attachment": attachment,
            "content_type": content_type,
        }
        for param in req_param_dict:
            # Compare keys with `!=` (equality), not `is not` (identity):
            # string identity only holds by interning accident.
            req_copy = {key: (val if key != param else None) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.put_attachment(**req_copy)

    def test_put_attachment_value_error_with_retries(self):
        # Enable retries and run test_put_attachment_value_error.
        _service.enable_retries()
        self.test_put_attachment_value_error()
        # Disable retries and run test_put_attachment_value_error.
        _service.disable_retries()
        self.test_put_attachment_value_error()
# endregion
##############################################################################
# End of Service: Attachments
##############################################################################
##############################################################################
# Start of Service: LocalDocuments
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()
        """
        # Force a no-op authenticator via the environment, then build the
        # client through the external-configuration factory.
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        instance = CloudantV1.new_instance(service_name='TEST_SERVICE')
        assert instance is not None
        assert isinstance(instance, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()
        """
        # Without an authenticator the factory must refuse to construct.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestHeadLocalDocument():
    """
    Test Class for head_local_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in a slash are matched via a regex so the mock is found
        # whether or not the client collapses/repeats the trailing slash.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_head_local_document_all_params(self):
        """
        head_local_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        if_none_match = 'testString'
        # Invoke method
        response = _service.head_local_document(
            db,
            doc_id,
            if_none_match=if_none_match,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_local_document_all_params_with_retries(self):
        # Enable retries and run test_head_local_document_all_params.
        _service.enable_retries()
        self.test_head_local_document_all_params()
        # Disable retries and run test_head_local_document_all_params.
        _service.disable_retries()
        self.test_head_local_document_all_params()

    @responses.activate
    def test_head_local_document_required_params(self):
        """
        test_head_local_document_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Invoke method
        response = _service.head_local_document(
            db,
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_local_document_required_params_with_retries(self):
        # Enable retries and run test_head_local_document_required_params.
        _service.enable_retries()
        self.test_head_local_document_required_params()
        # Disable retries and run test_head_local_document_required_params.
        _service.disable_retries()
        self.test_head_local_document_required_params()

    @responses.activate
    def test_head_local_document_value_error(self):
        """
        test_head_local_document_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        responses.add(responses.HEAD,
                      url,
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Compare keys with `!=` (equality), not `is not` (identity):
            # string identity only holds by interning accident.
            req_copy = {key: (val if key != param else None) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.head_local_document(**req_copy)

    def test_head_local_document_value_error_with_retries(self):
        # Enable retries and run test_head_local_document_value_error.
        _service.enable_retries()
        self.test_head_local_document_value_error()
        # Disable retries and run test_head_local_document_value_error.
        _service.disable_retries()
        self.test_head_local_document_value_error()
class TestDeleteLocalDocument():
    """
    Test Class for delete_local_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in a slash are matched via a regex so the mock is found
        # whether or not the client collapses/repeats the trailing slash.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_delete_local_document_all_params(self):
        """
        delete_local_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        batch = 'ok'
        # Invoke method
        response = _service.delete_local_document(
            db,
            doc_id,
            batch=batch,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'batch={}'.format(batch) in query_string

    def test_delete_local_document_all_params_with_retries(self):
        # Enable retries and run test_delete_local_document_all_params.
        _service.enable_retries()
        self.test_delete_local_document_all_params()
        # Disable retries and run test_delete_local_document_all_params.
        _service.disable_retries()
        self.test_delete_local_document_all_params()

    @responses.activate
    def test_delete_local_document_required_params(self):
        """
        test_delete_local_document_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Invoke method
        response = _service.delete_local_document(
            db,
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_delete_local_document_required_params_with_retries(self):
        # Enable retries and run test_delete_local_document_required_params.
        _service.enable_retries()
        self.test_delete_local_document_required_params()
        # Disable retries and run test_delete_local_document_required_params.
        _service.disable_retries()
        self.test_delete_local_document_required_params()

    @responses.activate
    def test_delete_local_document_value_error(self):
        """
        test_delete_local_document_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
        responses.add(responses.DELETE,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Compare keys with `!=` (equality), not `is not` (identity):
            # string identity only holds by interning accident.
            req_copy = {key: (val if key != param else None) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_local_document(**req_copy)

    def test_delete_local_document_value_error_with_retries(self):
        # Enable retries and run test_delete_local_document_value_error.
        _service.enable_retries()
        self.test_delete_local_document_value_error()
        # Disable retries and run test_delete_local_document_value_error.
        _service.disable_retries()
        self.test_delete_local_document_value_error()
class TestGetLocalDocument():
    """
    Test Class for get_local_document
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        # URLs ending in a slash are matched via a regex so the mock is found
        # whether or not the client collapses/repeats the trailing slash.
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_local_document_all_params(self):
        """
        get_local_document()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        mock_response = '{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        accept = 'application/json'
        if_none_match = 'testString'
        attachments = False
        att_encoding_info = False
        local_seq = False
        # Invoke method
        response = _service.get_local_document(
            db,
            doc_id,
            accept=accept,
            if_none_match=if_none_match,
            attachments=attachments,
            att_encoding_info=att_encoding_info,
            local_seq=local_seq,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?',1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'attachments={}'.format('true' if attachments else 'false') in query_string
        assert 'att_encoding_info={}'.format('true' if att_encoding_info else 'false') in query_string
        assert 'local_seq={}'.format('true' if local_seq else 'false') in query_string

    def test_get_local_document_all_params_with_retries(self):
        # Enable retries and run test_get_local_document_all_params.
        _service.enable_retries()
        self.test_get_local_document_all_params()
        # Disable retries and run test_get_local_document_all_params.
        _service.disable_retries()
        self.test_get_local_document_all_params()

    @responses.activate
    def test_get_local_document_required_params(self):
        """
        test_get_local_document_required_params()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        mock_response = '{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Invoke method
        response = _service.get_local_document(
            db,
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_local_document_required_params_with_retries(self):
        # Enable retries and run test_get_local_document_required_params.
        _service.enable_retries()
        self.test_get_local_document_required_params()
        # Disable retries and run test_get_local_document_required_params.
        _service.disable_retries()
        self.test_get_local_document_required_params()

    @responses.activate
    def test_get_local_document_value_error(self):
        """
        test_get_local_document_value_error()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_local/testString')
        mock_response = '{"_attachments": {"mapKey": {"content_type": "content_type", "data": "VGhpcyBpcyBhbiBlbmNvZGVkIGJ5dGUgYXJyYXku", "digest": "digest", "encoded_length": 0, "encoding": "encoding", "follows": false, "length": 0, "revpos": 1, "stub": true}}, "_conflicts": ["conflicts"], "_deleted": false, "_deleted_conflicts": ["deleted_conflicts"], "_id": "id", "_local_seq": "local_seq", "_rev": "rev", "_revisions": {"ids": ["ids"], "start": 1}, "_revs_info": [{"rev": "rev", "status": "available"}]}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict:
            # Compare keys with `!=` (equality), not `is not` (identity):
            # string identity only holds by interning accident.
            req_copy = {key: (val if key != param else None) for key, val in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_local_document(**req_copy)

    def test_get_local_document_value_error_with_retries(self):
        # Enable retries and run test_get_local_document_value_error.
        _service.enable_retries()
        self.test_get_local_document_value_error()
        # Disable retries and run test_get_local_document_value_error.
        _service.disable_retries()
        self.test_get_local_document_value_error()
class TestPutLocalDocument():
"""
Test Class for put_local_document
"""
def preprocess_url(self, request_url: str):
"""
Preprocess the request URL to ensure the mock response will be found.
"""
request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
request_url = urllib.parse.quote(request_url, safe=':/')
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
@responses.activate
def test_put_local_document_all_params(self):
"""
put_local_document()
"""
# Set up mock
url = self.preprocess_url(_base_url + '/testString/_local/testString')
mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
responses.add(responses.PUT,
url,
body=mock_response,
content_type='application/json',
status=201)
# Construct a dict representation of a Attachment model
attachment_model = {}
attachment_model['content_type'] = 'testString'
attachment_model['data'] = 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4='
attachment_model['digest'] = 'testString'
attachment_model['encoded_length'] = 0
attachment_model['encoding'] = 'testString'
attachment_model['follows'] = True
attachment_model['length'] = 0
attachment_model['revpos'] = 1
attachment_model['stub'] = True
# Construct a dict representation of a Revisions model
revisions_model = {}
revisions_model['ids'] = ['testString']
revisions_model['start'] = 1
# Construct a dict representation of a DocumentRevisionStatus model
document_revision_status_model = {}
document_revision_status_model['rev'] = 'testString'
document_revision_status_model['status'] = 'available'
# Construct a dict representation of a Document model
document_model = {}
document_model['_attachments'] = {}
document_model['_conflicts'] = ['testString']
document_model['_deleted'] = True
document_model['_deleted_conflicts'] = ['testString']
document_model['_id'] = 'exampleid'
document_model['_local_seq'] = 'testString'
document_model['_rev'] = 'testString'
document_model['_revisions'] = revisions_model
document_model['_revs_info'] = [document_revision_status_model]
document_model['brand'] = 'Foo'
document_model['colours'] = '["red","green","black","blue"]'
document_model['description'] = 'Slim Colourful Design Electronic Cooking Appliance for ...'
document_model['image'] = 'assets/img/0gmsnghhew.jpg'
document_model['keywords'] = '["Foo","Scales","Weight","Digital","Kitchen"]'
document_model['name'] = 'Digital Kitchen Scales'
document_model['price'] = '14.99'
document_model['productid'] = '1000042'
document_model['taxonomy'] = '["Home","Kitchen","Small Appliances"]'
document_model['type'] = 'product'
# Set up parameter values
db = 'testString'
doc_id = 'testString'
document = document_model
content_type = 'application/json'
batch = 'ok'
# Invoke method
response = _service.put_local_document(
db,
doc_id,
document,
content_type=content_type,
batch=batch,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 201
# Validate query params
query_string = responses.calls[0].request.url.split('?',1)[1]
query_string = urllib.parse.unquote_plus(query_string)
assert 'batch={}'.format(batch) in query_string
# decompress gzip compressed request body
responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
# Validate body params
def test_put_local_document_all_params_with_retries(self):
    # Run test_put_local_document_all_params twice: once with the service's
    # retry support enabled, once with it disabled.
    for toggle_retries in (_service.enable_retries, _service.disable_retries):
        toggle_retries()
        self.test_put_local_document_all_params()
@responses.activate
def test_put_local_document_required_params(self):
    """
    test_put_local_document_required_params()

    Invoke put_local_document with only its required parameters and verify
    the mocked endpoint is called exactly once with a 201 response.
    """
    # Register the mocked PUT /{db}/_local/{doc_id} endpoint.
    url = self.preprocess_url(_base_url + '/testString/_local/testString')
    mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
    responses.add(
        responses.PUT,
        url,
        body=mock_response,
        content_type='application/json',
        status=201,
    )

    # Build the nested model dicts as literals.
    attachment_model = {  # Attachment (not referenced by the document below)
        'content_type': 'testString',
        'data': 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4=',
        'digest': 'testString',
        'encoded_length': 0,
        'encoding': 'testString',
        'follows': True,
        'length': 0,
        'revpos': 1,
        'stub': True,
    }
    revisions_model = {'ids': ['testString'], 'start': 1}  # Revisions
    document_revision_status_model = {  # DocumentRevisionStatus
        'rev': 'testString',
        'status': 'available',
    }
    document_model = {  # Document
        '_attachments': {},
        '_conflicts': ['testString'],
        '_deleted': True,
        '_deleted_conflicts': ['testString'],
        '_id': 'exampleid',
        '_local_seq': 'testString',
        '_rev': 'testString',
        '_revisions': revisions_model,
        '_revs_info': [document_revision_status_model],
        'brand': 'Foo',
        'colours': '["red","green","black","blue"]',
        'description': 'Slim Colourful Design Electronic Cooking Appliance for ...',
        'image': 'assets/img/0gmsnghhew.jpg',
        'keywords': '["Foo","Scales","Weight","Digital","Kitchen"]',
        'name': 'Digital Kitchen Scales',
        'price': '14.99',
        'productid': '1000042',
        'taxonomy': '["Home","Kitchen","Small Appliances"]',
        'type': 'product',
    }

    # Required parameter values only.
    db = 'testString'
    doc_id = 'testString'
    document = document_model

    # Invoke method
    response = _service.put_local_document(db, doc_id, document, headers={})

    # The mocked endpoint was hit exactly once and returned 201 Created.
    assert len(responses.calls) == 1
    assert response.status_code == 201
    # decompress gzip compressed request body
    responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
def test_put_local_document_required_params_with_retries(self):
    # Run test_put_local_document_required_params with retries on, then off.
    for toggle_retries in (_service.enable_retries, _service.disable_retries):
        toggle_retries()
        self.test_put_local_document_required_params()
@responses.activate
def test_put_local_document_value_error(self):
    """
    test_put_local_document_value_error()

    Verify put_local_document raises ValueError when any one of its
    required parameters (db, doc_id, document) is None.
    """
    # Set up mock (the endpoint should never actually be reached).
    url = self.preprocess_url(_base_url + '/testString/_local/testString')
    mock_response = '{"id": "id", "rev": "rev", "ok": true, "caused_by": "caused_by", "error": "error", "reason": "reason"}'
    responses.add(responses.PUT,
                  url,
                  body=mock_response,
                  content_type='application/json',
                  status=201)
    # Construct the nested model dicts for the Document payload.
    # (The unused Attachment model dict present in the generated original
    # has been removed; the document's '_attachments' is an empty dict.)
    revisions_model = {'ids': ['testString'], 'start': 1}  # Revisions
    document_revision_status_model = {  # DocumentRevisionStatus
        'rev': 'testString',
        'status': 'available',
    }
    document_model = {  # Document
        '_attachments': {},
        '_conflicts': ['testString'],
        '_deleted': True,
        '_deleted_conflicts': ['testString'],
        '_id': 'exampleid',
        '_local_seq': 'testString',
        '_rev': 'testString',
        '_revisions': revisions_model,
        '_revs_info': [document_revision_status_model],
        'brand': 'Foo',
        'colours': '["red","green","black","blue"]',
        'description': 'Slim Colourful Design Electronic Cooking Appliance for ...',
        'image': 'assets/img/0gmsnghhew.jpg',
        'keywords': '["Foo","Scales","Weight","Digital","Kitchen"]',
        'name': 'Digital Kitchen Scales',
        'price': '14.99',
        'productid': '1000042',
        'taxonomy': '["Home","Kitchen","Small Appliances"]',
        'type': 'product',
    }
    # Set up parameter values
    db = 'testString'
    doc_id = 'testString'
    document = document_model
    # Pass in all but one required param and check for a ValueError
    req_param_dict = {
        "db": db,
        "doc_id": doc_id,
        "document": document,
    }
    for param in req_param_dict.keys():
        # Fix: compare keys with equality (!=) rather than identity
        # (`is not`); string identity only works by accident of interning.
        req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
        with pytest.raises(ValueError):
            _service.put_local_document(**req_copy)
def test_put_local_document_value_error_with_retries(self):
    # Run test_put_local_document_value_error with retries on, then off.
    for toggle_retries in (_service.enable_retries, _service.disable_retries):
        toggle_retries()
        self.test_put_local_document_value_error()
# endregion
##############################################################################
# End of Service: LocalDocuments
##############################################################################
##############################################################################
# Start of Service: DatabaseDetails
##############################################################################
# region
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()

        Verify that new_instance builds a CloudantV1 client from
        environment-based configuration (noAuth authenticator).
        """
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        service = CloudantV1.new_instance(
            service_name='TEST_SERVICE',
        )
        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()

        Verify that new_instance raises ValueError when no authenticator
        can be configured.
        """
        # Fix: dropped the unused `service =` assignment; the call is
        # expected to raise before returning anything.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestPostRevsDiff():
    """
    Test Class for post_revs_diff
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # A trailing-slash URL is matched with a regex allowing one or
            # more trailing slashes.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_revs_diff_all_params(self):
        """
        post_revs_diff()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_revs_diff')
        mock_response = '{"mapKey": {"missing": ["missing"], "possible_ancestors": ["possible_ancestors"]}}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        document_revisions = {}
        # Invoke method
        response = _service.post_revs_diff(
            db,
            document_revisions,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate that the request body is the document_revisions mapping.
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body == document_revisions

    def test_post_revs_diff_all_params_with_retries(self):
        # Enable retries and run test_post_revs_diff_all_params.
        _service.enable_retries()
        self.test_post_revs_diff_all_params()
        # Disable retries and run test_post_revs_diff_all_params.
        _service.disable_retries()
        self.test_post_revs_diff_all_params()

    @responses.activate
    def test_post_revs_diff_value_error(self):
        """
        test_post_revs_diff_value_error()

        Verify post_revs_diff raises ValueError when a required parameter
        is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_revs_diff')
        mock_response = '{"mapKey": {"missing": ["missing"], "possible_ancestors": ["possible_ancestors"]}}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        document_revisions = {}
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "document_revisions": document_revisions,
        }
        for param in req_param_dict.keys():
            # Fix: compare keys with equality (!=) rather than identity
            # (`is not`); string identity only works by accident of interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_revs_diff(**req_copy)

    def test_post_revs_diff_value_error_with_retries(self):
        # Enable retries and run test_post_revs_diff_value_error.
        _service.enable_retries()
        self.test_post_revs_diff_value_error()
        # Disable retries and run test_post_revs_diff_value_error.
        _service.disable_retries()
        self.test_post_revs_diff_value_error()
class TestGetShardsInformation():
    """
    Test Class for get_shards_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # A trailing-slash URL is matched with a regex allowing one or
            # more trailing slashes.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_shards_information_all_params(self):
        """
        get_shards_information()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_shards')
        mock_response = '{"shards": {"mapKey": ["inner"]}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Invoke method
        response = _service.get_shards_information(
            db,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_shards_information_all_params_with_retries(self):
        # Enable retries and run test_get_shards_information_all_params.
        _service.enable_retries()
        self.test_get_shards_information_all_params()
        # Disable retries and run test_get_shards_information_all_params.
        _service.disable_retries()
        self.test_get_shards_information_all_params()

    @responses.activate
    def test_get_shards_information_value_error(self):
        """
        test_get_shards_information_value_error()

        Verify get_shards_information raises ValueError when `db` is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_shards')
        mock_response = '{"shards": {"mapKey": ["inner"]}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
        }
        for param in req_param_dict.keys():
            # Fix: compare keys with equality (!=) rather than identity
            # (`is not`); string identity only works by accident of interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_shards_information(**req_copy)

    def test_get_shards_information_value_error_with_retries(self):
        # Enable retries and run test_get_shards_information_value_error.
        _service.enable_retries()
        self.test_get_shards_information_value_error()
        # Disable retries and run test_get_shards_information_value_error.
        _service.disable_retries()
        self.test_get_shards_information_value_error()
class TestGetDocumentShardsInfo():
    """
    Test Class for get_document_shards_info
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # A trailing-slash URL is matched with a regex allowing one or
            # more trailing slashes.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_get_document_shards_info_all_params(self):
        """
        get_document_shards_info()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_shards/testString')
        mock_response = '{"nodes": ["nodes"], "range": "range"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Invoke method
        response = _service.get_document_shards_info(
            db,
            doc_id,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_document_shards_info_all_params_with_retries(self):
        # Enable retries and run test_get_document_shards_info_all_params.
        _service.enable_retries()
        self.test_get_document_shards_info_all_params()
        # Disable retries and run test_get_document_shards_info_all_params.
        _service.disable_retries()
        self.test_get_document_shards_info_all_params()

    @responses.activate
    def test_get_document_shards_info_value_error(self):
        """
        test_get_document_shards_info_value_error()

        Verify get_document_shards_info raises ValueError when a required
        parameter (db, doc_id) is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/testString/_shards/testString')
        mock_response = '{"nodes": ["nodes"], "range": "range"}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        db = 'testString'
        doc_id = 'testString'
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "db": db,
            "doc_id": doc_id,
        }
        for param in req_param_dict.keys():
            # Fix: compare keys with equality (!=) rather than identity
            # (`is not`); string identity only works by accident of interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_document_shards_info(**req_copy)

    def test_get_document_shards_info_value_error_with_retries(self):
        # Enable retries and run test_get_document_shards_info_value_error.
        _service.enable_retries()
        self.test_get_document_shards_info_value_error()
        # Disable retries and run test_get_document_shards_info_value_error.
        _service.disable_retries()
        self.test_get_document_shards_info_value_error()
# endregion
##############################################################################
# End of Service: DatabaseDetails
##############################################################################
##############################################################################
# Start of Service: Monitoring
##############################################################################
# region
# NOTE(review): this class has the same name as the TestNewInstance defined
# earlier in this module, so it shadows that one at import time and pytest
# collects only this definition — confirm whether the generator intends this.
class TestNewInstance():
    """
    Test Class for new_instance
    """

    def test_new_instance(self):
        """
        new_instance()

        Verify that new_instance builds a CloudantV1 client from
        environment-based configuration (noAuth authenticator).
        """
        os.environ['TEST_SERVICE_AUTH_TYPE'] = 'noAuth'
        service = CloudantV1.new_instance(
            service_name='TEST_SERVICE',
        )
        assert service is not None
        assert isinstance(service, CloudantV1)

    def test_new_instance_without_authenticator(self):
        """
        new_instance_without_authenticator()

        Verify that new_instance raises ValueError when no authenticator
        can be configured.
        """
        # Fix: dropped the unused `service =` assignment; the call is
        # expected to raise before returning anything.
        with pytest.raises(ValueError, match='authenticator must be provided'):
            CloudantV1.new_instance()
class TestHeadUpInformation():
    """
    Test Class for head_up_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        decoded = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        normalized = urllib.parse.quote(decoded, safe=':/')
        if re.fullmatch('.*/+', normalized):
            # Trailing slash: match any number of trailing slashes via regex.
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized

    @responses.activate
    def test_head_up_information_all_params(self):
        """
        head_up_information()
        """
        # Mock the HEAD /_up endpoint.
        url = self.preprocess_url(_base_url + '/_up')
        responses.add(responses.HEAD, url, status=200)

        response = _service.head_up_information()

        # Exactly one request was issued and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_head_up_information_all_params_with_retries(self):
        # Run the test once with retries enabled and once with them disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_head_up_information_all_params()
class TestGetActiveTasks():
    """
    Test Class for get_active_tasks
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        decoded = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        normalized = urllib.parse.quote(decoded, safe=':/')
        if re.fullmatch('.*/+', normalized):
            # Trailing slash: match any number of trailing slashes via regex.
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized

    @responses.activate
    def test_get_active_tasks_all_params(self):
        """
        get_active_tasks()
        """
        # Mock the GET /_active_tasks endpoint.
        url = self.preprocess_url(_base_url + '/_active_tasks')
        mock_response = '[{"changes_done": 0, "database": "database", "node": "node", "pid": "pid", "progress": 0, "started_on": 0, "status": "status", "task": "task", "total_changes": 0, "type": "type", "updated_on": 0}]'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        response = _service.get_active_tasks()

        # Exactly one request was issued and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_active_tasks_all_params_with_retries(self):
        # Run the test once with retries enabled and once with them disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_active_tasks_all_params()
class TestGetUpInformation():
    """
    Test Class for get_up_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        decoded = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        normalized = urllib.parse.quote(decoded, safe=':/')
        if re.fullmatch('.*/+', normalized):
            # Trailing slash: match any number of trailing slashes via regex.
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized

    @responses.activate
    def test_get_up_information_all_params(self):
        """
        get_up_information()
        """
        # Mock the GET /_up endpoint.
        url = self.preprocess_url(_base_url + '/_up')
        mock_response = '{"seeds": {"anyKey": "anyValue"}, "status": "maintenance_mode"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        response = _service.get_up_information()

        # Exactly one request was issued and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_up_information_all_params_with_retries(self):
        # Run the test once with retries enabled and once with them disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_up_information_all_params()
class TestGetActivityTrackerEvents():
    """
    Test Class for get_activity_tracker_events
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        decoded = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        normalized = urllib.parse.quote(decoded, safe=':/')
        if re.fullmatch('.*/+', normalized):
            # Trailing slash: match any number of trailing slashes via regex.
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized

    @responses.activate
    def test_get_activity_tracker_events_all_params(self):
        """
        get_activity_tracker_events()
        """
        # Mock the GET activity-tracker events endpoint.
        url = self.preprocess_url(_base_url + '/_api/v2/user/activity_tracker/events')
        mock_response = '{"types": ["management"]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        response = _service.get_activity_tracker_events()

        # Exactly one request was issued and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_activity_tracker_events_all_params_with_retries(self):
        # Run the test once with retries enabled and once with them disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_activity_tracker_events_all_params()
class TestPostActivityTrackerEvents():
    """
    Test Class for post_activity_tracker_events
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
        request_url = urllib.parse.quote(request_url, safe=':/')
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        else:
            # A trailing-slash URL is matched with a regex allowing one or
            # more trailing slashes.
            return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_post_activity_tracker_events_all_params(self):
        """
        post_activity_tracker_events()
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_api/v2/user/activity_tracker/events')
        mock_response = '{"ok": true}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        types = ['management']
        # Invoke method
        response = _service.post_activity_tracker_events(
            types,
            headers={}
        )
        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # decompress gzip compressed request body
        responses.calls[0].request.body = gzip.decompress(responses.calls[0].request.body)
        # Validate that the request body carried the event types.
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['types'] == ['management']

    def test_post_activity_tracker_events_all_params_with_retries(self):
        # Enable retries and run test_post_activity_tracker_events_all_params.
        _service.enable_retries()
        self.test_post_activity_tracker_events_all_params()
        # Disable retries and run test_post_activity_tracker_events_all_params.
        _service.disable_retries()
        self.test_post_activity_tracker_events_all_params()

    @responses.activate
    def test_post_activity_tracker_events_value_error(self):
        """
        test_post_activity_tracker_events_value_error()

        Verify post_activity_tracker_events raises ValueError when `types`
        is None.
        """
        # Set up mock
        url = self.preprocess_url(_base_url + '/_api/v2/user/activity_tracker/events')
        mock_response = '{"ok": true}'
        responses.add(responses.POST,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)
        # Set up parameter values
        types = ['management']
        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "types": types,
        }
        for param in req_param_dict.keys():
            # Fix: compare keys with equality (!=) rather than identity
            # (`is not`); string identity only works by accident of interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.post_activity_tracker_events(**req_copy)

    def test_post_activity_tracker_events_value_error_with_retries(self):
        # Enable retries and run test_post_activity_tracker_events_value_error.
        _service.enable_retries()
        self.test_post_activity_tracker_events_value_error()
        # Disable retries and run test_post_activity_tracker_events_value_error.
        _service.disable_retries()
        self.test_post_activity_tracker_events_value_error()
class TestGetCurrentThroughputInformation():
    """
    Test Class for get_current_throughput_information
    """

    def preprocess_url(self, request_url: str):
        """
        Preprocess the request URL to ensure the mock response will be found.
        """
        decoded = urllib.parse.unquote(request_url)  # don't double-encode if already encoded
        normalized = urllib.parse.quote(decoded, safe=':/')
        if re.fullmatch('.*/+', normalized):
            # Trailing slash: match any number of trailing slashes via regex.
            return re.compile(normalized.rstrip('/') + '/+')
        return normalized

    @responses.activate
    def test_get_current_throughput_information_all_params(self):
        """
        get_current_throughput_information()
        """
        # Mock the GET current-throughput endpoint.
        url = self.preprocess_url(_base_url + '/_api/v2/user/current/throughput')
        mock_response = '{"throughput": {"query": 0, "read": 0, "write": 0}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        response = _service.get_current_throughput_information()

        # Exactly one request was issued and it succeeded.
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_current_throughput_information_all_params_with_retries(self):
        # Run the test once with retries enabled and once with them disabled.
        for toggle_retries in (_service.enable_retries, _service.disable_retries):
            toggle_retries()
            self.test_get_current_throughput_information_all_params()
# endregion
##############################################################################
# End of Service: Monitoring
##############################################################################
##############################################################################
# Start of Model Tests
##############################################################################
# region
class TestModel_ActiveTask():
    """
    Test Class for ActiveTask
    """

    def test_active_task_serialization(self):
        """
        Round-trip an ActiveTask through from_dict / to_dict and verify
        no data is lost or altered.
        """
        # JSON representation of an ActiveTask, as a literal.
        active_task_model_json = {
            'changes_done': 0,
            'database': 'testString',
            'node': 'testString',
            'pid': 'testString',
            'progress': 0,
            'started_on': 0,
            'status': 'testString',
            'task': 'testString',
            'total_changes': 0,
            'type': 'testString',
            'updated_on': 0,
        }

        # Deserialize via from_dict.
        active_task_model = ActiveTask.from_dict(active_task_model_json)
        assert active_task_model != False

        # Rebuild a second instance from the first one's attribute dict.
        active_task_model_dict = ActiveTask.from_dict(active_task_model_json).__dict__
        active_task_model2 = ActiveTask(**active_task_model_dict)

        # Both construction paths must produce equal models.
        assert active_task_model == active_task_model2

        # Serialize back and compare with the original JSON dict.
        active_task_model_json2 = active_task_model.to_dict()
        assert active_task_model_json2 == active_task_model_json
class TestModel_ActivityTrackerEvents():
    """
    Test Class for ActivityTrackerEvents
    """

    def test_activity_tracker_events_serialization(self):
        """
        Round-trip an ActivityTrackerEvents model through from_dict /
        to_dict and verify no data is lost or altered.
        """
        activity_tracker_events_model_json = {'types': ['management']}

        # Deserialize via from_dict.
        activity_tracker_events_model = ActivityTrackerEvents.from_dict(activity_tracker_events_model_json)
        assert activity_tracker_events_model != False

        # Rebuild a second instance from the first one's attribute dict.
        activity_tracker_events_model_dict = ActivityTrackerEvents.from_dict(activity_tracker_events_model_json).__dict__
        activity_tracker_events_model2 = ActivityTrackerEvents(**activity_tracker_events_model_dict)

        # Both construction paths must produce equal models.
        assert activity_tracker_events_model == activity_tracker_events_model2

        # Serialize back and compare with the original JSON dict.
        activity_tracker_events_model_json2 = activity_tracker_events_model.to_dict()
        assert activity_tracker_events_model_json2 == activity_tracker_events_model_json
class TestModel_AllDocsQueriesResult():
    """
    Test Class for AllDocsQueriesResult
    """

    def test_all_docs_queries_result_serialization(self):
        """
        Round-trip an AllDocsQueriesResult through from_dict / to_dict and
        verify no data is lost or altered.
        """
        # Nested model dicts needed to build the result, as literals.
        attachment_model = {  # Attachment
            'content_type': 'testString',
            'data': 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4=',
            'digest': 'testString',
            'encoded_length': 0,
            'encoding': 'testString',
            'follows': True,
            'length': 0,
            'revpos': 1,
            'stub': True,
        }
        revisions_model = {'ids': ['testString'], 'start': 1}  # Revisions
        document_revision_status_model = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }
        document_model = {  # Document
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }
        docs_result_row_value_model = {'rev': 'testString'}  # DocsResultRowValue
        docs_result_row_model = {  # DocsResultRow
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
            'doc': document_model,
            'id': 'testString',
            'key': 'testString',
            'value': docs_result_row_value_model,
        }
        all_docs_result_model = {  # AllDocsResult
            'total_rows': 0,
            'rows': [docs_result_row_model],
            'update_seq': 'testString',
        }

        # JSON representation of an AllDocsQueriesResult.
        all_docs_queries_result_model_json = {'results': [all_docs_result_model]}

        # Deserialize via from_dict.
        all_docs_queries_result_model = AllDocsQueriesResult.from_dict(all_docs_queries_result_model_json)
        assert all_docs_queries_result_model != False

        # Rebuild a second instance from the first one's attribute dict.
        all_docs_queries_result_model_dict = AllDocsQueriesResult.from_dict(all_docs_queries_result_model_json).__dict__
        all_docs_queries_result_model2 = AllDocsQueriesResult(**all_docs_queries_result_model_dict)

        # Both construction paths must produce equal models.
        assert all_docs_queries_result_model == all_docs_queries_result_model2

        # Serialize back and compare with the original JSON dict.
        all_docs_queries_result_model_json2 = all_docs_queries_result_model.to_dict()
        assert all_docs_queries_result_model_json2 == all_docs_queries_result_model_json
class TestModel_AllDocsQuery():
    """
    Test Class for AllDocsQuery
    """

    def test_all_docs_query_serialization(self):
        """
        Round-trip an AllDocsQuery through from_dict / to_dict and verify
        no data is lost or altered.
        """
        # JSON representation of an AllDocsQuery, as a literal.
        all_docs_query_model_json = {
            'att_encoding_info': False,
            'attachments': False,
            'conflicts': False,
            'descending': False,
            'include_docs': False,
            'inclusive_end': True,
            'limit': 0,
            'skip': 0,
            'update_seq': False,
            'endkey': 'testString',
            'key': 'testString',
            'keys': ['testString'],
            'startkey': 'testString',
        }

        # Deserialize via from_dict.
        all_docs_query_model = AllDocsQuery.from_dict(all_docs_query_model_json)
        assert all_docs_query_model != False

        # Rebuild a second instance from the first one's attribute dict.
        all_docs_query_model_dict = AllDocsQuery.from_dict(all_docs_query_model_json).__dict__
        all_docs_query_model2 = AllDocsQuery(**all_docs_query_model_dict)

        # Both construction paths must produce equal models.
        assert all_docs_query_model == all_docs_query_model2

        # Serialize back and compare with the original JSON dict.
        all_docs_query_model_json2 = all_docs_query_model.to_dict()
        assert all_docs_query_model_json2 == all_docs_query_model_json
class TestModel_AllDocsResult():
    """
    Test Class for AllDocsResult
    """

    def test_all_docs_result_serialization(self):
        """
        Test serialization/deserialization for AllDocsResult
        """
        # Dict forms of the nested models needed to build this model.
        # NOTE: the generated code also built an Attachment fixture here that
        # was never referenced (Document '_attachments' is just {}); that dead
        # fixture has been removed.
        revisions_model = {
            'ids': ['testString'],
            'start': 1,
        }
        document_revision_status_model = {
            'rev': 'testString',
            'status': 'available',
        }
        document_model = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }
        docs_result_row_value_model = {'rev': 'testString'}
        docs_result_row_model = {
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
            'doc': document_model,
            'id': 'testString',
            'key': 'testString',
            'value': docs_result_row_value_model,
        }
        # JSON representation of an AllDocsResult model.
        all_docs_result_model_json = {
            'total_rows': 0,
            'rows': [docs_result_row_model],
            'update_seq': 'testString',
        }
        # Deserialize into a model instance.
        all_docs_result_model = AllDocsResult.from_dict(all_docs_result_model_json)
        assert all_docs_result_model != False
        # Rebuild a second instance from the first one's attributes and compare.
        all_docs_result_model2 = AllDocsResult(**AllDocsResult.from_dict(all_docs_result_model_json).__dict__)
        assert all_docs_result_model == all_docs_result_model2
        # Round-trip back to a dict with no loss of data.
        assert all_docs_result_model.to_dict() == all_docs_result_model_json
class TestModel_Analyzer():
    """
    Test Class for Analyzer
    """

    def test_analyzer_serialization(self):
        """
        Test serialization/deserialization for Analyzer
        """
        # JSON representation of an Analyzer model.
        payload = {
            'name': 'classic',
            'stopwords': ['testString'],
        }
        # Deserialize the payload into a model instance.
        model = Analyzer.from_dict(payload)
        assert model != False
        # Rebuild a second instance from the first one's attribute dict;
        # the two must compare equal.
        twin = Analyzer(**Analyzer.from_dict(payload).__dict__)
        assert model == twin
        # Serializing back to a dict must lose no data.
        assert model.to_dict() == payload
class TestModel_AnalyzerConfiguration():
    """
    Test Class for AnalyzerConfiguration
    """

    def test_analyzer_configuration_serialization(self):
        """
        Test serialization/deserialization for AnalyzerConfiguration
        """
        # JSON representation of an AnalyzerConfiguration model.
        # NOTE: the generated code also built an Analyzer fixture dict here
        # that was never referenced ('fields' is just {}); that dead fixture
        # has been removed.
        analyzer_configuration_model_json = {
            'name': 'classic',
            'stopwords': ['testString'],
            'fields': {},
        }
        # Deserialize into a model instance.
        analyzer_configuration_model = AnalyzerConfiguration.from_dict(analyzer_configuration_model_json)
        assert analyzer_configuration_model != False
        # Rebuild a second instance from the first one's attributes and compare.
        analyzer_configuration_model2 = AnalyzerConfiguration(**AnalyzerConfiguration.from_dict(analyzer_configuration_model_json).__dict__)
        assert analyzer_configuration_model == analyzer_configuration_model2
        # Round-trip back to a dict with no loss of data.
        assert analyzer_configuration_model.to_dict() == analyzer_configuration_model_json
class TestModel_ApiKeysResult():
    """
    Test Class for ApiKeysResult
    """

    def test_api_keys_result_serialization(self):
        """
        Test serialization/deserialization for ApiKeysResult
        """
        # JSON representation of an ApiKeysResult model.
        payload = {
            'ok': True,
            'key': 'testString',
            'password': 'testString',
        }
        # Deserialize the payload into a model instance.
        model = ApiKeysResult.from_dict(payload)
        assert model != False
        # Rebuild a second instance from the first one's attribute dict;
        # the two must compare equal.
        twin = ApiKeysResult(**ApiKeysResult.from_dict(payload).__dict__)
        assert model == twin
        # Serializing back to a dict must lose no data.
        assert model.to_dict() == payload
class TestModel_Attachment():
    """
    Test Class for Attachment
    """

    def test_attachment_serialization(self):
        """
        Test serialization/deserialization for Attachment
        """
        # JSON representation of an Attachment model.
        payload = {
            'content_type': 'testString',
            'data': 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4=',
            'digest': 'testString',
            'encoded_length': 0,
            'encoding': 'testString',
            'follows': True,
            'length': 0,
            'revpos': 1,
            'stub': True,
        }
        # Deserialize the payload into a model instance.
        model = Attachment.from_dict(payload)
        assert model != False
        # Rebuild a second instance from the first one's attribute dict;
        # the two must compare equal.
        twin = Attachment(**Attachment.from_dict(payload).__dict__)
        assert model == twin
        # Serializing back to a dict must lose no data.
        assert model.to_dict() == payload
class TestModel_BulkDocs():
    """
    Test Class for BulkDocs
    """

    def test_bulk_docs_serialization(self):
        """
        Test serialization/deserialization for BulkDocs
        """
        # Dict forms of the nested models needed to build a Document.
        # NOTE: the generated code also built an Attachment fixture here that
        # was never referenced (Document '_attachments' is just {}); that dead
        # fixture has been removed.
        revisions_model = {
            'ids': ['testString'],
            'start': 1,
        }
        document_revision_status_model = {
            'rev': 'testString',
            'status': 'available',
        }
        document_model = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }
        # JSON representation of a BulkDocs model.
        bulk_docs_model_json = {
            'docs': [document_model],
            'new_edits': True,
        }
        # Deserialize into a model instance.
        bulk_docs_model = BulkDocs.from_dict(bulk_docs_model_json)
        assert bulk_docs_model != False
        # Rebuild a second instance from the first one's attributes and compare.
        bulk_docs_model2 = BulkDocs(**BulkDocs.from_dict(bulk_docs_model_json).__dict__)
        assert bulk_docs_model == bulk_docs_model2
        # Round-trip back to a dict with no loss of data.
        assert bulk_docs_model.to_dict() == bulk_docs_model_json
class TestModel_BulkGetQueryDocument():
    """
    Test Class for BulkGetQueryDocument
    """

    def test_bulk_get_query_document_serialization(self):
        """
        Test serialization/deserialization for BulkGetQueryDocument
        """
        # JSON representation of a BulkGetQueryDocument model.
        payload = {
            'atts_since': ['1-99b02e08da151943c2dcb40090160bb8'],
            'id': 'testString',
            'rev': 'testString',
        }
        # Deserialize the payload into a model instance.
        model = BulkGetQueryDocument.from_dict(payload)
        assert model != False
        # Rebuild a second instance from the first one's attribute dict;
        # the two must compare equal.
        twin = BulkGetQueryDocument(**BulkGetQueryDocument.from_dict(payload).__dict__)
        assert model == twin
        # Serializing back to a dict must lose no data.
        assert model.to_dict() == payload
class TestModel_BulkGetResult():
    """
    Test Class for BulkGetResult
    """

    def test_bulk_get_result_serialization(self):
        """
        Test serialization/deserialization for BulkGetResult
        """
        # Dict forms of the nested models needed to build this model.
        # NOTE: the generated code also built an Attachment fixture here that
        # was never referenced (Document '_attachments' is just {}); that dead
        # fixture has been removed.
        document_result_model = {
            'id': 'testString',
            'rev': 'testString',
            'ok': True,
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
        }
        revisions_model = {
            'ids': ['testString'],
            'start': 1,
        }
        document_revision_status_model = {
            'rev': 'testString',
            'status': 'available',
        }
        document_model = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }
        bulk_get_result_document_model = {
            'error': document_result_model,
            'ok': document_model,
        }
        bulk_get_result_item_model = {
            'docs': [bulk_get_result_document_model],
            'id': 'testString',
        }
        # JSON representation of a BulkGetResult model.
        bulk_get_result_model_json = {
            'results': [bulk_get_result_item_model],
        }
        # Deserialize into a model instance.
        bulk_get_result_model = BulkGetResult.from_dict(bulk_get_result_model_json)
        assert bulk_get_result_model != False
        # Rebuild a second instance from the first one's attributes and compare.
        bulk_get_result_model2 = BulkGetResult(**BulkGetResult.from_dict(bulk_get_result_model_json).__dict__)
        assert bulk_get_result_model == bulk_get_result_model2
        # Round-trip back to a dict with no loss of data.
        assert bulk_get_result_model.to_dict() == bulk_get_result_model_json
class TestModel_BulkGetResultDocument():
    """
    Test Class for BulkGetResultDocument
    """

    def test_bulk_get_result_document_serialization(self):
        """
        Test serialization/deserialization for BulkGetResultDocument
        """
        # Dict forms of the nested models needed to build this model.
        # NOTE: the generated code also built an Attachment fixture here that
        # was never referenced (Document '_attachments' is just {}); that dead
        # fixture has been removed.
        document_result_model = {
            'id': 'testString',
            'rev': 'testString',
            'ok': True,
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
        }
        revisions_model = {
            'ids': ['testString'],
            'start': 1,
        }
        document_revision_status_model = {
            'rev': 'testString',
            'status': 'available',
        }
        document_model = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }
        # JSON representation of a BulkGetResultDocument model.
        bulk_get_result_document_model_json = {
            'error': document_result_model,
            'ok': document_model,
        }
        # Deserialize into a model instance.
        bulk_get_result_document_model = BulkGetResultDocument.from_dict(bulk_get_result_document_model_json)
        assert bulk_get_result_document_model != False
        # Rebuild a second instance from the first one's attributes and compare.
        bulk_get_result_document_model2 = BulkGetResultDocument(**BulkGetResultDocument.from_dict(bulk_get_result_document_model_json).__dict__)
        assert bulk_get_result_document_model == bulk_get_result_document_model2
        # Round-trip back to a dict with no loss of data.
        assert bulk_get_result_document_model.to_dict() == bulk_get_result_document_model_json
class TestModel_BulkGetResultItem():
    """
    Test Class for BulkGetResultItem
    """

    def test_bulk_get_result_item_serialization(self):
        """
        Test serialization/deserialization for BulkGetResultItem
        """
        # Dict forms of the nested models needed to build this model.
        # NOTE: the generated code also built an Attachment fixture here that
        # was never referenced (Document '_attachments' is just {}); that dead
        # fixture has been removed.
        document_result_model = {
            'id': 'testString',
            'rev': 'testString',
            'ok': True,
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
        }
        revisions_model = {
            'ids': ['testString'],
            'start': 1,
        }
        document_revision_status_model = {
            'rev': 'testString',
            'status': 'available',
        }
        document_model = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }
        bulk_get_result_document_model = {
            'error': document_result_model,
            'ok': document_model,
        }
        # JSON representation of a BulkGetResultItem model.
        bulk_get_result_item_model_json = {
            'docs': [bulk_get_result_document_model],
            'id': 'testString',
        }
        # Deserialize into a model instance.
        bulk_get_result_item_model = BulkGetResultItem.from_dict(bulk_get_result_item_model_json)
        assert bulk_get_result_item_model != False
        # Rebuild a second instance from the first one's attributes and compare.
        bulk_get_result_item_model2 = BulkGetResultItem(**BulkGetResultItem.from_dict(bulk_get_result_item_model_json).__dict__)
        assert bulk_get_result_item_model == bulk_get_result_item_model2
        # Round-trip back to a dict with no loss of data.
        assert bulk_get_result_item_model.to_dict() == bulk_get_result_item_model_json
class TestModel_CapacityThroughputInformation():
    """
    Test Class for CapacityThroughputInformation
    """

    def test_capacity_throughput_information_serialization(self):
        """
        Test serialization/deserialization for CapacityThroughputInformation
        """
        # Shared ThroughputInformation dict, referenced by both 'current'
        # and 'target'.
        throughput = {
            'blocks': 0,
            'query': 0,
            'read': 0,
            'write': 0,
        }
        # JSON representation of a CapacityThroughputInformation model.
        payload = {
            'current': {'throughput': throughput},
            'target': {'throughput': throughput},
        }
        # Deserialize the payload into a model instance.
        model = CapacityThroughputInformation.from_dict(payload)
        assert model != False
        # Rebuild a second instance from the first one's attribute dict;
        # the two must compare equal.
        twin = CapacityThroughputInformation(**CapacityThroughputInformation.from_dict(payload).__dict__)
        assert model == twin
        # Serializing back to a dict must lose no data.
        assert model.to_dict() == payload
class TestModel_CapacityThroughputInformationCurrent():
    """
    Test Class for CapacityThroughputInformationCurrent
    """

    def test_capacity_throughput_information_current_serialization(self):
        """
        Test serialization/deserialization for CapacityThroughputInformationCurrent
        """
        # Nested ThroughputInformation dict.
        throughput = {
            'blocks': 0,
            'query': 0,
            'read': 0,
            'write': 0,
        }
        # JSON representation of a CapacityThroughputInformationCurrent model.
        payload = {'throughput': throughput}
        # Deserialize the payload into a model instance.
        model = CapacityThroughputInformationCurrent.from_dict(payload)
        assert model != False
        # Rebuild a second instance from the first one's attribute dict;
        # the two must compare equal.
        twin = CapacityThroughputInformationCurrent(**CapacityThroughputInformationCurrent.from_dict(payload).__dict__)
        assert model == twin
        # Serializing back to a dict must lose no data.
        assert model.to_dict() == payload
class TestModel_CapacityThroughputInformationTarget():
    """
    Test Class for CapacityThroughputInformationTarget
    """

    def test_capacity_throughput_information_target_serialization(self):
        """
        Test serialization/deserialization for CapacityThroughputInformationTarget
        """
        # Nested ThroughputInformation dict.
        throughput = {
            'blocks': 0,
            'query': 0,
            'read': 0,
            'write': 0,
        }
        # JSON representation of a CapacityThroughputInformationTarget model.
        payload = {'throughput': throughput}
        # Deserialize the payload into a model instance.
        model = CapacityThroughputInformationTarget.from_dict(payload)
        assert model != False
        # Rebuild a second instance from the first one's attribute dict;
        # the two must compare equal.
        twin = CapacityThroughputInformationTarget(**CapacityThroughputInformationTarget.from_dict(payload).__dict__)
        assert model == twin
        # Serializing back to a dict must lose no data.
        assert model.to_dict() == payload
class TestModel_Change():
    """
    Test Class for Change
    """

    def test_change_serialization(self):
        """
        Test serialization/deserialization for Change
        """
        # JSON representation of a Change model.
        payload = {'rev': 'testString'}
        # Deserialize the payload into a model instance.
        model = Change.from_dict(payload)
        assert model != False
        # Rebuild a second instance from the first one's attribute dict;
        # the two must compare equal.
        twin = Change(**Change.from_dict(payload).__dict__)
        assert model == twin
        # Serializing back to a dict must lose no data.
        assert model.to_dict() == payload
class TestModel_ChangesResult():
    """
    Test Class for ChangesResult
    """

    def test_changes_result_serialization(self):
        """
        Test serialization/deserialization for ChangesResult
        """
        # Dict forms of the nested models needed to build this model.
        # NOTE: the generated code also built an Attachment fixture here that
        # was never referenced (Document '_attachments' is just {}); that dead
        # fixture has been removed.
        change_model = {'rev': 'testString'}
        revisions_model = {
            'ids': ['testString'],
            'start': 1,
        }
        document_revision_status_model = {
            'rev': 'testString',
            'status': 'available',
        }
        document_model = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions_model,
            '_revs_info': [document_revision_status_model],
            'foo': 'testString',
        }
        changes_result_item_model = {
            'changes': [change_model],
            'deleted': True,
            'doc': document_model,
            'id': 'testString',
            'seq': 'testString',
        }
        # JSON representation of a ChangesResult model.
        changes_result_model_json = {
            'last_seq': 'testString',
            'pending': 26,
            'results': [changes_result_item_model],
        }
        # Deserialize into a model instance.
        changes_result_model = ChangesResult.from_dict(changes_result_model_json)
        assert changes_result_model != False
        # Rebuild a second instance from the first one's attributes and compare.
        changes_result_model2 = ChangesResult(**ChangesResult.from_dict(changes_result_model_json).__dict__)
        assert changes_result_model == changes_result_model2
        # Round-trip back to a dict with no loss of data.
        assert changes_result_model.to_dict() == changes_result_model_json
class TestModel_ChangesResultItem():
    """
    Test Class for ChangesResultItem
    """
    def test_changes_result_item_serialization(self):
        """
        Test serialization/deserialization for ChangesResultItem
        """
        # Construct dict forms of the model objects actually referenced by the
        # payload below. (An unused Attachment dict was removed; the document's
        # '_attachments' field is intentionally an empty map.)
        change_model = {} # Change
        change_model['rev'] = 'testString'
        revisions_model = {} # Revisions
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1
        document_revision_status_model = {} # DocumentRevisionStatus
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'
        document_model = {} # Document
        document_model['_attachments'] = {}
        document_model['_conflicts'] = ['testString']
        document_model['_deleted'] = True
        document_model['_deleted_conflicts'] = ['testString']
        document_model['_id'] = 'testString'
        document_model['_local_seq'] = 'testString'
        document_model['_rev'] = 'testString'
        document_model['_revisions'] = revisions_model
        document_model['_revs_info'] = [document_revision_status_model]
        document_model['foo'] = 'testString'
        # Construct a json representation of a ChangesResultItem model
        changes_result_item_model_json = {}
        changes_result_item_model_json['changes'] = [change_model]
        changes_result_item_model_json['deleted'] = True
        changes_result_item_model_json['doc'] = document_model
        changes_result_item_model_json['id'] = 'testString'
        changes_result_item_model_json['seq'] = 'testString'
        # Construct a model instance of ChangesResultItem by calling from_dict on the json representation
        changes_result_item_model = ChangesResultItem.from_dict(changes_result_item_model_json)
        assert changes_result_item_model != False
        # Construct a second instance by passing the deserialized attributes to the constructor
        changes_result_item_model_dict = ChangesResultItem.from_dict(changes_result_item_model_json).__dict__
        changes_result_item_model2 = ChangesResultItem(**changes_result_item_model_dict)
        # Verify the model instances are equivalent
        assert changes_result_item_model == changes_result_item_model2
        # Convert model instance back to dict and verify no loss of data
        changes_result_item_model_json2 = changes_result_item_model.to_dict()
        assert changes_result_item_model_json2 == changes_result_item_model_json
class TestModel_ContentInformationSizes():
    """
    Test Class for ContentInformationSizes
    """
    def test_content_information_sizes_serialization(self):
        """
        Test serialization/deserialization for ContentInformationSizes
        """
        # JSON payload used for the round-trip checks.
        payload = {
            'active': 26,
            'external': 26,
            'file': 26,
        }
        # dict -> model
        first = ContentInformationSizes.from_dict(payload)
        assert first != False
        # dict -> model -> attribute kwargs -> model
        second = ContentInformationSizes(**ContentInformationSizes.from_dict(payload).__dict__)
        # Both construction paths must yield equivalent instances.
        assert first == second
        # model -> dict must not lose any data.
        assert first.to_dict() == payload
class TestModel_CorsInformation():
    """
    Test Class for CorsInformation
    """
    def test_cors_information_serialization(self):
        """
        Test serialization/deserialization for CorsInformation
        """
        # JSON payload used for the round-trip checks.
        payload = {
            'allow_credentials': True,
            'enable_cors': True,
            'origins': ['testString'],
        }
        # dict -> model
        first = CorsInformation.from_dict(payload)
        assert first != False
        # dict -> model -> attribute kwargs -> model
        second = CorsInformation(**CorsInformation.from_dict(payload).__dict__)
        # Both construction paths must yield equivalent instances.
        assert first == second
        # model -> dict must not lose any data.
        assert first.to_dict() == payload
class TestModel_CurrentThroughputInformation():
    """
    Test Class for CurrentThroughputInformation
    """
    def test_current_throughput_information_serialization(self):
        """
        Test serialization/deserialization for CurrentThroughputInformation
        """
        # Nested throughput dict required by the payload.
        throughput = {'query': 0, 'read': 0, 'write': 0}  # CurrentThroughputInformationThroughput
        # JSON payload used for the round-trip checks.
        payload = {'throughput': throughput}
        # dict -> model
        first = CurrentThroughputInformation.from_dict(payload)
        assert first != False
        # dict -> model -> attribute kwargs -> model
        second = CurrentThroughputInformation(**CurrentThroughputInformation.from_dict(payload).__dict__)
        # Both construction paths must yield equivalent instances.
        assert first == second
        # model -> dict must not lose any data.
        assert first.to_dict() == payload
class TestModel_CurrentThroughputInformationThroughput():
    """
    Test Class for CurrentThroughputInformationThroughput
    """
    def test_current_throughput_information_throughput_serialization(self):
        """
        Test serialization/deserialization for CurrentThroughputInformationThroughput
        """
        # JSON payload used for the round-trip checks.
        payload = {
            'query': 0,
            'read': 0,
            'write': 0,
        }
        # dict -> model
        first = CurrentThroughputInformationThroughput.from_dict(payload)
        assert first != False
        # dict -> model -> attribute kwargs -> model
        second = CurrentThroughputInformationThroughput(**CurrentThroughputInformationThroughput.from_dict(payload).__dict__)
        # Both construction paths must yield equivalent instances.
        assert first == second
        # model -> dict must not lose any data.
        assert first.to_dict() == payload
class TestModel_DatabaseInformation():
    """
    Test Class for DatabaseInformation
    """
    def test_database_information_serialization(self):
        """
        Test serialization/deserialization for DatabaseInformation
        """
        # Nested model dicts required by the payload below.
        cluster = {'n': 1, 'q': 1, 'r': 1, 'w': 1}  # DatabaseInformationCluster
        props = {'partitioned': True}  # DatabaseInformationProps
        sizes = {'active': 26, 'external': 26, 'file': 26}  # ContentInformationSizes
        # JSON payload used for the round-trip checks.
        payload = {
            'cluster': cluster,
            'committed_update_seq': 'testString',
            'compact_running': True,
            'compacted_seq': 'testString',
            'db_name': 'testString',
            'disk_format_version': 26,
            'doc_count': 0,
            'doc_del_count': 0,
            'engine': 'testString',
            'props': props,
            'sizes': sizes,
            'update_seq': 'testString',
            'uuid': 'testString',
        }
        # dict -> model
        first = DatabaseInformation.from_dict(payload)
        assert first != False
        # dict -> model -> attribute kwargs -> model
        second = DatabaseInformation(**DatabaseInformation.from_dict(payload).__dict__)
        # Both construction paths must yield equivalent instances.
        assert first == second
        # model -> dict must not lose any data.
        assert first.to_dict() == payload
class TestModel_DatabaseInformationCluster():
    """
    Test Class for DatabaseInformationCluster
    """
    def test_database_information_cluster_serialization(self):
        """
        Test serialization/deserialization for DatabaseInformationCluster
        """
        # JSON payload used for the round-trip checks.
        payload = {
            'n': 1,
            'q': 1,
            'r': 1,
            'w': 1,
        }
        # dict -> model
        first = DatabaseInformationCluster.from_dict(payload)
        assert first != False
        # dict -> model -> attribute kwargs -> model
        second = DatabaseInformationCluster(**DatabaseInformationCluster.from_dict(payload).__dict__)
        # Both construction paths must yield equivalent instances.
        assert first == second
        # model -> dict must not lose any data.
        assert first.to_dict() == payload
class TestModel_DatabaseInformationProps():
    """
    Test Class for DatabaseInformationProps
    """
    def test_database_information_props_serialization(self):
        """
        Test serialization/deserialization for DatabaseInformationProps
        """
        # JSON payload used for the round-trip checks.
        payload = {'partitioned': True}
        # dict -> model
        first = DatabaseInformationProps.from_dict(payload)
        assert first != False
        # dict -> model -> attribute kwargs -> model
        second = DatabaseInformationProps(**DatabaseInformationProps.from_dict(payload).__dict__)
        # Both construction paths must yield equivalent instances.
        assert first == second
        # model -> dict must not lose any data.
        assert first.to_dict() == payload
class TestModel_DbEvent():
    """
    Test Class for DbEvent
    """
    def test_db_event_serialization(self):
        """
        Test serialization/deserialization for DbEvent
        """
        # JSON payload used for the round-trip checks.
        payload = {
            'account': 'testString',
            'db_name': 'testString',
            'seq': 'testString',
            'type': 'created',
        }
        # dict -> model
        first = DbEvent.from_dict(payload)
        assert first != False
        # dict -> model -> attribute kwargs -> model
        second = DbEvent(**DbEvent.from_dict(payload).__dict__)
        # Both construction paths must yield equivalent instances.
        assert first == second
        # model -> dict must not lose any data.
        assert first.to_dict() == payload
class TestModel_DbUpdates():
    """
    Test Class for DbUpdates
    """
    def test_db_updates_serialization(self):
        """
        Test serialization/deserialization for DbUpdates
        """
        # Nested DbEvent dict required by the payload below.
        event = {  # DbEvent
            'account': 'testString',
            'db_name': 'testString',
            'seq': 'testString',
            'type': 'created',
        }
        # JSON payload used for the round-trip checks.
        payload = {
            'last_seq': 'testString',
            'results': [event],
        }
        # dict -> model
        first = DbUpdates.from_dict(payload)
        assert first != False
        # dict -> model -> attribute kwargs -> model
        second = DbUpdates(**DbUpdates.from_dict(payload).__dict__)
        # Both construction paths must yield equivalent instances.
        assert first == second
        # model -> dict must not lose any data.
        assert first.to_dict() == payload
class TestModel_DbsInfoResult():
    """
    Test Class for DbsInfoResult
    """
    def test_dbs_info_result_serialization(self):
        """
        Test serialization/deserialization for DbsInfoResult
        """
        # Nested model dicts required by the payload below.
        cluster = {'n': 1, 'q': 1, 'r': 1, 'w': 1}  # DatabaseInformationCluster
        props = {'partitioned': True}  # DatabaseInformationProps
        sizes = {'active': 26, 'external': 26, 'file': 26}  # ContentInformationSizes
        db_info = {  # DatabaseInformation
            'cluster': cluster,
            'committed_update_seq': 'testString',
            'compact_running': True,
            'compacted_seq': 'testString',
            'db_name': 'testString',
            'disk_format_version': 26,
            'doc_count': 0,
            'doc_del_count': 0,
            'engine': 'testString',
            'props': props,
            'sizes': sizes,
            'update_seq': 'testString',
            'uuid': 'testString',
        }
        # JSON payload used for the round-trip checks.
        payload = {
            'error': 'testString',
            'info': db_info,
            'key': 'testString',
        }
        # dict -> model
        first = DbsInfoResult.from_dict(payload)
        assert first != False
        # dict -> model -> attribute kwargs -> model
        second = DbsInfoResult(**DbsInfoResult.from_dict(payload).__dict__)
        # Both construction paths must yield equivalent instances.
        assert first == second
        # model -> dict must not lose any data.
        assert first.to_dict() == payload
class TestModel_DesignDocument():
    """
    Test Class for DesignDocument
    """
    def test_design_document_serialization(self):
        """
        Test serialization/deserialization for DesignDocument
        """
        # Construct dict forms of the model objects actually referenced by the
        # payload below. (Unused helper dicts — Attachment, Analyzer,
        # AnalyzerConfiguration, SearchIndexDefinition,
        # DesignDocumentViewsMapReduce, GeoIndexDefinition — were removed; the
        # corresponding payload fields are intentionally empty maps.)
        revisions_model = {} # Revisions
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1
        document_revision_status_model = {} # DocumentRevisionStatus
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'
        design_document_options_model = {} # DesignDocumentOptions
        design_document_options_model['partitioned'] = True
        # Construct a json representation of a DesignDocument model
        design_document_model_json = {}
        design_document_model_json['_attachments'] = {}
        design_document_model_json['_conflicts'] = ['testString']
        design_document_model_json['_deleted'] = True
        design_document_model_json['_deleted_conflicts'] = ['testString']
        design_document_model_json['_id'] = 'testString'
        design_document_model_json['_local_seq'] = 'testString'
        design_document_model_json['_rev'] = 'testString'
        design_document_model_json['_revisions'] = revisions_model
        design_document_model_json['_revs_info'] = [document_revision_status_model]
        design_document_model_json['autoupdate'] = True
        design_document_model_json['filters'] = {}
        design_document_model_json['indexes'] = {}
        design_document_model_json['language'] = 'javascript'
        design_document_model_json['options'] = design_document_options_model
        design_document_model_json['validate_doc_update'] = 'testString'
        design_document_model_json['views'] = {}
        design_document_model_json['st_indexes'] = {}
        design_document_model_json['foo'] = 'testString'
        # Construct a model instance of DesignDocument by calling from_dict on the json representation
        design_document_model = DesignDocument.from_dict(design_document_model_json)
        assert design_document_model != False
        # Construct a second instance by passing the deserialized attributes to the constructor
        design_document_model_dict = DesignDocument.from_dict(design_document_model_json).__dict__
        design_document_model2 = DesignDocument(**design_document_model_dict)
        # Verify the model instances are equivalent
        assert design_document_model == design_document_model2
        # Convert model instance back to dict and verify no loss of data
        design_document_model_json2 = design_document_model.to_dict()
        assert design_document_model_json2 == design_document_model_json
        # Test get_properties and set_properties methods (arbitrary extra
        # properties on the dynamic model).
        design_document_model.set_properties({})
        actual_dict = design_document_model.get_properties()
        assert actual_dict == {}
        expected_dict = {'foo': 'testString'}
        design_document_model.set_properties(expected_dict)
        actual_dict = design_document_model.get_properties()
        assert actual_dict == expected_dict
class TestModel_DesignDocumentInformation():
    """
    Test Class for DesignDocumentInformation
    """
    def test_design_document_information_serialization(self):
        """
        Test serialization/deserialization for DesignDocumentInformation
        """
        # Nested model dicts required by the payload below.
        sizes = {'active': 26, 'external': 26, 'file': 26}  # ContentInformationSizes
        view_index = {  # DesignDocumentViewIndex
            'compact_running': True,
            'language': 'testString',
            'signature': 'testString',
            'sizes': sizes,
            'updater_running': True,
            'waiting_clients': 0,
            'waiting_commit': True,
        }
        # JSON payload used for the round-trip checks.
        payload = {
            'name': 'testString',
            'view_index': view_index,
        }
        # dict -> model
        first = DesignDocumentInformation.from_dict(payload)
        assert first != False
        # dict -> model -> attribute kwargs -> model
        second = DesignDocumentInformation(**DesignDocumentInformation.from_dict(payload).__dict__)
        # Both construction paths must yield equivalent instances.
        assert first == second
        # model -> dict must not lose any data.
        assert first.to_dict() == payload
class TestModel_DesignDocumentOptions():
    """
    Test Class for DesignDocumentOptions
    """
    def test_design_document_options_serialization(self):
        """
        Test serialization/deserialization for DesignDocumentOptions
        """
        # JSON payload used for the round-trip checks.
        payload = {'partitioned': True}
        # dict -> model
        first = DesignDocumentOptions.from_dict(payload)
        assert first != False
        # dict -> model -> attribute kwargs -> model
        second = DesignDocumentOptions(**DesignDocumentOptions.from_dict(payload).__dict__)
        # Both construction paths must yield equivalent instances.
        assert first == second
        # model -> dict must not lose any data.
        assert first.to_dict() == payload
class TestModel_DesignDocumentViewIndex():
    """
    Test Class for DesignDocumentViewIndex
    """
    def test_design_document_view_index_serialization(self):
        """
        Test serialization/deserialization for DesignDocumentViewIndex
        """
        # Nested sizes dict required by the payload below.
        sizes = {'active': 26, 'external': 26, 'file': 26}  # ContentInformationSizes
        # JSON payload used for the round-trip checks.
        payload = {
            'compact_running': True,
            'language': 'testString',
            'signature': 'testString',
            'sizes': sizes,
            'updater_running': True,
            'waiting_clients': 0,
            'waiting_commit': True,
        }
        # dict -> model
        first = DesignDocumentViewIndex.from_dict(payload)
        assert first != False
        # dict -> model -> attribute kwargs -> model
        second = DesignDocumentViewIndex(**DesignDocumentViewIndex.from_dict(payload).__dict__)
        # Both construction paths must yield equivalent instances.
        assert first == second
        # model -> dict must not lose any data.
        assert first.to_dict() == payload
class TestModel_DesignDocumentViewsMapReduce():
    """
    Test Class for DesignDocumentViewsMapReduce
    """
    def test_design_document_views_map_reduce_serialization(self):
        """
        Test serialization/deserialization for DesignDocumentViewsMapReduce
        """
        # JSON payload used for the round-trip checks.
        payload = {
            'map': 'testString',
            'reduce': 'testString',
        }
        # dict -> model
        first = DesignDocumentViewsMapReduce.from_dict(payload)
        assert first != False
        # dict -> model -> attribute kwargs -> model
        second = DesignDocumentViewsMapReduce(**DesignDocumentViewsMapReduce.from_dict(payload).__dict__)
        # Both construction paths must yield equivalent instances.
        assert first == second
        # model -> dict must not lose any data.
        assert first.to_dict() == payload
class TestModel_DocsResultRow():
    """
    Test Class for DocsResultRow
    """
    def test_docs_result_row_serialization(self):
        """
        Test serialization/deserialization for DocsResultRow
        """
        # Construct dict forms of the model objects actually referenced by the
        # payload below. (An unused Attachment dict was removed; the document's
        # '_attachments' field is intentionally an empty map.)
        revisions_model = {} # Revisions
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1
        document_revision_status_model = {} # DocumentRevisionStatus
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'
        document_model = {} # Document
        document_model['_attachments'] = {}
        document_model['_conflicts'] = ['testString']
        document_model['_deleted'] = True
        document_model['_deleted_conflicts'] = ['testString']
        document_model['_id'] = 'testString'
        document_model['_local_seq'] = 'testString'
        document_model['_rev'] = 'testString'
        document_model['_revisions'] = revisions_model
        document_model['_revs_info'] = [document_revision_status_model]
        document_model['foo'] = 'testString'
        docs_result_row_value_model = {} # DocsResultRowValue
        docs_result_row_value_model['rev'] = 'testString'
        # Construct a json representation of a DocsResultRow model
        docs_result_row_model_json = {}
        docs_result_row_model_json['caused_by'] = 'testString'
        docs_result_row_model_json['error'] = 'testString'
        docs_result_row_model_json['reason'] = 'testString'
        docs_result_row_model_json['doc'] = document_model
        docs_result_row_model_json['id'] = 'testString'
        docs_result_row_model_json['key'] = 'testString'
        docs_result_row_model_json['value'] = docs_result_row_value_model
        # Construct a model instance of DocsResultRow by calling from_dict on the json representation
        docs_result_row_model = DocsResultRow.from_dict(docs_result_row_model_json)
        assert docs_result_row_model != False
        # Construct a second instance by passing the deserialized attributes to the constructor
        docs_result_row_model_dict = DocsResultRow.from_dict(docs_result_row_model_json).__dict__
        docs_result_row_model2 = DocsResultRow(**docs_result_row_model_dict)
        # Verify the model instances are equivalent
        assert docs_result_row_model == docs_result_row_model2
        # Convert model instance back to dict and verify no loss of data
        docs_result_row_model_json2 = docs_result_row_model.to_dict()
        assert docs_result_row_model_json2 == docs_result_row_model_json
class TestModel_DocsResultRowValue():
    """
    Test Class for DocsResultRowValue
    """

    def test_docs_result_row_value_serialization(self):
        """
        Test serialization/deserialization for DocsResultRowValue
        """
        # Raw dict payload for a DocsResultRowValue model.
        payload = {'rev': 'testString'}

        # Deserialize the payload into a model instance.
        model = DocsResultRowValue.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict and
        # verify the two instances compare equal.
        rebuilt = DocsResultRowValue(**DocsResultRowValue.from_dict(payload).__dict__)
        assert model == rebuilt

        # Serializing back to a dict must reproduce the original payload.
        assert model.to_dict() == payload
class TestModel_Document():
    """
    Test Class for Document
    """

    def test_document_serialization(self):
        """
        Test serialization/deserialization for Document
        """
        # Construct dict forms of the nested model objects referenced by the
        # Document payload. (A previously-present Attachment stub was never
        # referenced — the '_attachments' field below is exercised with an
        # empty mapping — so it has been removed as dead code.)
        revisions_model = {}  # Revisions
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1

        document_revision_status_model = {}  # DocumentRevisionStatus
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'

        # Construct a json representation of a Document model.
        document_model_json = {}
        document_model_json['_attachments'] = {}
        document_model_json['_conflicts'] = ['testString']
        document_model_json['_deleted'] = True
        document_model_json['_deleted_conflicts'] = ['testString']
        document_model_json['_id'] = 'testString'
        document_model_json['_local_seq'] = 'testString'
        document_model_json['_rev'] = 'testString'
        document_model_json['_revisions'] = revisions_model
        document_model_json['_revs_info'] = [document_revision_status_model]
        document_model_json['foo'] = 'testString'

        # Construct a model instance of Document by calling from_dict on the json representation.
        document_model = Document.from_dict(document_model_json)
        assert document_model != False

        # Construct a second instance from the first instance's attributes and
        # verify the instances are equivalent.
        document_model_dict = Document.from_dict(document_model_json).__dict__
        document_model2 = Document(**document_model_dict)
        assert document_model == document_model2

        # Convert model instance back to dict and verify no loss of data.
        document_model_json2 = document_model.to_dict()
        assert document_model_json2 == document_model_json

        # Test get_properties and set_properties methods.
        document_model.set_properties({})
        actual_dict = document_model.get_properties()
        assert actual_dict == {}

        expected_dict = {'foo': 'testString'}
        document_model.set_properties(expected_dict)
        actual_dict = document_model.get_properties()
        assert actual_dict == expected_dict
class TestModel_DocumentResult():
    """
    Test Class for DocumentResult
    """

    def test_document_result_serialization(self):
        """
        Test serialization/deserialization for DocumentResult
        """
        # Raw dict payload for a DocumentResult model.
        payload = {
            'id': 'testString',
            'rev': 'testString',
            'ok': True,
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = DocumentResult.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict and
        # verify the two instances compare equal.
        rebuilt = DocumentResult(**DocumentResult.from_dict(payload).__dict__)
        assert model == rebuilt

        # Serializing back to a dict must reproduce the original payload.
        assert model.to_dict() == payload
class TestModel_DocumentRevisionStatus():
    """
    Test Class for DocumentRevisionStatus
    """

    def test_document_revision_status_serialization(self):
        """
        Test serialization/deserialization for DocumentRevisionStatus
        """
        # Raw dict payload for a DocumentRevisionStatus model.
        payload = {
            'rev': 'testString',
            'status': 'available',
        }

        # Deserialize the payload into a model instance.
        model = DocumentRevisionStatus.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict and
        # verify the two instances compare equal.
        rebuilt = DocumentRevisionStatus(**DocumentRevisionStatus.from_dict(payload).__dict__)
        assert model == rebuilt

        # Serializing back to a dict must reproduce the original payload.
        assert model.to_dict() == payload
class TestModel_DocumentShardInfo():
    """
    Test Class for DocumentShardInfo
    """

    def test_document_shard_info_serialization(self):
        """
        Test serialization/deserialization for DocumentShardInfo
        """
        # Raw dict payload for a DocumentShardInfo model.
        payload = {
            'nodes': ['testString'],
            'range': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = DocumentShardInfo.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict and
        # verify the two instances compare equal.
        rebuilt = DocumentShardInfo(**DocumentShardInfo.from_dict(payload).__dict__)
        assert model == rebuilt

        # Serializing back to a dict must reproduce the original payload.
        assert model.to_dict() == payload
class TestModel_ExecutionStats():
    """
    Test Class for ExecutionStats
    """

    def test_execution_stats_serialization(self):
        """
        Test serialization/deserialization for ExecutionStats
        """
        # Raw dict payload for an ExecutionStats model.
        payload = {
            'execution_time_ms': 72.5,
            'results_returned': 0,
            'total_docs_examined': 0,
            'total_keys_examined': 0,
            'total_quorum_docs_examined': 0,
        }

        # Deserialize the payload into a model instance.
        model = ExecutionStats.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict and
        # verify the two instances compare equal.
        rebuilt = ExecutionStats(**ExecutionStats.from_dict(payload).__dict__)
        assert model == rebuilt

        # Serializing back to a dict must reproduce the original payload.
        assert model.to_dict() == payload
class TestModel_ExplainResult():
    """
    Test Class for ExplainResult
    """

    def test_explain_result_serialization(self):
        """
        Test serialization/deserialization for ExplainResult
        """
        # Nested dict payloads needed to assemble the top-level model.
        analyzer = {'name': 'classic', 'stopwords': ['testString']}  # Analyzer
        default_field = {'analyzer': analyzer, 'enabled': True}  # IndexTextOperatorDefaultField
        index_field = {'name': 'testString', 'type': 'boolean', 'foo': 'asc'}  # IndexField
        index_definition = {  # IndexDefinition
            'default_analyzer': analyzer,
            'default_field': default_field,
            'fields': [index_field],
            'index_array_lengths': True,
            'partial_filter_selector': {},
        }
        index_information = {  # IndexInformation
            'ddoc': 'testString',
            'def': index_definition,
            'name': 'testString',
            'type': 'json',
        }
        result_range = {  # ExplainResultRange
            'end_key': ['testString'],
            'start_key': ['testString'],
        }

        # Raw dict payload for an ExplainResult model.
        payload = {
            'dbname': 'testString',
            'fields': ['testString'],
            'index': index_information,
            'limit': 0,
            'opts': {},
            'range': result_range,
            'selector': {},
            'skip': 0,
        }

        # Deserialize the payload into a model instance.
        model = ExplainResult.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict and
        # verify the two instances compare equal.
        rebuilt = ExplainResult(**ExplainResult.from_dict(payload).__dict__)
        assert model == rebuilt

        # Serializing back to a dict must reproduce the original payload.
        assert model.to_dict() == payload
class TestModel_ExplainResultRange():
    """
    Test Class for ExplainResultRange
    """

    def test_explain_result_range_serialization(self):
        """
        Test serialization/deserialization for ExplainResultRange
        """
        # Raw dict payload for an ExplainResultRange model.
        payload = {
            'end_key': ['testString'],
            'start_key': ['testString'],
        }

        # Deserialize the payload into a model instance.
        model = ExplainResultRange.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict and
        # verify the two instances compare equal.
        rebuilt = ExplainResultRange(**ExplainResultRange.from_dict(payload).__dict__)
        assert model == rebuilt

        # Serializing back to a dict must reproduce the original payload.
        assert model.to_dict() == payload
class TestModel_FindResult():
    """
    Test Class for FindResult
    """

    def test_find_result_serialization(self):
        """
        Test serialization/deserialization for FindResult
        """
        # Construct dict forms of the nested model objects referenced by the
        # FindResult payload. (A previously-present Attachment stub was never
        # referenced — the document's '_attachments' field is exercised with
        # an empty mapping — so it has been removed as dead code.)
        revisions_model = {}  # Revisions
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1

        document_revision_status_model = {}  # DocumentRevisionStatus
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'

        document_model = {}  # Document
        document_model['_attachments'] = {}
        document_model['_conflicts'] = ['testString']
        document_model['_deleted'] = True
        document_model['_deleted_conflicts'] = ['testString']
        document_model['_id'] = 'testString'
        document_model['_local_seq'] = 'testString'
        document_model['_rev'] = 'testString'
        document_model['_revisions'] = revisions_model
        document_model['_revs_info'] = [document_revision_status_model]
        document_model['foo'] = 'testString'

        execution_stats_model = {}  # ExecutionStats
        execution_stats_model['execution_time_ms'] = 72.5
        execution_stats_model['results_returned'] = 0
        execution_stats_model['total_docs_examined'] = 0
        execution_stats_model['total_keys_examined'] = 0
        execution_stats_model['total_quorum_docs_examined'] = 0

        # Construct a json representation of a FindResult model.
        find_result_model_json = {}
        find_result_model_json['bookmark'] = 'testString'
        find_result_model_json['docs'] = [document_model]
        find_result_model_json['execution_stats'] = execution_stats_model
        find_result_model_json['warning'] = 'testString'

        # Construct a model instance of FindResult by calling from_dict on the json representation.
        find_result_model = FindResult.from_dict(find_result_model_json)
        assert find_result_model != False

        # Construct a second instance from the first instance's attributes and
        # verify the instances are equivalent.
        find_result_model_dict = FindResult.from_dict(find_result_model_json).__dict__
        find_result_model2 = FindResult(**find_result_model_dict)
        assert find_result_model == find_result_model2

        # Convert model instance back to dict and verify no loss of data.
        find_result_model_json2 = find_result_model.to_dict()
        assert find_result_model_json2 == find_result_model_json
class TestModel_GeoIndexDefinition():
    """
    Test Class for GeoIndexDefinition
    """

    def test_geo_index_definition_serialization(self):
        """
        Test serialization/deserialization for GeoIndexDefinition
        """
        # Raw dict payload for a GeoIndexDefinition model.
        payload = {'index': 'testString'}

        # Deserialize the payload into a model instance.
        model = GeoIndexDefinition.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict and
        # verify the two instances compare equal.
        rebuilt = GeoIndexDefinition(**GeoIndexDefinition.from_dict(payload).__dict__)
        assert model == rebuilt

        # Serializing back to a dict must reproduce the original payload.
        assert model.to_dict() == payload
class TestModel_GeoIndexInformation():
    """
    Test Class for GeoIndexInformation
    """

    def test_geo_index_information_serialization(self):
        """
        Test serialization/deserialization for GeoIndexInformation
        """
        # Nested dict payload needed to assemble the top-level model.
        stats = {  # GeoIndexStats
            'data_size': 0,
            'disk_size': 0,
            'doc_count': 0,
        }

        # Raw dict payload for a GeoIndexInformation model.
        payload = {
            'geo_index': stats,
            'name': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = GeoIndexInformation.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict and
        # verify the two instances compare equal.
        rebuilt = GeoIndexInformation(**GeoIndexInformation.from_dict(payload).__dict__)
        assert model == rebuilt

        # Serializing back to a dict must reproduce the original payload.
        assert model.to_dict() == payload
class TestModel_GeoIndexStats():
    """
    Test Class for GeoIndexStats
    """

    def test_geo_index_stats_serialization(self):
        """
        Test serialization/deserialization for GeoIndexStats
        """
        # Raw dict payload for a GeoIndexStats model.
        payload = {
            'data_size': 0,
            'disk_size': 0,
            'doc_count': 0,
        }

        # Deserialize the payload into a model instance.
        model = GeoIndexStats.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict and
        # verify the two instances compare equal.
        rebuilt = GeoIndexStats(**GeoIndexStats.from_dict(payload).__dict__)
        assert model == rebuilt

        # Serializing back to a dict must reproduce the original payload.
        assert model.to_dict() == payload
class TestModel_GeoJsonFeature():
    """
    Test Class for GeoJsonFeature
    """

    def test_geo_json_feature_serialization(self):
        """
        Test serialization/deserialization for GeoJsonFeature
        """
        # Nested dict payload needed to assemble the top-level model.
        geometry = {  # GeoJsonGeometry
            'type': 'Point',
            'coordinates': ['testString'],
        }

        # Raw dict payload for a GeoJsonFeature model.
        payload = {
            '_id': 'testString',
            '_rev': 'testString',
            'bbox': [72.5],
            'geometry': geometry,
            'properties': {},
            'type': 'Feature',
            'foo': 'testString',
        }

        # Deserialize the payload into a model instance.
        model = GeoJsonFeature.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict and
        # verify the two instances compare equal.
        rebuilt = GeoJsonFeature(**GeoJsonFeature.from_dict(payload).__dict__)
        assert model == rebuilt

        # Serializing back to a dict must reproduce the original payload.
        assert model.to_dict() == payload

        # Exercise the get_properties / set_properties accessors.
        model.set_properties({})
        assert model.get_properties() == {}

        props = {'foo': 'testString'}
        model.set_properties(props)
        assert model.get_properties() == props
class TestModel_GeoResult():
    """
    Test Class for GeoResult
    """

    def test_geo_result_serialization(self):
        """
        Test serialization/deserialization for GeoResult
        """
        # Construct dict forms of the nested model objects referenced by the
        # GeoResult payload. (A previously-present Attachment stub was never
        # referenced — the document's '_attachments' field is exercised with
        # an empty mapping — so it has been removed as dead code.)
        geo_json_geometry_object_model = {}  # GeoJsonGeometry
        geo_json_geometry_object_model['type'] = 'Point'
        geo_json_geometry_object_model['coordinates'] = ['testString']

        geo_json_feature_model = {}  # GeoJsonFeature
        geo_json_feature_model['_id'] = 'testString'
        geo_json_feature_model['_rev'] = 'testString'
        geo_json_feature_model['bbox'] = [72.5]
        geo_json_feature_model['geometry'] = geo_json_geometry_object_model
        geo_json_feature_model['properties'] = {}
        geo_json_feature_model['type'] = 'Feature'
        geo_json_feature_model['foo'] = 'testString'

        revisions_model = {}  # Revisions
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1

        document_revision_status_model = {}  # DocumentRevisionStatus
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'

        document_model = {}  # Document
        document_model['_attachments'] = {}
        document_model['_conflicts'] = ['testString']
        document_model['_deleted'] = True
        document_model['_deleted_conflicts'] = ['testString']
        document_model['_id'] = 'testString'
        document_model['_local_seq'] = 'testString'
        document_model['_rev'] = 'testString'
        document_model['_revisions'] = revisions_model
        document_model['_revs_info'] = [document_revision_status_model]
        document_model['foo'] = 'testString'

        geo_json_geometry_model = {}  # GeoJsonGeometry
        geo_json_geometry_model['type'] = 'Point'
        geo_json_geometry_model['coordinates'] = ['testString']

        geo_result_row_model = {}  # GeoResultRow
        geo_result_row_model['doc'] = document_model
        geo_result_row_model['geometry'] = geo_json_geometry_model
        geo_result_row_model['id'] = 'testString'
        geo_result_row_model['rev'] = 'testString'

        # Construct a json representation of a GeoResult model.
        geo_result_model_json = {}
        geo_result_model_json['bookmark'] = 'testString'
        geo_result_model_json['features'] = [geo_json_feature_model]
        geo_result_model_json['rows'] = [geo_result_row_model]
        geo_result_model_json['type'] = 'FeatureCollection'

        # Construct a model instance of GeoResult by calling from_dict on the json representation.
        geo_result_model = GeoResult.from_dict(geo_result_model_json)
        assert geo_result_model != False

        # Construct a second instance from the first instance's attributes and
        # verify the instances are equivalent.
        geo_result_model_dict = GeoResult.from_dict(geo_result_model_json).__dict__
        geo_result_model2 = GeoResult(**geo_result_model_dict)
        assert geo_result_model == geo_result_model2

        # Convert model instance back to dict and verify no loss of data.
        geo_result_model_json2 = geo_result_model.to_dict()
        assert geo_result_model_json2 == geo_result_model_json
class TestModel_GeoResultRow():
    """
    Test Class for GeoResultRow
    """

    def test_geo_result_row_serialization(self):
        """
        Test serialization/deserialization for GeoResultRow
        """
        # Construct dict forms of the nested model objects referenced by the
        # GeoResultRow payload. (A previously-present Attachment stub was
        # never referenced — the document's '_attachments' field is exercised
        # with an empty mapping — so it has been removed as dead code.)
        revisions_model = {}  # Revisions
        revisions_model['ids'] = ['testString']
        revisions_model['start'] = 1

        document_revision_status_model = {}  # DocumentRevisionStatus
        document_revision_status_model['rev'] = 'testString'
        document_revision_status_model['status'] = 'available'

        document_model = {}  # Document
        document_model['_attachments'] = {}
        document_model['_conflicts'] = ['testString']
        document_model['_deleted'] = True
        document_model['_deleted_conflicts'] = ['testString']
        document_model['_id'] = 'testString'
        document_model['_local_seq'] = 'testString'
        document_model['_rev'] = 'testString'
        document_model['_revisions'] = revisions_model
        document_model['_revs_info'] = [document_revision_status_model]
        document_model['foo'] = 'testString'

        geo_json_geometry_model = {}  # GeoJsonGeometry
        geo_json_geometry_model['type'] = 'Point'
        geo_json_geometry_model['coordinates'] = ['testString']

        # Construct a json representation of a GeoResultRow model.
        geo_result_row_model_json = {}
        geo_result_row_model_json['doc'] = document_model
        geo_result_row_model_json['geometry'] = geo_json_geometry_model
        geo_result_row_model_json['id'] = 'testString'
        geo_result_row_model_json['rev'] = 'testString'

        # Construct a model instance of GeoResultRow by calling from_dict on the json representation.
        geo_result_row_model = GeoResultRow.from_dict(geo_result_row_model_json)
        assert geo_result_row_model != False

        # Construct a second instance from the first instance's attributes and
        # verify the instances are equivalent.
        geo_result_row_model_dict = GeoResultRow.from_dict(geo_result_row_model_json).__dict__
        geo_result_row_model2 = GeoResultRow(**geo_result_row_model_dict)
        assert geo_result_row_model == geo_result_row_model2

        # Convert model instance back to dict and verify no loss of data.
        geo_result_row_model_json2 = geo_result_row_model.to_dict()
        assert geo_result_row_model_json2 == geo_result_row_model_json
class TestModel_IndexDefinition():
    """
    Test Class for IndexDefinition
    """

    def test_index_definition_serialization(self):
        """
        Test serialization/deserialization for IndexDefinition
        """
        # Nested dict payloads needed to assemble the top-level model.
        analyzer = {'name': 'classic', 'stopwords': ['testString']}  # Analyzer
        default_field = {'analyzer': analyzer, 'enabled': True}  # IndexTextOperatorDefaultField
        index_field = {'name': 'testString', 'type': 'boolean', 'foo': 'asc'}  # IndexField

        # Raw dict payload for an IndexDefinition model.
        payload = {
            'default_analyzer': analyzer,
            'default_field': default_field,
            'fields': [index_field],
            'index_array_lengths': True,
            'partial_filter_selector': {},
        }

        # Deserialize the payload into a model instance.
        model = IndexDefinition.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict and
        # verify the two instances compare equal.
        rebuilt = IndexDefinition(**IndexDefinition.from_dict(payload).__dict__)
        assert model == rebuilt

        # Serializing back to a dict must reproduce the original payload.
        assert model.to_dict() == payload
class TestModel_IndexField():
    """
    Test Class for IndexField
    """

    def test_index_field_serialization(self):
        """
        Test serialization/deserialization for IndexField
        """
        # Raw dict payload for an IndexField model.
        payload = {
            'name': 'testString',
            'type': 'boolean',
            'foo': 'asc',
        }

        # Deserialize the payload into a model instance.
        model = IndexField.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict and
        # verify the two instances compare equal.
        rebuilt = IndexField(**IndexField.from_dict(payload).__dict__)
        assert model == rebuilt

        # Serializing back to a dict must reproduce the original payload.
        assert model.to_dict() == payload

        # Exercise the get_properties / set_properties accessors.
        model.set_properties({})
        assert model.get_properties() == {}

        props = {'foo': 'asc'}
        model.set_properties(props)
        assert model.get_properties() == props
class TestModel_IndexInformation():
    """
    Test Class for IndexInformation
    """

    def test_index_information_serialization(self):
        """
        Test serialization/deserialization for IndexInformation
        """
        # Nested dict payloads needed to assemble the top-level model.
        analyzer = {'name': 'classic', 'stopwords': ['testString']}  # Analyzer
        default_field = {'analyzer': analyzer, 'enabled': True}  # IndexTextOperatorDefaultField
        index_field = {'name': 'testString', 'type': 'boolean', 'foo': 'asc'}  # IndexField
        index_definition = {  # IndexDefinition
            'default_analyzer': analyzer,
            'default_field': default_field,
            'fields': [index_field],
            'index_array_lengths': True,
            'partial_filter_selector': {},
        }

        # Raw dict payload for an IndexInformation model.
        payload = {
            'ddoc': 'testString',
            'def': index_definition,
            'name': 'testString',
            'type': 'json',
        }

        # Deserialize the payload into a model instance.
        model = IndexInformation.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict and
        # verify the two instances compare equal.
        rebuilt = IndexInformation(**IndexInformation.from_dict(payload).__dict__)
        assert model == rebuilt

        # Serializing back to a dict must reproduce the original payload.
        assert model.to_dict() == payload
class TestModel_IndexResult():
    """
    Test Class for IndexResult
    """
    def test_index_result_serialization(self):
        """
        Test serialization/deserialization for IndexResult
        """
        # JSON-style fixture for an IndexResult model.
        fixture = {
            'id': 'testString',
            'name': 'testString',
            'result': 'created',
        }
        # Deserialize the fixture into a model instance.
        model = IndexResult.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = IndexResult(**IndexResult.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_IndexTextOperatorDefaultField():
    """
    Test Class for IndexTextOperatorDefaultField
    """
    def test_index_text_operator_default_field_serialization(self):
        """
        Test serialization/deserialization for IndexTextOperatorDefaultField
        """
        # Nested Analyzer fixture required by the model under test.
        analyzer = {
            'name': 'classic',
            'stopwords': ['testString'],
        }
        # JSON-style fixture for an IndexTextOperatorDefaultField model.
        fixture = {
            'analyzer': analyzer,
            'enabled': True,
        }
        # Deserialize the fixture into a model instance.
        model = IndexTextOperatorDefaultField.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = IndexTextOperatorDefaultField(**IndexTextOperatorDefaultField.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_IndexesInformation():
    """
    Test Class for IndexesInformation
    """
    def test_indexes_information_serialization(self):
        """
        Test serialization/deserialization for IndexesInformation
        """
        # Nested fixtures needed to assemble an IndexInformation entry.
        analyzer = {
            'name': 'classic',
            'stopwords': ['testString'],
        }
        default_field = {  # IndexTextOperatorDefaultField
            'analyzer': analyzer,
            'enabled': True,
        }
        field = {  # IndexField
            'name': 'testString',
            'type': 'boolean',
            'foo': 'asc',
        }
        definition = {  # IndexDefinition
            'default_analyzer': analyzer,
            'default_field': default_field,
            'fields': [field],
            'index_array_lengths': True,
            'partial_filter_selector': {},
        }
        index_info = {  # IndexInformation
            'ddoc': 'testString',
            'def': definition,
            'name': 'testString',
            'type': 'json',
        }
        # JSON-style fixture for an IndexesInformation model.
        fixture = {
            'total_rows': 0,
            'indexes': [index_info],
        }
        # Deserialize the fixture into a model instance.
        model = IndexesInformation.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = IndexesInformation(**IndexesInformation.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_MembershipInformation():
    """
    Test Class for MembershipInformation
    """
    def test_membership_information_serialization(self):
        """
        Test serialization/deserialization for MembershipInformation
        """
        # JSON-style fixture for a MembershipInformation model.
        fixture = {
            'all_nodes': ['testString'],
            'cluster_nodes': ['testString'],
        }
        # Deserialize the fixture into a model instance.
        model = MembershipInformation.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = MembershipInformation(**MembershipInformation.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_Ok():
    """
    Test Class for Ok
    """
    def test_ok_serialization(self):
        """
        Test serialization/deserialization for Ok
        """
        # JSON-style fixture for an Ok model.
        fixture = {'ok': True}
        # Deserialize the fixture into a model instance.
        model = Ok.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = Ok(**Ok.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_PartitionInformation():
    """
    Test Class for PartitionInformation
    """
    def test_partition_information_serialization(self):
        """
        Test serialization/deserialization for PartitionInformation
        """
        # Nested fixtures for the partitioned-indexes and sizes sub-models.
        inner_indexes = {  # PartitionInformationIndexesIndexes
            'search': 0,
            'view': 0,
        }
        indexes = {  # PartitionInformationIndexes
            'count': 0,
            'indexes': inner_indexes,
            'limit': 0,
        }
        sizes = {  # PartitionInformationSizes
            'active': 0,
            'external': 0,
        }
        # JSON-style fixture for a PartitionInformation model.
        fixture = {
            'db_name': 'testString',
            'doc_count': 0,
            'doc_del_count': 0,
            'partition': 'testString',
            'partitioned_indexes': indexes,
            'sizes': sizes,
        }
        # Deserialize the fixture into a model instance.
        model = PartitionInformation.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = PartitionInformation(**PartitionInformation.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_PartitionInformationIndexes():
    """
    Test Class for PartitionInformationIndexes
    """
    def test_partition_information_indexes_serialization(self):
        """
        Test serialization/deserialization for PartitionInformationIndexes
        """
        # Nested PartitionInformationIndexesIndexes fixture.
        inner_indexes = {
            'search': 0,
            'view': 0,
        }
        # JSON-style fixture for a PartitionInformationIndexes model.
        fixture = {
            'count': 0,
            'indexes': inner_indexes,
            'limit': 0,
        }
        # Deserialize the fixture into a model instance.
        model = PartitionInformationIndexes.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = PartitionInformationIndexes(**PartitionInformationIndexes.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_PartitionInformationIndexesIndexes():
    """
    Test Class for PartitionInformationIndexesIndexes
    """
    def test_partition_information_indexes_indexes_serialization(self):
        """
        Test serialization/deserialization for PartitionInformationIndexesIndexes
        """
        # JSON-style fixture for a PartitionInformationIndexesIndexes model.
        fixture = {
            'search': 0,
            'view': 0,
        }
        # Deserialize the fixture into a model instance.
        model = PartitionInformationIndexesIndexes.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = PartitionInformationIndexesIndexes(**PartitionInformationIndexesIndexes.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_PartitionInformationSizes():
    """
    Test Class for PartitionInformationSizes
    """
    def test_partition_information_sizes_serialization(self):
        """
        Test serialization/deserialization for PartitionInformationSizes
        """
        # JSON-style fixture for a PartitionInformationSizes model.
        fixture = {
            'active': 0,
            'external': 0,
        }
        # Deserialize the fixture into a model instance.
        model = PartitionInformationSizes.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = PartitionInformationSizes(**PartitionInformationSizes.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_ReplicationCreateTargetParameters():
    """
    Test Class for ReplicationCreateTargetParameters
    """
    def test_replication_create_target_parameters_serialization(self):
        """
        Test serialization/deserialization for ReplicationCreateTargetParameters
        """
        # JSON-style fixture for a ReplicationCreateTargetParameters model.
        fixture = {
            'n': 1,
            'partitioned': False,
            'q': 1,
        }
        # Deserialize the fixture into a model instance.
        model = ReplicationCreateTargetParameters.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = ReplicationCreateTargetParameters(**ReplicationCreateTargetParameters.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_ReplicationDatabase():
    """
    Test Class for ReplicationDatabase
    """
    def test_replication_database_serialization(self):
        """
        Test serialization/deserialization for ReplicationDatabase
        """
        # Nested auth fixtures (basic + IAM) wrapped in a ReplicationDatabaseAuth.
        auth = {  # ReplicationDatabaseAuth
            'basic': {  # ReplicationDatabaseAuthBasic
                'password': 'testString',
                'username': 'testString',
            },
            'iam': {  # ReplicationDatabaseAuthIam
                'api_key': 'testString',
            },
        }
        # JSON-style fixture for a ReplicationDatabase model.
        fixture = {
            'auth': auth,
            'headers': {},
            'url': 'testString',
        }
        # Deserialize the fixture into a model instance.
        model = ReplicationDatabase.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = ReplicationDatabase(**ReplicationDatabase.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_ReplicationDatabaseAuth():
    """
    Test Class for ReplicationDatabaseAuth
    """
    def test_replication_database_auth_serialization(self):
        """
        Test serialization/deserialization for ReplicationDatabaseAuth
        """
        # JSON-style fixture for a ReplicationDatabaseAuth model with both
        # basic and IAM credential sub-models inlined.
        fixture = {
            'basic': {  # ReplicationDatabaseAuthBasic
                'password': 'testString',
                'username': 'testString',
            },
            'iam': {  # ReplicationDatabaseAuthIam
                'api_key': 'testString',
            },
        }
        # Deserialize the fixture into a model instance.
        model = ReplicationDatabaseAuth.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = ReplicationDatabaseAuth(**ReplicationDatabaseAuth.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_ReplicationDatabaseAuthBasic():
    """
    Test Class for ReplicationDatabaseAuthBasic
    """
    def test_replication_database_auth_basic_serialization(self):
        """
        Test serialization/deserialization for ReplicationDatabaseAuthBasic
        """
        # JSON-style fixture for a ReplicationDatabaseAuthBasic model.
        fixture = {
            'password': 'testString',
            'username': 'testString',
        }
        # Deserialize the fixture into a model instance.
        model = ReplicationDatabaseAuthBasic.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = ReplicationDatabaseAuthBasic(**ReplicationDatabaseAuthBasic.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_ReplicationDatabaseAuthIam():
    """
    Test Class for ReplicationDatabaseAuthIam
    """
    def test_replication_database_auth_iam_serialization(self):
        """
        Test serialization/deserialization for ReplicationDatabaseAuthIam
        """
        # JSON-style fixture for a ReplicationDatabaseAuthIam model.
        fixture = {'api_key': 'testString'}
        # Deserialize the fixture into a model instance.
        model = ReplicationDatabaseAuthIam.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = ReplicationDatabaseAuthIam(**ReplicationDatabaseAuthIam.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_ReplicationDocument():
    """
    Test Class for ReplicationDocument
    """
    def test_replication_document_serialization(self):
        """
        Test serialization/deserialization for ReplicationDocument
        """
        # Attachment fixture (generated alongside the others; not referenced by
        # the fixture below, which uses an empty '_attachments' map).
        attachment = {  # Attachment
            'content_type': 'testString',
            'data': 'VGhpcyBpcyBhIG1vY2sgYnl0ZSBhcnJheSB2YWx1ZS4=',
            'digest': 'testString',
            'encoded_length': 0,
            'encoding': 'testString',
            'follows': True,
            'length': 0,
            'revpos': 1,
            'stub': True,
        }
        revisions = {  # Revisions
            'ids': ['testString'],
            'start': 1,
        }
        rev_status = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }
        create_target_params = {  # ReplicationCreateTargetParameters
            'n': 1,
            'partitioned': False,
            'q': 1,
        }
        auth = {  # ReplicationDatabaseAuth
            'basic': {  # ReplicationDatabaseAuthBasic
                'password': 'testString',
                'username': 'testString',
            },
            'iam': {  # ReplicationDatabaseAuthIam
                'api_key': 'testString',
            },
        }
        database = {  # ReplicationDatabase
            'auth': auth,
            'headers': {},
            'url': 'testString',
        }
        user_ctx = {  # UserContext
            'db': 'testString',
            'name': 'testString',
            'roles': ['_reader'],
        }
        # JSON-style fixture for a ReplicationDocument model; the same
        # database fixture serves as both replication source and target.
        fixture = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions,
            '_revs_info': [rev_status],
            'cancel': True,
            'checkpoint_interval': 0,
            'connection_timeout': 0,
            'continuous': False,
            'create_target': False,
            'create_target_params': create_target_params,
            'doc_ids': ['testString'],
            'filter': 'testString',
            'http_connections': 1,
            'query_params': {},
            'retries_per_request': 0,
            'selector': {},
            'since_seq': 'testString',
            'socket_options': 'testString',
            'source': database,
            'source_proxy': 'testString',
            'target': database,
            'target_proxy': 'testString',
            'use_checkpoints': True,
            'user_ctx': user_ctx,
            'worker_batch_size': 1,
            'worker_processes': 1,
            'foo': 'testString',
        }
        # Deserialize the fixture into a model instance.
        model = ReplicationDocument.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = ReplicationDocument(**ReplicationDocument.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
        # Exercise the dynamic-property accessors: clearing then restoring
        # the additional ('foo') property must round-trip exactly.
        model.set_properties({})
        assert model.get_properties() == {}
        extra = {'foo': 'testString'}
        model.set_properties(extra)
        assert model.get_properties() == extra
class TestModel_Revisions():
    """
    Test Class for Revisions
    """
    def test_revisions_serialization(self):
        """
        Test serialization/deserialization for Revisions
        """
        # JSON-style fixture for a Revisions model.
        fixture = {
            'ids': ['testString'],
            'start': 1,
        }
        # Deserialize the fixture into a model instance.
        model = Revisions.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = Revisions(**Revisions.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_RevsDiff():
    """
    Test Class for RevsDiff
    """
    def test_revs_diff_serialization(self):
        """
        Test serialization/deserialization for RevsDiff
        """
        # JSON-style fixture for a RevsDiff model.
        fixture = {
            'missing': ['testString'],
            'possible_ancestors': ['testString'],
        }
        # Deserialize the fixture into a model instance.
        model = RevsDiff.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = RevsDiff(**RevsDiff.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_SchedulerDocsResult():
    """
    Test Class for SchedulerDocsResult
    """
    def test_scheduler_docs_result_serialization(self):
        """
        Test serialization/deserialization for SchedulerDocsResult
        """
        # Nested SchedulerInfo fixture embedded in each scheduler document.
        info = {  # SchedulerInfo
            'changes_pending': 0,
            'checkpointed_source_seq': 'testString',
            'doc_write_failures': 0,
            'docs_read': 0,
            'docs_written': 0,
            'error': 'testString',
            'missing_revisions_found': 0,
            'revisions_checked': 0,
            'source_seq': 'testString',
            'through_seq': 'testString',
        }
        document = {  # SchedulerDocument
            'database': 'testString',
            'doc_id': 'testString',
            'error_count': 0,
            'id': 'testString',
            'info': info,
            'last_updated': "2019-01-01T12:00:00Z",
            'node': 'testString',
            'source': 'testString',
            'source_proxy': 'testString',
            'start_time': "2019-01-01T12:00:00Z",
            'state': 'initializing',
            'target': 'testString',
            'target_proxy': 'testString',
        }
        # JSON-style fixture for a SchedulerDocsResult model.
        fixture = {
            'total_rows': 0,
            'docs': [document],
        }
        # Deserialize the fixture into a model instance.
        model = SchedulerDocsResult.from_dict(fixture)
        assert model != False
        # Rebuild a second instance directly from the first one's attribute dict.
        model_twin = SchedulerDocsResult(**SchedulerDocsResult.from_dict(fixture).__dict__)
        # Both construction paths must yield equal models.
        assert model == model_twin
        # Serializing back must reproduce the fixture without data loss.
        assert model.to_dict() == fixture
class TestModel_SchedulerDocument():
    """
    Test Class for SchedulerDocument
    """
    def test_scheduler_document_serialization(self):
        """
        Test serialization/deserialization for SchedulerDocument
        """
        # Nested SchedulerInfo payload referenced by the document.
        info = {
            'changes_pending': 0,
            'checkpointed_source_seq': 'testString',
            'doc_write_failures': 0,
            'docs_read': 0,
            'docs_written': 0,
            'error': 'testString',
            'missing_revisions_found': 0,
            'revisions_checked': 0,
            'source_seq': 'testString',
            'through_seq': 'testString',
        }
        # Source dictionary mirroring the SchedulerDocument schema.
        src = {
            'database': 'testString',
            'doc_id': 'testString',
            'error_count': 0,
            'id': 'testString',
            'info': info,
            'last_updated': '2019-01-01T12:00:00Z',
            'node': 'testString',
            'source': 'testString',
            'source_proxy': 'testString',
            'start_time': '2019-01-01T12:00:00Z',
            'state': 'initializing',
            'target': 'testString',
            'target_proxy': 'testString',
        }
        # dict -> model round trip.
        model = SchedulerDocument.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SchedulerDocument(**SchedulerDocument.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_SchedulerInfo():
    """
    Test Class for SchedulerInfo
    """
    def test_scheduler_info_serialization(self):
        """
        Test serialization/deserialization for SchedulerInfo
        """
        # Source dictionary mirroring the SchedulerInfo schema.
        src = {
            'changes_pending': 0,
            'checkpointed_source_seq': 'testString',
            'doc_write_failures': 0,
            'docs_read': 0,
            'docs_written': 0,
            'error': 'testString',
            'missing_revisions_found': 0,
            'revisions_checked': 0,
            'source_seq': 'testString',
            'through_seq': 'testString',
        }
        # dict -> model round trip.
        model = SchedulerInfo.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SchedulerInfo(**SchedulerInfo.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_SchedulerJob():
    """
    Test Class for SchedulerJob
    """
    def test_scheduler_job_serialization(self):
        """
        Test serialization/deserialization for SchedulerJob
        """
        # Nested SchedulerJobEvent payload placed in the job history.
        event = {
            'reason': 'testString',
            'timestamp': '2019-01-01T12:00:00Z',
            'type': 'testString',
        }
        # Nested SchedulerInfo payload referenced by the job.
        info = {
            'changes_pending': 0,
            'checkpointed_source_seq': 'testString',
            'doc_write_failures': 0,
            'docs_read': 0,
            'docs_written': 0,
            'error': 'testString',
            'missing_revisions_found': 0,
            'revisions_checked': 0,
            'source_seq': 'testString',
            'through_seq': 'testString',
        }
        # Source dictionary mirroring the SchedulerJob schema.
        src = {
            'database': 'testString',
            'doc_id': 'testString',
            'history': [event],
            'id': 'testString',
            'info': info,
            'node': 'testString',
            'pid': 'testString',
            'source': 'testString',
            'start_time': '2019-01-01T12:00:00Z',
            'target': 'testString',
            'user': 'testString',
        }
        # dict -> model round trip.
        model = SchedulerJob.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SchedulerJob(**SchedulerJob.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_SchedulerJobEvent():
    """
    Test Class for SchedulerJobEvent
    """
    def test_scheduler_job_event_serialization(self):
        """
        Test serialization/deserialization for SchedulerJobEvent
        """
        # Source dictionary mirroring the SchedulerJobEvent schema.
        src = {
            'reason': 'testString',
            'timestamp': '2019-01-01T12:00:00Z',
            'type': 'testString',
        }
        # dict -> model round trip.
        model = SchedulerJobEvent.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SchedulerJobEvent(**SchedulerJobEvent.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_SchedulerJobsResult():
    """
    Test Class for SchedulerJobsResult
    """
    def test_scheduler_jobs_result_serialization(self):
        """
        Test serialization/deserialization for SchedulerJobsResult
        """
        # Nested SchedulerJobEvent payload placed in the job history.
        event = {
            'reason': 'testString',
            'timestamp': '2019-01-01T12:00:00Z',
            'type': 'testString',
        }
        # Nested SchedulerInfo payload referenced by the job.
        info = {
            'changes_pending': 0,
            'checkpointed_source_seq': 'testString',
            'doc_write_failures': 0,
            'docs_read': 0,
            'docs_written': 0,
            'error': 'testString',
            'missing_revisions_found': 0,
            'revisions_checked': 0,
            'source_seq': 'testString',
            'through_seq': 'testString',
        }
        # Nested SchedulerJob payload placed in the jobs list.
        job = {
            'database': 'testString',
            'doc_id': 'testString',
            'history': [event],
            'id': 'testString',
            'info': info,
            'node': 'testString',
            'pid': 'testString',
            'source': 'testString',
            'start_time': '2019-01-01T12:00:00Z',
            'target': 'testString',
            'user': 'testString',
        }
        # Source dictionary mirroring the SchedulerJobsResult schema.
        src = {
            'total_rows': 0,
            'jobs': [job],
        }
        # dict -> model round trip.
        model = SchedulerJobsResult.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SchedulerJobsResult(**SchedulerJobsResult.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_SearchAnalyzeResult():
    """
    Test Class for SearchAnalyzeResult
    """
    def test_search_analyze_result_serialization(self):
        """
        Test serialization/deserialization for SearchAnalyzeResult
        """
        # Source dictionary mirroring the SearchAnalyzeResult schema.
        src = {
            'tokens': ['testString'],
        }
        # dict -> model round trip.
        model = SearchAnalyzeResult.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SearchAnalyzeResult(**SearchAnalyzeResult.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_SearchIndexDefinition():
    """
    Test Class for SearchIndexDefinition
    """
    def test_search_index_definition_serialization(self):
        """
        Test serialization/deserialization for SearchIndexDefinition
        """
        # NOTE: the original test also built an `analyzer_model` dict that was
        # never referenced; that dead local has been removed.
        # Nested AnalyzerConfiguration payload used as the index analyzer.
        analyzer_configuration = {
            'name': 'classic',
            'stopwords': ['testString'],
            'fields': {},
        }
        # Source dictionary mirroring the SearchIndexDefinition schema.
        src = {
            'analyzer': analyzer_configuration,
            'index': 'testString',
        }
        # dict -> model round trip.
        model = SearchIndexDefinition.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SearchIndexDefinition(**SearchIndexDefinition.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_SearchIndexInfo():
    """
    Test Class for SearchIndexInfo
    """
    def test_search_index_info_serialization(self):
        """
        Test serialization/deserialization for SearchIndexInfo
        """
        # Source dictionary mirroring the SearchIndexInfo schema.
        src = {
            'committed_seq': 26,
            'disk_size': 0,
            'doc_count': 0,
            'doc_del_count': 0,
            'pending_seq': 26,
        }
        # dict -> model round trip.
        model = SearchIndexInfo.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SearchIndexInfo(**SearchIndexInfo.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_SearchInfoResult():
    """
    Test Class for SearchInfoResult
    """
    def test_search_info_result_serialization(self):
        """
        Test serialization/deserialization for SearchInfoResult
        """
        # Nested SearchIndexInfo payload referenced by the result.
        index_info = {
            'committed_seq': 26,
            'disk_size': 0,
            'doc_count': 0,
            'doc_del_count': 0,
            'pending_seq': 26,
        }
        # Source dictionary mirroring the SearchInfoResult schema.
        src = {
            'name': 'testString',
            'search_index': index_info,
        }
        # dict -> model round trip.
        model = SearchInfoResult.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SearchInfoResult(**SearchInfoResult.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_SearchResult():
    """
    Test Class for SearchResult
    """
    def test_search_result_serialization(self):
        """
        Test serialization/deserialization for SearchResult
        """
        # NOTE: the original test also built an `attachment_model` dict that was
        # never referenced (`_attachments` is set to {}); that dead local has
        # been removed.
        # Nested Revisions payload referenced by the document.
        revisions = {
            'ids': ['testString'],
            'start': 1,
        }
        # Nested DocumentRevisionStatus payload referenced by the document.
        revision_status = {
            'rev': 'testString',
            'status': 'available',
        }
        # Nested Document payload placed in each result row.
        document = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions,
            '_revs_info': [revision_status],
            'foo': 'testString',
        }
        # Nested SearchResultRow payload placed in the rows list.
        row = {
            'doc': document,
            'fields': {},
            'highlights': {},
            'id': 'testString',
        }
        # Nested SearchResultProperties payload placed in the groups list.
        group = {
            'total_rows': 0,
            'bookmark': 'testString',
            'by': 'testString',
            'counts': {},
            'ranges': {},
            'rows': [row],
        }
        # Source dictionary mirroring the SearchResult schema.
        src = {
            'total_rows': 0,
            'bookmark': 'testString',
            'by': 'testString',
            'counts': {},
            'ranges': {},
            'rows': [row],
            'groups': [group],
        }
        # dict -> model round trip.
        model = SearchResult.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SearchResult(**SearchResult.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_SearchResultProperties():
    """
    Test Class for SearchResultProperties
    """
    def test_search_result_properties_serialization(self):
        """
        Test serialization/deserialization for SearchResultProperties
        """
        # NOTE: the original test also built an `attachment_model` dict that was
        # never referenced (`_attachments` is set to {}); that dead local has
        # been removed.
        # Nested Revisions payload referenced by the document.
        revisions = {
            'ids': ['testString'],
            'start': 1,
        }
        # Nested DocumentRevisionStatus payload referenced by the document.
        revision_status = {
            'rev': 'testString',
            'status': 'available',
        }
        # Nested Document payload placed in each result row.
        document = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions,
            '_revs_info': [revision_status],
            'foo': 'testString',
        }
        # Nested SearchResultRow payload placed in the rows list.
        row = {
            'doc': document,
            'fields': {},
            'highlights': {},
            'id': 'testString',
        }
        # Source dictionary mirroring the SearchResultProperties schema.
        src = {
            'total_rows': 0,
            'bookmark': 'testString',
            'by': 'testString',
            'counts': {},
            'ranges': {},
            'rows': [row],
        }
        # dict -> model round trip.
        model = SearchResultProperties.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SearchResultProperties(**SearchResultProperties.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_SearchResultRow():
    """
    Test Class for SearchResultRow
    """
    def test_search_result_row_serialization(self):
        """
        Test serialization/deserialization for SearchResultRow
        """
        # NOTE: the original test also built an `attachment_model` dict that was
        # never referenced (`_attachments` is set to {}); that dead local has
        # been removed.
        # Nested Revisions payload referenced by the document.
        revisions = {
            'ids': ['testString'],
            'start': 1,
        }
        # Nested DocumentRevisionStatus payload referenced by the document.
        revision_status = {
            'rev': 'testString',
            'status': 'available',
        }
        # Nested Document payload carried by the row.
        document = {
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions,
            '_revs_info': [revision_status],
            'foo': 'testString',
        }
        # Source dictionary mirroring the SearchResultRow schema.
        src = {
            'doc': document,
            'fields': {},
            'highlights': {},
            'id': 'testString',
        }
        # dict -> model round trip.
        model = SearchResultRow.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SearchResultRow(**SearchResultRow.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_Security():
    """
    Test Class for Security
    """
    def test_security_serialization(self):
        """
        Test serialization/deserialization for Security
        """
        # Nested SecurityObject payload shared by admins and members.
        security_object = {
            'names': ['testString'],
            'roles': ['testString'],
        }
        # Source dictionary mirroring the Security schema.
        src = {
            'admins': security_object,
            'members': security_object,
            'cloudant': {},
            'couchdb_auth_only': True,
        }
        # dict -> model round trip.
        model = Security.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = Security(**Security.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_SecurityObject():
    """
    Test Class for SecurityObject
    """
    def test_security_object_serialization(self):
        """
        Test serialization/deserialization for SecurityObject
        """
        # Source dictionary mirroring the SecurityObject schema.
        src = {
            'names': ['testString'],
            'roles': ['testString'],
        }
        # dict -> model round trip.
        model = SecurityObject.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SecurityObject(**SecurityObject.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_ServerInformation():
    """
    Test Class for ServerInformation
    """
    def test_server_information_serialization(self):
        """
        Test serialization/deserialization for ServerInformation
        """
        # Nested ServerVendor payload referenced by the information model.
        vendor = {
            'name': 'testString',
            'variant': 'testString',
            'version': 'testString',
        }
        # Source dictionary mirroring the ServerInformation schema.
        src = {
            'couchdb': 'testString',
            'features': ['testString'],
            'vendor': vendor,
            'version': 'testString',
            'features_flags': ['testString'],
        }
        # dict -> model round trip.
        model = ServerInformation.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = ServerInformation(**ServerInformation.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_ServerVendor():
    """
    Test Class for ServerVendor
    """
    def test_server_vendor_serialization(self):
        """
        Test serialization/deserialization for ServerVendor
        """
        # Source dictionary mirroring the ServerVendor schema.
        src = {
            'name': 'testString',
            'variant': 'testString',
            'version': 'testString',
        }
        # dict -> model round trip.
        model = ServerVendor.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = ServerVendor(**ServerVendor.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_SessionAuthentication():
    """
    Test Class for SessionAuthentication
    """
    def test_session_authentication_serialization(self):
        """
        Test serialization/deserialization for SessionAuthentication
        """
        # Source dictionary mirroring the SessionAuthentication schema.
        src = {
            'authenticated': 'testString',
            'authentication_db': 'testString',
            'authentication_handlers': ['testString'],
        }
        # dict -> model round trip.
        model = SessionAuthentication.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SessionAuthentication(**SessionAuthentication.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_SessionInformation():
    """
    Test Class for SessionInformation
    """
    def test_session_information_serialization(self):
        """
        Test serialization/deserialization for SessionInformation
        """
        # Nested SessionAuthentication payload referenced by the session.
        authentication = {
            'authenticated': 'testString',
            'authentication_db': 'testString',
            'authentication_handlers': ['testString'],
        }
        # Nested UserContext payload referenced by the session.
        user_context = {
            'db': 'testString',
            'name': 'testString',
            'roles': ['_reader'],
        }
        # Source dictionary mirroring the SessionInformation schema.
        src = {
            'ok': True,
            'info': authentication,
            'userCtx': user_context,
        }
        # dict -> model round trip.
        model = SessionInformation.from_dict(src)
        assert model != False
        # Rebuild a second instance through the constructor and compare.
        clone = SessionInformation(**SessionInformation.from_dict(src).__dict__)
        assert model == clone
        # model -> dict must reproduce the source without data loss.
        assert model.to_dict() == src
class TestModel_ShardsInformation:
    """Tests for the ShardsInformation model."""
    def test_shards_information_serialization(self):
        """Round-trip ShardsInformation through from_dict/to_dict."""
        # Minimal payload: an empty shards mapping.
        source_json = {'shards': {}}
        model = ShardsInformation.from_dict(source_json)
        assert model != False
        # Rebuild a second instance from the first one's internal state.
        twin = ShardsInformation(**ShardsInformation.from_dict(source_json).__dict__)
        assert model == twin
        # Serializing again must reproduce the original payload without loss.
        assert model.to_dict() == source_json
class TestModel_ThroughputInformation:
    """Tests for the ThroughputInformation model."""
    def test_throughput_information_serialization(self):
        """Round-trip ThroughputInformation through from_dict/to_dict."""
        # All four throughput counters at zero.
        source_json = {
            'blocks': 0,
            'query': 0,
            'read': 0,
            'write': 0,
        }
        model = ThroughputInformation.from_dict(source_json)
        assert model != False
        # Rebuild a second instance from the first one's internal state.
        twin = ThroughputInformation(**ThroughputInformation.from_dict(source_json).__dict__)
        assert model == twin
        # Serializing again must reproduce the original payload without loss.
        assert model.to_dict() == source_json
class TestModel_UpInformation:
    """Tests for the UpInformation model."""
    def test_up_information_serialization(self):
        """Round-trip UpInformation through from_dict/to_dict."""
        source_json = {
            'seeds': {'foo': 'bar'},
            'status': 'maintenance_mode',
        }
        model = UpInformation.from_dict(source_json)
        assert model != False
        # Rebuild a second instance from the first one's internal state.
        twin = UpInformation(**UpInformation.from_dict(source_json).__dict__)
        assert model == twin
        # Serializing again must reproduce the original payload without loss.
        assert model.to_dict() == source_json
class TestModel_UserContext:
    """Tests for the UserContext model."""
    def test_user_context_serialization(self):
        """Round-trip UserContext through from_dict/to_dict."""
        source_json = {
            'db': 'testString',
            'name': 'testString',
            'roles': ['_reader'],
        }
        model = UserContext.from_dict(source_json)
        assert model != False
        # Rebuild a second instance from the first one's internal state.
        twin = UserContext(**UserContext.from_dict(source_json).__dict__)
        assert model == twin
        # Serializing again must reproduce the original payload without loss.
        assert model.to_dict() == source_json
class TestModel_UuidsResult:
    """Tests for the UuidsResult model."""
    def test_uuids_result_serialization(self):
        """Round-trip UuidsResult through from_dict/to_dict."""
        source_json = {'uuids': ['testString']}
        model = UuidsResult.from_dict(source_json)
        assert model != False
        # Rebuild a second instance from the first one's internal state.
        twin = UuidsResult(**UuidsResult.from_dict(source_json).__dict__)
        assert model == twin
        # Serializing again must reproduce the original payload without loss.
        assert model.to_dict() == source_json
class TestModel_ViewQueriesResult:
    """Tests for the ViewQueriesResult model."""
    def test_view_queries_result_serialization(self):
        """Round-trip ViewQueriesResult through from_dict/to_dict.

        Note: the generated version of this test also built an Attachment
        dict that was never referenced (the document's '_attachments' is an
        empty mapping); that dead local has been removed.
        """
        # Nested models needed to build the result, as dict literals.
        revisions = {  # Revisions
            'ids': ['testString'],
            'start': 1,
        }
        rev_status = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }
        document = {  # Document
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions,
            '_revs_info': [rev_status],
            'foo': 'testString',
        }
        row = {  # ViewResultRow
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
            'doc': document,
            'id': 'testString',
            'key': 'testString',
            'value': 'testString',
        }
        view_result = {  # ViewResult
            'total_rows': 0,
            'update_seq': 'testString',
            'rows': [row],
        }
        source_json = {'results': [view_result]}
        # Deserialize, rebuild from internal state, and compare.
        model = ViewQueriesResult.from_dict(source_json)
        assert model != False
        twin = ViewQueriesResult(**ViewQueriesResult.from_dict(source_json).__dict__)
        assert model == twin
        # Serializing again must reproduce the original payload without loss.
        assert model.to_dict() == source_json
class TestModel_ViewQuery:
    """Tests for the ViewQuery model."""
    def test_view_query_serialization(self):
        """Round-trip ViewQuery through from_dict/to_dict."""
        # Full query payload expressed as a single literal.
        source_json = {
            'att_encoding_info': False,
            'attachments': False,
            'conflicts': False,
            'descending': False,
            'include_docs': False,
            'inclusive_end': True,
            'limit': 0,
            'skip': 0,
            'update_seq': False,
            'endkey': 'testString',
            'endkey_docid': 'testString',
            'group': False,
            'group_level': 1,
            'key': 'testString',
            'keys': ['testString'],
            'reduce': True,
            'stable': False,
            'startkey': 'testString',
            'startkey_docid': 'testString',
            'update': 'true',
        }
        model = ViewQuery.from_dict(source_json)
        assert model != False
        # Rebuild a second instance from the first one's internal state.
        twin = ViewQuery(**ViewQuery.from_dict(source_json).__dict__)
        assert model == twin
        # Serializing again must reproduce the original payload without loss.
        assert model.to_dict() == source_json
class TestModel_ViewResult:
    """Tests for the ViewResult model."""
    def test_view_result_serialization(self):
        """Round-trip ViewResult through from_dict/to_dict.

        Note: the generated version of this test also built an Attachment
        dict that was never referenced (the document's '_attachments' is an
        empty mapping); that dead local has been removed.
        """
        # Nested models needed to build the result, as dict literals.
        revisions = {  # Revisions
            'ids': ['testString'],
            'start': 1,
        }
        rev_status = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }
        document = {  # Document
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions,
            '_revs_info': [rev_status],
            'foo': 'testString',
        }
        row = {  # ViewResultRow
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
            'doc': document,
            'id': 'testString',
            'key': 'testString',
            'value': 'testString',
        }
        source_json = {
            'total_rows': 0,
            'update_seq': 'testString',
            'rows': [row],
        }
        # Deserialize, rebuild from internal state, and compare.
        model = ViewResult.from_dict(source_json)
        assert model != False
        twin = ViewResult(**ViewResult.from_dict(source_json).__dict__)
        assert model == twin
        # Serializing again must reproduce the original payload without loss.
        assert model.to_dict() == source_json
class TestModel_ViewResultRow:
    """Tests for the ViewResultRow model."""
    def test_view_result_row_serialization(self):
        """Round-trip ViewResultRow through from_dict/to_dict.

        Note: the generated version of this test also built an Attachment
        dict that was never referenced (the document's '_attachments' is an
        empty mapping); that dead local has been removed.
        """
        # Nested models needed to build the row, as dict literals.
        revisions = {  # Revisions
            'ids': ['testString'],
            'start': 1,
        }
        rev_status = {  # DocumentRevisionStatus
            'rev': 'testString',
            'status': 'available',
        }
        document = {  # Document
            '_attachments': {},
            '_conflicts': ['testString'],
            '_deleted': True,
            '_deleted_conflicts': ['testString'],
            '_id': 'testString',
            '_local_seq': 'testString',
            '_rev': 'testString',
            '_revisions': revisions,
            '_revs_info': [rev_status],
            'foo': 'testString',
        }
        source_json = {
            'caused_by': 'testString',
            'error': 'testString',
            'reason': 'testString',
            'doc': document,
            'id': 'testString',
            'key': 'testString',
            'value': 'testString',
        }
        # Deserialize, rebuild from internal state, and compare.
        model = ViewResultRow.from_dict(source_json)
        assert model != False
        twin = ViewResultRow(**ViewResultRow.from_dict(source_json).__dict__)
        assert model == twin
        # Serializing again must reproduce the original payload without loss.
        assert model.to_dict() == source_json
class TestModel_GeoJsonGeometry:
    """Tests for the GeoJsonGeometry model."""
    def test_geo_json_geometry_serialization(self):
        """Round-trip GeoJsonGeometry through from_dict/to_dict."""
        source_json = {
            'type': 'Point',
            'coordinates': ['testString'],
        }
        model = GeoJsonGeometry.from_dict(source_json)
        assert model != False
        # Rebuild a second instance from the first one's internal state.
        twin = GeoJsonGeometry(**GeoJsonGeometry.from_dict(source_json).__dict__)
        assert model == twin
        # Serializing again must reproduce the original payload without loss.
        assert model.to_dict() == source_json
class TestModel_GeoJsonGeometryCollection:
    """Tests for the GeoJsonGeometryCollection model."""
    def test_geo_json_geometry_collection_serialization(self):
        """Round-trip GeoJsonGeometryCollection through from_dict/to_dict."""
        # Nested geometry, expressed as a dict literal.
        geometry = {  # GeoJsonGeometry
            'type': 'Point',
            'coordinates': ['testString'],
        }
        source_json = {
            'type': 'Point',
            'geometries': [geometry],
        }
        model = GeoJsonGeometryCollection.from_dict(source_json)
        assert model != False
        # Rebuild a second instance from the first one's internal state.
        twin = GeoJsonGeometryCollection(
            **GeoJsonGeometryCollection.from_dict(source_json).__dict__
        )
        assert model == twin
        # Serializing again must reproduce the original payload without loss.
        assert model.to_dict() == source_json
# endregion
##############################################################################
# End of Model Tests
##############################################################################
|
from collections import namedtuple
from functools import update_wrapper
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
List,
Optional,
Union,
cast,
overload,
)
from dagster import check
from dagster.core.definitions.config import is_callable_valid_config_arg
from dagster.core.definitions.configurable import AnonymousConfigurableDefinition
from dagster.core.errors import (
DagsterInvalidDefinitionError,
DagsterInvalidInvocationError,
DagsterUnknownResourceError,
)
from dagster.seven import funcsigs
from dagster.utils.backcompat import experimental_arg_warning
from ..decorator_utils import (
get_function_params,
is_required_param,
positional_arg_name_list,
validate_expected_params,
)
from .definition_config_schema import (
IDefinitionConfigSchema,
convert_user_facing_definition_config_schema,
)
from .resource_invocation import resource_invocation_result
if TYPE_CHECKING:
from dagster.core.execution.resources_init import InitResourceContext
def is_context_provided(params: List[funcsigs.Parameter]) -> bool:
    # A resource function is considered context-aware when it declares at
    # least one positional parameter.
    return bool(params)
class ResourceDefinition(AnonymousConfigurableDefinition):
    """Core class for defining resources.
    Resources are scoped ways to make external resources (like database connections) available
    during job execution and to clean up after execution resolves.
    If resource_fn yields once rather than returning (in the manner of functions decorable with
    :py:func:`@contextlib.contextmanager <python:contextlib.contextmanager>`) then the body of the
    function after the yield will be run after execution resolves, allowing users to write their
    own teardown/cleanup logic.
    Depending on your executor, resources may be instantiated and cleaned up more than once in a
    job execution.
    Args:
        resource_fn (Callable[[InitResourceContext], Any]): User-provided function to instantiate
            the resource, which will be made available to executions keyed on the
            ``context.resources`` object.
        config_schema (Optional[ConfigSchema]): The schema for the config. If set, Dagster will
            check that config provided for the resource matches this schema and fail if it does
            not. If not set, Dagster will accept any config provided for the resource.
        description (Optional[str]): A human-readable description of the resource.
        required_resource_keys: (Optional[Set[str]]) Keys for the resources required by this
            resource. A DagsterInvariantViolationError will be raised during initialization if
            dependencies are cyclic.
        version (Optional[str]): (Experimental) The version of the resource's definition fn. Two
            wrapped resource functions should only have the same version if they produce the same
            resource definition when provided with the same inputs.
    """
    def __init__(
        self,
        resource_fn: Callable[["InitResourceContext"], Any],
        config_schema: Optional[Union[Any, IDefinitionConfigSchema]] = None,
        description: Optional[str] = None,
        required_resource_keys: Optional[AbstractSet[str]] = None,
        version: Optional[str] = None,
    ):
        self._resource_fn = check.callable_param(resource_fn, "resource_fn")
        # Normalize whatever the user passed (raw dict, Field, etc.) into an
        # IDefinitionConfigSchema.
        self._config_schema = convert_user_facing_definition_config_schema(config_schema)
        self._description = check.opt_str_param(description, "description")
        self._required_resource_keys = check.opt_set_param(
            required_resource_keys, "required_resource_keys"
        )
        self._version = check.opt_str_param(version, "version")
        if version:
            # Resource versioning is still experimental; warn whenever it is used.
            experimental_arg_warning("version", "ResourceDefinition.__init__")
    @property
    def resource_fn(self) -> Callable[..., Any]:
        # The user-provided initialization function.
        return self._resource_fn
    @property
    def config_schema(self) -> IDefinitionConfigSchema:
        # The normalized config schema for this resource.
        return self._config_schema
    @property
    def description(self) -> Optional[str]:
        # Human-readable description, if one was provided.
        return self._description
    @property
    def version(self) -> Optional[str]:
        # Experimental definition-function version, if one was provided.
        return self._version
    @property
    def required_resource_keys(self) -> AbstractSet[str]:
        # Keys of the other resources this resource depends on.
        return self._required_resource_keys
    @staticmethod
    def none_resource(description: Optional[str] = None) -> "ResourceDefinition":
        """A helper function that returns a none resource.
        Args:
            description ([Optional[str]]): The description of the resource. Defaults to None.
        Returns:
            [ResourceDefinition]: A resource that does nothing.
        """
        return ResourceDefinition.hardcoded_resource(value=None, description=description)
    @staticmethod
    def hardcoded_resource(value: Any, description: Optional[str] = None) -> "ResourceDefinition":
        """A helper function that creates a ``ResourceDefinition`` with a hardcoded object.
        Args:
            value (Any): The value that will be accessible via context.resources.resource_name.
            description ([Optional[str]]): The description of the resource. Defaults to None.
        Returns:
            [ResourceDefinition]: A hardcoded resource.
        """
        return ResourceDefinition(resource_fn=lambda _init_context: value, description=description)
    @staticmethod
    def mock_resource(description: Optional[str] = None) -> "ResourceDefinition":
        """A helper function that creates a ``ResourceDefinition`` which wraps a ``mock.MagicMock``.
        Args:
            description ([Optional[str]]): The description of the resource. Defaults to None.
        Returns:
            [ResourceDefinition]: A resource that creates the magic methods automatically and helps
            you mock existing resources.
        """
        from unittest import mock
        return ResourceDefinition(
            resource_fn=lambda _init_context: mock.MagicMock(), description=description
        )
    @staticmethod
    def string_resource(description: Optional[str] = None) -> "ResourceDefinition":
        """A helper that returns a resource whose value is its own (string) config."""
        return ResourceDefinition(
            resource_fn=lambda init_context: init_context.resource_config,
            config_schema=str,
            description=description,
        )
    def copy_for_configured(
        self, description: Optional[str], config_schema: IDefinitionConfigSchema, _
    ) -> "ResourceDefinition":
        # Hook for the ``configured`` API: produce a copy of this definition with
        # a new config schema and (optionally) a new description.
        return ResourceDefinition(
            config_schema=config_schema,
            description=description or self.description,
            resource_fn=self.resource_fn,
            required_resource_keys=self.required_resource_keys,
            version=self.version,
        )
    def __call__(self, *args, **kwargs):
        """Invoke the underlying resource function directly (e.g. in tests).
        If the resource function declares a context parameter, exactly one
        argument — the context, passed positionally or by its declared name —
        must be supplied; otherwise no arguments are accepted.
        """
        from dagster.core.execution.resources_init import InitResourceContext
        context_provided = is_context_provided(get_function_params(self.resource_fn))
        if context_provided:
            # Context-aware resource fn: require exactly one argument.
            if len(args) + len(kwargs) == 0:
                raise DagsterInvalidInvocationError(
                    "Resource initialization function has context argument, but no context was provided "
                    "when invoking."
                )
            if len(args) + len(kwargs) > 1:
                raise DagsterInvalidInvocationError(
                    "Initialization of resource received multiple arguments. Only a first "
                    "positional context parameter should be provided when invoking."
                )
            # The context may be passed under whatever name the fn declared for it.
            context_param_name = get_function_params(self.resource_fn)[0].name
            if args:
                check.opt_inst_param(args[0], context_param_name, InitResourceContext)
                return resource_invocation_result(self, args[0])
            else:
                if context_param_name not in kwargs:
                    raise DagsterInvalidInvocationError(
                        f"Resource initialization expected argument '{context_param_name}'."
                    )
                check.opt_inst_param(
                    kwargs[context_param_name], context_param_name, InitResourceContext
                )
                return resource_invocation_result(self, kwargs[context_param_name])
        else:
            # No context parameter declared: invoke with no context at all.
            return resource_invocation_result(self, None)
class _ResourceDecoratorCallable:
    """Callable helper backing the ``@resource`` decorator.

    Stores the decorator arguments and, when applied to a resource function,
    validates its signature and wraps it in a ``ResourceDefinition``.
    """
    def __init__(
        self,
        config_schema: Optional[Dict[str, Any]] = None,
        description: Optional[str] = None,
        required_resource_keys: Optional[AbstractSet[str]] = None,
        version: Optional[str] = None,
    ):
        self.config_schema = config_schema  # checked by underlying definition
        self.description = check.opt_str_param(description, "description")
        self.version = check.opt_str_param(version, "version")
        self.required_resource_keys = check.opt_set_param(
            required_resource_keys, "required_resource_keys"
        )
    def __call__(self, resource_fn: Callable[["InitResourceContext"], Any]):
        """Validate ``resource_fn``'s signature and wrap it in a ResourceDefinition."""
        check.callable_param(resource_fn, "resource_fn")
        # "*" is a wildcard: any name is accepted for the (optional) context param.
        any_name = ["*"] if is_context_provided(get_function_params(resource_fn)) else []
        params = get_function_params(resource_fn)
        missing_positional = validate_expected_params(params, any_name)
        if missing_positional:
            raise DagsterInvalidDefinitionError(
                f"@resource decorated function '{resource_fn.__name__}' expects a single "
                "positional argument."
            )
        # Any parameters beyond the (optional) context must not be required.
        extras = params[len(any_name) :]
        required_extras = list(filter(is_required_param, extras))
        if required_extras:
            # NOTE: single quotes in the joined separator below — reusing double
            # quotes inside a double-quoted f-string is a SyntaxError before
            # Python 3.12.
            raise DagsterInvalidDefinitionError(
                f"@resource decorated function '{resource_fn.__name__}' expects only a single positional required argument. "
                f"Got required extra params {', '.join(positional_arg_name_list(required_extras))}"
            )
        resource_def = ResourceDefinition(
            resource_fn=resource_fn,
            config_schema=self.config_schema,
            description=self.description,
            version=self.version,
            required_resource_keys=self.required_resource_keys,
        )
        # Preserve the wrapped function's metadata (name, docstring) on the definition.
        update_wrapper(resource_def, wrapped=resource_fn)
        return resource_def
@overload
def resource(config_schema=Callable[["InitResourceContext"], Any]) -> ResourceDefinition:
...
# Overload for the parenthesized form (``@resource(...)``): returns a decorator
# that maps a resource function to a ResourceDefinition.
@overload
def resource(
    config_schema: Optional[Dict[str, Any]] = None,
    description: Optional[str] = None,
    required_resource_keys: Optional[AbstractSet[str]] = None,
    version=None,
) -> Callable[[Callable[["InitResourceContext"], Any]], "ResourceDefinition"]:
    ...
def resource(
    config_schema: Optional[
        Union[Callable[["InitResourceContext"], Any], IDefinitionConfigSchema, Dict[str, Any]]
    ] = None,
    description: Optional[str] = None,
    required_resource_keys: Optional[AbstractSet[str]] = None,
    version=None,
) -> Union[
    Callable[[Callable[["InitResourceContext"], Any]], "ResourceDefinition"], "ResourceDefinition"
]:
    """Define a resource.
    The decorated function should accept an :py:class:`InitResourceContext` and return an
    instance of the resource; it becomes the ``resource_fn`` of the underlying
    :py:class:`ResourceDefinition`.
    If the decorated function yields once rather than returning (in the manner of functions
    decorable with :py:func:`@contextlib.contextmanager <python:contextlib.contextmanager>`),
    the code after the yield runs once execution resolves, so users can put their own
    teardown/cleanup logic there.
    Args:
        config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data
            available in `init_context.resource_config`. If not set, Dagster will accept any
            config provided.
        description(Optional[str]): A human-readable description of the resource.
        version (Optional[str]): (Experimental) The version of a resource function. Two wrapped
            resource functions should only have the same version if they produce the same
            resource definition when provided with the same inputs.
        required_resource_keys (Optional[Set[str]]): Keys for the resources required by this
            resource.
    """
    # Bare usage (``@resource`` without parentheses): the config_schema slot
    # actually holds the decorated function itself.
    if callable(config_schema) and not is_callable_valid_config_arg(config_schema):
        return _ResourceDecoratorCallable()(config_schema)

    def _decorate(resource_fn: Callable[["InitResourceContext"], Any]) -> "ResourceDefinition":
        # Parenthesized usage: forward the captured decorator arguments.
        return _ResourceDecoratorCallable(
            config_schema=cast(Optional[Dict[str, Any]], config_schema),
            description=description,
            required_resource_keys=required_resource_keys,
            version=version,
        )(resource_fn)

    return _decorate
class Resources:
    """Marker ("tag") type for the dynamically built resources object.

    ``ScopedResourcesBuilder.build()`` creates a namedtuple type at runtime whose
    attributes are dynamic, which is incompatible with static type annotations on
    its own; this class exists purely so the built object can be typed as
    ``Resources``.
    """
class IContainsGenerator:
    """Marker type flagging that at least one resource was yielded from a
    generator, and that the resources object may therefore require teardown."""
class ScopedResourcesBuilder(
    namedtuple("ScopedResourcesBuilder", "resource_instance_dict contains_generator")
):
    """There are concepts in the codebase (e.g. ops, system storage) that receive
    only the resources that they have specified in required_resource_keys.
    ScopedResourcesBuilder is responsible for dynamically building a class with
    only those required resources and returning an instance of that class."""
    def __new__(
        cls,
        resource_instance_dict: Optional[Dict[str, Any]] = None,
        contains_generator: Optional[bool] = False,
    ):
        # namedtuples are immutable, so argument validation happens in __new__
        # rather than __init__.
        return super(ScopedResourcesBuilder, cls).__new__(
            cls,
            resource_instance_dict=check.opt_dict_param(
                resource_instance_dict, "resource_instance_dict", key_type=str
            ),
            contains_generator=contains_generator,
        )
    def build(self, required_resource_keys: Optional[AbstractSet[str]]) -> Resources:
        """We dynamically create a type that has the resource keys as properties, to enable dotting into
        the resources from a context.
        For example, given:
        resources = {'foo': <some resource>, 'bar': <some other resource>}
        then this will create the type Resource(namedtuple('foo bar'))
        and then binds the specified resources into an instance of this object, which can be consumed
        as, e.g., context.resources.foo.
        """
        required_resource_keys = check.opt_set_param(
            required_resource_keys, "required_resource_keys", of_type=str
        )
        # it is possible that the surrounding context does NOT have the required resource keys
        # because we are building a context for steps that we are not going to execute (e.g. in the
        # resume/retry case, in order to generate copy intermediates events)
        resource_instance_dict = {
            key: self.resource_instance_dict[key]
            for key in required_resource_keys
            if key in self.resource_instance_dict
        }
        # If any of the resources are generators, add the IContainsGenerator subclass to flag that
        # this is the case.
        if self.contains_generator:
            # __getattr__ only fires for attributes the namedtuple does NOT have, so
            # accessing a resource that was not requested raises a descriptive
            # DagsterUnknownResourceError instead of a bare AttributeError.
            class _ScopedResourcesContainsGenerator(
                namedtuple("_ScopedResourcesContainsGenerator", list(resource_instance_dict.keys())), # type: ignore[misc]
                Resources,
                IContainsGenerator,
            ):
                def __getattr__(self, attr):
                    raise DagsterUnknownResourceError(attr)
            return _ScopedResourcesContainsGenerator(**resource_instance_dict) # type: ignore[call-arg]
        else:
            class _ScopedResources(
                namedtuple("_ScopedResources", list(resource_instance_dict.keys())), # type: ignore[misc]
                Resources,
            ):
                def __getattr__(self, attr):
                    raise DagsterUnknownResourceError(attr)
            return _ScopedResources(**resource_instance_dict) # type: ignore[call-arg]
def make_values_resource(**kwargs: Any) -> ResourceDefinition:
    """Build a ``ResourceDefinition`` that simply hands back its own config.
    Useful for sharing user-defined values between ops.
    Args:
        **kwargs: Arbitrary keyword arguments forwarded as the config schema of
            the returned resource definition. When empty, any config is accepted.
    For example:
    .. code-block:: python
        @op(required_resource_keys={"globals"})
        def my_op(context):
            print(context.resources.globals["my_str_var"])
        @job(resource_defs={"globals": make_values_resource(my_str_var=str, my_int_var=int)})
        def my_job():
            my_op()
    Returns:
        ResourceDefinition: A resource that passes in user-defined values.
    """
    schema = kwargs if kwargs else Any
    return ResourceDefinition(
        resource_fn=lambda ctx: ctx.resource_config,
        config_schema=schema,
    )
| from collections import namedtuple
from functools import update_wrapper
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
List,
Optional,
Union,
cast,
overload,
)
from dagster import check
from dagster.core.definitions.config import is_callable_valid_config_arg
from dagster.core.definitions.configurable import AnonymousConfigurableDefinition
from dagster.core.errors import (
DagsterInvalidDefinitionError,
DagsterInvalidInvocationError,
DagsterUnknownResourceError,
)
from dagster.seven import funcsigs
from dagster.utils.backcompat import experimental_arg_warning
from ..decorator_utils import (
get_function_params,
is_required_param,
positional_arg_name_list,
validate_expected_params,
)
from .definition_config_schema import (
IDefinitionConfigSchema,
convert_user_facing_definition_config_schema,
)
from .resource_invocation import resource_invocation_result
if TYPE_CHECKING:
from dagster.core.execution.resources_init import InitResourceContext
def is_context_provided(params: List[funcsigs.Parameter]) -> bool:
    """Return True when the resource function declares at least one parameter,
    which is interpreted as the init-context argument."""
    return len(params) > 0
class ResourceDefinition(AnonymousConfigurableDefinition):
    """Core class for defining resources.
    Resources are scoped ways to make external resources (like database connections) available
    during job execution and to clean up after execution resolves.
    If resource_fn yields once rather than returning (in the manner of functions decorable with
    :py:func:`@contextlib.contextmanager <python:contextlib.contextmanager>`) then the body of the
    function after the yield will be run after execution resolves, allowing users to write their
    own teardown/cleanup logic.
    Depending on your executor, resources may be instantiated and cleaned up more than once in a
    job execution.
    Args:
        resource_fn (Callable[[InitResourceContext], Any]): User-provided function to instantiate
            the resource, which will be made available to executions keyed on the
            ``context.resources`` object.
        config_schema (Optional[ConfigSchema]): The schema for the config. If set, Dagster will check
            that config provided for the resource matches this schema and fail if it does not. If
            not set, Dagster will accept any config provided for the resource.
        description (Optional[str]): A human-readable description of the resource.
        required_resource_keys: (Optional[Set[str]]) Keys for the resources required by this
            resource. A DagsterInvariantViolationError will be raised during initialization if
            dependencies are cyclic.
        version (Optional[str]): (Experimental) The version of the resource's definition fn. Two
            wrapped resource functions should only have the same version if they produce the same
            resource definition when provided with the same inputs.
    """
    def __init__(
        self,
        resource_fn: Callable[["InitResourceContext"], Any],
        config_schema: Optional[Union[Any, IDefinitionConfigSchema]] = None,
        description: Optional[str] = None,
        required_resource_keys: Optional[AbstractSet[str]] = None,
        version: Optional[str] = None,
    ):
        self._resource_fn = check.callable_param(resource_fn, "resource_fn")
        # Normalize whatever the user passed (dict, primitive type, schema object,
        # or None) into an IDefinitionConfigSchema.
        self._config_schema = convert_user_facing_definition_config_schema(config_schema)
        self._description = check.opt_str_param(description, "description")
        self._required_resource_keys = check.opt_set_param(
            required_resource_keys, "required_resource_keys"
        )
        self._version = check.opt_str_param(version, "version")
        if version:
            # Versioning is experimental; emit the standard warning when it is used.
            experimental_arg_warning("version", "ResourceDefinition.__init__")
    @property
    def resource_fn(self) -> Callable[..., Any]:
        """The user-provided function that instantiates the resource."""
        return self._resource_fn
    @property
    def config_schema(self) -> IDefinitionConfigSchema:
        """The normalized config schema for this resource."""
        return self._config_schema
    @property
    def description(self) -> Optional[str]:
        """Human-readable description of the resource, if one was provided."""
        return self._description
    @property
    def version(self) -> Optional[str]:
        """(Experimental) Version of the resource definition function, if any."""
        return self._version
    @property
    def required_resource_keys(self) -> AbstractSet[str]:
        """Keys of the other resources this resource depends on."""
        return self._required_resource_keys
    @staticmethod
    def none_resource(description: Optional[str] = None) -> "ResourceDefinition":
        """A helper function that returns a none resource.
        Args:
            description ([Optional[str]]): The description of the resource. Defaults to None.
        Returns:
            [ResourceDefinition]: A resource that does nothing.
        """
        return ResourceDefinition.hardcoded_resource(value=None, description=description)
    @staticmethod
    def hardcoded_resource(value: Any, description: Optional[str] = None) -> "ResourceDefinition":
        """A helper function that creates a ``ResourceDefinition`` with a hardcoded object.
        Args:
            value (Any): The value that will be accessible via context.resources.resource_name.
            description ([Optional[str]]): The description of the resource. Defaults to None.
        Returns:
            [ResourceDefinition]: A hardcoded resource.
        """
        return ResourceDefinition(resource_fn=lambda _init_context: value, description=description)
    @staticmethod
    def mock_resource(description: Optional[str] = None) -> "ResourceDefinition":
        """A helper function that creates a ``ResourceDefinition`` which wraps a ``mock.MagicMock``.
        Args:
            description ([Optional[str]]): The description of the resource. Defaults to None.
        Returns:
            [ResourceDefinition]: A resource that creates the magic methods automatically and helps
                you mock existing resources.
        """
        from unittest import mock
        return ResourceDefinition(
            resource_fn=lambda _init_context: mock.MagicMock(), description=description
        )
    @staticmethod
    def string_resource(description: Optional[str] = None) -> "ResourceDefinition":
        """A helper function that creates a ``ResourceDefinition`` whose value is the
        string supplied as the resource's config.
        Args:
            description ([Optional[str]]): The description of the resource. Defaults to None.
        Returns:
            [ResourceDefinition]: A resource that accepts a single string as config and
                returns that string as the resource value.
        """
        return ResourceDefinition(
            resource_fn=lambda init_context: init_context.resource_config,
            config_schema=str,
            description=description,
        )
    def copy_for_configured(
        self, description: Optional[str], config_schema: IDefinitionConfigSchema, _
    ) -> "ResourceDefinition":
        # Hook used by the `configured` API: produce an equivalent definition with the
        # new (already-resolved) config schema and an optionally-overridden description.
        return ResourceDefinition(
            config_schema=config_schema,
            description=description or self.description,
            resource_fn=self.resource_fn,
            required_resource_keys=self.required_resource_keys,
            version=self.version,
        )
    def __call__(self, *args, **kwargs):
        """Directly invoke the underlying resource function (e.g. in tests).
        If the resource function declares a context parameter, exactly one argument --
        an :py:class:`InitResourceContext` or ``None`` -- must be supplied, either
        positionally or by the parameter's declared name.
        Raises:
            DagsterInvalidInvocationError: If the wrong number of arguments is passed,
                or the expected keyword argument is missing.
        """
        from dagster.core.execution.resources_init import InitResourceContext
        context_provided = is_context_provided(get_function_params(self.resource_fn))
        if context_provided:
            if len(args) + len(kwargs) == 0:
                raise DagsterInvalidInvocationError(
                    "Resource initialization function has context argument, but no context was provided "
                    "when invoking."
                )
            if len(args) + len(kwargs) > 1:
                raise DagsterInvalidInvocationError(
                    "Initialization of resource received multiple arguments. Only a first "
                    "positional context parameter should be provided when invoking."
                )
            # The context may be passed positionally or under its declared name.
            context_param_name = get_function_params(self.resource_fn)[0].name
            if args:
                check.opt_inst_param(args[0], context_param_name, InitResourceContext)
                return resource_invocation_result(self, args[0])
            else:
                if context_param_name not in kwargs:
                    raise DagsterInvalidInvocationError(
                        f"Resource initialization expected argument '{context_param_name}'."
                    )
                check.opt_inst_param(
                    kwargs[context_param_name], context_param_name, InitResourceContext
                )
                return resource_invocation_result(self, kwargs[context_param_name])
        else:
            # No context parameter declared: invoke with no context at all.
            return resource_invocation_result(self, None)
class _ResourceDecoratorCallable:
    """Callable helper backing the ``@resource`` decorator.
    Stores the decorator arguments, then -- when applied to the decorated
    function -- validates its signature and wraps it in a ResourceDefinition.
    """
    def __init__(
        self,
        config_schema: Optional[Dict[str, Any]] = None,
        description: Optional[str] = None,
        required_resource_keys: Optional[AbstractSet[str]] = None,
        version: Optional[str] = None,
    ):
        self.config_schema = config_schema  # checked by underlying definition
        self.description = check.opt_str_param(description, "description")
        self.version = check.opt_str_param(version, "version")
        self.required_resource_keys = check.opt_set_param(
            required_resource_keys, "required_resource_keys"
        )
    def __call__(self, resource_fn: Callable[["InitResourceContext"], Any]):
        """Validate *resource_fn*'s signature and build a ResourceDefinition from it.
        Raises:
            DagsterInvalidDefinitionError: If the function is missing its single
                positional (context) argument or declares extra required params.
        """
        check.callable_param(resource_fn, "resource_fn")
        # "*" acts as a wildcard: any single positional parameter name is accepted
        # as the context argument.
        any_name = ["*"] if is_context_provided(get_function_params(resource_fn)) else []
        params = get_function_params(resource_fn)
        missing_positional = validate_expected_params(params, any_name)
        if missing_positional:
            raise DagsterInvalidDefinitionError(
                f"@resource decorated function '{resource_fn.__name__}' expects a single "
                "positional argument."
            )
        # Any parameters beyond the (optional) context must have defaults.
        extras = params[len(any_name) :]
        required_extras = list(filter(is_required_param, extras))
        if required_extras:
            raise DagsterInvalidDefinitionError(
                f"@resource decorated function '{resource_fn.__name__}' expects only a single positional required argument. "
                f"Got required extra params {', '.join(positional_arg_name_list(required_extras))}"
            )
        resource_def = ResourceDefinition(
            resource_fn=resource_fn,
            config_schema=self.config_schema,
            description=self.description,
            version=self.version,
            required_resource_keys=self.required_resource_keys,
        )
        # Preserve the wrapped function's metadata (name, docstring) on the definition.
        update_wrapper(resource_def, wrapped=resource_fn)
        return resource_def
# Overload for the bare-decorator form: ``@resource`` applied directly to the
# resource function returns a ResourceDefinition.
# Fixed: the original wrote ``config_schema=Callable[...]`` -- using the Callable
# type as a *default value* instead of a type annotation -- which made the overload
# advertise an optional parameter with a nonsense default.
@overload
def resource(config_schema: Callable[["InitResourceContext"], Any]) -> ResourceDefinition:
    ...
# Overload for the parenthesized form: ``@resource(...)`` returns a decorator that
# maps a resource function to a ResourceDefinition.
@overload
def resource(
    config_schema: Optional[Dict[str, Any]] = None,
    description: Optional[str] = None,
    required_resource_keys: Optional[AbstractSet[str]] = None,
    version: Optional[str] = None,
) -> Callable[[Callable[["InitResourceContext"], Any]], "ResourceDefinition"]:
    ...
def resource(
    config_schema: Optional[
        Union[Callable[["InitResourceContext"], Any], IDefinitionConfigSchema, Dict[str, Any]]
    ] = None,
    description: Optional[str] = None,
    required_resource_keys: Optional[AbstractSet[str]] = None,
    version=None,
) -> Union[
    Callable[[Callable[["InitResourceContext"], Any]], "ResourceDefinition"], "ResourceDefinition"
]:
    """Define a resource.
    The decorated function should accept an :py:class:`InitResourceContext` and return an
    instance of the resource; it becomes the ``resource_fn`` of the resulting
    :py:class:`ResourceDefinition`.
    If the decorated function yields once rather than returning (in the manner of functions
    decorable with :py:func:`@contextlib.contextmanager <python:contextlib.contextmanager>`),
    the code after the yield runs once execution resolves, enabling user-defined
    teardown/cleanup logic.
    Args:
        config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data
            is available in `init_context.resource_config`. If not set, Dagster will accept any
            config provided.
        description(Optional[str]): A human-readable description of the resource.
        version (Optional[str]): (Experimental) The version of a resource function. Two wrapped
            resource functions should only have the same version if they produce the same resource
            definition when provided with the same inputs.
        required_resource_keys (Optional[Set[str]]): Keys for the resources required by this resource.
    """
    # Bare-decorator form (@resource without parentheses): in that case the
    # "config_schema" argument is actually the decorated resource function itself.
    if callable(config_schema) and not is_callable_valid_config_arg(config_schema):
        return _ResourceDecoratorCallable()(config_schema)
    def _decorator(resource_fn: Callable[["InitResourceContext"], Any]) -> "ResourceDefinition":
        wrapper = _ResourceDecoratorCallable(
            config_schema=cast(Optional[Dict[str, Any]], config_schema),
            description=description,
            required_resource_keys=required_resource_keys,
            version=version,
        )
        return wrapper(resource_fn)
    return _decorator
class Resources:
    """Marker base class used to type the dynamically-built namedtuple returned by
    ScopedResourcesBuilder.build(). That namedtuple carries dynamic attributes that
    static type annotations cannot express on their own, so this tag class exists
    purely as a typing workaround."""
class IContainsGenerator:
    """Marker tag mixed in alongside Resources when at least one resource was
    yielded from a generator, signalling that teardown may be required."""
class ScopedResourcesBuilder(
    namedtuple("ScopedResourcesBuilder", "resource_instance_dict contains_generator")
):
    """There are concepts in the codebase (e.g. ops, system storage) that receive
    only the resources that they have specified in required_resource_keys.
    ScopedResourcesBuilder is responsible for dynamically building a class with
    only those required resources and returning an instance of that class."""
    def __new__(
        cls,
        resource_instance_dict: Optional[Dict[str, Any]] = None,
        contains_generator: Optional[bool] = False,
    ):
        # namedtuples are immutable, so argument validation happens in __new__
        # rather than __init__.
        return super(ScopedResourcesBuilder, cls).__new__(
            cls,
            resource_instance_dict=check.opt_dict_param(
                resource_instance_dict, "resource_instance_dict", key_type=str
            ),
            contains_generator=contains_generator,
        )
    def build(self, required_resource_keys: Optional[AbstractSet[str]]) -> Resources:
        """We dynamically create a type that has the resource keys as properties, to enable dotting into
        the resources from a context.
        For example, given:
        resources = {'foo': <some resource>, 'bar': <some other resource>}
        then this will create the type Resource(namedtuple('foo bar'))
        and then binds the specified resources into an instance of this object, which can be consumed
        as, e.g., context.resources.foo.
        """
        required_resource_keys = check.opt_set_param(
            required_resource_keys, "required_resource_keys", of_type=str
        )
        # it is possible that the surrounding context does NOT have the required resource keys
        # because we are building a context for steps that we are not going to execute (e.g. in the
        # resume/retry case, in order to generate copy intermediates events)
        resource_instance_dict = {
            key: self.resource_instance_dict[key]
            for key in required_resource_keys
            if key in self.resource_instance_dict
        }
        # If any of the resources are generators, add the IContainsGenerator subclass to flag that
        # this is the case.
        if self.contains_generator:
            # __getattr__ only fires for attributes the namedtuple does NOT have, so
            # accessing a resource that was not requested raises a descriptive
            # DagsterUnknownResourceError instead of a bare AttributeError.
            class _ScopedResourcesContainsGenerator(
                namedtuple("_ScopedResourcesContainsGenerator", list(resource_instance_dict.keys())), # type: ignore[misc]
                Resources,
                IContainsGenerator,
            ):
                def __getattr__(self, attr):
                    raise DagsterUnknownResourceError(attr)
            return _ScopedResourcesContainsGenerator(**resource_instance_dict) # type: ignore[call-arg]
        else:
            class _ScopedResources(
                namedtuple("_ScopedResources", list(resource_instance_dict.keys())), # type: ignore[misc]
                Resources,
            ):
                def __getattr__(self, attr):
                    raise DagsterUnknownResourceError(attr)
            return _ScopedResources(**resource_instance_dict) # type: ignore[call-arg]
def make_values_resource(**kwargs: Any) -> ResourceDefinition:
    """Build a ``ResourceDefinition`` that simply hands back its own config.
    Useful for sharing user-defined values between ops.
    Args:
        **kwargs: Arbitrary keyword arguments forwarded as the config schema of
            the returned resource definition. When empty, any config is accepted.
    For example:
    .. code-block:: python
        @op(required_resource_keys={"globals"})
        def my_op(context):
            print(context.resources.globals["my_str_var"])
        @job(resource_defs={"globals": make_values_resource(my_str_var=str, my_int_var=int)})
        def my_job():
            my_op()
    Returns:
        ResourceDefinition: A resource that passes in user-defined values.
    """
    schema = kwargs if kwargs else Any
    return ResourceDefinition(
        resource_fn=lambda ctx: ctx.resource_config,
        config_schema=schema,
    )
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing
from collections import deque
from contextlib import contextmanager
from enum import Enum
from itertools import islice
from pathlib import Path
from typing import Optional
from pydantic import BaseModel, Field, validator
from pydantic.class_validators import root_validator
from submitit import AutoExecutor
# Module-level logger, named after this module per stdlib logging convention.
logger = logging.getLogger(__name__)
class Executor(BaseModel):
    """Defines the execution environment for jobs.
    E.g. a node on a cluster, the local machine, etc. To create jobs,
    instantiate this class and submit functions to using the executor API:
    >>> executor = Executor(type="local", block=True)
    >>> with executor.get_executor(logs_dir) as executor:
    ...     executor.submit(my_job, arg1, arg2)
    ...     executor.submit(another_job)
    """
    class Type(str, Enum):
        """Types of execution environments."""
        SLURM = "slurm"
        """Submit jobs to a SLURM cluster scheduler."""
        LOCAL = "local"
        """Submit jobs to run on the current machine."""
        DEBUG = "debug"
        """Submit jobs to run synchronously on the current machine."""
        NOOP = "noop"
        """Submitted jobs return immediately without executing. This can be
        useful for debugging, where you want to validate the code and
        configuration without performing any computation.
        """
    type: Type = Field(allow_mutation=False)
    """The execution environment."""
    slurm_partition: Optional[str] = Field(default=None, allow_mutation=False)
    """The name of the SLURM partition to submit jobs to.
    Only used for :code:`Type.SLURM` executors.
    """
    # NOTE(review): the docstring below mentions cpus == -1 for LOCAL executors,
    # but ge=1 rejects -1 at validation time -- confirm which is intended.
    cpus: int = Field(default=1, allow_mutation=False, ge=1)
    """The number of CPU threads to provision.
    If the type of executor is :code:`Type.SLURM`, this is the number of CPU
    threads to provision for each job. If the type of executor is
    :code:`Type.LOCAL`, this is the number of parallel jobs to process in a
    thread pool. If the value is -1 and the executor is :code:`Type.LOCAL`, the
    number of physical cores on the machine is used. Has no effect for
    :code:`Type.DEBUG` and :code:`Type.NOOP`.
    """
    # Maximum job runtime; converted to minutes (SLURM) or seconds (local pool).
    timeout_hours: float = Field(default=12, allow_mutation=False, gt=0)
    block: bool = Field(default=False, allow_mutation=False)
    """If :code:`True`, the :code:`get_executor()` context manager will block
    until all jobs have completed when exiting scope. Jobs are still submitted
    asynchronously for parallel execution.
    """
    # === Start of public API. ===
    @contextmanager
    def get_executor(self, logs_dir: Path, cpus=None):
        """Yield a :class:`WrappedExecutor` configured for this environment.
        Args:
            logs_dir: Directory where scheduler/job logs are written (SLURM).
            cpus: Optional override of the configured ``cpus``.
        On exit, waits for all jobs when blocking semantics apply (DEBUG, or
        ``block=True``), then closes the underlying executor if it has ``close()``.
        """
        cpus = cpus or self.cpus
        if self.type == self.Type.SLURM:
            executor = AutoExecutor(folder=logs_dir)
            executor.update_parameters(
                timeout_min=int(round(self.timeout_hours * 60)),
                nodes=1,
                cpus_per_task=cpus,
                slurm_partition=self.slurm_partition,
            )
            name = self.slurm_partition
        elif self.type == self.Type.LOCAL:
            executor, name = (
                LocalParallelExecutor(
                    cpus=multiprocessing.cpu_count() if cpus == -1 else cpus,
                    timeout_seconds=int(round(self.timeout_hours * 3600)),
                ),
                "local",
            )
        elif self.type == self.Type.DEBUG:
            executor, name = LocalSynchronousExecutor(), "local"
        elif self.type == self.Type.NOOP:
            executor, name = DummyExecutor(), "noop"
        else:
            # Raise explicitly rather than `assert False`, which is stripped under -O.
            raise AssertionError(f"Unknown executor: {self.type} ({type(self.type).__name__})")
        executor = WrappedExecutor(executor, name=name)
        yield executor
        # DEBUG executors always block on exit; others only when configured to.
        if self.type == self.Type.DEBUG or self.block:
            wait_on_jobs(
                executor.jobs,
                executor_name=str(executor),
                cancel_on_error=self.type == self.Type.SLURM,
            )
        if hasattr(executor.unwrapped, "close"):
            executor.unwrapped.close()
    # === Start of implementation details. ===
    @validator("slurm_partition")
    def validate_slurm_partition(cls, value, *, values, **kwargs):
        """Require a partition whenever the executor type is SLURM."""
        del kwargs
        if values["type"] == cls.Type.SLURM:
            # Fixed: the original interpolated values["executor"], which (a) reused
            # double quotes inside the f-string -- a SyntaxError before Python 3.12 --
            # and (b) is not a field of this model (the field is "type"), so it
            # would raise KeyError instead of producing the message.
            assert value, f"Must specify a partition for executor: {values['type']}"
        return value
    @root_validator
    def local_always_blocks(cls, values):
        """LOCAL and NOOP executors always block on exit, regardless of config."""
        if values["type"] == cls.Type.LOCAL or values["type"] == cls.Type.NOOP:
            values["block"] = True
        return values
    class Config:
        # Re-run validation on assignment so allow_mutation=False is enforced.
        validate_assignment = True
class WrappedExecutor:
    """Decorates an executor so that every submitted job is recorded in ``self.jobs``."""
    def __init__(self, executor, name: str):
        self.unwrapped = executor
        self.jobs = []
        self.name = name
    def submit(self, *args, **kwargs):
        """Forward to the wrapped executor, log the submission, and remember the job."""
        submitted = self.unwrapped.submit(*args, **kwargs)
        logger.info("Submitting job %s to %s ...", submitted.job_id, self)
        self.jobs.append(submitted)
        return submitted
    def __repr__(self) -> str:
        return self.name
def wait_on_jobs(jobs, executor_name: str = "executor", cancel_on_error: bool = True):
    """Block until every job in *jobs* has completed.
    Args:
        jobs: Job-like objects exposing ``job_id``, ``result()`` and ``cancel()``.
        executor_name: Label used in log and print messages.
        cancel_on_error: When True, the first job whose ``result()`` raises causes
            all remaining jobs (including the failed one) to be cancelled and the
            function to return early. When False, exceptions propagate to the caller.
    """
    njobs = len(jobs)
    jobs = deque(jobs)
    def cancel_all_jobs(jobs):
        # Best-effort cancellation; errors raised while cancelling are ignored.
        print(f"Cancelling {len(jobs)} {executor_name} jobs")
        for job in jobs:
            try:
                job.cancel()
            except: # noqa
                pass
    # Produce a list of the first few job IDs
    max_num_job_ids_to_show = 8
    job_ids = [j.job_id for j in islice(jobs, max_num_job_ids_to_show)]
    job_ids = ", ".join(str(x) for x in job_ids)
    job_ids = f"job ID: {job_ids}" if len(jobs) == 1 else f"job IDs: {job_ids}"
    if len(jobs) > max_num_job_ids_to_show:
        job_ids = f"{job_ids} ..."
    logger.info(
        f"Waiting for {len(jobs)} {executor_name} jobs to complete with {job_ids}"
    )
    completed = 0
    while jobs:
        job = jobs.popleft()
        if cancel_on_error:
            try:
                job.result()
                completed += 1
                logger.info(f"Jobs completed = {completed} of {njobs} ...")
            except Exception as e: # noqa Intentionally broad.
                logger.error(f"Caught: {type(e).__name__}: {e}")
                # Re-queue the failed job so it is cancelled along with the rest.
                jobs.append(job)
                return cancel_all_jobs(jobs)
        else:
            job.result()
            completed += 1
            logger.info(f"Jobs completed = {completed} of {njobs} ...")
    logger.info("All done.")
class LocalParallelExecutor:
    """Executor that runs jobs in parallel on the local machine via a
    multiprocessing pool."""
    class LocalJob:
        """Handle for one pool task; ``result()`` blocks up to the timeout."""
        def __init__(self, job_id: int, async_result, timeout_seconds: int):
            self._async_result = async_result
            self.job_id = job_id
            self.timeout_seconds = timeout_seconds
        def result(self):
            # Re-raises any exception from the job; raises a timeout error if
            # the job overruns timeout_seconds.
            return self._async_result.get(timeout=self.timeout_seconds)
        def cancel(self):
            pass  # in-flight pool tasks cannot be cancelled
    def __init__(self, cpus: int, timeout_seconds: int):
        self.last_job_id = 0
        self.process_pool = multiprocessing.Pool(cpus)
        self.timeout_seconds = timeout_seconds
        self.futures = []
    def submit(self, fn, *args, **kwargs):
        """Schedule ``fn(*args, **kwargs)`` on the pool and return a LocalJob."""
        self.last_job_id += 1
        async_result = self.process_pool.apply_async(fn, args, kwargs)
        self.futures.append(async_result)
        return self.LocalJob(self.last_job_id, async_result, self.timeout_seconds)
    def close(self):
        # Block until all jobs have completed, then shut the pool down.
        for pending in self.futures:
            pending.get()
        self.process_pool.close()
class LocalSynchronousExecutor:
    """Executor whose jobs run lazily: the submitted callable is invoked on the
    calling thread only when ``result()`` is called."""
    class LocalJob:
        def __init__(self, job_id: int, fn, *args, **kwargs):
            self.job_id = job_id
            self._fn = fn
            self._args = args
            self._kwargs = kwargs
        def result(self):
            # Execute the deferred call now, on the caller's thread.
            return self._fn(*self._args, **self._kwargs)
        def cancel(self):
            pass
    def __init__(self):
        self.last_job_id = 0
    def submit(self, fn, *args, **kwargs):
        """Wrap the call in a LocalJob without executing it yet."""
        self.last_job_id += 1
        return self.LocalJob(self.last_job_id, fn, *args, **kwargs)
class DummyExecutor:
    """No-op executor: submitted callables are never executed and every job's
    ``result()`` is ``None``. Useful for validating configuration without work."""
    class DummyJob:
        def __init__(self, job_id: int):
            self.job_id = job_id
        def result(self):
            return None
        def cancel(self):
            pass
    def __init__(self) -> None:
        self.last_job_id = 0
    def submit(self, fn, *args, **kwargs):
        # Intentionally discard the callable and its arguments: nothing runs.
        del fn, args, kwargs
        self.last_job_id += 1
        return self.DummyJob(self.last_job_id)
| # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing
from collections import deque
from contextlib import contextmanager
from enum import Enum
from itertools import islice
from pathlib import Path
from typing import Optional
from pydantic import BaseModel, Field, validator
from pydantic.class_validators import root_validator
from submitit import AutoExecutor
# Module-level logger, named after this module per stdlib logging convention.
logger = logging.getLogger(__name__)
class Executor(BaseModel):
    """Defines the execution environment for jobs.
    E.g. a node on a cluster, the local machine, etc. To create jobs,
    instantiate this class and submit functions to using the executor API:
    >>> executor = Executor(type="local", block=True)
    >>> with executor.get_executor(logs_dir) as executor:
    ...     executor.submit(my_job, arg1, arg2)
    ...     executor.submit(another_job)
    """
    class Type(str, Enum):
        """Types of execution environments."""
        SLURM = "slurm"
        """Submit jobs to a SLURM cluster scheduler."""
        LOCAL = "local"
        """Submit jobs to run on the current machine."""
        DEBUG = "debug"
        """Submit jobs to run synchronously on the current machine."""
        NOOP = "noop"
        """Submitted jobs return immediately without executing. This can be
        useful for debugging, where you want to validate the code and
        configuration without performing any computation.
        """
    type: Type = Field(allow_mutation=False)
    """The execution environment."""
    slurm_partition: Optional[str] = Field(default=None, allow_mutation=False)
    """The name of the SLURM partition to submit jobs to.
    Only used for :code:`Type.SLURM` executors.
    """
    # NOTE(review): the docstring below mentions cpus == -1 for LOCAL executors,
    # but ge=1 rejects -1 at validation time -- confirm which is intended.
    cpus: int = Field(default=1, allow_mutation=False, ge=1)
    """The number of CPU threads to provision.
    If the type of executor is :code:`Type.SLURM`, this is the number of CPU
    threads to provision for each job. If the type of executor is
    :code:`Type.LOCAL`, this is the number of parallel jobs to process in a
    thread pool. If the value is -1 and the executor is :code:`Type.LOCAL`, the
    number of physical cores on the machine is used. Has no effect for
    :code:`Type.DEBUG` and :code:`Type.NOOP`.
    """
    # Maximum job runtime; converted to minutes (SLURM) or seconds (local pool).
    timeout_hours: float = Field(default=12, allow_mutation=False, gt=0)
    block: bool = Field(default=False, allow_mutation=False)
    """If :code:`True`, the :code:`get_executor()` context manager will block
    until all jobs have completed when exiting scope. Jobs are still submitted
    asynchronously for parallel execution.
    """
    # === Start of public API. ===
    @contextmanager
    def get_executor(self, logs_dir: Path, cpus=None):
        """Yield a :class:`WrappedExecutor` configured for this environment.
        Args:
            logs_dir: Directory where scheduler/job logs are written (SLURM).
            cpus: Optional override of the configured ``cpus``.
        On exit, waits for all jobs when blocking semantics apply (DEBUG, or
        ``block=True``), then closes the underlying executor if it has ``close()``.
        """
        cpus = cpus or self.cpus
        if self.type == self.Type.SLURM:
            executor = AutoExecutor(folder=logs_dir)
            executor.update_parameters(
                timeout_min=int(round(self.timeout_hours * 60)),
                nodes=1,
                cpus_per_task=cpus,
                slurm_partition=self.slurm_partition,
            )
            name = self.slurm_partition
        elif self.type == self.Type.LOCAL:
            executor, name = (
                LocalParallelExecutor(
                    cpus=multiprocessing.cpu_count() if cpus == -1 else cpus,
                    timeout_seconds=int(round(self.timeout_hours * 3600)),
                ),
                "local",
            )
        elif self.type == self.Type.DEBUG:
            executor, name = LocalSynchronousExecutor(), "local"
        elif self.type == self.Type.NOOP:
            executor, name = DummyExecutor(), "noop"
        else:
            # Raise explicitly rather than `assert False`, which is stripped under -O.
            raise AssertionError(f"Unknown executor: {self.type} ({type(self.type).__name__})")
        executor = WrappedExecutor(executor, name=name)
        yield executor
        # DEBUG executors always block on exit; others only when configured to.
        if self.type == self.Type.DEBUG or self.block:
            wait_on_jobs(
                executor.jobs,
                executor_name=str(executor),
                cancel_on_error=self.type == self.Type.SLURM,
            )
        if hasattr(executor.unwrapped, "close"):
            executor.unwrapped.close()
    # === Start of implementation details. ===
    @validator("slurm_partition")
    def validate_slurm_partition(cls, value, *, values, **kwargs):
        """Require a partition whenever the executor type is SLURM."""
        del kwargs
        if values["type"] == cls.Type.SLURM:
            # Fixed: the original interpolated values['executor'], which is not a
            # field of this model (the field is "type"), so the assertion message
            # would itself raise KeyError instead of reporting the problem.
            assert value, f"Must specify a partition for executor: {values['type']}"
        return value
    @root_validator
    def local_always_blocks(cls, values):
        """LOCAL and NOOP executors always block on exit, regardless of config."""
        if values["type"] == cls.Type.LOCAL or values["type"] == cls.Type.NOOP:
            values["block"] = True
        return values
    class Config:
        # Re-run validation on assignment so allow_mutation=False is enforced.
        validate_assignment = True
class WrappedExecutor:
    """Decorates an executor so that every submitted job is recorded in ``self.jobs``."""
    def __init__(self, executor, name: str):
        self.unwrapped = executor
        self.jobs = []
        self.name = name
    def submit(self, *args, **kwargs):
        """Forward to the wrapped executor, log the submission, and remember the job."""
        submitted = self.unwrapped.submit(*args, **kwargs)
        logger.info("Submitting job %s to %s ...", submitted.job_id, self)
        self.jobs.append(submitted)
        return submitted
    def __repr__(self) -> str:
        return self.name
def wait_on_jobs(jobs, executor_name: str = "executor", cancel_on_error: bool = True):
    """Block until every job in *jobs* has completed.
    Args:
        jobs: Job-like objects exposing ``job_id``, ``result()`` and ``cancel()``.
        executor_name: Label used in log and print messages.
        cancel_on_error: When True, the first job whose ``result()`` raises causes
            all remaining jobs (including the failed one) to be cancelled and the
            function to return early. When False, exceptions propagate to the caller.
    """
    njobs = len(jobs)
    jobs = deque(jobs)
    def cancel_all_jobs(jobs):
        # Best-effort cancellation; errors raised while cancelling are ignored.
        print(f"Cancelling {len(jobs)} {executor_name} jobs")
        for job in jobs:
            try:
                job.cancel()
            except: # noqa
                pass
    # Produce a list of the first few job IDs
    max_num_job_ids_to_show = 8
    job_ids = [j.job_id for j in islice(jobs, max_num_job_ids_to_show)]
    job_ids = ", ".join(str(x) for x in job_ids)
    job_ids = f"job ID: {job_ids}" if len(jobs) == 1 else f"job IDs: {job_ids}"
    if len(jobs) > max_num_job_ids_to_show:
        job_ids = f"{job_ids} ..."
    logger.info(
        f"Waiting for {len(jobs)} {executor_name} jobs to complete with {job_ids}"
    )
    completed = 0
    while jobs:
        job = jobs.popleft()
        if cancel_on_error:
            try:
                job.result()
                completed += 1
                logger.info(f"Jobs completed = {completed} of {njobs} ...")
            except Exception as e: # noqa Intentionally broad.
                logger.error(f"Caught: {type(e).__name__}: {e}")
                # Re-queue the failed job so it is cancelled along with the rest.
                jobs.append(job)
                return cancel_all_jobs(jobs)
        else:
            job.result()
            completed += 1
            logger.info(f"Jobs completed = {completed} of {njobs} ...")
    logger.info("All done.")
class LocalParallelExecutor:
    """Executor that runs jobs in parallel on the local machine via a
    multiprocessing pool."""
    class LocalJob:
        """Handle for one pool task; ``result()`` blocks up to the timeout."""
        def __init__(self, job_id: int, async_result, timeout_seconds: int):
            self._async_result = async_result
            self.job_id = job_id
            self.timeout_seconds = timeout_seconds
        def result(self):
            # Re-raises any exception from the job; raises a timeout error if
            # the job overruns timeout_seconds.
            return self._async_result.get(timeout=self.timeout_seconds)
        def cancel(self):
            pass  # in-flight pool tasks cannot be cancelled
    def __init__(self, cpus: int, timeout_seconds: int):
        self.last_job_id = 0
        self.process_pool = multiprocessing.Pool(cpus)
        self.timeout_seconds = timeout_seconds
        self.futures = []
    def submit(self, fn, *args, **kwargs):
        """Schedule ``fn(*args, **kwargs)`` on the pool and return a LocalJob."""
        self.last_job_id += 1
        async_result = self.process_pool.apply_async(fn, args, kwargs)
        self.futures.append(async_result)
        return self.LocalJob(self.last_job_id, async_result, self.timeout_seconds)
    def close(self):
        # Block until all jobs have completed, then shut the pool down.
        for pending in self.futures:
            pending.get()
        self.process_pool.close()
class LocalSynchronousExecutor:
    """Executor whose jobs run lazily: the submitted callable is invoked on the
    calling thread only when ``result()`` is called."""
    class LocalJob:
        def __init__(self, job_id: int, fn, *args, **kwargs):
            self.job_id = job_id
            self._fn = fn
            self._args = args
            self._kwargs = kwargs
        def result(self):
            # Execute the deferred call now, on the caller's thread.
            return self._fn(*self._args, **self._kwargs)
        def cancel(self):
            pass
    def __init__(self):
        self.last_job_id = 0
    def submit(self, fn, *args, **kwargs):
        """Wrap the call in a LocalJob without executing it yet."""
        self.last_job_id += 1
        return self.LocalJob(self.last_job_id, fn, *args, **kwargs)
class DummyExecutor:
    """Executor that accepts jobs but never executes them.

    Useful as a no-op stand-in: every submitted job immediately reports a
    ``None`` result.
    """

    class DummyJob:
        """Job handle whose result is always ``None``."""

        def __init__(self, job_id: int):
            self.job_id = job_id

        def result(self):
            """Pretend completion; nothing was ever executed."""
            return None

        def cancel(self):
            """Nothing to cancel - the job never runs."""
            pass

    def __init__(self) -> None:
        self.last_job_id = 0

    def submit(self, fn, *args, **kwargs):
        """Discard the callable and hand back a fresh dummy job handle."""
        del fn, args, kwargs  # intentionally unused
        self.last_job_id += 1
        return self.DummyJob(self.last_job_id)
|
# SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
"""
Code not written by us
"""
import os
import sqlalchemy
import structlog
from decisionengine.framework.modules.logging_configDict import LOGGERNAME
__all__ = ["orm_as_dict", "clone_model", "add_engine_pidguard"]
def orm_as_dict(obj):
    """Convert a SQLAlchemy ORM instance into a plain ``{column: value}`` dict.

    Based on : https://stackoverflow.com/a/37350445
    """
    mapper = sqlalchemy.inspect(obj).mapper
    return {attr.key: getattr(obj, attr.key) for attr in mapper.column_attrs}
def clone_model(model, **kwargs):
    """Return a new instance of ``model``'s class with all non-primary-key
    column values copied over; ``kwargs`` override individual columns.

    Based on https://stackoverflow.com/a/55991358
    """
    # will raise AttributeError if data not loaded
    try:
        model.sequence_id  # taskmanager doesn't have an 'id' column
    except AttributeError:
        model.id  # pylint: disable=pointless-statement
    table = model.__table__
    data = {
        name: getattr(model, name)
        for name in table.columns.keys()
        if name not in table.primary_key
    }
    data.update(kwargs)
    return model.__class__(**data)
def add_engine_pidguard(engine):
    """
    Attach connect/checkout listeners that guard pooled DB connections against
    reuse across ``os.fork()``, and apply SQLite pragmas on connect.

    Based on
    https://stackoverflow.com/questions/62920507/using-sqlalchemy-connection-pooling-queues-with-python-multiprocessing
    """
    structlog.getLogger(LOGGERNAME).debug(f"setting up add_engine_pidguard for {engine}")

    @sqlalchemy.event.listens_for(engine, "connect")
    def connect(dbapi_connection, connection_record):
        """
        Record the creating PID; enable FK enforcement + busy timeout on SQLite.

        Based on
        https://docs.sqlalchemy.org/en/14/dialects/sqlite.html#foreign-key-support
        https://docs.sqlalchemy.org/en/14/core/pooling.html#using-connection-pools-with-multiprocessing-or-os-fork
        """
        if "sqlite" in str(type(dbapi_connection)):
            cursor = dbapi_connection.cursor()
            cursor.execute("PRAGMA foreign_keys=ON")
            cursor.execute("PRAGMA busy_timeout=5000")  # permit retrys for 5 seconds only
            cursor.close()
        connection_record.info["pid"] = os.getpid()

    @sqlalchemy.event.listens_for(engine, "checkout")
    def checkout(dbapi_connection, connection_record, connection_proxy):
        """
        Refuse to hand out a connection created in a different process.

        Based on
        https://docs.sqlalchemy.org/en/14/core/pooling.html#using-connection-pools-with-multiprocessing-or-os-fork
        """
        pid = os.getpid()
        if connection_record.info["pid"] != pid:
            connection_record.connection = connection_proxy.connection = None
            # BUG FIX: the f-string nested double quotes inside double quotes
            # (info["pid"]) - a SyntaxError before Python 3.12; use 'pid'.
            raise sqlalchemy.exc.DisconnectionError(
                f"Connection record belongs to pid {connection_record.info['pid']}, attempting to check out in pid {pid}"
            )
| # SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
"""
Code not written by us
"""
import os
import sqlalchemy
import structlog
from decisionengine.framework.modules.logging_configDict import LOGGERNAME
__all__ = ["orm_as_dict", "clone_model", "add_engine_pidguard"]
def orm_as_dict(obj):
    """Map a SQLAlchemy ORM object's column attributes into a plain dict.

    Based on : https://stackoverflow.com/a/37350445
    """
    result = {}
    for attr in sqlalchemy.inspect(obj).mapper.column_attrs:
        result[attr.key] = getattr(obj, attr.key)
    return result
def clone_model(model, **kwargs):
    """Build a fresh instance of ``model``'s class, copying every column value
    except primary keys; ``kwargs`` override individual columns.

    Based on https://stackoverflow.com/a/55991358
    """
    # will raise AttributeError if data not loaded
    try:
        model.sequence_id  # taskmanager doesn't have an 'id' column
    except AttributeError:
        model.id  # pylint: disable=pointless-statement
    table = model.__table__
    payload = {}
    for column_name in table.columns.keys():
        if column_name in table.primary_key:
            continue
        payload[column_name] = getattr(model, column_name)
    payload.update(kwargs)
    return model.__class__(**payload)
def add_engine_pidguard(engine):
    """
    Register pool-safety event listeners on ``engine``: stamp each new DBAPI
    connection with its creating PID (plus SQLite pragma setup) and refuse to
    check out connections across an ``os.fork()`` boundary.

    Based on
    https://stackoverflow.com/questions/62920507/using-sqlalchemy-connection-pooling-queues-with-python-multiprocessing
    """
    structlog.getLogger(LOGGERNAME).debug(f"setting up add_engine_pidguard for {engine}")

    @sqlalchemy.event.listens_for(engine, "connect")
    def connect(dbapi_connection, connection_record):
        """
        On connect: enable SQLite pragmas where applicable, record the PID.

        Based on
        https://docs.sqlalchemy.org/en/14/dialects/sqlite.html#foreign-key-support
        https://docs.sqlalchemy.org/en/14/core/pooling.html#using-connection-pools-with-multiprocessing-or-os-fork
        """
        if "sqlite" in str(type(dbapi_connection)):
            cur = dbapi_connection.cursor()
            cur.execute("PRAGMA foreign_keys=ON")
            cur.execute("PRAGMA busy_timeout=5000")  # permit retrys for 5 seconds only
            cur.close()
        connection_record.info["pid"] = os.getpid()

    @sqlalchemy.event.listens_for(engine, "checkout")
    def checkout(dbapi_connection, connection_record, connection_proxy):
        """
        On checkout: invalidate and reject connections from another process.

        Based on
        https://docs.sqlalchemy.org/en/14/core/pooling.html#using-connection-pools-with-multiprocessing-or-os-fork
        """
        current_pid = os.getpid()
        if connection_record.info["pid"] != current_pid:
            connection_record.connection = connection_proxy.connection = None
            raise sqlalchemy.exc.DisconnectionError(
                f"Connection record belongs to pid {connection_record.info['pid']}, attempting to check out in pid {current_pid}"
            )
|
from flask import Blueprint, redirect, url_for, render_template, request, session
from src.constants.model_params import Ridge_Params, Lasso_Params, ElasticNet_Params, RandomForestRegressor_Params, \
SVR_params, AdabootRegressor_Params, \
GradientBoostRegressor_Params
from src.constants.model_params import KmeansClustering_Params, DbscanClustering_Params, AgglomerativeClustering_Params
from src.constants.model_params import LogisticRegression_Params, SVC_Params, KNeighborsClassifier_Params, \
DecisionTreeClassifier_Params, RandomForestClassifier_Params, GradientBoostingClassifier_Params, \
AdaBoostClassifier_Params
from src.constants.constants import ACTIVATION_FUNCTIONS, CLASSIFICATION_MODELS, CLUSTERING_MODELS, OPTIMIZERS, \
REGRESSION_LOSS, POOLING
from flask.json import jsonify
from src.constants.model_params import DecisionTreeRegressor_Params, LinearRegression_Params
from src.model.custom.classification_models import ClassificationModels
from src.model.custom.regression_models import RegressionModels
from src.model.custom.clustering_models import ClusteringModels
from src.preprocessing.preprocessing_helper import Preprocessing
from src.constants.constants import REGRESSION_MODELS
from src.utils.common.prediction_helper import make_prediction
from src.utils.databases.mysql_helper import MySqlHelper
from werkzeug.utils import secure_filename
import os
from src.utils.common.common_helper import get_param_value, load_prediction_result, load_project_model, \
read_config, save_prediction_result, save_project_model
import pandas as pd
from src.utils.common.data_helper import load_data
from src.model.auto.Auto_classification import ModelTrain_Classification
from src.model.auto.Auto_regression import ModelTrain_Regression
from src.feature_engineering.feature_engineering_helper import FeatureEngineering
from loguru import logger
from from_root import from_root
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, accuracy_score, precision_score, \
f1_score, recall_score
from src.utils.common.project_report_helper import ProjectReports
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import numpy as np
from sklearn.model_selection import train_test_split
from prettytable import PrettyTable
from src.utils.common.plotly_helper import PlotlyHelper
# Blueprint hosting every model-training / prediction route in this module.
app_training = Blueprint('training', __name__)

config_args = read_config("./config.yaml")

# NOTE(review): opens a MySQL connection at module import time - side effect.
mysql = MySqlHelper.get_connection_obj()

# File sink for loguru; path pieces come from config.yaml's 'logs' section.
log_path = os.path.join(from_root(), config_args['logs']['logger'], config_args['logs']['generallogs_file'])
logger.add(sink=log_path, format="[{time:YYYY-MM-DD HH:mm:ss.SSS} - {level} - {module} ] - {message}", level="INFO")

UPLOAD_FOLDER = config_args['dir_structure']['upload_folder']
# Upload whitelist; prediction() defines its own local csv/tsv/json list.
ALLOWED_EXTENSIONS = set(['zip'])
@app_training.route('/model_training/<action>', methods=['GET'])
def model_training(action):
    """Render the model-training pages: help / auto_training / custom_training.

    Requires an active project ('pid' in session) with loaded data. Blocks
    custom training while an undeleted scheduler entry exists for the project,
    and refuses to proceed while categorical independent features remain.
    """
    try:
        if 'pid' in session:
            df = load_data()
            if df is not None:
                # BUG FIX: target_column was assigned twice from the session;
                # the duplicate line is removed.
                target_column = ""
                if session['target_column'] is not None:
                    target_column = session['target_column']
                cols_ = [col for col in df.columns if col != target_column]
                # Check data contain any categorical independent features
                Categorical_columns = Preprocessing.col_seperator(df.loc[:, cols_], "Categorical_columns")
                if len(Categorical_columns.columns) > 0:
                    return render_template('model_training/auto_training.html', project_type=session['project_type'],
                                           target_column=session['target_column'], status="error",
                                           msg="Data contain some categorical indepedent features, please perform encoding first")

                # Regression/classification projects need a target column first.
                if session['project_type'] != 3 and session['target_column'] is None:
                    return redirect('/target-column')

                if action == 'help':
                    return render_template('model_training/help.html')
                elif action == 'auto_training':
                    logger.info('Redirect To Auto Training Page')
                    ProjectReports.insert_record_ml('Redirect To Auto Training Page')
                    if session['project_type'] == 3:
                        return render_template('model_training/auto_training.html',
                                               project_type=session['project_type'],
                                               target_column=session['target_column'], status="error",
                                               msg="Auto Training is not available for Clustering!!!")
                    return render_template('model_training/auto_training.html', project_type=session['project_type'],
                                           target_column=session['target_column'])

                elif action == 'custom_training' or action == 'final_train_model':
                    # NOTE(review): the project name is interpolated straight into
                    # SQL - injection risk if session data is ever user-controlled;
                    # parameterize if MySqlHelper supports it.
                    query = f""" select a.pid ProjectId , a.TargetColumn TargetName,
                               a.Model_Name ModelName,
                               b.Schedule_date,
                               b.schedule_time ,
                               a.Model_Trained,
                               b.train_status ,
                               b.email,
                               b.deleted
                               from tblProjects as a
                               join tblProject_scheduler as b on a.Pid = b.ProjectId where b.ProjectId = '{session.get('project_name')}'
                               and b.deleted=0
                               """
                    result = mysql.fetch_one(query)
                    if result is not None:
                        # A scheduler entry exists - block interactive training.
                        return render_template('scheduler/training_blocker.html')

                    logger.info('Redirect To Custom Training Page')
                    ProjectReports.insert_record_ml('Redirect To Custom Training Page')
                    try:
                        if session['project_type'] == 2:
                            return render_template('model_training/classification.html', action=action,
                                                   models=CLASSIFICATION_MODELS)
                        elif session['project_type'] == 1:
                            return render_template('model_training/regression.html', action=action,
                                                   models=REGRESSION_MODELS)
                        elif session['project_type'] == 3:
                            return render_template('model_training/clustering.html', action=action,
                                                   models=CLUSTERING_MODELS)
                        else:
                            return render_template('model_training/custom_training.html')
                    except Exception as e:
                        logger.error(e)
                        return render_template('model_training/custom_training.html')
                else:
                    return 'Non-Implemented Action'
            else:
                return redirect('/')
        else:
            # BUG FIX: was redirect(url_for('/')) - url_for() takes an endpoint
            # name, not a URL path, so that call raised BuildError at runtime.
            return redirect('/')
    except Exception as e:
        logger.error('Error in Model Training')
        ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
        return render_template('500.html', exception=e)
@app_training.route('/model_training/<action>', methods=['POST'])
def model_training_post(action):
    """Handle training form submissions.

    Actions:
      custom_training   - train one user-selected model with form parameters.
      auto_training     - benchmark a battery of models (regression/classification).
      final_train_model - retrain the chosen model on the full data set.
    """
    try:
        if 'pid' in session:
            df = load_data()
            model = None
            # Renamed from 'range' - the original shadowed the builtin.
            split_range = None
            random_state = None
            if df is not None:
                if action == 'help':
                    return render_template('model_training/help.html')

                elif action == 'custom_training':
                    try:
                        model = request.form['model']
                        split_range = int(request.form['range'])
                        # These two models take no random_state in the form.
                        if model != "KNeighborsClassifier" and model != "SVR":
                            random_state = int(request.form['random_state'])
                        logger.info('Submitted Custom Training Page')
                        ProjectReports.insert_record_ml('Submitted Custom Training Page',
                                                        f"Model:{model}; Range:{split_range}; Random_State: {random_state}")
                        target = session['target_column']
                        if session['project_type'] != 3:
                            X = df.drop(target, axis=1)
                            y = df[target]
                            train_model_fun = None
                            X_train, X_test, y_train, y_test = FeatureEngineering.train_test_Split(cleanedData=X,
                                                                                                   label=y,
                                                                                                   train_size=split_range / 100,
                                                                                                   random_state=random_state)
                            model_params = {}
                            # Map the submitted model name to its parameter spec
                            # and training function.
                            if model == "LinearRegression":
                                Model_Params = LinearRegression_Params
                                train_model_fun = RegressionModels.linear_regression_regressor
                            elif model == "Ridge":
                                Model_Params = Ridge_Params
                                train_model_fun = RegressionModels.ridge_regressor
                            elif model == "Lasso":
                                Model_Params = Lasso_Params
                                train_model_fun = RegressionModels.lasso_regressor
                            elif model == "ElasticNet":
                                Model_Params = ElasticNet_Params
                                train_model_fun = RegressionModels.elastic_net_regressor
                            elif model == "DecisionTreeRegressor":
                                Model_Params = DecisionTreeRegressor_Params
                                train_model_fun = RegressionModels.decision_tree_regressor
                            elif model == "RandomForestRegressor":
                                Model_Params = RandomForestRegressor_Params
                                train_model_fun = RegressionModels.random_forest_regressor
                            elif model == "SVR":
                                Model_Params = SVR_params
                                train_model_fun = RegressionModels.support_vector_regressor
                            elif model == "AdaBoostRegressor":
                                Model_Params = AdabootRegressor_Params
                                train_model_fun = RegressionModels.ada_boost_regressor
                            elif model == "GradientBoostingRegressor":
                                Model_Params = GradientBoostRegressor_Params
                                train_model_fun = RegressionModels.gradient_boosting_regressor
                            elif model == "LogisticRegression":
                                Model_Params = LogisticRegression_Params
                                train_model_fun = ClassificationModels.logistic_regression_classifier
                            elif model == "SVC":
                                Model_Params = SVC_Params
                                train_model_fun = ClassificationModels.support_vector_classifier
                            elif model == "KNeighborsClassifier":
                                # (debug print removed)
                                Model_Params = KNeighborsClassifier_Params
                                train_model_fun = ClassificationModels.k_neighbors_classifier
                            elif model == "DecisionTreeClassifier":
                                Model_Params = DecisionTreeClassifier_Params
                                train_model_fun = ClassificationModels.decision_tree_classifier
                            elif model == "RandomForestClassifier":
                                Model_Params = RandomForestClassifier_Params
                                train_model_fun = ClassificationModels.random_forest_classifier
                            elif model == "AdaBoostClassifier":
                                Model_Params = AdaBoostClassifier_Params
                                train_model_fun = ClassificationModels.ada_boost_classifier
                            elif model == "GradientBoostClassifier":
                                Model_Params = GradientBoostingClassifier_Params
                                train_model_fun = ClassificationModels.gradient_boosting_classifier
                            else:
                                return 'Non-Implemented Action'

                            # Coerce each declared parameter from the form.
                            for param in Model_Params:
                                model_params[param['name']] = get_param_value(param, request.form[param['name']])
                            trained_model = train_model_fun(X_train, y_train, True, **model_params)

                            """Save Trained Model"""
                            save_project_model(trained_model)
                            reports = [{"key": "Model Name", "value": model},
                                       {"key": "Data Size", "value": len(df)},
                                       {"key": "Trained Data Size", "value": len(X_train)},
                                       {"key": "Test Data Size", "value": len(X_test)}]
                            scores = []
                            # Regression
                            if trained_model is not None and session['project_type'] == 1:
                                y_pred = trained_model.predict(X_test)
                                scores.append({"key": "r2_score", "value": r2_score(y_test, y_pred)})
                                scores.append(
                                    {"key": "mean_absolute_error", "value": mean_absolute_error(y_test, y_pred)})
                                scores.append(
                                    {"key": "mean_squared_error", "value": mean_squared_error(y_test, y_pred)})
                                # Model Name Set in table while training
                                # BUG FIX: quotes were mismatched (Model_Name="{model}'
                                # ... Id='{pid}") producing malformed SQL; quoted
                                # consistently like the final_train_model queries.
                                query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
                                mysql.update_record(query)
                                return render_template('model_training/model_result.html', action=action,
                                                       status="success",
                                                       reports=reports, scores=scores, model_params=model_params)

                            # Classification
                            if trained_model is not None and session['project_type'] == 2:
                                y_pred = trained_model.predict(X_test)
                                scores.append({"key": "Accuracy", "value": accuracy_score(y_test, y_pred)})
                                scores.append({"key": "Classes", "value": df[target].unique()})
                                scores.append(
                                    {"key": "Precision", "value": precision_score(y_test, y_pred, average=None)})
                                scores.append({"key": "Recall", "value": recall_score(y_test, y_pred, average=None)})
                                scores.append({"key": "F1_score", "value": f1_score(y_test, y_pred, average=None)})
                                # Model Name Set in table while training
                                # BUG FIX: same quote mismatch as above.
                                query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
                                mysql.update_record(query)
                                return render_template('model_training/model_result.html', action=action,
                                                       status="success",
                                                       reports=reports, scores=scores, model_params=model_params)

                        elif session['project_type'] == 3:
                            X = df
                            train_model_fun = None
                            model_params = {}
                            if model == "KMeans":
                                Model_Params = KmeansClustering_Params
                                train_model_fun = ClusteringModels.kmeans_clustering
                            elif model == "DBSCAN":
                                Model_Params = DbscanClustering_Params
                                train_model_fun = ClusteringModels.dbscan_clustering
                            elif model == "AgglomerativeClustering":
                                Model_Params = AgglomerativeClustering_Params
                                train_model_fun = ClusteringModels.agglomerative_clustering
                            else:
                                return 'Non-Implemented Action'

                            for param in Model_Params:
                                model_params[param['name']] = get_param_value(param, request.form[param['name']])
                            trained_model, y_pred = train_model_fun(X, True, **model_params)

                            """Save Trained Model"""
                            save_project_model(trained_model)
                            reports = [{"key": "Model Name", "value": model},
                                       {"key": "Data Size", "value": len(df)},
                                       {"key": "Train Data Size", "value": len(X)},
                                       {"key": "Test Data Size", "value": 0}]
                            scores = []
                            # Clustering
                            if trained_model is not None and session['project_type'] == 3:
                                scores.append({"key": "Predicted Classes",
                                               "value": pd.DataFrame(data=y_pred, columns=['y_pred'])[
                                                   'y_pred'].unique()})
                                # Model Name Set in table while training
                                # BUG FIX: same quote mismatch as above.
                                query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
                                mysql.update_record(query)
                                return render_template('model_training/model_result.html', action=action,
                                                       status="success",
                                                       reports=reports, scores=scores, model_params=model_params)
                            else:
                                raise Exception("Model Couldn't train, please check parametes")
                    except Exception as e:
                        logger.error('Error Submitted Custom Training Page')
                        ProjectReports.insert_record_ml('Error Submitted Custom Training Page',
                                                        f"Model:{model}; Range:{split_range}; Random_State: {random_state}",
                                                        '', 0, str(e))
                        if session['project_type'] == 2:
                            return render_template('model_training/classification.html', action=action,
                                                   models=CLASSIFICATION_MODELS, status="error", msg=str(e))
                        elif session['project_type'] == 1:
                            return render_template('model_training/regression.html', action=action,
                                                   models=REGRESSION_MODELS, status="error", msg=str(e))
                        else:
                            return render_template('model_training/clustering.html', action=action,
                                                   models=CLUSTERING_MODELS, status="error", msg=str(e))

                elif action == "auto_training":
                    try:
                        target = session['target_column']
                        if target is None:
                            # BUG FIX: was redirect(url_for('/target-column')) -
                            # url_for() takes an endpoint name, not a path.
                            return redirect('/target-column')
                        trainer = None
                        X = df.drop(target, axis=1)
                        y = df[target]
                        X_train, X_test, y_train, y_test = FeatureEngineering.train_test_Split(cleanedData=X,
                                                                                               label=y,
                                                                                               train_size=0.75,
                                                                                               random_state=101)
                        if session['project_type'] == 1:
                            trainer = ModelTrain_Regression(X_train, X_test, y_train, y_test, True)
                            result = trainer.results()
                            result = result.to_html()
                            return render_template('model_training/auto_training.html', status="success",
                                                   project_type=session['project_type'],
                                                   target_column=session['target_column'], train_done=True,
                                                   result=result)
                        elif session['project_type'] == 2:
                            trainer = ModelTrain_Classification(X_train, X_test, y_train, y_test, True)
                            result = trainer.results()
                            result = result.to_html()
                            return render_template('model_training/auto_training.html', status="success",
                                                   project_type=session['project_type'],
                                                   target_column=session['target_column'], train_done=True,
                                                   result=result)
                        # NOTE(review): no branch for project_type == 3 - the GET
                        # handler blocks clustering auto-training upstream.
                    except Exception as ex:
                        return render_template('model_training/auto_training.html', status="error",
                                               project_type=session['project_type'],
                                               target_column=session['target_column'], msg=str(ex))

                elif action == 'final_train_model':
                    try:
                        logger.info('Final Train Model')
                        ProjectReports.insert_record_ml('Final Train Model')
                        query = f'''select Model_Name from tblProjects Where Id="{session.get('pid')}"'''
                        model_name = mysql.fetch_one(query)[0]
                        if session['project_type'] != 3:
                            target = session['target_column']
                            X = df.drop(target, axis=1)
                            y = df[target]
                            model = load_project_model()
                            if model is None:
                                return render_template('model_training/model_result.html', action=action,
                                                       status="error",
                                                       msg="Model is not found, please train model again")
                            else:
                                # Reuse the previously-tuned hyperparameters.
                                model_params = dict(model.get_params())
                                if model_name == "LinearRegression":
                                    train_model_fun = RegressionModels.linear_regression_regressor
                                elif model_name == "Ridge":
                                    train_model_fun = RegressionModels.ridge_regressor
                                elif model_name == "Lasso":
                                    train_model_fun = RegressionModels.lasso_regressor
                                elif model_name == "ElasticNet":
                                    train_model_fun = RegressionModels.elastic_net_regressor
                                elif model_name == "DecisionTreeRegressor":
                                    train_model_fun = RegressionModels.decision_tree_regressor
                                elif model_name == "RandomForestRegressor":
                                    train_model_fun = RegressionModels.random_forest_regressor
                                elif model_name == "SVR":
                                    train_model_fun = RegressionModels.support_vector_regressor
                                elif model_name == "AdaBoostRegressor":
                                    train_model_fun = RegressionModels.ada_boost_regressor
                                elif model_name == "GradientBoostingRegressor":
                                    train_model_fun = RegressionModels.gradient_boosting_regressor
                                elif model_name == "LogisticRegression":
                                    train_model_fun = ClassificationModels.logistic_regression_classifier
                                elif model_name == "SVC":
                                    train_model_fun = ClassificationModels.support_vector_classifier
                                elif model_name == "KNeighborsClassifier":
                                    train_model_fun = ClassificationModels.k_neighbors_classifier
                                elif model_name == "DecisionTreeClassifier":
                                    train_model_fun = ClassificationModels.decision_tree_classifier
                                elif model_name == "RandomForestClassifier":
                                    train_model_fun = ClassificationModels.random_forest_classifier
                                elif model_name == "AdaBoostClassifier":
                                    train_model_fun = ClassificationModels.ada_boost_classifier
                                elif model_name == "GradientBoostClassifier":
                                    train_model_fun = ClassificationModels.gradient_boosting_classifier
                                else:
                                    return 'Non-Implemented Action'

                                # Retrain on the FULL data set this time.
                                trained_model = train_model_fun(X, y, True, **model_params)
                                """Save Final Model"""
                                save_project_model(trained_model, 'model.pkl')
                                query = f'''Update tblProjects Set Model_Trained=1 Where Id="{session.get('pid')}"'''
                                mysql.update_record(query)
                                logger.info('Final Training Done')
                                ProjectReports.insert_record_ml('Final Training Done')
                                return render_template('model_training/congrats.html')

                        elif session['project_type'] == 3:
                            X = df
                            model = load_project_model()
                            if model is None:
                                return render_template('model_training/model_result.html', action=action,
                                                       status="error",
                                                       msg="Model is not found, please train model again")
                            else:
                                model_params = dict(model.get_params())
                                if model_name == "KMeans":
                                    train_model_fun = ClusteringModels.kmeans_clustering
                                elif model_name == "DBSCAN":
                                    train_model_fun = ClusteringModels.dbscan_clustering
                                elif model_name == "AgglomerativeClustering":
                                    train_model_fun = ClusteringModels.agglomerative_clustering
                                else:
                                    return 'Non Implemented mtd'
                                trained_model, y_pred = train_model_fun(X, True, **model_params)
                                """Save Trained Model"""
                                save_project_model(trained_model, 'model.pkl')
                                query = f'''Update tblProjects Set Model_Trained=1 Where Id="{session.get('pid')}"'''
                                mysql.update_record(query)
                                logger.info('Final Training Done')
                                ProjectReports.insert_record_ml('Final Training Done')
                                return render_template('model_training/congrats.html')
                    except Exception as e:
                        logger.error('Error in Model Training Submit')
                        ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
                        # BUG FIX: 'return' was missing here, so the error page was
                        # rendered, discarded, and the view fell through.
                        return render_template('model_training/model_result.html', action=action, status="error",
                                               msg="Model is not found, please train model again")

                # NOTE(review): unimplemented stub - computes a path, does nothing.
                if action == "Scheduled_model":
                    path = os.path.join(from_root(), 'artifacts', 'model_temp.pkl')
                    pass
                else:
                    return "Non Implemented Method"
            else:
                logger.critical('DataFrame has no data')
                return redirect('/')
    except Exception as e:
        logger.error('Error in Model Training Submit')
        ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
        return render_template('500.html', exception=e)
@app_training.route('/congrats', methods=['GET', 'POST'])
def congrats():
    """Show the 'training complete' page for the active project."""
    try:
        if 'pid' in session:
            df = load_data()
            if df is not None:
                target = session['target_column']
                # NOTE(review): X and y are computed but never used; kept because
                # df.drop() also validates that the target column still exists.
                X = df.drop(target, axis=1)
                y = df[target]
                model = load_project_model()
                if model is None:
                    return render_template('model_training/model_result.html', status="error",
                                           msg="Model is not found, please train model again")
                else:
                    # BUG FIX: the original did `for key, value in model.get_params():`
                    # (missing .items(), so it mis-unpacked dict keys) and then
                    # exec(key + "=value"), which injected nothing usable into this
                    # scope and is unsafe with dynamic strings. Collect the params
                    # into a dict instead.
                    model_params = dict(model.get_params())
            logger.info('Loaded Congrats Page')
            ProjectReports.insert_record_ml('Loaded Congrats Page')
            if request.method == "GET":
                return render_template('model_training/congrats.html')
            else:
                return render_template('model_training/congrats.html')
    except Exception as e:
        logger.error('Error in Model Training Submit')
        ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
        return render_template('500.html', exception=e)
@app_training.route('/prediction', methods=['GET', 'POST'])
def prediction():
    """GET: show the prediction upload page (only if a model is trained).

    POST: accept a csv/tsv/json file, run predictions on it, render the
    result, and always remove the uploaded temp file afterwards.
    """
    try:
        if 'pid' in session:
            file_path = ""
            logger.info('Loaded Prediction Page')
            ProjectReports.insert_record_ml('Loaded Prediction Page')
            if request.method == "GET":
                # BUG FIX: the f-string nested double quotes inside double quotes
                # (session.get("pid")) - a SyntaxError before Python 3.12.
                # NOTE(review): value interpolated into SQL - injection risk;
                # parameterize if MySqlHelper supports it.
                is_trained = mysql.fetch_all(
                    f"SELECT * FROM tblProjects WHERE Id ={session.get('pid')} AND Model_Trained=1")
                if is_trained is None:
                    return render_template('model_training/prediction_page.html', status="error",
                                           msg="your model is not trained, please train model first")
                else:
                    return render_template('model_training/prediction_page.html', status="success")
            else:
                try:
                    f = request.files['file']
                    # Local whitelist; renamed so it no longer shadows the
                    # module-level ALLOWED_EXTENSIONS ('zip') constant.
                    allowed_extensions = ['csv', 'tsv', 'json']
                    msg = ""
                    if len(request.files) == 0:
                        msg = 'Please select a file to upload'
                    elif f.filename.strip() == '':
                        msg = 'Please select a file to upload'
                    elif '.' not in f.filename or f.filename.rsplit('.', 1)[1].lower() not in allowed_extensions:
                        # BUG FIX: added the '.' guard - rsplit('.', 1)[1] raised
                        # IndexError for extension-less filenames.
                        msg = 'This file format is not allowed, please select mentioned one'
                    if msg:
                        logger.error(msg)
                        return render_template('model_training/prediction_page.html', status="error", msg=msg)

                    filename = secure_filename(f.filename)
                    file_path = os.path.join(config_args['dir_structure']['upload_folder'], filename)
                    f.save(file_path)
                    if file_path.endswith('.csv'):
                        df = pd.read_csv(file_path)
                    elif file_path.endswith('.tsv'):
                        df = pd.read_csv(file_path, sep='\t')
                    elif file_path.endswith('.json'):
                        df = pd.read_json(file_path)
                    else:
                        msg = 'This file format is currently not supported'
                        logger.info(msg)
                        return render_template('model_training/prediction_page.html', status="error", msg=msg)

                    prediction = make_prediction(df)
                    data = prediction.to_html()
                    if len(data) > 0:
                        save_prediction_result(prediction)
                        return render_template('model_training/prediction_result.html', status="success", data=data)
                    else:
                        return render_template('model_training/prediction_result.html', status="error",
                                               msg="There is some issue, coudn't perform prediction. Please check your data")
                except Exception as e:
                    logger.error('Error in Model Training Submit')
                    ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
                    return render_template('model_training/prediction_page.html', status="error", msg=str(e))
                finally:
                    # BUG FIX: guard against the file never having been written -
                    # os.remove() on a missing path raised inside finally and
                    # masked the real response/exception.
                    if file_path and os.path.exists(file_path):
                        os.remove(file_path)
        else:
            logger.error('Project id not found, redirect to home page')
            ProjectReports.insert_record_ml('Project id not found, redirect to home page', '', '', 0, 'Error')
            return redirect('/')
    except Exception as e:
        logger.error(e)
        return redirect('/')
@app_training.route('/download_prediction', methods=['POST'])
def download_prediction():
    """Stream the stored prediction result back to the client; on failure,
    log the error and answer with a JSON failure flag."""
    try:
        return load_prediction_result()
    except Exception as exc:
        logger.error(exc)
        return jsonify({'success': False})
@app_training.route('/model_training/ann', methods=['GET'])
def ann_training():
    """Render the ANN builder page with the available optimizer, activation
    and loss choices; on failure, log and answer with a JSON failure flag."""
    try:
        return render_template('model_training/ann.html', optimizers=OPTIMIZERS,
                               activation_functions=ACTIVATION_FUNCTIONS, loss=REGRESSION_LOSS)
    except Exception as exc:
        logger.error(exc)
        return jsonify({'success': False})
def save_neural_network(checkpoint, name='model_temp.pth.tar'):
    """Persist a torch checkpoint under artifacts/<project_name>/<name>."""
    path = os.path.join(from_root(), 'artifacts', session.get('project_name'))
    # BUG FIX: exists()+mkdir() raced and failed when parent directories were
    # missing; makedirs(exist_ok=True) handles both cases.
    os.makedirs(path, exist_ok=True)
    file_name = os.path.join(path, name)
    torch.save(checkpoint, file_name)
def load_neural_network(checkpoint, name='model_temp.pth.tar'):
    """Load and return the torch checkpoint at artifacts/<project_name>/<name>.

    BUG FIX: the original body was a copy-paste of save_neural_network - it
    called torch.save(), overwriting the stored checkpoint instead of loading
    it, and returned nothing. The unused ``checkpoint`` parameter is kept only
    for backward compatibility with existing call sites.
    """
    del checkpoint  # intentionally unused; kept for interface compatibility
    path = os.path.join(from_root(), 'artifacts', session.get('project_name'))
    file_name = os.path.join(path, name)
    return torch.load(file_name)
def create_layers(data=None, df=None, feature_map={}, typ=None):
    """Translate a layer-spec list into a list of ``torch.nn`` modules.

    Args:
        data: list of dicts, each with a 'type' key ('input', 'linear',
            'batch_normalization', 'dropout', 'output') plus per-type keys
            ('units', 'activation', 'percentage').
        df: feature frame; its column count sets the input layer width.
        feature_map: class-name -> index mapping; its size sets the output
            width for classification.
        typ: 'Regression' or 'Classification' (see NOTE on 'cluestring').

    Returns:
        list of nn.Module layers, or an error string for the clustering case.
    """
    # BUG FIX: guard against the mutable default - a caller mutating the
    # shared {} default would leak state into every later call.
    if feature_map is None or feature_map is create_layers.__defaults__[2]:
        feature_map = dict(feature_map) if feature_map else {}
    layers = []
    activation = {'ReLU': nn.ReLU(),
                  'ELU': nn.ELU(),
                  'LeakyReLU': nn.LeakyReLU(),
                  'Softmax': nn.Softmax(),
                  'PReLU': nn.PReLU(),
                  'SELU': nn.SELU(),
                  'Tanh': nn.Tanh(),
                  'Softplus': nn.Softplus(),
                  'Softmin': nn.Softmin(),
                  'Sigmoid': nn.Sigmoid(),
                  'RReLU': nn.RReLU(),
                  }
    # Width flowing into the next layer; seeded from the first spec's units.
    infer_in = data[0]['units']
    for spec in data:
        if spec['type'] == 'input':
            # Input width comes from the feature frame's column count.
            layers.append(nn.Linear(in_features=df.shape[1], out_features=spec['units']))
            layers.append(activation[spec['activation']])
        if spec['type'] == 'linear':
            layers.append(nn.Linear(infer_in, spec['units']))
            layers.append(activation[spec['activation']])
            infer_in = spec['units']
        if spec['type'] == 'batch_normalization':
            layers.append(nn.BatchNorm1d(num_features=infer_in))
        if spec['type'] == 'dropout':
            layers.append(nn.Dropout(p=spec['percentage']))
        if spec['type'] == 'output':
            if typ == 'Regression':
                # Single regression target.
                layers.append(nn.Linear(in_features=infer_in, out_features=1))
            if typ == 'Classification':
                # One output unit per mapped class.
                layers.append(nn.Linear(in_features=infer_in, out_features=len(feature_map.keys())))
            if typ == 'cluestring':  # NOTE(review): probably meant 'Clustering' - confirm callers
                return 'CLuestring cant be performed using Ann'
    return layers
class CustomTrainData(Dataset):
    """Torch ``Dataset`` over a training-feature DataFrame and its labels.

    Feature and label tensors are materialized once, at construction time,
    via a numpy round-trip.
    """

    def __init__(self, train_df, target):
        self.train_df = train_df
        self.target = target
        self.x = torch.from_numpy(train_df.to_numpy())
        self.y = torch.from_numpy(target.to_numpy())
        self.n_sample = len(self.x)  # equals train_df.shape[0]

    def __getitem__(self, index):
        """Return the (features, label) pair at position ``index``."""
        return self.x[index], self.y[index]

    def __len__(self):
        """Number of samples (rows of ``train_df``)."""
        return self.n_sample
class CustomTestData(Dataset):
    """Torch ``Dataset`` over a test-feature DataFrame and its labels.

    Mirrors ``CustomTrainData``: tensors are built once at construction.
    """

    def __init__(self, test_df, target):
        self.test_df = test_df
        self.target = target
        self.x = torch.from_numpy(test_df.to_numpy())
        self.y = torch.from_numpy(target.to_numpy())
        self.n_sample = len(self.x)  # equals test_df.shape[0]

    def __getitem__(self, index):
        """Return the (features, label) pair at position ``index``."""
        return self.x[index], self.y[index]

    def __len__(self):
        """Number of samples (rows of ``test_df``)."""
        return self.n_sample
def count_parameters(model):
    """Tabulate the trainable parameter count of each named parameter.

    Returns a (PrettyTable, total_count) pair covering only parameters with
    ``requires_grad`` set.
    """
    table = PrettyTable(["Modules", "Parameters"])
    total = 0
    for name, tensor in model.named_parameters():
        if not tensor.requires_grad:
            continue
        count = tensor.numel()
        table.add_row([name, count])
        total += count
    return table, total
def trainTestSplit(df, target, size=0.25):
    """Split ``df`` into train/test feature and label sets.

    NOTE(review): ``test_size`` is ``1 - size``, so the default keeps only
    25% of the rows for training - confirm that this is the intended split.
    """
    features = df.drop(target, axis=1)
    labels = df[target]
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=1 - size, random_state=101)
    return X_train, X_test, y_train, y_test
def main(Data=None, df=None, target=None, size=None, num_epoch=None, typ=None):
    """Train an ANN on *df* according to the front-end config *Data*.

    Data keys read here: 'layerUnits', 'optimizers', 'loss', 'learningRate'.
    Returns (model_info, model_metrice, model_metrice_plot): static facts
    about the model, final metrics, and per-batch series for plotting.
    """
    model_info = {}
    model_metrice = {}
    model_metrice_plot = {}
    feature_map = {}
    if typ == 'Classification':
        # Map each class label to an integer index and encode the target in place.
        for i in enumerate(df[target].unique()):
            feature_map[i[1]] = i[0]
        df[target] = df[target].replace(feature_map)
        model_info['feature_map'] = feature_map
    model_info['split_size'] = size
    model_info['batch_size'] = 32
    X_train, X_test, y_train, y_test = trainTestSplit(df, target, size=size)
    # Data class creation
    trainData = CustomTrainData(X_train, y_train)
    testData = CustomTestData(X_test, y_test)
    # Data loader creation
    train_data_loader = DataLoader(trainData, batch_size=32, shuffle=True)
    test_data_loader = DataLoader(testData, batch_size=32)
    # Model Creation
    model = nn.Sequential(*create_layers(Data['layerUnits'], X_train, feature_map, typ))
    print(model)
    # Optimizer and Loss ---- > front end
    table, total_params = count_parameters(model)
    model_info['table'] = table.get_html_string()
    model_info['total_params'] = total_params
    model_info['optimizer'] = Data['optimizers']
    model_info['loss'] = Data['loss']
    model_info['model'] = list(model)
    # NOTE(review): all four optimizers are instantiated eagerly although only
    # one is selected below.
    optimizer_selection = {'Adam': torch.optim.Adam(model.parameters(), lr=float(Data['learningRate'])),
                           'AdaGrad': torch.optim.Adagrad(model.parameters(), lr=float(Data['learningRate'])),
                           'AdaMax': torch.optim.Adamax(model.parameters(), lr=float(Data['learningRate'])),
                           'RMSProps': torch.optim.RMSprop(model.parameters(), lr=float(Data['learningRate']))}
    optimizer = optimizer_selection[Data['optimizers']]
    if typ == "Classification":
        loss_selection_classification = {'BCEWithLogitsLoss': nn.BCEWithLogitsLoss(), 'CrossEntropyLoss': nn.CrossEntropyLoss()}
        loss_func = loss_selection_classification[Data['loss']]
    if typ == "Regression":
        loss_selection_regression = {'MAE': nn.L1Loss(), 'MSE': nn.MSELoss(), 'Huber Loss': nn.HuberLoss(),
                                     'Smoth L1': nn.SmoothL1Loss()}
        loss_func = loss_selection_regression[Data['loss']]
    print(loss_func)
    # Regression
    # Train
    if typ == "Regression":
        loss_perEpoch = []
        model.train()
        num_epochs = num_epoch
        for epooch in range(num_epochs):
            for batch_idx, data in enumerate(train_data_loader):
                features = data[0].float()
                # Targets reshaped to (batch, 1) to match the single-output head.
                labels = data[1].float().reshape(features.shape[0],1)
                # print(features.shape,labels.shape)
                optimizer.zero_grad()
                output = model(features)
                loss = loss_func(output, labels)
                loss.backward()
                optimizer.step()
                # Record the loss of every second batch for plotting.
                if batch_idx % 2 == 0:
                    loss_perEpoch.append(loss.item())
            print(f'Epoch {epooch}/{num_epochs} Loss: {loss.item()}')
        model_metrice['train_loss'] = loss_perEpoch[-1]
        model_metrice_plot['train_loss'] = loss_perEpoch
        # NOTE(review): these are just x-axis indices, not accuracies.
        model_metrice_plot['train_accuracy'] = [x for x in range(len(loss_perEpoch))]
        # Test
        model.eval()
        test_loss = []
        with torch.no_grad():
            for idx, data in enumerate(test_data_loader):
                features = data[0].float()
                labels = data[1].float().reshape(features.shape[0],1)
                output = model(features)
                test_loss.append(loss_func(output, labels).item())
        model_metrice['test_loss'] = np.mean(test_loss)
        model_metrice['test_accuracy'] = None
        model_metrice_plot['test_loss'] = test_loss
        model_metrice_plot['test_accuracy'] = [x for x in range(len(test_loss))]
        print("Test Loss :", np.mean(test_loss))
    # Classification
    if typ == 'Classification':
        # Train
        loss_perEpoch = []
        train_acc = []
        model.train()
        num_epochs = num_epoch
        for epooch in range(num_epochs):
            for batch_idx, data in enumerate(train_data_loader):
                features = data[0].float()
                labels = data[1]
                # print(features,labels)
                optimizer.zero_grad()
                output = model(features)
                loss = loss_func(output, labels)
                loss.backward()
                optimizer.step()
                # Sample loss/accuracy every 8th batch for plotting.
                if batch_idx % 8 == 0:
                    train_acc.append((torch.argmax(output, axis=1) == labels.squeeze().long()).float().mean())
                    loss_perEpoch.append(loss.item())
            print(f'Epoch {epooch}/{num_epochs} Loss: {loss.item()}')
        model_metrice['train_loss'] = loss_perEpoch[-1]
        model_metrice_plot['train_loss'] = loss_perEpoch
        model_metrice_plot['train_accuracy'] = train_acc
        # Test
        model.eval()
        test_loss = []
        test_acc = []
        with torch.no_grad():
            for idx, data in enumerate(test_data_loader):
                features = data[0].float()
                labels = data[1]
                output = model(features)
                test_acc.append((torch.argmax(output, axis=1) == labels.squeeze().long()).float().mean())
                test_loss.append(loss_func(output, labels).item())
        print("Test Loss :", np.mean(test_loss), " ", "Test Accuracy :", np.mean(test_acc))
        model_metrice['test_accuracy'] = np.mean(test_acc)
        model_metrice['test_loss'] = np.mean(test_loss)
        model_metrice_plot['test_loss'] = test_loss
        model_metrice_plot['test_accuracy'] = [x for x in range(len(test_loss))]
    return model_info, model_metrice, model_metrice_plot
@app_training.route('/model_training/ann', methods=['POST'])
def ann_model_training():
    """Train an ANN from the posted front-end config and render its summary."""
    try:
        payload = request.get_json(force=True)
        print(payload)
        df = load_data()
        target = session['target_column']
        # Project type 1 is regression; everything else is treated as classification here.
        typ = 'Regression' if session['project_type'] == 1 else 'Classification'
        model_info, model_metrice, model_metrice_plot = main(
            payload, df, target=target,
            size=float(payload['trainSplitPercent']),
            num_epoch=int(payload['epoch']), typ=typ)
        graphJSON = {
            'train': PlotlyHelper.line(df, x=model_metrice_plot['train_accuracy'],
                                       y=model_metrice_plot['train_loss']),
            'test': PlotlyHelper.line(df, x=model_metrice_plot['test_accuracy'],
                                      y=model_metrice_plot['test_loss']),
        }
        return render_template('model_training/ann_summary.html', model_info=model_info,
                               model_metrice=model_metrice, status="success", graphJSON=graphJSON)
    except Exception as e:
        logger.error(e)
        return jsonify({'success': False})
@app_training.route('/model_training/cnn', methods=['GET'])
def cnn_training():
    """Serve the CNN configuration page with its selectable option lists."""
    try:
        options = dict(optimizers=OPTIMIZERS, poolings=POOLING,
                       activation_functions=ACTIVATION_FUNCTIONS, loss=REGRESSION_LOSS)
        return render_template('model_training/cnn.html', **options)
    except Exception as exc:
        logger.error(exc)
        return jsonify({'success': False})
def allowed_file(filename):
    """True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app_training.route('/model_training/upload_zip', methods=['POST'])
def cnn_model_training():
    """Accept a zip upload for CNN training and store it in UPLOAD_FOLDER.

    Returns a JSON flag; every rejection path now answers explicitly.
    The original fell through after printing its diagnostics, so a
    missing part raised a KeyError and an empty/disallowed file made the
    view return None (a Flask 500).
    """
    try:
        if 'zip_file' not in request.files:
            print('No file part')
            return jsonify({'success': False})
        file = request.files['zip_file']
        if file.filename == '':
            print('No selected file')
            return jsonify({'success': False})
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(UPLOAD_FOLDER, filename))
            return jsonify({'success': True})
        # Disallowed extension.
        return jsonify({'success': False})
    except Exception as e:
        logger.error(e)
        return jsonify({'success': False})
| from flask import Blueprint, redirect, url_for, render_template, request, session
from src.constants.model_params import Ridge_Params, Lasso_Params, ElasticNet_Params, RandomForestRegressor_Params, \
SVR_params, AdabootRegressor_Params, \
GradientBoostRegressor_Params
from src.constants.model_params import KmeansClustering_Params, DbscanClustering_Params, AgglomerativeClustering_Params
from src.constants.model_params import LogisticRegression_Params, SVC_Params, KNeighborsClassifier_Params, \
DecisionTreeClassifier_Params, RandomForestClassifier_Params, GradientBoostingClassifier_Params, \
AdaBoostClassifier_Params
from src.constants.constants import ACTIVATION_FUNCTIONS, CLASSIFICATION_MODELS, CLUSTERING_MODELS, OPTIMIZERS, \
REGRESSION_LOSS, POOLING
from flask.json import jsonify
from src.constants.model_params import DecisionTreeRegressor_Params, LinearRegression_Params
from src.model.custom.classification_models import ClassificationModels
from src.model.custom.regression_models import RegressionModels
from src.model.custom.clustering_models import ClusteringModels
from src.preprocessing.preprocessing_helper import Preprocessing
from src.constants.constants import REGRESSION_MODELS
from src.utils.common.prediction_helper import make_prediction
from src.utils.databases.mysql_helper import MySqlHelper
from werkzeug.utils import secure_filename
import os
from src.utils.common.common_helper import get_param_value, load_prediction_result, load_project_model, \
read_config, save_prediction_result, save_project_model
import pandas as pd
from src.utils.common.data_helper import load_data
from src.model.auto.Auto_classification import ModelTrain_Classification
from src.model.auto.Auto_regression import ModelTrain_Regression
from src.feature_engineering.feature_engineering_helper import FeatureEngineering
from loguru import logger
from from_root import from_root
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, accuracy_score, precision_score, \
f1_score, recall_score
from src.utils.common.project_report_helper import ProjectReports
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import numpy as np
from sklearn.model_selection import train_test_split
from prettytable import PrettyTable
from src.utils.common.plotly_helper import PlotlyHelper
# Blueprint exposing all model-training and prediction routes.
app_training = Blueprint('training', __name__)
config_args = read_config("./config.yaml")
mysql = MySqlHelper.get_connection_obj()
# General application log file; path segments come from config.yaml.
log_path = os.path.join(from_root(), config_args['logs']['logger'], config_args['logs']['generallogs_file'])
logger.add(sink=log_path, format="[{time:YYYY-MM-DD HH:mm:ss.SSS} - {level} - {module} ] - {message}", level="INFO")
UPLOAD_FOLDER = config_args['dir_structure']['upload_folder']
# Only zip archives may be uploaded for CNN training.
ALLOWED_EXTENSIONS = set(['zip'])
@app_training.route('/model_training/<action>', methods=['GET'])
def model_training(action):
    """Serve the GET pages of the training workflow.

    *action* selects the page: 'help', 'auto_training', 'custom_training'
    or 'final_train_model'.  Requires an active project ('pid' in session)
    and loaded data; otherwise redirects to the home page.
    """
    try:
        if 'pid' in session:
            df = load_data()
            if df is not None:
                # May legitimately be None for clustering projects.  (The
                # original assigned this three times; the net effect was a
                # single unconditional assignment.)
                target_column = session['target_column']
                cols_ = [col for col in df.columns if col != target_column]
                # Check data contain any categorical independent features
                Categorical_columns = Preprocessing.col_seperator(df.loc[:, cols_], "Categorical_columns")
                if len(Categorical_columns.columns) > 0:
                    return render_template('model_training/auto_training.html', project_type=session['project_type'],
                                           target_column=session['target_column'], status="error",
                                           msg="Data contain some categorical indepedent features, please perform encoding first")
                """Check If Project type is Regression or Classificaion and target Columns is not Selected"""
                if session['project_type'] != 3 and session['target_column'] is None:
                    return redirect('/target-column')
                if action == 'help':
                    return render_template('model_training/help.html')
                elif action == 'auto_training':
                    logger.info('Redirect To Auto Training Page')
                    ProjectReports.insert_record_ml('Redirect To Auto Training Page')
                    if session['project_type'] == 3:
                        return render_template('model_training/auto_training.html',
                                               project_type=session['project_type'],
                                               target_column=session['target_column'], status="error",
                                               msg="Auto Training is not available for Clustering!!!")
                    return render_template('model_training/auto_training.html', project_type=session['project_type'],
                                           target_column=session['target_column'])
                elif action == 'custom_training' or action == 'final_train_model':
                    # Block manual training while a live scheduled run exists.
                    # NOTE(review): query is built by f-string interpolation of
                    # a session value - prefer a parameterized query.
                    query = f""" select a.pid ProjectId , a.TargetColumn TargetName,
                                       a.Model_Name ModelName,
                                       b.Schedule_date,
                                       b.schedule_time ,
                                       a.Model_Trained,
                                       b.train_status ,
                                       b.email,
                                       b.deleted
                                       from tblProjects as a
                                       join tblProject_scheduler as b on a.Pid = b.ProjectId where b.ProjectId = '{session.get('project_name')}'
                                       and b.deleted=0
                                       """
                    result = mysql.fetch_one(query)
                    if result is not None:
                        return render_template('scheduler/training_blocker.html')
                    logger.info('Redirect To Custom Training Page')
                    ProjectReports.insert_record_ml('Redirect To Custom Training Page')
                    try:
                        if session['project_type'] == 2:
                            return render_template('model_training/classification.html', action=action,
                                                   models=CLASSIFICATION_MODELS)
                        elif session['project_type'] == 1:
                            return render_template('model_training/regression.html', action=action,
                                                   models=REGRESSION_MODELS)
                        elif session['project_type'] == 3:
                            return render_template('model_training/clustering.html', action=action,
                                                   models=CLUSTERING_MODELS)
                        else:
                            return render_template('model_training/custom_training.html')
                    except Exception as e:
                        logger.error(e)
                        return render_template('model_training/custom_training.html')
                else:
                    return 'Non-Implemented Action'
            else:
                return redirect('/')
        else:
            # Bug fix: was ``redirect(url_for('/'))`` - url_for() takes an
            # endpoint name, not a path, so it raised BuildError and the user
            # got the 500 page instead of the home page.
            return redirect('/')
    except Exception as e:
        logger.error('Error in Model Training')
        ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
        return render_template('500.html', exception=e)
@app_training.route('/model_training/<action>', methods=['POST'])
def model_training_post(action):
    """Handle the POST side of the training workflow.

    *action* is one of: 'help', 'custom_training' (train one user-configured
    model on a train/test split), 'auto_training' (compare models
    automatically), or 'final_train_model' (retrain the chosen model on the
    full data and mark the project as trained).
    """
    try:
        if 'pid' in session:
            df = load_data()
            model = None
            # NOTE(review): ``range`` shadows the builtin inside this function.
            range = None
            random_state = None
            if df is not None:
                if action == 'help':
                    return render_template('model_training/help.html')
                elif action == 'custom_training':
                    try:
                        model = request.form['model']
                        range = int(request.form['range'])
                        # These two estimators take no random_state parameter.
                        if model != "KNeighborsClassifier" and model != "SVR":
                            random_state = int(request.form['random_state'])
                        logger.info('Submitted Custom Training Page')
                        ProjectReports.insert_record_ml('Submitted Custom Training Page',
                                                        f"Model:{model}; Range:{range}; Random_State: {random_state}")
                        target = session['target_column']
                        if session['project_type'] != 3:
                            # Supervised (regression / classification) path.
                            X = df.drop(target, axis=1)
                            y = df[target]
                            train_model_fun = None
                            X_train, X_test, y_train, y_test = FeatureEngineering.train_test_Split(cleanedData=X,
                                                                                                   label=y,
                                                                                                   train_size=range / 100,
                                                                                                   random_state=random_state)
                            model_params = {}
                            # Map the requested model name to its parameter
                            # schema and training helper.
                            if model == "LinearRegression":
                                Model_Params = LinearRegression_Params
                                train_model_fun = RegressionModels.linear_regression_regressor
                            elif model == "Ridge":
                                Model_Params = Ridge_Params
                                train_model_fun = RegressionModels.ridge_regressor
                            elif model == "Lasso":
                                Model_Params = Lasso_Params
                                train_model_fun = RegressionModels.lasso_regressor
                            elif model == "ElasticNet":
                                Model_Params = ElasticNet_Params
                                train_model_fun = RegressionModels.elastic_net_regressor
                            elif model == "DecisionTreeRegressor":
                                Model_Params = DecisionTreeRegressor_Params
                                train_model_fun = RegressionModels.decision_tree_regressor
                            elif model == "RandomForestRegressor":
                                Model_Params = RandomForestRegressor_Params
                                train_model_fun = RegressionModels.random_forest_regressor
                            elif model == "SVR":
                                Model_Params = SVR_params
                                train_model_fun = RegressionModels.support_vector_regressor
                            elif model == "AdaBoostRegressor":
                                Model_Params = AdabootRegressor_Params
                                train_model_fun = RegressionModels.ada_boost_regressor
                            elif model == "GradientBoostingRegressor":
                                Model_Params = GradientBoostRegressor_Params
                                train_model_fun = RegressionModels.gradient_boosting_regressor
                            elif model == "LogisticRegression":
                                Model_Params = LogisticRegression_Params
                                train_model_fun = ClassificationModels.logistic_regression_classifier
                            elif model == "SVC":
                                Model_Params = SVC_Params
                                train_model_fun = ClassificationModels.support_vector_classifier
                            elif model == "KNeighborsClassifier":
                                print('here')
                                Model_Params = KNeighborsClassifier_Params
                                train_model_fun = ClassificationModels.k_neighbors_classifier
                            elif model == "DecisionTreeClassifier":
                                Model_Params = DecisionTreeClassifier_Params
                                train_model_fun = ClassificationModels.decision_tree_classifier
                            elif model == "RandomForestClassifier":
                                Model_Params = RandomForestClassifier_Params
                                train_model_fun = ClassificationModels.random_forest_classifier
                            elif model == "AdaBoostClassifier":
                                Model_Params = AdaBoostClassifier_Params
                                train_model_fun = ClassificationModels.ada_boost_classifier
                            elif model == "GradientBoostClassifier":
                                Model_Params = GradientBoostingClassifier_Params
                                train_model_fun = ClassificationModels.gradient_boosting_classifier
                            else:
                                return 'Non-Implemented Action'
                            # Coerce each submitted form value to the type its
                            # parameter schema declares.
                            for param in Model_Params:
                                model_params[param['name']] = get_param_value(param, request.form[param['name']])
                            trained_model = train_model_fun(X_train, y_train, True, **model_params)
                            """Save Trained Model"""
                            save_project_model(trained_model)
                            reports = [{"key": "Model Name", "value": model},
                                       {"key": "Data Size", "value": len(df)},
                                       {"key": "Trained Data Size", "value": len(X_train)},
                                       {"key": "Test Data Size", "value": len(X_test)}]
                            scores = []
                            # Regression
                            if trained_model is not None and session['project_type'] == 1:
                                y_pred = trained_model.predict(X_test)
                                scores.append({"key": "r2_score", "value": r2_score(y_test, y_pred)})
                                scores.append(
                                    {"key": "mean_absolute_error", "value": mean_absolute_error(y_test, y_pred)})
                                scores.append(
                                    {"key": "mean_squared_error", "value": mean_squared_error(y_test, y_pred)})
                                # Model Name Set in table while training
                                query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
                                mysql.update_record(query)
                                return render_template('model_training/model_result.html', action=action,
                                                       status="success",
                                                       reports=reports, scores=scores, model_params=model_params)
                            # Classification
                            if trained_model is not None and session['project_type'] == 2:
                                y_pred = trained_model.predict(X_test)
                                scores.append({"key": "Accuracy", "value": accuracy_score(y_test, y_pred)})
                                scores.append({"key": "Classes", "value": df[target].unique()})
                                scores.append(
                                    {"key": "Precision", "value": precision_score(y_test, y_pred, average=None)})
                                scores.append({"key": "Recall", "value": recall_score(y_test, y_pred, average=None)})
                                scores.append({"key": "F1_score", "value": f1_score(y_test, y_pred, average=None)})
                                # Model Name Set in table while training
                                query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
                                result = mysql.update_record(query)
                                return render_template('model_training/model_result.html', action=action,
                                                       status="success",
                                                       reports=reports, scores=scores, model_params=model_params)
                        elif session['project_type'] == 3:
                            # Clustering path: no target, the whole frame is used.
                            X = df
                            train_model_fun = None
                            model_params = {}
                            if model == "KMeans":
                                Model_Params = KmeansClustering_Params
                                train_model_fun = ClusteringModels.kmeans_clustering
                            elif model == "DBSCAN":
                                Model_Params = DbscanClustering_Params
                                train_model_fun = ClusteringModels.dbscan_clustering
                            elif model == "AgglomerativeClustering":
                                Model_Params = AgglomerativeClustering_Params
                                train_model_fun = ClusteringModels.agglomerative_clustering
                            else:
                                return 'Non-Implemented Action'
                            for param in Model_Params:
                                model_params[param['name']] = get_param_value(param, request.form[param['name']])
                            trained_model, y_pred = train_model_fun(X, True, **model_params)
                            """Save Trained Model"""
                            save_project_model(trained_model)
                            reports = [{"key": "Model Name", "value": model},
                                       {"key": "Data Size", "value": len(df)},
                                       {"key": "Train Data Size", "value": len(X)},
                                       {"key": "Test Data Size", "value": 0}]
                            scores = []
                            # Clustering
                            if trained_model is not None and session['project_type'] == 3:
                                scores.append({"key": "Predicted Classes",
                                               "value": pd.DataFrame(data=y_pred, columns=['y_pred'])[
                                                   'y_pred'].unique()})
                                # Model Name Set in table while training
                                query = f'''Update tblProjects Set Model_Name="{model}", Model_Trained=0 Where Id="{session.get('pid')}"'''
                                result = mysql.update_record(query)
                                return render_template('model_training/model_result.html', action=action,
                                                       status="success",
                                                       reports=reports, scores=scores, model_params=model_params)
                            else:
                                raise Exception("Model Couldn't train, please check parametes")
                    except Exception as e:
                        logger.error('Error Submitted Custom Training Page')
                        ProjectReports.insert_record_ml('Error Submitted Custom Training Page',
                                                        f"Model:{model}; Range:{range}; Random_State: {random_state}",
                                                        '', 0, str(e))
                        # Re-render the matching form with the error message.
                        if session['project_type'] == 2:
                            return render_template('model_training/classification.html', action=action,
                                                   models=CLASSIFICATION_MODELS, status="error", msg=str(e))
                        elif session['project_type'] == 1:
                            return render_template('model_training/regression.html', action=action,
                                                   models=REGRESSION_MODELS, status="error", msg=str(e))
                        else:
                            return render_template('model_training/clustering.html', action=action,
                                                   models=CLUSTERING_MODELS, status="error", msg=str(e))
                elif action == "auto_training":
                    try:
                        target = session['target_column']
                        if target is None:
                            return redirect(url_for('/target-column'))
                        # data_len = len(df)
                        # data_len = 10000 if data_len > 10000 else int(len(df) * 0.9)
                        # df = df.sample(frac=1).loc[:data_len, :]
                        trainer = None
                        X = df.drop(target, axis=1)
                        y = df[target]
                        X_train, X_test, y_train, y_test = FeatureEngineering.train_test_Split(cleanedData=X,
                                                                                               label=y,
                                                                                               train_size=0.75,
                                                                                               random_state=101)
                        if session['project_type'] == 1:
                            trainer = ModelTrain_Regression(X_train, X_test, y_train, y_test, True)
                            result = trainer.results()
                            result = result.to_html()
                            return render_template('model_training/auto_training.html', status="success",
                                                   project_type=session['project_type'],
                                                   target_column=session['target_column'], train_done=True,
                                                   result=result)
                        elif session['project_type'] == 2:
                            trainer = ModelTrain_Classification(X_train, X_test, y_train, y_test, True)
                            result = trainer.results()
                            result = result.to_html()
                            return render_template('model_training/auto_training.html', status="success",
                                                   project_type=session['project_type'],
                                                   target_column=session['target_column'], train_done=True,
                                                   result=result)
                    except Exception as ex:
                        return render_template('model_training/auto_training.html', status="error",
                                               project_type=session['project_type'],
                                               target_column=session['target_column'], msg=str(ex))
                elif action == 'final_train_model':
                    try:
                        logger.info('Final Train Model')
                        ProjectReports.insert_record_ml('Final Train Model')
                        query = f'''select Model_Name from tblProjects Where Id="{session.get('pid')}"'''
                        model_name = mysql.fetch_one(query)[0]
                        if session['project_type'] != 3:
                            target = session['target_column']
                            X = df.drop(target, axis=1)
                            y = df[target]
                            model = load_project_model()
                            if model is None:
                                return render_template('model_training/model_result.html', action=action,
                                                       status="error",
                                                       msg="Model is not found, please train model again")
                            else:
                                # Reuse the hyper-parameters of the previously
                                # trained model for the final full-data fit.
                                model_params = {}
                                for key, value in model.get_params().items():
                                    model_params[key] = value
                                if model_name == "LinearRegression":
                                    train_model_fun = RegressionModels.linear_regression_regressor
                                elif model_name == "Ridge":
                                    train_model_fun = RegressionModels.ridge_regressor
                                elif model_name == "Lasso":
                                    train_model_fun = RegressionModels.lasso_regressor
                                elif model_name == "ElasticNet":
                                    train_model_fun = RegressionModels.elastic_net_regressor
                                elif model_name == "DecisionTreeRegressor":
                                    train_model_fun = RegressionModels.decision_tree_regressor
                                elif model_name == "RandomForestRegressor":
                                    train_model_fun = RegressionModels.random_forest_regressor
                                elif model_name == "SVR":
                                    train_model_fun = RegressionModels.support_vector_regressor
                                elif model_name == "AdaBoostRegressor":
                                    train_model_fun = RegressionModels.ada_boost_regressor
                                elif model_name == "GradientBoostingRegressor":
                                    train_model_fun = RegressionModels.gradient_boosting_regressor
                                elif model_name == "LogisticRegression":
                                    train_model_fun = ClassificationModels.logistic_regression_classifier
                                elif model_name == "SVC":
                                    train_model_fun = ClassificationModels.support_vector_classifier
                                elif model_name == "KNeighborsClassifier":
                                    train_model_fun = ClassificationModels.k_neighbors_classifier
                                elif model_name == "DecisionTreeClassifier":
                                    train_model_fun = ClassificationModels.decision_tree_classifier
                                elif model_name == "RandomForestClassifier":
                                    train_model_fun = ClassificationModels.random_forest_classifier
                                elif model_name == "AdaBoostClassifier":
                                    train_model_fun = ClassificationModels.ada_boost_classifier
                                elif model_name == "GradientBoostClassifier":
                                    train_model_fun = ClassificationModels.gradient_boosting_classifier
                                else:
                                    return 'Non-Implemented Action'
                                trained_model = train_model_fun(X, y, True, **model_params)
                                """Save Final Model"""
                                save_project_model(trained_model, 'model.pkl')
                                query = f'''Update tblProjects Set Model_Trained=1 Where Id="{session.get('pid')}"'''
                                mysql.update_record(query)
                                logger.info('Final Training Done')
                                ProjectReports.insert_record_ml('Final Training Done')
                                return render_template('model_training/congrats.html')
                        elif session['project_type'] == 3:
                            X = df
                            model = load_project_model()
                            if model is None:
                                return render_template('model_training/model_result.html', action=action,
                                                       status="error",
                                                       msg="Model is not found, please train model again")
                            else:
                                model_params = {}
                                for key, value in model.get_params().items():
                                    model_params[key] = value
                                if model_name == "KMeans":
                                    train_model_fun = ClusteringModels.kmeans_clustering
                                elif model_name == "DBSCAN":
                                    train_model_fun = ClusteringModels.dbscan_clustering
                                elif model_name == "AgglomerativeClustering":
                                    train_model_fun = ClusteringModels.agglomerative_clustering
                                else:
                                    return 'Non Implemented mtd'
                                trained_model, y_pred = train_model_fun(X, True, **model_params)
                                """Save Trained Model"""
                                save_project_model(trained_model, 'model.pkl')
                                query = f'''Update tblProjects Set Model_Trained=1 Where Id="{session.get('pid')}"'''
                                mysql.update_record(query)
                                logger.info('Final Training Done')
                                ProjectReports.insert_record_ml('Final Training Done')
                                return render_template('model_training/congrats.html')
                    except Exception as e:
                        logger.error('Error in Model Training Submit')
                        ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
                        # NOTE(review): this render_template result is discarded;
                        # a ``return`` looks to be missing, so on error the
                        # request falls through to the branches below.
                        render_template('model_training/model_result.html', action=action, status="error",
                                        msg="Model is not found, please train model again")
                if action == "Scheduled_model":
                    # NOTE(review): placeholder branch - path is computed but unused.
                    path = os.path.join(from_root(), 'artifacts', 'model_temp.pkl')
                    pass
                else:
                    return "Non Implemented Method"
            else:
                logger.critical('DataFrame has no data')
                return redirect('/')
    except Exception as e:
        logger.error('Error in Model Training Submit')
        ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
        return render_template('500.html', exception=e)
@app_training.route('/congrats', methods=['GET', 'POST'])
def congrats():
    """Post-training page: reloads the saved model, then renders congrats.html."""
    try:
        if 'pid' in session:
            df = load_data()
            if df is not None:
                target = session['target_column']
                X = df.drop(target, axis=1)
                y = df[target]
                model = load_project_model()
                if model is None:
                    return render_template('model_training/model_result.html', status="error",
                                           msg="Model is not found, please train model again")
                else:
                    # Bug fix: get_params() returns a dict, so the original
                    # ``for key, value in model.get_params():`` tried to unpack
                    # bare keys and raised ValueError.  Iterate .items(), as
                    # the final_train_model handler already does.
                    for key, value in model.get_params().items():
                        # NOTE(review): exec-binding each hyper-parameter has no
                        # visible effect here; candidate for removal.
                        exec(key + "=value")
        logger.info('Loaded Congrats Page')
        ProjectReports.insert_record_ml('Loaded Congrats Page')
        if request.method == "GET":
            return render_template('model_training/congrats.html')
        else:
            return render_template('model_training/congrats.html')
    except Exception as e:
        logger.error('Error in Model Training Submit')
        ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
        return render_template('500.html', exception=e)
@app_training.route('/prediction', methods=['GET', 'POST'])
def prediction():
    """Prediction page: GET shows the upload form (if the model is trained);
    POST accepts a csv/tsv/json file, runs the model over it, and renders the
    result table.  The uploaded file is always deleted afterwards.
    """
    try:
        if 'pid' in session:
            file_path = ""
            logger.info('Loaded Prediction Page')
            ProjectReports.insert_record_ml('Loaded Prediction Page')
            if request.method == "GET":
                # NOTE(review): f-string SQL with a session value - prefer a
                # parameterized query.
                is_trained = mysql.fetch_all(
                    f"SELECT * FROM tblProjects WHERE Id ={session.get('pid')} AND Model_Trained=1")
                if is_trained is None:
                    return render_template('model_training/prediction_page.html', status="error",
                                           msg="your model is not trained, please train model first")
                else:
                    return render_template('model_training/prediction_page.html', status="success")
            else:
                try:
                    f = request.files['file']
                    # Shadows the module-level constant on purpose: predictions
                    # accept data files, not zip archives.
                    ALLOWED_EXTENSIONS = ['csv', 'tsv', 'json']
                    msg = ""
                    if len(request.files) == 0:
                        msg = 'Please select a file to upload'
                    elif f.filename.strip() == '':
                        msg = 'Please select a file to upload'
                    elif f.filename.rsplit('.', 1)[1].lower() not in ALLOWED_EXTENSIONS:
                        msg = 'This file format is not allowed, please select mentioned one'
                    if msg:
                        logger.error(msg)
                        return render_template('model_training/prediction_page.html', status="error", msg=msg)
                    filename = secure_filename(f.filename)
                    file_path = os.path.join(config_args['dir_structure']['upload_folder'], filename)
                    f.save(file_path)
                    # Pick the pandas reader matching the extension.
                    if file_path.endswith('.csv'):
                        df = pd.read_csv(file_path)
                    elif file_path.endswith('.tsv'):
                        df = pd.read_csv(file_path, sep='\t')
                    elif file_path.endswith('.json'):
                        df = pd.read_json(file_path)
                    else:
                        msg = 'This file format is currently not supported'
                        logger.info(msg)
                        return render_template('model_training/prediction_page.html', status="error", msg=msg)
                    prediction = make_prediction(df)
                    data = prediction.to_html()
                    if len(data) > 0:
                        save_prediction_result(prediction)
                        return render_template('model_training/prediction_result.html', status="success", data=data)
                    else:
                        return render_template('model_training/prediction_result.html', status="error",
                                               msg="There is some issue, coudn't perform prediction. Please check your data")
                except Exception as e:
                    logger.error('Error in Model Training Submit')
                    ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
                    return render_template('model_training/prediction_page.html', status="error", msg=str(e))
                finally:
                    # Remove the upload whether prediction succeeded or not.
                    if file_path:
                        os.remove(file_path)
        else:
            logger.error('Project id not found, redirect to home page')
            ProjectReports.insert_record_ml('Project id not found, redirect to home page', '', '', 0, 'Error')
            return redirect('/')
    except Exception as e:
        logger.error(e)
        return redirect('/')
@app_training.route('/download_prediction', methods=['POST'])
def download_prediction():
    """Return the stored prediction result to the client."""
    try:
        return load_prediction_result()
    except Exception as exc:
        logger.error(exc)
        return jsonify({'success': False})
@app_training.route('/model_training/ann', methods=['GET'])
def ann_training():
    """Serve the ANN configuration page with its selectable option lists."""
    try:
        options = dict(optimizers=OPTIMIZERS,
                       activation_functions=ACTIVATION_FUNCTIONS,
                       loss=REGRESSION_LOSS)
        return render_template('model_training/ann.html', **options)
    except Exception as exc:
        logger.error(exc)
        return jsonify({'success': False})
def save_neural_network(checkpoint, name='model_temp.pth.tar'):
    """Persist a torch checkpoint under the current project's artifact dir.

    Args:
        checkpoint: Object to serialize via ``torch.save``.
        name: File name inside ``<root>/artifacts/<project_name>/``.
    """
    path = os.path.join(from_root(), 'artifacts', session.get('project_name'))
    # makedirs(exist_ok=True) replaces exists()+mkdir: it also creates missing
    # parents and avoids the check-then-create race.
    os.makedirs(path, exist_ok=True)
    file_name = os.path.join(path, name)
    torch.save(checkpoint, file_name)
def load_neural_network(checkpoint=None, name='model_temp.pth.tar'):
    """Load and return a torch checkpoint from the project's artifact dir.

    Bug fix: the original body was a copy-paste of save_neural_network and
    called ``torch.save(checkpoint, file_name)``, overwriting the file it was
    meant to read and returning nothing.  The (unused) ``checkpoint``
    parameter is kept, now with a default, for call-site compatibility.

    Args:
        checkpoint: Ignored; retained for backward compatibility.
        name: File name inside ``<root>/artifacts/<project_name>/``.

    Returns:
        The object deserialized by ``torch.load``.
    """
    path = os.path.join(from_root(), 'artifacts', session.get('project_name'))
    os.makedirs(path, exist_ok=True)
    file_name = os.path.join(path, name)
    return torch.load(file_name)
def create_layers(data=None, df=None, feature_map=None, typ=None):
    """Translate the front-end layer spec into a list of torch ``nn`` modules.

    Args:
        data: List of layer dicts; each has 'type' plus, per type, 'units',
            'activation' or 'percentage'.  The first entry is expected to be
            the input layer (its 'units' seeds the width tracking).
        df: Training features; only ``df.shape[1]`` (input width) is read.
        feature_map: Class-name -> index mapping; its size is the output
            width for classification.  Defaults to an empty dict.
        typ: 'Regression', 'Classification' or 'cluestring'.

    Returns:
        A list of ``nn.Module`` layers, or an error string for 'cluestring'.
    """
    # Bug fix: original signature used the mutable default ``feature_map={}``,
    # shared across calls; the None sentinel is behaviorally equivalent.
    if feature_map is None:
        feature_map = {}
    layers = []
    activation = {'ReLU': nn.ReLU(),
                  'ELU': nn.ELU(),
                  'LeakyReLU': nn.LeakyReLU(),
                  'Softmax': nn.Softmax(),
                  'PReLU': nn.PReLU(),
                  'SELU': nn.SELU(),
                  'Tanh': nn.Tanh(),
                  'Softplus': nn.Softplus(),
                  'Softmin': nn.Softmin(),
                  'Sigmoid': nn.Sigmoid(),
                  'RReLU': nn.RReLU(),
                  }
    # Width of the previous layer; seeded with the input layer's units.
    infer_in = data[0]['units']
    for i in data:
        if i['type'] == 'input':
            layers.append(nn.Linear(in_features=df.shape[1], out_features=i['units']))
            layers.append(activation[i['activation']])
        if i['type'] == 'linear':
            layers.append(nn.Linear(infer_in, i['units']))
            layers.append(activation[i['activation']])
            infer_in = i['units']
        if i['type'] == 'batch_normalization':
            layers.append(nn.BatchNorm1d(num_features=infer_in))
        if i['type'] == 'dropout':
            layers.append(nn.Dropout(p=i['percentage']))
        if i['type'] == 'output':
            if typ == 'Regression':
                # Single continuous output.
                layers.append(nn.Linear(in_features=infer_in, out_features=1))
            if typ == 'Classification':
                # One logit per mapped class.
                layers.append(nn.Linear(in_features=infer_in, out_features=len(feature_map)))
        if typ == 'cluestring':
            return 'CLuestring cant be performed using Ann'
    return layers
class CustomTrainData(Dataset):
    """Torch ``Dataset`` wrapping a training dataframe and its target column."""

    def __init__(self, train_df, target):
        self.train_df = train_df
        self.target = target
        # Convert once up front; __getitem__ then just indexes tensors.
        self.x = torch.from_numpy(train_df.to_numpy())
        self.y = torch.from_numpy(target.to_numpy())
        self.n_sample = train_df.shape[0]

    def __getitem__(self, index):
        # One (features, label) pair.
        return self.x[index], self.y[index]

    def __len__(self):
        return self.n_sample
class CustomTestData(Dataset):
    """Torch ``Dataset`` wrapping a test dataframe and its target column."""

    def __init__(self, test_df, target):
        self.test_df = test_df
        self.target = target
        # Convert once up front; __getitem__ then just indexes tensors.
        self.x = torch.from_numpy(test_df.to_numpy())
        self.y = torch.from_numpy(target.to_numpy())
        self.n_sample = test_df.shape[0]

    def __getitem__(self, index):
        # One (features, label) pair.
        return self.x[index], self.y[index]

    def __len__(self):
        return self.n_sample
def count_parameters(model):
    """Tabulate trainable parameters per named tensor; return (table, total)."""
    table = PrettyTable(["Modules", "Parameters"])
    total_params = 0
    for name, parameter in model.named_parameters():
        # Frozen tensors are excluded from both the table and the total.
        if parameter.requires_grad:
            numel = parameter.numel()
            table.add_row([name, numel])
            total_params += numel
    return table, total_params
def trainTestSplit(df, target, size=0.25):
    """Split *df* into (X_train, X_test, y_train, y_test) on *target*.

    NOTE(review): ``test_size=1 - size`` means *size* is the TRAIN
    fraction — with the default 0.25 only a quarter of the rows end up in
    the training set. Confirm this matches the caller's intent.
    The seed is fixed (``random_state=101``) so splits are reproducible.
    """
    features = df.drop(target, axis=1)
    labels = df[target]
    return train_test_split(features, labels, test_size=1 - size, random_state=101)
def main(Data=None, df=None, target=None, size=None, num_epoch=None, typ=None):
    """Build, train and evaluate an ANN from a front-end layer spec.

    Args:
        Data: request payload from the UI; the keys read here are
            'layerUnits', 'optimizers', 'loss' and 'learningRate'.
        df: full dataset as a pandas DataFrame (features + target column).
        target: name of the target column in *df*.
        size: train split fraction forwarded to trainTestSplit.
        num_epoch: number of training epochs.
        typ: 'Regression' or 'Classification' — selects loss, output head
            and the metric bookkeeping below.

    Returns:
        (model_info, model_metrice, model_metrice_plot): model/run metadata,
        final scalar metrics, and per-step series for plotting.
    """
    model_info = {}
    model_metrice = {}
    model_metrice_plot = {}
    feature_map = {}
    if typ == 'Classification':
        # Map each class label to a contiguous integer index and re-encode
        # the target column in place.
        for i in enumerate(df[target].unique()):
            feature_map[i[1]] = i[0]
        df[target] = df[target].replace(feature_map)
    # feature_map stays empty for regression runs.
    model_info['feature_map'] = feature_map
    model_info['split_size'] = size
    model_info['batch_size'] = 32
    X_train, X_test, y_train, y_test = trainTestSplit(df, target, size=size)
    # Data class creation
    trainData = CustomTrainData(X_train, y_train)
    testData = CustomTestData(X_test, y_test)
    # Data loader creation
    train_data_loader = DataLoader(trainData, batch_size=32, shuffle=True)
    test_data_loader = DataLoader(testData, batch_size=32)
    # Model Creation
    model = nn.Sequential(*create_layers(Data['layerUnits'], X_train, feature_map, typ))
    print(model)
    # Optimizer and Loss ---- > front end
    table, total_params = count_parameters(model)
    model_info['table'] = table.get_html_string()
    model_info['total_params'] = total_params
    model_info['optimizer'] = Data['optimizers']
    model_info['loss'] = Data['loss']
    model_info['model'] = list(model)
    # NOTE(review): all four optimizers are constructed eagerly; only the
    # selected one is used.
    optimizer_selection = {'Adam': torch.optim.Adam(model.parameters(), lr=float(Data['learningRate'])),
                           'AdaGrad': torch.optim.Adagrad(model.parameters(), lr=float(Data['learningRate'])),
                           'AdaMax': torch.optim.Adamax(model.parameters(), lr=float(Data['learningRate'])),
                           'RMSProps': torch.optim.RMSprop(model.parameters(), lr=float(Data['learningRate']))}
    optimizer = optimizer_selection[Data['optimizers']]
    # Pick the loss by task type; loss_func is only bound for the two
    # supported values of typ.
    if typ == "Classification":
        loss_selection_classification = {'BCEWithLogitsLoss': nn.BCEWithLogitsLoss(), 'CrossEntropyLoss': nn.CrossEntropyLoss()}
        loss_func = loss_selection_classification[Data['loss']]
    if typ == "Regression":
        loss_selection_regression = {'MAE': nn.L1Loss(), 'MSE': nn.MSELoss(), 'Huber Loss': nn.HuberLoss(),
                                     'Smoth L1': nn.SmoothL1Loss()}
        loss_func = loss_selection_regression[Data['loss']]
    print(loss_func)
    # Regression
    # Train
    if typ == "Regression":
        loss_perEpoch = []
        model.train()
        num_epochs = num_epoch
        for epooch in range(num_epochs):
            for batch_idx, data in enumerate(train_data_loader):
                features = data[0].float()
                # Labels reshaped to (batch, 1) to match the single-output head.
                labels = data[1].float().reshape(features.shape[0],1)
                # print(features.shape,labels.shape)
                optimizer.zero_grad()
                output = model(features)
                loss = loss_func(output, labels)
                loss.backward()
                optimizer.step()
                # Sample the running loss every other batch for the plot.
                if batch_idx % 2 == 0:
                    loss_perEpoch.append(loss.item())
            print(f'Epoch {epooch}/{num_epochs} Loss: {loss.item()}')
        model_metrice['train_loss'] = loss_perEpoch[-1]
        model_metrice_plot['train_loss'] = loss_perEpoch
        # For regression the "accuracy" series is just the step index (x-axis).
        model_metrice_plot['train_accuracy'] = [x for x in range(len(loss_perEpoch))]
        # Test
        model.eval()
        test_loss = []
        with torch.no_grad():
            for idx, data in enumerate(test_data_loader):
                features = data[0].float()
                labels = data[1].float().reshape(features.shape[0],1)
                output = model(features)
                test_loss.append(loss_func(output, labels).item())
        model_metrice['test_loss'] = np.mean(test_loss)
        model_metrice['test_accuracy'] = None
        model_metrice_plot['test_loss'] = test_loss
        model_metrice_plot['test_accuracy'] = [x for x in range(len(test_loss))]
        print("Test Loss :", np.mean(test_loss))
    # Classification
    if typ == 'Classification':
        # Train
        loss_perEpoch = []
        train_acc = []
        model.train()
        num_epochs = num_epoch
        for epooch in range(num_epochs):
            for batch_idx, data in enumerate(train_data_loader):
                features = data[0].float()
                labels = data[1]
                # print(features,labels)
                optimizer.zero_grad()
                output = model(features)
                loss = loss_func(output, labels)
                loss.backward()
                optimizer.step()
                # Sample accuracy/loss every 8th batch for the plot series.
                if batch_idx % 8 == 0:
                    train_acc.append((torch.argmax(output, axis=1) == labels.squeeze().long()).float().mean())
                    loss_perEpoch.append(loss.item())
            print(f'Epoch {epooch}/{num_epochs} Loss: {loss.item()}')
        model_metrice['train_loss'] = loss_perEpoch[-1]
        model_metrice_plot['train_loss'] = loss_perEpoch
        model_metrice_plot['train_accuracy'] = train_acc
        # Test
        model.eval()
        test_loss = []
        test_acc = []
        with torch.no_grad():
            for idx, data in enumerate(test_data_loader):
                features = data[0].float()
                labels = data[1]
                output = model(features)
                test_acc.append((torch.argmax(output, axis=1) == labels.squeeze().long()).float().mean())
                test_loss.append(loss_func(output, labels).item())
        print("Test Loss :", np.mean(test_loss), " ", "Test Accuracy :", np.mean(test_acc))
        model_metrice['test_accuracy'] = np.mean(test_acc)
        model_metrice['test_loss'] = np.mean(test_loss)
        model_metrice_plot['test_loss'] = test_loss
        # NOTE(review): the plotted test "accuracy" series is the step index,
        # not test_acc — looks like an x-axis stand-in; confirm intent.
        model_metrice_plot['test_accuracy'] = [x for x in range(len(test_loss))]
    return model_info, model_metrice, model_metrice_plot
@app_training.route('/model_training/ann', methods=['POST'])
def ann_model_training():
    """POST endpoint: train an ANN from the submitted layer spec and render its summary."""
    try:
        payload = request.get_json(force=True)
        print(payload)
        df = load_data()
        target = session['target_column']
        # project_type 1 -> regression, anything else -> classification
        typ = 'Regression' if session['project_type'] == 1 else 'Classification'
        model_info, model_metrice, model_metrice_plot = main(
            payload, df, target=target,
            size=float(payload['trainSplitPercent']),
            num_epoch=int(payload['epoch']), typ=typ)
        graphJSON = {
            'train': PlotlyHelper.line(df, x=model_metrice_plot['train_accuracy'],
                                       y=model_metrice_plot['train_loss']),
            'test': PlotlyHelper.line(df, x=model_metrice_plot['test_accuracy'],
                                      y=model_metrice_plot['test_loss']),
        }
        return render_template('model_training/ann_summary.html', model_info=model_info,
                               model_metrice=model_metrice, status="success", graphJSON=graphJSON)
    except Exception as e:
        logger.error(e)
        return jsonify({'success': False})
@app_training.route('/model_training/cnn', methods=['GET'])
def cnn_training():
    """GET endpoint: render the CNN configuration page with the option lists."""
    try:
        context = dict(optimizers=OPTIMIZERS, poolings=POOLING,
                       activation_functions=ACTIVATION_FUNCTIONS, loss=REGRESSION_LOSS)
        return render_template('model_training/cnn.html', **context)
    except Exception as e:
        logger.error(e)
        return jsonify({'success': False})
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app_training.route('/model_training/upload_zip', methods=['POST'])
def cnn_model_training():
    """POST endpoint: accept a zip upload and save it into UPLOAD_FOLDER.

    Returns JSON ``{'success': True}`` on a saved file, ``{'success': False}``
    otherwise. The original fell through with no return (Flask view returning
    None) when the file part was missing, the filename was empty, or the
    extension was not allowed; each case now fails fast with an explicit
    JSON response.
    """
    try:
        if 'zip_file' not in request.files:
            print('No file part')
            return jsonify({'success': False})
        file = request.files['zip_file']
        if file.filename == '':
            print('No selected file')
            return jsonify({'success': False})
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(UPLOAD_FOLDER, filename))
            return jsonify({'success': True})
        # Present but with a disallowed extension.
        return jsonify({'success': False})
    except Exception as e:
        logger.error(e)
        return jsonify({'success': False})
# ---- file boundary: the content below belongs to bootleg's wiki_dataset module ----
import os
import time
import ujson as json
import torch
import sys
import pickle
import numpy as np
from torch.utils.data import Dataset
import torch.distributed as dist
import torch.nn.functional as F
from bootleg.symbols.alias_entity_table import AliasEntityTable
from bootleg.symbols.constants import *
from bootleg.prep import prep_data
from bootleg.utils import logging_utils, data_utils, train_utils
from bootleg.utils.utils import import_class
from bootleg.utils import utils
# https://github.com/pytorch/pytorch/issues/37581#issuecomment-624516586
import warnings
warnings.filterwarnings("ignore", message=".*The given NumPy array is not writeable.*")
class WikiDataset(Dataset):
    """
    Main dataset class that handles preparing a batch of input.
    Things to note
    **Input is a sentence with mentions that are both true and false golds. A true gold is one that was directly
    mined with Wikipedia. A false gold is one that was generated by weak labelling.
    **We determine entities that are in a slice by if the true entity index is -1 or not. During train, if use_weak_label is true,
    we allow the model to leverage true and false golds. During eval, we only score true enchors.
    **Some embeddings require more expensive processing. E.g., extracting the pairs of candidate entities that are connected
    in a KG. When this processing is done in the dataloader where is can benefit from multiple dataloader threads,
    the embedding is stored in batch_on_the_fly. This embedding must have a batch_prep method
    When this processing is done during data prep, the embedding is stored in batch_prep.
    **If training a NIL model, we support randomly removing the true entity from the candidate list and setting the true
    entity index to be the NIL entity.
    **We support data slices (subsets of data) for both training (if slice model) and eval. If using slices for training model,
    we supports probabilistic slice indices.
    Attributes:
        batch_prepped_emb_file_names: embedding that are batch prepped in advance
        batch_on_the_fly_embs: embedding where the batch_prep method is called in the __get_item__ method
        random_nil: whether to do NIL candidate random generation
    Batch Inputs:
        start_idx_in_sent: first token index of a mention,
        end_idx_in_sent: last token index of a mention,
        alias_idx: the alias (mention) index in our alias dictionary,
        word_indices: word indexes into the word emeddings (e.g., BERT token indices),
        sent_idx: unique sentence index,
        subsent_idx: unique subsentence index in the case of sentence windowing,
        entity_indices: the entity indices in our entity dictionary,
        alias_list_pos: keeps track of the original alias position in the list of all aliases in case the sentence
        is split via windowing
        true_entity_idx_for_train: entity indices for true and false golds, as seen during train
        slice_indices (optional): if slice dataset, we pass in matrix where each row is alias and each column
        is 0/1 if that mention is in the slice or not
        <ind_task_name> (option): probabilistic labels of if an mention is in a slice or not (used in slicing model)
        <pred_task_name>: NED prediction labels; for slice model, predictions of aliases not in the slice are masked
        <embs>: all batch prep or batch on the fly emeddings
    """
    def __init__(self, args, use_weak_label, input_src, dataset_name,
                 is_writer, distributed, word_symbols, entity_symbols,
                 slice_dataset=None, dataset_is_eval=False):
        # Need to save args to reinstantiate logger
        self.args = args
        self.logger = logging_utils.get_logger(args)
        # Number of candidates, including NIL if a NIL model (train_in_candidates is False)
        self.K = entity_symbols.max_candidates + (not args.data_config.train_in_candidates)
        self.num_entities_with_pad_and_nocand = entity_symbols.num_entities_with_pad_and_nocand
        self.dataset_name = dataset_name
        self.slice_dataset = slice_dataset
        self.dataset_is_eval = dataset_is_eval
        # Slice names used for eval slices and a slicing model
        self.slice_names = train_utils.get_data_slices(args, dataset_is_eval)
        self.storage_type_file = data_utils.get_storage_file(self.dataset_name)
        # Mappings from sent_idx to row_id in dataset
        self.sent_idx_file = os.path.splitext(dataset_name)[0] + "_sent_idx.json"
        self.type_pred = False
        if args.data_config.type_prediction.use_type_pred:
            self.type_pred = True
            self.eid2typeid, self.num_types_with_pad = self.load_coarse_type_table(args, entity_symbols)
        # Load memory mapped file
        self.logger.info("Loading dataset...")
        self.logger.debug("Seeing if " + dataset_name + " exists")
        if (args.data_config.overwrite_preprocessed_data or
            (not os.path.exists(self.dataset_name)) or
            (not os.path.exists(self.sent_idx_file)) or
            (not os.path.exists(self.storage_type_file)) or
            (not os.path.exists(data_utils.get_batch_prep_config(self.dataset_name)))):
            start = time.time()
            self.logger.debug(f"Building dataset with {input_src}")
            # Only prep data once per node
            if is_writer:
                prep_data(args, use_weak_label=use_weak_label, dataset_is_eval=self.dataset_is_eval,
                          input_src=input_src, dataset_name=dataset_name,
                          prep_dir=data_utils.get_data_prep_dir(args))
            if distributed:
                # Make sure all processes wait for data to be created
                dist.barrier()
            self.logger.debug(f"Finished building and saving dataset in {round(time.time() - start, 2)}s.")
        start = time.time()
        # Storage type for loading memory mapped file of dataset.
        # Use a context manager so the file handle is closed promptly
        # (the original left it open via pickle.load(open(...))).
        with open(self.storage_type_file, 'rb') as storage_f:
            self.storage_type = pickle.load(storage_f)
        self.data = np.memmap(self.dataset_name, dtype=self.storage_type, mode='r')
        self.data_len = len(self.data)
        # Mapping from sentence idx to rows in the dataset (indices).
        # Needed when sampling sentence indices from slices for evaluation.
        sent_idx_to_idx_str = utils.load_json_file(self.sent_idx_file)
        self.sent_idx_to_idx = {int(i):val for i,val in sent_idx_to_idx_str.items()}
        self.logger.info(f"Finished loading dataset.")
        # Stores info about the batch prepped embedding memory mapped files and their shapes and datatypes
        # so we can load them
        self.batch_prep_config = utils.load_json_file(data_utils.get_batch_prep_config(self.dataset_name))
        self.batch_prepped_emb_files = {}
        self.batch_prepped_emb_file_names = {}
        for emb in args.data_config.ent_embeddings:
            if 'batch_prep' in emb and emb['batch_prep']:
                assert emb.key in self.batch_prep_config, f'Need to prep {emb.key}. Please call prep instead of run with batch_prep_embeddings set to true.'
                self.batch_prepped_emb_file_names[emb.key] = os.path.join(os.path.dirname(self.dataset_name),
                                                                          os.path.basename(self.batch_prep_config[emb.key]['file_name']))
                self.batch_prepped_emb_files[emb.key] = np.memmap(
                    self.batch_prepped_emb_file_names[emb.key],
                    dtype=self.batch_prep_config[emb.key]['dtype'],
                    shape=tuple(self.batch_prep_config[emb.key]['shape']),
                    mode='r')
                # NOTE: the nested quotes in this f-string must differ from the
                # outer quotes (SyntaxError on Python < 3.12 otherwise).
                assert len(self.batch_prepped_emb_files[emb.key]) == self.data_len,\
                    f'Preprocessed emb data file {self.batch_prep_config[emb.key]["file_name"]} does not match length of main data file.'
        # Stores embeddings that we compute on the fly; these are embeddings where batch_on_the_fly is set to true.
        self.batch_on_the_fly_embs = {}
        for emb in args.data_config.ent_embeddings:
            if 'batch_on_the_fly' in emb and emb['batch_on_the_fly'] is True:
                mod, load_class = import_class("bootleg.embeddings", emb.load_class)
                try:
                    self.batch_on_the_fly_embs[emb.key] = getattr(mod, load_class)(main_args=args,
                        emb_args=emb['args'], entity_symbols=entity_symbols,
                        model_device=None, word_symbols=None, key=emb.key)
                except AttributeError as e:
                    self.logger.warning(f'No prep method found for {emb.load_class} with error {e}')
                except Exception as e:
                    print("ERROR", e)
        # The data in this table shouldn't be pickled since we delete it in the class __getstate__
        self.alias2entity_table = AliasEntityTable(args=args, entity_symbols=entity_symbols)
        # Random NIL percent
        self.mask_perc = args.train_config.random_nil_perc
        self.random_nil = False
        # Don't want to random mask for eval
        if not dataset_is_eval:
            # Whether to use a random NIL training regime
            self.random_nil = args.train_config.random_nil
            if self.random_nil:
                self.logger.info(f'Using random nils during training with {self.mask_perc} percent')

    def __len__(self):
        """Number of rows in the memory-mapped dataset."""
        return self.data_len

    def __getitem__(self, key):
        """Assemble the full feature dict for dataset row *key*."""
        # start = time.time()
        example = self.data[key]
        entity_indices = self.alias2entity_table(example['alias_idx'])
        # True entities will be true and false golds for train (if use_weak_label in config is true) and just true golds for eval
        true_entities = torch.from_numpy(example['true_entity_idx'])
        M = true_entities.shape
        if self.random_nil:
            # example['true_entity_idx'] is M -> we want to sample some % of these and set them to not in candidate list
            # randomly mask each entity embedding
            bern_prob = (torch.ones(M) * self.mask_perc)
            keep_mask = torch.bernoulli(bern_prob) < 1
            # whichever we sample, we want to set corresponding true candidate to -1 and mask it out
            # to simulate not being in the candidate list
            # can't have negatives for one hot so we temporarily cast padded values to 0
            padded_entities = true_entities == -1
            true_entities = true_entities.masked_fill(padded_entities, 0)
            one_hot_true_entities = F.one_hot(true_entities, num_classes=self.K)
            one_hot_true_entities[keep_mask.unsqueeze(-1).expand_as(one_hot_true_entities)] = 0
            one_hot_true_entities[padded_entities.unsqueeze(-1).expand_as(one_hot_true_entities)] = 0
            entity_indices = entity_indices.masked_fill(one_hot_true_entities, -1)
            # set new true label to 0 ('not in candidate')
            true_entities = true_entities.masked_fill(~keep_mask, 0)
            # make sure original padded entities are padded
            true_entities = true_entities.masked_fill(padded_entities, -1)
        start_idx_in_sent = example['start_idx_in_sent']
        end_idx_in_sent = example['end_idx_in_sent']
        example_dict = {'start_idx_in_sent': start_idx_in_sent,
                        'end_idx_in_sent': end_idx_in_sent,
                        'alias_idx': example['alias_idx'],
                        'word_indices': example['word_indices'],
                        'sent_idx': example['sent_idx'],
                        'subsent_idx': example['subsent_idx'],
                        'entity_indices': entity_indices,
                        # due to subsentence split, we need to keep track of the original alias position in the list
                        # to do eval over slices when distributed
                        # (examples from a sentence may be distributed across different GPUs)
                        'alias_list_pos': example['alias_list_pos'],
                        # true entities of the mentions seen during train (true and false golds); in eval, we only keep
                        # true entities of true golds
                        'true_entity_idx_for_train': example['true_entity_idx_for_train']}
        # If this dataset is associated with slices, slice_indices is a incidence matrix indicating
        # for each alias in the batch, which ones participate in which slice (slices keep track of sentence indexes and aliases to predict)
        # Slices are not windowed like that are for training data.
        if self.slice_dataset is not None:
            # -1 is pad and should not be in the mapping from sentence index to row in array.
            assert -1 != self.slice_dataset.sent_idx_arr[example["sent_idx"]]
            # One row per mention and one column per slice
            slice_indices = np.hstack([self.slice_dataset.data[slice_name][self.slice_dataset.sent_idx_arr[example["sent_idx"]]].alias_to_predict.T
                for slice_name in self.slice_names])
            prob_labels_arr = np.hstack([self.slice_dataset.data[slice_name][self.slice_dataset.sent_idx_arr[example["sent_idx"]]].prob_labels.T
                for slice_name in self.slice_names])
            # alias_list_pos will have -1 for no alias; we want these to become zero in slice_indices.
            # Therefore we add a pad row to the bottom of slice_indices
            slice_indices = np.vstack([slice_indices, np.zeros(slice_indices.shape[1])]).astype(int)
            slice_indices = slice_indices[example['alias_list_pos']]
            # Probabilistic slice labels for slice indicator head training
            prob_labels_arr = np.vstack([prob_labels_arr, np.zeros(prob_labels_arr.shape[1])]).astype(float)
            prob_labels_arr = prob_labels_arr[example['alias_list_pos']]
            # If this is an eval dataset, keep slice indices intact for eval_wrapper
            example_dict['slice_indices'] = slice_indices
            # Assign true entity idx to -1 if example alias doesn't participate in slice
            for i, slice_name in enumerate(self.slice_names):
                prob_labels = prob_labels_arr[:,i]
                bin_in_slice_labels = slice_indices[:,i]
                # NED prediction labels; set predictions to be -1 for masking for mentions not in a slice
                pred_labels = np.copy(true_entities)
                pred_labels[~(bin_in_slice_labels).astype(bool)] = -1
                # Mask out slice alias labels for which we don't want to make a prediction
                # We need to use true_entity_idx to account for subsentences which indicate
                # which alias to predict
                prob_labels[true_entities == -1] = -1
                ind_task_name = train_utils.get_slice_head_ind_name(slice_name)
                pred_task_name = train_utils.get_slice_head_pred_name(slice_name)
                # Add indicator head and prediction head labels
                example_dict[ind_task_name] = prob_labels
                example_dict[pred_task_name] = pred_labels
        else:
            example_dict[train_utils.get_slice_head_pred_name(FINAL_LOSS)] = example['true_entity_idx']
        # Add type preds
        if self.type_pred:
            example_dict["type_labels"] = self.eid2typeid[true_entities]
        # Add embeddings to example forward
        for emb_name in self.batch_prepped_emb_files:
            example_dict[emb_name] = np.asarray(self.batch_prepped_emb_files[emb_name][key])
        # Prep the embeddings (this will call the batch_prep method for the embedding)
        for emb_name, emb in self.batch_on_the_fly_embs.items():
            example_dict[emb_name] = emb.batch_prep(example['alias_idx'], entity_indices)
        return example_dict

    def __getstate__(self):
        """Drop unpicklable / expensive members before pickling (e.g. for dataloader workers)."""
        state = self.__dict__.copy()
        # Not picklable
        del state['data']
        del state['logger']
        # the sent_idx mapping is expensive to pickle so remove
        # also not needed in dataloader workers so we don't need to setstate for it
        del state['sent_idx_to_idx']
        del state['batch_prepped_emb_files']
        return state

    def __setstate__(self, state):
        """Re-open the memmaps and logger that __getstate__ removed."""
        self.__dict__.update(state)
        self.data = np.memmap(self.dataset_name, dtype=self.storage_type, mode='r')
        self.batch_prepped_emb_files = {}
        for emb_name, file_name in self.batch_prepped_emb_file_names.items():
            self.batch_prepped_emb_files[emb_name] = np.memmap(self.batch_prepped_emb_file_names[emb_name],
                dtype=self.batch_prep_config[emb_name]['dtype'],
                shape=tuple(self.batch_prep_config[emb_name]['shape']),
                mode='r')
        self.logger = logging_utils.get_logger(self.args)

    def __repr__(self):
        return f"Dataset {self.dataset_name}"

    def load_coarse_type_table(self, args, entity_symbols):
        """Build the eid -> coarse type id tensor from the configured type file.

        Returns (eid2coarsetype, num_types_with_pad) where index 0 is the UNK
        type and the padded eid (last row) is -1.
        """
        emb_dir = args.data_config.emb_dir
        coarse_type_file = args.data_config.type_prediction.file
        with open(os.path.join(emb_dir, coarse_type_file)) as in_f:
            # take the first type; UNK type is 0
            qid2type = {}
            max_type = 0
            for k, v in json.load(in_f).items():
                if len(v) > 0:
                    qid2type[k] = v[0]+1
                else:
                    qid2type[k] = 0
                max_type = max(max_type, qid2type[k])
            # We assume types are indexed from 0. So, 6 types will have indices 0 - 5. Max type will get 5+1 = 6.
            assert max_type == args.data_config.type_prediction.num_types,\
                f"{args.data_config.type_prediction.num_types} from args.data_config.type_prediction.num_types must match our computed number {max_type}"
            # All qids get unk types
            values = [0 for _ in range(self.num_entities_with_pad_and_nocand)]
            for qid in qid2type:
                if entity_symbols.qid_exists(qid):
                    values[entity_symbols.get_eid(qid)] = qid2type[qid]
            # Padded eid gets -1
            values[-1] = -1
            num_types_with_pad = max_type+1
            eid2coarsetype = torch.tensor(values)
            return eid2coarsetype, num_types_with_pad
import os
import time
import ujson as json
import torch
import sys
import pickle
import numpy as np
from torch.utils.data import Dataset
import torch.distributed as dist
import torch.nn.functional as F
from bootleg.symbols.alias_entity_table import AliasEntityTable
from bootleg.symbols.constants import *
from bootleg.prep import prep_data
from bootleg.utils import logging_utils, data_utils, train_utils
from bootleg.utils.utils import import_class
from bootleg.utils import utils
# https://github.com/pytorch/pytorch/issues/37581#issuecomment-624516586
import warnings
warnings.filterwarnings("ignore", message=".*The given NumPy array is not writeable.*")
class WikiDataset(Dataset):
"""
Main dataset class that handles preparing a batch of input.
Things to note
**Input is a sentence with mentions that are both true and false golds. A true gold is one that was directly
mined with Wikipedia. A false gold is one that was generated by weak labelling.
**We determine entities that are in a slice by if the true entity index is -1 or not. During train, if use_weak_label is true,
we allow the model to leverage true and false golds. During eval, we only score true enchors.
**Some embeddings require more expensive processing. E.g., extracting the pairs of candidate entities that are connected
in a KG. When this processing is done in the dataloader where is can benefit from multiple dataloader threads,
the embedding is stored in batch_on_the_fly. This embedding must have a batch_prep method
When this processing is done during data prep, the embedding is stored in batch_prep.
**If training a NIL model, we support randomly removing the true entity from the candidate list and setting the true
entity index to be the NIL entity.
**We support data slices (subsets of data) for both training (if slice model) and eval. If using slices for training model,
we supports probabilistic slice indices.
Attributes:
batch_prepped_emb_file_names: embedding that are batch prepped in advance
batch_on_the_fly_embs: embedding where the batch_prep method is called in the __get_item__ method
random_nil: whether to do NIL candidate random generation
Batch Inputs:
start_idx_in_sent: first token index of a mention,
end_idx_in_sent: last token index of a mention,
alias_idx: the alias (mention) index in our alias dictionary,
word_indices: word indexes into the word emeddings (e.g., BERT token indices),
sent_idx: unique sentence index,
subsent_idx: unique subsentence index in the case of sentence windowing,
entity_indices: the entity indices in our entity dictionary,
alias_list_pos: keeps track of the original alias position in the list of all aliases in case the sentence
is split via windowing
true_entity_idx_for_train: entity indices for true and false golds, as seen during train
slice_indices (optional): if slice dataset, we pass in matrix where each row is alias and each column
is 0/1 if that mention is in the slice or not
<ind_task_name> (option): probabilistic labels of if an mention is in a slice or not (used in slicing model)
<pred_task_name>: NED prediction labels; for slice model, predictions of aliases not in the slice are masked
<embs>: all batch prep or batch on the fly emeddings
"""
def __init__(self, args, use_weak_label, input_src, dataset_name,
is_writer, distributed, word_symbols, entity_symbols,
slice_dataset=None, dataset_is_eval=False):
# Need to save args to reinstantiate logger
self.args = args
self.logger = logging_utils.get_logger(args)
# Number of candidates, including NIL if a NIL model (train_in_candidates is False)
self.K = entity_symbols.max_candidates + (not args.data_config.train_in_candidates)
self.num_entities_with_pad_and_nocand = entity_symbols.num_entities_with_pad_and_nocand
self.dataset_name = dataset_name
self.slice_dataset = slice_dataset
self.dataset_is_eval = dataset_is_eval
# Slice names used for eval slices and a slicing model
self.slice_names = train_utils.get_data_slices(args, dataset_is_eval)
self.storage_type_file = data_utils.get_storage_file(self.dataset_name)
# Mappings from sent_idx to row_id in dataset
self.sent_idx_file = os.path.splitext(dataset_name)[0] + "_sent_idx.json"
self.type_pred = False
if args.data_config.type_prediction.use_type_pred:
self.type_pred = True
self.eid2typeid, self.num_types_with_pad = self.load_coarse_type_table(args, entity_symbols)
# Load memory mapped file
self.logger.info("Loading dataset...")
self.logger.debug("Seeing if " + dataset_name + " exists")
if (args.data_config.overwrite_preprocessed_data or
(not os.path.exists(self.dataset_name)) or
(not os.path.exists(self.sent_idx_file)) or
(not os.path.exists(self.storage_type_file)) or
(not os.path.exists(data_utils.get_batch_prep_config(self.dataset_name)))):
start = time.time()
self.logger.debug(f"Building dataset with {input_src}")
# Only prep data once per node
if is_writer:
prep_data(args, use_weak_label=use_weak_label, dataset_is_eval=self.dataset_is_eval,
input_src=input_src, dataset_name=dataset_name,
prep_dir=data_utils.get_data_prep_dir(args))
if distributed:
# Make sure all processes wait for data to be created
dist.barrier()
self.logger.debug(f"Finished building and saving dataset in {round(time.time() - start, 2)}s.")
start = time.time()
# Storage type for loading memory mapped file of dataset
self.storage_type = pickle.load(open(self.storage_type_file, 'rb'))
self.data = np.memmap(self.dataset_name, dtype=self.storage_type, mode='r')
self.data_len = len(self.data)
# Mapping from sentence idx to rows in the dataset (indices).
# Needed when sampling sentence indices from slices for evaluation.
sent_idx_to_idx_str = utils.load_json_file(self.sent_idx_file)
self.sent_idx_to_idx = {int(i):val for i,val in sent_idx_to_idx_str.items()}
self.logger.info(f"Finished loading dataset.")
# Stores info about the batch prepped embedding memory mapped files and their shapes and datatypes
# so we can load them
self.batch_prep_config = utils.load_json_file(data_utils.get_batch_prep_config(self.dataset_name))
self.batch_prepped_emb_files = {}
self.batch_prepped_emb_file_names = {}
for emb in args.data_config.ent_embeddings:
if 'batch_prep' in emb and emb['batch_prep']:
assert emb.key in self.batch_prep_config, f'Need to prep {emb.key}. Please call prep instead of run with batch_prep_embeddings set to true.'
self.batch_prepped_emb_file_names[emb.key] = os.path.join(os.path.dirname(self.dataset_name),
os.path.basename(self.batch_prep_config[emb.key]['file_name']))
self.batch_prepped_emb_files[emb.key] = np.memmap(
self.batch_prepped_emb_file_names[emb.key],
dtype=self.batch_prep_config[emb.key]['dtype'],
shape=tuple(self.batch_prep_config[emb.key]['shape']),
mode='r')
assert len(self.batch_prepped_emb_files[emb.key]) == self.data_len,\
f'Preprocessed emb data file {self.batch_prep_config[emb.key]["file_name"]} does not match length of main data file.'
# Stores embeddings that we compute on the fly; these are embeddings where batch_on_the_fly is set to true.
self.batch_on_the_fly_embs = {}
for emb in args.data_config.ent_embeddings:
if 'batch_on_the_fly' in emb and emb['batch_on_the_fly'] is True:
mod, load_class = import_class("bootleg.embeddings", emb.load_class)
try:
self.batch_on_the_fly_embs[emb.key] = getattr(mod, load_class)(main_args=args,
emb_args=emb['args'], entity_symbols=entity_symbols,
model_device=None, word_symbols=None, key=emb.key)
except AttributeError as e:
self.logger.warning(f'No prep method found for {emb.load_class} with error {e}')
except Exception as e:
print("ERROR", e)
# The data in this table shouldn't be pickled since we delete it in the class __getstate__
self.alias2entity_table = AliasEntityTable(args=args, entity_symbols=entity_symbols)
# Random NIL percent
self.mask_perc = args.train_config.random_nil_perc
self.random_nil = False
# Don't want to random mask for eval
if not dataset_is_eval:
# Whether to use a random NIL training regime
self.random_nil = args.train_config.random_nil
if self.random_nil:
self.logger.info(f'Using random nils during training with {self.mask_perc} percent')
def __len__(self):
return self.data_len
    def __getitem__(self, key):
        """Assemble the feature/label dict for the subsentence example at ``key``.

        Reads the memory-mapped record, expands alias indices into candidate
        entity indices, optionally applies random-NIL masking (train only),
        attaches per-slice indicator/prediction labels when a slice dataset
        is present, and appends any batch-prepped or on-the-fly embedding
        payloads.
        """
        # start = time.time()
        example = self.data[key]
        # Expand each alias id into its candidate entity ids via the alias table.
        entity_indices = self.alias2entity_table(example['alias_idx'])
        # True entities will be true and false golds for train (if use_weak_label in config is true) and just true golds for eval
        true_entities = torch.from_numpy(example['true_entity_idx'])
        # NOTE: M is a torch.Size (the label tensor's full shape), not an int.
        M = true_entities.shape
        if self.random_nil:
            # example['true_entity_idx'] is M -> we want to sample some % of these and set them to not in candidate list
            # randomly mask each entity embedding
            bern_prob = (torch.ones(M) * self.mask_perc)
            # keep_mask is True where the Bernoulli draw is 0, i.e. labels
            # that stay untouched; the sampled rest get NIL-masked below.
            keep_mask = torch.bernoulli(bern_prob) < 1
            # whichever we sample, we want to set corresponding true candidate to -1 and mask it out
            # to simulate not being in the candidate list
            # can't have negatives for one hot so we temporarily cast padded values to 0
            padded_entities = true_entities == -1
            true_entities = true_entities.masked_fill(padded_entities, 0)
            one_hot_true_entities = F.one_hot(true_entities, num_classes=self.K)
            # Zero the one-hot rows of kept labels and of padding so only the
            # sampled mentions have their true candidate masked out.
            one_hot_true_entities[keep_mask.unsqueeze(-1).expand_as(one_hot_true_entities)] = 0
            one_hot_true_entities[padded_entities.unsqueeze(-1).expand_as(one_hot_true_entities)] = 0
            entity_indices = entity_indices.masked_fill(one_hot_true_entities, -1)
            # set new true label to 0 ('not in candidate')
            true_entities = true_entities.masked_fill(~keep_mask, 0)
            # make sure original padded entities are padded
            true_entities = true_entities.masked_fill(padded_entities, -1)
        start_idx_in_sent = example['start_idx_in_sent']
        end_idx_in_sent = example['end_idx_in_sent']
        example_dict = {'start_idx_in_sent': start_idx_in_sent,
                        'end_idx_in_sent': end_idx_in_sent,
                        'alias_idx': example['alias_idx'],
                        'word_indices': example['word_indices'],
                        'sent_idx': example['sent_idx'],
                        'subsent_idx': example['subsent_idx'],
                        'entity_indices': entity_indices,
                        # due to subsentence split, we need to keep track of the original alias position in the list
                        # to do eval over slices when distributed
                        # (examples from a sentence may be distributed across different GPUs)
                        'alias_list_pos': example['alias_list_pos'],
                        # true entities of the mentions seen during train (true and false golds); in eval, we only keep
                        # true entities of true golds
                        'true_entity_idx_for_train': example['true_entity_idx_for_train']}
        # If this dataset is associated with slices, slice_indices is a incidence matrix indicating
        # for each alias in the batch, which ones participate in which slice (slices keep track of sentence indexes and aliases to predict)
        # Slices are not windowed like that are for training data.
        if self.slice_dataset is not None:
            # -1 is pad and should not be in the mapping from sentence index to row in array.
            assert -1 != self.slice_dataset.sent_idx_arr[example["sent_idx"]]
            # One row per mention and one column per slice
            slice_indices = np.hstack([self.slice_dataset.data[slice_name][self.slice_dataset.sent_idx_arr[example["sent_idx"]]].alias_to_predict.T
                for slice_name in self.slice_names])
            prob_labels_arr = np.hstack([self.slice_dataset.data[slice_name][self.slice_dataset.sent_idx_arr[example["sent_idx"]]].prob_labels.T
                for slice_name in self.slice_names])
            # alias_list_pos will have -1 for no alias; we want these to become zero in slice_indices.
            # Therefore we add a pad row to the bottom of slice_indices
            slice_indices = np.vstack([slice_indices, np.zeros(slice_indices.shape[1])]).astype(int)
            slice_indices = slice_indices[example['alias_list_pos']]
            # Probabilistic slice labels for slice indicator head training
            prob_labels_arr = np.vstack([prob_labels_arr, np.zeros(prob_labels_arr.shape[1])]).astype(float)
            prob_labels_arr = prob_labels_arr[example['alias_list_pos']]
            # If this is an eval dataset, keep slice indices intact for eval_wrapper
            example_dict['slice_indices'] = slice_indices
            # Assign true entity idx to -1 if example alias doesn't participate in slice
            for i, slice_name in enumerate(self.slice_names):
                prob_labels = prob_labels_arr[:,i]
                bin_in_slice_labels = slice_indices[:,i]
                # NED prediction labels; set predictions to be -1 for masking for mentions not in a slice
                pred_labels = np.copy(true_entities)
                pred_labels[~(bin_in_slice_labels).astype(bool)] = -1
                # Mask out slice alias labels for which we don't want to make a prediction
                # We need to use true_entity_idx to account for subsentences which indicate
                # which alias to predict
                prob_labels[true_entities == -1] = -1
                ind_task_name = train_utils.get_slice_head_ind_name(slice_name)
                pred_task_name = train_utils.get_slice_head_pred_name(slice_name)
                # Add indicator head and prediction head labels
                example_dict[ind_task_name] = prob_labels
                example_dict[pred_task_name] = pred_labels
        else:
            # No slice dataset: everything trains against the single final-loss head.
            example_dict[train_utils.get_slice_head_pred_name(FINAL_LOSS)] = example['true_entity_idx']
        # Add type preds
        if self.type_pred:
            example_dict["type_labels"] = self.eid2typeid[true_entities]
        # Add embeddings to example forward
        for emb_name in self.batch_prepped_emb_files:
            example_dict[emb_name] = np.asarray(self.batch_prepped_emb_files[emb_name][key])
        # Prep the embeddings (this will call the batch_prep method for the embedding)
        for emb_name, emb in self.batch_on_the_fly_embs.items():
            example_dict[emb_name] = emb.batch_prep(example['alias_idx'], entity_indices)
        return example_dict
def __getstate__(self):
state = self.__dict__.copy()
# Not picklable
del state['data']
del state['logger']
# the sent_idx mapping is expensive to pickle so remove
# also not needed in dataloader workers so we don't need to setstate for it
del state['sent_idx_to_idx']
del state['batch_prepped_emb_files']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.data = np.memmap(self.dataset_name, dtype=self.storage_type, mode='r')
self.batch_prepped_emb_files = {}
for emb_name, file_name in self.batch_prepped_emb_file_names.items():
self.batch_prepped_emb_files[emb_name] = np.memmap(self.batch_prepped_emb_file_names[emb_name],
dtype=self.batch_prep_config[emb_name]['dtype'],
shape=tuple(self.batch_prep_config[emb_name]['shape']),
mode='r')
self.logger = logging_utils.get_logger(self.args)
def __repr__(self):
return f"Dataset {self.dataset_name}"
def load_coarse_type_table(self, args, entity_symbols):
emb_dir = args.data_config.emb_dir
coarse_type_file = args.data_config.type_prediction.file
with open(os.path.join(emb_dir, coarse_type_file)) as in_f:
# take the first type; UNK type is 0
qid2type = {}
max_type = 0
for k, v in json.load(in_f).items():
if len(v) > 0:
qid2type[k] = v[0]+1
else:
qid2type[k] = 0
max_type = max(max_type, qid2type[k])
# We assume types are indexed from 0. So, 6 types will have indices 0 - 5. Max type will get 5+1 = 6.
assert max_type == args.data_config.type_prediction.num_types,\
f"{args.data_config.type_prediction.num_types} from args.data_config.type_prediction.num_types must match our computed number {max_type}"
# All qids get unk types
values = [0 for _ in range(self.num_entities_with_pad_and_nocand)]
for qid in qid2type:
if entity_symbols.qid_exists(qid):
values[entity_symbols.get_eid(qid)] = qid2type[qid]
# Padded eid gets -1
values[-1] = -1
num_types_with_pad = max_type+1
eid2coarsetype = torch.tensor(values)
return eid2coarsetype, num_types_with_pad
|
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import json
import glob
import os
import argparse
from typing import Tuple, Union, List
from collections import Counter
from tqdm import tqdm
from multiprocessing import Pool
pd.options.mode.chained_assignment = None # default='warn'
# ====================================================================
def get_data(img_pth: Union[str, os.PathLike]) -> dict:
    """Load and return the JSON payload stored at ``img_pth``."""
    with open(img_pth, 'r') as fh:
        return json.load(fh)
def get_original_df(
        path: Union[str, os.PathLike],
        filename: str,
        processes_per_cpu: int = 2) -> Tuple[pd.DataFrame, bool]:
    """Build a DataFrame from every can_bus*.json file under ``path``.

    The parsed records are cached to ``data_analysis/<filename>`` (.npy) and
    reused on later runs.  The second return value is always False because
    raw on-disk data is never augmented.
    """
    cache_file = os.path.join(os.getcwd(), 'data_analysis', filename)
    if os.path.isfile(cache_file):
        print('.npy file exists, loading it...')
        records = list(np.load(cache_file, allow_pickle=True))
    else:
        print('.npy file not found, constructing it...')
        json_paths = sorted(glob.glob(os.path.join(path, '**/can_bus*.json'), recursive=True))
        # JSON parsing is I/O-bound, so oversubscribe each CPU by
        # ``processes_per_cpu`` workers.
        with Pool(os.cpu_count() * processes_per_cpu) as pool:
            records = list(tqdm(pool.imap(get_data, json_paths), total=len(json_paths)))
        np.save(cache_file, records)
    frame = pd.DataFrame(records)
    print(frame.describe())
    return frame, False
# ====================================================================
def get_augmented_df(preloads_name: str) -> Tuple[pd.DataFrame, bool]:
    """Load the (augmented) training data from a ``_preloads`` .npy file.

    The second return value is always True: preloaded data was augmented.
    """
    assert preloads_name.endswith('.npy')
    preload_path = os.path.join(os.getcwd(), '_preloads', preloads_name)
    # NOTE(review): index [1] is assumed to hold the record dicts within the
    # preload archive -- confirm against the preload writer.
    frame = pd.DataFrame(np.load(preload_path, allow_pickle=True)[1])
    print(frame.describe())
    return frame, True
# ====================================================================
def violin_plot(df: pd.DataFrame, save_name: str, augmented: bool) -> None:
    """Save violin plots for the interesting parameters using df.

    For each high-level driving command, draws violins of steer, throttle,
    brake and speed over the matching rows and writes the figure to
    ``data_analysis/<save_name>/violin_plots/``.
    """
    directions_dict = {'No Action': 2.0, 'Turn Left': 3.0, 'Turn Right': 4.0, 'Continue Straight': 5.0}
    # Auxiliary function for setting the quartile lines
    def set_lines(ax):
        # Thin dashed white lines for all quartile markers...
        for l in ax.lines:
            l.set_linestyle('--')
            l.set_linewidth(0.6)
            l.set_color('white')
            l.set_alpha(0.7)
        # ...then a solid black line for every median (each violin draws 3 lines).
        for l in ax.lines[1::3]:
            l.set_linestyle('-')
            l.set_linewidth(1.3)
            l.set_color('black')
            l.set_alpha(0.8)
    for key in directions_dict:
        # Get respective subset of the dataframe
        data = df[df['directions'] == directions_dict[key]]
        fig = plt.figure(figsize=(8, 6))
        gs = fig.add_gridspec(1, 4)
        fig.add_subplot(gs[0, 0])
        ax = sns.violinplot(y='steer', data=data, color='r', inner='quartile')
        set_lines(ax)
        fig.add_subplot(gs[0, 1])
        ax = sns.violinplot(y='throttle', data=data, color='g', inner='quartile')
        set_lines(ax)
        fig.add_subplot(gs[0, 2])
        ax = sns.violinplot(y='brake', data=data, color='b', inner='quartile')
        set_lines(ax)
        fig.add_subplot(gs[0, 3])
        ax = sns.violinplot(y='speed', data=data, color='m', inner='quartile')
        set_lines(ax)
        # When using tight layout, we need the title to be spaced accordingly
        fig.tight_layout()
        fig.subplots_adjust(top=0.88)
        stitle = f'Direction: {key} - $N={len(data)}$ - ${100 * len(data)/len(df):6.3f}$% of total'
        stitle = f'{stitle} - Augmented' if augmented else stitle
        fig.suptitle(stitle, fontsize=16)
        # BUG FIX: the inner quotes must differ from the f-string's own quote;
        # f'{key.replace(' ', '')}' is a SyntaxError before Python 3.12.
        fname = f'{save_name}-{key.replace(" ", "")}'
        fname = f'{fname}-aug' if augmented else fname
        fig_name = os.path.join(os.getcwd(), 'data_analysis', save_name, 'violin_plots', f'{fname}.png')
        os.makedirs(os.path.join(os.getcwd(), 'data_analysis', save_name, 'violin_plots'), exist_ok=True)
        plt.savefig(fig_name)
        plt.close()
# ====================================================================
def plot_clients(path: Union[str, os.PathLike], df: pd.DataFrame, augmented: bool, speed_factor: float) -> None:
    """Plot the steer, throttle, brake, and speed of a client during its data collection.

    Writes one large PNG per client under ``data_analysis/<dataset>/clients/``,
    with the background shaded by the high-level direction command active at
    each frame, plus a JSON summary of per-client action counts, max speeds
    and command change points.

    Args:
        path: Root directory of the dataset (one subdirectory per client).
        df: Full dataset frame; rows are assumed ordered client by client.
        augmented: Whether ``df`` holds augmented (center/left/right) samples.
        speed_factor: Divisor used to normalize raw speeds into [0, 1].
    """
    # Some sanity check
    if path.endswith(os.sep):
        path = path[:-1]
    # Get dataset name and make the necessary directories
    dataset_name = os.path.basename(path)
    s_path = os.path.join(os.getcwd(), 'data_analysis', dataset_name, 'clients')
    os.makedirs(s_path, exist_ok=True)
    # Get the number of clients/cars that collected the data
    clients = glob.glob(os.path.join(path, '**/*'))
    clients = [cl for cl in clients if os.path.isdir(cl)] # Remove path of metadata.json
    num_clients = len(clients)
    # Total number of frames and for a single client
    # NOTE(review): assumes every client contributed an equal, contiguous
    # chunk of rows in df -- confirm against the collection pipeline.
    num_frames = len(df)
    num_frames_per_client = num_frames // num_clients
    # Aux function
    def get_change_locs(df: pd.DataFrame, cli: int) -> Tuple[List[int], List[float]]:
        """Get the index and directions from the df of the actions taken by the client"""
        df['directions_str'] = df['directions'].astype(str) # In order to compare, turn directions into a string
        # Shift directions column by 1 (filling the top with the head), and compare to the original
        df['change'] = df['directions_str'].shift(1, fill_value=df['directions_str'].head(1)) != df['directions_str']
        # Get the rows where there's a change
        index_change = list(df.loc[df['change'] == True].index.values)
        # Add the first frame
        index_change = [(cli - 1) * len(df)] + index_change
        # For these indexes, get the value of the direction
        dirs = list(df['directions'][index_change].values)
        # Add the last frame
        index_change = index_change + [cli * len(df) - 1]
        return index_change, dirs
    # Dictionaries containing the name and color for plotting the direction given to the car
    my_labels = {2.0: 'No Action', 3.0: 'Turn Left', 4.0: 'Turn Right', 5.0: 'Continue Straight'}
    colors = {2.0: 'gold', 3.0: 'gray', 4.0: 'cyan', 5.0: 'magenta'}
    # Initialize the total counts per action
    total_action_counts = Counter({2.0: 0, 3.0: 0, 4.0: 0, 5.0: 0})
    max_speed_clients = {}
    idx_change_clients = {}
    dirs_clients = {}
    # Make a plot for each client
    for client in tqdm(range(1, num_clients + 1), total=num_clients, unit='clients'):
        if augmented:
            # Dataframe will have augmented data, which uses center, left, right, center, ... data
            df_client = df[(client - 1) * num_frames_per_client: client * num_frames_per_client: 3]
        else:
            df_client = df[(client - 1) * num_frames_per_client: client * num_frames_per_client]
        # Augmented data will have been normalized already
        df_client['speed'] = df_client['speed'].div(speed_factor) # normalize to range [0, 1]
        # The actual max speed (see if it differs from collected data)
        actual_max_speed = df_client['speed'].max()
        max_speed_clients[client] = actual_max_speed
        # Build the plot
        fig, ax = plt.subplots(figsize=(48, 16))
        fig.tight_layout(rect=[0, 0.03, 1, 0.95])
        df_client.plot(y=['steer', 'throttle', 'brake', 'speed'], ax=ax)
        # Set the area colors for when an direction is taken
        idx_change, dirs = get_change_locs(df_client, client)
        for idx, dir in enumerate(dirs):
            ax.axvspan(idx_change[idx], idx_change[idx + 1], facecolor=colors[dir], alpha=0.5, label=my_labels[dir])
        # Save these index and directions for each client
        idx_change_clients[f'client_{client:02d}'] = [int(idx) for idx in idx_change]
        dirs_clients[f'client_{client:02d}'] = [float(d) for d in dirs]
        # Count the directions taken by the client
        dirs_count = Counter(dirs)
        # Add this to the total for the whole dataset
        total_action_counts += dirs_count
        # Add the counts to the title
        total_actions = ''
        for key in my_labels:
            total_actions += f' - {my_labels[key]}: {dirs_count[key]}'
        # Set title and x and y axes labels
        suptitle = f'Client {client} - Actual max speed: {actual_max_speed:.4f}'
        suptitle = f'{suptitle} - Augmented' if augmented else suptitle
        suptitle = f'{suptitle}{total_actions}'
        plt.suptitle(suptitle, fontsize=30)
        plt.xlabel('Frame idx', fontsize=22)
        plt.ylabel('Normed value', fontsize=22)
        plt.xticks(list(range((client - 1) * num_frames_per_client,
                              client * num_frames_per_client + 1, len(df_client) // 20))) # ticks in 5% increments
        # Fix the legend / remove duplicated areas and labels
        hand, labl = ax.get_legend_handles_labels()
        handout = []
        lablout = []
        for h, l in zip(hand, labl):
            if l not in lablout:
                lablout.append(l)
                handout.append(h)
        ax.legend(handout, lablout, fontsize='x-large')
        sname = os.path.join(s_path, f'{dataset_name}_Client{client:02d}')
        sname = f'{sname}-aug' if augmented else sname
        plt.savefig(f'{sname}.png', dpi=300)
        plt.close()
    # Add summary and save it as a JSON file
    actions_summary = {
        'avg_no_action': total_action_counts[2.0] / num_clients,
        'avg_turn_left': total_action_counts[3.0] / num_clients,
        'avg_turn_right': total_action_counts[4.0] / num_clients,
        'avg_continue_straight': total_action_counts[5.0] / num_clients
    }
    summary = {
        'num_clients': num_clients,
        'num_frames_per_client': num_frames_per_client,
        # NOTE(review): the 20 here looks like the capture rate in frames per
        # second (20 fps * 3600 s/h) -- confirm.
        'hours_per_client': num_frames_per_client / (20 * 60 * 60),
        'total_action_counts': total_action_counts,
        'actions_summary': actions_summary,
        'max_speed_clients': max_speed_clients,
        'idx_change_clients': idx_change_clients,
        'dirs_clients': dirs_clients
    }
    with open(os.path.join(s_path, f'{dataset_name}-summary.json'), 'w') as f:
        json.dump(summary, f, indent=4)
# ====================================================================
def main():
    """Parse CLI arguments, load the dataset and produce all requested plots."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', type=str, help='Path to the head of the dataset', required=True)
    parser.add_argument('--filename', type=str, help='Name of file to save', default=None)
    parser.add_argument('--preloads-name', type=str, help='Name of preload file', default=None)
    parser.add_argument('--processes-per-cpu', '-proc', type=int, help='Processes per cpu (default: %(default)s)', default=2)
    parser.add_argument('--speed-factor', '-sf', type=float, help='Speed factor to normalize data (default: %(default)s)', default=14.0)
    parser.add_argument('--plot-clients', action='store_true', help='Add flag to plot the actions and speed of a client')
    args = parser.parse_args()
    # Create dir if it doesn't exist
    out_dir = os.path.join(os.getcwd(), 'data_analysis')
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    print('Getting the dataframe...')
    if args.preloads_name is None:
        # Raw dataset: a .npy cache filename is mandatory
        assert args.filename is not None
        assert args.filename.endswith('.npy')
        df, augmented = get_original_df(args.path, args.filename, args.processes_per_cpu)
        save_name = os.path.basename(args.filename).split('.')[0]
    else:
        # Preloaded data is augmented
        df, augmented = get_augmented_df(preloads_name=args.preloads_name)
        save_name = os.path.basename(args.preloads_name).split('.')[0]
    # Create and save the violin plots
    print('Plotting data...')
    violin_plot(df, save_name, augmented)
    if args.plot_clients:
        print(f'Plotting actions taken by all clients in {args.path}...')
        plot_clients(path=args.path, df=df, augmented=augmented, speed_factor=args.speed_factor)
    print('Done!')
# ====================================================================
# Standard entry-point guard: run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
# ====================================================================
| import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import json
import glob
import os
import argparse
from typing import Tuple, Union, List
from collections import Counter
from tqdm import tqdm
from multiprocessing import Pool
pd.options.mode.chained_assignment = None # default='warn'
# ====================================================================
def get_data(img_pth: Union[str, os.PathLike]) -> dict:
"""Get a single data from the given file.json path"""
with open(img_pth, 'r') as f:
data = json.load(f)
return data
def get_original_df(
path: Union[str, os.PathLike],
filename: str,
processes_per_cpu: int = 2) -> Tuple[pd.DataFrame, bool]:
"""Get a DataFrame from all the can_bus*.json files in the dataset"""
save_path = os.path.join(os.getcwd(), 'data_analysis', filename)
if os.path.isfile(save_path):
print('.npy file exists, loading it...')
data = list(np.load(save_path, allow_pickle=True))
else:
# Construct the dataset
print('.npy file not found, constructing it...')
all_data_paths = sorted(glob.glob(os.path.join(path, '**/can_bus*.json'), recursive=True))
with Pool(os.cpu_count() * processes_per_cpu) as p:
data = list(tqdm(p.imap(get_data, all_data_paths), total=len(all_data_paths)))
np.save(save_path, data)
# Create dataframe with the data
df = pd.DataFrame(data)
print(df.describe())
return df, False
# ====================================================================
def get_augmented_df(preloads_name: str) -> Tuple[pd.DataFrame, bool]:
"""Use the preloads file to load the data; will be augmented, as that's what we did"""
assert preloads_name.endswith('.npy')
data = np.load(os.path.join(os.getcwd(), '_preloads', preloads_name), allow_pickle=True)[1]
df = pd.DataFrame(data)
print(df.describe())
return df, True
# ====================================================================
def violin_plot(df: pd.DataFrame, save_name: str, augmented: bool) -> None:
"""Save violin plot for the interesting parameters using df"""
directions_dict = {'No Action': 2.0, 'Turn Left': 3.0, 'Turn Right': 4.0, 'Continue Straight': 5.0}
# Auxiliary function for setting the quartile lines
def set_lines(ax):
for l in ax.lines:
l.set_linestyle('--')
l.set_linewidth(0.6)
l.set_color('white')
l.set_alpha(0.7)
for l in ax.lines[1::3]:
l.set_linestyle('-')
l.set_linewidth(1.3)
l.set_color('black')
l.set_alpha(0.8)
for key in directions_dict:
# Get respective subset of the dataframe
data = df[df['directions'] == directions_dict[key]]
fig = plt.figure(figsize=(8, 6))
gs = fig.add_gridspec(1, 4)
fig.add_subplot(gs[0, 0])
ax = sns.violinplot(y='steer', data=data, color='r', inner='quartile')
set_lines(ax)
fig.add_subplot(gs[0, 1])
ax = sns.violinplot(y='throttle', data=data, color='g', inner='quartile')
set_lines(ax)
fig.add_subplot(gs[0, 2])
ax = sns.violinplot(y='brake', data=data, color='b', inner='quartile')
set_lines(ax)
fig.add_subplot(gs[0, 3])
ax = sns.violinplot(y='speed', data=data, color='m', inner='quartile')
set_lines(ax)
# When using tight layout, we need the title to be spaced accordingly
fig.tight_layout()
fig.subplots_adjust(top=0.88)
stitle = f'Direction: {key} - $N={len(data)}$ - ${100 * len(data)/len(df):6.3f}$% of total'
stitle = f'{stitle} - Augmented' if augmented else stitle
fig.suptitle(stitle, fontsize=16)
fname = f'{save_name}-{key.replace(" ", "")}'
fname = f'{fname}-aug' if augmented else fname
fig_name = os.path.join(os.getcwd(), 'data_analysis', save_name, 'violin_plots', f'{fname}.png')
os.makedirs(os.path.join(os.getcwd(), 'data_analysis', save_name, 'violin_plots'), exist_ok=True)
plt.savefig(fig_name)
plt.close()
# ====================================================================
def plot_clients(path: Union[str, os.PathLike], df: pd.DataFrame, augmented: bool, speed_factor: float) -> None:
"""Plot the steer, throttle, brake, and speed of a client during its data collection"""
# Some sanity check
if path.endswith(os.sep):
path = path[:-1]
# Get dataset name and make the necessary directories
dataset_name = os.path.basename(path)
s_path = os.path.join(os.getcwd(), 'data_analysis', dataset_name, 'clients')
os.makedirs(s_path, exist_ok=True)
# Get the number of clients/cars that collected the data
clients = glob.glob(os.path.join(path, '**/*'))
clients = [cl for cl in clients if os.path.isdir(cl)] # Remove path of metadata.json
num_clients = len(clients)
# Total number of frames and for a single client
num_frames = len(df)
num_frames_per_client = num_frames // num_clients
# Aux function
def get_change_locs(df: pd.DataFrame, cli: int) -> Tuple[List[int], List[float]]:
"""Get the index and directions from the df of the actions taken by the client"""
df['directions_str'] = df['directions'].astype(str) # In order to compare, turn directions into a string
# Shift directions column by 1 (filling the top with the head), and compare to the original
df['change'] = df['directions_str'].shift(1, fill_value=df['directions_str'].head(1)) != df['directions_str']
# Get the rows where there's a change
index_change = list(df.loc[df['change'] == True].index.values)
# Add the first frame
index_change = [(cli - 1) * len(df)] + index_change
# For these indexes, get the value of the direction
dirs = list(df['directions'][index_change].values)
# Add the last frame
index_change = index_change + [cli * len(df) - 1]
return index_change, dirs
# Dictionaries containing the name and color for plotting the direction given to the car
my_labels = {2.0: 'No Action', 3.0: 'Turn Left', 4.0: 'Turn Right', 5.0: 'Continue Straight'}
colors = {2.0: 'gold', 3.0: 'gray', 4.0: 'cyan', 5.0: 'magenta'}
# Initialize the total counts per action
total_action_counts = Counter({2.0: 0, 3.0: 0, 4.0: 0, 5.0: 0})
max_speed_clients = {}
idx_change_clients = {}
dirs_clients = {}
# Make a plot for each client
for client in tqdm(range(1, num_clients + 1), total=num_clients, unit='clients'):
if augmented:
# Dataframe will have augmented data, which uses center, left, right, center, ... data
df_client = df[(client - 1) * num_frames_per_client: client * num_frames_per_client: 3]
else:
df_client = df[(client - 1) * num_frames_per_client: client * num_frames_per_client]
# Augmented data will have been normalized already
df_client['speed'] = df_client['speed'].div(speed_factor) # normalize to range [0, 1]
# The actual max speed (see if it differs from collected data)
actual_max_speed = df_client['speed'].max()
max_speed_clients[client] = actual_max_speed
# Build the plot
fig, ax = plt.subplots(figsize=(48, 16))
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
df_client.plot(y=['steer', 'throttle', 'brake', 'speed'], ax=ax)
# Set the area colors for when an direction is taken
idx_change, dirs = get_change_locs(df_client, client)
for idx, dir in enumerate(dirs):
ax.axvspan(idx_change[idx], idx_change[idx + 1], facecolor=colors[dir], alpha=0.5, label=my_labels[dir])
# Save these index and directions for each client
idx_change_clients[f'client_{client:02d}'] = [int(idx) for idx in idx_change]
dirs_clients[f'client_{client:02d}'] = [float(d) for d in dirs]
# Count the directions taken by the client
dirs_count = Counter(dirs)
# Add this to the total for the whole dataset
total_action_counts += dirs_count
# Add the counts to the title
total_actions = ''
for key in my_labels:
total_actions += f' - {my_labels[key]}: {dirs_count[key]}'
# Set title and x and y axes labels
suptitle = f'Client {client} - Actual max speed: {actual_max_speed:.4f}'
suptitle = f'{suptitle} - Augmented' if augmented else suptitle
suptitle = f'{suptitle}{total_actions}'
plt.suptitle(suptitle, fontsize=30)
plt.xlabel('Frame idx', fontsize=22)
plt.ylabel('Normed value', fontsize=22)
plt.xticks(list(range((client - 1) * num_frames_per_client,
client * num_frames_per_client + 1, len(df_client) // 20))) # ticks in 5% increments
# Fix the legend / remove duplicated areas and labels
hand, labl = ax.get_legend_handles_labels()
handout = []
lablout = []
for h, l in zip(hand, labl):
if l not in lablout:
lablout.append(l)
handout.append(h)
ax.legend(handout, lablout, fontsize='x-large')
sname = os.path.join(s_path, f'{dataset_name}_Client{client:02d}')
sname = f'{sname}-aug' if augmented else sname
plt.savefig(f'{sname}.png', dpi=300)
plt.close()
# Add summary and save it as a JSON file
actions_summary = {
'avg_no_action': total_action_counts[2.0] / num_clients,
'avg_turn_left': total_action_counts[3.0] / num_clients,
'avg_turn_right': total_action_counts[4.0] / num_clients,
'avg_continue_straight': total_action_counts[5.0] / num_clients
}
summary = {
'num_clients': num_clients,
'num_frames_per_client': num_frames_per_client,
'hours_per_client': num_frames_per_client / (20 * 60 * 60),
'total_action_counts': total_action_counts,
'actions_summary': actions_summary,
'max_speed_clients': max_speed_clients,
'idx_change_clients': idx_change_clients,
'dirs_clients': dirs_clients
}
with open(os.path.join(s_path, f'{dataset_name}-summary.json'), 'w') as f:
json.dump(summary, f, indent=4)
# ====================================================================
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, help='Path to the head of the dataset', required=True)
parser.add_argument('--filename', type=str, help='Name of file to save', default=None)
parser.add_argument('--preloads-name', type=str, help='Name of preload file', default=None)
parser.add_argument('--processes-per-cpu', '-proc', type=int, help='Processes per cpu (default: %(default)s)', default=2)
parser.add_argument('--speed-factor', '-sf', type=float, help='Speed factor to normalize data (default: %(default)s)', default=14.0)
parser.add_argument('--plot-clients', action='store_true', help='Add flag to plot the actions and speed of a client')
args = parser.parse_args()
# Create dir if it doesn't exist
if not os.path.exists(os.path.join(os.getcwd(), 'data_analysis')):
os.mkdir(os.path.join(os.getcwd(), 'data_analysis'))
print('Getting the dataframe...')
if args.preloads_name is not None:
# Preloaded data is augmented
df, augmented = get_augmented_df(preloads_name=args.preloads_name)
save_name = os.path.basename(args.preloads_name).split('.')[0]
else:
assert args.filename is not None
assert args.filename.endswith('.npy')
df, augmented = get_original_df(args.path, args.filename, args.processes_per_cpu)
save_name = os.path.basename(args.filename).split('.')[0]
# Create and save the violin plots
print('Plotting data...')
violin_plot(df, save_name, augmented)
if args.plot_clients:
print(f'Plotting actions taken by all clients in {args.path}...')
plot_clients(path=args.path, df=df, augmented=augmented, speed_factor=args.speed_factor)
print('Done!')
# ====================================================================
if __name__ == '__main__':
main()
# ====================================================================
|
from gpiozero import CPUTemperature
from tabulate import tabulate
from math import floor
import numpy as np
import termplotlib as tpl
import time
import shutil
def roundNum(num, digits):
    """Truncate ``num`` to ``digits`` decimal places (floor, not round-half-up)."""
    return floor(num * 10 ** digits) / (10 ** digits)
def CtoF(temp):
    """Convert a Celsius temperature to Fahrenheit, truncated to 3 decimals.

    Returns the value as a string for direct display in the status table.
    """
    # BUG FIX: Celsius-to-Fahrenheit is temp * 1.8 + 32; the original code
    # added 1.8 instead of multiplying, which is wrong for any temperature.
    fahrenheit = (temp * 1.8) + 32
    rounded = roundNum(fahrenheit, 3)
    return str(rounded)
cpu = CPUTemperature()
# ANSI escape codes used to colorize the terminal output.
colors = {
    'HEADER': '\033[95m',
    'OKBLUE': '\033[94m',
    'OKCYAN': '\033[96m',
    'OKGREEN': '\033[92m',
    'WARNING': '\033[93m',
    'FAIL': '\033[91m',
    'ENDC': '\033[0m',
    'BOLD': '\033[1m',
    'UNDERLINE': '\033[4m',
}
# Rolling history of sample times (seconds) and temperatures (Celsius).
times = [0]
temps = [cpu.temperature]
while True:
    tickRate = 2 #takes data every {tickRate} seconds
    minutes = 5
    numPoints = int(60 / tickRate * minutes)
    width, height = shutil.get_terminal_size()
    # Keep only the last `minutes` worth of samples.
    if len(temps) > numPoints:
        temps = temps[-numPoints:]
        times = times[-numPoints:]
    temps.append(cpu.temperature)
    times.append(times[-1] + tickRate)
    averageTemp = roundNum(np.average(temps), 3)
    # Pick a display color that reflects how hot the CPU currently is.
    cpuTempColor = ''
    if cpu.temperature < 50:
        cpuTempColor = colors['OKBLUE']
    elif cpu.temperature < 65:
        cpuTempColor = colors['OKCYAN']
    elif cpu.temperature < 80:
        cpuTempColor = colors['OKGREEN']
    else:
        cpuTempColor = colors['FAIL'] + colors['BOLD']
    # BUG FIX: the dict subscripts inside these f-strings must use a quote
    # different from the f-string's own; f"{colors["OKGREEN"]}" is a
    # SyntaxError before Python 3.12.
    table = [[
        f"{cpuTempColor}{str(cpu.temperature)}\N{DEGREE SIGN}C / {CtoF(cpu.temperature)}\N{DEGREE SIGN}F\n",
        f"{colors['OKGREEN']}{averageTemp} / {CtoF(averageTemp)}\N{DEGREE SIGN}F\n",
        f"{colors['OKGREEN']}{np.amax(temps)} / {CtoF(np.amax(temps))}\N{DEGREE SIGN}F\n",
        f"{colors['OKGREEN']}{np.amin(temps)} / {CtoF(np.amin(temps))}\N{DEGREE SIGN}F"
    ]]
    headers = [
        f"{colors['OKGREEN']}CPU TEMPERATURE",
        f"{colors['OKGREEN']}Average Temperature (last {minutes} minutes)",
        f"{colors['FAIL']}Peak Temperature (last {minutes} minutes)",
        f"{colors['OKCYAN']}Lowest Temperature (last {minutes} minutes){colors['OKGREEN']}", #OKGREEN at end is to make sure table lines are green, not cyan
    ]
    print('\n')
    fig = tpl.figure()
    plotConfig = {
        'width': width-2,
        'height': height-5,
        'label': 'CPU Temperature',
        'xlabel': 'Time (s)',
        # BUG FIX: xlim expects scalar bounds; times[-1:] is a one-element
        # list, so use times[-1] for the upper limit.
        'xlim': [times[0], times[-1]],
        'ylim': [np.amin(temps)-2, np.amax(temps)+2],
        'title': f"CPU Temperature over last {minutes} minutes",
    }
    fig.plot(times, temps, **plotConfig)
    fig.show()
    print('\n')
    print(tabulate(table, headers=headers))
time.sleep(tickRate) | from gpiozero import CPUTemperature
from tabulate import tabulate
from math import floor
import numpy as np
import termplotlib as tpl
import time
import shutil
def roundNum(num, digits):
return floor(num * 10 ** digits) / (10 ** digits)
def CtoF(temp):
    """Convert a Celsius temperature to Fahrenheit, truncated to 3 decimals.

    Returns the value as a string ready for display.
    """
    # Fixed: F = C * 9/5 + 32. The original computed (temp + 1.8) + 32,
    # which merely offsets the Celsius value instead of converting it.
    fahrenheit = (temp * 1.8) + 32
    rounded = roundNum(fahrenheit, 3)
    return str(rounded)
cpu = CPUTemperature()
colors = {
'HEADER': '\033[95m',
'OKBLUE': '\033[94m',
'OKCYAN': '\033[96m',
'OKGREEN': '\033[92m',
'WARNING': '\033[93m',
'FAIL': '\033[91m',
'ENDC': '\033[0m',
'BOLD': '\033[1m',
'UNDERLINE': '\033[4m',
}
times = [0]
temps = [cpu.temperature]
while True:
tickRate = 2 #takes data every {tickRate} seconds
minutes = 5
numPoints = int(60 / tickRate * minutes)
width, height = shutil.get_terminal_size()
if len(temps) > numPoints:
temps = temps[-numPoints:]
times = times[-numPoints:]
temps.append(cpu.temperature)
times.append(times[-1] + tickRate)
averageTemp = roundNum(np.average(temps), 3)
cpuTempColor = ''
if cpu.temperature < 50:
cpuTempColor = colors['OKBLUE']
elif cpu.temperature < 65:
cpuTempColor = colors['OKCYAN']
elif cpu.temperature < 80:
cpuTempColor = colors['OKGREEN']
else:
cpuTempColor = colors['FAIL'] + colors['BOLD']
table = [[
f"{cpuTempColor}{str(cpu.temperature)}\N{DEGREE SIGN}C / {CtoF(cpu.temperature)}\N{DEGREE SIGN}F\n",
f"{colors['OKGREEN']}{averageTemp} / {CtoF(averageTemp)}\N{DEGREE SIGN}F\n",
f"{colors['OKGREEN']}{np.amax(temps)} / {CtoF(np.amax(temps))}\N{DEGREE SIGN}F\n",
f"{colors['OKGREEN']}{np.amin(temps)} / {CtoF(np.amin(temps))}\N{DEGREE SIGN}F"
]]
headers = [
f"{colors['OKGREEN']}CPU TEMPERATURE",
f"{colors['OKGREEN']}Average Temperature (last {minutes} minutes)",
f"{colors['FAIL']}Peak Temperature (last {minutes} minutes)",
f"{colors['OKCYAN']}Lowest Temperature (last {minutes} minutes){colors['OKGREEN']}", #OKGREEN at end is to make sure table lines are green, not cyan
]
print('\n')
fig = tpl.figure()
plotConfig = {
'width': width-2,
'height': height-5,
'label': 'CPU Temperature',
'xlabel': 'Time (s)',
'xlim': [times[0], times[-1:]],
'ylim': [np.amin(temps)-2, np.amax(temps)+2],
'title': f"CPU Temperature over last {minutes} minutes",
}
fig.plot(times, temps, **plotConfig)
fig.show()
# width=width-2, height=height-5, label='CPU Temperature', xlabel='Time (s)', , ylim=[np.amin(temps)-2, np.amax(temps)+2], title='CPU Temperature over last 5 minutes'
print('\n')
print(tabulate(table, headers=headers))
time.sleep(tickRate) |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 23:07:15 2019
@author: ydima
"""
import logging
import os
from pathlib import Path
import random
import shlex
import string
from subprocess import PIPE, Popen
import tempfile
from typing import Dict, List, Optional, Union
import pandas as pd
from .constants import (
DIRECTIONS,
IN,
IS_WIN32,
NEWLINE,
OUT,
QUERY,
QUERYOUT,
SQLCHAR,
TABLE,
VIEW,
BCPandasException,
BCPandasValueError,
read_data_settings,
sql_collation,
)
logger = logging.getLogger(__name__)
def bcp(
    sql_item: str,
    direction: str,
    flat_file: str,
    creds,
    sql_type: str = "table",
    schema: str = "dbo",
    format_file_path: str = None,
    batch_size: int = None,
    col_delimiter: str = None,
    row_terminator: str = None,
    bcp_path: Union[str, Path] = None,
    error_file_path: str = None
):
    """
    Run the ``bcp`` command-line utility to copy data between a flat file
    and a SQL Server table, view, or query.

    See https://docs.microsoft.com/en-us/sql/tools/bcp-utility

    Parameters
    ----------
    sql_item : table/view name, or the SQL text when ``sql_type`` is a query
    direction : one of the DIRECTIONS constants (in / out / queryout)
    flat_file : path of the data file to read from or write to
    creds : credentials object (server, database, username/password or Kerberos)
    sql_type : 'table', 'view' or 'query'
    schema : database schema of the object (ignored for queries)
    format_file_path : format file passed via -f for the 'in' direction
    batch_size : rows per batch (-b), optional
    col_delimiter, row_terminator : overrides for out/queryout formatting
    bcp_path : explicit path to the bcp executable, optional
    error_file_path : file where bcp logs rejected rows (-e), optional

    Raises
    ------
    BCPandasValueError : invalid direction / SQL-object combination
    BCPandasException : bcp exited with a non-zero code
    """
    combos = {TABLE: [IN, OUT], QUERY: [QUERYOUT], VIEW: [IN, OUT]}
    direc = direction.lower()
    # validation
    if direc not in DIRECTIONS:
        raise BCPandasValueError(
            f"Param 'direction' must be one of {DIRECTIONS}, you passed {direc}"
        )
    if direc not in combos[sql_type]:
        raise BCPandasValueError(
            f"Wrong combo of direction and SQL object, you passed {sql_type} and {direc} ."
        )
    # auth: Kerberos trusted connection vs username/password
    if creds.with_krb_auth:
        auth = ["-T"]
    else:
        auth = ["-U", creds.username, "-P", creds.password]
    # prepare SQL item string
    if sql_type == QUERY:
        # remove newlines for queries, otherwise messes up BCP
        sql_item_string = quote_this("".join(sql_item.splitlines()))
    else:
        sql_item_string = f"{schema}.{sql_item}"
    # construct BCP command
    bcp_command = [
        "bcp" if bcp_path is None else quote_this(str(bcp_path)),
        sql_item_string,
        direc,
        flat_file,
        "-S",
        creds.server,
        "-d",
        creds.database,
        "-q",  # Executes the SET QUOTED_IDENTIFIERS ON statement, needed for Azure SQL DW
    ]
    # Fixed: only pass -e when a path was actually supplied. Previously the
    # default None was placed in the argv list, which breaks command assembly
    # downstream (" ".join / Popen cannot handle None).
    if error_file_path is not None:
        bcp_command += ["-e", error_file_path]
    bcp_command += auth
    if batch_size:
        bcp_command += ["-b", str(batch_size)]
    # formats
    if direc == IN:
        # NOTE(review): format_file_path is assumed non-None for 'in' — confirm callers.
        bcp_command += ["-f", format_file_path]
    elif direc in (OUT, QUERYOUT):
        # Fixed: inner subscripts use single quotes so the f-strings are valid
        # on Python < 3.12 (reusing the outer quote was a SyntaxError there).
        bcp_command += [
            "-c",  # marking as character data, not Unicode (maybe make as param?)
            quote_this(
                f"-t{read_data_settings['delimiter'] if col_delimiter is None else col_delimiter}"
            ),
            quote_this(
                f"-r{read_data_settings['newline'] if row_terminator is None else row_terminator}"
            ),
        ]
    # execute; never log the raw password
    bcp_command_log = [c if c != creds.password else "[REDACTED]" for c in bcp_command]
    logger.info(f"Executing BCP command now... \nBCP command is: {bcp_command_log}")
    ret_code = run_cmd(bcp_command)
    if ret_code:
        raise BCPandasException(f"Bcp command failed with exit code {ret_code}")
def get_temp_file() -> str:
    """
    Returns full path to a temporary file without creating it.

    The name is 21 random alphanumeric characters inside the system temp dir.
    """
    rand_name = "".join(random.choices(string.ascii_letters + string.digits, k=21))
    return os.path.join(tempfile.gettempdir(), rand_name)
def _escape(input_string: str) -> str:
    """
    Adopted from https://github.com/titan550/bcpy/blob/master/bcpy/format_file_builder.py#L25

    Backslash-escapes quotes and CR/LF in a single translation pass
    (equivalent to the chained str.replace version: no replacement output
    is itself a key of the table).
    """
    table = str.maketrans({'"': '\\"', "'": "\\'", "\r": "\\r", "\n": "\\n"})
    return input_string.translate(table)
def build_format_file(
    df: pd.DataFrame, delimiter: str, db_cols_order: Optional[Dict[str, int]] = None
) -> str:
    """
    Creates the non-xml SQL format file. Puts 4 spaces between each section.
    See https://docs.microsoft.com/en-us/sql/relational-databases/import-export/non-xml-format-files-sql-server
    for the specification of the file.
    # TODO add params/options to control:
    # - the char type (not just SQLCHAR),
    Parameters
    ----------
    df : pandas DataFrame
    delimiter : a valid delimiter character
    db_cols_order : dict, optional
        Dict of {database column name -> ordinal position of the column}.
        Maps existing columns in the database to their ordinal position, i.e. the order of the columns in the db table.
        1-indexed, so the first columns is 1, second is 2, etc.
        Only needed if the order of the columns in the dataframe doesn't match the database.
    Returns
    -------
    A string containing the format file
    """
    _space = " " * 4
    # Header lines: format-file version "9.0", then the number of columns.
    format_file_str = f"9.0\n{len(df.columns)}\n"  # Version and Number of columns
    # One line per dataframe column, fields separated by 4 spaces.
    for col_num, col_name in enumerate(df.columns, start=1):
        # last col gets a newline sep
        _delim = delimiter if col_num != len(df.columns) else NEWLINE
        _line = _space.join(
            [
                str(col_num),  # Host file field order
                SQLCHAR,  # Host file data type
                str(0),  # Prefix length
                str(0),  # Host file data length
                f'"{_escape(_delim)}"',  # Terminator (see note below)
                # NOTE(review): raises KeyError if db_cols_order is missing a
                # column name — assumed caller validates; confirm.
                str(
                    col_num if not db_cols_order else db_cols_order[str(col_name)]
                ),  # Server column order
                str(col_name),  # Server column name, optional as long as not blank
                sql_collation,  # Column collation
                "\n",
            ]
        )
        format_file_str += _line
    # FYI very important to surround the Terminator with quotes, otherwise BCP fails with:
    # "Unexpected EOF encountered in BCP data-file". Hugely frustrating bug.
    return format_file_str
def quote_this(this: str, skip: bool = False) -> str:
    """
    OS-safe way to quote a string.
    Returns the string with quotes around it.
    On Windows ~~it's double quotes~~ we skip quoting,
    on Linux it's single quotes.
    """
    if not isinstance(this, str):
        # Non-strings pass through untouched.
        return this
    if IS_WIN32:
        return this  # TODO maybe change?
    return shlex.quote(this)
def run_cmd(cmd: List[str]) -> int:
    """
    Runs the given command.
    Prints STDOUT in real time, prints STDERR when command is complete,
    and logs both STDOUT and STDERR.

    Parameters
    ----------
    cmd : list of str
        The command to run, to be submitted to `subprocess.Popen()`

    Returns
    -------
    The exit code of the command
    """
    if IS_WIN32:
        with_shell = False
    else:
        with_shell = True
        cmd = " ".join(cmd)  # type: ignore
    proc = Popen(
        cmd,
        stdout=PIPE,
        stderr=PIPE,
        encoding="utf-8",
        # Fixed: `errors` must name a codec error handler. The previous value
        # "utf-8" is not a registered handler and raised LookupError on the
        # first undecodable byte; "replace" keeps the stream readable.
        errors="replace",
        shell=with_shell,
    )
    # live stream STDOUT
    while True:
        outs = proc.stdout.readline()
        if outs:
            print(outs, end="")
            logger.info(outs)
        if proc.poll() is not None and outs == "":
            break
    # Fixed: join the stderr lines before printing; the original printed the
    # raw list returned by readlines(), which rendered its Python repr.
    errs = "".join(proc.stderr.readlines())
    if errs:
        print(errs, end="")
        logger.error(errs)
    return proc.returncode
| # -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 23:07:15 2019
@author: ydima
"""
import logging
import os
from pathlib import Path
import random
import shlex
import string
from subprocess import PIPE, Popen
import tempfile
from typing import Dict, List, Optional, Union
import pandas as pd
from .constants import (
DIRECTIONS,
IN,
IS_WIN32,
NEWLINE,
OUT,
QUERY,
QUERYOUT,
SQLCHAR,
TABLE,
VIEW,
BCPandasException,
BCPandasValueError,
read_data_settings,
sql_collation,
)
logger = logging.getLogger(__name__)
def bcp(
    sql_item: str,
    direction: str,
    flat_file: str,
    creds,
    sql_type: str = "table",
    schema: str = "dbo",
    format_file_path: str = None,
    batch_size: int = None,
    col_delimiter: str = None,
    row_terminator: str = None,
    bcp_path: Union[str, Path] = None,
    error_file_path: str = None
):
    """
    Run the ``bcp`` command-line utility to copy data between a flat file
    and a SQL Server table, view, or query.

    See https://docs.microsoft.com/en-us/sql/tools/bcp-utility

    Parameters
    ----------
    sql_item : table/view name, or the SQL text when ``sql_type`` is a query
    direction : one of the DIRECTIONS constants (in / out / queryout)
    flat_file : path of the data file to read from or write to
    creds : credentials object (server, database, username/password or Kerberos)
    sql_type : 'table', 'view' or 'query'
    schema : database schema of the object (ignored for queries)
    format_file_path : format file passed via -f for the 'in' direction
    batch_size : rows per batch (-b), optional
    col_delimiter, row_terminator : overrides for out/queryout formatting
    bcp_path : explicit path to the bcp executable, optional
    error_file_path : file where bcp logs rejected rows (-e), optional

    Raises
    ------
    BCPandasValueError : invalid direction / SQL-object combination
    BCPandasException : bcp exited with a non-zero code
    """
    combos = {TABLE: [IN, OUT], QUERY: [QUERYOUT], VIEW: [IN, OUT]}
    direc = direction.lower()
    # validation
    if direc not in DIRECTIONS:
        raise BCPandasValueError(
            f"Param 'direction' must be one of {DIRECTIONS}, you passed {direc}"
        )
    if direc not in combos[sql_type]:
        raise BCPandasValueError(
            f"Wrong combo of direction and SQL object, you passed {sql_type} and {direc} ."
        )
    # auth: Kerberos trusted connection vs username/password
    if creds.with_krb_auth:
        auth = ["-T"]
    else:
        auth = ["-U", creds.username, "-P", creds.password]
    # prepare SQL item string
    if sql_type == QUERY:
        # remove newlines for queries, otherwise messes up BCP
        sql_item_string = quote_this("".join(sql_item.splitlines()))
    else:
        sql_item_string = f"{schema}.{sql_item}"
    # construct BCP command
    bcp_command = [
        "bcp" if bcp_path is None else quote_this(str(bcp_path)),
        sql_item_string,
        direc,
        flat_file,
        "-S",
        creds.server,
        "-d",
        creds.database,
        "-q",  # Executes the SET QUOTED_IDENTIFIERS ON statement, needed for Azure SQL DW
    ]
    # Fixed: only pass -e when a path was actually supplied. Previously the
    # default None was placed in the argv list, which breaks command assembly
    # downstream (" ".join / Popen cannot handle None).
    if error_file_path is not None:
        bcp_command += ["-e", error_file_path]
    bcp_command += auth
    if batch_size:
        bcp_command += ["-b", str(batch_size)]
    # formats
    if direc == IN:
        # NOTE(review): format_file_path is assumed non-None for 'in' — confirm callers.
        bcp_command += ["-f", format_file_path]
    elif direc in (OUT, QUERYOUT):
        bcp_command += [
            "-c",  # marking as character data, not Unicode (maybe make as param?)
            quote_this(
                f"-t{read_data_settings['delimiter'] if col_delimiter is None else col_delimiter}"
            ),
            quote_this(
                f"-r{read_data_settings['newline'] if row_terminator is None else row_terminator}"
            ),
        ]
    # execute; never log the raw password
    bcp_command_log = [c if c != creds.password else "[REDACTED]" for c in bcp_command]
    logger.info(f"Executing BCP command now... \nBCP command is: {bcp_command_log}")
    ret_code = run_cmd(bcp_command)
    if ret_code:
        raise BCPandasException(f"Bcp command failed with exit code {ret_code}")
def get_temp_file() -> str:
    """
    Returns full path to a temporary file without creating it.

    The name is 21 random alphanumeric characters inside the system temp dir.
    """
    rand_name = "".join(random.choices(string.ascii_letters + string.digits, k=21))
    return os.path.join(tempfile.gettempdir(), rand_name)
def _escape(input_string: str) -> str:
    """
    Adopted from https://github.com/titan550/bcpy/blob/master/bcpy/format_file_builder.py#L25

    Backslash-escapes quotes and CR/LF in a single translation pass
    (equivalent to the chained str.replace version: no replacement output
    is itself a key of the table).
    """
    table = str.maketrans({'"': '\\"', "'": "\\'", "\r": "\\r", "\n": "\\n"})
    return input_string.translate(table)
def build_format_file(
    df: pd.DataFrame, delimiter: str, db_cols_order: Optional[Dict[str, int]] = None
) -> str:
    """
    Creates the non-xml SQL format file. Puts 4 spaces between each section.
    See https://docs.microsoft.com/en-us/sql/relational-databases/import-export/non-xml-format-files-sql-server
    for the specification of the file.
    # TODO add params/options to control:
    # - the char type (not just SQLCHAR),
    Parameters
    ----------
    df : pandas DataFrame
    delimiter : a valid delimiter character
    db_cols_order : dict, optional
        Dict of {database column name -> ordinal position of the column}.
        Maps existing columns in the database to their ordinal position, i.e. the order of the columns in the db table.
        1-indexed, so the first columns is 1, second is 2, etc.
        Only needed if the order of the columns in the dataframe doesn't match the database.
    Returns
    -------
    A string containing the format file
    """
    _space = " " * 4
    # Header lines: format-file version "9.0", then the number of columns.
    format_file_str = f"9.0\n{len(df.columns)}\n"  # Version and Number of columns
    # One line per dataframe column, fields separated by 4 spaces.
    for col_num, col_name in enumerate(df.columns, start=1):
        # last col gets a newline sep
        _delim = delimiter if col_num != len(df.columns) else NEWLINE
        _line = _space.join(
            [
                str(col_num),  # Host file field order
                SQLCHAR,  # Host file data type
                str(0),  # Prefix length
                str(0),  # Host file data length
                f'"{_escape(_delim)}"',  # Terminator (see note below)
                # NOTE(review): raises KeyError if db_cols_order is missing a
                # column name — assumed caller validates; confirm.
                str(
                    col_num if not db_cols_order else db_cols_order[str(col_name)]
                ),  # Server column order
                str(col_name),  # Server column name, optional as long as not blank
                sql_collation,  # Column collation
                "\n",
            ]
        )
        format_file_str += _line
    # FYI very important to surround the Terminator with quotes, otherwise BCP fails with:
    # "Unexpected EOF encountered in BCP data-file". Hugely frustrating bug.
    return format_file_str
def quote_this(this: str, skip: bool = False) -> str:
    """
    OS-safe way to quote a string.
    Returns the string with quotes around it.
    On Windows ~~it's double quotes~~ we skip quoting,
    on Linux it's single quotes.
    """
    if not isinstance(this, str):
        # Non-strings pass through untouched.
        return this
    if IS_WIN32:
        return this  # TODO maybe change?
    return shlex.quote(this)
def run_cmd(cmd: List[str]) -> int:
    """
    Runs the given command.
    Prints STDOUT in real time, prints STDERR when command is complete,
    and logs both STDOUT and STDERR.

    Parameters
    ----------
    cmd : list of str
        The command to run, to be submitted to `subprocess.Popen()`

    Returns
    -------
    The exit code of the command
    """
    if IS_WIN32:
        with_shell = False
    else:
        with_shell = True
        cmd = " ".join(cmd)  # type: ignore
    proc = Popen(
        cmd,
        stdout=PIPE,
        stderr=PIPE,
        encoding="utf-8",
        # Fixed: `errors` must name a codec error handler. The previous value
        # "utf-8" is not a registered handler and raised LookupError on the
        # first undecodable byte; "replace" keeps the stream readable.
        errors="replace",
        shell=with_shell,
    )
    # live stream STDOUT
    while True:
        outs = proc.stdout.readline()
        if outs:
            print(outs, end="")
            logger.info(outs)
        if proc.poll() is not None and outs == "":
            break
    # Fixed: join the stderr lines before printing; the original printed the
    # raw list returned by readlines(), which rendered its Python repr.
    errs = "".join(proc.stderr.readlines())
    if errs:
        print(errs, end="")
        logger.error(errs)
    return proc.returncode
|
from BiblioAlly import catalog as cat, domain, translator as bibtex
class IeeeXTranslator(bibtex.Translator):
    """Translates between IEEE Xplore BibTeX entries and BiblioAlly documents."""

    def _document_from_proto_document(self, proto_document):
        """Build a domain.Document from a parsed ("proto") BibTeX entry."""
        bibtex.Translator._translate_kind(proto_document)
        kind = proto_document['type']
        # NOTE(review): reads key 'field' while _proto_document_from_document
        # writes 'fields' — presumably the parser and the serializer use
        # different key names; confirm against bibtex.Translator.
        fields = proto_document['field']
        if 'title' in fields:
            title = self._unbroken(self._uncurlied(fields['title']))
        else:
            title = ''
        if 'abstract' in fields:
            abstract = self._unbroken(self._uncurlied(fields['abstract']))
        else:
            abstract = ''
        year = int(fields['year'])
        author_field = ''
        if 'author' in fields:
            # '}and' appears when curly groups abut the 'and' separator
            author_field = self._unbroken(self._all_uncurly(fields['author'].replace('}and', ' and')))
        if author_field == '':
            author_field = 'Author, Unamed'
        authors = self._authors_from_field(author_field)
        affiliations = self._expand_affiliations(None, authors)
        keywords = []
        if 'keywords' in fields:
            # Keywords are ';'-separated, and entries may themselves contain
            # ','-separated sub-keywords; collect the unique capitalized names.
            all_keywords = self._all_uncurly(fields['keywords']).split(';')
            keyword_names = set()
            for keyword_name in all_keywords:
                sub_keyword_names = keyword_name.split(',')
                for sub_keyword_name in sub_keyword_names:
                    name = sub_keyword_name.strip().capitalize()
                    if name not in keyword_names:
                        keyword_names.add(name)
            keyword_names = list(keyword_names)
            for keyword_name in keyword_names:
                keywords.append(domain.Keyword(name=keyword_name))
        document = domain.Document(proto_document['id'].strip(), kind, title, abstract, keywords, year, affiliations)
        document.generator = "IEEE Xplore"
        if 'doi' in fields:
            document.doi = self._uncurlied(fields['doi'])
        if 'journal' in fields:
            document.journal = self._uncurlied(fields['journal'])
        elif 'booktitle' in fields and kind == 'inproceedings':
            document.journal = self._uncurlied(fields['booktitle'])
        if 'number' in fields:
            if len(self._uncurlied(fields['number'])) > 0:
                document.number = self._uncurlied(fields['number'])
        if 'pages' in fields:
            if len(self._uncurlied(fields['pages'])) > 0:
                document.pages = self._uncurlied(fields['pages'])
        if 'url' in fields:
            if len(self._uncurlied(fields['url'])) > 0:
                document.url = self._uncurlied(fields['url'])
        if 'volume' in fields:
            if len(self._uncurlied(fields['volume'])) > 0:
                document.volume = self._uncurlied(fields['volume'])
        return document

    def _proto_document_from_document(self, document: domain.Document):
        """Serialize a domain.Document into a proto-document dict of BibTeX fields."""
        kind = document.kind
        if kind == 'proceedings':
            kind = 'inproceedings'
        fields = dict()
        fields['external_key'] = document.external_key
        doc_authors = document.authors
        doc_authors.sort(key=lambda doc_author: doc_author.first)
        doc_authors.reverse()
        all_authors = [(doc_author.author.long_name if doc_author.author.long_name is not None
                        else doc_author.author.short_name) for doc_author in doc_authors]
        fields['author'] = self._curly(all_authors, separator=' and ')
        if document.journal is not None:
            if document.kind == 'article':
                fields['journal'] = self._curly(str(document.journal))
            else:
                fields['booktitle'] = self._curly(str(document.journal))
        fields['title'] = self._curly(document.title)
        affiliations = []
        for doc_author in doc_authors:
            institution = doc_author.institution
            if institution is not None:
                affiliation = ', '.join([institution.name, institution.country])
                affiliations.append(affiliation)
        if len(affiliations) > 0:
            fields['affiliation'] = self._curly(affiliations, '; ')
        fields['year'] = self._curly(str(document.year))
        if document.international_number is not None:
            fields['ISSN'] = self._curly(str(document.international_number))
        if document.publisher is not None:
            fields['publisher'] = self._curly(str(document.publisher))
        if document.address is not None:
            fields['address'] = self._curly(str(document.address))
        if document.doi is not None:
            fields['doi'] = self._curly(str(document.doi))
        # Fixed: this branch was gated on document.international_number
        # (copy-paste from the ISSN branch above), which could emit the
        # literal string "None" as the URL and drop real URLs.
        if document.url is not None:
            fields['url'] = self._curly(str(document.url))
        fields['abstract'] = self._curly(document.abstract)
        if document.pages is not None:
            fields['pages'] = self._curly(str(document.pages))
        if document.volume is not None:
            fields['volume'] = self._curly(str(document.volume))
        if document.number is not None:
            fields['number'] = self._curly(str(document.number))
        if document.language is not None:
            fields['language'] = self._curly(str(document.language))
        keywords = [keyword.name for keyword in document.keywords]
        fields['keywords'] = self._curly(keywords, ';')
        if len(document.references) > 0:
            fields['references'] = self._curly('; '.join(document.references))
        if document.document_type is not None:
            fields['document_type'] = self._curly(document.document_type)
        fields['source'] = self._curly(document.generator)
        proto_document = {
            'type': kind,
            'fields': fields
        }
        return proto_document

    def _as_bibtex(self, proto_document):
        """Render a proto-document dict as a BibTeX entry string."""
        kind = proto_document['type'].upper()
        fields = proto_document['fields']
        external_key = fields['external_key']
        del fields['external_key']
        key_value = []
        for key, value in fields.items():
            key_value.append(f'{key}={value}')
        # Renamed local (was `bibtex`) so it no longer shadows the module alias.
        entry = f'@{kind}' + '{' + f'{external_key},\n' + ',\n'.join(key_value) + '\n}'
        return entry
# Public registry key for this translator; registering it lets the catalog
# resolve IEEE Xplore BibTeX files by name.
IeeeXplore = "IeeeXplore"
cat.Catalog.translators[IeeeXplore] = IeeeXTranslator
| from BiblioAlly import catalog as cat, domain, translator as bibtex
class IeeeXTranslator(bibtex.Translator):
    """Translates between IEEE Xplore BibTeX entries and BiblioAlly documents."""

    def _document_from_proto_document(self, proto_document):
        """Build a domain.Document from a parsed ("proto") BibTeX entry."""
        bibtex.Translator._translate_kind(proto_document)
        kind = proto_document['type']
        # NOTE(review): reads key 'field' while _proto_document_from_document
        # writes 'fields' — presumably the parser and the serializer use
        # different key names; confirm against bibtex.Translator.
        fields = proto_document['field']
        if 'title' in fields:
            title = self._unbroken(self._uncurlied(fields['title']))
        else:
            title = ''
        if 'abstract' in fields:
            abstract = self._unbroken(self._uncurlied(fields['abstract']))
        else:
            abstract = ''
        year = int(fields['year'])
        author_field = ''
        if 'author' in fields:
            # '}and' appears when curly groups abut the 'and' separator
            author_field = self._unbroken(self._all_uncurly(fields['author'].replace('}and', ' and')))
        if author_field == '':
            author_field = 'Author, Unamed'
        authors = self._authors_from_field(author_field)
        affiliations = self._expand_affiliations(None, authors)
        keywords = []
        if 'keywords' in fields:
            # Keywords are ';'-separated, and entries may themselves contain
            # ','-separated sub-keywords; collect the unique capitalized names.
            all_keywords = self._all_uncurly(fields['keywords']).split(';')
            keyword_names = set()
            for keyword_name in all_keywords:
                sub_keyword_names = keyword_name.split(',')
                for sub_keyword_name in sub_keyword_names:
                    name = sub_keyword_name.strip().capitalize()
                    if name not in keyword_names:
                        keyword_names.add(name)
            keyword_names = list(keyword_names)
            for keyword_name in keyword_names:
                keywords.append(domain.Keyword(name=keyword_name))
        document = domain.Document(proto_document['id'].strip(), kind, title, abstract, keywords, year, affiliations)
        document.generator = "IEEE Xplore"
        if 'doi' in fields:
            document.doi = self._uncurlied(fields['doi'])
        if 'journal' in fields:
            document.journal = self._uncurlied(fields['journal'])
        elif 'booktitle' in fields and kind == 'inproceedings':
            document.journal = self._uncurlied(fields['booktitle'])
        if 'number' in fields:
            if len(self._uncurlied(fields['number'])) > 0:
                document.number = self._uncurlied(fields['number'])
        if 'pages' in fields:
            if len(self._uncurlied(fields['pages'])) > 0:
                document.pages = self._uncurlied(fields['pages'])
        if 'url' in fields:
            if len(self._uncurlied(fields['url'])) > 0:
                document.url = self._uncurlied(fields['url'])
        if 'volume' in fields:
            if len(self._uncurlied(fields['volume'])) > 0:
                document.volume = self._uncurlied(fields['volume'])
        return document

    def _proto_document_from_document(self, document: domain.Document):
        """Serialize a domain.Document into a proto-document dict of BibTeX fields."""
        kind = document.kind
        if kind == 'proceedings':
            kind = 'inproceedings'
        fields = dict()
        fields['external_key'] = document.external_key
        doc_authors = document.authors
        doc_authors.sort(key=lambda doc_author: doc_author.first)
        doc_authors.reverse()
        all_authors = [(doc_author.author.long_name if doc_author.author.long_name is not None
                        else doc_author.author.short_name) for doc_author in doc_authors]
        fields['author'] = self._curly(all_authors, separator=' and ')
        if document.journal is not None:
            if document.kind == 'article':
                fields['journal'] = self._curly(str(document.journal))
            else:
                fields['booktitle'] = self._curly(str(document.journal))
        fields['title'] = self._curly(document.title)
        affiliations = []
        for doc_author in doc_authors:
            institution = doc_author.institution
            if institution is not None:
                affiliation = ', '.join([institution.name, institution.country])
                affiliations.append(affiliation)
        if len(affiliations) > 0:
            fields['affiliation'] = self._curly(affiliations, '; ')
        fields['year'] = self._curly(str(document.year))
        if document.international_number is not None:
            fields['ISSN'] = self._curly(str(document.international_number))
        if document.publisher is not None:
            fields['publisher'] = self._curly(str(document.publisher))
        if document.address is not None:
            fields['address'] = self._curly(str(document.address))
        if document.doi is not None:
            fields['doi'] = self._curly(str(document.doi))
        # Fixed: this branch was gated on document.international_number
        # (copy-paste from the ISSN branch above), which could emit the
        # literal string "None" as the URL and drop real URLs.
        if document.url is not None:
            fields['url'] = self._curly(str(document.url))
        fields['abstract'] = self._curly(document.abstract)
        if document.pages is not None:
            fields['pages'] = self._curly(str(document.pages))
        if document.volume is not None:
            fields['volume'] = self._curly(str(document.volume))
        if document.number is not None:
            fields['number'] = self._curly(str(document.number))
        if document.language is not None:
            fields['language'] = self._curly(str(document.language))
        keywords = [keyword.name for keyword in document.keywords]
        fields['keywords'] = self._curly(keywords, ';')
        if len(document.references) > 0:
            fields['references'] = self._curly('; '.join(document.references))
        if document.document_type is not None:
            fields['document_type'] = self._curly(document.document_type)
        fields['source'] = self._curly(document.generator)
        proto_document = {
            'type': kind,
            'fields': fields
        }
        return proto_document

    def _as_bibtex(self, proto_document):
        """Render a proto-document dict as a BibTeX entry string."""
        kind = proto_document['type'].upper()
        fields = proto_document['fields']
        external_key = fields['external_key']
        del fields['external_key']
        key_value = []
        for key, value in fields.items():
            key_value.append(f'{key}={value}')
        # Renamed local (was `bibtex`) so it no longer shadows the module alias.
        entry = f'@{kind}' + '{' + f'{external_key},\n' + ',\n'.join(key_value) + '\n}'
        return entry
# Public registry key for this translator; registering it lets the catalog
# resolve IEEE Xplore BibTeX files by name.
IeeeXplore = "IeeeXplore"
cat.Catalog.translators[IeeeXplore] = IeeeXTranslator
|
import pytest
from receptor.router import MeshRouter
# Test fixtures. Each entry is a 3-tuple:
#   1. edge list: (node_a, node_b, cost) undirected edges describing the mesh
#   2. expected next hops: (source_node, destination_node, expected_next_hop)
#   3. expected neighbors: (node, set of directly connected nodes)
test_networks = [
    (
        [
            ("a", "b", 1),
            ("a", "d", 1),
            ("a", "f", 1),
            ("b", "d", 1),
            ("b", "c", 1),
            ("c", "e", 1),
            ("c", "h", 1),
            ("c", "j", 1),
            ("e", "f", 1),
            ("e", "g", 1),
            ("e", "h", 1),
            ("f", "g", 1),
            ("g", "h", 1),
            ("h", "j", 1),
            ("h", "k", 1),
            ("j", "k", 1),
            ("j", "m", 1),
            ("l", "m", 1),
        ],
        [("a", "f", "f"), ("a", "m", "b"), ("h", "d", "c")],
        [("a", {"b", "d", "f"}), ("f", {"a", "e", "g"}), ("j", {"c", "h", "k", "m"})],
    ),
    (
        [("a", "b", 1), ("b", "c", 1), ("c", "d", 1), ("d", "e", 1), ("e", "f", 1)],
        [("a", "f", "b"), ("c", "a", "b"), ("f", "c", "e")],
        [("a", {"b"}), ("f", {"e"}), ("c", {"b", "d"})],
    ),
]
@pytest.mark.parametrize("edges, expected_next_hops, expected_neighbors", test_networks)
def test_next_hop(edges, expected_next_hops, expected_neighbors):
    """A router built from `edges` must resolve each (source, destination)
    pair to the expected next-hop node."""
    for node_id, remote, enh in expected_next_hops:
        # A fresh router per source node: next_hop is evaluated from that
        # node's own perspective of the mesh.
        r = MeshRouter(node_id=node_id)
        r.add_or_update_edges(edges)
        assert r.next_hop(remote) == enh
@pytest.mark.parametrize("edges, expected_next_hops, expected_neighbors", test_networks)
def test_neighbors(edges, expected_next_hops, expected_neighbors):
    """get_neighbors must report exactly the directly connected nodes,
    regardless of which node the queried router was instantiated as."""
    r = MeshRouter(node_id=edges[0][0])
    r.add_or_update_edges(edges)
    for node_id, neighbors in expected_neighbors:
        assert r.get_neighbors(node_id) == neighbors
| import pytest
from receptor.router import MeshRouter
# Test fixtures. Each entry is a 3-tuple:
#   1. edge list: (node_a, node_b, cost) undirected edges describing the mesh
#   2. expected next hops: (source_node, destination_node, expected_next_hop)
#   3. expected neighbors: (node, set of directly connected nodes)
test_networks = [
    (
        [
            ("a", "b", 1),
            ("a", "d", 1),
            ("a", "f", 1),
            ("b", "d", 1),
            ("b", "c", 1),
            ("c", "e", 1),
            ("c", "h", 1),
            ("c", "j", 1),
            ("e", "f", 1),
            ("e", "g", 1),
            ("e", "h", 1),
            ("f", "g", 1),
            ("g", "h", 1),
            ("h", "j", 1),
            ("h", "k", 1),
            ("j", "k", 1),
            ("j", "m", 1),
            ("l", "m", 1),
        ],
        [("a", "f", "f"), ("a", "m", "b"), ("h", "d", "c")],
        [("a", {"b", "d", "f"}), ("f", {"a", "e", "g"}), ("j", {"c", "h", "k", "m"})],
    ),
    (
        [("a", "b", 1), ("b", "c", 1), ("c", "d", 1), ("d", "e", 1), ("e", "f", 1)],
        [("a", "f", "b"), ("c", "a", "b"), ("f", "c", "e")],
        [("a", {"b"}), ("f", {"e"}), ("c", {"b", "d"})],
    ),
]
@pytest.mark.parametrize("edges, expected_next_hops, expected_neighbors", test_networks)
def test_next_hop(edges, expected_next_hops, expected_neighbors):
    """A router built from `edges` must resolve each (source, destination)
    pair to the expected next-hop node."""
    for node_id, remote, enh in expected_next_hops:
        # A fresh router per source node: next_hop is evaluated from that
        # node's own perspective of the mesh.
        r = MeshRouter(node_id=node_id)
        r.add_or_update_edges(edges)
        assert r.next_hop(remote) == enh
@pytest.mark.parametrize("edges, expected_next_hops, expected_neighbors", test_networks)
def test_neighbors(edges, expected_next_hops, expected_neighbors):
    """get_neighbors must report exactly the directly connected nodes,
    regardless of which node the queried router was instantiated as."""
    r = MeshRouter(node_id=edges[0][0])
    r.add_or_update_edges(edges)
    for node_id, neighbors in expected_neighbors:
        assert r.get_neighbors(node_id) == neighbors
|
# pylint: disable=invalid-name
# Requires Python 3.6+
# Ref: https://www.sphinx-doc.org/en/master/usage/configuration.html
"""Configuration for the Sphinx documentation generator."""
import sys
from functools import partial
from pathlib import Path
from setuptools_scm import get_version
# -- Path setup --------------------------------------------------------------
PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve() # pylint: disable=no-member
get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, str(PROJECT_ROOT_DIR))
# Make in-tree extension importable in non-tox setups/envs, like RTD.
# Refs:
# https://github.com/readthedocs/readthedocs.org/issues/6311
# https://github.com/readthedocs/readthedocs.org/issues/7182
sys.path.insert(0, str((Path(__file__).parent / '_ext').resolve()))
# -- Project information -----------------------------------------------------
github_url = 'https://github.com'
github_repo_org = 'abhinavsingh'
github_repo_name = 'proxy.py'
github_repo_slug = f'{github_repo_org}/{github_repo_name}'
github_repo_url = f'{github_url}/{github_repo_slug}'
github_sponsors_url = f'{github_url}/sponsors'
project = github_repo_name.title()
author = f'{project} project contributors'
copyright = author # pylint: disable=redefined-builtin
# The short X.Y version
version = '.'.join(
get_scm_version(
local_scheme='no-local-version',
).split('.')[:3],
)
# The full version, including alpha/beta/rc tags
release = get_scm_version()
rst_epilog = f"""
.. |project| replace:: {project}
"""
# -- General configuration ---------------------------------------------------
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use for all
# documents.
# Ref: python-attrs/attrs#571
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# stdlib-party extensions:
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
# Third-party extensions:
'myst_parser', # extended markdown; https://pypi.org/project/myst-parser/
'sphinxcontrib.apidoc',
]
# Conditional third-party extensions:
try:
import sphinxcontrib.spelling as _sphinxcontrib_spelling
except ImportError:
extensions.append('spelling_stub_ext')
else:
del _sphinxcontrib_spelling
extensions.append('sphinxcontrib.spelling')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'changelog-fragments.d/**', # Towncrier-managed change notes
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'furo'
html_show_sphinx = True
html_theme_options = {
}
html_context = {
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = f'{project} Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = 'Documentation'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = f'https://{github_repo_name.replace('.', '')}.readthedocs.io/en/latest/'
# The master toctree document.
root_doc = master_doc = 'index' # Sphinx 4+ / 3- # noqa: WPS429
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
intersphinx_mapping = {
'myst': ('https://myst-parser.rtfd.io/en/latest', None),
'python': ('https://docs.python.org/3', None),
'python2': ('https://docs.python.org/2', None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for sphinxcontrib.apidoc extension ------------------------------
apidoc_excluded_paths = [
'plugin/cache/*',
'testing/*.py',
]
apidoc_extra_args = [
'--implicit-namespaces',
'--private', # include “_private” modules
]
apidoc_module_dir = str(PROJECT_ROOT_DIR / 'proxy')
apidoc_module_first = False
apidoc_output_dir = 'pkg'
apidoc_separate_modules = True
apidoc_toc_file = None
# -- Options for sphinxcontrib.spelling extension ----------------------------
spelling_ignore_acronyms = True
spelling_ignore_importable_modules = True
spelling_ignore_pypi_package_names = True
spelling_ignore_python_builtins = True
spelling_ignore_wiki_words = True
spelling_show_suggestions = True
spelling_word_list_filename = [
'spelling_wordlist.txt',
]
# -- Options for extlinks extension ------------------------------------------
extlinks = {
'issue': (f'{github_repo_url}/issues/%s', '#'), # noqa: WPS323
'pr': (f'{github_repo_url}/pull/%s', 'PR #'), # noqa: WPS323
'commit': (f'{github_repo_url}/commit/%s', ''), # noqa: WPS323
'gh': (f'{github_url}/%s', 'GitHub: '), # noqa: WPS323
'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323
}
# -- Options for linkcheck builder -------------------------------------------
linkcheck_ignore = [
r'http://localhost:\d+/', # local URLs
]
linkcheck_workers = 25
# -- Options for myst_parser extension ------------------------------------------
myst_enable_extensions = [
'colon_fence', # allow to optionally use ::: instead of ```
'deflist',
'html_admonition', # allow having HTML admonitions
'html_image', # allow HTML <img> in Markdown
# FIXME: `linkify` turns "Proxy.Py` into a link so it's disabled now
# Ref: https://github.com/executablebooks/MyST-Parser/issues/428#issuecomment-970277208
# "linkify", # auto-detect URLs @ plain text, needs myst-parser[linkify]
'replacements', # allows Jinja2-style replacements
'smartquotes', # use "cursive" quotes
'substitution', # replace common ASCII shortcuts into their symbols
]
myst_substitutions = {
'project': project,
}
# -- Strict mode -------------------------------------------------------------
# The reST default role (used for this markup: `text`) to use for all
# documents.
# Ref: python-attrs/attrs#571
default_role = 'any'
nitpicky = True
_any_role = 'any'
_py_obj_role = 'py:obj'
_py_class_role = 'py:class'
nitpick_ignore = [
(_any_role, '<proxy.HttpProxyBasePlugin>'),
(_any_role, '__init__'),
(_any_role, 'Client'),
(_any_role, 'event_queue'),
(_any_role, 'fd_queue'),
(_any_role, 'flag.flags'),
(_any_role, 'flags.work_klass'),
(_any_role, 'flush'),
(_any_role, 'httpx'),
(_any_role, 'HttpParser.state'),
(_any_role, 'HttpProtocolHandler'),
(_any_role, 'multiprocessing.Manager'),
(_any_role, 'proxy.core.base.tcp_upstream.TcpUpstreamConnectionHandler'),
(_any_role, 'work_klass'),
(_py_class_role, '_asyncio.Task'),
(_py_class_role, 'asyncio.events.AbstractEventLoop'),
(_py_class_role, 'CacheStore'),
(_py_class_role, 'HttpParser'),
(_py_class_role, 'HttpProtocolHandlerPlugin'),
(_py_class_role, 'HttpProxyBasePlugin'),
(_py_class_role, 'HttpWebServerBasePlugin'),
(_py_class_role, 'multiprocessing.context.Process'),
(_py_class_role, 'multiprocessing.synchronize.Lock'),
(_py_class_role, 'NonBlockingQueue'),
(_py_class_role, 'paramiko.channel.Channel'),
(_py_class_role, 'proxy.http.parser.parser.T'),
(_py_class_role, 'proxy.plugin.cache.store.base.CacheStore'),
(_py_class_role, 'proxy.core.pool.AcceptorPool'),
(_py_class_role, 'proxy.core.executors.ThreadlessPool'),
(_py_class_role, 'proxy.core.acceptor.threadless.T'),
(_py_class_role, 'queue.Queue[Any]'),
(_py_class_role, 'TcpClientConnection'),
(_py_class_role, 'TcpServerConnection'),
(_py_class_role, 'unittest.case.TestCase'),
(_py_class_role, 'unittest.result.TestResult'),
(_py_class_role, 'UUID'),
(_py_class_role, 'Url'),
(_py_class_role, 'WebsocketFrame'),
(_py_class_role, 'Work'),
(_py_obj_role, 'proxy.core.acceptor.threadless.T'),
]
| # pylint: disable=invalid-name
# Requires Python 3.6+
# Ref: https://www.sphinx-doc.org/en/master/usage/configuration.html
"""Configuration for the Sphinx documentation generator."""
import sys
from functools import partial
from pathlib import Path
from setuptools_scm import get_version
# -- Path setup --------------------------------------------------------------
PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve() # pylint: disable=no-member
get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, str(PROJECT_ROOT_DIR))
# Make in-tree extension importable in non-tox setups/envs, like RTD.
# Refs:
# https://github.com/readthedocs/readthedocs.org/issues/6311
# https://github.com/readthedocs/readthedocs.org/issues/7182
sys.path.insert(0, str((Path(__file__).parent / '_ext').resolve()))
# -- Project information -----------------------------------------------------
github_url = 'https://github.com'
github_repo_org = 'abhinavsingh'
github_repo_name = 'proxy.py'
github_repo_slug = f'{github_repo_org}/{github_repo_name}'
github_repo_url = f'{github_url}/{github_repo_slug}'
github_sponsors_url = f'{github_url}/sponsors'
project = github_repo_name.title()
author = f'{project} project contributors'
copyright = author # pylint: disable=redefined-builtin
# The short X.Y version
version = '.'.join(
get_scm_version(
local_scheme='no-local-version',
).split('.')[:3],
)
# The full version, including alpha/beta/rc tags
release = get_scm_version()
rst_epilog = f"""
.. |project| replace:: {project}
"""
# -- General configuration ---------------------------------------------------
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use for all
# documents.
# Ref: python-attrs/attrs#571
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# stdlib-party extensions:
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
# Third-party extensions:
'myst_parser', # extended markdown; https://pypi.org/project/myst-parser/
'sphinxcontrib.apidoc',
]
# Conditional third-party extensions:
try:
import sphinxcontrib.spelling as _sphinxcontrib_spelling
except ImportError:
extensions.append('spelling_stub_ext')
else:
del _sphinxcontrib_spelling
extensions.append('sphinxcontrib.spelling')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'changelog-fragments.d/**', # Towncrier-managed change notes
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'furo'
html_show_sphinx = True
html_theme_options = {
}
html_context = {
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = f'{project} Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = 'Documentation'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = f'https://{github_repo_name.replace(".", "")}.readthedocs.io/en/latest/'
# The master toctree document.
root_doc = master_doc = 'index' # Sphinx 4+ / 3- # noqa: WPS429
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
intersphinx_mapping = {
'myst': ('https://myst-parser.rtfd.io/en/latest', None),
'python': ('https://docs.python.org/3', None),
'python2': ('https://docs.python.org/2', None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for sphinxcontrib.apidoc extension ------------------------------
apidoc_excluded_paths = [
'plugin/cache/*',
'testing/*.py',
]
apidoc_extra_args = [
'--implicit-namespaces',
'--private', # include “_private” modules
]
apidoc_module_dir = str(PROJECT_ROOT_DIR / 'proxy')
apidoc_module_first = False
apidoc_output_dir = 'pkg'
apidoc_separate_modules = True
apidoc_toc_file = None
# -- Options for sphinxcontrib.spelling extension ----------------------------
spelling_ignore_acronyms = True
spelling_ignore_importable_modules = True
spelling_ignore_pypi_package_names = True
spelling_ignore_python_builtins = True
spelling_ignore_wiki_words = True
spelling_show_suggestions = True
spelling_word_list_filename = [
'spelling_wordlist.txt',
]
# -- Options for extlinks extension ------------------------------------------
extlinks = {
'issue': (f'{github_repo_url}/issues/%s', '#'), # noqa: WPS323
'pr': (f'{github_repo_url}/pull/%s', 'PR #'), # noqa: WPS323
'commit': (f'{github_repo_url}/commit/%s', ''), # noqa: WPS323
'gh': (f'{github_url}/%s', 'GitHub: '), # noqa: WPS323
'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323
}
# -- Options for linkcheck builder -------------------------------------------
linkcheck_ignore = [
r'http://localhost:\d+/', # local URLs
]
linkcheck_workers = 25
# -- Options for myst_parser extension ------------------------------------------
myst_enable_extensions = [
'colon_fence', # allow to optionally use ::: instead of ```
'deflist',
'html_admonition', # allow having HTML admonitions
'html_image', # allow HTML <img> in Markdown
# FIXME: `linkify` turns "Proxy.Py` into a link so it's disabled now
# Ref: https://github.com/executablebooks/MyST-Parser/issues/428#issuecomment-970277208
# "linkify", # auto-detect URLs @ plain text, needs myst-parser[linkify]
'replacements', # allows Jinja2-style replacements
'smartquotes', # use "cursive" quotes
'substitution', # replace common ASCII shortcuts into their symbols
]
myst_substitutions = {
'project': project,
}
# -- Strict mode -------------------------------------------------------------
# The reST default role (used for this markup: `text`) to use for all
# documents.
# Ref: python-attrs/attrs#571
default_role = 'any'
nitpicky = True
_any_role = 'any'
_py_obj_role = 'py:obj'
_py_class_role = 'py:class'
nitpick_ignore = [
(_any_role, '<proxy.HttpProxyBasePlugin>'),
(_any_role, '__init__'),
(_any_role, 'Client'),
(_any_role, 'event_queue'),
(_any_role, 'fd_queue'),
(_any_role, 'flag.flags'),
(_any_role, 'flags.work_klass'),
(_any_role, 'flush'),
(_any_role, 'httpx'),
(_any_role, 'HttpParser.state'),
(_any_role, 'HttpProtocolHandler'),
(_any_role, 'multiprocessing.Manager'),
(_any_role, 'proxy.core.base.tcp_upstream.TcpUpstreamConnectionHandler'),
(_any_role, 'work_klass'),
(_py_class_role, '_asyncio.Task'),
(_py_class_role, 'asyncio.events.AbstractEventLoop'),
(_py_class_role, 'CacheStore'),
(_py_class_role, 'HttpParser'),
(_py_class_role, 'HttpProtocolHandlerPlugin'),
(_py_class_role, 'HttpProxyBasePlugin'),
(_py_class_role, 'HttpWebServerBasePlugin'),
(_py_class_role, 'multiprocessing.context.Process'),
(_py_class_role, 'multiprocessing.synchronize.Lock'),
(_py_class_role, 'NonBlockingQueue'),
(_py_class_role, 'paramiko.channel.Channel'),
(_py_class_role, 'proxy.http.parser.parser.T'),
(_py_class_role, 'proxy.plugin.cache.store.base.CacheStore'),
(_py_class_role, 'proxy.core.pool.AcceptorPool'),
(_py_class_role, 'proxy.core.executors.ThreadlessPool'),
(_py_class_role, 'proxy.core.acceptor.threadless.T'),
(_py_class_role, 'queue.Queue[Any]'),
(_py_class_role, 'TcpClientConnection'),
(_py_class_role, 'TcpServerConnection'),
(_py_class_role, 'unittest.case.TestCase'),
(_py_class_role, 'unittest.result.TestResult'),
(_py_class_role, 'UUID'),
(_py_class_role, 'Url'),
(_py_class_role, 'WebsocketFrame'),
(_py_class_role, 'Work'),
(_py_obj_role, 'proxy.core.acceptor.threadless.T'),
]
|
from typing import Dict
from handler import Context, Arguments, CommandResult
from rpg.items import Item
from utils.formatting import codeblock
from utils.command_helpers import get_author_player
async def run(ctx: Context, args: Arguments) -> CommandResult:
    """Show the invoking player's equipment and inventory as a code block.

    Inventory items are de-duplicated and shown with an " x N" suffix when
    the player holds more than one of the same item.
    """
    player = await get_author_player(ctx)
    if player.inventory.size:
        # Tally duplicate items while preserving first-seen order.
        counts: Dict[Item, int] = {}
        for item in player.inventory:
            counts[item] = counts.get(item, 0) + 1
        inventory = "\n".join(
            # BUG FIX: the expression previously reused double quotes inside a
            # double-quoted f-string -- a SyntaxError on Python < 3.12.
            f"{item}{' x ' + str(count) if count > 1 else ''}"
            for item, count in counts.items()
        )
    else:
        inventory = "Ваш инвентарь пуст"
    # Pair each equipment slot name with the item currently in it.
    # NOTE(review): reaches into the private `_slots` attribute -- confirm this
    # is the intended way to enumerate slots.
    equipment_item_map = [
        (slot, getattr(player.equipment, slot)) for slot in player.equipment._slots
    ]
    equipment = "\n".join(f"{slot:>10}: {item}" for (slot, item) in equipment_item_map)
    return codeblock(f"Экипировка:\n\n{equipment}\n\nИнвентарь:\n\n{inventory}")
| from typing import Dict
from handler import Context, Arguments, CommandResult
from rpg.items import Item
from utils.formatting import codeblock
from utils.command_helpers import get_author_player
async def run(ctx: Context, args: Arguments) -> CommandResult:
    """Show the invoking player's equipment and inventory as a code block."""
    player = await get_author_player(ctx)
    if player.inventory.size:
        # Tally duplicate items while preserving first-seen order.
        counts: Dict[Item, int] = {}
        for item in player.inventory:
            counts[item] = counts.get(item, 0) + 1
        inventory = "\n".join(
            # Items held more than once get an " x N" count suffix.
            f"{item}{' x ' + str(count) if count > 1 else ''}"
            for item, count in counts.items()
        )
    else:
        inventory = "Ваш инвентарь пуст"
    # Pair each equipment slot name with the item currently in it.
    # NOTE(review): reaches into the private `_slots` attribute -- confirm this
    # is the intended way to enumerate slots.
    equipment_item_map = [
        (slot, getattr(player.equipment, slot)) for slot in player.equipment._slots
    ]
    equipment = "\n".join(f"{slot:>10}: {item}" for (slot, item) in equipment_item_map)
    return codeblock(f"Экипировка:\n\n{equipment}\n\nИнвентарь:\n\n{inventory}")
|
from src.utils.config import config
import json
# import uuid
import requests
_NAMESPACE = "WS"
_VER_NAMESPACE = "WSVER"
_SAMPLE_NAMESPACE = "SMP"
# versioned and non-versioned index have same version
_SAMPLE_SET_INDEX_VERSION = 1
_SAMPLE_SET_INDEX_NAME = 'sample_set_' + str(_SAMPLE_SET_INDEX_VERSION)
_VER_SAMPLE_SET_INDEX_NAME = 'sample_set_version_' + str(_SAMPLE_SET_INDEX_VERSION)
# versioned and non-versioned index have same version
_SAMPLE_INDEX_VERSION = 1
_SAMPLE_INDEX_NAME = 'sample_' + str(_SAMPLE_INDEX_VERSION)
# _VER_SAMPLE_INDEX_NAME = 'sample_version_' + str(_SAMPLE_INDEX_VERSION)
def _get_sample(sample_info):
    """ Get sample from SampleService
        sample_info - dict containing 'id' and (optionally) 'version' of a sample

        Returns the sample document from the SampleService JSON-RPC 1.1 API.
        Raises RuntimeError on a non-OK HTTP response or a service-level error.
    """
    headers = {"Authorization": config()['ws_token']}
    params = {
        "id": sample_info['id']
    }
    # Version is optional; when omitted the service returns the latest.
    if sample_info.get('version'):
        params['version'] = sample_info['version']
    payload = {
        "method": "SampleService.get_sample",
        "id": "",  # str(uuid.uuid4()),
        "params": [params],
        "version": "1.1"
    }
    resp = requests.post(url=config()['sample_service_url'], headers=headers, data=json.dumps(payload))
    if not resp.ok:
        raise RuntimeError(f"Returned from sample service with status {resp.status_code} - {resp.text}")
    resp_json = resp.json()
    if resp_json.get('error'):
        # BUG FIX: the subscript previously reused double quotes inside a
        # double-quoted f-string -- a SyntaxError on Python < 3.12.
        raise RuntimeError(f"Error from SampleService - {resp_json['error']}")
    sample = resp_json['result'][0]
    return sample
def _flatten_meta(meta, prefix=None):
""" Flattens metadata fields in a Sample object. Fields are concatenated into a
single string field to save into an Elasticsearch index
meta - Sample Metadata to be flattened
prefix - (optional) prefix for the metadata values. default=None
"""
new_meta = {}
for key in meta:
if prefix:
val = prefix + ":"
else:
val = ""
if "value" in meta[key]:
val += str(meta[key]['value'])
if "units" in meta[key]:
val += ";" + str(meta[key]['units'])
new_meta[key] = val
return new_meta
def _combine_meta(meta, flattened_meta, idx):
""" Combine newly flattened metadata with existing metadata. This Function is designed to keep the indexing
of the different metadata fields consistent for each node within the sample node tree s.t. all the
fields in index (idx) 0 will be from item 0 in the node tree. Empty string ("") entries are Empty and
added simply so that the indexing of all fields line up.
meta - existing metadata.
flattened_meta - newly flattened metadata.
idx - current index of ndoe_tree.
"""
for key in flattened_meta:
if key in meta:
meta[key] += ["" for _ in range(idx - len(meta[key]))] + [flattened_meta[key]]
else:
meta[key] = ["" for _ in range(idx)] + [flattened_meta[key]]
return meta
def index_sample_set(obj_data, ws_info, obj_data_v1):
    """Indexer for KBaseSets.SampleSet object type.

    Yields one document for the sample set, a versioned copy of it, and one
    document per member sample (fetched live from the SampleService).
    Raises if the workspace object carries no 'data' payload.
    """
    info = obj_data['info']
    if not obj_data.get('data'):
        raise Exception("no data in object")
    data = obj_data['data']
    # Workspace object info tuple: [0]=object id, [4]=version, [6]=workspace id.
    workspace_id = info[6]
    object_id = info[0]
    version = info[4]
    sample_set_id = f"{_NAMESPACE}::{workspace_id}:{object_id}"
    ver_sample_set_id = f"{_VER_NAMESPACE}::{workspace_id}:{object_id}:{version}"
    sample_set_index = {
        "_action": "index",
        "doc": {
            "description": data["description"],
            "sample_ids": [s['id'] for s in data['samples']],
            "sample_names": [s['name'] for s in data['samples']],
            "sample_versions": [s['version'] for s in data['samples']]
        },
        "index": _SAMPLE_SET_INDEX_NAME,
        "id": sample_set_id
    }
    yield sample_set_index
    # Versioned copy of the same document, differing only in index name and id.
    ver_sample_set_index = dict(sample_set_index)
    ver_sample_set_index['index'] = _VER_SAMPLE_SET_INDEX_NAME
    ver_sample_set_index['id'] = ver_sample_set_id
    yield ver_sample_set_index
    for samp in data["samples"]:
        # query the sample service for sample
        sample = _get_sample(samp)
        # BUG FIX: the subscripts previously reused double quotes inside a
        # double-quoted f-string -- a SyntaxError on Python < 3.12.
        sample_id = f"{_SAMPLE_NAMESPACE}::{sample['id']}:{sample['version']}"
        # not sure on how we need to handle more than 1 node.
        if len(sample['node_tree']) == 1:
            meta_controlled = _flatten_meta(
                sample['node_tree'][0]['meta_controlled']
            )
            meta_user = _flatten_meta(
                sample['node_tree'][0]['meta_user']
            )
            meta_controlled['node_id'] = sample['node_tree'][0]['id']
        else:
            # Multiple nodes: merge metadata column-wise so position i of every
            # field list corresponds to node i of the node tree.
            meta_controlled, meta_user = {}, {}
            for idx, node in enumerate(sample['node_tree']):
                meta_controlled = _combine_meta(
                    meta_controlled,
                    _flatten_meta(
                        node['meta_controlled']
                    ),
                    idx
                )
                meta_user = _combine_meta(
                    meta_user,
                    _flatten_meta(
                        node['meta_user']
                    ),
                    idx
                )
                # NOTE(review): only the last node's id survives the loop --
                # confirm this is intended for multi-node trees.
                meta_controlled['node_id'] = node['id']
        sample_index = {
            "_action": "index",
            "doc": {
                "save_date": sample['save_date'],
                "sample_version": sample['version'],
                "name": sample['name'],
                "parent_id": sample_set_id,
                **meta_user,
                **meta_controlled
            },
            "index": _SAMPLE_INDEX_NAME,
            "id": sample_id
        }
        yield sample_index
| from src.utils.config import config
import json
# import uuid
import requests
_NAMESPACE = "WS"
_VER_NAMESPACE = "WSVER"
_SAMPLE_NAMESPACE = "SMP"
# versioned and non-versioned index have same version
_SAMPLE_SET_INDEX_VERSION = 1
_SAMPLE_SET_INDEX_NAME = 'sample_set_' + str(_SAMPLE_SET_INDEX_VERSION)
_VER_SAMPLE_SET_INDEX_NAME = 'sample_set_version_' + str(_SAMPLE_SET_INDEX_VERSION)
# versioned and non-versioned index have same version
_SAMPLE_INDEX_VERSION = 1
_SAMPLE_INDEX_NAME = 'sample_' + str(_SAMPLE_INDEX_VERSION)
# _VER_SAMPLE_INDEX_NAME = 'sample_version_' + str(_SAMPLE_INDEX_VERSION)
def _get_sample(sample_info):
    """ Get sample from SampleService
        sample_info - dict containing 'id' and (optionally) 'version' of a sample

        Returns the sample document from the SampleService JSON-RPC 1.1 API.
        Raises RuntimeError on a non-OK HTTP response or a service-level error.
    """
    headers = {"Authorization": config()['ws_token']}
    params = {
        "id": sample_info['id']
    }
    # Version is optional; when omitted the service returns the latest.
    if sample_info.get('version'):
        params['version'] = sample_info['version']
    payload = {
        "method": "SampleService.get_sample",
        "id": "",  # str(uuid.uuid4()),
        "params": [params],
        "version": "1.1"
    }
    resp = requests.post(url=config()['sample_service_url'], headers=headers, data=json.dumps(payload))
    if not resp.ok:
        raise RuntimeError(f"Returned from sample service with status {resp.status_code} - {resp.text}")
    resp_json = resp.json()
    if resp_json.get('error'):
        raise RuntimeError(f"Error from SampleService - {resp_json['error']}")
    sample = resp_json['result'][0]
    return sample
def _flatten_meta(meta, prefix=None):
    """ Flattens metadata fields in a Sample object. Fields are concatenated into a
        single string field to save into an Elasticsearch index

        meta - Sample Metadata to be flattened
        prefix - (optional) prefix for the metadata values. default=None
    """
    new_meta = {}
    for key in meta:
        # Optional "<prefix>:" marker in front of every flattened value.
        if prefix:
            val = prefix + ":"
        else:
            val = ""
        # Value and units concatenate as "<value>;<units>"; either part may be
        # absent independently.
        if "value" in meta[key]:
            val += str(meta[key]['value'])
        if "units" in meta[key]:
            val += ";" + str(meta[key]['units'])
        new_meta[key] = val
    return new_meta
def _combine_meta(meta, flattened_meta, idx):
    """ Combine newly flattened metadata with existing metadata. This Function is designed to keep the indexing
        of the different metadata fields consistent for each node within the sample node tree s.t. all the
        fields in index (idx) 0 will be from item 0 in the node tree. Empty string ("") entries are Empty and
        added simply so that the indexing of all fields line up.

        meta - existing metadata. (mutated in place and also returned)
        flattened_meta - newly flattened metadata.
        idx - current index of ndoe_tree.
    """
    for key in flattened_meta:
        if key in meta:
            # Pad with "" up to position idx, then append this node's value.
            meta[key] += ["" for _ in range(idx - len(meta[key]))] + [flattened_meta[key]]
        else:
            # First occurrence of this field: blank out all earlier positions.
            meta[key] = ["" for _ in range(idx)] + [flattened_meta[key]]
    return meta
def index_sample_set(obj_data, ws_info, obj_data_v1):
    """Indexer for KBaseSets.SampleSet object type"""
    info = obj_data['info']
    if not obj_data.get('data'):
        raise Exception("no data in object")
    data = obj_data['data']
    # Workspace object info tuple: [0]=object id, [4]=version, [6]=workspace id.
    workspace_id = info[6]
    object_id = info[0]
    version = info[4]
    sample_set_id = f"{_NAMESPACE}::{workspace_id}:{object_id}"
    ver_sample_set_id = f"{_VER_NAMESPACE}::{workspace_id}:{object_id}:{version}"
    sample_set_index = {
        "_action": "index",
        "doc": {
            "description": data["description"],
            "sample_ids": [s['id'] for s in data['samples']],
            "sample_names": [s['name'] for s in data['samples']],
            "sample_versions": [s['version'] for s in data['samples']]
        },
        "index": _SAMPLE_SET_INDEX_NAME,
        "id": sample_set_id
    }
    yield sample_set_index
    # Versioned copy of the same document, differing only in index name and id.
    ver_sample_set_index = dict(sample_set_index)
    ver_sample_set_index['index'] = _VER_SAMPLE_SET_INDEX_NAME
    ver_sample_set_index['id'] = ver_sample_set_id
    yield ver_sample_set_index
    for samp in data["samples"]:
        # query the sample service for sample
        sample = _get_sample(samp)
        sample_id = f"{_SAMPLE_NAMESPACE}::{sample['id']}:{sample['version']}"
        # not sure on how we need to handle more than 1 node.
        if len(sample['node_tree']) == 1:
            meta_controlled = _flatten_meta(
                sample['node_tree'][0]['meta_controlled']
            )
            meta_user = _flatten_meta(
                sample['node_tree'][0]['meta_user']
            )
            meta_controlled['node_id'] = sample['node_tree'][0]['id']
        else:
            # Multiple nodes: merge metadata column-wise so position i of every
            # field list corresponds to node i of the node tree.
            meta_controlled, meta_user = {}, {}
            for idx, node in enumerate(sample['node_tree']):
                meta_controlled = _combine_meta(
                    meta_controlled,
                    _flatten_meta(
                        node['meta_controlled']
                    ),
                    idx
                )
                meta_user = _combine_meta(
                    meta_user,
                    _flatten_meta(
                        node['meta_user']
                    ),
                    idx
                )
                # NOTE(review): only the last node's id survives the loop --
                # confirm this is intended for multi-node trees.
                meta_controlled['node_id'] = node['id']
        sample_index = {
            "_action": "index",
            "doc": {
                "save_date": sample['save_date'],
                "sample_version": sample['version'],
                "name": sample['name'],
                "parent_id": sample_set_id,
                **meta_user,
                **meta_controlled
            },
            "index": _SAMPLE_INDEX_NAME,
            "id": sample_id
        }
        yield sample_index
|
import discord
import io
import aiohttp
from aiohttp import request, ClientSession
from src.embeds.image_embed import ImageEmbed
async def request_canvas_image(ctx, url, member: discord.Member = None, params={}, is_gif=False):
    """Fetch an avatar-based canvas image from an API and send it to the channel.

    member - whose avatar is passed to the API via the `avatar` query parameter
    params - extra query parameters appended as "&k=v" pairs (read-only here)
    is_gif - selects the filename extension of the uploaded attachment
    """
    # Render extra query parameters as "&k=v&k2=v2" (empty when no params).
    params_url = "&" + "&".join("{}={}".format(k, v) for k, v in params.items()) if params != {} else ""
    # BUG FIX: the avatar expression previously nested single quotes inside a
    # single-quoted f-string -- a SyntaxError on Python < 3.12.
    avatar = member.avatar_url_as(format="png", size=1024)
    async with ClientSession() as session:
        async with session.get(f"{url}?avatar={avatar}{params_url}") as resp:
            image_data = io.BytesIO(await resp.read())
    # `async with` already closes the session; the old explicit close() call
    # inside the context was redundant and has been dropped.
    await ctx.send(file=discord.File(image_data, 'image.gif' if is_gif else 'image.png'))
async def request_image(ctx, url, params={}, key="link", title="Requested image", description="", footer=""):
    """Fetch JSON from an image API and post the image found at data[key] as an embed."""
    if params:
        query = "&" + "&".join(f"{name}={value}" for name, value in params.items())
    else:
        query = ""
    async with request("GET", url + query) as response:
        if response.status != 200:
            await ctx.send(f"API returned a {response.status} status :((")
            return
        data = await response.json()
        # A "caption" field from the API overrides the default title.
        embed_title = data["caption"] if "caption" in data else title
        embed = ImageEmbed(ctx, embed_title, description, footer, data[key])
        await embed.send()
async def request_text(ctx, url, key, params={}, text_format="{}"):
    """Fetch JSON from a text API and send data[key] formatted via *text_format*."""
    if params:
        query = "&" + "&".join(f"{name}={value}" for name, value in params.items())
    else:
        query = ""
    # Debug output (kept to preserve existing behavior).
    print(query)
    async with request("GET", url + query) as response:
        if response.status != 200:
            await ctx.send(f"API returned a {response.status} status :((")
            return
        payload = await response.json()
        print(payload)
        await ctx.send(text_format.format(payload[key]))
| import discord
import io
import aiohttp
from aiohttp import request, ClientSession
from src.embeds.image_embed import ImageEmbed
async def request_canvas_image(ctx, url, member: discord.Member = None, params={}, is_gif=False):
    """Fetch an avatar-based canvas image from an API and send it to the channel.

    NOTE(review): `params={}` is a mutable default argument; it is only read
    here so behavior is unaffected, but `params=None` would be safer.
    """
    # Render extra query parameters as "&k=v&k2=v2" (empty when no params).
    params_url = "&" + "&".join(["{}={}".format(k, v)
                                 for k, v in params.items()]) if params != {} else ""
    async with ClientSession() as wastedSession:
        async with wastedSession.get(f'{url}?avatar={member.avatar_url_as(format="png", size=1024)}{params_url}') as wastedImage:
            imageData = io.BytesIO(await wastedImage.read())
            # Redundant: the `async with` already closes the session on exit.
            await wastedSession.close()
            await ctx.send(file=discord.File(imageData, 'image.gif' if is_gif else 'image.png'))
async def request_image(ctx, url, params={}, key="link", title="Requested image", description="", footer=""):
    """Fetch JSON from an image API and post the image found at data[key] as an embed.

    A "caption" field in the response, when present, overrides the embed title.
    """
    # Render extra query parameters as "&k=v&k2=v2" (empty when no params).
    params_url = "&" + "&".join(["{}={}".format(k, v)
                                 for k, v in params.items()]) if params != {} else ""
    async with request("GET", url + params_url) as response:
        if response.status == 200:
            data = await response.json()
            if "caption" in data:
                title = data["caption"]
            embed = ImageEmbed(
                ctx,
                title,
                description,
                footer,
                data[key]
            )
            await embed.send()
        else:
            await ctx.send(f"API returned a {response.status} status :((")
async def request_text(ctx, url, key, params={}, text_format="{}"):
    """Fetch JSON from a text API and send data[key] formatted via *text_format*."""
    params_url = "&" + "&".join(["{}={}".format(k, v)
                                 for k, v in params.items()]) if params != {} else ""
    # Debug output; consider removing or switching to logging.
    print(params_url)
    async with request("GET", url + params_url) as response:
        if response.status == 200:
            data = await response.json()
            print(data)
            await ctx.send(text_format.format(data[key]))
        else:
            await ctx.send(f"API returned a {response.status} status :((")
|
"""Dataset, producer, and config metadata."""
import logging
import warnings
import sqlalchemy as sa
from .._globals import REGISTRY as registry
from .. import _tools
from .. import backend as _backend
__all__ = ['Dataset', 'Producer', 'Config']
log = logging.getLogger(__name__)
@registry.mapped
class Dataset:
    """Git commit loaded into the database."""

    __tablename__ = '__dataset__'

    # Single-row table: the check constraint pins id to 1.
    id = sa.Column(sa.Integer, sa.CheckConstraint('id = 1'), primary_key=True)
    title = sa.Column(sa.Text, sa.CheckConstraint("title != ''"), nullable=False)
    git_commit = sa.Column(sa.String(40), sa.CheckConstraint('length(git_commit) = 40'),
                           nullable=False, unique=True)
    git_describe = sa.Column(sa.Text, sa.CheckConstraint("git_describe != ''"),
                             nullable=False, unique=True)
    clean = sa.Column(sa.Boolean(create_constraint=True), nullable=False)
    version = sa.Column(sa.Text, sa.CheckConstraint("version != ''"))
    exclude_raw = sa.Column(sa.Boolean(create_constraint=True), nullable=False)

    @classmethod
    def get_dataset(cls, *, bind, strict, fallback=None):
        """Return the single dataset row as a mapping.

        bind - engine/connection to read from
        strict - when true, re-raise read failures instead of falling back
        fallback - value returned when the table or row is missing (non-strict)
        """
        table = cls.__tablename__
        log.debug('read %r from %r', table, bind)
        try:
            result, = _backend.iterrows(sa.select(cls), mappings=True, bind=bind)
        except sa.exc.OperationalError as e:
            if 'no such table' in e.orig.args[0]:
                # Database not initialized yet: fall through to the fallback.
                pass
            else:
                log.exception('error selecting %r', table)
                if strict:  # pragma: no cover
                    # BUG FIX: RuntimeError was given printf-style args that
                    # were never interpolated; format the message explicitly.
                    raise RuntimeError(f'failed to select {table!r} from {bind!r}') from e
            return fallback
        except ValueError as e:
            log.exception('error selecting %r', table)
            if 'not enough values to unpack' in e.args[0] and not strict:
                # Empty table: tolerated in non-strict mode.
                return fallback
            else:  # pragma: no cover
                raise RuntimeError(f'failed to select {table!r} from {bind!r}') from e
        except Exception as e:  # pragma: no cover
            log.exception('error selecting %r', table)
            raise RuntimeError(f'failed to select {table!r} from {bind!r}') from e
        else:
            return result

    @classmethod
    def log_dataset(cls, params, *,
                    ignore_dirty: bool = False,
                    also_print: bool = False, print_file=None):
        """Log (and optionally print) the dataset metadata in *params*.

        Warns when the working tree was dirty unless ignore_dirty is set.
        """
        name = cls.__tablename__
        log.info('git describe %(git_describe)r clean: %(clean)r', params)
        log.debug('%s.title: %r', name, params['title'])
        log.info('%s.git_commit: %r', name, params['git_commit'])
        if 'version' in params:
            log.info('%s.version: %r', name, params['version'])
        log.debug('%s.exclude_raw: %r', name, params['exclude_raw'])
        if also_print or print_file is not None:
            print('git describe {git_describe!r}'
                  ' clean: {clean!r}'.format_map(params),
                  file=print_file)
            # BUG FIX: these f-strings reused double quotes inside the literal
            # (a SyntaxError on Python < 3.12), and the title line carried a
            # stray trailing apostrophe inside the string.
            print(f"{name}.title: {params['title']!r}",
                  file=print_file)
            print(f"{name}.git_commit: {params['git_commit']!r}",
                  file=print_file)
            if 'version' in params:
                print(f"{name}.version: {params['version']!r}",
                      file=print_file)
            print(f"{name}.exclude_raw: {params['exclude_raw']!r}",
                  file=print_file)
        if not params['clean'] and not ignore_dirty:
            warnings.warn(f'{name} not clean,'
                          ' pass ignore_dirty=True to disable')  # pragma: no cover
@registry.mapped
class Producer:
    """Name and version of the package that created a __dataset__."""

    __tablename__ = '__producer__'

    id = sa.Column(sa.Integer, sa.CheckConstraint('id = 1'), primary_key=True)
    name = sa.Column(sa.Text, sa.CheckConstraint("name != ''"),
                     unique=True, nullable=False)
    version = sa.Column(sa.Text, sa.CheckConstraint("version != ''"),
                        nullable=False)

    @classmethod
    def get_producer(cls, *, bind):
        """Return the single ``__producer__`` row as a mapping."""
        result, = _backend.iterrows(sa.select(cls), mappings=True, bind=bind)
        return result

    @classmethod
    def log_producer(cls, params, *, also_print=False, print_file=None):
        """Log producer name and version; optionally also print them."""
        name = cls.__tablename__
        log.info('%s.name: %s', name, params['name'])
        log.info('%s.version: %s', name, params['version'])
        if also_print or print_file is not None:
            # COMPAT FIX: single-quoted subscripts keep these f-strings valid
            # on Python < 3.12 (nested double quotes need PEP 701).
            print(f"{name}.name: {params['name']}", file=print_file)
            print(f"{name}.version: {params['version']}", file=print_file)
@registry.mapped
class Config:
    """Configuration setting from ``glottolog/config/*.ini``."""
    # Composite primary key: one row per (file, section, option) triple.
    __tablename__ = '_config'
    filename = sa.Column(sa.String, sa.CheckConstraint("filename != ''"),
                         primary_key=True)
    section = sa.Column(sa.String, sa.CheckConstraint("section != ''"),
                        primary_key=True)
    option = sa.Column(sa.String, sa.CheckConstraint("option != ''"),
                       primary_key=True)
    value = sa.Column(sa.Text, sa.CheckConstraint("value != ''"),
                      nullable=False)
    # Original line number of the option inside its .ini file; unique per file.
    line = sa.Column(sa.Integer, sa.CheckConstraint('line > 0'),
                     nullable=False)
    __table_args__ = (sa.UniqueConstraint(filename, line),
                      {'info': {'without_rowid': True}})
    @classmethod
    def load(cls, filename: str, *, bind,
             _groupby_section=_tools.groupby_itemgetter(0)):
        """Return the settings of one .ini file as a nested dict.

        Result shape: ``{section: {option: value}}``.  The private
        ``_groupby_section`` default pre-binds an itemgetter-keyed groupby
        helper; rows are ordered by (section, option) so grouping works.
        """
        select_values = (sa.select(Config.section, Config.option, Config.value)
                         .filter_by(filename=filename)
                         .order_by('section', 'option'))
        result = _backend.iterrows(select_values, bind=bind)
        return {section: {option: value for _, option, value in grp}
                for section, grp in _groupby_section(result)}
| """Dataset, producer, and config metadata."""
import logging
import warnings
import sqlalchemy as sa
from .._globals import REGISTRY as registry
from .. import _tools
from .. import backend as _backend
__all__ = ['Dataset', 'Producer', 'Config']
log = logging.getLogger(__name__)
@registry.mapped
class Dataset:
    """Git commit loaded into the database.

    Single-row table (``id`` is check-constrained to 1) recording which
    source checkout the database was built from.
    """

    __tablename__ = '__dataset__'

    id = sa.Column(sa.Integer, sa.CheckConstraint('id = 1'), primary_key=True)
    title = sa.Column(sa.Text, sa.CheckConstraint("title != ''"), nullable=False)
    git_commit = sa.Column(sa.String(40), sa.CheckConstraint('length(git_commit) = 40'),
                           nullable=False, unique=True)
    git_describe = sa.Column(sa.Text, sa.CheckConstraint("git_describe != ''"),
                             nullable=False, unique=True)
    clean = sa.Column(sa.Boolean(create_constraint=True), nullable=False)
    version = sa.Column(sa.Text, sa.CheckConstraint("version != ''"))
    exclude_raw = sa.Column(sa.Boolean(create_constraint=True), nullable=False)

    @classmethod
    def get_dataset(cls, *, bind, strict, fallback=None):
        """Return the single ``__dataset__`` row as a mapping, or *fallback*.

        Returns *fallback* when the table does not exist yet or is empty
        (unless *strict*); re-raises unexpected errors as ``RuntimeError``
        chained to the original exception.
        """
        table = cls.__tablename__
        log.debug('read %r from %r', table, bind)
        try:
            result, = _backend.iterrows(sa.select(cls), mappings=True, bind=bind)
        except sa.exc.OperationalError as e:
            if 'no such table' in e.orig.args[0]:
                pass  # fresh database: fall through to the fallback return
            else:
                log.exception('error selecting %r', table)
                if strict:  # pragma: no cover
                    # BUG FIX: the original passed the %-template and args
                    # straight to RuntimeError without formatting them.
                    raise RuntimeError(f'failed to select {table!r} from {bind!r}') from e
            return fallback
        except ValueError as e:
            log.exception('error selecting %r', table)
            # Empty table: the single-row unpacking above fails to unpack.
            if 'not enough values to unpack' in e.args[0] and not strict:
                return fallback
            else:  # pragma: no cover
                raise RuntimeError(f'failed to select {table!r} from {bind!r}') from e
        except Exception as e:  # pragma: no cover
            log.exception('error selecting %r', table)
            raise RuntimeError(f'failed to select {table!r} from {bind!r}') from e
        else:
            return result

    @classmethod
    def log_dataset(cls, params, *,
                    ignore_dirty: bool = False,
                    also_print: bool = False, print_file=None):
        """Log the dataset row in *params*; optionally also print it.

        Warns unless the checkout was clean or *ignore_dirty* is set.
        """
        name = cls.__tablename__
        log.info('git describe %(git_describe)r clean: %(clean)r', params)
        log.debug('%s.title: %r', name, params['title'])
        log.info('%s.git_commit: %r', name, params['git_commit'])
        if 'version' in params:
            log.info('%s.version: %r', name, params['version'])
        log.debug('%s.exclude_raw: %r', name, params['exclude_raw'])
        if also_print or print_file is not None:
            print('git describe {git_describe!r}'
                  ' clean: {clean!r}'.format_map(params),
                  file=print_file)
            # BUG FIX: dropped the stray trailing apostrophe the original
            # appended after the title repr.
            print(f"{name}.title: {params['title']!r}",
                  file=print_file)
            print(f"{name}.git_commit: {params['git_commit']!r}",
                  file=print_file)
            if 'version' in params:
                print(f"{name}.version: {params['version']!r}",
                      file=print_file)
            print(f"{name}.exclude_raw: {params['exclude_raw']!r}",
                  file=print_file)
        if not params['clean'] and not ignore_dirty:
            warnings.warn(f'{name} not clean,'
                          ' pass ignore_dirty=True to disable')  # pragma: no cover
@registry.mapped
class Producer:
    """Name and version of the package that created a __dataset__."""

    __tablename__ = '__producer__'

    id = sa.Column(sa.Integer, sa.CheckConstraint('id = 1'), primary_key=True)
    name = sa.Column(sa.Text, sa.CheckConstraint("name != ''"),
                     unique=True, nullable=False)
    version = sa.Column(sa.Text, sa.CheckConstraint("version != ''"),
                        nullable=False)

    @classmethod
    def get_producer(cls, *, bind):
        """Fetch the single ``__producer__`` row and return it as a mapping."""
        rows = _backend.iterrows(sa.select(cls), mappings=True, bind=bind)
        (producer,) = rows
        return producer

    @classmethod
    def log_producer(cls, params, *, also_print=False, print_file=None):
        """Log producer name and version; optionally also print them."""
        table = cls.__tablename__
        log.info('%s.name: %s', table, params['name'])
        log.info('%s.version: %s', table, params['version'])
        if not (also_print or print_file is not None):
            return
        print(f"{table}.name: {params['name']}", file=print_file)
        print(f"{table}.version: {params['version']}", file=print_file)
@registry.mapped
class Config:
    """Configuration setting from ``glottolog/config/*.ini``."""
    # Composite primary key: one row per (file, section, option) triple.
    __tablename__ = '_config'
    filename = sa.Column(sa.String, sa.CheckConstraint("filename != ''"),
                         primary_key=True)
    section = sa.Column(sa.String, sa.CheckConstraint("section != ''"),
                        primary_key=True)
    option = sa.Column(sa.String, sa.CheckConstraint("option != ''"),
                       primary_key=True)
    value = sa.Column(sa.Text, sa.CheckConstraint("value != ''"),
                      nullable=False)
    # Original line number of the option inside its .ini file; unique per file.
    line = sa.Column(sa.Integer, sa.CheckConstraint('line > 0'),
                     nullable=False)
    __table_args__ = (sa.UniqueConstraint(filename, line),
                      {'info': {'without_rowid': True}})
    @classmethod
    def load(cls, filename: str, *, bind,
             _groupby_section=_tools.groupby_itemgetter(0)):
        """Return the settings of one .ini file as ``{section: {option: value}}``.

        The private ``_groupby_section`` default pre-binds an itemgetter-keyed
        groupby helper; rows are ordered by (section, option) so grouping works.
        """
        select_values = (sa.select(Config.section, Config.option, Config.value)
                         .filter_by(filename=filename)
                         .order_by('section', 'option'))
        result = _backend.iterrows(select_values, bind=bind)
        return {section: {option: value for _, option, value in grp}
                for section, grp in _groupby_section(result)}
|
###
# (C) Copyright [2019-2020] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from simplivity.ovc_client import OVC
from simplivity.exceptions import HPESimpliVityException
import pprint
# Pretty-printer used for all response payload dumps below.
pp = pprint.PrettyPrinter(indent=4)
# Connection settings for the OmniStack Virtual Controller (fill in real values).
config = {
    "ip": "<ovc_ip>",
    "credentials": {
        "username": "<username>",
        "password": "<password>"
    }
}
ovc = OVC(config)
# Resource clients exercised by this demo script.
policies = ovc.policies
hosts = ovc.hosts
clusters = ovc.omnistack_clusters
cluster_groups = ovc.cluster_groups
# --- get_all: default parameters ---
print("\n\nget_all with default params")
all_policies = policies.get_all()
count = len(all_policies)
for policy in all_policies:
    print(f"{policy}")
    print(f"{pp.pformat(policy.data)} \n")
print(f"Total number of policies : {count}")
# Keep one policy around to drive the lookup examples below.
policy_object = all_policies[0]
# --- get_all: server-side name filter ---
print("\n\nget_all with filters")
all_policies = policies.get_all(filters={'name': policy_object.data["name"]})
count = len(all_policies)
for policy in all_policies:
    print(f"{policy}")
    print(f"{pp.pformat(policy.data)} \n")
print(f"Total number of policies : {count}")
# --- get_all: paginated fetch; next_page() raises when pages are exhausted ---
print("\n\nget_all with pagination")
pagination = policies.get_all(limit=105, pagination=True, page_size=50)
end = False
while not end:
    data = pagination.data
    print("Page size:", len(data["resources"]))
    print(f"{pp.pformat(data)}")
    try:
        pagination.next_page()
    except HPESimpliVityException:
        end = True
# --- single-object lookups by id and by name ---
print("\n\nget_by_id")
policy = policies.get_by_id(policy_object.data["id"])
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\nget_by_name")
policy = policies.get_by_name(policy_object.data["name"])
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
# --- VMs currently attached to the policy ---
print("\n\nget_all VMs using this policy")
vms = policy.get_vms()
print(policy.data)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print(f"{pp.pformat(vms)} \n")
# --- create a policy and attach backup rules to it ---
print("\n\ncreate policy")
policy_name = "fixed_frequency_retention_policy"
policy = policies.create(policy_name)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
# Two rules at once: a windowed rule and a minimal frequency/retention rule.
multiple_rules = [
    {
        "start_time": "14:30",
        "end_time": "15:30",
        "application_consistent": False,
        "frequency": 3,
        "retention": 5
    },
    {
        "frequency": 5,
        "retention": 6
    }
]
print("\n\nadd rules to policy")
policy.create_rules(multiple_rules)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
# A single rule may also be passed as a bare dict.
single_rule = {
    "frequency": 10,
    "retention": 12
}
policy.create_rules(single_rule)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
# --- inspect and delete individual rules ---
print("\n\nget rule")
all_rules = policy.data["rules"]
for rule in all_rules:
    rule_obj = policy.get_rule(rule.get('id'))
    print(f"{pp.pformat(rule_obj)} \n")
print("\n\ndelete rule")
rule_id = policy.data["rules"][0]['id']
policy.delete_rule(rule_id)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
# --- suspend the policy at host and cluster scope ---
print("\n\nsuspend policy on host")
host = hosts.get_all()[0]
policies.suspend(host)
print("\n\nsuspend policy on omnistack_cluster")
cluster = clusters.get_all()[0]
policies.suspend(cluster)
""" cluster_group options works only with setup having MVA, please use below code for setup with MVA
cluster_group = cluster_groups.get_all()[0]
print(f"{cluster_group}")
print(f"{pp.pformat(cluster_group.data)} \n")
policies.suspend(cluster_group)
"""
""" federation options works only with setup NOT having MVA, please use below code for setup without MVA
print("\n\nsuspend policy on federation")
policies.suspend()
"""
# --- rename and then remove the demo policy (cleanup) ---
print("\n\nrename policy")
# COMPAT FIX: single-quoted subscript keeps this f-string valid on
# Python < 3.12 (nested double quotes need PEP 701).
policy.rename(f"renamed_{policy.data['name']}")
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\ndelete policy")
policy.delete()
| ###
# (C) Copyright [2019-2020] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from simplivity.ovc_client import OVC
from simplivity.exceptions import HPESimpliVityException
import pprint
# Pretty-printer used for all response payload dumps below.
pp = pprint.PrettyPrinter(indent=4)
# Connection settings for the OmniStack Virtual Controller (fill in real values).
config = {
    "ip": "<ovc_ip>",
    "credentials": {
        "username": "<username>",
        "password": "<password>"
    }
}
ovc = OVC(config)
# Resource clients exercised by this demo script.
policies = ovc.policies
hosts = ovc.hosts
clusters = ovc.omnistack_clusters
cluster_groups = ovc.cluster_groups
# --- get_all: default parameters ---
print("\n\nget_all with default params")
all_policies = policies.get_all()
count = len(all_policies)
for policy in all_policies:
    print(f"{policy}")
    print(f"{pp.pformat(policy.data)} \n")
print(f"Total number of policies : {count}")
# Keep one policy around to drive the lookup examples below.
policy_object = all_policies[0]
# --- get_all: server-side name filter ---
print("\n\nget_all with filters")
all_policies = policies.get_all(filters={'name': policy_object.data["name"]})
count = len(all_policies)
for policy in all_policies:
    print(f"{policy}")
    print(f"{pp.pformat(policy.data)} \n")
print(f"Total number of policies : {count}")
# --- get_all: paginated fetch; next_page() raises when pages are exhausted ---
print("\n\nget_all with pagination")
pagination = policies.get_all(limit=105, pagination=True, page_size=50)
end = False
while not end:
    data = pagination.data
    print("Page size:", len(data["resources"]))
    print(f"{pp.pformat(data)}")
    try:
        pagination.next_page()
    except HPESimpliVityException:
        end = True
# --- single-object lookups by id and by name ---
print("\n\nget_by_id")
policy = policies.get_by_id(policy_object.data["id"])
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\nget_by_name")
policy = policies.get_by_name(policy_object.data["name"])
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
# --- VMs currently attached to the policy ---
print("\n\nget_all VMs using this policy")
vms = policy.get_vms()
print(policy.data)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print(f"{pp.pformat(vms)} \n")
# --- create a policy and attach backup rules to it ---
print("\n\ncreate policy")
policy_name = "fixed_frequency_retention_policy"
policy = policies.create(policy_name)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
# Two rules at once: a windowed rule and a minimal frequency/retention rule.
multiple_rules = [
    {
        "start_time": "14:30",
        "end_time": "15:30",
        "application_consistent": False,
        "frequency": 3,
        "retention": 5
    },
    {
        "frequency": 5,
        "retention": 6
    }
]
print("\n\nadd rules to policy")
policy.create_rules(multiple_rules)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
# A single rule may also be passed as a bare dict.
single_rule = {
    "frequency": 10,
    "retention": 12
}
policy.create_rules(single_rule)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
# --- inspect and delete individual rules ---
print("\n\nget rule")
all_rules = policy.data["rules"]
for rule in all_rules:
    rule_obj = policy.get_rule(rule.get('id'))
    print(f"{pp.pformat(rule_obj)} \n")
print("\n\ndelete rule")
rule_id = policy.data["rules"][0]['id']
policy.delete_rule(rule_id)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
# --- suspend the policy at host and cluster scope ---
print("\n\nsuspend policy on host")
host = hosts.get_all()[0]
policies.suspend(host)
print("\n\nsuspend policy on omnistack_cluster")
cluster = clusters.get_all()[0]
policies.suspend(cluster)
""" cluster_group options works only with setup having MVA, please use below code for setup with MVA
cluster_group = cluster_groups.get_all()[0]
print(f"{cluster_group}")
print(f"{pp.pformat(cluster_group.data)} \n")
policies.suspend(cluster_group)
"""
""" federation options works only with setup NOT having MVA, please use below code for setup without MVA
print("\n\nsuspend policy on federation")
policies.suspend()
"""
# --- rename and then remove the demo policy (cleanup) ---
print("\n\nrename policy")
policy.rename(f"renamed_{policy.data['name']}")
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\ndelete policy")
policy.delete()
|
#!/usr/bin/env python3
import sys
import os
import re
import argparse
import requests
from bs4 import BeautifulSoup as bs
version=1.1
print("""\033[1;36m
╦ ╦╔═╗╔╗ ╦═╗╔═╗╔═╗╔╦╗╔═╗╦═╗
║║║║╣ ╠╩╗ ╠╦╝║╣ ╠═╣║║║║╣ ╠╦╝
╚╩╝╚═╝╚═╝────╩╚═╚═╝╩ ╩╩ ╩╚═╝╩╚═
🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥
--> Coded by FEBIN 🛡️🌐
\033[1;39m""")
def febrev_fuzz(url):
    """Probe *url* for common admin-panel paths read from admin-panel.txt.

    Prints every candidate path answering HTTP 200 or 302.  Network errors
    abort the scan with a message; an invalid scheme exits the program.
    """
    os.system("clear")
    # Candidate paths, whitespace separated; close the file deterministically.
    with open("admin-panel.txt") as feblist:
        adminpages = feblist.read().split()
    print(f"""
[\033[1;37m+\033[1;39m] STARTED CRAWLING TO FIND ADMIN PANEL OF URL : \033[1;34m{url}
""")
    try:
        if not (url.startswith("https://") or url.startswith("http://")):
            print("Error : INVALID URL ! URL must start with 'http://' or 'https://'")
            exit()
        # Normalise to exactly one trailing slash so path joins are clean.
        if not url.endswith("/"):
            url = f"{url}/"
        server = requests.get(url).headers.get('Server')
        print(f"\033[1;37m SERVER Type >> {server}")
        print("\n<----------------------------------------------------------------------------------->")
        print(" ")
        for page in adminpages:
            # BUG FIX: the original requested f"{url}/{page}", producing a
            # double slash after the normalisation above.
            reqresp = requests.get(f"{url}{page}", timeout=10)
            if reqresp.status_code == 200:
                print(f"\033[1;39m FOUND ==> {url}{page} \033[1;34m")
            elif reqresp.status_code == 302:
                # BUG FIX: the original string lacked the f prefix and printed
                # the {url}{page} placeholders literally.
                print(f"\033[1;39m FOUND 302 ==> {url}{page} \033[1;34m")
    except requests.exceptions.ConnectionError:
        print("[\033[1;31m-\033[1;39m] Connection to the Server Failed, May be invalid URL or bad Internet connection. Check Your Internet connection,URL and try again\n ")
    except requests.exceptions.ReadTimeout:
        print("\033[1;31m [\033[1;31m-\033[1;39m] Error : EXECUTION STOPPED DUE TO !TIMED OUT! ERROR, YOUR INTERNET MAY BE DISCONNECTED!!!....EXITTED")
    print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def sub_brute(domain, sublist):
    """Brute-force subdomains of *domain* using the wordlist file *sublist*.

    Prints host and HTTP status for every name that resolves; unreachable
    names are skipped silently.  Exits when the wordlist is missing.
    """
    if not os.path.isfile(sublist):
        print(f"[\033[1;31m-\033[1;39m] Wordlist {sublist} not found!!")
        exit()
    print(f"[\033[1;37m+\033[1;39m] Subdomain wordlist {sublist} loaded -> OK")
    print("")
    # Close the wordlist deterministically (the original leaked the handle).
    with open(sublist) as sub:
        subs = sub.read().split("\n")
    for host in subs:
        if not host:
            # ROBUSTNESS: skip blank lines, which would otherwise probe
            # the bogus name "http://.domain".
            continue
        try:
            req = requests.get(f"http://{host}.{domain}")
            print(f"\033[1;39m{host}.{domain} --> \033[1;37m{req.status_code}")
        except requests.exceptions.ConnectionError:
            pass
        except UnicodeError:
            pass
    print("")
    print("[\033[1;37m+\033[1;39m] Finished!")
    print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def wordlistgen(url, filepath):
    """Build a deduplicated wordlist from the visible text of *url*.

    Appends the words to *filepath*; falls back to ~/fr-wordlist.txt when
    *filepath* cannot be created (missing directory).
    """
    from bs4 import BeautifulSoup
    print("")
    try:
        pagedata = requests.get(url).text
        soup = BeautifulSoup(pagedata, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[-] ERROR CONNECTING THE SERVER...")
        exit()
    # Drop script/style contents so only rendered text remains.
    for script in soup(["script", "style"]):
        script.extract()
    words = soup.get_text().strip().split()
    # BUG FIX: the original's `while x <= count` loops removed one occurrence
    # too many, raised ValueError and silently skipped the remaining stopword
    # removals; the dedup pass also mutated the list while iterating it.
    # A set difference gives the intended unique, stopword-free word list.
    stopwords = {'is', 'was', 'are', 'for', 'the', 'of', 'to'}
    feb = sorted(set(words) - stopwords)
    try:
        # Open the output once instead of once per word.
        with open(filepath, "a+") as file:
            file.writelines("\n" + word for word in feb)
    except FileNotFoundError:
        homedir = os.environ.get('HOME')
        with open(f"{homedir}/fr-wordlist.txt", "a+") as file:
            file.writelines("\n" + word for word in feb)
    if os.path.isfile(filepath):
        print("")
        print(f"\033[1;39m[\033[1;37m+\033[1;39m]Wordlist {filepath} successfully written")
    else:
        print("\033[1;31m[-]Sorry:Path not Found!! The Path You Specified Doesn't Exist")
        print("So Saved the wordlist as fr-wordlist.txt in the HOME Directory of the current User.....")
    print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def word_analyze(url):
    """Print a word-frequency table (with a bar graph) for the text of *url*."""
    from collections import Counter
    from bs4 import BeautifulSoup
    print("")
    try:
        pagedata = requests.get(url).text
        soup = BeautifulSoup(pagedata, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
        exit()
    # Drop script/style contents so only rendered text remains.
    for script in soup(["script", "style"]):
        script.extract()
    words = soup.get_text().strip().split()
    # BUG FIX: the original removed stopwords with off-by-one while loops
    # (aborting early on ValueError) and counted words while mutating the
    # list it was iterating, so roughly every other word was skipped.
    stopwords = {'is', 'was', 'are', 'for', 'the', 'of', 'to'}
    counts = Counter(word for word in words if word not in stopwords)
    print("\033[1;32m-"*74)
    print("\033[1;32m| Words | count/frequency | Graph | ")
    print("\033[1;32m-"*74)
    for word in sorted(counts):
        count = counts[word]
        print(f"\033[1;34m| {word + ' ' * (22 - len(word)) + '| '}{str(count) + ' ' * (22 - len(str(count)))}| \033[1;32m{'█' * count} ")
    print("\033[1;33m-"*74)
def endpoint_harvest(url):
    """Extract and print href= and src= endpoints from the page at *url*."""
    print(f"[\033[1;37m+\033[1;39m] Collecting Endpoints / Links from the webpage {url}")
    print("")
    try:
        pagedata = requests.get(url).text
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
        exit()
    # NOTE: the original also built a BeautifulSoup tree here but never used
    # it; extraction is purely regex based, so the dead parse was dropped.
    endpoint_pattern1 = re.compile(r'(?:href=")(.*?)"')
    endpoint_pattern2 = re.compile(r'(?:src=")(.*?)"')
    for link in endpoint_pattern1.findall(pagedata):
        print(link.replace("href=", "").replace("'", "").replace(">", "").replace('"', '').replace("</", " "))
    for src in endpoint_pattern2.findall(pagedata):
        print(src.replace("src=", "").replace("'", "").replace(">", "").replace('"', '').replace("</", " "))
    print("")
    print("[\033[1;37m+\033[1;39m] Finished!")
def param(url):
    """Print the name attribute of every <input> element on the page at *url*."""
    from bs4 import BeautifulSoup
    print("")
    try:
        markup = requests.get(url).text
        parsed = BeautifulSoup(markup, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
        exit()
    print("[\033[1;37m+\033[1;39m] Extracting Parameters from the WebPage!\n")
    for field in parsed.find_all("input"):
        print(field.get("name"))
    print("[\033[1;37m+\033[1;39m] Finished!")
# CLI definition: one store_true flag per mode, plus its value options.
parser = argparse.ArgumentParser(description='Parse the domain, wordlist etc..')
parser.add_argument('-link', dest='link', action='store_true', help='Extract Endpoints from url!')
parser.add_argument('-admin', dest='admin', action='store_true', help='Find Admin Panel of the given URL !')
parser.add_argument('-sub', dest='sub', action='store_true', help='Subdomain brute force of the given domain !')
parser.add_argument('-param', dest='param', action='store_true', help='Find hidden parameters from the given URL !')
parser.add_argument('-wordlist', dest='wordlist', action='store_true', help='Create targeted wordlist from the given URL !')
parser.add_argument('-analyze', dest='analyze', action='store_true', help='Analyze words and their frequencies from the given URL !')
parser.add_argument('-u', "--url", dest='url', action='store', help='The URL of the webpage!')
parser.add_argument('-d', "--domain", dest='domain', action='store', help='The domain name for sub domain brute-force!')
parser.add_argument('-w', "--wordlist", dest='list', action='store', help='Extract Endpoints from url!')
parser.add_argument('-o', "--outfile", dest='outfile', action='store', help='Output file to save the generated wordlist!!')
parser.add_argument('-v', "--version", dest='version', action='store_true', help='Version / Update Check !')
args = parser.parse_args()
try:
    # Hoisted scheme check shared by every URL-driven mode below.
    url_is_web = args.url is not None and args.url.startswith(("http://", "https://"))
    if args.link and args.url:
        if url_is_web:
            endpoint_harvest(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.admin and args.url:
        if url_is_web:
            febrev_fuzz(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.sub and args.domain and args.list:
        if args.domain.startswith(("http://", "https://")):
            print("[\033[1;31m-\033[1;39m] Expected Domain name not URL!")
            exit()
        else:
            sub_brute(args.domain, args.list)
    elif args.wordlist and args.url and args.outfile:
        if url_is_web:
            wordlistgen(args.url, args.outfile)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.analyze and args.url:
        if url_is_web:
            word_analyze(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.param and args.url:
        if url_is_web:
            param(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.version:
        print(f"CURRENT VERSION : {version}")
        try:
            verq = requests.get("http://raw.githubusercontent.com/febinrev/web_reamer/master/version")
            ver = float(verq.text.split()[0])
            if ver > version:
                print(f"[\033[1;37m+\033[1;39m] New Version {ver} of WEB_REAMER is available : https://github.com/febinrev/web_reamer.git")
            else:
                print("[\033[1;37m+\033[1;39m] WEB_REAMER is up-to-date!")
        except requests.exceptions.ConnectionError:
            print("[\033[1;31m-\033[1;39m] Error Connecting github !")
    else:
        # No recognised mode/option combination: show usage.
        print("""\033[1;33m
Usage:
   \033[1;32m1. Endpoint / Link Extraction:
       \033[1;39m ./web_reamer.py -link -u http://sample.com/ \033[1;32m
   2. Admin Panel fuzzing:
       \033[1;39m ./web_reamer.py -admin -u http://sample.com/ \033[1;32m
   3. Subdomain Brute Force:
       \033[1;39m ./web_reamer.py -sub -d sample.com -w subdomains.txt \033[1;32m
   4. Find hidden parameters from webpage:
       \033[1;39m ./web_reamer.py -param -u http://sample.com/ \033[1;32m
   5. Create Targetted Wordlist from webpage:
       \033[1;39m ./web_reamer.py -wordlist -u http://sample.com/ -o outfile_wordlist.txt \033[1;32m
   6. Analyze Word frequencies from the WebPage :
       \033[1;39m ./web_reamer.py -analyze -u http://sample.com/ \033[1;32m
   7. Help :
       \033[1;39m ./web_reamer.py -h \033[1;32m
       \033[1;39m ./web_reamer.py --help \033[1;32m
   8. Version / Update Check :
       \033[1;39m ./web_reamer.py -v \033[1;32m
       \033[1;39m ./web_reamer.py --version \033[1;32m
""")
except KeyboardInterrupt:
    print("\n\033[1;39m[\033[1;31m-\033[1;39m] User Interruption! Exit!")
    exit()
| #!/usr/bin/env python3
import sys
import os
import re
import argparse
import requests
from bs4 import BeautifulSoup as bs
version=1.1
print("""\033[1;36m
╦ ╦╔═╗╔╗ ╦═╗╔═╗╔═╗╔╦╗╔═╗╦═╗
║║║║╣ ╠╩╗ ╠╦╝║╣ ╠═╣║║║║╣ ╠╦╝
╚╩╝╚═╝╚═╝────╩╚═╚═╝╩ ╩╩ ╩╚═╝╩╚═
🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥
--> Coded by FEBIN 🛡️🌐
\033[1;39m""")
def febrev_fuzz(url):
    """Probe *url* for common admin-panel paths read from admin-panel.txt.

    Prints every candidate path answering HTTP 200 or 302.  Network errors
    abort the scan with a message; an invalid scheme exits the program.
    """
    os.system("clear")
    # Candidate paths, whitespace separated; close the file deterministically.
    with open("admin-panel.txt") as feblist:
        adminpages = feblist.read().split()
    print(f"""
[\033[1;37m+\033[1;39m] STARTED CRAWLING TO FIND ADMIN PANEL OF URL : \033[1;34m{url}
""")
    try:
        if not (url.startswith("https://") or url.startswith("http://")):
            print("Error : INVALID URL ! URL must start with 'http://' or 'https://'")
            exit()
        # Normalise to exactly one trailing slash so path joins are clean.
        if not url.endswith("/"):
            url = f"{url}/"
        server = requests.get(url).headers.get('Server')
        print(f"\033[1;37m SERVER Type >> {server}")
        print("\n<----------------------------------------------------------------------------------->")
        print(" ")
        for page in adminpages:
            # BUG FIX: the original requested f"{url}/{page}", producing a
            # double slash after the normalisation above.
            reqresp = requests.get(f"{url}{page}", timeout=10)
            if reqresp.status_code == 200:
                print(f"\033[1;39m FOUND ==> {url}{page} \033[1;34m")
            elif reqresp.status_code == 302:
                # BUG FIX: the original string lacked the f prefix and printed
                # the {url}{page} placeholders literally.
                print(f"\033[1;39m FOUND 302 ==> {url}{page} \033[1;34m")
    except requests.exceptions.ConnectionError:
        print("[\033[1;31m-\033[1;39m] Connection to the Server Failed, May be invalid URL or bad Internet connection. Check Your Internet connection,URL and try again\n ")
    except requests.exceptions.ReadTimeout:
        print("\033[1;31m [\033[1;31m-\033[1;39m] Error : EXECUTION STOPPED DUE TO !TIMED OUT! ERROR, YOUR INTERNET MAY BE DISCONNECTED!!!....EXITTED")
    print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def sub_brute(domain, sublist):
    """Brute-force subdomains of *domain* using the wordlist file *sublist*.

    Prints host and HTTP status for every name that resolves; unreachable
    names are skipped silently.  Exits when the wordlist is missing.
    """
    if not os.path.isfile(sublist):
        print(f"[\033[1;31m-\033[1;39m] Wordlist {sublist} not found!!")
        exit()
    print(f"[\033[1;37m+\033[1;39m] Subdomain wordlist {sublist} loaded -> OK")
    print("")
    # Close the wordlist deterministically (the original leaked the handle).
    with open(sublist) as sub:
        subs = sub.read().split("\n")
    for host in subs:
        if not host:
            # ROBUSTNESS: skip blank lines, which would otherwise probe
            # the bogus name "http://.domain".
            continue
        try:
            req = requests.get(f"http://{host}.{domain}")
            print(f"\033[1;39m{host}.{domain} --> \033[1;37m{req.status_code}")
        except requests.exceptions.ConnectionError:
            pass
        except UnicodeError:
            pass
    print("")
    print("[\033[1;37m+\033[1;39m] Finished!")
    print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def wordlistgen(url, filepath):
    """Build a deduplicated wordlist from the visible text of *url*.

    Appends the words to *filepath*; falls back to ~/fr-wordlist.txt when
    *filepath* cannot be created (missing directory).
    """
    from bs4 import BeautifulSoup
    print("")
    try:
        pagedata = requests.get(url).text
        soup = BeautifulSoup(pagedata, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[-] ERROR CONNECTING THE SERVER...")
        exit()
    # Drop script/style contents so only rendered text remains.
    for script in soup(["script", "style"]):
        script.extract()
    words = soup.get_text().strip().split()
    # BUG FIX: the original's `while x <= count` loops removed one occurrence
    # too many, raised ValueError and silently skipped the remaining stopword
    # removals; the dedup pass also mutated the list while iterating it.
    # A set difference gives the intended unique, stopword-free word list.
    stopwords = {'is', 'was', 'are', 'for', 'the', 'of', 'to'}
    feb = sorted(set(words) - stopwords)
    try:
        # Open the output once instead of once per word.
        with open(filepath, "a+") as file:
            file.writelines("\n" + word for word in feb)
    except FileNotFoundError:
        homedir = os.environ.get('HOME')
        with open(f"{homedir}/fr-wordlist.txt", "a+") as file:
            file.writelines("\n" + word for word in feb)
    if os.path.isfile(filepath):
        print("")
        print(f"\033[1;39m[\033[1;37m+\033[1;39m]Wordlist {filepath} successfully written")
    else:
        print("\033[1;31m[-]Sorry:Path not Found!! The Path You Specified Doesn't Exist")
        print("So Saved the wordlist as fr-wordlist.txt in the HOME Directory of the current User.....")
    print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def word_analyze(url):
    """Print a word-frequency table (with a bar graph) for the text of *url*."""
    from collections import Counter
    from bs4 import BeautifulSoup
    print("")
    try:
        pagedata = requests.get(url).text
        soup = BeautifulSoup(pagedata, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
        exit()
    # Drop script/style contents so only rendered text remains.
    for script in soup(["script", "style"]):
        script.extract()
    words = soup.get_text().strip().split()
    # BUG FIX: the original removed stopwords with off-by-one while loops
    # (aborting early on ValueError) and counted words while mutating the
    # list it was iterating, so roughly every other word was skipped.
    stopwords = {'is', 'was', 'are', 'for', 'the', 'of', 'to'}
    counts = Counter(word for word in words if word not in stopwords)
    print("\033[1;32m-"*74)
    print("\033[1;32m| Words | count/frequency | Graph | ")
    print("\033[1;32m-"*74)
    for word in sorted(counts):
        count = counts[word]
        print(f"\033[1;34m| {word + ' ' * (22 - len(word)) + '| '}{str(count) + ' ' * (22 - len(str(count)))}| \033[1;32m{'█' * count} ")
    print("\033[1;33m-"*74)
def endpoint_harvest(url):
    """Print every href/src endpoint extracted from the page at *url*."""
    print(f"[\033[1;37m+\033[1;39m] Collecting Endpoints / Links from the webpage {url}")
    from bs4 import BeautifulSoup
    print("")
    try:
        webpage = requests.get(url)
        pagedata = webpage.text
        soup = BeautifulSoup(pagedata, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
        exit()
    # Regexes capture the quoted value of href="..." / src="..." attributes.
    href_pattern = re.compile('(?:href=")(.*?)"')
    src_pattern = re.compile('(?:src=")(.*?)"')

    def _tidy(value, attr):
        # Strip markup leftovers exactly as the raw chained replaces did.
        return (value.replace(attr, "").replace("'", "").replace(">", "")
                .replace('"', '').replace("</", " "))

    for link in href_pattern.findall(pagedata):
        print(_tidy(link, "href="))
    for src in src_pattern.findall(pagedata):
        print(_tidy(src, "src="))
    print("")
    print("[\033[1;37m+\033[1;39m] Finished!")
def param(url):
    """Print the ``name`` attribute of every <input> element at *url*.

    Inputs lacking a ``name`` attribute are printed as ``None`` (unchanged
    behaviour).  Fix: the loop variable previously shadowed this function's
    own name.
    """
    from bs4 import BeautifulSoup
    print("")
    try:
        webpage = requests.get(url)
        pagedata = webpage.text
        soup = BeautifulSoup(pagedata, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
        exit()
    input_fields = soup.find_all("input")
    print("[\033[1;37m+\033[1;39m] Extracting Parameters from the WebPage!\n")
    for field in input_fields:
        print(field.get("name"))
    print("[\033[1;37m+\033[1;39m] Finished!")
# ---- Command-line interface ------------------------------------------------
# NOTE(review): `argparse`, `requests`, `version`, `febrev_fuzz` and
# `sub_brute` are defined earlier in this file — confirm when editing.
parser = argparse.ArgumentParser(description='Parse the domain, wordlist etc..')
parser.add_argument('-link', dest='link', action='store_true', help='Extract Endpoints from url!')
parser.add_argument('-admin', dest='admin', action='store_true', help='Find Admin Panel of the given URL !')
parser.add_argument('-sub', dest='sub', action='store_true', help='Subdomain brute force of the given domain !')
parser.add_argument('-param', dest='param', action='store_true', help='Find hidden parameters from the given URL !')
parser.add_argument('-wordlist', dest='wordlist', action='store_true', help='Create targeted wordlist from the given URL !')
parser.add_argument('-analyze', dest='analyze', action='store_true', help='Analyze words and their frequencies from the given URL !')
parser.add_argument('-u', "--url", dest='url', action='store', help='The URL of the webpage!')
parser.add_argument('-d', "--domain", dest='domain', action='store', help='The domain name for sub domain brute-force!')
parser.add_argument('-w', "--wordlist", dest='list', action='store', help='Extract Endpoints from url!')
parser.add_argument('-o', "--outfile", dest='outfile', action='store', help='Output file to save the generated wordlist!!')
parser.add_argument('-v', "--version", dest='version', action='store_true', help='Version / Update Check !')
args = parser.parse_args()
try:
    if args.link and args.url:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            endpoint_harvest(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.admin and args.url:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            febrev_fuzz(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.sub and args.domain and args.list:
        if args.domain.startswith("http://") or args.domain.startswith("https://"):
            print("[\033[1;31m-\033[1;39m] Expected Domain name not URL!")
            exit()
        else:
            sub_brute(args.domain, args.list)
    elif args.wordlist and args.url and args.outfile:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            wordlistgen(args.url, args.outfile)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.analyze and args.url:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            word_analyze(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.param and args.url:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            param(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.version:
        print(f"CURRENT VERSION : {version}")
        try:
            # Security fix: fetch the version marker over HTTPS, not HTTP.
            verq = requests.get("https://raw.githubusercontent.com/febinrev/web_reamer/master/version")
            ver = float(verq.text.split()[0])
            if ver > version:
                print(f"[\033[1;37m+\033[1;39m] New Version {ver} of WEB_REAMER is available : https://github.com/febinrev/web_reamer.git")
            else:
                print("[\033[1;37m+\033[1;39m] WEB_REAMER is up-to-date!")
        except requests.exceptions.ConnectionError:
            print("[\033[1;31m-\033[1;39m] Error Connecting github !")
    else:
        print("""\033[1;33m
Usage:
  \033[1;32m1. Endpoint / Link Extraction:
     \033[1;39m ./web_reamer.py -link -u http://sample.com/  \033[1;32m
  2. Admin Panel fuzzing:
     \033[1;39m ./web_reamer.py -admin -u http://sample.com/  \033[1;32m
  3. Subdomain Brute Force:
     \033[1;39m ./web_reamer.py -sub -d sample.com -w subdomains.txt \033[1;32m
  4. Find hidden parameters from webpage:
     \033[1;39m ./web_reamer.py -param -u http://sample.com/ \033[1;32m
  5. Create Targetted Wordlist from webpage:
     \033[1;39m ./web_reamer.py -wordlist -u http://sample.com/ -o outfile_wordlist.txt \033[1;32m
  6. Analyze Word frequencies from the WebPage :
     \033[1;39m ./web_reamer.py -analyze -u http://sample.com/ \033[1;32m
  7. Help :
     \033[1;39m ./web_reamer.py -h \033[1;32m
     \033[1;39m ./web_reamer.py --help \033[1;32m
  8. Version / Update Check :
     \033[1;39m ./web_reamer.py -v \033[1;32m
     \033[1;39m ./web_reamer.py --version \033[1;32m
""")
except KeyboardInterrupt:
    print("\n\033[1;39m[\033[1;31m-\033[1;39m] User Interruption! Exit!")
    exit()
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# ---- Integration configuration --------------------------------------------
API_KEY = demisto.getParam('APIKey')
SERVER_URL = 'https://analyze.intezer.com/api'
API_VERSION = '/v2-0'
BASE_URL = SERVER_URL + API_VERSION
IS_AVAILABLE_URL = 'is-available'
ERROR_PREFIX = 'Error from Intezer:'
# HTTP statuses treated as success: 200 OK, 201 Created, 202 Accepted.
ACCEPTABLE_HTTP_CODES = {200, 201, 202}
# TLS verification is disabled when the integration's `insecure` param is set.
USE_SSL = not demisto.params().get('insecure', False)
# Maps HTTP status codes to user-facing error strings.
http_status_to_error_massage = {
    400: '400 Bad Request - Wrong or invalid parameters',
    401: '401 Unauthorized - Wrong or invalid api key',
    403: '403 Forbidden - The account is not allowed to preform this task',
    404: '404 Not Found - Analysis was not found',
    410: '410 Gone - Analysis no longer exists in the service',
    500: '500 Internal Server Error - Internal error',
    503: '503 Service Unavailable'
}
# Maps Intezer verdict strings onto DBot scores (3=bad, 2=suspicious, 1=good).
dbot_score_by_verdict = {
    'malicious': 3,
    'suspicious': 2,
    'trusted': 1,
    'neutral': 1,
    'no_threats': 1
}
''' HELPER FUNCTIONS '''
def handle_response(response, acceptable_http_status_codes):
    """Validate *response* and return its decoded JSON body.

    Calls ``return_error`` (which aborts the command) on an unexpected
    status code or a non-JSON body.
    """
    status = response.status_code
    if status not in acceptable_http_status_codes:
        message = http_status_to_error_massage.get(status, "Failed to perform request")
        return_error(f'{ERROR_PREFIX} {message}')
    try:
        return response.json()
    except json.decoder.JSONDecodeError:
        # This error is unlikely to happen, as the return code should indicate of error beforehand
        return_error(f'Response returned with no data. This might be an issue with Intezer.\nPlease try again later\n'
                     f'Response content:\n{response.content}')
def get_session():
    """Return a requests session carrying an Intezer bearer access token."""
    response = requests.post(BASE_URL + '/get-access-token', json={'api_key': API_KEY}, verify=USE_SSL)
    response = handle_response(response, {200})
    session = requests.session()
    # Fix: the subscript previously reused the outer single quote inside the
    # f-string expression, a SyntaxError on Python < 3.12.
    session.headers['Authorization'] = f'Bearer {response["result"]}'
    return session
''' COMMANDS '''
def check_is_available():
    """Return 'ok' when Intezer reports availability, otherwise None."""
    availability_url = f'{SERVER_URL}/{IS_AVAILABLE_URL}'
    response = SESSION.get(availability_url, verify=USE_SSL)
    if response.json()['is_available']:
        return 'ok'
    return None
def analyze_by_hash_command():
    """Entry point for the ``intezer-analyze-by-hash`` command."""
    requested_hash = demisto.getArg('file_hash')
    api_response = make_analyze_by_hash_request(requested_hash)
    handle_analyze_by_hash_response(api_response, requested_hash)
def get_latest_result_command():
    """Entry point for the ``intezer-get-latest-report`` command."""
    requested_hash = demisto.getArg('file_hash')
    api_response = make_get_latest_report_request(requested_hash)
    handle_get_latest_result_response(api_response, requested_hash)
def make_analyze_by_hash_request(file_hash):
    """POST *file_hash* to Intezer's analyze-by-hash endpoint."""
    return SESSION.post(BASE_URL + '/analyze-by-hash', json={'hash': file_hash}, verify=USE_SSL)
def make_get_latest_report_request(file_hash):
    """GET the latest existing Intezer report for *file_hash*."""
    report_url = f'{BASE_URL}/files/{file_hash}'
    return SESSION.get(report_url, verify=USE_SSL)
def handle_analyze_by_hash_response(response, file_hash):
    """Translate the analyze-by-hash HTTP response into war-room output."""
    if response.status_code == 404:
        # Unknown hash: emit a neutral DBot score instead of failing.
        dbot_entry = {
            'Vendor': 'Intezer',
            'Type': 'hash',
            'Indicator': file_hash,
            'Score': 0
        }
        readable = f'Hash {file_hash} does not exist on Intezer genome database'
        return_outputs(readable, {'DBotScore': dbot_entry})
        return
    if response.status_code == 400:
        return_error('File hash is not valid.\nIntezer file hash reputation supports only SHA-256, '
                     'SHA-1 and MD5 hash formats.\n')
    handle_analyze_response(response)
def handle_get_latest_result_response(response, file_hash):
    """Translate the latest-report HTTP response into war-room output."""
    if response.status_code == 404:
        # Unknown hash: emit a neutral DBot score instead of failing.
        dbot_entry = {
            'Vendor': 'Intezer',
            'Type': 'hash',
            'Indicator': file_hash,
            'Score': 0
        }
        readable = f'Hash {file_hash} does not exist on Intezer genome database'
        return_outputs(readable, {'DBotScore': dbot_entry})
        return
    if response.status_code == 400:
        return_error('File hash is not valid.\nIntezer file hash reputation supports only SHA-256, '
                     'SHA-1 and MD5 hash formats.\n')
    enrich_dbot_and_display_file_analysis_results(response.json()['result'])
def analyze_by_uploaded_file_command():
    """Entry point for the ``intezer-analyze-by-file`` command."""
    entry_id = demisto.getArg('file_entry_id')
    handle_analyze_response(make_analyze_by_file_request(entry_id))
def make_analyze_by_file_request(file_id):
    """Upload the war-room file identified by *file_id* for analysis."""
    file_data = demisto.getFilePath(file_id)
    with open(file_data['path'], 'rb') as file_to_upload:
        files = {'file': (file_data['name'], file_to_upload)}
        # The POST must run inside the `with` so the handle is still open
        # while requests streams the multipart body.
        return SESSION.post(BASE_URL + '/analyze', files=files, verify=USE_SSL)
def handle_analyze_response(response):
    """Record a newly created analysis in context and the war room."""
    payload = handle_response(response, ACCEPTABLE_HTTP_CODES)
    # The analysis id is the last path segment of the result URL.
    analysis_id = payload['result_url'].rsplit('/', 1)[-1]
    context_json = {'Intezer.Analysis(obj.ID === val.ID)': {'ID': analysis_id, 'Status': 'Created', 'type': 'File'}}
    return_outputs(f'Analysis created successfully: {analysis_id}', context_json, payload)
def check_analysis_status_and_get_results_command():
    """Poll one or more analyses and display any finished results."""
    analysis_type = demisto.args().get('analysis_type', 'File')
    analysis_ids = argToList(demisto.args().get('analysis_id'))
    indicator_name = demisto.args().get('indicator_name')
    for analysis_id in analysis_ids:
        status_response = make_analysis_status_request(analysis_id, analysis_type)
        result = handle_analysis_result(status_response)
        if not result:
            # Still in progress - handle_analysis_result already reported it.
            continue
        if analysis_type == 'Endpoint':
            enrich_dbot_and_display_endpoint_analysis_results(result, indicator_name)
        elif analysis_type == 'File':
            enrich_dbot_and_display_file_analysis_results(result)
def make_analysis_status_request(analysis_id, analysis_type):
    """GET the status/result of a file or endpoint analysis."""
    if analysis_type == 'Endpoint':
        analysis_endpoint = 'endpoint-analyses/'
    else:
        analysis_endpoint = 'analyses/'
    return SESSION.get(f'{BASE_URL}/{analysis_endpoint}{analysis_id}', verify=USE_SSL)
def handle_analysis_result(response):
    """Return the analysis result dict, or None while still in progress."""
    json_response = handle_response(response, ACCEPTABLE_HTTP_CODES)
    if response.status_code == 200:
        return json_response['result']
    # 201/202: analysis still running - record progress in context.
    analysis_id = json_response['result_url'].rsplit('/', 1)[-1]
    in_progress_context = {'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id,
                                                                   'Status': 'InProgress'}}
    return_outputs('Analysis is still in progress', in_progress_context)
    return None
def enrich_dbot_and_display_file_analysis_results(result):
    """Publish DBot score, file context and a summary for a file analysis.

    *result* is Intezer's analysis result dict (verdict, sha256, urls, ...).
    Fix: f-string subscripts previously reused the outer quote, a
    SyntaxError on Python < 3.12.
    """
    verdict = result.get('verdict')
    sha256 = result.get('sha256')
    analysis_id = result.get('analysis_id')
    dbot = {
        'Vendor': 'Intezer',
        'Type': 'hash',
        'Indicator': sha256,
        'Score': dbot_score_by_verdict.get(verdict, 0)
    }
    file = {'SHA256': sha256, 'Metadata': result, 'ExistsInIntezer': True}
    if verdict == 'malicious':
        file['Malicious'] = {'Vendor': 'Intezer'}
    presentable_result = '## Intezer File analysis result\n'
    presentable_result += f' SHA256: {sha256}\n'
    presentable_result += f' Verdict: **{verdict}** ({result["sub_verdict"]})\n'
    if 'family_name' in result:
        presentable_result += f'Family: **{result["family_name"]}**\n'
    presentable_result += f'[Analysis Link]({result["analysis_url"]})\n'
    demisto.results({
        'Type': entryTypes['note'],
        'EntryContext': {
            outputPaths['dbotscore']: dbot,
            outputPaths['file']: file,
            'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id, 'Status': 'Done'}},
        'HumanReadable': presentable_result,
        'ContentsFormat': formats['json'],
        'Contents': result
    })
def enrich_dbot_and_display_endpoint_analysis_results(result, indicator_name=None):
    """Publish DBot score, endpoint context and a summary for an endpoint analysis.

    Fix: f-string subscripts previously reused the outer quote, a
    SyntaxError on Python < 3.12.
    """
    verdict = result['verdict']
    computer_name = result['computer_name']
    analysis_id = result['analysis_id']
    dbot = {
        'Vendor': 'Intezer',
        'Type': 'hostname',
        'Indicator': indicator_name if indicator_name else computer_name,
        'Score': dbot_score_by_verdict.get(verdict, 0)
    }
    endpoint = {'Metadata': result}
    presentable_result = '## Intezer Endpoint analysis result\n'
    presentable_result += f'Host Name: {computer_name}\n'
    presentable_result += f' Verdict: **{verdict}**\n'
    if result.get('families') is not None:
        presentable_result += f'Families: **{result["families"]}**\n'
    presentable_result += f' Scan Time: {result["scan_start_time"]}\n'
    presentable_result += f'[Analysis Link]({result["analysis_url"]})\n'
    ec = {
        'DBotScore': dbot,
        'Endpoint': endpoint,
        'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id, 'Status': 'Done'}
    }
    return_outputs(presentable_result, ec, result)
''' EXECUTION CODE '''
# The authorized session is created at import time; any failure (bad API
# key, network error) is surfaced to the user via return_error.
try:
    SESSION = get_session()
except Exception as e:
    return_error(str(e))
def main():
    """Route the invoked Demisto command to the matching handler."""
    try:
        handle_proxy()
        command = demisto.command()
        if command == 'test-module':
            demisto.results(check_is_available())
        elif command == 'intezer-analyze-by-hash':
            analyze_by_hash_command()
        elif command == 'intezer-analyze-by-file':
            analyze_by_uploaded_file_command()
        elif command == 'intezer-get-latest-report':
            get_latest_result_command()
        elif command == 'intezer-get-analysis-result':
            check_analysis_status_and_get_results_command()
    except Exception as e:
        return_error(str(e))


# python2 uses __builtin__ python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
    main()
| import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# ---- Integration configuration --------------------------------------------
API_KEY = demisto.getParam('APIKey')
SERVER_URL = 'https://analyze.intezer.com/api'
API_VERSION = '/v2-0'
BASE_URL = SERVER_URL + API_VERSION
IS_AVAILABLE_URL = 'is-available'
ERROR_PREFIX = 'Error from Intezer:'
# HTTP statuses treated as success: 200 OK, 201 Created, 202 Accepted.
ACCEPTABLE_HTTP_CODES = {200, 201, 202}
# TLS verification is disabled when the integration's `insecure` param is set.
USE_SSL = not demisto.params().get('insecure', False)
# Maps HTTP status codes to user-facing error strings.
http_status_to_error_massage = {
    400: '400 Bad Request - Wrong or invalid parameters',
    401: '401 Unauthorized - Wrong or invalid api key',
    403: '403 Forbidden - The account is not allowed to preform this task',
    404: '404 Not Found - Analysis was not found',
    410: '410 Gone - Analysis no longer exists in the service',
    500: '500 Internal Server Error - Internal error',
    503: '503 Service Unavailable'
}
# Maps Intezer verdict strings onto DBot scores (3=bad, 2=suspicious, 1=good).
dbot_score_by_verdict = {
    'malicious': 3,
    'suspicious': 2,
    'trusted': 1,
    'neutral': 1,
    'no_threats': 1
}
''' HELPER FUNCTIONS '''
def handle_response(response, acceptable_http_status_codes):
    """Validate *response* and return its decoded JSON body.

    Aborts the command via ``return_error`` on an unexpected status code
    or a non-JSON body.
    """
    status = response.status_code
    if status not in acceptable_http_status_codes:
        message = http_status_to_error_massage.get(status, "Failed to perform request")
        return_error(f'{ERROR_PREFIX} {message}')
    try:
        return response.json()
    except json.decoder.JSONDecodeError:
        # This error is unlikely to happen, as the return code should indicate of error beforehand
        return_error(f'Response returned with no data. This might be an issue with Intezer.\nPlease try again later\n'
                     f'Response content:\n{response.content}')
def get_session():
    """Return a requests session carrying an Intezer bearer access token."""
    token_response = requests.post(BASE_URL + '/get-access-token', json={'api_key': API_KEY}, verify=USE_SSL)
    token_json = handle_response(token_response, {200})
    session = requests.session()
    session.headers['Authorization'] = f'Bearer {token_json["result"]}'
    return session
''' COMMANDS '''
def check_is_available():
    """Return 'ok' when Intezer reports availability, otherwise None."""
    response = SESSION.get(f'{SERVER_URL}/{IS_AVAILABLE_URL}', verify=USE_SSL)
    return 'ok' if response.json()['is_available'] else None
def analyze_by_hash_command():
    """Entry point for the ``intezer-analyze-by-hash`` command."""
    requested_hash = demisto.getArg('file_hash')
    handle_analyze_by_hash_response(make_analyze_by_hash_request(requested_hash), requested_hash)
def get_latest_result_command():
    """Entry point for the ``intezer-get-latest-report`` command."""
    requested_hash = demisto.getArg('file_hash')
    handle_get_latest_result_response(make_get_latest_report_request(requested_hash), requested_hash)
def make_analyze_by_hash_request(file_hash):
    """POST *file_hash* to Intezer's analyze-by-hash endpoint."""
    payload = {'hash': file_hash}
    return SESSION.post(BASE_URL + '/analyze-by-hash', json=payload, verify=USE_SSL)
def make_get_latest_report_request(file_hash):
    """GET the latest existing Intezer report for *file_hash*."""
    report_url = f'{BASE_URL}/files/{file_hash}'
    return SESSION.get(report_url, verify=USE_SSL)
def handle_analyze_by_hash_response(response, file_hash):
    """Translate the analyze-by-hash HTTP response into war-room output."""
    if response.status_code == 404:
        # Unknown hash: emit a neutral DBot score instead of failing.
        dbot_entry = {
            'Vendor': 'Intezer',
            'Type': 'hash',
            'Indicator': file_hash,
            'Score': 0
        }
        readable = f'Hash {file_hash} does not exist on Intezer genome database'
        return_outputs(readable, {'DBotScore': dbot_entry})
        return
    if response.status_code == 400:
        return_error('File hash is not valid.\nIntezer file hash reputation supports only SHA-256, '
                     'SHA-1 and MD5 hash formats.\n')
    handle_analyze_response(response)
def handle_get_latest_result_response(response, file_hash):
    """Translate the latest-report HTTP response into war-room output."""
    if response.status_code == 404:
        # Unknown hash: emit a neutral DBot score instead of failing.
        dbot_entry = {
            'Vendor': 'Intezer',
            'Type': 'hash',
            'Indicator': file_hash,
            'Score': 0
        }
        readable = f'Hash {file_hash} does not exist on Intezer genome database'
        return_outputs(readable, {'DBotScore': dbot_entry})
        return
    if response.status_code == 400:
        return_error('File hash is not valid.\nIntezer file hash reputation supports only SHA-256, '
                     'SHA-1 and MD5 hash formats.\n')
    enrich_dbot_and_display_file_analysis_results(response.json()['result'])
def analyze_by_uploaded_file_command():
    """Entry point for the ``intezer-analyze-by-file`` command."""
    entry_id = demisto.getArg('file_entry_id')
    handle_analyze_response(make_analyze_by_file_request(entry_id))
def make_analyze_by_file_request(file_id):
    """Upload the war-room file identified by *file_id* for analysis."""
    file_data = demisto.getFilePath(file_id)
    with open(file_data['path'], 'rb') as file_to_upload:
        files = {'file': (file_data['name'], file_to_upload)}
        # The POST must run inside the `with` so the handle is still open
        # while requests streams the multipart body.
        return SESSION.post(BASE_URL + '/analyze', files=files, verify=USE_SSL)
def handle_analyze_response(response):
    """Record a newly created analysis in context and the war room."""
    payload = handle_response(response, ACCEPTABLE_HTTP_CODES)
    # The analysis id is the last path segment of the result URL.
    analysis_id = payload['result_url'].rsplit('/', 1)[-1]
    context_json = {'Intezer.Analysis(obj.ID === val.ID)': {'ID': analysis_id, 'Status': 'Created', 'type': 'File'}}
    return_outputs(f'Analysis created successfully: {analysis_id}', context_json, payload)
def check_analysis_status_and_get_results_command():
    """Poll one or more analyses and display any finished results."""
    analysis_type = demisto.args().get('analysis_type', 'File')
    analysis_ids = argToList(demisto.args().get('analysis_id'))
    indicator_name = demisto.args().get('indicator_name')
    for analysis_id in analysis_ids:
        status_response = make_analysis_status_request(analysis_id, analysis_type)
        result = handle_analysis_result(status_response)
        if not result:
            # Still in progress - handle_analysis_result already reported it.
            continue
        if analysis_type == 'Endpoint':
            enrich_dbot_and_display_endpoint_analysis_results(result, indicator_name)
        elif analysis_type == 'File':
            enrich_dbot_and_display_file_analysis_results(result)
def make_analysis_status_request(analysis_id, analysis_type):
    """GET the status/result of a file or endpoint analysis."""
    if analysis_type == 'Endpoint':
        analysis_endpoint = 'endpoint-analyses/'
    else:
        analysis_endpoint = 'analyses/'
    return SESSION.get(f'{BASE_URL}/{analysis_endpoint}{analysis_id}', verify=USE_SSL)
def handle_analysis_result(response):
    """Return the analysis result dict, or None while still in progress."""
    json_response = handle_response(response, ACCEPTABLE_HTTP_CODES)
    if response.status_code == 200:
        return json_response['result']
    # 201/202: analysis still running - record progress in context.
    analysis_id = json_response['result_url'].rsplit('/', 1)[-1]
    in_progress_context = {'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id,
                                                                   'Status': 'InProgress'}}
    return_outputs('Analysis is still in progress', in_progress_context)
    return None
def enrich_dbot_and_display_file_analysis_results(result):
    """Publish DBot score, file context and a summary for a file analysis."""
    verdict = result.get('verdict')
    sha256 = result.get('sha256')
    analysis_id = result.get('analysis_id')
    dbot = {
        'Vendor': 'Intezer',
        'Type': 'hash',
        'Indicator': sha256,
        'Score': dbot_score_by_verdict.get(verdict, 0)
    }
    file = {'SHA256': sha256, 'Metadata': result, 'ExistsInIntezer': True}
    if verdict == 'malicious':
        file['Malicious'] = {'Vendor': 'Intezer'}
    # Assemble the markdown summary line by line, then join once.
    summary_lines = ['## Intezer File analysis result\n']
    summary_lines.append(f' SHA256: {sha256}\n')
    summary_lines.append(f' Verdict: **{verdict}** ({result["sub_verdict"]})\n')
    if 'family_name' in result:
        summary_lines.append(f'Family: **{result["family_name"]}**\n')
    summary_lines.append(f'[Analysis Link]({result["analysis_url"]})\n')
    demisto.results({
        'Type': entryTypes['note'],
        'EntryContext': {
            outputPaths['dbotscore']: dbot,
            outputPaths['file']: file,
            'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id, 'Status': 'Done'}},
        'HumanReadable': ''.join(summary_lines),
        'ContentsFormat': formats['json'],
        'Contents': result
    })
def enrich_dbot_and_display_endpoint_analysis_results(result, indicator_name=None):
    """Publish DBot score, endpoint context and a summary for an endpoint analysis."""
    verdict = result['verdict']
    computer_name = result['computer_name']
    analysis_id = result['analysis_id']
    dbot = {
        'Vendor': 'Intezer',
        'Type': 'hostname',
        'Indicator': indicator_name if indicator_name else computer_name,
        'Score': dbot_score_by_verdict.get(verdict, 0)
    }
    endpoint = {'Metadata': result}
    # Assemble the markdown summary line by line, then join once.
    summary_lines = ['## Intezer Endpoint analysis result\n']
    summary_lines.append(f'Host Name: {computer_name}\n')
    summary_lines.append(f' Verdict: **{verdict}**\n')
    if result.get('families') is not None:
        summary_lines.append(f'Families: **{result["families"]}**\n')
    summary_lines.append(f' Scan Time: {result["scan_start_time"]}\n')
    summary_lines.append(f'[Analysis Link]({result["analysis_url"]})\n')
    ec = {
        'DBotScore': dbot,
        'Endpoint': endpoint,
        'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id, 'Status': 'Done'}
    }
    return_outputs(''.join(summary_lines), ec, result)
''' EXECUTION CODE '''
# The authorized session is created at import time; any failure (bad API
# key, network error) is surfaced to the user via return_error.
try:
    SESSION = get_session()
except Exception as e:
    return_error(str(e))
def main():
    """Route the invoked Demisto command to the matching handler."""
    try:
        handle_proxy()
        command = demisto.command()
        if command == 'test-module':
            demisto.results(check_is_available())
        elif command == 'intezer-analyze-by-hash':
            analyze_by_hash_command()
        elif command == 'intezer-analyze-by-file':
            analyze_by_uploaded_file_command()
        elif command == 'intezer-get-latest-report':
            get_latest_result_command()
        elif command == 'intezer-get-analysis-result':
            check_analysis_status_and_get_results_command()
    except Exception as e:
        return_error(str(e))


# python2 uses __builtin__ python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
    main()
|
"""api_gw_test"""
# Remove warnings when using pytest fixtures
# pylint: disable=redefined-outer-name
import json
from test.conftest import ENDPOINT_URL
# warning disabled, this is used as a pylint fixture
from test.elasticsearch_test import ( # pylint: disable=unused-import
es_client,
populate_es_test_case_1,
)
from urllib.parse import urlencode
import boto3
import pytest
import requests
def to_localstack_url(api_id: str, url: str):
    """Rewrite an API Gateway invoke URL into its localstack equivalent."""
    with_api = url.replace("4566", f"4566/restapis/{api_id}")
    return with_api.replace("dev", "dev/_user_request_")
def api_gw_lambda_integrate_deploy(
    api_client,
    api: dict,
    api_resource: dict,
    lambda_func: dict,
    http_method: str = "GET",
) -> str:
    """
    Integrate lambda with api gw method and deploy api.
    Return the invokation URL.

    Fix: the f-strings previously reused the outer double quote inside
    their subscript expressions, a SyntaxError on Python < 3.12.
    """
    lambda_integration_arn = (
        "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/"
        f"{lambda_func['FunctionArn']}/invocations"
    )
    api_client.put_integration(
        restApiId=api["id"],
        resourceId=api_resource["id"],
        httpMethod=http_method,
        type="AWS",
        integrationHttpMethod="POST",
        uri=lambda_integration_arn,
    )
    api_client.create_deployment(
        restApiId=api["id"], stageName="dev",
    )
    return f"http://localhost:4566/restapis/{api['id']}/dev/_user_request_{api_resource['path']}"
@pytest.fixture
def api_gw_method(request):
    """Create a REST API with a /test resource and one method for testing.

    The method verb and response arguments come from the test's
    ``api_gw_method_args`` marker.  Returns the tuple
    ``(api_client, api, api_resource)``; the API is deleted on teardown.
    """
    marker = request.node.get_closest_marker("api_gw_method_args")
    put_method_args = marker.args[0]["put_method_args"]
    put_method_response_args = marker.args[0]["put_method_response_args"]
    api = None

    def fin():
        """fixture finalizer"""
        # Only delete the API if creation below actually succeeded.
        if api:
            api_client.delete_rest_api(restApiId=api["id"])

    # Hook teardown (finalizer) code
    request.addfinalizer(fin)
    api_client = boto3.client("apigateway", endpoint_url=ENDPOINT_URL)
    api = api_client.create_rest_api(name="testapi")
    # The root ("/") resource exists as soon as the API is created;
    # attach the /test resource under it.
    root_resource_id = api_client.get_resources(restApiId=api["id"])["items"][0]["id"]
    api_resource = api_client.create_resource(
        restApiId=api["id"], parentId=root_resource_id, pathPart="test"
    )
    api_client.put_method(
        restApiId=api["id"],
        resourceId=api_resource["id"],
        authorizationType="NONE",
        **put_method_args,
    )
    api_client.put_method_response(
        restApiId=api["id"],
        resourceId=api_resource["id"],
        statusCode="200",
        **put_method_response_args,
    )
    return api_client, api, api_resource
@pytest.mark.api_gw_method_args(
    {
        "put_method_args": {"httpMethod": "GET",},
        "put_method_response_args": {"httpMethod": "GET",},
    }
)
@pytest.mark.lambda_function_args(
    {
        "name": "stac_endpoint",
        "handler": "code.handler",
        "environment": {"CBERS_STAC_BUCKET": "bucket",},
        "timeout": 30,
        "layers": (
            {
                "output_dir": "./test",
                "layer_dir": "./cbers2stac/layers/common",
                "tag": "common",
            },
        ),
    }
)
def test_root(api_gw_method, lambda_function):
    """
    test_root_endpoint

    Deploys the STAC root lambda behind API GW (localstack) and checks
    that a plain GET on the deployed URL returns HTTP 200.
    """
    # Based on
    # https://stackoverflow.com/questions/58859917/creating-aws-lambda-integrated-api-gateway-resource-with-boto3
    api_client, api, api_resource = api_gw_method
    lambda_client, lambda_func = lambda_function  # pylint: disable=unused-variable
    url = api_gw_lambda_integrate_deploy(api_client, api, api_resource, lambda_func)
    req = requests.get(url)
    assert req.status_code == 200
@pytest.mark.api_gw_method_args(
    {
        "put_method_args": {"httpMethod": "GET",},
        "put_method_response_args": {"httpMethod": "GET",},
    }
)
@pytest.mark.lambda_function_args(
    {
        "name": "elasticsearch",
        "handler": "es.stac_search_endpoint_handler",
        "environment": {},
        "timeout": 30,
        "layers": (
            {
                "output_dir": "./test",
                "layer_dir": "./cbers2stac/layers/common",
                "tag": "common",
            },
        ),
    }
)
def test_item_search_get(
    api_gw_method, lambda_function, es_client
):  # pylint: disable=too-many-locals,too-many-statements
    """
    test_item_search_get

    End-to-end GET /search behaviour against a two-item test index:
    collection filtering, id filtering, paging links and the query
    extension.
    """
    api_client, api, api_resource = api_gw_method
    lambda_client, lambda_func = lambda_function  # pylint: disable=unused-variable
    # ES_ENDPOINT is set by lambda_function
    lambda_client.update_function_configuration(
        FunctionName=lambda_func["FunctionName"],
        Environment={"Variables": {"ES_PORT": "4571", "ES_SSL": "NO",}},
    )
    populate_es_test_case_1(es_client)
    # Empty GET, return all 2 items
    original_url = api_gw_lambda_integrate_deploy(
        api_client, api, api_resource, lambda_func
    )
    req = requests.get(original_url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 2
    # Single collection, return single item
    url = f"{original_url}?collections=CBERS4-MUX"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["collection"] == "CBERS4-MUX"
    # Two collections, return all items
    url = f"{original_url}?collections=CBERS4-MUX,CBERS4-AWFI"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 2
    # Paging, no next case
    url = f"{original_url}"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" not in fcol.keys()
    # Paging, next page: limit=1 forces a "next" link to the second item
    url = f"{original_url}?limit=1"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" in fcol.keys()
    assert len(fcol["links"]) == 1
    next_href = to_localstack_url(api["id"], fcol["links"][0]["href"])
    req = requests.get(next_href)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" not in fcol.keys()
    assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
    # ids
    url = f"{original_url}?ids=CBERS_4_MUX_20170528_090_084_L2"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
    # query extension: filter on a item property (cbers:data_type)
    url = f"{original_url}?"
    url += urlencode({"query": '{"cbers:data_type": {"eq":"L4"}}'})
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["id"] == "CBERS_4_AWFI_20170409_167_123_L4"
@pytest.mark.api_gw_method_args(
    {
        "put_method_args": {"httpMethod": "POST",},
        "put_method_response_args": {"httpMethod": "POST",},
    }
)
@pytest.mark.lambda_function_args(
    {
        "name": "elasticsearch",
        "handler": "es.stac_search_endpoint_handler",
        "environment": {},
        "timeout": 30,
        "layers": (
            {
                "output_dir": "./test",
                "layer_dir": "./cbers2stac/layers/common",
                "tag": "common",
            },
        ),
    }
)
def test_item_search_post(
    api_gw_method, lambda_function, es_client
):  # pylint: disable=too-many-locals
    """
    test_item_search_post

    End-to-end POST /search behaviour against a two-item test index:
    bbox validation, paging links (with body merge) and id filtering.
    """
    api_client, api, api_resource = api_gw_method
    lambda_client, lambda_func = lambda_function  # pylint: disable=unused-variable
    # ES_ENDPOINT is set by lambda_function
    lambda_client.update_function_configuration(
        FunctionName=lambda_func["FunctionName"],
        Environment={"Variables": {"ES_PORT": "4571", "ES_SSL": "NO",}},
    )
    populate_es_test_case_1(es_client)
    url = api_gw_lambda_integrate_deploy(
        api_client, api, api_resource, lambda_func, http_method="POST"
    )
    # POST with invalid bbox order, check error status code and message
    req = requests.post(
        url,
        data=json.dumps(
            {
                "collections": ["mycollection"],
                "bbox": [160.6, -55.95, -170, -25.89],
                "limit": 100,
                "datetime": "2019-01-01T00:00:00Z/2019-01-01T23:59:59Z",
            }
        ),
    )
    assert req.status_code == 400, req.text
    assert "First lon corner is not western" in req.text
    # Same as above with fixed bbox
    req = requests.post(
        url,
        data=json.dumps(
            {
                "collections": ["mycollection"],
                "bbox": [-170, -25.89, 160.6, -55.95],
                "limit": 100,
                "datetime": "2019-01-01T00:00:00Z/2019-01-01T23:59:59Z",
            }
        ),
    )
    assert req.status_code == 200, req.text
    # Paging, no next case
    req = requests.post(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" not in fcol.keys()
    # Paging, next page: limit=1 forces a "next" link whose body must be
    # merged with the original request body for the follow-up POST
    body = {"limit": 1}
    req = requests.post(url, data=json.dumps(body))
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" in fcol.keys()
    assert len(fcol["links"]) == 1
    next_href = to_localstack_url(api["id"], fcol["links"][0]["href"])
    req = requests.post(
        next_href, data=json.dumps({**body, **fcol["links"][0]["body"]})
    )
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" not in fcol.keys()
    assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
    # ids
    body = {"ids": ["CBERS_4_MUX_20170528_090_084_L2"]}
    req = requests.post(url, data=json.dumps(body))
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
| """api_gw_test"""
# Remove warnings when using pytest fixtures
# pylint: disable=redefined-outer-name
import json
from test.conftest import ENDPOINT_URL
# warning disabled, this is used as a pylint fixture
from test.elasticsearch_test import ( # pylint: disable=unused-import
es_client,
populate_es_test_case_1,
)
from urllib.parse import urlencode
import boto3
import pytest
import requests
def to_localstack_url(api_id: str, url: str):
    """
    Rewrite an API Gateway invocation URL into its localstack form.
    """
    with_api = url.replace("4566", f"4566/restapis/{api_id}")
    return with_api.replace("dev", "dev/_user_request_")
def api_gw_lambda_integrate_deploy(
    api_client,
    api: dict,
    api_resource: dict,
    lambda_func: dict,
    http_method: str = "GET",
) -> str:
    """
    Wire the lambda into the API GW method, deploy the "dev" stage and
    return the invocation URL.
    """
    integration_uri = (
        "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/"
        f"{lambda_func['FunctionArn']}/invocations"
    )
    api_client.put_integration(
        restApiId=api["id"],
        resourceId=api_resource["id"],
        httpMethod=http_method,
        type="AWS",
        integrationHttpMethod="POST",
        uri=integration_uri,
    )
    api_client.create_deployment(restApiId=api["id"], stageName="dev")
    return f"http://localhost:4566/restapis/{api['id']}/dev/_user_request_{api_resource['path']}"
@pytest.fixture
def api_gw_method(request):
    """api gw for testing: creates a REST API with a /test resource and
    the method/method-response declared in the api_gw_method_args marker.
    Yields (api_client, api, api_resource) via return."""
    # Per-test configuration comes from the @pytest.mark.api_gw_method_args marker.
    marker = request.node.get_closest_marker("api_gw_method_args")
    put_method_args = marker.args[0]["put_method_args"]
    put_method_response_args = marker.args[0]["put_method_response_args"]
    # Pre-declare so the finalizer closure sees None if creation below fails.
    api = None
    def fin():
        """fixture finalizer"""
        if api:
            api_client.delete_rest_api(restApiId=api["id"])
    # Hook teardown (finalizer) code
    request.addfinalizer(fin)
    api_client = boto3.client("apigateway", endpoint_url=ENDPOINT_URL)
    api = api_client.create_rest_api(name="testapi")
    # The root ("/") resource is created with the API; nest "test" under it.
    root_resource_id = api_client.get_resources(restApiId=api["id"])["items"][0]["id"]
    api_resource = api_client.create_resource(
        restApiId=api["id"], parentId=root_resource_id, pathPart="test"
    )
    api_client.put_method(
        restApiId=api["id"],
        resourceId=api_resource["id"],
        authorizationType="NONE",
        **put_method_args,
    )
    api_client.put_method_response(
        restApiId=api["id"],
        resourceId=api_resource["id"],
        statusCode="200",
        **put_method_response_args,
    )
    return api_client, api, api_resource
@pytest.mark.api_gw_method_args(
    {
        "put_method_args": {"httpMethod": "GET",},
        "put_method_response_args": {"httpMethod": "GET",},
    }
)
@pytest.mark.lambda_function_args(
    {
        "name": "stac_endpoint",
        "handler": "code.handler",
        "environment": {"CBERS_STAC_BUCKET": "bucket",},
        "timeout": 30,
        "layers": (
            {
                "output_dir": "./test",
                "layer_dir": "./cbers2stac/layers/common",
                "tag": "common",
            },
        ),
    }
)
def test_root(api_gw_method, lambda_function):
    """
    test_root_endpoint: a plain GET on the deployed root resource succeeds.
    """
    # Based on
    # https://stackoverflow.com/questions/58859917/creating-aws-lambda-integrated-api-gateway-resource-with-boto3
    api_client, api, api_resource = api_gw_method
    lambda_client, lambda_func = lambda_function # pylint: disable=unused-variable
    url = api_gw_lambda_integrate_deploy(api_client, api, api_resource, lambda_func)
    req = requests.get(url)
    assert req.status_code == 200
@pytest.mark.api_gw_method_args(
    {
        "put_method_args": {"httpMethod": "GET",},
        "put_method_response_args": {"httpMethod": "GET",},
    }
)
@pytest.mark.lambda_function_args(
    {
        "name": "elasticsearch",
        "handler": "es.stac_search_endpoint_handler",
        "environment": {},
        "timeout": 30,
        "layers": (
            {
                "output_dir": "./test",
                "layer_dir": "./cbers2stac/layers/common",
                "tag": "common",
            },
        ),
    }
)
def test_item_search_get(
    api_gw_method, lambda_function, es_client
): # pylint: disable=too-many-locals,too-many-statements
    """
    GET item-search: collection filtering, paging, ids and the query extension.
    """
    api_client, api, api_resource = api_gw_method
    lambda_client, lambda_func = lambda_function # pylint: disable=unused-variable
    # ES_ENDPOINT is set by lambda_function
    lambda_client.update_function_configuration(
        FunctionName=lambda_func["FunctionName"],
        Environment={"Variables": {"ES_PORT": "4571", "ES_SSL": "NO",}},
    )
    populate_es_test_case_1(es_client)
    # Empty GET, return all 2 items
    original_url = api_gw_lambda_integrate_deploy(
        api_client, api, api_resource, lambda_func
    )
    req = requests.get(original_url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 2
    # Single collection, return single item
    url = f"{original_url}?collections=CBERS4-MUX"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["collection"] == "CBERS4-MUX"
    # Two collections, return all items
    url = f"{original_url}?collections=CBERS4-MUX,CBERS4-AWFI"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 2
    # Paging, no next case
    url = f"{original_url}"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" not in fcol.keys()
    # Paging, next page
    url = f"{original_url}?limit=1"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" in fcol.keys()
    assert len(fcol["links"]) == 1
    next_href = to_localstack_url(api["id"], fcol["links"][0]["href"])
    req = requests.get(next_href)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" not in fcol.keys()
    assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
    # ids
    url = f"{original_url}?ids=CBERS_4_MUX_20170528_090_084_L2"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
    # query extension
    url = f"{original_url}?"
    url += urlencode({"query": '{"cbers:data_type": {"eq":"L4"}}'})
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["id"] == "CBERS_4_AWFI_20170409_167_123_L4"
@pytest.mark.api_gw_method_args(
    {
        "put_method_args": {"httpMethod": "POST",},
        "put_method_response_args": {"httpMethod": "POST",},
    }
)
@pytest.mark.lambda_function_args(
    {
        "name": "elasticsearch",
        "handler": "es.stac_search_endpoint_handler",
        "environment": {},
        "timeout": 30,
        "layers": (
            {
                "output_dir": "./test",
                "layer_dir": "./cbers2stac/layers/common",
                "tag": "common",
            },
        ),
    }
)
def test_item_search_post(
    api_gw_method, lambda_function, es_client
): # pylint: disable=too-many-locals
    """
    POST item-search: bbox validation, paging and ids in the JSON body.
    """
    api_client, api, api_resource = api_gw_method
    lambda_client, lambda_func = lambda_function # pylint: disable=unused-variable
    # ES_ENDPOINT is set by lambda_function
    lambda_client.update_function_configuration(
        FunctionName=lambda_func["FunctionName"],
        Environment={"Variables": {"ES_PORT": "4571", "ES_SSL": "NO",}},
    )
    populate_es_test_case_1(es_client)
    url = api_gw_lambda_integrate_deploy(
        api_client, api, api_resource, lambda_func, http_method="POST"
    )
    # POST with invalid bbox order, check error status code and message
    req = requests.post(
        url,
        data=json.dumps(
            {
                "collections": ["mycollection"],
                "bbox": [160.6, -55.95, -170, -25.89],
                "limit": 100,
                "datetime": "2019-01-01T00:00:00Z/2019-01-01T23:59:59Z",
            }
        ),
    )
    assert req.status_code == 400, req.text
    assert "First lon corner is not western" in req.text
    # Same as above with fixed bbox
    req = requests.post(
        url,
        data=json.dumps(
            {
                "collections": ["mycollection"],
                "bbox": [-170, -25.89, 160.6, -55.95],
                "limit": 100,
                "datetime": "2019-01-01T00:00:00Z/2019-01-01T23:59:59Z",
            }
        ),
    )
    assert req.status_code == 200, req.text
    # Paging, no next case
    req = requests.post(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" not in fcol.keys()
    # Paging, next page
    body = {"limit": 1}
    req = requests.post(url, data=json.dumps(body))
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" in fcol.keys()
    assert len(fcol["links"]) == 1
    next_href = to_localstack_url(api["id"], fcol["links"][0]["href"])
    req = requests.post(
        next_href, data=json.dumps({**body, **fcol["links"][0]["body"]})
    )
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" not in fcol.keys()
    assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
    # ids
    body = {"ids": ["CBERS_4_MUX_20170528_090_084_L2"]}
    req = requests.post(url, data=json.dumps(body))
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
|
import copy
import sys
import pprint
import os, os.path as op
from datetime import date, datetime, timedelta
from collections import OrderedDict
from functools import partial
from urllib.parse import urlparse
import yaml
from natsort import natsorted, ns
from pykwalify.core import Core
def abort(msg):
    """Print *msg* on stderr and terminate the process with status 1."""
    print(msg, file=sys.stderr)
    sys.exit(1)
def validate(item, key):
    """
    Sanity-check *item*'s names and its list under *key*; abort on bad data.

    NOTE(review): on success this returns ``names, games`` where ``names``
    is the module-level function object, not ``names(item)`` — callers only
    rely on the result's truthiness, so this looks unintentional but
    harmless; confirm before using the returned tuple.
    """
    # Each name must be a plain string or a pair of strings.
    for name in names(item):
        if not (isinstance(name, str) or
                (len(name) == 2 and
                all(isinstance(x, str) for x in name))):
            abort('Error: %r should be a string or a list of two strings' % name)
    games = item[key]
    if (not isinstance(games, list) or
        not all(isinstance(x, dict) for x in games)):
        print('Error: this should be a list of dicts:')
        abort(pprint.pformat(games))
    return names, games
def names(item):
    """Primary name of *item* followed by any alternate names."""
    alternates = item.get('names', [])
    return [item['name'], *alternates]
def game_name(game):
    """Return the canonical (first) name of *game*."""
    name = game['name']
    if isinstance(name, list):
        return name[0]
    return name
def parse_tag(tag):
    """Normalize a tag: spaces become dashes, then lowercase."""
    return '-'.join(tag.split(' ')).lower()
def parse_unicode(text):
    """Recursively pass through strings and (nested) lists/tuples.

    Any other type yields None (implicitly), matching prior behavior.
    """
    if isinstance(text, str):
        return text
    if isinstance(text, (list, tuple)):
        return [parse_unicode(element) for element in text]
def parse_unicode_tag(tag):
    """Normalize *tag* after routing it through parse_unicode."""
    text = parse_unicode(tag)
    return parse_tag(text)
def parse_tags(entry, keys):
    """Collect normalized tags for every key of *keys* present in *entry*,
    deduplicated while preserving first-seen order."""
    tags = []
    for key in keys:
        if key not in entry:
            continue
        val = entry.get(key)
        if isinstance(val, str):
            tags.append(parse_tag(val))
            tags.append(parse_unicode_tag(val))
        elif isinstance(val, list):
            tags.extend(parse_tag(v) for v in val)
            tags.extend(parse_unicode_tag(v) for v in val)
        else:
            abort('Error: %s\'s key "%s" is not valid (%s)' %
                  (entry['name'], key, type(val).__name__))
    # dict.fromkeys keeps insertion order, giving an ordered dedup.
    return list(dict.fromkeys(tags))
def parse_global_tags(site, item, tag, item_key: str):
    """
    Accumulate site-wide usage counts for the values of *tag* on *item*.

    Mutates *site*: site.<tag> maps each tag value to
    {'tag_count': int, 'keys': set} and is kept sorted by tag value.
    Also normalizes item[tag] from a bare string into a one-element list.
    """
    if tag in item:
        if not getattr(site, tag, False):
            setattr(site, tag, {})
        if isinstance(item[tag], str):
            item[tag] = [item[tag]]
        for t in item[tag]:
            tagObj = getattr(site, tag, False)
            if not tagObj.get(t, False):
                tagObj[t] = {'tag_count': 0, 'keys': set()}
            # Count each item at most once per tag value.
            if item_key not in tagObj[t]['keys']:
                tagObj[t]['tag_count'] += 1
                tagObj[t]['keys'].add(item_key)
        setattr(site, tag, OrderedDict(sorted(getattr(site, tag, {}).items())))
def parse_item(entry, entry_tags=None, meta=None, meta_tags=None):
    """
    Normalize one game entry: compute the "new" freshness flag, merge tags
    from the entry and its meta block, and derive repository icon/badge info.

    Defaults are None-guarded instead of mutable default arguments, which
    would be shared across calls.
    """
    entry_tags = [] if entry_tags is None else entry_tags
    meta = {} if meta is None else meta
    meta_tags = [] if meta_tags is None else meta_tags
    updated = entry.get('updated') or date(1970, 1, 1)
    if isinstance(updated, str):
        updated = datetime.strptime(updated, "%Y-%m-%d").date()
    result = dict(entry,
                  new=(date.today() - updated) < timedelta(days=30),
                  tags=parse_tags(entry, entry_tags) + parse_tags(meta, meta_tags),
                  updated=updated)
    if "repo" in result:
        # Try to add extra repo information, like icons, badges
        repo_parsed = urlparse(result["repo"])
        domain = repo_parsed.netloc
        ext = os.path.splitext(result["repo"])[1]
        if "github.com" in domain:
            try:
                # https://github.com/<user>/<repo>
                _, user, repo, *_ = repo_parsed.path.split("/")
            except ValueError:
                # Not a full user/repo URL: fall back to a plain icon.
                result["repoiconname"] = "github"
                result["repoiconstyle"] = "fab"
                result["repotitle"] = "GitHub"
            else:
                result["repobadge"] = f'<img class="badge lazyload" alt="GitHub stars" data-src="https://img.shields.io/github/stars/{user}/{repo}?style=flat-square&logo=github" src="https://img.shields.io/badge/stars-%3F-blue?style=flat-square&logo=github">'
        elif (".google.com" in domain or
              "googlecode.com" in domain):
            result["repoiconname"] = "google"
            result["repoiconstyle"] = "fab"
            result["repotitle"] = "Google Code"
        elif "bitbucket.org" in domain:
            result["repoiconname"] = "bitbucket"
            result["repoiconstyle"] = "fab"
            result["repotitle"] = "Bitbucket"
        elif "gitlab.com" in domain or domain.startswith("gitlab."):
            result["repoiconname"] = "gitlab"
            result["repoiconstyle"] = "fab"
            result["repotitle"] = "GitLab"
        elif "sourceforge.net" in domain:
            try:
                # https://sourceforge.net/projects/<repo>
                _, _, repo, *_ = repo_parsed.path.split("/")
            except ValueError:
                pass
            else:
                result["repobadge"] = f'<img class="badge lazyload" alt="Sourceforge downloads" data-src="https://img.shields.io/sourceforge/dt/{repo}?style=flat-square" src="https://img.shields.io/badge/downloads-%3F-brightgreen?style=flat-square">'
        elif ext in (".gz", ".zip", ".tar", ".tgz", ".tbz2", ".bz2", ".xz", ".rar"):
            result["repoiconname"] = "box"
            result["repoiconstyle"] = "fas"
            result["repotitle"] = "Archive"
    return result
def parse_items(site, item, key):
    """
    Validate *item*, normalize the games under *key* and append a
    (names, meta, parsed-games) triple to the site-level list for *key*.
    """
    if not (item.get(key) and validate(item, key)):
        return
    if not getattr(site, key, False):
        setattr(site, key, [])
    # Tags inherited from the original game's meta block.
    meta_tags = ['genre', 'subgenre', 'theme']
    # Tags read from each clone/remake entry itself.
    game_tags = [
        'status',
        'development',
        'lang',
        'framework',
        'content',
        'license',
        'multiplayer',
        'type'
    ]
    meta = item.get('meta', {})
    meta["names_ascii"] = parse_unicode(names(item))
    meta["external"] = item.get('external', {})
    parse_global_tags(site, meta, 'genre', item['name'])
    parse_global_tags(site, meta, 'subgenre', item['name'])
    parse_global_tags(site, meta, 'theme', item['name'])
    parse_fn = partial(parse_item, entry_tags=game_tags, meta=meta, meta_tags=meta_tags)
    for game in item[key]:
        parse_global_tags(site, game, 'lang', game['name'])
    # NOTE: 'item' is rebound to the output triple from here on.
    item = (names(item), meta, [parse_fn(i) for i in item[key]])
    getattr(site, key).append(item)
def show_error(game_name, error_str):
    """Print the game name highlighted in red, then the error detail."""
    print(f'\033[91m {game_name}\033[0m')
    print(f' {error_str}')
def show_errors(errors):
    """Report every collected error, print a total, and abort the program."""
    print('\n')
    for err in errors:
        show_error(err["name"], err["error"])
    print(f'\n {len(errors)} errors\n')
    sys.exit(1)
def show_validation_errors(data, validation_errors):
    """Map pykwalify validation errors back to game entries and report them."""
    collected = []
    for err in validation_errors:
        # Error paths look like "/<index>/...": index into the source list.
        index = int(err.path.split('/')[1])
        collected.append({"name": game_name(data[index]), "error": repr(err)})
    show_errors(collected)
def validate_with_schema(source_data, schema_file):
    """
    Validate *source_data* against the pykwalify *schema_file*.

    Pretty-prints collected schema violations (and exits) when pykwalify
    recorded any; unexpected errors are re-raised unchanged.
    """
    core = Core(source_data=source_data, schema_files=[schema_file])
    try:
        core.validate(raise_exception=True)
    except Exception as error:
        if len(core.errors) > 0:
            show_validation_errors(source_data, core.errors)
        else:
            raise error
def parse_data(site):
    """
    Load originals and clones from YAML, validate them, cross-check
    references, then attach the combined data to *site* via parse_items.
    """
    base = op.dirname(__file__)
    originals = []
    for fn in os.listdir(op.join(base, 'originals')):
        if fn.endswith('.yaml'):
            # Close files deterministically instead of leaking handles.
            with open(op.join(base, 'originals', fn), encoding="utf-8") as fh:
                originals.extend(yaml.safe_load(fh))
    def sort_key(game):
        name = game_name(game)
        # Always sort SCUMM first
        if name == 'SCUMM':
            return '0'
        if name.startswith('The '):
            return name[4:]
        return name
    originals = natsorted(originals, key=sort_key, alg=ns.IGNORECASE)
    print(str(len(originals)) + ' games in total')
    validate_with_schema(originals, 'schema/originals.yaml')
    clones = []
    for fn in sorted(os.listdir(op.join(base, 'games'))):
        if fn.endswith('.yaml'):
            with open(op.join(base, 'games', fn), encoding="utf-8") as fh:
                clones.extend(yaml.safe_load(fh))
    print(str(len(clones)) + ' clones in total')
    validate_with_schema(clones, 'schema/games.yaml')
    errors = []
    originals_map = {}
    for item in originals:
        name = game_name(item)
        if name in originals_map:
            errors.append({
                "name": name,
                "error": "Duplicate original game '%s'" % name
            })
        originals_map[name] = item
    if len(errors) > 0:
        show_errors(errors)
    for clone in clones:
        if 'originals' not in clone:
            show_errors([{
                "name": clone["name"],
                "error": "Unable to find 'remakes' or 'clones' in game"
            }])
        for original in clone['originals']:
            if original not in originals_map:
                errors.append({
                    "name": clone["name"],
                    "error": "Original game '%s' not found" % original
                })
        if "updated" not in clone:
            # f-strings cannot reuse the enclosing quote character before
            # Python 3.12, so the subscript must use single quotes.
            print(f"{clone['name']} has no updated field")
        else:
            if isinstance(clone['updated'], str):
                clone['updated'] = datetime.strptime(clone['updated'], "%Y-%m-%d").date()
        if "status" not in clone:
            print(f"{clone['name']} has no status field")
    oldest_games = sorted([(clone['name'], clone['updated']) for clone in clones if 'updated' in clone], key=lambda x: x[1])[:5]
    print(f"Oldest 5 games: {oldest_games}")
    if len(errors) > 0:
        show_errors(errors)
    for item in originals:
        # Recombine originals and clones
        combined = copy.deepcopy(item)
        name = game_name(combined)
        combined['games'] = [
            clone for clone in clones
            if name in clone['originals']
        ]
        parse_items(site, combined, 'games')
| import copy
import sys
import pprint
import os, os.path as op
from datetime import date, datetime, timedelta
from collections import OrderedDict
from functools import partial
from urllib.parse import urlparse
import yaml
from natsort import natsorted, ns
from pykwalify.core import Core
def abort(msg):
    """Print *msg* on stderr and terminate the process with status 1."""
    print(msg, file=sys.stderr)
    sys.exit(1)
def validate(item, key):
    """
    Sanity-check *item*'s names and its list under *key*; abort on bad data.

    NOTE(review): on success this returns ``names, games`` where ``names``
    is the module-level function object, not ``names(item)`` — callers only
    rely on the result's truthiness, so this looks unintentional but
    harmless; confirm before using the returned tuple.
    """
    # Each name must be a plain string or a pair of strings.
    for name in names(item):
        if not (isinstance(name, str) or
                (len(name) == 2 and
                all(isinstance(x, str) for x in name))):
            abort('Error: %r should be a string or a list of two strings' % name)
    games = item[key]
    if (not isinstance(games, list) or
        not all(isinstance(x, dict) for x in games)):
        print('Error: this should be a list of dicts:')
        abort(pprint.pformat(games))
    return names, games
def names(item):
    """Primary name of *item* followed by any alternate names."""
    alternates = item.get('names', [])
    return [item['name'], *alternates]
def game_name(game):
    """Return the canonical (first) name of *game*."""
    name = game['name']
    if isinstance(name, list):
        return name[0]
    return name
def parse_tag(tag):
    """Normalize a tag: spaces become dashes, then lowercase."""
    return '-'.join(tag.split(' ')).lower()
def parse_unicode(text):
    """Recursively pass through strings and (nested) lists/tuples.

    Any other type yields None (implicitly), matching prior behavior.
    """
    if isinstance(text, str):
        return text
    if isinstance(text, (list, tuple)):
        return [parse_unicode(element) for element in text]
def parse_unicode_tag(tag):
    """Normalize *tag* after routing it through parse_unicode."""
    text = parse_unicode(tag)
    return parse_tag(text)
def parse_tags(entry, keys):
    """Collect normalized tags for every key of *keys* present in *entry*,
    deduplicated while preserving first-seen order."""
    tags = []
    for key in keys:
        if key not in entry:
            continue
        val = entry.get(key)
        if isinstance(val, str):
            tags.append(parse_tag(val))
            tags.append(parse_unicode_tag(val))
        elif isinstance(val, list):
            tags.extend(parse_tag(v) for v in val)
            tags.extend(parse_unicode_tag(v) for v in val)
        else:
            abort('Error: %s\'s key "%s" is not valid (%s)' %
                  (entry['name'], key, type(val).__name__))
    # dict.fromkeys keeps insertion order, giving an ordered dedup.
    return list(dict.fromkeys(tags))
def parse_global_tags(site, item, tag, item_key: str):
    """
    Accumulate site-wide usage counts for the values of *tag* on *item*.

    Mutates *site*: site.<tag> maps each tag value to
    {'tag_count': int, 'keys': set} and is kept sorted by tag value.
    Also normalizes item[tag] from a bare string into a one-element list.
    """
    if tag in item:
        if not getattr(site, tag, False):
            setattr(site, tag, {})
        if isinstance(item[tag], str):
            item[tag] = [item[tag]]
        for t in item[tag]:
            tagObj = getattr(site, tag, False)
            if not tagObj.get(t, False):
                tagObj[t] = {'tag_count': 0, 'keys': set()}
            # Count each item at most once per tag value.
            if item_key not in tagObj[t]['keys']:
                tagObj[t]['tag_count'] += 1
                tagObj[t]['keys'].add(item_key)
        setattr(site, tag, OrderedDict(sorted(getattr(site, tag, {}).items())))
def parse_item(entry, entry_tags=None, meta=None, meta_tags=None):
    """
    Normalize one game entry: compute the "new" freshness flag, merge tags
    from the entry and its meta block, and derive repository icon/badge info.

    Defaults are None-guarded instead of mutable default arguments, which
    would be shared across calls.
    """
    entry_tags = [] if entry_tags is None else entry_tags
    meta = {} if meta is None else meta
    meta_tags = [] if meta_tags is None else meta_tags
    updated = entry.get('updated') or date(1970, 1, 1)
    if isinstance(updated, str):
        updated = datetime.strptime(updated, "%Y-%m-%d").date()
    result = dict(entry,
                  new=(date.today() - updated) < timedelta(days=30),
                  tags=parse_tags(entry, entry_tags) + parse_tags(meta, meta_tags),
                  updated=updated)
    if "repo" in result:
        # Try to add extra repo information, like icons, badges
        repo_parsed = urlparse(result["repo"])
        domain = repo_parsed.netloc
        ext = os.path.splitext(result["repo"])[1]
        if "github.com" in domain:
            try:
                # https://github.com/<user>/<repo>
                _, user, repo, *_ = repo_parsed.path.split("/")
            except ValueError:
                # Not a full user/repo URL: fall back to a plain icon.
                result["repoiconname"] = "github"
                result["repoiconstyle"] = "fab"
                result["repotitle"] = "GitHub"
            else:
                result["repobadge"] = f'<img class="badge lazyload" alt="GitHub stars" data-src="https://img.shields.io/github/stars/{user}/{repo}?style=flat-square&logo=github" src="https://img.shields.io/badge/stars-%3F-blue?style=flat-square&logo=github">'
        elif (".google.com" in domain or
              "googlecode.com" in domain):
            result["repoiconname"] = "google"
            result["repoiconstyle"] = "fab"
            result["repotitle"] = "Google Code"
        elif "bitbucket.org" in domain:
            result["repoiconname"] = "bitbucket"
            result["repoiconstyle"] = "fab"
            result["repotitle"] = "Bitbucket"
        elif "gitlab.com" in domain or domain.startswith("gitlab."):
            result["repoiconname"] = "gitlab"
            result["repoiconstyle"] = "fab"
            result["repotitle"] = "GitLab"
        elif "sourceforge.net" in domain:
            try:
                # https://sourceforge.net/projects/<repo>
                _, _, repo, *_ = repo_parsed.path.split("/")
            except ValueError:
                pass
            else:
                result["repobadge"] = f'<img class="badge lazyload" alt="Sourceforge downloads" data-src="https://img.shields.io/sourceforge/dt/{repo}?style=flat-square" src="https://img.shields.io/badge/downloads-%3F-brightgreen?style=flat-square">'
        elif ext in (".gz", ".zip", ".tar", ".tgz", ".tbz2", ".bz2", ".xz", ".rar"):
            result["repoiconname"] = "box"
            result["repoiconstyle"] = "fas"
            result["repotitle"] = "Archive"
    return result
def parse_items(site, item, key):
    """
    Validate *item*, normalize the games under *key* and append a
    (names, meta, parsed-games) triple to the site-level list for *key*.
    """
    if not (item.get(key) and validate(item, key)):
        return
    if not getattr(site, key, False):
        setattr(site, key, [])
    # Tags inherited from the original game's meta block.
    meta_tags = ['genre', 'subgenre', 'theme']
    # Tags read from each clone/remake entry itself.
    game_tags = [
        'status',
        'development',
        'lang',
        'framework',
        'content',
        'license',
        'multiplayer',
        'type'
    ]
    meta = item.get('meta', {})
    meta["names_ascii"] = parse_unicode(names(item))
    meta["external"] = item.get('external', {})
    parse_global_tags(site, meta, 'genre', item['name'])
    parse_global_tags(site, meta, 'subgenre', item['name'])
    parse_global_tags(site, meta, 'theme', item['name'])
    parse_fn = partial(parse_item, entry_tags=game_tags, meta=meta, meta_tags=meta_tags)
    for game in item[key]:
        parse_global_tags(site, game, 'lang', game['name'])
    # NOTE: 'item' is rebound to the output triple from here on.
    item = (names(item), meta, [parse_fn(i) for i in item[key]])
    getattr(site, key).append(item)
def show_error(game_name, error_str):
    """Print the game name highlighted in red, then the error detail."""
    print(f'\033[91m {game_name}\033[0m')
    print(f' {error_str}')
def show_errors(errors):
    """Report every collected error, print a total, and abort the program."""
    print('\n')
    for err in errors:
        show_error(err["name"], err["error"])
    print(f'\n {len(errors)} errors\n')
    sys.exit(1)
def show_validation_errors(data, validation_errors):
    """Map pykwalify validation errors back to game entries and report them."""
    collected = []
    for err in validation_errors:
        # Error paths look like "/<index>/...": index into the source list.
        index = int(err.path.split('/')[1])
        collected.append({"name": game_name(data[index]), "error": repr(err)})
    show_errors(collected)
def validate_with_schema(source_data, schema_file):
    """
    Validate *source_data* against the pykwalify *schema_file*.

    Pretty-prints collected schema violations (and exits) when pykwalify
    recorded any; unexpected errors are re-raised unchanged.
    """
    core = Core(source_data=source_data, schema_files=[schema_file])
    try:
        core.validate(raise_exception=True)
    except Exception as error:
        if len(core.errors) > 0:
            show_validation_errors(source_data, core.errors)
        else:
            raise error
def parse_data(site):
    """
    Load originals and clones from YAML, validate them, cross-check
    references, then attach the combined data to *site* via parse_items.
    """
    base = op.dirname(__file__)
    originals = []
    for fn in os.listdir(op.join(base, 'originals')):
        if fn.endswith('.yaml'):
            # Close files deterministically instead of leaking handles.
            with open(op.join(base, 'originals', fn), encoding="utf-8") as fh:
                originals.extend(yaml.safe_load(fh))
    def sort_key(game):
        name = game_name(game)
        # Always sort SCUMM first
        if name == 'SCUMM':
            return '0'
        if name.startswith('The '):
            return name[4:]
        return name
    originals = natsorted(originals, key=sort_key, alg=ns.IGNORECASE)
    print(str(len(originals)) + ' games in total')
    validate_with_schema(originals, 'schema/originals.yaml')
    clones = []
    for fn in sorted(os.listdir(op.join(base, 'games'))):
        if fn.endswith('.yaml'):
            with open(op.join(base, 'games', fn), encoding="utf-8") as fh:
                clones.extend(yaml.safe_load(fh))
    print(str(len(clones)) + ' clones in total')
    validate_with_schema(clones, 'schema/games.yaml')
    errors = []
    originals_map = {}
    for item in originals:
        name = game_name(item)
        if name in originals_map:
            errors.append({
                "name": name,
                "error": "Duplicate original game '%s'" % name
            })
        originals_map[name] = item
    if len(errors) > 0:
        show_errors(errors)
    for clone in clones:
        if 'originals' not in clone:
            show_errors([{
                "name": clone["name"],
                "error": "Unable to find 'remakes' or 'clones' in game"
            }])
        for original in clone['originals']:
            if original not in originals_map:
                errors.append({
                    "name": clone["name"],
                    "error": "Original game '%s' not found" % original
                })
        if "updated" not in clone:
            print(f"{clone['name']} has no updated field")
        else:
            if isinstance(clone['updated'], str):
                clone['updated'] = datetime.strptime(clone['updated'], "%Y-%m-%d").date()
        if "status" not in clone:
            print(f"{clone['name']} has no status field")
    oldest_games = sorted([(clone['name'], clone['updated']) for clone in clones if 'updated' in clone], key=lambda x: x[1])[:5]
    print(f"Oldest 5 games: {oldest_games}")
    if len(errors) > 0:
        show_errors(errors)
    for item in originals:
        # Recombine originals and clones
        combined = copy.deepcopy(item)
        name = game_name(combined)
        combined['games'] = [
            clone for clone in clones
            if name in clone['originals']
        ]
        parse_items(site, combined, 'games')
|
from configparser import ConfigParser
import feedparser
import re
import requests
import tweepy
def get_id(xkcd_link: str) -> int:
    """
    Extract the numeric comic id from an xkcd link (0 when none found).
    """
    found = re.search(r"\d+", xkcd_link)
    return int(found.group()) if found else 0
def get_xkcd_rss_entries(url: str):
    """
    Fetch the XKCD RSS feed at *url* and return its list of entries.

    Returns None when the parsed feed carries no "entries" key.
    """
    # get latest rss feed
    feed = feedparser.parse(url)
    return feed.get("entries")
def get_latest_rss_entry(entries: list):
    """
    Return (comic id, entry) for the newest entry of the XKCD RSS feed.
    """
    latest = entries[0]
    return get_id(xkcd_link=latest.get("id")), latest
def downdload_comic(entry: dict, filename: str) -> None:
    """
    Download the comic image referenced in *entry*'s summary to *filename*.

    Does nothing when the summary carries no PNG src attribute.
    """
    found = re.search(r'src="(.*png)"', entry["summary"])
    if found is None:
        return None
    img_url = found.group(1)
    resp = requests.get(img_url)
    resp.raise_for_status()
    with open(filename, "wb") as fh:
        fh.write(resp.content)
    return None
def initialize_twitter_api(config: ConfigParser):
    """
    Build an authenticated tweepy API client from the [twitter] config
    section (consumer_key/consumer_secret + access_token/access_secret).
    """
    twitter_config = config["twitter"]
    auth = tweepy.OAuthHandler(
        twitter_config.get("consumer_key"), twitter_config.get("consumer_secret")
    )
    auth.set_access_token(
        twitter_config.get("access_token"), twitter_config.get("access_secret")
    )
    api = tweepy.API(auth)
    return api
def send_twitter_post(entry: dict, api: tweepy.API, img_fname: str) -> None:
    """
    Post a tweet with the comic title (plus link) and the image file.
    """
    # Pull the title attribute out of the entry's HTML summary.
    match = re.search("title=(.*)/>", entry["summary"])
    if match:
        msg = match.groups()[0]
        # f-strings cannot reuse the enclosing quote character before
        # Python 3.12 -- the subscript must use single quotes.
        msg += f"\n {entry['link']}"
    else:
        msg = "-- No Title --"
    api.update_with_media(status=msg, filename=img_fname)
    return None
| from configparser import ConfigParser
import feedparser
import re
import requests
import tweepy
def get_id(xkcd_link: str) -> int:
    """
    Extract the numeric comic id from an xkcd link (0 when none found).
    """
    found = re.search(r"\d+", xkcd_link)
    return int(found.group()) if found else 0
def get_xkcd_rss_entries(url: str):
    """
    Fetch the XKCD RSS feed at *url* and return its list of entries.

    Returns None when the parsed feed carries no "entries" key.
    """
    # get latest rss feed
    feed = feedparser.parse(url)
    return feed.get("entries")
def get_latest_rss_entry(entries: list):
    """
    Return (comic id, entry) for the newest entry of the XKCD RSS feed.
    """
    latest = entries[0]
    return get_id(xkcd_link=latest.get("id")), latest
def downdload_comic(entry: dict, filename: str) -> None:
    """
    Download the comic image referenced in *entry*'s summary to *filename*.

    Does nothing when the summary carries no PNG src attribute.
    """
    found = re.search(r'src="(.*png)"', entry["summary"])
    if found is None:
        return None
    img_url = found.group(1)
    resp = requests.get(img_url)
    resp.raise_for_status()
    with open(filename, "wb") as fh:
        fh.write(resp.content)
    return None
def initialize_twitter_api(config: ConfigParser):
    """
    Build an authenticated tweepy API client from the [twitter] config
    section (consumer_key/consumer_secret + access_token/access_secret).
    """
    twitter_config = config["twitter"]
    auth = tweepy.OAuthHandler(
        twitter_config.get("consumer_key"), twitter_config.get("consumer_secret")
    )
    auth.set_access_token(
        twitter_config.get("access_token"), twitter_config.get("access_secret")
    )
    api = tweepy.API(auth)
    return api
def send_twitter_post(entry: dict, api: tweepy.API, img_fname: str) -> None:
    """
    Post a tweet with the comic title (plus link) and the image file.
    """
    # Pull the title attribute out of the entry's HTML summary.
    match = re.search("title=(.*)/>", entry["summary"])
    if match:
        msg = match.groups()[0]
        msg += f"\n {entry['link']}"
    else:
        msg = "-- No Title --"
    api.update_with_media(status=msg, filename=img_fname)
    return None
|
import logging
import os
import sys
import warnings
from collections import namedtuple
from typing import *
import matplotlib.image
import matplotlib.pyplot as plt
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from booster import Diagnostic
from .datatracker import DataTracker
# Best metric value observed so far, with the step/epoch where it occurred.
BestScore = namedtuple('BestScore', ['step', 'epoch', 'value', 'summary'])
class BaseLogger():
    """Abstract interface for experiment loggers identified by a key."""
    def __init__(self, key, logdir):
        # key: sub-logger identifier (e.g. "train"); logdir: output root.
        self.key = key
        self.logdir = logdir
    def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
        """Record a Diagnostic summary at the given step/epoch."""
        raise NotImplementedError
    def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
        """Record an image tensor under *key* (assumed channel-first — TODO confirm)."""
        raise NotImplementedError
class TensorboardLogger(BaseLogger):
    """Logger backend writing to a TensorBoard event file under logdir/key."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): **kwargs is accepted but silently dropped.
        super().__init__(*args)
        self.writer = SummaryWriter(os.path.join(self.logdir, self.key))
    def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
        # Diagnostic knows how to serialize itself to a SummaryWriter.
        summary.log(self.writer, global_step)
    def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
        self.writer.add_image(key, img_tensor, global_step=global_step)
class LoggingLogger(BaseLogger):
    """Logger backend emitting human-readable one-line summaries via `logging`."""
    def __init__(self, *args, diagnostic_keys=None, **kwargs):
        """
        :param diagnostic_keys: summary sections to report (default: ['loss'])
        """
        super().__init__(*args)
        self.logger = logging.getLogger(self.key)
        self.logger.setLevel(logging.INFO)
        # None-guard instead of a mutable default argument.
        self.diagnostic_keys = ['loss'] if diagnostic_keys is None else diagnostic_keys
    def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, best_score: Optional[BestScore] = None,
                       **kwargs):
        """Log one line per tracked section: step/epoch, values, timing, best score."""
        for stats_key in self.diagnostic_keys:
            if stats_key not in summary.keys():
                self.logger.warning('key ' + str(stats_key) + ' not in summary.')
            else:
                message = f'[{global_step} / {epoch}] '
                message += ''.join([f'{k} {v:6.2f} ' for k, v in summary.get(stats_key).items()])
                if "info" in summary.keys() and "elapsed-time" in summary["info"].keys():
                    # An f-string cannot reuse its own quote character before
                    # Python 3.12 -- use double quotes around the expression.
                    message += f"({summary['info']['elapsed-time']:.2f}s /iter)"
                else:
                    warnings.warn(
                        "Summary does not contain the key info/elapsed-time. The elapsed time won't be displayed.")
                if best_score is not None:
                    message += f' (best: {best_score.value:6.2f} [{best_score.step} | {best_score.epoch}])'
                self.logger.info(message)
    def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
        """Images are not supported by this backend."""
        pass
class PlotLogger(BaseLogger):
    """Logger backend accumulating per-step values for matplotlib curves."""
    def __init__(self, *args, diagnostic_keys=['loss'], **kwargs):
        # diagnostic_keys: summary sections tracked over time.
        super().__init__(*args)
        self.diagnostic_keys = diagnostic_keys
        self.tracker = DataTracker(label=self.key)
    def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
        """Append the tracked summary sections at this step."""
        for key in self.diagnostic_keys:
            self.tracker.append(global_step, summary[key])
    def plot(self, *args, **kwargs):
        """Delegate curve drawing to the underlying DataTracker."""
        self.tracker.plot(*args, **kwargs)
    def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
        # Convert a CHW tensor to HWC numpy and save it as <key>.png in logdir.
        img = img_tensor.data.permute(1, 2, 0).cpu().numpy()
        matplotlib.image.imsave(os.path.join(self.logdir, f"{key}.png"), img)
class PlotHandler(List):
    """Collection of PlotLoggers rendering all their curves into one PNG."""
    def __init__(self, logdir, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.path = os.path.join(logdir, "curves.png")
    def plot(self):
        """Draw one subplot per tracked key with one curve per logger."""
        if len(self):
            # Keys come from the first logger; assumes all loggers track
            # the same keys — TODO confirm.
            logger = self[0]
            keys = logger.tracker.data.keys()
            plt.figure(figsize=(4 * len(keys), 3))
            for i, key in enumerate(keys):
                plt.subplot(1, len(keys), i + 1)
                plt.title(key)
                for logger in self:
                    logger.plot(key)
                plt.legend()
            plt.savefig(self.path)
class Logger(BaseLogger):
    """Facade fanning log calls out to tensorboard/logging/plot backends."""
    def __init__(self, key, logdir, tensorboard=True, logging=True, plot=True, **kwargs):
        # Each flag toggles one backend; extra kwargs reach every backend.
        super().__init__(key, logdir)
        self.loggers = []
        if tensorboard:
            self.loggers += [TensorboardLogger(key, logdir, **kwargs)]
        if logging:
            self.loggers += [LoggingLogger(key, logdir, **kwargs)]
        if plot:
            self.loggers += [PlotLogger(key, logdir, **kwargs)]
    def log_diagnostic(self, *args, **kwargs):
        """Forward to every enabled backend."""
        for logger in self.loggers:
            logger.log_diagnostic(*args, **kwargs)
    def log_image(self, *args, **kwargs):
        """Forward to every enabled backend."""
        for logger in self.loggers:
            logger.log_image(*args, **kwargs)
class LoggerManager():
    """Create and dispatch to one Logger per key, replotting curves on update."""
    def __init__(self, logdir, **kwargs):
        self.logdir = logdir
        self.kwargs = kwargs
        self.loggers = {}
        self.plot_handler = PlotHandler(self.logdir)
    def init_logger(self, key):
        """Instantiate the Logger for *key* and register its PlotLogger."""
        self.loggers[key] = Logger(key, self.logdir, **self.kwargs)
        # mappend PlotLogger to PlotHandler
        for logger in self.loggers[key].loggers:
            if isinstance(logger, PlotLogger):
                self.plot_handler.append(logger)
    def log_diagnostic(self, key, step, epoch, summary, **kwargs):
        """Lazily create the logger for *key*, log, then refresh the curves PNG."""
        if key not in self.loggers:
            self.init_logger(key)
        self.loggers[key].log_diagnostic(step, epoch, summary, **kwargs)
        self.plot_handler.plot()
    def log_image(self, key, image_key, step, epoch, img_tensor, **kwargs):
        """Lazily create the logger for *key* and forward the image."""
        if key not in self.loggers:
            self.init_logger(key)
        self.loggers[key].log_image(image_key, step, epoch, img_tensor, **kwargs)
| import logging
import os
import sys
import warnings
from collections import namedtuple
from typing import *
import matplotlib.image
import matplotlib.pyplot as plt
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from booster import Diagnostic
from .datatracker import DataTracker
# Best validation result seen so far: the step/epoch it occurred at, its
# value, and the full summary recorded at that point.
BestScore = namedtuple('BestScore', ['step', 'epoch', 'value', 'summary'])
class BaseLogger():
    """Abstract interface for experiment loggers identified by (key, logdir)."""

    def __init__(self, key, logdir):
        # `key` names the logger stream; `logdir` is the output root directory.
        self.key = key
        self.logdir = logdir

    def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
        """Record a Diagnostic summary. Subclasses must override."""
        raise NotImplementedError

    def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
        """Record an image tensor. Subclasses must override."""
        raise NotImplementedError
class TensorboardLogger(BaseLogger):
    """Logger backed by a torch.utils.tensorboard SummaryWriter."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args)
        # One event-file directory per key, nested under logdir.
        self.writer = SummaryWriter(os.path.join(self.logdir, self.key))

    def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
        """Let the Diagnostic write its own scalars to the SummaryWriter."""
        summary.log(self.writer, global_step)

    def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
        """Write `img_tensor` to tensorboard under `key`."""
        self.writer.add_image(key, img_tensor, global_step=global_step)
class LoggingLogger(BaseLogger):
    """Logger that prints diagnostics through the standard `logging` module."""

    def __init__(self, *args, diagnostic_keys=None, **kwargs):
        """
        Args:
            *args: forwarded to BaseLogger (key, logdir).
            diagnostic_keys: summary keys to report (defaults to ['loss']).
        """
        super().__init__(*args)
        self.logger = logging.getLogger(self.key)
        self.logger.setLevel(logging.INFO)
        # Avoid a shared mutable default argument.
        self.diagnostic_keys = ['loss'] if diagnostic_keys is None else diagnostic_keys

    def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, best_score: Optional[BestScore] = None,
                       **kwargs):
        """Log one line per tracked key: step/epoch, the key's stats, timing, best score."""
        for stats_key in self.diagnostic_keys:
            if stats_key not in summary.keys():
                self.logger.warning('key ' + str(stats_key) + ' not in summary.')
            else:
                message = f'[{global_step} / {epoch}] '
                message += ''.join([f'{k} {v:6.2f} ' for k, v in summary.get(stats_key).items()])
                if "info" in summary.keys() and "elapsed-time" in summary["info"].keys():
                    message += f'({summary["info"]["elapsed-time"]:.2f}s /iter)'
                else:
                    warnings.warn(
                        "Summary does not contain the key info/elapsed-time. The elapsed time won't be displayed.")
                if best_score is not None:
                    message += f' (best: {best_score.value:6.2f} [{best_score.step} | {best_score.epoch}])'
                self.logger.info(message)

    def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
        """Images cannot be represented in text logs; intentionally a no-op."""
        pass
class PlotLogger(BaseLogger):
    """Logger that records diagnostics in a DataTracker and saves images to disk."""

    def __init__(self, *args, diagnostic_keys=None, **kwargs):
        """
        Args:
            *args: forwarded to BaseLogger (key, logdir).
            diagnostic_keys: summary keys to track (defaults to ['loss']).
        """
        super().__init__(*args)
        # Avoid a shared mutable default argument.
        self.diagnostic_keys = ['loss'] if diagnostic_keys is None else diagnostic_keys
        self.tracker = DataTracker(label=self.key)

    def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
        """Append each tracked summary entry at `global_step`."""
        for key in self.diagnostic_keys:
            self.tracker.append(global_step, summary[key])

    def plot(self, *args, **kwargs):
        """Delegate plotting to the underlying tracker."""
        self.tracker.plot(*args, **kwargs)

    def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
        """Save `img_tensor` (permuted CHW -> HWC) as `<logdir>/<key>.png`."""
        img = img_tensor.data.permute(1, 2, 0).cpu().numpy()
        matplotlib.image.imsave(os.path.join(self.logdir, f"{key}.png"), img)
class PlotHandler(List):
    """A list of PlotLoggers that renders all tracked curves into one figure."""

    def __init__(self, logdir, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # All curves are written to a single image file.
        self.path = os.path.join(logdir, "curves.png")

    def plot(self):
        """Render one subplot per tracked key, one curve per registered logger."""
        if not len(self):
            return
        keys = self[0].tracker.data.keys()
        fig = plt.figure(figsize=(4 * len(keys), 3))
        for i, key in enumerate(keys):
            plt.subplot(1, len(keys), i + 1)
            plt.title(key)
            for logger in self:
                logger.plot(key)
            plt.legend()
        plt.savefig(self.path)
        # Close the figure: plot() is invoked after every diagnostic, and
        # leaving figures open leaks memory across calls.
        plt.close(fig)
class Logger(BaseLogger):
    """Composite logger that fans out to tensorboard, console and plot loggers."""

    def __init__(self, key, logdir, tensorboard=True, logging=True, plot=True, **kwargs):
        """Build the enabled sub-loggers for (key, logdir)."""
        super().__init__(key, logdir)
        enabled = [
            (tensorboard, TensorboardLogger),
            (logging, LoggingLogger),
            (plot, PlotLogger),
        ]
        self.loggers = [cls(key, logdir, **kwargs) for flag, cls in enabled if flag]

    def log_diagnostic(self, *args, **kwargs):
        """Forward a diagnostic record to every sub-logger."""
        for sub_logger in self.loggers:
            sub_logger.log_diagnostic(*args, **kwargs)

    def log_image(self, *args, **kwargs):
        """Forward an image to every sub-logger."""
        for sub_logger in self.loggers:
            sub_logger.log_image(*args, **kwargs)
class LoggerManager():
    """Creates one Logger per key on demand and keeps shared plotting state."""

    def __init__(self, logdir, **kwargs):
        self.logdir = logdir
        self.kwargs = kwargs
        self.loggers = {}
        self.plot_handler = PlotHandler(self.logdir)

    def init_logger(self, key):
        """Create the Logger for `key` and register its PlotLogger for shared plotting."""
        new_logger = Logger(key, self.logdir, **self.kwargs)
        self.loggers[key] = new_logger
        # Map each PlotLogger into the shared PlotHandler.
        for sub_logger in new_logger.loggers:
            if isinstance(sub_logger, PlotLogger):
                self.plot_handler.append(sub_logger)

    def log_diagnostic(self, key, step, epoch, summary, **kwargs):
        """Log a diagnostic under `key`, creating the logger lazily, then refresh plots."""
        if key not in self.loggers:
            self.init_logger(key)
        self.loggers[key].log_diagnostic(step, epoch, summary, **kwargs)
        self.plot_handler.plot()

    def log_image(self, key, image_key, step, epoch, img_tensor, **kwargs):
        """Log an image under `key`, creating the logger lazily."""
        if key not in self.loggers:
            self.init_logger(key)
        self.loggers[key].log_image(image_key, step, epoch, img_tensor, **kwargs)
|
"""Test the creation of all inventories."""
import stewi
from stewi.globals import paths, STEWI_VERSION, config
# Inventory year used for all generation tests.
year = 2018
def test_inventory_generation():
    """Generate every configured inventory for `year` and assert none comes back empty."""
    # Create new local path so generated data does not clobber an existing store.
    paths.local_path = paths.local_path + "_" + STEWI_VERSION
    error_list = []
    for inventory in config()['databases']:
        # skip RCRAInfo due to browser download
        if inventory in ['RCRAInfo']:
            continue
        df = stewi.getInventory(inventory, year)
        error = df is None
        if not error:
            error = len(df) == 0
        if error:
            error_list.append(inventory)
    # Use single quotes inside the f-string: reusing the outer double quote is
    # a SyntaxError on Python < 3.12.
    assert len(error_list) == 0, f"Generation of {','.join(error_list)} unsuccessful"
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_inventory_generation()
| """Test the creation of all inventories."""
import stewi
from stewi.globals import paths, STEWI_VERSION, config
# Inventory year used for all generation tests.
year = 2018
def test_inventory_generation():
    """Generate every configured inventory for `year` and assert none comes back empty."""
    # Use a version-specific local path so generated data is kept separate.
    paths.local_path = paths.local_path + "_" + STEWI_VERSION
    error_list = []
    for inventory in config()['databases']:
        if inventory in ['RCRAInfo']:
            # RCRAInfo requires a browser download; skip it here.
            continue
        df = stewi.getInventory(inventory, year)
        if df is None or len(df) == 0:
            error_list.append(inventory)
    assert len(error_list) == 0, f"Generation of {','.join(error_list)} unsuccessful"
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_inventory_generation()
|
"""Report routes."""
import os
from urllib import parse
import bottle
import requests
from pymongo.database import Database
from database import sessions
from database.datamodels import latest_datamodel
from database.measurements import recent_measurements_by_metric_uuid
from database.reports import insert_new_report, latest_reports
from initialization.report import import_json_report
from model.actions import copy_report
from model.data import ReportData
from model.transformations import hide_credentials, summarize_report
from server_utilities.functions import report_date_time, uuid
from server_utilities.type import ReportId
@bottle.post("/api/v3/report/import")
def post_report_import(database: Database):
    """Import a preconfigured report into the database."""
    report_json = dict(bottle.request.json)
    result = import_json_report(database, report_json)
    # Echo the imported report's uuid back to the caller.
    result["new_report_uuid"] = report_json["report_uuid"]
    return result
@bottle.post("/api/v3/report/new")
def post_report_new(database: Database):
    """Add a new report."""
    report_uuid = uuid()
    user = sessions.user(database)
    # Use single quotes inside the f-string: nesting the outer double quote is
    # a SyntaxError on Python < 3.12 (and inconsistent with the other routes).
    report = dict(
        report_uuid=report_uuid, title="New report", subjects={},
        delta=dict(uuids=[report_uuid], email=user["email"], description=f"{user['user']} created a new report."))
    result = insert_new_report(database, report)
    result["new_report_uuid"] = report_uuid
    return result
@bottle.post("/api/v3/report/<report_uuid>/copy")
def post_report_copy(report_uuid: ReportId, database: Database):
    """Copy a report."""
    data_model = latest_datamodel(database)
    reports = latest_reports(database)
    data = ReportData(data_model, reports, report_uuid)
    report_copy = copy_report(data.report, data.datamodel)
    user = sessions.user(database)
    # The delta records both the source and the new report uuid for auditing.
    # Single quotes inside the f-string: nesting the outer double quote is a
    # SyntaxError on Python < 3.12.
    report_copy["delta"] = dict(
        uuids=[report_uuid, report_copy["report_uuid"]], email=user["email"],
        description=f"{user['user']} copied the report '{data.report_name}'.")
    result = insert_new_report(database, report_copy)
    result["new_report_uuid"] = report_copy["report_uuid"]
    return result
@bottle.get("/api/v3/report/<report_uuid>/pdf")
def export_report_as_pdf(report_uuid: ReportId):
    """Download the report as pdf via the external renderer service."""
    env = os.environ
    render_url = f"http://{env.get('RENDERER_HOST', 'renderer')}:{env.get('RENDERER_PORT', '9000')}/api/render"
    proxy = f"{env.get('PROXY_HOST', 'www')}:{env.get('PROXY_PORT', '80')}"
    query_string = f"?{bottle.request.query_string}" if bottle.request.query_string else ""
    # The report URL travels as a query parameter, so percent-encode it.
    report_url = parse.quote(f"http://{proxy}/{report_uuid}{query_string}")
    margins = "&".join(f"pdf.margin.{side}=25" for side in ("top", "bottom", "left", "right"))
    # Set pdf scale to 70% or otherwise the dashboard falls off the page
    options = f"emulateScreenMedia=false&goto.timeout=60000&pdf.scale=0.7&{margins}"
    response = requests.get(f"{render_url}?url={report_url}&{options}")
    response.raise_for_status()
    bottle.response.content_type = "application/pdf"
    return response.content
@bottle.delete("/api/v3/report/<report_uuid>")
def delete_report(report_uuid: ReportId, database: Database):
    """Delete a report (soft delete: mark it deleted and record the change)."""
    data_model = latest_datamodel(database)
    reports = latest_reports(database)
    data = ReportData(data_model, reports, report_uuid)
    data.report["deleted"] = "true"
    user = sessions.user(database)
    # Single quotes inside the f-string: nesting the outer double quote is a
    # SyntaxError on Python < 3.12.
    data.report["delta"] = dict(
        uuids=[report_uuid], email=user["email"],
        description=f"{user['user']} deleted the report '{data.report_name}'.")
    return insert_new_report(database, data.report)
@bottle.post("/api/v3/report/<report_uuid>/attribute/<report_attribute>")
def post_report_attribute(report_uuid: ReportId, report_attribute: str, database: Database):
    """Set a report attribute."""
    data_model = latest_datamodel(database)
    reports = latest_reports(database)
    data = ReportData(data_model, reports, report_uuid)
    value = dict(bottle.request.json)[report_attribute]
    old_value = data.report.get(report_attribute) or ""
    data.report[report_attribute] = value
    # Layout changes are too noisy to describe value-by-value.
    value_change_description = "" if report_attribute == "layout" else f" from '{old_value}' to '{value}'"
    user = sessions.user(database)
    # Single quotes inside the f-string: nesting the outer double quote is a
    # SyntaxError on Python < 3.12.
    data.report["delta"] = dict(
        uuids=[report_uuid], email=user["email"],
        description=f"{user['user']} changed the {report_attribute} of report '{data.report_name}'"
                    f"{value_change_description}.")
    return insert_new_report(database, data.report)
@bottle.get("/api/v3/tagreport/<tag>")
def get_tag_report(tag: str, database: Database):
    """Get a read-only report containing all metrics that carry the given tag."""
    date_time = report_date_time()
    data_model = latest_datamodel(database, date_time)
    reports = latest_reports(database, date_time)
    tag_report = dict(
        title=f'Report for tag "{tag}"', subtitle="Note: tag reports are read-only", report_uuid=f"tag-{tag}",
        timestamp=date_time, subjects=_get_subjects_and_metrics_by_tag(data_model, reports, tag))
    # Strip secrets before summarizing, as the result is returned to the client.
    hide_credentials(data_model, tag_report)
    summarize_report(tag_report, recent_measurements_by_metric_uuid(database, date_time), data_model)
    return tag_report
def _get_subjects_and_metrics_by_tag(data_model, reports, tag: str):
"""Return all subjects and metrics that have the tag."""
subjects = {}
for report in reports:
for subject_uuid, subject in list(report.get("subjects", {}).items()):
for metric_uuid, metric in list(subject.get("metrics", {}).items()):
if tag not in metric.get("tags", []):
del subject["metrics"][metric_uuid]
if subject.get("metrics", {}):
subject_name = subject.get("name") or data_model["subjects"][subject["type"]]["name"]
subject["name"] = report["title"] + " / " + subject_name
subjects[subject_uuid] = subject
return subjects
| """Report routes."""
import os
from urllib import parse
import bottle
import requests
from pymongo.database import Database
from database import sessions
from database.datamodels import latest_datamodel
from database.measurements import recent_measurements_by_metric_uuid
from database.reports import insert_new_report, latest_reports
from initialization.report import import_json_report
from model.actions import copy_report
from model.data import ReportData
from model.transformations import hide_credentials, summarize_report
from server_utilities.functions import report_date_time, uuid
from server_utilities.type import ReportId
@bottle.post("/api/v3/report/import")
def post_report_import(database: Database):
    """Import a preconfigured report into the database."""
    report_json = dict(bottle.request.json)
    result = import_json_report(database, report_json)
    # Echo the imported report's uuid back to the caller.
    result["new_report_uuid"] = report_json["report_uuid"]
    return result
@bottle.post("/api/v3/report/new")
def post_report_new(database: Database):
    """Create an empty report and return its new uuid."""
    new_uuid = uuid()
    user = sessions.user(database)
    delta = dict(uuids=[new_uuid], email=user["email"], description=f"{user['user']} created a new report.")
    report = dict(report_uuid=new_uuid, title="New report", subjects={}, delta=delta)
    result = insert_new_report(database, report)
    result["new_report_uuid"] = new_uuid
    return result
@bottle.post("/api/v3/report/<report_uuid>/copy")
def post_report_copy(report_uuid: ReportId, database: Database):
    """Copy a report and return the copy's uuid."""
    data = ReportData(latest_datamodel(database), latest_reports(database), report_uuid)
    report_copy = copy_report(data.report, data.datamodel)
    user = sessions.user(database)
    copy_uuid = report_copy["report_uuid"]
    # Record both the source and the copy uuid in the change description.
    report_copy["delta"] = dict(
        uuids=[report_uuid, copy_uuid], email=user["email"],
        description=f"{user['user']} copied the report '{data.report_name}'.")
    result = insert_new_report(database, report_copy)
    result["new_report_uuid"] = copy_uuid
    return result
@bottle.get("/api/v3/report/<report_uuid>/pdf")
def export_report_as_pdf(report_uuid: ReportId):
    """Download the report as pdf via the external renderer service."""
    env = os.environ
    render_url = f"http://{env.get('RENDERER_HOST', 'renderer')}:{env.get('RENDERER_PORT', '9000')}/api/render"
    proxy = f"{env.get('PROXY_HOST', 'www')}:{env.get('PROXY_PORT', '80')}"
    query_string = f"?{bottle.request.query_string}" if bottle.request.query_string else ""
    # The report URL travels as a query parameter, so percent-encode it.
    report_url = parse.quote(f"http://{proxy}/{report_uuid}{query_string}")
    margins = "&".join(f"pdf.margin.{side}=25" for side in ("top", "bottom", "left", "right"))
    # Set pdf scale to 70% or otherwise the dashboard falls off the page
    options = f"emulateScreenMedia=false&goto.timeout=60000&pdf.scale=0.7&{margins}"
    response = requests.get(f"{render_url}?url={report_url}&{options}")
    response.raise_for_status()
    bottle.response.content_type = "application/pdf"
    return response.content
@bottle.delete("/api/v3/report/<report_uuid>")
def delete_report(report_uuid: ReportId, database: Database):
    """Mark a report as deleted and record the change."""
    data = ReportData(latest_datamodel(database), latest_reports(database), report_uuid)
    data.report["deleted"] = "true"
    user = sessions.user(database)
    data.report["delta"] = dict(
        uuids=[report_uuid], email=user["email"],
        description=f"{user['user']} deleted the report '{data.report_name}'.")
    return insert_new_report(database, data.report)
@bottle.post("/api/v3/report/<report_uuid>/attribute/<report_attribute>")
def post_report_attribute(report_uuid: ReportId, report_attribute: str, database: Database):
    """Set a single report attribute and record the change."""
    data = ReportData(latest_datamodel(database), latest_reports(database), report_uuid)
    value = dict(bottle.request.json)[report_attribute]
    old_value = data.report.get(report_attribute) or ""
    data.report[report_attribute] = value
    if report_attribute == "layout":
        # Layout changes are too noisy to describe value-by-value.
        value_change_description = ""
    else:
        value_change_description = f" from '{old_value}' to '{value}'"
    user = sessions.user(database)
    data.report["delta"] = dict(
        uuids=[report_uuid], email=user["email"],
        description=f"{user['user']} changed the {report_attribute} of report '{data.report_name}'"
                    f"{value_change_description}.")
    return insert_new_report(database, data.report)
@bottle.get("/api/v3/tagreport/<tag>")
def get_tag_report(tag: str, database: Database):
    """Get a read-only report containing all metrics that carry the given tag."""
    date_time = report_date_time()
    data_model = latest_datamodel(database, date_time)
    reports = latest_reports(database, date_time)
    tag_report = dict(
        title=f'Report for tag "{tag}"', subtitle="Note: tag reports are read-only", report_uuid=f"tag-{tag}",
        timestamp=date_time, subjects=_get_subjects_and_metrics_by_tag(data_model, reports, tag))
    # Strip secrets before summarizing, as the result is returned to the client.
    hide_credentials(data_model, tag_report)
    summarize_report(tag_report, recent_measurements_by_metric_uuid(database, date_time), data_model)
    return tag_report
def _get_subjects_and_metrics_by_tag(data_model, reports, tag: str):
"""Return all subjects and metrics that have the tag."""
subjects = {}
for report in reports:
for subject_uuid, subject in list(report.get("subjects", {}).items()):
for metric_uuid, metric in list(subject.get("metrics", {}).items()):
if tag not in metric.get("tags", []):
del subject["metrics"][metric_uuid]
if subject.get("metrics", {}):
subject_name = subject.get("name") or data_model["subjects"][subject["type"]]["name"]
subject["name"] = report["title"] + " / " + subject_name
subjects[subject_uuid] = subject
return subjects
|
import glob
import shutil
import subprocess
import os
import sys
import argparse
# Read and save metadata from file
def exiftool_metadata(path):
    """Run exifTool on `path` and return its "Key : Value" output as a dict."""
    metadata = {}
    exif_tool_path = 'exifTool.exe'
    # Use ExifTool to read the metadata; stderr is folded into stdout.
    process = subprocess.Popen(
        [
            exif_tool_path,
            path
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True
    )
    # Parse "Key : Value" lines into the dict (split on the first colon).
    for tag in process.stdout:
        tag = tag.strip()
        key = tag[:tag.find(':')].strip()
        value = tag[tag.find(':') + 1:].strip()
        metadata[key] = value
    # Reap the child process so it does not linger as a zombie.
    process.wait()
    return metadata
class File:
    """Wraps a file's exiftool metadata and copies it into a MIME/date tree."""

    def __init__(self, path):
        self.metadata = exiftool_metadata(path)

    def _get_file_metadata(self, key, no=''):
        """Return the metadata value for `key`, or `no` when the key is missing."""
        if key in self.metadata:
            return self.metadata[key]
        else:
            return no

    def copyCore(self, source, dst_dir: str, copy_duplicate=False):
        """Copy `source` into `dst_dir`/<mime type>/<year>/<month>/.

        When a same-named file already exists, a numbered suffix (_D0, _D1, ...)
        is appended — unless it has the same size and `copy_duplicate` is False,
        in which case the copy is skipped. Returns a list of log messages.
        """
        logs = []
        # if value of metadata not exists - folder name
        no_metadata = 'none'
        date = self._get_file_metadata('Date/Time Original')
        if date == '':
            date = self._get_file_metadata('Create Date', no_metadata)
        mime_type = self._get_file_metadata('MIME Type', no_metadata)
        dst_dir += f'''/{mime_type[:mime_type.find('/')]}/{date[:4]}/{date[5:7]}'''
        filename = self._get_file_metadata('File Name')
        f_name = filename
        dst = dst_dir + '/' + filename
        # File with the same name exists in dst. If source and dst have same size then determines 'copy_duplicate'
        if os.path.isfile(dst):
            i = 0
            f_pth = File(dst)
            if_same_size: bool = f_pth._get_file_metadata("File Size") == self._get_file_metadata('File Size')
            if (not if_same_size) or copy_duplicate:
                while os.path.isfile(dst):
                    filename = f'''{f_name[:f_name.find('.')]}_D{str(i)}.{self._get_file_metadata('File Type Extension')}'''
                    # Bug fix: the destination must use the freshly generated
                    # filename — the previous constant path made this loop
                    # non-terminating and discarded the deduplicated name.
                    dst = f'''{dst_dir}/{filename}'''
                    i = i + 1
                if if_same_size:
                    logs.append(f"Warning: file already exists but I must copy all files"
                                f" [copy_duplicate={copy_duplicate}], so I try do it ...")
                else:
                    logs.append(f"Warning: file already exists but have other size, so I try copy it ...")
            else:
                logs.append(f"Warning: file already duplicate [copy_exists={copy_duplicate}]."
                            f"\nCopy aboard: {source} -> {dst}")
                return logs
        try:
            if not os.path.isdir(dst_dir):
                os.makedirs(dst_dir)
                logs.append(f"New directory created: {dst_dir}")
            shutil.copy(source, dst)
            logs.append(f'''Copy done: {source} -> {dst}''')
        except Exception as e:
            logs.append(f'''Copy error [{e}]: {source} -> {dst}''')
        return logs
def main():
    """Parse CLI arguments and copy every file found under the source tree."""
    # Arguments from console
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', help="Obligatory: source directory path")
    parser.add_argument('-d', help="Obligatory: destination folder path")
    parser.add_argument('-e', help="Obligatory: copy duplicate files (T/True/F/False)")
    args = parser.parse_args(sys.argv[1:])
    # Setup variables
    source_dir = args.s
    dst_dir = args.d
    flag_values = {
        "T": True,
        "TRUE": True,
        "F": False,
        "FALSE": False
    }
    try:
        copy_duplicate = flag_values.get(args.e.upper(), False)
    except AttributeError:
        # args.e is None when -e was not supplied.
        print("app.py: error: unrecognized arguments. Use -h or --help to see options")
        sys.exit(1)
    # Number of log lines printed so far.
    l_lpm = 0
    for f_inx, source in enumerate(glob.glob(source_dir + '/**/*.*', recursive=True)):
        try:
            f = File(source)
            print("----------")
            for log in f.copyCore(source, dst_dir, copy_duplicate):
                l_lpm = l_lpm + 1
                print(f'''{str(l_lpm)}.{f_inx + 1}) {log}''')
        except Exception as e:
            print(f'Copy error [{e}]: {source}')
# Script entry point.
if __name__ == '__main__':
    main()
| import glob
import shutil
import subprocess
import os
import sys
import argparse
# Read and save metadata from file
def exiftool_metadata(path):
    """Run exifTool on `path` and return its "Key : Value" output as a dict."""
    metadata = {}
    exif_tool_path = 'exifTool.exe'
    # Use ExifTool to read the metadata; stderr is folded into stdout.
    process = subprocess.Popen(
        [
            exif_tool_path,
            path
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True
    )
    # Parse "Key : Value" lines into the dict (split on the first colon).
    for tag in process.stdout:
        tag = tag.strip()
        key = tag[:tag.find(':')].strip()
        value = tag[tag.find(':') + 1:].strip()
        metadata[key] = value
    # Reap the child process so it does not linger as a zombie.
    process.wait()
    return metadata
class File:
    """Wraps a file's exiftool metadata and copies it into a MIME/date tree."""

    def __init__(self, path):
        self.metadata = exiftool_metadata(path)

    def _get_file_metadata(self, key, no=''):
        """Return the metadata value for `key`, or `no` when the key is missing."""
        if key in self.metadata:
            return self.metadata[key]
        else:
            return no

    def copyCore(self, source, dst_dir: str, copy_duplicate=False):
        """Copy `source` into `dst_dir`/<mime type>/<year>/<month>/.

        When a same-named file already exists, a numbered suffix (_D0, _D1, ...)
        is appended — unless it has the same size and `copy_duplicate` is False,
        in which case the copy is skipped. Returns a list of log messages.
        """
        logs = []
        # if value of metadata not exists - folder name
        no_metadata = 'none'
        date = self._get_file_metadata('Date/Time Original')
        if date == '':
            date = self._get_file_metadata('Create Date', no_metadata)
        mime_type = self._get_file_metadata('MIME Type', no_metadata)
        dst_dir += f'''/{mime_type[:mime_type.find('/')]}/{date[:4]}/{date[5:7]}'''
        filename = self._get_file_metadata('File Name')
        f_name = filename
        dst = dst_dir + '/' + filename
        # File with the same name exists in dst. If source and dst have same size then determines 'copy_duplicate'
        if os.path.isfile(dst):
            i = 0
            f_pth = File(dst)
            if_same_size: bool = f_pth._get_file_metadata("File Size") == self._get_file_metadata('File Size')
            if (not if_same_size) or copy_duplicate:
                while os.path.isfile(dst):
                    filename = f'''{f_name[:f_name.find('.')]}_D{str(i)}.{self._get_file_metadata('File Type Extension')}'''
                    # Bug fix: the destination must use the freshly generated
                    # filename — the previous constant path made this loop
                    # non-terminating and discarded the deduplicated name.
                    dst = f'''{dst_dir}/{filename}'''
                    i = i + 1
                if if_same_size:
                    logs.append(f"Warning: file already exists but I must copy all files"
                                f" [copy_duplicate={copy_duplicate}], so I try do it ...")
                else:
                    logs.append(f"Warning: file already exists but have other size, so I try copy it ...")
            else:
                logs.append(f"Warning: file already duplicate [copy_exists={copy_duplicate}]."
                            f"\nCopy aboard: {source} -> {dst}")
                return logs
        try:
            if not os.path.isdir(dst_dir):
                os.makedirs(dst_dir)
                logs.append(f"New directory created: {dst_dir}")
            shutil.copy(source, dst)
            logs.append(f'''Copy done: {source} -> {dst}''')
        except Exception as e:
            logs.append(f'''Copy error [{e}]: {source} -> {dst}''')
        return logs
def main():
    """Parse CLI arguments and copy every file found under the source tree."""
    # Arguments from console
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', help="Obligatory: source directory path")
    parser.add_argument('-d', help="Obligatory: destination folder path")
    parser.add_argument('-e', help="Obligatory: copy duplicate files (T/True/F/False)")
    args = parser.parse_args(sys.argv[1:])
    # Setup variables
    source_dir = args.s
    dst_dir = args.d
    flag_values = {
        "T": True,
        "TRUE": True,
        "F": False,
        "FALSE": False
    }
    try:
        copy_duplicate = flag_values.get(args.e.upper(), False)
    except AttributeError:
        # args.e is None when -e was not supplied.
        print("app.py: error: unrecognized arguments. Use -h or --help to see options")
        sys.exit(1)
    # Number of log lines printed so far.
    l_lpm = 0
    for f_inx, source in enumerate(glob.glob(source_dir + '/**/*.*', recursive=True)):
        try:
            f = File(source)
            print("----------")
            for log in f.copyCore(source, dst_dir, copy_duplicate):
                l_lpm = l_lpm + 1
                print(f'''{str(l_lpm)}.{f_inx + 1}) {log}''')
        except Exception as e:
            print(f'Copy error [{e}]: {source}')
# Script entry point.
if __name__ == '__main__':
    main()
|
import os
import shutil
import subprocess
import re
import string
import pathlib
import timeit
import jmhbenchmark
class JHaskellBenchmark(jmhbenchmark.JMHBenchmark):
    """JMH benchmark driver that builds a Haskell source with the JHaskell compiler."""

    def __init__(self, name, source_path, compiler_args=None):
        """
        Args:
            name: benchmark name.
            source_path: path to the Haskell source file.
            compiler_args: extra arguments passed to the JHaskell compiler.
        """
        if compiler_args is None:
            compiler_args = []
        source_path = pathlib.Path(source_path)
        super().__init__(name, source_path.stem.lower(), source_path.stem.capitalize())
        self._source_path = source_path
        # Copy so later mutation by the caller cannot affect this benchmark.
        self._compiler_args = compiler_args.copy()

    def __enter__(self):
        ret = super().__enter__()
        # The output jar lives in the benchmark's temporary directory.
        self._output_jar = (self._temp_dir / self._name).with_suffix(".jar")
        return ret

    def get_run_args(self):
        """JVM arguments used to run the compiled benchmark jar."""
        return ["-jar", f"{self._name}.jar"]

    def _compile(self):
        self._run_jhaskell_compiler()

    def _post_compile(self):
        # Record the total size of the classes that make up the program.
        self._results["size"] = jmhbenchmark.get_jar_entry_size(
            self._output_jar,
            [
                f"{self._package_name}/{s}.class"
                for s in [self._class_name, "Data", "Function", "BoxedData", "HeapObject"]
            ],
        )
        return super()._post_compile()

    def _get_classpath(self):
        return [f"{self._name}.jar"]

    def _run_jhaskell_compiler(self, extra_args=None):
        """Invoke `compiler-exe` on the benchmark program and return its stdout."""
        if extra_args is None:
            extra_args = []
        original_dir = pathlib.Path.cwd()
        # Build the source program. NOTE: the inner quotes must differ from the
        # outer ones — f"{... / "out"}" is a SyntaxError before Python 3.12.
        args = (
            [
                "compiler-exe",
                "--build-dir",
                f"{self._temp_dir / 'out'}",
                "--output-jar",
                str(self._output_jar),
                "--output-class",
                self._class_name,
                "--runtime-file-dir",
                str(original_dir.parent / "runtime"),
            ]
            + self._compiler_args
            + extra_args
            + [f"programs/{self._package_name}.hs"]
        )
        try:
            return subprocess.check_output(args)
        except subprocess.CalledProcessError as e:
            print(e.stdout.decode())
            raise

    # For JHaskell, time for each stage of the compiler
    def _benchmark_compilation(self, iterations=50):
        """Run the compiler `iterations` times and record per-stage timings."""
        number = 1
        # Record the output of each invocation
        outputs = []

        def bench_func():
            outputs.append(self._run_jhaskell_compiler(["--time-stages"]).decode())

        overall_times = timeit.repeat(stmt=bench_func, setup=self._pre_compile, number=number, repeat=iterations)
        time_data = []
        # Each stage line looks like "<stage name>: <milliseconds>ms".
        data_extractor = re.compile(r"(.+): (.+)ms")
        for output, overall_time in zip(outputs, overall_times):
            cumulative_time = 0
            this_run_data = []
            for line in output.splitlines():
                match = data_extractor.fullmatch(line)
                if match is None:
                    raise RuntimeError("Invalid line from compiler: " + line)
                this_time = float(match.group(2))
                this_run_data.append((match.group(1), this_time))
                cumulative_time += this_time
            #this_run_data.append(("Other", overall_time * 1000 - cumulative_time))
            time_data.append(this_run_data)
        self._results["times"] = time_data
| import os
import shutil
import subprocess
import re
import string
import pathlib
import timeit
import jmhbenchmark
class JHaskellBenchmark(jmhbenchmark.JMHBenchmark):
    """JMH benchmark driver that builds a Haskell source with the JHaskell compiler.

    NOTE(review): relies on state from the unseen JMHBenchmark base class
    (`_temp_dir`, `_package_name`, `_class_name`, `_results`, `_pre_compile`) —
    confirm their lifecycle against the base class before refactoring.
    """
    def __init__(self, name, source_path, compiler_args=None):
        """Record the source path and a private copy of the compiler arguments."""
        if compiler_args is None:
            compiler_args = []
        source_path = pathlib.Path(source_path)
        super().__init__(name, source_path.stem.lower(), source_path.stem.capitalize())
        self._source_path = source_path
        # Copy so later mutation by the caller cannot affect this benchmark.
        self._compiler_args = compiler_args.copy()
    def __enter__(self):
        ret = super().__enter__()
        # The output jar lives in the benchmark's temporary directory.
        self._output_jar = (self._temp_dir / self._name).with_suffix(".jar")
        return ret
    def get_run_args(self):
        """JVM arguments used to run the compiled benchmark jar."""
        return ["-jar", f"{self._name}.jar"]
    def _compile(self):
        self._run_jhaskell_compiler()
    def _post_compile(self):
        # Record the total size of the classes that make up the program.
        self._results["size"] = jmhbenchmark.get_jar_entry_size(
            self._output_jar,
            [
                f"{self._package_name}/{s}.class"
                for s in [self._class_name, "Data", "Function", "BoxedData", "HeapObject"]
            ],
        )
        return super()._post_compile()
    def _get_classpath(self):
        return [f"{self._name}.jar"]
    def _run_jhaskell_compiler(self, extra_args=None):
        """Invoke `compiler-exe` on the benchmark program and return its stdout."""
        if extra_args is None:
            extra_args = []
        original_dir = pathlib.Path.cwd()
        # Build the source program
        args = (
            [
                "compiler-exe",
                "--build-dir",
                f"{self._temp_dir / 'out'}",
                "--output-jar",
                str(self._output_jar),
                "--output-class",
                self._class_name,
                "--runtime-file-dir",
                str(original_dir.parent / "runtime"),
            ]
            + self._compiler_args
            + extra_args
            + [f"programs/{self._package_name}.hs"]
        )
        try:
            return subprocess.check_output(args)
        except subprocess.CalledProcessError as e:
            # Surface the compiler's output before propagating the failure.
            print(e.stdout.decode())
            raise
    # For JHaskell, time for each stage of the compiler
    def _benchmark_compilation(self, iterations=50):
        """Run the compiler `iterations` times and record per-stage timings in _results."""
        number = 1
        # Record the output of each invocation
        outputs = []
        def bench_func():
            outputs.append(self._run_jhaskell_compiler(["--time-stages"]).decode())
        overall_times = timeit.repeat(stmt=bench_func, setup=self._pre_compile, number=number, repeat=iterations)
        time_data = []
        # Each stage line looks like "<stage name>: <milliseconds>ms".
        data_extractor = re.compile(r"(.+): (.+)ms")
        for output, overall_time in zip(outputs, overall_times):
            cumulative_time = 0
            this_run_data = []
            for line in output.splitlines():
                match = data_extractor.fullmatch(line)
                if match is None:
                    raise RuntimeError("Invalid line from compiler: " + line)
                this_time = float(match.group(2))
                this_run_data.append((match.group(1), this_time))
                cumulative_time += this_time
            #this_run_data.append(("Other", overall_time * 1000 - cumulative_time))
            time_data.append(this_run_data)
        self._results["times"] = time_data
|
r"""
Early Stopping
^^^^^^^^^^^^^^
Monitor a validation metric and stop training when it stops improving.
"""
from copy import deepcopy
import numpy as np
import torch
import torch.distributed as dist
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn
# Positive infinity as a tensor; used as the initial "best score" sentinel.
torch_inf = torch.tensor(np.Inf)
# Detect optional TPU (torch_xla) support without failing when it is absent.
try:
    import torch_xla
    import torch_xla.core.xla_model as xm
except ImportError:
    XLA_AVAILABLE = False
else:
    XLA_AVAILABLE = True
class EarlyStopping(Callback):
    r"""
    Args:
        monitor: quantity to be monitored. Default: ``'val_loss'``.
            .. note:: Has no effect when using `EvalResult` or `TrainResult`
        min_delta: minimum change in the monitored quantity
            to qualify as an improvement, i.e. an absolute
            change of less than `min_delta`, will count as no
            improvement. Default: ``0.0``.
        patience: number of validation epochs with no improvement
            after which training will be stopped. Default: ``3``.
        verbose: verbosity mode. Default: ``False``.
        mode: one of {auto, min, max}. In `min` mode,
            training will stop when the quantity
            monitored has stopped decreasing; in `max`
            mode it will stop when the quantity
            monitored has stopped increasing; in `auto`
            mode, the direction is automatically inferred
            from the name of the monitored quantity. Default: ``'auto'``.
        strict: whether to crash the training if `monitor` is
            not found in the validation metrics. Default: ``True``.
    Example::
        >>> from pytorch_lightning import Trainer
        >>> from pytorch_lightning.callbacks import EarlyStopping
        >>> early_stopping = EarlyStopping('val_loss')
        >>> trainer = Trainer(early_stop_callback=early_stopping)
    """
    # comparison op per mode: 'min' means an improvement is a decrease
    mode_dict = {
        'min': torch.lt,
        'max': torch.gt,
    }

    def __init__(self, monitor: str = 'val_loss', min_delta: float = 0.0, patience: int = 3,
                 verbose: bool = False, mode: str = 'auto', strict: bool = True):
        super().__init__()
        self.monitor = monitor
        self.patience = patience
        self.verbose = verbose
        self.strict = strict
        self.min_delta = min_delta
        self.wait_count = 0
        self.stopped_epoch = 0
        self.mode = mode
        if mode not in self.mode_dict:
            if self.verbose > 0:
                log.info(f'EarlyStopping mode {mode} is unknown, fallback to auto mode.')
            self.mode = 'auto'
        if self.mode == 'auto':
            # infer the direction from the metric name: accuracy should increase
            if self.monitor == 'acc':
                self.mode = 'max'
            else:
                self.mode = 'min'
            if self.verbose > 0:
                log.info(f'EarlyStopping mode set to {self.mode} for monitoring {self.monitor}.')
        # sign min_delta so a single `monitor_op(current - min_delta, best)` works both ways
        self.min_delta *= 1 if self.monitor_op == torch.gt else -1
        self.best_score = torch_inf if self.monitor_op == torch.lt else -torch_inf

    def _validate_condition_metric(self, logs):
        """Return True when the monitored key is present in ``logs``; raise (strict) or warn otherwise."""
        monitor_val = logs.get(self.monitor)
        # BUG FIX: the join expression must not reuse the f-string's own quote
        # character inside the replacement field (SyntaxError before Python 3.12).
        error_msg = (f'Early stopping conditioned on metric `{self.monitor}`'
                     f' which is not available. Either add `{self.monitor}` to the return of '
                     f' validation_epoch end or modify your EarlyStopping callback to use any of the '
                     f'following: `{"`, `".join(list(logs.keys()))}`')
        if monitor_val is None:
            if self.strict:
                raise RuntimeError(error_msg)
            if self.verbose > 0:
                rank_zero_warn(error_msg, RuntimeWarning)
            return False
        return True

    @property
    def monitor_op(self):
        return self.mode_dict[self.mode]

    def state_dict(self):
        return {
            'wait_count': self.wait_count,
            'stopped_epoch': self.stopped_epoch,
            'best_score': self.best_score,
            'patience': self.patience
        }

    def load_state_dict(self, state_dict):
        state_dict = deepcopy(state_dict)
        self.wait_count = state_dict['wait_count']
        self.stopped_epoch = state_dict['stopped_epoch']
        self.best_score = state_dict['best_score']
        self.patience = state_dict['patience']

    def on_validation_end(self, trainer, pl_module):
        self._run_early_stopping_check(trainer, pl_module)

    def on_validation_epoch_end(self, trainer, pl_module):
        val_es_key = 'val_early_stop_on'
        if trainer.callback_metrics.get(val_es_key) is not None:
            self.monitor = val_es_key
        # disable strict checking when using structured results
        if val_es_key in trainer.callback_metrics:
            self.strict = False
        self._validate_condition_metric(trainer.callback_metrics)

    def on_train_epoch_end(self, trainer, pl_module):
        # disable early stopping in train loop when there's a val loop
        if self.monitor == 'val_early_stop_on':
            return
        # early stopping can also work in the train loop when there is no val loop and when using structured results
        should_check_early_stop = False
        train_es_key = 'early_stop_on'
        if trainer.callback_metrics.get(train_es_key, None) is not None:
            self.monitor = train_es_key
            should_check_early_stop = True
        if should_check_early_stop:
            self._run_early_stopping_check(trainer, pl_module)

    def _run_early_stopping_check(self, trainer, pl_module):
        logs = trainer.callback_metrics
        if not self._validate_condition_metric(logs):
            return  # short circuit if metric not present
        current = logs.get(self.monitor)
        # when in dev debugging
        trainer.dev_debugger.track_early_stopping_history(current)
        if not isinstance(current, torch.Tensor):
            current = torch.tensor(current, device=pl_module.device)
        if trainer.use_tpu and XLA_AVAILABLE:
            current = current.cpu()
        if self.monitor_op(current - self.min_delta, self.best_score):
            self.best_score = current
            self.wait_count = 0
        else:
            self.wait_count += 1
            should_stop = self.wait_count >= self.patience
            if bool(should_stop):
                self.stopped_epoch = trainer.current_epoch
                trainer.should_stop = True
        # stop every ddp process if any world process decides to stop
        self._stop_distributed_training(trainer, pl_module)

    def _stop_distributed_training(self, trainer, pl_module):
        # in ddp make sure all processes stop when one is flagged
        if trainer.use_ddp or trainer.use_ddp2:
            stop = torch.tensor(int(trainer.should_stop), device=pl_module.device)
            dist.all_reduce(stop, op=dist.reduce_op.SUM)
            dist.barrier()
            trainer.should_stop = stop == trainer.world_size
        if trainer.use_tpu:
            stop = torch.tensor(int(trainer.should_stop), device=pl_module.device, dtype=torch.int32)
            stop = xm.mesh_reduce("stop_signal", stop, torch.cat)
            torch_xla.core.xla_model.rendezvous("pl.EarlyStoppingCallback.stop_distributed_training_check")
            trainer.should_stop = int(stop.item()) == trainer.world_size

    def on_train_end(self, trainer, pl_module):
        if self.stopped_epoch > 0 and self.verbose > 0:
            rank_zero_warn('Displayed epoch numbers by `EarlyStopping` start from "1" until v0.6.x,'
                           ' but will start from "0" in v0.8.0.', DeprecationWarning)
            log.info(f'Epoch {self.stopped_epoch + 1:05d}: early stopping triggered.')
| r"""
Early Stopping
^^^^^^^^^^^^^^
Monitor a validation metric and stop training when it stops improving.
"""
from copy import deepcopy
import numpy as np
import torch
import torch.distributed as dist
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn
# Sentinel for "no best score observed yet"; replaced on the first improvement.
torch_inf = torch.tensor(np.Inf)
# Detect TPU support: torch_xla is only importable in XLA-enabled environments.
try:
    import torch_xla
    import torch_xla.core.xla_model as xm
except ImportError:
    XLA_AVAILABLE = False
else:
    XLA_AVAILABLE = True
class EarlyStopping(Callback):
    r"""
    Args:
        monitor: quantity to be monitored. Default: ``'val_loss'``.
            .. note:: Has no effect when using `EvalResult` or `TrainResult`
        min_delta: minimum change in the monitored quantity
            to qualify as an improvement, i.e. an absolute
            change of less than `min_delta`, will count as no
            improvement. Default: ``0.0``.
        patience: number of validation epochs with no improvement
            after which training will be stopped. Default: ``3``.
        verbose: verbosity mode. Default: ``False``.
        mode: one of {auto, min, max}. In `min` mode,
            training will stop when the quantity
            monitored has stopped decreasing; in `max`
            mode it will stop when the quantity
            monitored has stopped increasing; in `auto`
            mode, the direction is automatically inferred
            from the name of the monitored quantity. Default: ``'auto'``.
        strict: whether to crash the training if `monitor` is
            not found in the validation metrics. Default: ``True``.
    Example::
        >>> from pytorch_lightning import Trainer
        >>> from pytorch_lightning.callbacks import EarlyStopping
        >>> early_stopping = EarlyStopping('val_loss')
        >>> trainer = Trainer(early_stop_callback=early_stopping)
    """
    # comparison op per mode: 'min' means an improvement is a decrease
    mode_dict = {
        'min': torch.lt,
        'max': torch.gt,
    }
    def __init__(self, monitor: str = 'val_loss', min_delta: float = 0.0, patience: int = 3,
                 verbose: bool = False, mode: str = 'auto', strict: bool = True):
        super().__init__()
        self.monitor = monitor
        self.patience = patience
        self.verbose = verbose
        self.strict = strict
        self.min_delta = min_delta
        self.wait_count = 0
        self.stopped_epoch = 0
        self.mode = mode
        if mode not in self.mode_dict:
            if self.verbose > 0:
                log.info(f'EarlyStopping mode {mode} is unknown, fallback to auto mode.')
            self.mode = 'auto'
        if self.mode == 'auto':
            # infer direction from the metric name: accuracy should increase
            if self.monitor == 'acc':
                self.mode = 'max'
            else:
                self.mode = 'min'
            if self.verbose > 0:
                log.info(f'EarlyStopping mode set to {self.mode} for monitoring {self.monitor}.')
        # sign min_delta so a single `monitor_op(current - min_delta, best)` works both ways
        self.min_delta *= 1 if self.monitor_op == torch.gt else -1
        self.best_score = torch_inf if self.monitor_op == torch.lt else -torch_inf
    def _validate_condition_metric(self, logs):
        # Returns True when the monitored key is present; raises (strict) or warns otherwise.
        monitor_val = logs.get(self.monitor)
        error_msg = (f'Early stopping conditioned on metric `{self.monitor}`'
                     f' which is not available. Either add `{self.monitor}` to the return of '
                     f' validation_epoch end or modify your EarlyStopping callback to use any of the '
                     f'following: `{"`, `".join(list(logs.keys()))}`')
        if monitor_val is None:
            if self.strict:
                raise RuntimeError(error_msg)
            if self.verbose > 0:
                rank_zero_warn(error_msg, RuntimeWarning)
            return False
        return True
    @property
    def monitor_op(self):
        return self.mode_dict[self.mode]
    def state_dict(self):
        return {
            'wait_count': self.wait_count,
            'stopped_epoch': self.stopped_epoch,
            'best_score': self.best_score,
            'patience': self.patience
        }
    def load_state_dict(self, state_dict):
        # deep-copy so later mutation of the caller's dict cannot affect us
        state_dict = deepcopy(state_dict)
        self.wait_count = state_dict['wait_count']
        self.stopped_epoch = state_dict['stopped_epoch']
        self.best_score = state_dict['best_score']
        self.patience = state_dict['patience']
    def on_validation_end(self, trainer, pl_module):
        self._run_early_stopping_check(trainer, pl_module)
    def on_validation_epoch_end(self, trainer, pl_module):
        val_es_key = 'val_early_stop_on'
        if trainer.callback_metrics.get(val_es_key) is not None:
            self.monitor = val_es_key
        # disable strict checking when using structured results
        if val_es_key in trainer.callback_metrics:
            self.strict = False
        self._validate_condition_metric(trainer.callback_metrics)
    def on_train_epoch_end(self, trainer, pl_module):
        # disable early stopping in train loop when there's a val loop
        if self.monitor == 'val_early_stop_on':
            return
        # early stopping can also work in the train loop when there is no val loop and when using structured results
        should_check_early_stop = False
        train_es_key = 'early_stop_on'
        if trainer.callback_metrics.get(train_es_key, None) is not None:
            self.monitor = train_es_key
            should_check_early_stop = True
        if should_check_early_stop:
            self._run_early_stopping_check(trainer, pl_module)
    def _run_early_stopping_check(self, trainer, pl_module):
        logs = trainer.callback_metrics
        if not self._validate_condition_metric(logs):
            return  # short circuit if metric not present
        current = logs.get(self.monitor)
        # when in dev debugging
        trainer.dev_debugger.track_early_stopping_history(current)
        if not isinstance(current, torch.Tensor):
            current = torch.tensor(current, device=pl_module.device)
        if trainer.use_tpu and XLA_AVAILABLE:
            current = current.cpu()
        if self.monitor_op(current - self.min_delta, self.best_score):
            self.best_score = current
            self.wait_count = 0
        else:
            self.wait_count += 1
            should_stop = self.wait_count >= self.patience
            if bool(should_stop):
                self.stopped_epoch = trainer.current_epoch
                trainer.should_stop = True
        # stop every ddp process if any world process decides to stop
        self._stop_distributed_training(trainer, pl_module)
    def _stop_distributed_training(self, trainer, pl_module):
        # in ddp make sure all processes stop when one is flagged
        if trainer.use_ddp or trainer.use_ddp2:
            stop = torch.tensor(int(trainer.should_stop), device=pl_module.device)
            dist.all_reduce(stop, op=dist.reduce_op.SUM)
            dist.barrier()
            trainer.should_stop = stop == trainer.world_size
        if trainer.use_tpu:
            stop = torch.tensor(int(trainer.should_stop), device=pl_module.device, dtype=torch.int32)
            stop = xm.mesh_reduce("stop_signal", stop, torch.cat)
            torch_xla.core.xla_model.rendezvous("pl.EarlyStoppingCallback.stop_distributed_training_check")
            trainer.should_stop = int(stop.item()) == trainer.world_size
    def on_train_end(self, trainer, pl_module):
        if self.stopped_epoch > 0 and self.verbose > 0:
            rank_zero_warn('Displayed epoch numbers by `EarlyStopping` start from "1" until v0.6.x,'
                           ' but will start from "0" in v0.8.0.', DeprecationWarning)
            log.info(f'Epoch {self.stopped_epoch + 1:05d}: early stopping triggered.')
|
import base64
import os
import tkinter as tk
import tkinter.messagebox as msg
import tkinter.ttk as ttk
from functools import partial
from chatwindow import ChatWindow
from requester import Requester
from avatarwindow import AvatarWindow
from addfriendwindow import AddFriendWindow
# Cache directory for downloaded friend avatars, and the fallback image used
# when a friend has not uploaded one.
friend_avatars_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "images/friends"))
default_avatar_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "images/default.png"))
class FriendsList(tk.Tk):
    """Top-level window of the chat client.

    Shows a login/create-account form first, then a scrollable list of
    friends with per-friend Chat and Block buttons.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.title('Tk Chat')
        self.geometry('700x500')
        # the menubar is built up-front but only attached after a successful login
        self.menu = tk.Menu(self, bg="lightgrey", fg="black", tearoff=0)
        self.friends_menu = tk.Menu(self.menu, fg="black", bg="lightgrey", tearoff=0)
        self.friends_menu.add_command(label="Add Friend", command=self.show_add_friend_window)
        self.avatar_menu = tk.Menu(self.menu, fg="black", bg="lightgrey", tearoff=0)
        self.avatar_menu.add_command(label="Change Avatar", command=self.change_avatar)
        self.menu.add_cascade(label="Friends", menu=self.friends_menu)
        self.menu.add_cascade(label="Avatar", menu=self.avatar_menu)
        self.requester = Requester()
        self.show_login_screen()

    def show_login_screen(self):
        """Build and display the username / real-name login form."""
        self.login_frame = ttk.Frame(self)
        username_label = ttk.Label(self.login_frame, text="Username")
        self.username_entry = ttk.Entry(self.login_frame)
        self.username_entry.focus_force()
        real_name_label = ttk.Label(self.login_frame, text="Real Name")
        self.real_name_entry = ttk.Entry(self.login_frame)
        login_button = ttk.Button(self.login_frame, text="Login", command=self.login)
        create_account_button = ttk.Button(self.login_frame, text="Create Account", command=self.create_account)
        username_label.grid(row=0, column=0, sticky='e')
        self.username_entry.grid(row=0, column=1)
        real_name_label.grid(row=1, column=0, sticky='e')
        self.real_name_entry.grid(row=1, column=1)
        login_button.grid(row=2, column=0, sticky='e')
        create_account_button.grid(row=2, column=1)
        for i in range(3):
            tk.Grid.rowconfigure(self.login_frame, i, weight=1)
            tk.Grid.columnconfigure(self.login_frame, i, weight=1)
        self.login_frame.pack(fill=tk.BOTH, expand=1)
        # keep the binding id so it can be removed after a successful login
        self.login_event = self.bind("<Return>", self.login)

    def login(self, event=None):
        """Attempt to log in with the entered credentials; show an error on failure."""
        username = self.username_entry.get()
        real_name = self.real_name_entry.get()
        if self.requester.login(username, real_name):
            self.username = username
            self.real_name = real_name
            self.unbind("<Return>", self.login_event)
            self.show_friends()
        else:
            msg.showerror("Failed", f"Could not log in as {username}")

    def create_account(self):
        """Register a new account and show the friends list on success."""
        username = self.username_entry.get()
        real_name = self.real_name_entry.get()
        if self.requester.create_account(username, real_name):
            self.username = username
            self.real_name = real_name
            self.show_friends()
        else:
            msg.showerror("Failed", "Account already exists!")

    def show_friends(self):
        """Replace the login form with a scrollable canvas holding the friend rows."""
        self.configure(menu=self.menu)
        self.login_frame.pack_forget()
        self.canvas = tk.Canvas(self, bg="white")
        self.canvas_frame = tk.Frame(self.canvas)
        self.scrollbar = ttk.Scrollbar(self, orient="vertical", command=self.canvas.yview)
        self.canvas.configure(yscrollcommand=self.scrollbar.set)
        self.scrollbar.pack(side=tk.LEFT, fill=tk.Y)
        self.canvas.pack(side=tk.LEFT, expand=1, fill=tk.BOTH)
        self.friends_area = self.canvas.create_window((0, 0), window=self.canvas_frame, anchor="nw")
        self.bind_events()
        self.load_friends()

    def bind_events(self):
        self.bind('<Configure>', self.on_frame_resized)
        self.canvas.bind('<Configure>', self.friends_width)

    def friends_width(self, event):
        # keep the inner frame as wide as the canvas when the window resizes
        canvas_width = event.width
        self.canvas.itemconfig(self.friends_area, width=canvas_width)

    def on_frame_resized(self, event=None):
        self.canvas.configure(scrollregion=self.canvas.bbox("all"))

    def load_friends(self):
        """Fetch the friend list from the server and build one row widget per friend."""
        my_friends = self.requester.get_friends(self.username)
        for user in my_friends["friends"]:
            if user['username'] != self.username:
                friend_frame = ttk.Frame(self.canvas_frame)
                # BUG FIX: the inner quotes must differ from the f-string's own
                # quotes (SyntaxError before Python 3.12)
                friend_avatar_path = os.path.join(friend_avatars_dir, f"{user['username']}.png")
                if user["avatar"]:
                    with open(friend_avatar_path, 'wb') as friend_avatar:
                        img = base64.urlsafe_b64decode(user['avatar'])
                        friend_avatar.write(img)
                else:
                    friend_avatar_path = default_avatar_path
                profile_photo = tk.PhotoImage(file=friend_avatar_path)
                profile_photo_label = ttk.Label(friend_frame, image=profile_photo)
                # keep a reference so the PhotoImage is not garbage-collected
                profile_photo_label.image = profile_photo
                friend_name = ttk.Label(friend_frame, text=user['real_name'], anchor=tk.W)
                message_this_friend = partial(self.open_chat_window, username=user["username"], real_name=user["real_name"], avatar=friend_avatar_path)
                block_this_friend = partial(self.block_friend, username=user["username"])
                message_button = ttk.Button(friend_frame, text="Chat", command=message_this_friend)
                block_button = ttk.Button(friend_frame, text="Block", command=block_this_friend)
                profile_photo_label.pack(side=tk.LEFT)
                friend_name.pack(side=tk.LEFT)
                message_button.pack(side=tk.RIGHT)
                block_button.pack(side=tk.RIGHT, padx=(0, 30))
                friend_frame.pack(fill=tk.X, expand=1)

    def reload_friends(self):
        """Clear the friend rows and rebuild them from the server."""
        for child in self.canvas_frame.winfo_children():
            child.pack_forget()
        self.load_friends()

    def show_add_friend_window(self):
        AddFriendWindow(self)

    def add_friend(self, username):
        """Ask the server to add ``username``; return True on success."""
        if self.requester.add_friend(self.username, username):
            msg.showinfo("Friend Added", "Friend Added")
            success = True
            self.reload_friends()
        else:
            msg.showerror("Add Failed", "Friend was not found")
            success = False
        return success

    def open_chat_window(self, username, real_name, avatar):
        cw = ChatWindow(self, real_name, username, avatar)

    def block_friend(self, username):
        self.requester.block_friend(self.username, username)
        self.reload_friends()

    def change_avatar(self):
        AvatarWindow(self)
if __name__ == '__main__':
    # Launch the friends-list window and enter the Tk event loop.
    f = FriendsList()
    f.mainloop()
| import base64
import os
import tkinter as tk
import tkinter.messagebox as msg
import tkinter.ttk as ttk
from functools import partial
from chatwindow import ChatWindow
from requester import Requester
from avatarwindow import AvatarWindow
from addfriendwindow import AddFriendWindow
# Cache directory for downloaded friend avatars, and the fallback image used
# when a friend has not uploaded one.
friend_avatars_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "images/friends"))
default_avatar_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "images/default.png"))
class FriendsList(tk.Tk):
    # Top-level chat-client window: shows a login form, then the friends list.
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.title('Tk Chat')
        self.geometry('700x500')
        # the menubar is built up-front but only attached after a successful login
        self.menu = tk.Menu(self, bg="lightgrey", fg="black", tearoff=0)
        self.friends_menu = tk.Menu(self.menu, fg="black", bg="lightgrey", tearoff=0)
        self.friends_menu.add_command(label="Add Friend", command=self.show_add_friend_window)
        self.avatar_menu = tk.Menu(self.menu, fg="black", bg="lightgrey", tearoff=0)
        self.avatar_menu.add_command(label="Change Avatar", command=self.change_avatar)
        self.menu.add_cascade(label="Friends", menu=self.friends_menu)
        self.menu.add_cascade(label="Avatar", menu=self.avatar_menu)
        self.requester = Requester()
        self.show_login_screen()
    def show_login_screen(self):
        # Build and display the username / real-name login form.
        self.login_frame = ttk.Frame(self)
        username_label = ttk.Label(self.login_frame, text="Username")
        self.username_entry = ttk.Entry(self.login_frame)
        self.username_entry.focus_force()
        real_name_label = ttk.Label(self.login_frame, text="Real Name")
        self.real_name_entry = ttk.Entry(self.login_frame)
        login_button = ttk.Button(self.login_frame, text="Login", command=self.login)
        create_account_button = ttk.Button(self.login_frame, text="Create Account", command=self.create_account)
        username_label.grid(row=0, column=0, sticky='e')
        self.username_entry.grid(row=0, column=1)
        real_name_label.grid(row=1, column=0, sticky='e')
        self.real_name_entry.grid(row=1, column=1)
        login_button.grid(row=2, column=0, sticky='e')
        create_account_button.grid(row=2, column=1)
        for i in range(3):
            tk.Grid.rowconfigure(self.login_frame, i, weight=1)
            tk.Grid.columnconfigure(self.login_frame, i, weight=1)
        self.login_frame.pack(fill=tk.BOTH, expand=1)
        # keep the binding id so it can be removed after a successful login
        self.login_event = self.bind("<Return>", self.login)
    def login(self, event=None):
        username = self.username_entry.get()
        real_name = self.real_name_entry.get()
        if self.requester.login(username, real_name):
            self.username = username
            self.real_name = real_name
            self.unbind("<Return>", self.login_event)
            self.show_friends()
        else:
            msg.showerror("Failed", f"Could not log in as {username}")
    def create_account(self):
        username = self.username_entry.get()
        real_name = self.real_name_entry.get()
        if self.requester.create_account(username, real_name):
            self.username = username
            self.real_name = real_name
            self.show_friends()
        else:
            msg.showerror("Failed", "Account already exists!")
    def show_friends(self):
        # Replace the login form with a scrollable canvas holding the friend rows.
        self.configure(menu=self.menu)
        self.login_frame.pack_forget()
        self.canvas = tk.Canvas(self, bg="white")
        self.canvas_frame = tk.Frame(self.canvas)
        self.scrollbar = ttk.Scrollbar(self, orient="vertical", command=self.canvas.yview)
        self.canvas.configure(yscrollcommand=self.scrollbar.set)
        self.scrollbar.pack(side=tk.LEFT, fill=tk.Y)
        self.canvas.pack(side=tk.LEFT, expand=1, fill=tk.BOTH)
        self.friends_area = self.canvas.create_window((0, 0), window=self.canvas_frame, anchor="nw")
        self.bind_events()
        self.load_friends()
    def bind_events(self):
        self.bind('<Configure>', self.on_frame_resized)
        self.canvas.bind('<Configure>', self.friends_width)
    def friends_width(self, event):
        # keep the inner frame as wide as the canvas when the window resizes
        canvas_width = event.width
        self.canvas.itemconfig(self.friends_area, width=canvas_width)
    def on_frame_resized(self, event=None):
        self.canvas.configure(scrollregion=self.canvas.bbox("all"))
    def load_friends(self):
        # Fetch the friend list from the server and build one row widget per friend.
        my_friends = self.requester.get_friends(self.username)
        for user in my_friends["friends"]:
            if user['username'] != self.username:
                friend_frame = ttk.Frame(self.canvas_frame)
                friend_avatar_path = os.path.join(friend_avatars_dir, f"{user['username']}.png")
                if user["avatar"]:
                    with open(friend_avatar_path, 'wb') as friend_avatar:
                        img = base64.urlsafe_b64decode(user['avatar'])
                        friend_avatar.write(img)
                else:
                    friend_avatar_path = default_avatar_path
                profile_photo = tk.PhotoImage(file=friend_avatar_path)
                profile_photo_label = ttk.Label(friend_frame, image=profile_photo)
                # keep a reference so the PhotoImage is not garbage-collected
                profile_photo_label.image = profile_photo
                friend_name = ttk.Label(friend_frame, text=user['real_name'], anchor=tk.W)
                message_this_friend = partial(self.open_chat_window, username=user["username"], real_name=user["real_name"], avatar=friend_avatar_path)
                block_this_friend = partial(self.block_friend, username=user["username"])
                message_button = ttk.Button(friend_frame, text="Chat", command=message_this_friend)
                block_button = ttk.Button(friend_frame, text="Block", command=block_this_friend)
                profile_photo_label.pack(side=tk.LEFT)
                friend_name.pack(side=tk.LEFT)
                message_button.pack(side=tk.RIGHT)
                block_button.pack(side=tk.RIGHT, padx=(0, 30))
                friend_frame.pack(fill=tk.X, expand=1)
    def reload_friends(self):
        # Clear the friend rows and rebuild them from the server.
        for child in self.canvas_frame.winfo_children():
            child.pack_forget()
        self.load_friends()
    def show_add_friend_window(self):
        AddFriendWindow(self)
    def add_friend(self, username):
        # Ask the server to add `username`; returns True on success.
        if self.requester.add_friend(self.username, username):
            msg.showinfo("Friend Added", "Friend Added")
            success = True
            self.reload_friends()
        else:
            msg.showerror("Add Failed", "Friend was not found")
            success = False
        return success
    def open_chat_window(self, username, real_name, avatar):
        cw = ChatWindow(self, real_name, username, avatar)
    def block_friend(self, username):
        self.requester.block_friend(self.username, username)
        self.reload_friends()
    def change_avatar(self):
        AvatarWindow(self)
if __name__ == '__main__':
    # Launch the friends-list window and enter the Tk event loop.
    f = FriendsList()
    f.mainloop()
|
#!/usr/bin/env python
import os
import logging
import requests
import json
import configparser
import sys
import time
import re
from os.path import dirname
from config import (
instanceA_url, instanceA_key, instanceA_path, instanceA_profile,
instanceA_profile_id, instanceA_profile_filter, instanceA_profile_filter_id,
instanceA_language_id, instanceA_language, instanceA_quality_match,
instanceA_tag_filter_id, instanceA_tag_filter, instanceA_blacklist,
instanceB_url, instanceB_key, instanceB_path, instanceB_profile,
instanceB_profile_id, instanceB_profile_filter, instanceB_profile_filter_id,
instanceB_language_id, instanceB_language, instanceB_quality_match,
instanceB_tag_filter_id, instanceB_tag_filter, instanceB_blacklist,
content_id_key, logger, is_sonarr, is_radarr, is_lidarr,
get_status_path, get_content_path, get_profile_path, get_language_path, get_tag_path, get_content_put_path,
is_in_docker, instance_sync_interval_seconds,
sync_bidirectionally, auto_search, skip_missing, monitor_new_content,
api_version, is_test_run, sync_monitor
)
def get_content_details(content, instance_path, instance_profile_id, instance_url, instance_language_id=None):
    """Build the POST payload for adding one content item to the target instance.

    Starts from the source instance's item and rewrites root folder, quality
    profile, image URLs and monitored flag, then adds the app-specific fields
    (Sonarr / Radarr / Lidarr) driven by the module-level config flags.
    """
    global monitor_new_content, auto_search
    images = content.get('images')
    for image in images:
        # image paths are relative; prefix them with the target instance's URL
        image['url'] = '{0}{1}'.format(instance_url, image.get('url'))
    monitored = content.get('monitored')
    if monitor_new_content is not None:
        # config override: force-monitor (or not) everything that gets synced
        monitored = True if monitor_new_content else False
    payload = {
        content_id_key: content.get(content_id_key),
        'qualityProfileId': int(instance_profile_id or content.get('qualityProfileId')),
        'monitored': monitored,
        'rootFolderPath': instance_path,
        'images': images,
    }
    add_options = content.get('addOptions', {})
    search_missing = True if auto_search else False
    if is_sonarr:
        payload['title'] = content.get('title')
        payload['titleSlug'] = content.get('titleSlug')
        payload['seasons'] = content.get('seasons')
        payload['year'] = content.get('year')
        payload['tvRageId'] = content.get('tvRageId')
        payload['seasonFolder'] = content.get('seasonFolder')
        payload['languageProfileId'] = instance_language_id if instance_language_id else content.get(
            'languageProfileId')
        payload['tags'] = content.get('tags')
        payload['seriesType'] = content.get('seriesType')
        payload['useSceneNumbering'] = content.get('useSceneNumbering')
        payload['addOptions'] = {
            **add_options,
            **{'searchForMissingEpisodes': search_missing}
        }
    elif is_radarr:
        payload['title'] = content.get('title')
        payload['year'] = content.get('year')
        payload['tmdbId'] = content.get('tmdbId')
        payload['titleSlug'] = content.get('titleSlug')
        payload['addOptions'] = {
            **add_options,
            **{'searchForMovie': search_missing}
        }
    elif is_lidarr:
        payload['artistName'] = content.get('artistName')
        payload['albumFolder'] = content.get('albumFolder')
        payload['metadataProfileId'] = content.get('metadataProfileId')
        payload['addOptions'] = {
            **add_options,
            **{
                "monitored": monitored,
                "searchForMissingAlbums": search_missing
            }
        }
    logger.debug(payload)
    return payload
def get_quality_profiles(instance_session, instance_url, instance_key):
    """Fetch the list of quality profiles configured on an instance.

    Exits the whole program (via exit_system) on an HTTP failure or an
    undecodable JSON body.
    """
    instance_profile_url = get_profile_path(instance_url, instance_key)
    profiles_response = instance_session.get(instance_profile_url)
    if profiles_response.status_code != 200:
        logger.error(f'Could not get profile id from {instance_profile_url}')
        exit_system()
    try:
        return profiles_response.json()
    except ValueError:
        # narrowed from a bare `except:` which also swallowed SystemExit;
        # json.JSONDecodeError is a ValueError subclass
        logger.error(f'Could not decode profile id from {instance_profile_url}')
        exit_system()
def get_profile_from_id(instance_session, instance_url, instance_key, instance_profile, instance_name=''):
    """Resolve a quality-profile name (case-insensitive) to its numeric id."""
    available = get_quality_profiles(instance_session=instance_session, instance_url=instance_url, instance_key=instance_key)
    wanted = instance_profile.lower()
    match = next((entry for entry in available if entry["name"].lower() == wanted), False)
    if not match:
        logger.error('Could not find profile_id for instance {} profile {}'.format(instance_name, instance_profile))
        exit_system()
    instance_profile_id = match.get('id')
    logger.debug(f'found profile_id (instance{instance_name}) "{instance_profile_id}" from profile "{instance_profile}"')
    return instance_profile_id
def get_tag_from_id(instance_session, instance_url, instance_key, instance_tag, instance_name=''):
    """Map a list of tag labels to their ids on the instance (Sonarr/Radarr v3).

    Exits the program if the endpoint fails, the body cannot be decoded, or
    none of the requested labels exist on the instance.
    """
    instance_tag_url = get_tag_path(instance_url, instance_key)
    tag_response = instance_session.get(instance_tag_url)
    if tag_response.status_code != 200:
        logger.error(f'Could not get tag id from (instance{instance_name}) {instance_tag_url} - only works on Sonarr')
        exit_system()
    try:
        instance_tags = tag_response.json()
    except ValueError:
        # narrowed from a bare `except:` which also swallowed SystemExit
        logger.error(f'Could not decode tag id from {instance_tag_url}')
        exit_system()
    # case-insensitive match of the requested labels against the server's tags;
    # a set avoids duplicate matches when a label is listed twice in the config
    wanted = {label.lower() for label in instance_tag}
    tag_ids = [item for item in instance_tags if item.get('label').lower() in wanted]
    if not tag_ids:
        # BUG FIX: log the tags the caller asked for, not the server's full tag list
        logger.error(f'Could not find tag_id for instance {instance_name} and tag {instance_tag}')
        exit_system()
    instance_tag_ids = [tag.get('id') for tag in tag_ids]
    logger.debug(f'found id "{instance_tag_ids}" from tag "{instance_tag}" for instance {instance_name}')
    return instance_tag_ids
def get_language_from_id(instance_session, instance_url, instance_key, instance_language, instance_name=''):
    """Resolve a language-profile name to its numeric id (Sonarr v3 only).

    Exits the program on HTTP failure, an undecodable body, or an unknown
    language name.
    """
    instance_language_url = get_language_path(instance_url, instance_key)
    language_response = instance_session.get(instance_language_url)
    if language_response.status_code != 200:
        logger.error(f'Could not get language id from (instance{instance_name}) {instance_language_url} - only works on sonarr v3')
        exit_system()
    try:
        instance_languages = language_response.json()
    except ValueError:
        # narrowed from a bare `except:` which also swallowed SystemExit
        logger.error(f'Could not decode language id from {instance_language_url}')
        exit_system()
    # the endpoint returns a list of profiles; the languages live under the first entry
    instance_languages = instance_languages[0]['languages']
    language = next((item for item in instance_languages if item.get('language', {}).get('name').lower() == instance_language.lower()), False)
    if not language:
        logger.error(f'Could not find language_id for instance {instance_name} and language {instance_language}')
        exit_system()
    instance_language_id = language.get('language', {}).get('id')
    logger.debug(f'found id "{instance_language_id}" from language "{instance_language}" for instance {instance_name}')
    if instance_language_id is None:
        logger.error(f'language_id is None for instance {instance_name} and language {instance_language}')
        exit_system()
    return instance_language_id
def sync_servers(instanceA_contents, instanceB_language_id, instanceB_contentIds,
                 instanceB_path, instanceB_profile_id, instanceA_profile_filter_id,
                 instanceB_session, instanceB_url, instanceB_key, instanceA_quality_match,
                 instanceA_tag_filter_id, instanceA_blacklist, instanceB_contents):
    """One-way sync from instance A to instance B.

    Walks A's library, applies the configured filters (profile id, quality,
    tags, blacklist, missing-file skip), POSTs anything B doesn't have yet,
    and optionally PUTs monitored-flag changes for items already on B.
    """
    global is_radarr, is_sonarr, is_test_run, sync_monitor
    search_ids = []
    # if given instance A profile id then we want to filter out content without that id
    if instanceA_profile_filter_id:
        logging.info(f'only filtering content with instanceA_profile_filter_id {instanceA_profile_filter_id}')
    # for each content id in instance A, check if it needs to be synced to instance B
    for content in instanceA_contents:
        content_not_synced = content[content_id_key] not in instanceB_contentIds
        # only skip alrerady synced items if we arent syncing monitoring as well
        if content_not_synced or sync_monitor:
            # Sonarr/Radarr items have 'title'; Lidarr items have 'artistName'
            title = content.get('title') or content.get('artistName')
            instance_path = instanceB_path or dirname(content.get('path'))
            # if skipping missing files, we want to skip any that don't have files
            if is_radarr and skip_missing:
                content_has_file = content.get('hasFile')
                if not content_has_file:
                    logging.debug(f'Skipping content {title} - file missing')
                    continue
            # if given this, we want to filter from instance by profile id
            if instanceA_profile_filter_id:
                quality_profile_id = content.get('qualityProfileId')
                if instanceA_profile_filter_id != quality_profile_id:
                    logging.debug(f'Skipping content {title} - mismatched quality_profile_id {quality_profile_id} with instanceA_profile_filter_id {instanceA_profile_filter_id}')
                    continue
            # if given quality filter we want to filter if quality from instanceA isnt high enough yet
            if is_radarr and instanceA_quality_match:
                content_quality = content.get('movieFile', {}).get('quality', {}).get('quality', {}).get('name', '')
                if content_quality and not re.match(instanceA_quality_match, content_quality):
                    logging.debug(f'Skipping content {title} - mismatched content_quality {content_quality} with instanceA_quality_match {instanceA_quality_match}')
                    continue
            # if given tag filter then filter by tag - (Sonarr/Radarr v3 only)
            if (is_sonarr or is_radarr) and instanceA_tag_filter_id:
                content_tag_ids = content.get('tags')
                if not (set(content_tag_ids) & set(instanceA_tag_filter_id)):
                    logging.debug(f'Skipping content {title} - mismatched content_tag_ids {content_tag_ids} with instanceA_tag_filter_id {instanceA_tag_filter_id}')
                    continue
            # if black list given then dont sync matching slugs/ids
            if instanceA_blacklist:
                title_slug = content.get('titleSlug') or content.get('foreignArtistId')
                if title_slug in instanceA_blacklist:
                    logging.debug(f'Skipping content {title} - blacklist slug: {title_slug}')
                    continue
                content_id = str(content.get('id'))
                if content_id in instanceA_blacklist:
                    logging.debug(f'Skipping content {title} - blacklist ID: {content_id}')
                    continue
            # generate content from instance A to sync into instance B
            formatted_content = get_content_details(
                content=dict(content),
                instance_path=instance_path,
                instance_profile_id=instanceB_profile_id,
                instance_url=instanceB_url,
                instance_language_id=instanceB_language_id,
            )
            instanceB_content_url = get_content_path(instanceB_url, instanceB_key)
            if is_test_run:
                logging.info('content title "{0}" synced successfully (test only)'.format(title))
            elif content_not_synced:
                # sync content if not synced
                logging.info(f'syncing content title "{title}"')
                sync_response = instanceB_session.post(instanceB_content_url, json=formatted_content)
                # check response and save content id for searching later on if success
                if sync_response.status_code != 201 and sync_response.status_code != 200:
                    logger.error(f'server sync error for {title} - response: {sync_response.text}')
                else:
                    try:
                        search_ids.append(int(sync_response.json()['id']))
                    except:
                        logger.error(f'Could not decode sync response from {instanceB_content_url}')
                    logging.info('content title "{0}" synced successfully'.format(title))
            elif sync_monitor:
                # else if is already synced and we want to sync monitoring then sync that now
                # find matching content from instance B to check monitored status
                matching_content_instanceB = list(filter(lambda content_instanceB: content_instanceB['titleSlug'] == content.get('titleSlug'), instanceB_contents))
                if(len(matching_content_instanceB) == 1):
                    matching_content_instanceB = matching_content_instanceB[0]
                    # if we found a content match from instance B, then check monitored status - if different then sync from A to B
                    if matching_content_instanceB['monitored'] != content['monitored']:
                        matching_content_instanceB['monitored'] = content['monitored']
                        instanceB_content_url = get_content_put_path(instanceB_url, instanceB_key, matching_content_instanceB.get('id'))
                        sync_response = instanceB_session.put(instanceB_content_url, json=matching_content_instanceB)
                        # check response and save content id for searching later on if success
                        if sync_response.status_code != 202:
                            logger.error(f'server monitoring sync error for {title} - response: {sync_response.text}')
                        else:
                            try:
                                search_ids.append(int(sync_response.json()['id']))
                            except:
                                logger.error(f'Could not decode sync response from {instanceB_content_url}')
                            logging.info('content title "{0}" monitoring synced successfully'.format(title))
    logging.info(f'{len(search_ids)} contents synced successfully')
def get_instance_contents(instance_url, instance_key, instance_session, instance_name=''):
    """Fetch all content items from one instance.

    Returns a tuple of (contents, content_ids), where contents is the decoded
    JSON list from the instance and content_ids holds each item's value for
    the globally configured content_id_key.

    Exits (or raises, in docker) on HTTP or decode errors.
    """
    instance_contentIds = []
    instance_content_url = get_content_path(instance_url, instance_key)
    instance_contents = instance_session.get(instance_content_url)
    if instance_contents.status_code != 200:
        logger.error('instance{} server error - response {}'.format(instance_name, instance_contents.status_code))
        exit_system()
    else:
        try:
            instance_contents = instance_contents.json()
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logger.error(f'Could not decode contents from {instance_content_url}')
            exit_system()
    for content_to_sync in instance_contents:
        instance_contentIds.append(content_to_sync[content_id_key])
    logger.debug('{} contents in instance {}'.format(len(instance_contentIds), instance_name))
    return instance_contents, instance_contentIds
def check_status(instance_session, instance_url, instance_key, instance_name=''):
    """Query an instance's status endpoint and return the decoded response.

    Exits (or raises, when running in docker) if the instance is unreachable,
    returns a non-200 status, or reports an error payload.
    """
    global api_version
    instance_status_url = get_status_path(instance_url, instance_key)
    error_message = f'Could not connect to instance{instance_name}: {instance_status_url}'
    status_response = None
    try:
        status_response = instance_session.get(instance_status_url)
        if status_response.status_code != 200:
            logger.error(error_message)
            exit_system()
    except Exception:  # narrowed from bare except
        logger.error(error_message)
        exit_system()
    if status_response is None:
        logger.error(error_message)
        exit_system()
    else:
        try:
            status_response = status_response.json()
        except Exception as error:
            # a non-JSON body is tolerated only if we somehow already have a dict
            if not isinstance(status_response, dict):
                logger.error(
                    f"Could not retrieve status for {instance_status_url}: {status_response} - {error}")
                exit_system()
    # fix: the nested .get() calls reused the enclosing f-string's double
    # quotes, which is a SyntaxError on Python < 3.12 (PEP 701)
    if status_response.get('error'):
        logger.error(f"{instance_status_url} error {status_response.get('error')}")
        exit_system()
    logger.debug(f"{instance_status_url} version {status_response.get('version')}")
    return status_response
def sync_content():
    """Top-level sync driver.

    Resolves profile/tag/language names to ids for both instances, fetches
    both content lists, then syncs A -> B (and B -> A with swapped arguments
    when sync_bidirectionally is set).
    """
    global instanceA_profile_id, instanceA_profile, instanceB_profile_id, instanceB_profile, instanceA_profile_filter, instanceA_profile_filter_id, instanceB_profile_filter, instanceB_profile_filter_id, tested_api_version, instanceA_language_id, instanceA_language, instanceB_language_id, instanceB_language, instanceA_quality_match, instanceB_quality_match, is_sonarr, instanceA_tag_filter_id, instanceA_tag_filter, instanceB_tag_filter_id, instanceB_tag_filter, is_radarr, instanceA_blacklist, instanceB_blacklist
    # get sessions; trust_env=False ignores proxy settings from the environment
    instanceA_session = requests.Session()
    instanceA_session.trust_env = False
    instanceB_session = requests.Session()
    instanceB_session.trust_env = False
    # if given a profile instead of a profile id then try to find the profile id
    if not instanceA_profile_id and instanceA_profile:
        instanceA_profile_id = get_profile_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_profile, 'A')
    if not instanceB_profile_id and instanceB_profile:
        instanceB_profile_id = get_profile_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_profile, 'B')
    logger.debug({
        'instanceA_profile_id': instanceA_profile_id,
        'instanceA_profile': instanceA_profile,
        'instanceB_profile_id': instanceB_profile_id,
        'instanceB_profile': instanceB_profile,
    })
    # do the same for profile id filters if they exist
    if not instanceA_profile_filter_id and instanceA_profile_filter:
        instanceA_profile_filter_id = get_profile_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_profile_filter, 'A')
    if not instanceB_profile_filter_id and instanceB_profile_filter:
        instanceB_profile_filter_id = get_profile_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_profile_filter, 'B')
    logger.debug({
        'instanceAprofile_filter_id': instanceA_profile_filter_id,
        'instanceAprofile_filter': instanceA_profile_filter,
        'instanceBprofile_filter_id': instanceB_profile_filter_id,
        'instanceBprofile_filter': instanceB_profile_filter,
    })
    # do the same for tag id filters if they exist - (Sonarr/Radarr only)
    if is_sonarr or is_radarr:
        if not instanceA_tag_filter_id and instanceA_tag_filter:
            instanceA_tag_filter_id = get_tag_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_tag_filter, 'A')
        if not instanceB_tag_filter_id and instanceB_tag_filter:
            # fix: instance B's tag filter was resolved from instanceA_tag_filter
            # (copy-paste bug), so B's filter silently used A's tag names
            instanceB_tag_filter_id = get_tag_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_tag_filter, 'B')
        logger.debug({
            'instanceA_tag_filter': instanceA_tag_filter,
            # fix: this debug entry logged instanceA_profile_filter under a
            # misleading key; log the resolved tag filter id instead
            'instanceA_tag_filter_id': instanceA_tag_filter_id,
            'instanceB_tag_filter_id': instanceB_tag_filter_id,
            'instanceB_tag_filter': instanceB_tag_filter,
        })
    # if given language instead of language id then try to find the lanaguage id - (only Sonarr v3)
    if is_sonarr:
        if not instanceA_language_id and instanceA_language:
            instanceA_language_id = get_language_from_id(
                instance_session=instanceA_session,
                instance_url=instanceA_url,
                instance_key=instanceA_key,
                instance_language=instanceA_language,
                instance_name='A'
            )
        if not instanceB_language_id and instanceB_language:
            instanceB_language_id = get_language_from_id(
                instance_session=instanceB_session,
                instance_url=instanceB_url,
                instance_key=instanceB_key,
                instance_language=instanceB_language,
                instance_name='B'
            )
        logger.debug({
            'instanceA_language_id': instanceA_language_id,
            'instanceA_language': instanceA_language,
            'instanceB_language_id': instanceB_language_id,
            'instanceB_language': instanceB_language,
            'is_sonarr': is_sonarr,
            'api_version': api_version,
        })
    # get contents to compare
    instanceA_contents, instanceA_contentIds = get_instance_contents(instanceA_url, instanceA_key, instanceA_session, instance_name='A')
    instanceB_contents, instanceB_contentIds = get_instance_contents(instanceB_url, instanceB_key, instanceB_session, instance_name='B')
    logger.info('syncing content from instance A to instance B')
    sync_servers(
        instanceA_contents=instanceA_contents,
        instanceB_contents=instanceB_contents,
        instanceB_contentIds=instanceB_contentIds,
        instanceB_language_id=instanceB_language_id,
        instanceB_path=instanceB_path,
        instanceB_profile_id=instanceB_profile_id,
        instanceB_session=instanceB_session,
        instanceB_url=instanceB_url,
        instanceA_profile_filter_id=instanceA_profile_filter_id,
        instanceB_key=instanceB_key,
        instanceA_quality_match=instanceA_quality_match,
        instanceA_tag_filter_id=instanceA_tag_filter_id,
        instanceA_blacklist=instanceA_blacklist
    )
    # if given bidirectional flag then sync from instance B to instance A
    if sync_bidirectionally:
        logger.info('syncing content from instance B to instance A')
        sync_servers(
            instanceA_contents=instanceB_contents,
            instanceB_contents=instanceA_contents,
            instanceB_contentIds=instanceA_contentIds,
            instanceB_language_id=instanceA_language_id,
            instanceB_path=instanceA_path,
            instanceB_profile_id=instanceA_profile_id,
            instanceB_session=instanceA_session,
            instanceB_url=instanceA_url,
            instanceA_profile_filter_id=instanceB_profile_filter_id,
            instanceB_key=instanceA_key,
            instanceA_quality_match=instanceB_quality_match,
            instanceA_tag_filter_id=instanceB_tag_filter_id,
            instanceA_blacklist=instanceB_blacklist
        )
########################################################################################################################
def exit_system():
    """Abort the current sync run.

    Outside docker this terminates the process; inside docker it raises
    instead, so the top-level retry loop can catch the error and keep the
    container alive ("we dont want to exit if in docker").
    """
    if is_in_docker:
        # fix: raise with a message so the retry loop's log isn't empty
        raise Exception('sync aborted - will retry on next interval')
    else:
        sys.exit(0)
if is_in_docker:
    logger.info('syncing every {} seconds'.format(instance_sync_interval_seconds))
sync_content()
# inside docker, keep syncing forever on the configured interval
if is_in_docker:
    while True:
        try:
            time.sleep(instance_sync_interval_seconds)
            sync_content()
        except Exception as inst:
            # fix: the exception was previously assigned to a throwaway
            # variable and discarded; log it so failed runs are visible
            logger.error('sync run failed: {}'.format(inst))
| #!/usr/bin/env python
import os
import logging
import requests
import json
import configparser
import sys
import time
import re
from os.path import dirname
from config import (
instanceA_url, instanceA_key, instanceA_path, instanceA_profile,
instanceA_profile_id, instanceA_profile_filter, instanceA_profile_filter_id,
instanceA_language_id, instanceA_language, instanceA_quality_match,
instanceA_tag_filter_id, instanceA_tag_filter, instanceA_blacklist,
instanceB_url, instanceB_key, instanceB_path, instanceB_profile,
instanceB_profile_id, instanceB_profile_filter, instanceB_profile_filter_id,
instanceB_language_id, instanceB_language, instanceB_quality_match,
instanceB_tag_filter_id, instanceB_tag_filter, instanceB_blacklist,
content_id_key, logger, is_sonarr, is_radarr, is_lidarr,
get_status_path, get_content_path, get_profile_path, get_language_path, get_tag_path, get_content_put_path,
is_in_docker, instance_sync_interval_seconds,
sync_bidirectionally, auto_search, skip_missing, monitor_new_content,
api_version, is_test_run, sync_monitor
)
def get_content_details(content, instance_path, instance_profile_id, instance_url, instance_language_id=None):
    """Build the add-item payload for the destination instance.

    Rewrites the item's relative image URLs to absolute ones, applies the
    monitored/search overrides from config, and adds the app-specific
    (Sonarr/Radarr/Lidarr) fields.
    """
    global monitor_new_content, auto_search
    images = content.get('images')
    for image in images:
        # image URLs from the source API are relative; prefix the instance base URL
        image['url'] = '{0}{1}'.format(instance_url, image.get('url'))
    monitored = content.get('monitored')
    if monitor_new_content is not None:
        # config override: force the monitored flag for newly synced content
        monitored = True if monitor_new_content else False
    # fields common to all three apps
    payload = {
        content_id_key: content.get(content_id_key),
        'qualityProfileId': int(instance_profile_id or content.get('qualityProfileId')),
        'monitored': monitored,
        'rootFolderPath': instance_path,
        'images': images,
    }
    add_options = content.get('addOptions', {})
    search_missing = True if auto_search else False
    if is_sonarr:
        payload['title'] = content.get('title')
        payload['titleSlug'] = content.get('titleSlug')
        payload['seasons'] = content.get('seasons')
        payload['year'] = content.get('year')
        payload['tvRageId'] = content.get('tvRageId')
        payload['seasonFolder'] = content.get('seasonFolder')
        # prefer the explicitly resolved language profile, fall back to the source item's
        payload['languageProfileId'] = instance_language_id if instance_language_id else content.get(
            'languageProfileId')
        payload['tags'] = content.get('tags')
        payload['seriesType'] = content.get('seriesType')
        payload['useSceneNumbering'] = content.get('useSceneNumbering')
        payload['addOptions'] = {
            **add_options,
            **{'searchForMissingEpisodes': search_missing}
        }
    elif is_radarr:
        payload['title'] = content.get('title')
        payload['year'] = content.get('year')
        payload['tmdbId'] = content.get('tmdbId')
        payload['titleSlug'] = content.get('titleSlug')
        payload['addOptions'] = {
            **add_options,
            **{'searchForMovie': search_missing}
        }
    elif is_lidarr:
        payload['artistName'] = content.get('artistName')
        payload['albumFolder'] = content.get('albumFolder')
        payload['metadataProfileId'] = content.get('metadataProfileId')
        payload['addOptions'] = {
            **add_options,
            **{
                "monitored": monitored,
                "searchForMissingAlbums": search_missing
            }
        }
    logger.debug(payload)
    return payload
def get_quality_profiles(instance_session, instance_url, instance_key):
    """Return the decoded list of quality profiles for an instance.

    Exits (or raises, in docker) on HTTP or decode errors.
    """
    instance_profile_url = get_profile_path(instance_url, instance_key)
    profiles_response = instance_session.get(instance_profile_url)
    if profiles_response.status_code != 200:
        logger.error(f'Could not get profile id from {instance_profile_url}')
        exit_system()
    try:
        return profiles_response.json()
    except Exception:  # narrowed from bare except
        logger.error(f'Could not decode profile id from {instance_profile_url}')
        exit_system()
def get_profile_from_id(instance_session, instance_url, instance_key, instance_profile, instance_name=''):
    """Resolve a quality-profile name (case-insensitive) to its id.

    Exits (or raises, in docker) when no profile on the instance matches.
    """
    available_profiles = get_quality_profiles(instance_session=instance_session, instance_url=instance_url, instance_key=instance_key)
    wanted_name = instance_profile.lower()
    matched = False
    for candidate in available_profiles:
        if candidate["name"].lower() == wanted_name:
            matched = candidate
            break
    if not matched:
        logger.error('Could not find profile_id for instance {} profile {}'.format(instance_name, instance_profile))
        exit_system()
    resolved_id = matched.get('id')
    logger.debug(f'found profile_id (instance{instance_name}) "{resolved_id}" from profile "{instance_profile}"')
    return resolved_id
def get_tag_from_id(instance_session, instance_url, instance_key, instance_tag, instance_name=''):
    """Resolve a list of tag labels (case-insensitive) to their tag ids.

    Exits (or raises, in docker) on HTTP/decode errors or when none of the
    requested labels exist on the instance.
    """
    instance_tag_url = get_tag_path(instance_url, instance_key)
    tag_response = instance_session.get(instance_tag_url)
    if tag_response.status_code != 200:
        logger.error(f'Could not get tag id from (instance{instance_name}) {instance_tag_url} - only works on Sonarr')
        exit_system()
    instance_tags = None
    try:
        instance_tags = tag_response.json()
    except Exception:  # narrowed from bare except
        logger.error(f'Could not decode tag id from {instance_tag_url}')
        exit_system()
    tag_ids = []
    for item in instance_tags:
        for instance_item in instance_tag:
            if item.get('label').lower() == instance_item.lower():
                tag_ids.append(item)
    if not tag_ids:
        # fix: log the tags that were *requested* (instance_tag), not the
        # server's full tag list (instance_tags)
        logger.error(f'Could not find tag_id for instance {instance_name} and tag {instance_tag}')
        exit_system()
    # list comprehension can never be None, so the old post-hoc None check was dead code
    instance_tag_ids = [tag.get('id') for tag in tag_ids]
    logger.debug(f'found id "{instance_tag_ids}" from tag "{instance_tag}" for instance {instance_name}')
    return instance_tag_ids
def get_language_from_id(instance_session, instance_url, instance_key, instance_language, instance_name=''):
    """Resolve a Sonarr v3 language-profile name (case-insensitive) to its id.

    Exits (or raises, in docker) on HTTP/decode errors or when the language
    is not found on the instance.
    """
    instance_language_url = get_language_path(instance_url, instance_key)
    language_response = instance_session.get(instance_language_url)
    if language_response.status_code != 200:
        logger.error(f'Could not get language id from (instance{instance_name}) {instance_language_url} - only works on sonarr v3')
        exit_system()
    instance_languages = None
    try:
        instance_languages = language_response.json()
    except Exception:  # narrowed from bare except
        logger.error(f'Could not decode language id from {instance_language_url}')
        exit_system()
    # the endpoint returns a list of profiles; the languages live on the first one
    instance_languages = instance_languages[0]['languages']
    language = next((item for item in instance_languages if item.get('language', {}).get('name').lower() == instance_language.lower()), False)
    if not language:
        logger.error(f'Could not find language_id for instance {instance_name} and language {instance_language}')
        exit_system()
    instance_language_id = language.get('language', {}).get('id')
    logger.debug(f'found id "{instance_language_id}" from language "{instance_language}" for instance {instance_name}')
    if instance_language_id is None:
        logger.error(f'language_id is None for instance {instance_name} and language {instance_language}')
        exit_system()
    return instance_language_id
def sync_servers(instanceA_contents, instanceB_language_id, instanceB_contentIds,
                 instanceB_path, instanceB_profile_id, instanceA_profile_filter_id,
                 instanceB_session, instanceB_url, instanceB_key, instanceA_quality_match,
                 instanceA_tag_filter_id, instanceA_blacklist, instanceB_contents):
    """Sync content items from instance A into instance B.

    For each item from A that passes the configured filters (missing file,
    profile id, quality regex, tags, blacklist), POSTs missing items to B; for
    items already on B, PUTs monitored-flag changes when sync_monitor is set.

    NOTE(review): status/debug messages go through the root ``logging`` module
    while errors use the configured ``logger`` - presumably unintentional;
    verify before relying on log routing.
    """
    global is_radarr, is_sonarr, is_test_run, sync_monitor
    search_ids = []
    # if given instance A profile id then we want to filter out content without that id
    if instanceA_profile_filter_id:
        logging.info(f'only filtering content with instanceA_profile_filter_id {instanceA_profile_filter_id}')
    # for each content id in instance A, check if it needs to be synced to instance B
    for content in instanceA_contents:
        content_not_synced = content[content_id_key] not in instanceB_contentIds
        # only skip alrerady synced items if we arent syncing monitoring as well
        if content_not_synced or sync_monitor:
            title = content.get('title') or content.get('artistName')
            instance_path = instanceB_path or dirname(content.get('path'))
            # if skipping missing files, we want to skip any that don't have files
            if is_radarr and skip_missing:
                content_has_file = content.get('hasFile')
                if not content_has_file:
                    logging.debug(f'Skipping content {title} - file missing')
                    continue
            # if given this, we want to filter from instance by profile id
            if instanceA_profile_filter_id:
                quality_profile_id = content.get('qualityProfileId')
                if instanceA_profile_filter_id != quality_profile_id:
                    logging.debug(f'Skipping content {title} - mismatched quality_profile_id {quality_profile_id} with instanceA_profile_filter_id {instanceA_profile_filter_id}')
                    continue
            # if given quality filter we want to filter if quality from instanceA isnt high enough yet
            if is_radarr and instanceA_quality_match:
                content_quality = content.get('movieFile', {}).get('quality', {}).get('quality', {}).get('name', '')
                # NOTE(review): items with an empty quality name pass this filter - confirm intended
                if content_quality and not re.match(instanceA_quality_match, content_quality):
                    logging.debug(f'Skipping content {title} - mismatched content_quality {content_quality} with instanceA_quality_match {instanceA_quality_match}')
                    continue
            # if given tag filter then filter by tag - (Sonarr/Radarr v3 only)
            if (is_sonarr or is_radarr) and instanceA_tag_filter_id:
                content_tag_ids = content.get('tags')
                if not (set(content_tag_ids) & set(instanceA_tag_filter_id)):
                    logging.debug(f'Skipping content {title} - mismatched content_tag_ids {content_tag_ids} with instanceA_tag_filter_id {instanceA_tag_filter_id}')
                    continue
            # if black list given then dont sync matching slugs/ids
            if instanceA_blacklist:
                title_slug = content.get('titleSlug') or content.get('foreignArtistId')
                if title_slug in instanceA_blacklist:
                    logging.debug(f'Skipping content {title} - blacklist slug: {title_slug}')
                    continue
                content_id = str(content.get('id'))
                if content_id in instanceA_blacklist:
                    logging.debug(f'Skipping content {title} - blacklist ID: {content_id}')
                    continue
            # generate content from instance A to sync into instance B
            formatted_content = get_content_details(
                content=dict(content),
                instance_path=instance_path,
                instance_profile_id=instanceB_profile_id,
                instance_url=instanceB_url,
                instance_language_id=instanceB_language_id,
            )
            instanceB_content_url = get_content_path(instanceB_url, instanceB_key)
            if is_test_run:
                logging.info('content title "{0}" synced successfully (test only)'.format(title))
            elif content_not_synced:
                # sync content if not synced
                logging.info(f'syncing content title "{title}"')
                sync_response = instanceB_session.post(instanceB_content_url, json=formatted_content)
                # check response and save content id for searching later on if success
                if sync_response.status_code != 201 and sync_response.status_code != 200:
                    logger.error(f'server sync error for {title} - response: {sync_response.text}')
                else:
                    try:
                        search_ids.append(int(sync_response.json()['id']))
                    except:
                        logger.error(f'Could not decode sync response from {instanceB_content_url}')
                    logging.info('content title "{0}" synced successfully'.format(title))
            elif sync_monitor:
                # else if is already synced and we want to sync monitoring then sync that now
                # find matching content from instance B to check monitored status
                matching_content_instanceB = list(filter(lambda content_instanceB: content_instanceB['titleSlug'] == content.get('titleSlug'), instanceB_contents))
                if(len(matching_content_instanceB) == 1):
                    matching_content_instanceB = matching_content_instanceB[0]
                    # if we found a content match from instance B, then check monitored status - if different then sync from A to B
                    if matching_content_instanceB['monitored'] != content['monitored']:
                        matching_content_instanceB['monitored'] = content['monitored']
                        instanceB_content_url = get_content_put_path(instanceB_url, instanceB_key, matching_content_instanceB.get('id'))
                        sync_response = instanceB_session.put(instanceB_content_url, json=matching_content_instanceB)
                        # check response and save content id for searching later on if success
                        if sync_response.status_code != 202:
                            logger.error(f'server monitoring sync error for {title} - response: {sync_response.text}')
                        else:
                            try:
                                search_ids.append(int(sync_response.json()['id']))
                            except:
                                logger.error(f'Could not decode sync response from {instanceB_content_url}')
                            logging.info('content title "{0}" monitoring synced successfully'.format(title))
    logging.info(f'{len(search_ids)} contents synced successfully')
def get_instance_contents(instance_url, instance_key, instance_session, instance_name=''):
    """Fetch all content items from one instance.

    Returns a tuple of (contents, content_ids), where contents is the decoded
    JSON list from the instance and content_ids holds each item's value for
    the globally configured content_id_key.

    Exits (or raises, in docker) on HTTP or decode errors.
    """
    instance_contentIds = []
    instance_content_url = get_content_path(instance_url, instance_key)
    instance_contents = instance_session.get(instance_content_url)
    if instance_contents.status_code != 200:
        logger.error('instance{} server error - response {}'.format(instance_name, instance_contents.status_code))
        exit_system()
    else:
        try:
            instance_contents = instance_contents.json()
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logger.error(f'Could not decode contents from {instance_content_url}')
            exit_system()
    for content_to_sync in instance_contents:
        instance_contentIds.append(content_to_sync[content_id_key])
    logger.debug('{} contents in instance {}'.format(len(instance_contentIds), instance_name))
    return instance_contents, instance_contentIds
def check_status(instance_session, instance_url, instance_key, instance_name=''):
    """Query an instance's status endpoint and return the decoded response.

    Exits (or raises, when running in docker) if the instance is unreachable,
    returns a non-200 status, or reports an error payload.
    """
    global api_version
    instance_status_url = get_status_path(instance_url, instance_key)
    error_message = f'Could not connect to instance{instance_name}: {instance_status_url}'
    status_response = None
    try:
        status_response = instance_session.get(instance_status_url)
        if status_response.status_code != 200:
            logger.error(error_message)
            exit_system()
    except:
        logger.error(error_message)
        exit_system()
    if status_response is None:
        logger.error(error_message)
        exit_system()
    else:
        try:
            status_response = status_response.json()
        except Exception as error:
            # a non-JSON body is tolerated only if we somehow already hold a dict
            if not isinstance(status_response, dict):
                logger.error(
                    f"Could not retrieve status for {instance_status_url}: {status_response} - {error}")
                exit_system()
    if(status_response.get('error')):
        logger.error(f"{instance_status_url} error {status_response.get('error')}")
        exit_system()
    logger.debug(f"{instance_status_url} version {status_response.get('version')}")
    return status_response
def sync_content():
    """Top-level sync driver.

    Resolves profile/tag/language names to ids for both instances, fetches
    both content lists, then syncs A -> B (and B -> A with swapped arguments
    when sync_bidirectionally is set).
    """
    global instanceA_profile_id, instanceA_profile, instanceB_profile_id, instanceB_profile, instanceA_profile_filter, instanceA_profile_filter_id, instanceB_profile_filter, instanceB_profile_filter_id, tested_api_version, instanceA_language_id, instanceA_language, instanceB_language_id, instanceB_language, instanceA_quality_match, instanceB_quality_match, is_sonarr, instanceA_tag_filter_id, instanceA_tag_filter, instanceB_tag_filter_id, instanceB_tag_filter, is_radarr, instanceA_blacklist, instanceB_blacklist
    # get sessions; trust_env=False ignores proxy settings from the environment
    instanceA_session = requests.Session()
    instanceA_session.trust_env = False
    instanceB_session = requests.Session()
    instanceB_session.trust_env = False
    # if given a profile instead of a profile id then try to find the profile id
    if not instanceA_profile_id and instanceA_profile:
        instanceA_profile_id = get_profile_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_profile, 'A')
    if not instanceB_profile_id and instanceB_profile:
        instanceB_profile_id = get_profile_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_profile, 'B')
    logger.debug({
        'instanceA_profile_id': instanceA_profile_id,
        'instanceA_profile': instanceA_profile,
        'instanceB_profile_id': instanceB_profile_id,
        'instanceB_profile': instanceB_profile,
    })
    # do the same for profile id filters if they exist
    if not instanceA_profile_filter_id and instanceA_profile_filter:
        instanceA_profile_filter_id = get_profile_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_profile_filter, 'A')
    if not instanceB_profile_filter_id and instanceB_profile_filter:
        instanceB_profile_filter_id = get_profile_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_profile_filter, 'B')
    logger.debug({
        'instanceAprofile_filter_id': instanceA_profile_filter_id,
        'instanceAprofile_filter': instanceA_profile_filter,
        'instanceBprofile_filter_id': instanceB_profile_filter_id,
        'instanceBprofile_filter': instanceB_profile_filter,
    })
    # do the same for tag id filters if they exist - (Sonarr/Radarr only)
    if is_sonarr or is_radarr:
        if not instanceA_tag_filter_id and instanceA_tag_filter:
            instanceA_tag_filter_id = get_tag_from_id(instanceA_session, instanceA_url, instanceA_key, instanceA_tag_filter, 'A')
        if not instanceB_tag_filter_id and instanceB_tag_filter:
            # fix: instance B's tag filter was resolved from instanceA_tag_filter
            # (copy-paste bug), so B's filter silently used A's tag names
            instanceB_tag_filter_id = get_tag_from_id(instanceB_session, instanceB_url, instanceB_key, instanceB_tag_filter, 'B')
        logger.debug({
            'instanceA_tag_filter': instanceA_tag_filter,
            # fix: this debug entry logged instanceA_profile_filter under a
            # misleading key; log the resolved tag filter id instead
            'instanceA_tag_filter_id': instanceA_tag_filter_id,
            'instanceB_tag_filter_id': instanceB_tag_filter_id,
            'instanceB_tag_filter': instanceB_tag_filter,
        })
    # if given language instead of language id then try to find the lanaguage id - (only Sonarr v3)
    if is_sonarr:
        if not instanceA_language_id and instanceA_language:
            instanceA_language_id = get_language_from_id(
                instance_session=instanceA_session,
                instance_url=instanceA_url,
                instance_key=instanceA_key,
                instance_language=instanceA_language,
                instance_name='A'
            )
        if not instanceB_language_id and instanceB_language:
            instanceB_language_id = get_language_from_id(
                instance_session=instanceB_session,
                instance_url=instanceB_url,
                instance_key=instanceB_key,
                instance_language=instanceB_language,
                instance_name='B'
            )
        logger.debug({
            'instanceA_language_id': instanceA_language_id,
            'instanceA_language': instanceA_language,
            'instanceB_language_id': instanceB_language_id,
            'instanceB_language': instanceB_language,
            'is_sonarr': is_sonarr,
            'api_version': api_version,
        })
    # get contents to compare
    instanceA_contents, instanceA_contentIds = get_instance_contents(instanceA_url, instanceA_key, instanceA_session, instance_name='A')
    instanceB_contents, instanceB_contentIds = get_instance_contents(instanceB_url, instanceB_key, instanceB_session, instance_name='B')
    logger.info('syncing content from instance A to instance B')
    sync_servers(
        instanceA_contents=instanceA_contents,
        instanceB_contents=instanceB_contents,
        instanceB_contentIds=instanceB_contentIds,
        instanceB_language_id=instanceB_language_id,
        instanceB_path=instanceB_path,
        instanceB_profile_id=instanceB_profile_id,
        instanceB_session=instanceB_session,
        instanceB_url=instanceB_url,
        instanceA_profile_filter_id=instanceA_profile_filter_id,
        instanceB_key=instanceB_key,
        instanceA_quality_match=instanceA_quality_match,
        instanceA_tag_filter_id=instanceA_tag_filter_id,
        instanceA_blacklist=instanceA_blacklist
    )
    # if given bidirectional flag then sync from instance B to instance A
    if sync_bidirectionally:
        logger.info('syncing content from instance B to instance A')
        sync_servers(
            instanceA_contents=instanceB_contents,
            instanceB_contents=instanceA_contents,
            instanceB_contentIds=instanceA_contentIds,
            instanceB_language_id=instanceA_language_id,
            instanceB_path=instanceA_path,
            instanceB_profile_id=instanceA_profile_id,
            instanceB_session=instanceA_session,
            instanceB_url=instanceA_url,
            instanceA_profile_filter_id=instanceB_profile_filter_id,
            instanceB_key=instanceA_key,
            instanceA_quality_match=instanceB_quality_match,
            instanceA_tag_filter_id=instanceB_tag_filter_id,
            instanceA_blacklist=instanceB_blacklist
        )
########################################################################################################################
def exit_system():
    """Abort the current sync: raise inside docker, exit the process otherwise.

    In docker the raised exception is caught by the top-level retry loop so
    the container stays alive ("we dont want to exit if in docker").
    """
    if is_in_docker:
        raise Exception
    else:
        sys.exit(0)
if is_in_docker:
    logger.info('syncing every {} seconds'.format(instance_sync_interval_seconds))
sync_content()
# inside docker, keep syncing forever on the configured interval
if is_in_docker:
    while True:
        try:
            time.sleep(instance_sync_interval_seconds)
            sync_content()
        except Exception as inst:
            # fix: the exception was previously assigned to a throwaway
            # variable and discarded; log it so failed runs are visible
            logger.error('sync run failed: {}'.format(inst))
|
# ble_command_load_group.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Wed, Sep 1, 2021 5:05:57 PM
import sys
import asyncio
import logging
import argparse
from typing import Optional
from binascii import hexlify
from bleak import BleakClient
from tutorial_modules import GOPRO_BASE_UUID, connect_ble
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
async def main(identifier: Optional[str]) -> None:
    """Connect to a GoPro over BLE and load the video preset group."""
    # Synchronization event to wait until notification response is received
    event = asyncio.Event()
    # UUIDs to write to and receive responses from
    COMMAND_REQ_UUID = GOPRO_BASE_UUID.format("0072")
    COMMAND_RSP_UUID = GOPRO_BASE_UUID.format("0073")
    response_uuid = COMMAND_RSP_UUID
    client: BleakClient

    def notification_handler(handle: int, data: bytes) -> None:
        # fix: the hexlify separator reused the f-string's own single quotes,
        # which is a SyntaxError on Python < 3.12 (PEP 701)
        logger.info(f'Received response at {handle=}: {hexlify(data, ":")!r}')
        # If this is the correct handle and the status is success, the command was a success
        if client.services.characteristics[handle].uuid == response_uuid and data[2] == 0x00:
            logger.info("Command sent successfully")
        # Anything else is unexpected. This shouldn't happen
        else:
            logger.error("Unexpected response")
        # Notify the writer
        event.set()

    client = await connect_ble(notification_handler, identifier)
    # Write to command request BleUUID to load the video preset group
    logger.info("Loading the video preset group...")
    event.clear()
    await client.write_gatt_char(COMMAND_REQ_UUID, bytearray([0x04, 0x3E, 0x02, 0x03, 0xE8]))
    await event.wait()  # Wait to receive the notification response
    await client.disconnect()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Connect to a GoPro camera, then change the Preset Group to Video."
)
parser.add_argument(
"-i",
"--identifier",
type=str,
help="Last 4 digits of GoPro serial number, which is the last 4 digits of the default camera SSID. If not used, first discovered GoPro will be connected to",
default=None,
)
args = parser.parse_args()
try:
asyncio.run(main(args.identifier))
except:
sys.exit(-1)
else:
sys.exit(0)
| # ble_command_load_group.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Wed, Sep 1, 2021 5:05:57 PM
import sys
import asyncio
import logging
import argparse
from typing import Optional
from binascii import hexlify
from bleak import BleakClient
from tutorial_modules import GOPRO_BASE_UUID, connect_ble
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
async def main(identifier: Optional[str]) -> None:
    """Connect to a GoPro over BLE and load the video preset group."""
    # Synchronization event to wait until notification response is received
    event = asyncio.Event()
    # UUIDs to write to and receive responses from
    COMMAND_REQ_UUID = GOPRO_BASE_UUID.format("0072")
    COMMAND_RSP_UUID = GOPRO_BASE_UUID.format("0073")
    response_uuid = COMMAND_RSP_UUID
    client: BleakClient
    def notification_handler(handle: int, data: bytes) -> None:
        logger.info(f'Received response at {handle=}: {hexlify(data, ":")!r}')
        # If this is the correct handle and the status is success, the command was a success
        if client.services.characteristics[handle].uuid == response_uuid and data[2] == 0x00:
            logger.info("Command sent successfully")
        # Anything else is unexpected. This shouldn't happen
        else:
            logger.error("Unexpected response")
        # Notify the writer
        event.set()
    client = await connect_ble(notification_handler, identifier)
    # Write to command request BleUUID to load the video preset group
    # (presumably 0x03E8 selects the video preset group - see the Open GoPro spec)
    logger.info("Loading the video preset group...")
    event.clear()
    await client.write_gatt_char(COMMAND_REQ_UUID, bytearray([0x04, 0x3E, 0x02, 0x03, 0xE8]))
    await event.wait()  # Wait to receive the notification response
    await client.disconnect()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Connect to a GoPro camera, then change the Preset Group to Video."
)
parser.add_argument(
"-i",
"--identifier",
type=str,
help="Last 4 digits of GoPro serial number, which is the last 4 digits of the default camera SSID. If not used, first discovered GoPro will be connected to",
default=None,
)
args = parser.parse_args()
try:
asyncio.run(main(args.identifier))
except:
sys.exit(-1)
else:
sys.exit(0)
|
import cProfile
import json
import logging
import os
import pstats
import signal
import tempfile
import time
import traceback
from django.conf import settings
from django.utils.timezone import now as tz_now
from django.db import DatabaseError, OperationalError, connection as django_connection
from django.db.utils import InterfaceError, InternalError
import psutil
import redis
from awx.main.consumers import emit_channel_notification
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
InventoryUpdateEvent, SystemJobEvent, UnifiedJob,
Job)
from awx.main.tasks import handle_success_and_failure_notifications
from awx.main.models.events import emit_event_detail
from .base import BaseWorker
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
class CallbackBrokerWorker(BaseWorker):
    '''
    A worker implementation that deserializes callback event data and persists
    it into the database.
    The code that *generates* these types of messages is found in the
    ansible-runner display callback plugin.
    '''
    MAX_RETRIES = 2           # DB save attempts per event before giving up
    last_stats = time.time()  # timestamp of the last statistics publish to redis
    total = 0                 # events read by this worker so far
    last_event = ''           # short description of the most recent event
    prof = None               # active cProfile.Profile when profiling is toggled on
    def __init__(self):
        # per-job event buffers, flushed to the DB in bulk
        self.buff = {}
        self.pid = os.getpid()
        self.redis = redis.Redis.from_url(settings.BROKER_URL)
        # drop stale statistics keys left behind by previous worker processes
        for key in self.redis.keys('awx_callback_receiver_statistics_*'):
            self.redis.delete(key)
def read(self, queue):
try:
res = self.redis.blpop(settings.CALLBACK_QUEUE, timeout=settings.JOB_EVENT_BUFFER_SECONDS)
if res is None:
return {'event': 'FLUSH'}
self.total += 1
return json.loads(res[1])
except redis.exceptions.RedisError:
logger.exception("encountered an error communicating with redis")
time.sleep(1)
except (json.JSONDecodeError, KeyError):
logger.exception("failed to decode JSON message from redis")
finally:
self.record_statistics()
return {'event': 'FLUSH'}
def record_statistics(self):
# buffer stat recording to once per (by default) 5s
if time.time() - self.last_stats > settings.JOB_EVENT_STATISTICS_INTERVAL:
try:
self.redis.set(f'awx_callback_receiver_statistics_{self.pid}', self.debug())
self.last_stats = time.time()
except Exception:
logger.exception("encountered an error communicating with redis")
self.last_stats = time.time()
def debug(self):
return f'. worker[pid:{self.pid}] sent={self.total} rss={self.mb}MB {self.last_event}'
@property
def mb(self):
return '{:0.3f}'.format(
psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0
)
def toggle_profiling(self, *args):
if self.prof:
self.prof.disable()
filename = f'callback-{self.pid}.pstats'
filepath = os.path.join(tempfile.gettempdir(), filename)
with open(filepath, 'w') as f:
pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
pstats.Stats(self.prof).dump_stats(filepath + '.raw')
self.prof = False
logger.error(f'profiling is disabled, wrote {filepath}')
else:
self.prof = cProfile.Profile()
self.prof.enable()
logger.error('profiling is enabled')
def work_loop(self, *args, **kw):
if settings.AWX_CALLBACK_PROFILE:
signal.signal(signal.SIGUSR1, self.toggle_profiling)
return super(CallbackBrokerWorker, self).work_loop(*args, **kw)
def flush(self, force=False):
now = tz_now()
if (
force or
any([len(events) >= 1000 for events in self.buff.values()])
):
for cls, events in self.buff.items():
logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
for e in events:
if not e.created:
e.created = now
e.modified = now
try:
cls.objects.bulk_create(events)
except Exception:
# if an exception occurs, we should re-attempt to save the
# events one-by-one, because something in the list is
# broken/stale
for e in events:
try:
e.save()
except Exception:
logger.exception('Database Error Saving Job Event')
for e in events:
emit_event_detail(e)
self.buff = {}
def perform_work(self, body):
try:
flush = body.get('event') == 'FLUSH'
if flush:
self.last_event = ''
if not flush:
event_map = {
'job_id': JobEvent,
'ad_hoc_command_id': AdHocCommandEvent,
'project_update_id': ProjectUpdateEvent,
'inventory_update_id': InventoryUpdateEvent,
'system_job_id': SystemJobEvent,
}
job_identifier = 'unknown job'
for key, cls in event_map.items():
if key in body:
job_identifier = body[key]
break
self.last_event = f'\n\t- {cls.__name__} for #{job_identifier} ({body.get('event', '')} {body.get('uuid', '')})' # noqa
if body.get('event') == 'EOF':
try:
final_counter = body.get('final_counter', 0)
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
# EOF events are sent when stdout for the running task is
# closed. don't actually persist them to the database; we
# just use them to report `summary` websocket events as an
# approximation for when a job is "done"
emit_channel_notification(
'jobs-summary',
dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
)
# Additionally, when we've processed all events, we should
# have all the data we need to send out success/failure
# notification templates
uj = UnifiedJob.objects.get(pk=job_identifier)
if isinstance(uj, Job):
# *actual playbooks* send their success/failure
# notifications in response to the playbook_on_stats
# event handling code in main.models.events
pass
elif hasattr(uj, 'send_notification_templates'):
handle_success_and_failure_notifications.apply_async([uj.id])
except Exception:
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
return
event = cls.create_from_data(**body)
self.buff.setdefault(cls, []).append(event)
retries = 0
while retries <= self.MAX_RETRIES:
try:
self.flush(force=flush)
break
except (OperationalError, InterfaceError, InternalError):
if retries >= self.MAX_RETRIES:
logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
return
delay = 60 * retries
logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
i=retries + 1,
delay=delay
))
django_connection.close()
time.sleep(delay)
retries += 1
except DatabaseError:
logger.exception('Database Error Saving Job Event')
break
except Exception as exc:
tb = traceback.format_exc()
logger.error('Callback Task Processor Raised Exception: %r', exc)
logger.error('Detail: {}'.format(tb))
| import cProfile
import json
import logging
import os
import pstats
import signal
import tempfile
import time
import traceback
from django.conf import settings
from django.utils.timezone import now as tz_now
from django.db import DatabaseError, OperationalError, connection as django_connection
from django.db.utils import InterfaceError, InternalError
import psutil
import redis
from awx.main.consumers import emit_channel_notification
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
InventoryUpdateEvent, SystemJobEvent, UnifiedJob,
Job)
from awx.main.tasks import handle_success_and_failure_notifications
from awx.main.models.events import emit_event_detail
from .base import BaseWorker
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
class CallbackBrokerWorker(BaseWorker):
    '''
    A worker implementation that deserializes callback event data and persists
    it into the database.
    The code that *generates* these types of messages is found in the
    ansible-runner display callback plugin.
    '''
    MAX_RETRIES = 2  # flush retry attempts after a recoverable database error
    last_stats = time.time()  # when statistics were last written to redis
    total = 0  # number of events read off the queue by this worker
    last_event = ''  # short description of the most recent event, for debug output
    prof = None  # active cProfile.Profile while SIGUSR1 profiling is enabled

    def __init__(self):
        # per-model buffer of unsaved event objects, flushed in bulk by flush()
        self.buff = {}
        self.pid = os.getpid()
        self.redis = redis.Redis.from_url(settings.BROKER_URL)
        # drop statistics keys left behind by previous worker processes
        for key in self.redis.keys('awx_callback_receiver_statistics_*'):
            self.redis.delete(key)

    def read(self, queue):
        """Pop and decode one event from the callback queue.

        Always returns a dict; on timeout or any redis/JSON error a synthetic
        FLUSH event is returned so the caller keeps running.
        """
        try:
            res = self.redis.blpop(settings.CALLBACK_QUEUE, timeout=settings.JOB_EVENT_BUFFER_SECONDS)
            if res is None:
                return {'event': 'FLUSH'}
            self.total += 1
            return json.loads(res[1])
        except redis.exceptions.RedisError:
            logger.exception("encountered an error communicating with redis")
            time.sleep(1)
        except (json.JSONDecodeError, KeyError):
            logger.exception("failed to decode JSON message from redis")
        finally:
            self.record_statistics()
        return {'event': 'FLUSH'}

    def record_statistics(self):
        """Write this worker's debug summary to redis, rate-limited."""
        # buffer stat recording to once per (by default) 5s
        if time.time() - self.last_stats > settings.JOB_EVENT_STATISTICS_INTERVAL:
            try:
                self.redis.set(f'awx_callback_receiver_statistics_{self.pid}', self.debug())
                self.last_stats = time.time()
            except Exception:
                logger.exception("encountered an error communicating with redis")
                self.last_stats = time.time()

    def debug(self):
        """Return a one-line summary of this worker's state."""
        return f'. worker[pid:{self.pid}] sent={self.total} rss={self.mb}MB {self.last_event}'

    @property
    def mb(self):
        """Resident set size of this worker process, formatted in megabytes."""
        return '{:0.3f}'.format(
            psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0
        )

    def toggle_profiling(self, *args):
        """SIGUSR1 handler: toggle cProfile-based profiling on or off.

        When turning profiling off, dump both human-readable and raw pstats
        files into the system temp directory.
        """
        if self.prof:
            self.prof.disable()
            filename = f'callback-{self.pid}.pstats'
            filepath = os.path.join(tempfile.gettempdir(), filename)
            with open(filepath, 'w') as f:
                pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
            pstats.Stats(self.prof).dump_stats(filepath + '.raw')
            self.prof = False
            # logged at ERROR so the message shows regardless of log level
            logger.error(f'profiling is disabled, wrote {filepath}')
        else:
            self.prof = cProfile.Profile()
            self.prof.enable()
            logger.error('profiling is enabled')

    def work_loop(self, *args, **kw):
        """Install the profiling signal handler (if enabled), then run the base loop."""
        if settings.AWX_CALLBACK_PROFILE:
            signal.signal(signal.SIGUSR1, self.toggle_profiling)
        return super(CallbackBrokerWorker, self).work_loop(*args, **kw)

    def flush(self, force=False):
        """Bulk-insert buffered events once any model's buffer reaches 1000.

        With force=True, flush all buffers regardless of size.  Events that
        fail the bulk insert are retried one-by-one so a single broken event
        does not discard the rest.
        """
        now = tz_now()
        if (
            force or
            any([len(events) >= 1000 for events in self.buff.values()])
        ):
            for cls, events in self.buff.items():
                logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
                for e in events:
                    if not e.created:
                        e.created = now
                    e.modified = now
                try:
                    cls.objects.bulk_create(events)
                except Exception:
                    # if an exception occurs, we should re-attempt to save the
                    # events one-by-one, because something in the list is
                    # broken/stale
                    for e in events:
                        try:
                            e.save()
                        except Exception:
                            logger.exception('Database Error Saving Job Event')
                for e in events:
                    emit_event_detail(e)
            self.buff = {}

    def perform_work(self, body):
        """Buffer one event (or flush on a FLUSH event), with DB retry logic."""
        try:
            flush = body.get('event') == 'FLUSH'
            if flush:
                self.last_event = ''
            if not flush:
                # map the foreign-key field present in the payload to the
                # concrete event model that should store it
                event_map = {
                    'job_id': JobEvent,
                    'ad_hoc_command_id': AdHocCommandEvent,
                    'project_update_id': ProjectUpdateEvent,
                    'inventory_update_id': InventoryUpdateEvent,
                    'system_job_id': SystemJobEvent,
                }
                job_identifier = 'unknown job'
                for key, cls in event_map.items():
                    if key in body:
                        job_identifier = body[key]
                        break
                self.last_event = f'\n\t- {cls.__name__} for #{job_identifier} ({body.get("event", "")} {body.get("uuid", "")})'  # noqa
                if body.get('event') == 'EOF':
                    try:
                        final_counter = body.get('final_counter', 0)
                        logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
                        # EOF events are sent when stdout for the running task is
                        # closed. don't actually persist them to the database; we
                        # just use them to report `summary` websocket events as an
                        # approximation for when a job is "done"
                        emit_channel_notification(
                            'jobs-summary',
                            dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
                        )
                        # Additionally, when we've processed all events, we should
                        # have all the data we need to send out success/failure
                        # notification templates
                        uj = UnifiedJob.objects.get(pk=job_identifier)
                        if isinstance(uj, Job):
                            # *actual playbooks* send their success/failure
                            # notifications in response to the playbook_on_stats
                            # event handling code in main.models.events
                            pass
                        elif hasattr(uj, 'send_notification_templates'):
                            handle_success_and_failure_notifications.apply_async([uj.id])
                    except Exception:
                        logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
                    return
                event = cls.create_from_data(**body)
                self.buff.setdefault(cls, []).append(event)
            retries = 0
            while retries <= self.MAX_RETRIES:
                try:
                    self.flush(force=flush)
                    break
                except (OperationalError, InterfaceError, InternalError):
                    # connectivity-style errors: close the connection so Django
                    # reconnects, back off, and retry up to MAX_RETRIES times
                    if retries >= self.MAX_RETRIES:
                        logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
                        return
                    delay = 60 * retries
                    logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
                        i=retries + 1,
                        delay=delay
                    ))
                    django_connection.close()
                    time.sleep(delay)
                    retries += 1
                except DatabaseError:
                    logger.exception('Database Error Saving Job Event')
                    break
        except Exception as exc:
            tb = traceback.format_exc()
            logger.error('Callback Task Processor Raised Exception: %r', exc)
            logger.error('Detail: {}'.format(tb))
|
''' SPEECH-TO-TEXT USING MICROSOFT SPEECH API '''
''' nonstoptimm@gmail.com '''
# Import required packages
import os
import glob
import json
import logging
import codecs
import helper as he
import azure.cognitiveservices.speech as speechsdk
import params as pa
# Load and set configuration parameters
pa.get_config()
def request_endpoint(audio, speech_config, output_directory, lexical):
    """Transcribe a single audio file via the speech service endpoint.

    Args:
        audio: Path of the audio file to transcribe
        speech_config: Configured speechsdk.SpeechConfig instance
        output_directory: Output directory for transcription results
        lexical: Boolean to enable the extended lexical version of the STT result

    Returns:
        text: Processed recognition result as string
        filename: Base name of the transcribed audio file
    """
    audio_config = speechsdk.audio.AudioConfig(filename = audio)
    speech_recognizer = speechsdk.SpeechRecognizer(speech_config = speech_config, audio_config = audio_config)
    result = speech_recognizer.recognize_once()
    # os.path.basename handles both '/' and '\\' separators; the previous
    # audio.rindex('\\') raised ValueError for POSIX-style paths.
    filename = os.path.basename(audio)
    text = process_recognition(result, filename, output_directory, lexical)
    return text, filename
def process_recognition(result, filename, output_directory, lexical):
    """Process recognition received from the speech service
    Args:
        result: Result object returned by STT-service
        filename: Filename for output file
        output_directory: Output directory for the file
        lexical: Boolean to enable extended lexical version of STT-result
    Returns:
        text: Processed recognition as string (empty on failure)
    """
    # Default to empty so an unexpected result.reason can never leave `text`
    # unbound (the original raised UnboundLocalError in that case).
    text = ""
    if result.reason == speechsdk.ResultReason.RecognizedSpeech:
        if lexical:
            # FIX: single quotes inside the double-quoted f-string — reusing
            # the same quote character is a SyntaxError before Python 3.12.
            text = f"{format(result.text)}\t{json.loads(result.json)['NBest'][0]['Lexical']}"
        else:
            text = f"{format(result.text)}"
        logging.info(f"[INFO] - Recognition successful: (unknown) -> {result.text}")
    elif result.reason == speechsdk.ResultReason.NoMatch:
        logging.warning(filename + "\t" + f"No speech could be recognized: {result.no_match_details}")
    elif result.reason == speechsdk.ResultReason.Canceled:
        cancellation_details = result.cancellation_details
        logging.error(filename+"\t"+ f"Speech Recognition canceled: {cancellation_details.reason}")
        if cancellation_details.reason == speechsdk.CancellationReason.Error:
            logging.error(f"Error details: {cancellation_details.error_details}")
    return text
# General Function
def write_transcription(output_directory, text):
    """Append one transcription line to <output_directory>/transcriptions.txt.

    Args:
        output_directory: Output directory for the file
        text: Processed recognition as string

    Returns:
        None; writes the line (with trailing newline) to the file
    """
    transcript_path = f'{output_directory}/transcriptions.txt'
    # Create the file first so the UTF-8 BOM lands exactly once at the start;
    # subsequent appends at a non-zero offset do not re-emit the BOM.
    if not os.path.exists(transcript_path):
        with open(transcript_path, 'w', encoding='utf-8-sig'):
            pass
        logging.warning(f'[INFO] - Created transcript file with utf-8 bom encoding.')
    # The context manager closes the file; the explicit close() the original
    # performed inside the with-block was redundant.
    with open(transcript_path, "a", encoding='utf-8-sig') as transfile:
        transfile.write(f'{text}\n')
def main(speech_files, output_directory, lexical = False, enable_proxy = False, *argv):
    """Main function for STT-functionality
    Args:
        speech_files: Directory of audio files to be transcribed
        output_directory: Output directory for the file
        lexical: Boolean to enable extended lexical version of STT-result
        enable_proxy: Boolean to enable proxy function in case you need it
        *argv: Proxy information if enable_proxy is True -> hostname: str, port: str, username: str, password: str
    Returns:
        zip(filenames, results): Zipped lists of filenames and STT-results as string
    Raises:
        RuntimeError: If the speech config cannot be created
    """
    try:
        speech_config = speechsdk.SpeechConfig(subscription = pa.config_data['stt_key'], region = pa.config_data['stt_region'])
    except RuntimeError:
        # Without a valid config nothing below can work; re-raise instead of
        # falling through to a confusing NameError on `speech_config`.
        logging.error("[ERROR] - Could not retrieve speech config")
        raise
    # If necessary, you can enable a proxy here:
    # set_proxy(hostname: str, port: str, username: str, password: str)
    if enable_proxy:
        speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3])
    # Set speech service properties, requesting the detailed response format to make it compatible with lexical format, if wanted
    speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter)
    if pa.config_data['stt_endpoint'] != "":
        speech_config.endpoint_id = pa.config_data['stt_endpoint']
    logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files')
    results = []
    filenames = []
    # NOTE(review): the '*av' glob presumably targets .wav files — confirm.
    for audio in glob.iglob(f'{speech_files}*av'):
        result, filename = request_endpoint(audio, speech_config, output_directory, lexical)
        results.append(result)
        filenames.append(filename)
    # Check the result
    return zip(filenames, results)
if __name__ == '__main__':
main("input/audio/", "output/test/") | ''' SPEECH-TO-TEXT USING MICROSOFT SPEECH API '''
''' nonstoptimm@gmail.com '''
# Import required packages
import os
import glob
import json
import logging
import codecs
import helper as he
import azure.cognitiveservices.speech as speechsdk
import params as pa
# Load and set configuration parameters
pa.get_config()
def request_endpoint(audio, speech_config, output_directory, lexical):
    """Transcribe a single audio file via the speech service endpoint.

    Args:
        audio: Path of the audio file to transcribe
        speech_config: Configured speechsdk.SpeechConfig instance
        output_directory: Output directory for transcription results
        lexical: Boolean to enable the extended lexical version of the STT result

    Returns:
        text: Processed recognition result as string
        filename: Base name of the transcribed audio file
    """
    audio_config = speechsdk.audio.AudioConfig(filename = audio)
    speech_recognizer = speechsdk.SpeechRecognizer(speech_config = speech_config, audio_config = audio_config)
    result = speech_recognizer.recognize_once()
    # os.path.basename handles both '/' and '\\' separators; the previous
    # audio.rindex('\\') raised ValueError for POSIX-style paths.
    filename = os.path.basename(audio)
    text = process_recognition(result, filename, output_directory, lexical)
    return text, filename
def process_recognition(result, filename, output_directory, lexical):
    """Process recognition received from the speech service
    Args:
        result: Result object returned by STT-service
        filename: Filename for output file
        output_directory: Output directory for the file
        lexical: Boolean to enable extended lexical version of STT-result
    Returns:
        text: Processed recognition as string (empty on failure)
    """
    # Default to empty so an unexpected result.reason can never leave `text`
    # unbound (the original raised UnboundLocalError in that case).
    text = ""
    if result.reason == speechsdk.ResultReason.RecognizedSpeech:
        if lexical:
            text = f"{format(result.text)}\t{json.loads(result.json)['NBest'][0]['Lexical']}"
        else:
            text = f"{format(result.text)}"
        logging.info(f"[INFO] - Recognition successful: (unknown) -> {result.text}")
    elif result.reason == speechsdk.ResultReason.NoMatch:
        logging.warning(filename + "\t" + f"No speech could be recognized: {result.no_match_details}")
    elif result.reason == speechsdk.ResultReason.Canceled:
        cancellation_details = result.cancellation_details
        logging.error(filename+"\t"+ f"Speech Recognition canceled: {cancellation_details.reason}")
        if cancellation_details.reason == speechsdk.CancellationReason.Error:
            logging.error(f"Error details: {cancellation_details.error_details}")
    return text
# General Function
def write_transcription(output_directory, text):
    """Append one transcription line to <output_directory>/transcriptions.txt.

    Args:
        output_directory: Output directory for the file
        text: Processed recognition as string

    Returns:
        None; writes the line (with trailing newline) to the file
    """
    transcript_path = f'{output_directory}/transcriptions.txt'
    # Create the file first so the UTF-8 BOM lands exactly once at the start;
    # subsequent appends at a non-zero offset do not re-emit the BOM.
    if not os.path.exists(transcript_path):
        with open(transcript_path, 'w', encoding='utf-8-sig'):
            pass
        logging.warning(f'[INFO] - Created transcript file with utf-8 bom encoding.')
    # The context manager closes the file; the explicit close() the original
    # performed inside the with-block was redundant.
    with open(transcript_path, "a", encoding='utf-8-sig') as transfile:
        transfile.write(f'{text}\n')
def main(speech_files, output_directory, lexical = False, enable_proxy = False, *argv):
    """Main function for STT-functionality
    Args:
        speech_files: Directory of audio files to be transcribed
        output_directory: Output directory for the file
        lexical: Boolean to enable extended lexical version of STT-result
        enable_proxy: Boolean to enable proxy function in case you need it
        *argv: Proxy information if enable_proxy is True -> hostname: str, port: str, username: str, password: str
    Returns:
        zip(filenames, results): Zipped lists of filenames and STT-results as string
    Raises:
        RuntimeError: If the speech config cannot be created
    """
    try:
        speech_config = speechsdk.SpeechConfig(subscription = pa.config_data['stt_key'], region = pa.config_data['stt_region'])
    except RuntimeError:
        # Without a valid config nothing below can work; re-raise instead of
        # falling through to a confusing NameError on `speech_config`.
        logging.error("[ERROR] - Could not retrieve speech config")
        raise
    # If necessary, you can enable a proxy here:
    # set_proxy(hostname: str, port: str, username: str, password: str)
    if enable_proxy:
        speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3])
    # Set speech service properties, requesting the detailed response format to make it compatible with lexical format, if wanted
    speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter)
    if pa.config_data['stt_endpoint'] != "":
        speech_config.endpoint_id = pa.config_data['stt_endpoint']
    logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files')
    results = []
    filenames = []
    # NOTE(review): the '*av' glob presumably targets .wav files — confirm.
    for audio in glob.iglob(f'{speech_files}*av'):
        result, filename = request_endpoint(audio, speech_config, output_directory, lexical)
        results.append(result)
        filenames.append(filename)
    # Check the result
    return zip(filenames, results)
if __name__ == '__main__':
main("input/audio/", "output/test/") |
#!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
"""Entry point for cwltool."""
import argparse
import copy
import functools
import io
import logging
import os
import signal
import subprocess # nosec
import sys
import time
import urllib
import warnings
from codecs import StreamWriter, getwriter
from collections.abc import MutableMapping, MutableSequence
from typing import (
IO,
Any,
Callable,
Dict,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sized,
TextIO,
Tuple,
Union,
cast,
)
import argcomplete
import coloredlogs
import pkg_resources # part of setuptools
import ruamel.yaml
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ruamel.yaml.main import YAML
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import Loader, file_uri, uri_file_path
from schema_salad.sourceline import cmap, strip_dup_lineno
from schema_salad.utils import ContextType, FetcherCallableType, json_dumps, yaml_no_ts
from . import CWL_CONTENT_TYPES, workflow
from .argparser import arg_parser, generate_parser, get_default_args
from .context import LoadingContext, RuntimeContext, getdefault
from .cwlrdf import printdot, printrdf
from .errors import (
ArgumentException,
GraphTargetMissingException,
UnsupportedRequirement,
WorkflowException,
)
from .executors import JobExecutor, MultithreadedJobExecutor, SingleJobExecutor
from .load_tool import (
default_loader,
fetch_document,
jobloaderctx,
load_overrides,
make_tool,
resolve_and_validate_document,
resolve_overrides,
resolve_tool_uri,
)
from .loghandler import _logger, configure_logging, defaultStreamHandler
from .mpi import MpiConfig
from .mutation import MutationManager
from .pack import pack
from .process import (
CWL_IANA,
Process,
add_sizes,
mergedirs,
scandeps,
shortname,
use_custom_schema,
use_standard_schema,
)
from .procgenerator import ProcessGenerator
from .provenance import ResearchObject, WritableBagFile
from .resolver import ga4gh_tool_registries, tool_resolver
from .secrets import SecretStore
from .software_requirements import (
DependenciesConfiguration,
get_container_from_software_requirements,
)
from .stdfsaccess import StdFsAccess
from .subgraph import get_process, get_step, get_subgraph
from .update import ALLUPDATES, UPDATES
from .utils import (
DEFAULT_TMP_PREFIX,
CWLObjectType,
CWLOutputAtomType,
CWLOutputType,
HasReqsHints,
adjustDirObjs,
normalizeFilesDirs,
processes_to_kill,
trim_listing,
versionstring,
visit_class,
)
from .workflow import Workflow
def _terminate_processes() -> None:
    """Kill all spawned processes.
    Processes to be killed must be appended to `utils.processes_to_kill`
    as they are spawned.
    An important caveat: since there's no supported way to kill another
    thread in Python, this function cannot stop other threads from
    continuing to execute while it kills the processes that they've
    spawned. This may occasionally lead to unexpected behaviour.
    """
    # It's possible that another thread will spawn a new task while
    # we're executing, so it's not safe to use a for loop here.
    while processes_to_kill:
        process = processes_to_kill.popleft()
        # Normalize the command line to a list so its arguments can be scanned.
        if isinstance(process.args, MutableSequence):
            args = process.args
        else:
            args = [process.args]
        # A --cidfile argument indicates a Docker invocation; the container id
        # file lets us kill the container itself, not just the docker client.
        cidfile = [str(arg).split("=")[1] for arg in args if "--cidfile" in str(arg)]
        if cidfile:  # Try to be nice
            try:
                with open(cidfile[0]) as inp_stream:
                    p = subprocess.Popen(  # nosec
                        ["docker", "kill", inp_stream.read()], shell=False  # nosec
                    )
                    try:
                        # give `docker kill` a bounded window before forcing it
                        p.wait(timeout=10)
                    except subprocess.TimeoutExpired:
                        p.kill()
            except FileNotFoundError:
                # cidfile never got written; fall through to killing the process
                pass
        if process.stdin:
            process.stdin.close()
        try:
            # allow up to 10s for a graceful exit before the unconditional kill
            process.wait(10)
        except subprocess.TimeoutExpired:
            pass
        process.kill()  # Always kill, even if we tried with the cidfile
def _signal_handler(signum: int, _: Any) -> None:
    """Terminate every spawned child process, then exit with *signum*.

    A race remains: another thread may spawn a fresh process after the
    cleanup pass completes but before the interpreter exits. See the
    docstring of _terminate_processes() for further caveats.
    """
    _terminate_processes()
    sys.exit(signum)
def generate_example_input(
    inptype: Optional[CWLOutputType],
    default: Optional[CWLOutputType],
) -> Tuple[Any, str]:
    """Convert a single input schema into an example.

    Returns a tuple of (example value, YAML end-of-line comment describing
    the value's type/optionality).
    """
    example = None
    comment = ""
    # Placeholder example values for the CWL primitive types.
    defaults = {
        "null": "null",
        "Any": "null",
        "boolean": False,
        "int": 0,
        "long": 0,
        "float": 0.1,
        "double": 0.1,
        "string": "a_string",
        "File": ruamel.yaml.comments.CommentedMap(
            [("class", "File"), ("path", "a/file/path")]
        ),
        "Directory": ruamel.yaml.comments.CommentedMap(
            [("class", "Directory"), ("path", "a/directory/path")]
        ),
    }  # type: CWLObjectType
    if isinstance(inptype, MutableSequence):
        # Union type: a "null" member marks the input as optional.
        optional = False
        if "null" in inptype:
            # NOTE(review): mutates the caller's type list in place — confirm
            # no caller relies on the original union contents.
            inptype.remove("null")
            optional = True
        if len(inptype) == 1:
            # Single remaining alternative: recurse into it directly.
            example, comment = generate_example_input(inptype[0], default)
            if optional:
                if comment:
                    comment = f"{comment} (optional)"
                else:
                    comment = "optional"
        else:
            # Multiple alternatives: emit one example per alternative, each
            # annotated with its own comment.
            example = CommentedSeq()
            for index, entry in enumerate(inptype):
                value, e_comment = generate_example_input(entry, default)
                example.append(value)
                example.yaml_add_eol_comment(e_comment, index)
            if optional:
                comment = "optional"
    elif isinstance(inptype, Mapping) and "type" in inptype:
        # Complex (mapping-described) type: array, enum, or record.
        if inptype["type"] == "array":
            first_item = cast(MutableSequence[CWLObjectType], inptype["items"])[0]
            items_len = len(cast(Sized, inptype["items"]))
            if items_len == 1 and "type" in first_item and first_item["type"] == "enum":
                # array of just an enum then list all the options
                example = first_item["symbols"]
                if "name" in first_item:
                    comment = 'array of type "{}".'.format(first_item["name"])
            else:
                value, comment = generate_example_input(inptype["items"], None)
                comment = "array of " + comment
                if items_len == 1:
                    example = [value]
                else:
                    example = value
            if default is not None:
                # an explicit default always wins over the synthesized example
                example = default
        elif inptype["type"] == "enum":
            symbols = cast(List[str], inptype["symbols"])
            # Preference order: caller default > schema default > sole symbol
            # > synthesized placeholder name.
            if default is not None:
                example = default
            elif "default" in inptype:
                example = inptype["default"]
            elif len(cast(Sized, inptype["symbols"])) == 1:
                example = symbols[0]
            else:
                example = "{}_enum_value".format(inptype.get("name", "valid"))
            comment = 'enum; valid values: "{}"'.format('", "'.join(symbols))
        elif inptype["type"] == "record":
            example = ruamel.yaml.comments.CommentedMap()
            if "name" in inptype:
                comment = '"{}" record type.'.format(inptype["name"])
            else:
                comment = "Anonymous record type."
            # Build one commented example entry per record field.
            for field in cast(List[CWLObjectType], inptype["fields"]):
                value, f_comment = generate_example_input(field["type"], None)
                example.insert(0, shortname(cast(str, field["name"])), value, f_comment)
        elif "default" in inptype:
            example = inptype["default"]
            comment = 'default value of type "{}".'.format(inptype["type"])
        else:
            example = defaults.get(cast(str, inptype["type"]), str(inptype))
            comment = 'type "{}".'.format(inptype["type"])
    else:
        # Plain scalar type name (or anything unrecognized): use the caller's
        # default if given, otherwise the placeholder table.
        if not default:
            example = defaults.get(str(inptype), str(inptype))
            comment = f'type "{inptype}"'
        else:
            example = default
            comment = f'default value of type "{inptype}".'
    return example, comment
def realize_input_schema(
    input_types: MutableSequence[Union[str, CWLObjectType]],
    schema_defs: MutableMapping[str, CWLObjectType],
) -> MutableSequence[Union[str, CWLObjectType]]:
    """Replace references to named types with the actual types.

    Operates recursively and mutates *input_types* (and the nested entries)
    in place; the same sequence is also returned for convenience.
    """
    for index, entry in enumerate(input_types):
        if isinstance(entry, str):
            # A bare string may be a "document#TypeName" reference; keep only
            # the fragment as the lookup key.
            if "#" in entry:
                _, input_type_name = entry.split("#")
            else:
                input_type_name = entry
            if input_type_name in schema_defs:
                # substitute the named schema definition in place
                entry = input_types[index] = schema_defs[input_type_name]
        if isinstance(entry, MutableMapping):
            # Resolve a string "type" field that references a named schema.
            if isinstance(entry["type"], str) and "#" in entry["type"]:
                _, input_type_name = entry["type"].split("#")
                if input_type_name in schema_defs:
                    entry["type"] = cast(
                        CWLOutputAtomType,
                        realize_input_schema(
                            cast(
                                MutableSequence[Union[str, CWLObjectType]],
                                schema_defs[input_type_name],
                            ),
                            schema_defs,
                        ),
                    )
            # Union type: resolve each alternative.
            if isinstance(entry["type"], MutableSequence):
                entry["type"] = cast(
                    CWLOutputAtomType,
                    realize_input_schema(
                        cast(MutableSequence[Union[str, CWLObjectType]], entry["type"]),
                        schema_defs,
                    ),
                )
            # Nested mapping type: wrap in a list so the recursion applies.
            if isinstance(entry["type"], Mapping):
                entry["type"] = cast(
                    CWLOutputAtomType,
                    realize_input_schema(
                        [cast(CWLObjectType, entry["type"])], schema_defs
                    ),
                )
            # Array type: resolve the item type(s), normalizing a lone string
            # item into a one-element list first.
            if entry["type"] == "array":
                items = (
                    entry["items"]
                    if not isinstance(entry["items"], str)
                    else [entry["items"]]
                )
                entry["items"] = cast(
                    CWLOutputAtomType,
                    realize_input_schema(
                        cast(MutableSequence[Union[str, CWLObjectType]], items),
                        schema_defs,
                    ),
                )
            # Record type: resolve the type of every field.
            if entry["type"] == "record":
                entry["fields"] = cast(
                    CWLOutputAtomType,
                    realize_input_schema(
                        cast(
                            MutableSequence[Union[str, CWLObjectType]], entry["fields"]
                        ),
                        schema_defs,
                    ),
                )
    return input_types
def generate_input_template(tool: Process) -> CWLObjectType:
    """Generate an example input object for the given CWL process."""
    # Resolve named-type references first so every input carries a concrete
    # schema, then synthesize one commented example entry per input.
    realized = realize_input_schema(tool.tool["inputs"], tool.schemaDefs)
    inputs = cast(List[MutableMapping[str, str]], realized)
    template = ruamel.yaml.comments.CommentedMap()
    for inp in inputs:
        value, comment = generate_example_input(inp["type"], inp.get("default", None))
        template.insert(0, shortname(inp["id"]), value, comment)
    return template
def load_job_order(
    args: argparse.Namespace,
    stdin: IO[Any],
    fetcher_constructor: Optional[FetcherCallableType],
    overrides_list: List[CWLObjectType],
    tool_file_uri: str,
) -> Tuple[Optional[CWLObjectType], str, Loader]:
    """Load the CWL input (job order) object from a file, stdin, or nothing.

    A single non-dash job_order argument is treated as a file path; a lone
    "-" reads YAML from stdin; otherwise no job order object is loaded here.
    Any cwltool#overrides entry is extracted into *overrides_list*.

    Returns:
        (job_order_object or None, input_basedir, loader)
    Exits with status 1 when the loaded object is not a mapping.
    """
    job_order_object = None
    job_order_file = None
    _jobloaderctx = jobloaderctx.copy()
    loader = Loader(_jobloaderctx, fetcher_constructor=fetcher_constructor)
    if len(args.job_order) == 1 and args.job_order[0][0] != "-":
        job_order_file = args.job_order[0]
    elif len(args.job_order) == 1 and args.job_order[0] == "-":
        yaml = yaml_no_ts()
        job_order_object = yaml.load(stdin)
        job_order_object, _ = loader.resolve_all(
            job_order_object, file_uri(os.getcwd()) + "/"
        )
    else:
        job_order_file = None
    if job_order_object is not None:
        # loaded from stdin: resolve relative paths against --basedir or cwd
        input_basedir = args.basedir if args.basedir else os.getcwd()
    elif job_order_file is not None:
        # loaded from a file: default base dir is the file's directory
        input_basedir = (
            args.basedir
            if args.basedir
            else os.path.abspath(os.path.dirname(job_order_file))
        )
        job_order_object, _ = loader.resolve_ref(
            job_order_file,
            checklinks=False,
            content_types=CWL_CONTENT_TYPES,
        )
    if (
        job_order_object is not None
        and "http://commonwl.org/cwltool#overrides" in job_order_object
    ):
        ov_uri = file_uri(job_order_file or input_basedir)
        overrides_list.extend(
            resolve_overrides(job_order_object, ov_uri, tool_file_uri)
        )
        del job_order_object["http://commonwl.org/cwltool#overrides"]
    if job_order_object is None:
        input_basedir = args.basedir if args.basedir else os.getcwd()
    if job_order_object is not None and not isinstance(
        job_order_object, MutableMapping
    ):
        _logger.error(
            # FIX: corrected "dictionay" typo in the user-facing message
            "CWL input object at %s is not formatted correctly, it should be a "
            "JSON/YAML dictionary, not %s.\n"
            "Raw input object:\n%s",
            job_order_file or "stdin",
            type(job_order_object),
            job_order_object,
        )
        sys.exit(1)
    return (job_order_object, input_basedir, loader)
def init_job_order(
    job_order_object: Optional[CWLObjectType],
    args: argparse.Namespace,
    process: Process,
    loader: Loader,
    stdout: Union[TextIO, StreamWriter],
    print_input_deps: bool = False,
    relative_deps: str = "primary",
    make_fs_access: Callable[[str], StdFsAccess] = StdFsAccess,
    input_basedir: str = "",
    secret_store: Optional[SecretStore] = None,
    input_required: bool = True,
    runtime_context: Optional[RuntimeContext] = None,
) -> CWLObjectType:
    """Finalize the input object for *process*.

    If no job order was supplied, one is parsed from the remaining
    command-line arguments via a parser generated from the tool's input
    schema. Schema defaults are then filled in, secrets are masked,
    File/Directory entries are normalized, and (optionally) the input
    dependencies are printed, in which case the process exits with 0.
    """
    secrets_req, _ = process.get_requirement("http://commonwl.org/cwltool#Secrets")
    if job_order_object is None:
        # No input object given: build a tool-specific argparse parser and
        # parse the leftover CLI arguments into an input object.
        namemap = {}  # type: Dict[str, str]
        records = []  # type: List[str]
        toolparser = generate_parser(
            argparse.ArgumentParser(prog=args.workflow),
            process,
            namemap,
            records,
            input_required,
            loader.fetcher.urljoin,
            file_uri(os.getcwd()) + "/",
        )
        if args.tool_help:
            toolparser.print_help(cast(IO[str], stdout))
            exit(0)
        cmd_line = vars(toolparser.parse_args(args.job_order))
        for record_name in records:
            # Reassemble record-typed inputs from their flattened
            # "<record>_<field>" command-line options.
            record = {}
            record_items = {
                k: v for k, v in cmd_line.items() if k.startswith(record_name)
            }
            for key, value in record_items.items():
                record[key[len(record_name) + 1 :]] = value
                del cmd_line[key]
            cmd_line[str(record_name)] = record
        if "job_order" in cmd_line and cmd_line["job_order"]:
            try:
                job_order_object = cast(
                    CWLObjectType,
                    loader.resolve_ref(cmd_line["job_order"])[0],
                )
            except Exception:
                _logger.exception(
                    "Failed to resolv job_order: %s", cmd_line["job_order"]
                )
                exit(1)
        else:
            job_order_object = {"id": args.workflow}

        del cmd_line["job_order"]

        # Map mangled argparse destinations back to the schema input names.
        job_order_object.update({namemap[k]: v for k, v in cmd_line.items()})

        if secret_store and secrets_req:
            # Replace declared secret values with opaque handles so they
            # don't leak into logs below.
            secret_store.store(
                [shortname(sc) for sc in cast(List[str], secrets_req["secrets"])],
                job_order_object,
            )

        if _logger.isEnabledFor(logging.DEBUG):
            _logger.debug(
                "Parsed job order from command line: %s",
                json_dumps(job_order_object, indent=4, default=str),
            )

    for inp in process.tool["inputs"]:
        if "default" in inp and (
            not job_order_object or shortname(inp["id"]) not in job_order_object
        ):
            # Fill in schema-declared defaults for inputs that were not
            # supplied.
            if not job_order_object:
                job_order_object = {}
            job_order_object[shortname(inp["id"])] = inp["default"]

    def path_to_loc(p: CWLObjectType) -> None:
        # Normalize the legacy "path" field to "location".
        if "location" not in p and "path" in p:
            p["location"] = p["path"]
            del p["path"]

    # Combine $namespaces from the input object and the process metadata so
    # namespaced format identifiers can be expanded.
    ns = {}  # type: ContextType
    ns.update(cast(ContextType, job_order_object.get("$namespaces", {})))
    ns.update(cast(ContextType, process.metadata.get("$namespaces", {})))
    ld = Loader(ns)

    def expand_formats(p: CWLObjectType) -> None:
        # Expand a prefixed "format" value into a full URI.
        if "format" in p:
            p["format"] = ld.expand_url(cast(str, p["format"]), "")

    visit_class(job_order_object, ("File", "Directory"), path_to_loc)
    visit_class(
        job_order_object,
        ("File",),
        functools.partial(add_sizes, make_fs_access(input_basedir)),
    )
    visit_class(job_order_object, ("File",), expand_formats)
    adjustDirObjs(job_order_object, trim_listing)
    normalizeFilesDirs(job_order_object)

    if print_input_deps:
        if not runtime_context:
            raise RuntimeError("runtime_context is required for print_input_deps.")
        runtime_context.toplevel = True
        # Bind the inputs so secondaryFiles get discovered before printing.
        builder = process._init_job(job_order_object, runtime_context)
        builder.loadListing = "no_listing"
        builder.bind_input(
            process.inputs_record_schema, job_order_object, discover_secondaryFiles=True
        )
        basedir: Optional[str] = None
        uri = cast(str, job_order_object["id"])
        if uri == args.workflow:
            basedir = os.path.dirname(uri)
            uri = ""
        printdeps(
            job_order_object,
            loader,
            stdout,
            relative_deps,
            uri,
            basedir=basedir,
            nestdirs=False,
        )
        exit(0)

    if secret_store and secrets_req:
        # Mask secrets again in case a job order was supplied directly.
        secret_store.store(
            [shortname(sc) for sc in cast(List[str], secrets_req["secrets"])],
            job_order_object,
        )

    if "cwl:tool" in job_order_object:
        del job_order_object["cwl:tool"]
    if "id" in job_order_object:
        del job_order_object["id"]
    return job_order_object
def make_relative(base: str, obj: CWLObjectType) -> None:
    """Rewrite *obj*'s location as a path relative to *base*.

    Locations with a non-``file`` URI scheme (http, s3, ...) are left alone.
    """
    loc = cast(str, obj.get("location", obj.get("path")))
    has_scheme = ":" in loc.split("/")[0]
    if has_scheme and not loc.startswith("file://"):
        # Remote resource: nothing to relativize.
        return
    if loc.startswith("file://"):
        loc = uri_file_path(loc)
    obj["location"] = os.path.relpath(loc, base)
def printdeps(
    obj: CWLObjectType,
    document_loader: Loader,
    stdout: Union[TextIO, StreamWriter],
    relative_deps: str,
    uri: str,
    basedir: Optional[str] = None,
    nestdirs: bool = True,
) -> None:
    """Print a JSON representation of the dependencies of the CWL document.

    :param relative_deps: base for relativizing locations, either
        ``"primary"`` (relative to the primary document) or ``"cwd"``.
    :raises WorkflowException: on an unrecognized ``relative_deps`` value.
    """
    deps = find_deps(obj, document_loader, uri, basedir=basedir, nestdirs=nestdirs)
    if relative_deps == "primary":
        base = basedir if basedir else os.path.dirname(uri_file_path(str(uri)))
    elif relative_deps == "cwd":
        base = os.getcwd()
    else:
        # Previously an unexpected value crashed later with an opaque
        # NameError on ``base``; fail fast with a clear message instead.
        raise WorkflowException(f"Unknown relative_deps value: {relative_deps!r}")
    visit_class(deps, ("File", "Directory"), functools.partial(make_relative, base))
    print(json_dumps(deps, indent=4, default=str), file=stdout)
def prov_deps(
    obj: CWLObjectType,
    document_loader: Loader,
    uri: str,
    basedir: Optional[str] = None,
) -> CWLObjectType:
    """Return the dependency tree of *obj*, restricted to CWL documents.

    Secondary files that do not carry the CWL IANA media type are pruned.

    Fix: the original deleted entries from ``sec_files`` while iterating it
    with ``enumerate``, which silently skips the element following each
    deletion (and can leave non-CWL entries behind). The list is now
    rebuilt with only the entries to keep.
    """
    deps = find_deps(obj, document_loader, uri, basedir=basedir)

    def remove_non_cwl(deps: CWLObjectType) -> None:
        if "secondaryFiles" in deps:
            sec_files = cast(List[CWLObjectType], deps["secondaryFiles"])
            kept = []
            for entry in sec_files:
                if "format" in entry and entry["format"] == CWL_IANA:
                    # CWL document: keep it and prune its own dependencies.
                    remove_non_cwl(entry)
                    kept.append(entry)
            deps["secondaryFiles"] = cast(
                MutableSequence[CWLOutputAtomType], kept
            )

    remove_non_cwl(deps)
    return deps
def find_deps(
    obj: CWLObjectType,
    document_loader: Loader,
    uri: str,
    basedir: Optional[str] = None,
    nestdirs: bool = True,
) -> CWLObjectType:
    """Find the dependencies of the CWL document.

    The result is a File object for the primary document with a
    ``secondaryFiles`` list describing everything it references.
    """
    deps: CWLObjectType = {
        "class": "File",
        "location": uri,
        "format": CWL_IANA,
    }

    def loadref(base: str, uri: str) -> Union[CommentedMap, CommentedSeq, str, None]:
        # Fetch a referenced document relative to its base.
        return document_loader.fetch(document_loader.fetcher.urljoin(base, uri))

    scan_root = basedir if basedir else uri
    secondary = scandeps(
        scan_root,
        obj,
        {"$import", "run"},
        {"$include", "$schemas", "location"},
        loadref,
        nestdirs=nestdirs,
    )
    if secondary is not None:
        deps["secondaryFiles"] = cast(
            MutableSequence[CWLOutputAtomType], mergedirs(secondary)
        )

    return deps
def print_pack(
    loadingContext: LoadingContext,
    uri: str,
) -> str:
    """Return a CWL serialization of the CWL document in JSON."""
    packed = pack(loadingContext, uri)
    graph = cast(MutableSequence[CWLObjectType], packed["$graph"])
    if len(graph) > 1:
        # Multiple top-level objects: keep the whole $graph document.
        return json_dumps(packed, indent=4, default=str)
    # Single object: unwrap it from the $graph container.
    return json_dumps(graph[0], indent=4, default=str)
def supported_cwl_versions(enable_dev: bool) -> List[str]:
    """Return the CWL spec versions this runner accepts, sorted ascending."""
    # ALLUPDATES and UPDATES are dicts keyed by version string; ALLUPDATES
    # additionally contains the development versions.
    source = ALLUPDATES if enable_dev else UPDATES
    return sorted(source)
def setup_schema(
    args: argparse.Namespace, custom_schema_callback: Optional[Callable[[], None]]
) -> None:
    """Activate a custom, extended (--enable-ext), or standard CWL schema."""
    all_versions = (
        "v1.0",
        "v1.1",
        "v1.2",
        "v1.2.0-dev1",
        "v1.2.0-dev2",
        "v1.2.0-dev3",
    )
    if custom_schema_callback is not None:
        custom_schema_callback()
        return
    if args.enable_ext:
        # Load the cwltool extension schemas bundled with the package;
        # the v1.1 extensions apply to every version after v1.0.
        with pkg_resources.resource_stream(__name__, "extensions.yml") as res:
            ext10 = res.read().decode("utf-8")
        with pkg_resources.resource_stream(__name__, "extensions-v1.1.yml") as res:
            ext11 = res.read().decode("utf-8")
        use_custom_schema("v1.0", "http://commonwl.org/cwltool", ext10)
        for version in all_versions[1:]:
            use_custom_schema(version, "http://commonwl.org/cwltool", ext11)
    else:
        for version in all_versions:
            use_standard_schema(version)
class ProvLogFormatter(logging.Formatter):
    """Enforce ISO8601 with both T and Z."""

    def __init__(self) -> None:
        """Use the default formatter with our custom formatstring."""
        super().__init__("[%(asctime)sZ] %(message)s")

    def formatTime(
        self, record: logging.LogRecord, datefmt: Optional[str] = None
    ) -> str:
        """Return the record's timestamp in UTC with millisecond precision.

        Fix: the previous f-string used ``{record.msecs:03f}``, which
        renders six decimal places (e.g. ``123.456789``) instead of the
        intended zero-padded three-digit millisecond field.
        """
        formatted_time = time.strftime(
            "%Y-%m-%dT%H:%M:%S", time.gmtime(float(record.created))
        )
        # record.msecs is a float in [0, 1000); truncate to an integer and
        # zero-pad to exactly three digits.
        return f"{formatted_time},{int(record.msecs):03d}"
# Destination for provenance log output: either a plain text stream or a
# WritableBagFile inside the research object.
ProvOut = Union[io.TextIOWrapper, WritableBagFile]
def setup_provenance(
    args: argparse.Namespace,
    argsl: List[str],
    runtimeContext: RuntimeContext,
) -> Tuple[ProvOut, "logging.StreamHandler[ProvOut]"]:
    """Create a research object and attach a provenance log handler.

    :raises ArgumentException: when checksums are disabled, since
        provenance capture depends on them.
    """
    if not args.compute_checksum:
        _logger.error("--provenance incompatible with --no-compute-checksum")
        raise ArgumentException()

    research_object = ResearchObject(
        getdefault(runtimeContext.make_fs_access, StdFsAccess)(""),
        temp_prefix_ro=args.tmpdir_prefix,
        orcid=args.orcid,
        full_name=args.cwl_full_name,
    )
    runtimeContext.research_obj = research_object

    log_file_io = research_object.open_log_file_for_activity(
        research_object.engine_uuid
    )
    prov_log_handler = logging.StreamHandler(log_file_io)
    prov_log_handler.setFormatter(ProvLogFormatter())
    _logger.addHandler(prov_log_handler)
    _logger.debug("[provenance] Logging to %s", log_file_io)

    if argsl is not None:
        # Log cwltool command line options to provenance file
        _logger.info("[cwltool] %s %s", sys.argv[0], " ".join(argsl))
    _logger.debug("[cwltool] Arguments: %s", args)
    return log_file_io, prov_log_handler
def setup_loadingContext(
    loadingContext: Optional[LoadingContext],
    runtimeContext: RuntimeContext,
    args: argparse.Namespace,
) -> LoadingContext:
    """Prepare a LoadingContext from the given arguments."""
    if loadingContext is not None:
        loadingContext = loadingContext.copy()
    else:
        loadingContext = LoadingContext(vars(args))
        loadingContext.singularity = runtimeContext.singularity
        loadingContext.podman = runtimeContext.podman

    loadingContext.loader = default_loader(
        loadingContext.fetcher_constructor,
        enable_dev=args.enable_dev,
        doc_cache=args.doc_cache,
    )
    loadingContext.research_obj = runtimeContext.research_obj
    # JS expression validation is skipped either explicitly or whenever
    # validation as a whole is turned off.
    loadingContext.disable_js_validation = args.disable_js_validation or (
        not args.do_validate
    )
    loadingContext.construct_tool_object = getdefault(
        loadingContext.construct_tool_object, workflow.default_make_tool
    )
    loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver)
    if loadingContext.do_update is None:
        # Don't upgrade the document in-place when only packing or
        # printing a subgraph.
        loadingContext.do_update = not (args.pack or args.print_subgraph)

    return loadingContext
def make_template(
    tool: Process,
) -> None:
    """Write a template CWL input object for the given Process to stdout."""

    def _represent_none(
        self: Any, data: Any
    ) -> Any:  # pylint: disable=unused-argument
        """Force clean representation of 'null'."""
        return self.represent_scalar("tag:yaml.org,2002:null", "null")

    ruamel.yaml.representer.RoundTripRepresenter.add_representer(
        type(None), _represent_none
    )
    dumper = YAML()
    dumper.default_flow_style = False
    dumper.indent = 4
    dumper.block_seq_indent = 2
    dumper.dump(generate_input_template(tool), sys.stdout)
def inherit_reqshints(tool: Process, parent: Process) -> None:
    """Copy down requirements and hints from ancestors of a given process."""

    def _has_class(entries, klass):
        # True when any entry's "class" matches klass.
        return any(entry["class"] == klass for entry in entries)

    for parent_req in parent.requirements:
        if not _has_class(tool.requirements, parent_req["class"]):
            tool.requirements.append(parent_req)

    for parent_hint in parent.hints:
        # A hint is inherited only when neither a requirement nor a hint of
        # the same class is already present on the child.
        if not _has_class(tool.requirements, parent_hint["class"]) and not _has_class(
            tool.hints, parent_hint["class"]
        ):
            tool.hints.append(parent_hint)
def choose_target(
    args: argparse.Namespace,
    tool: Process,
    loading_context: LoadingContext,
) -> Optional[Process]:
    """Walk the Workflow and extract the subset matching all args.target.

    Returns None (after logging an error) when *tool* is not a Workflow.
    """
    if loading_context.loader is None:
        raise Exception("loading_context.loader cannot be None")

    if not isinstance(tool, Workflow):
        _logger.error("Can only use --target on Workflows")
        return None

    url = urllib.parse.urlparse(tool.tool["id"])
    if url.fragment:
        # Document id already carries a fragment: append target names.
        target_ids = [tool.tool["id"] + "/" + r for r in args.target]
    else:
        target_ids = [
            loading_context.loader.fetcher.urljoin(tool.tool["id"], "#" + r)
            for r in args.target
        ]
    extracted = get_subgraph(target_ids, tool, loading_context)

    if not isinstance(loading_context.loader.idx, MutableMapping):
        raise Exception("Missing loading_context.loader.idx!")
    # Register the extracted subgraph so make_tool can resolve it by id.
    loading_context.loader.idx[extracted["id"]] = extracted
    return make_tool(extracted["id"], loading_context)
def choose_step(
    args: argparse.Namespace,
    tool: Process,
    loading_context: LoadingContext,
) -> Optional[Process]:
    """Walk the given Workflow and extract just args.single_step.

    Returns None (after logging an error) when *tool* is not a Workflow.
    """
    if loading_context.loader is None:
        raise Exception("loading_context.loader cannot be None")

    if not isinstance(tool, Workflow):
        _logger.error("Can only use --single-step on Workflows")
        return None

    url = urllib.parse.urlparse(tool.tool["id"])
    if url.fragment:
        step_id = tool.tool["id"] + "/" + args.single_step
    else:
        step_id = loading_context.loader.fetcher.urljoin(
            tool.tool["id"], "#" + args.single_step
        )
    extracted = get_step(tool, step_id, loading_context)

    if not isinstance(loading_context.loader.idx, MutableMapping):
        raise Exception("Missing loading_context.loader.idx!")
    # Register the extracted step so make_tool can resolve it by id.
    loading_context.loader.idx[extracted["id"]] = cast(
        Union[CommentedMap, CommentedSeq, str, None], cmap(extracted)
    )
    return make_tool(extracted["id"], loading_context)
def choose_process(
    args: argparse.Namespace,
    tool: Process,
    loadingContext: LoadingContext,
) -> Optional[Process]:
    """Walk the given Workflow and extract just args.single_process.

    Returns None (after logging an error) when *tool* is not a Workflow.
    """
    if loadingContext.loader is None:
        raise Exception("loadingContext.loader cannot be None")

    if not isinstance(tool, Workflow):
        _logger.error("Can only use --single-process on Workflows")
        return None

    url = urllib.parse.urlparse(tool.tool["id"])
    if url.fragment:
        step_id = tool.tool["id"] + "/" + args.single_process
    else:
        step_id = loadingContext.loader.fetcher.urljoin(
            tool.tool["id"], "#" + args.single_process
        )
    extracted, workflow_step = get_process(
        tool,
        step_id,
        loadingContext,
    )

    if not isinstance(loadingContext.loader.idx, MutableMapping):
        raise Exception("Missing loadingContext.loader.idx!")
    loadingContext.loader.idx[extracted["id"]] = extracted
    new_tool = make_tool(extracted["id"], loadingContext)
    # The extracted process must still honor requirements/hints declared on
    # the enclosing workflow step and its ancestors.
    inherit_reqshints(new_tool, workflow_step)
    return new_tool
def check_working_directories(
    runtimeContext: RuntimeContext,
) -> Optional[int]:
    """Make any needed working directories.

    Returns 1 when a directory could not be created, None on success.
    """
    for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"):
        prefix_value = getattr(runtimeContext, dirprefix)
        if not prefix_value or prefix_value == DEFAULT_TMP_PREFIX:
            continue
        # cachedir names a directory itself (as does any prefix already
        # ending in "/"); keep the trailing slash that abspath() strips.
        suffix = (
            "/"
            if prefix_value.endswith("/") or dirprefix == "cachedir"
            else ""
        )
        absolute = os.path.abspath(prefix_value) + suffix
        setattr(runtimeContext, dirprefix, absolute)
        parent = os.path.dirname(absolute)
        if not os.path.exists(parent):
            try:
                os.makedirs(parent)
            except Exception:
                _logger.exception("Failed to create directory.")
                return 1
    return None
def print_targets(
    tool: Process,
    stdout: Union[TextIO, StreamWriter],
    loading_context: LoadingContext,
    prefix: str = "",
) -> None:
    """Recursively find targets for --subgraph and friends.

    Fix: the f-strings reused double quotes inside double-quoted literals
    (``f"{shortname(t["id"])}"``), which is a SyntaxError on every Python
    release before 3.12 (PEP 701); single quotes are used for the inner
    subscripts instead.
    """
    for f in ("outputs", "inputs"):
        if tool.tool[f]:
            _logger.info("%s %s%s targets:", prefix[:-1], f[0].upper(), f[1:-1])
            print(
                "  "
                + "\n  ".join([f"{prefix}{shortname(t['id'])}" for t in tool.tool[f]]),
                file=stdout,
            )
    if "steps" in tool.tool:
        # Give nested steps the requirements/hints of this workflow.
        loading_context = copy.copy(loading_context)
        loading_context.requirements = tool.requirements
        loading_context.hints = tool.hints
        _logger.info("%s steps targets:", prefix[:-1])
        for t in tool.tool["steps"]:
            print(f"  {prefix}{shortname(t['id'])}", file=stdout)
            run: Union[str, Process, Dict[str, Any]] = t["run"]
            if isinstance(run, str):
                process = make_tool(run, loading_context)
            elif isinstance(run, dict):
                process = make_tool(cast(CommentedMap, cmap(run)), loading_context)
            else:
                process = run
            print_targets(
                process, stdout, loading_context, f"{prefix}{shortname(t['id'])}/"
            )
def main(
    argsl: Optional[List[str]] = None,
    args: Optional[argparse.Namespace] = None,
    job_order_object: Optional[CWLObjectType] = None,
    stdin: IO[Any] = sys.stdin,
    stdout: Optional[Union[TextIO, StreamWriter]] = None,
    stderr: IO[Any] = sys.stderr,
    versionfunc: Callable[[], str] = versionstring,
    logger_handler: Optional[logging.Handler] = None,
    custom_schema_callback: Optional[Callable[[], None]] = None,
    executor: Optional[JobExecutor] = None,
    loadingContext: Optional[LoadingContext] = None,
    runtimeContext: Optional[RuntimeContext] = None,
    input_required: bool = True,
) -> int:
    """Run cwltool: parse arguments, load/validate the document, execute it.

    Returns a POSIX-style exit code: 0 on success, 33 when the workflow
    requires an unsupported feature, and 1 for other failures.

    Fix: the GraphTargetMissingException handler called ``logging.warn``,
    which is the deprecated alias on the root logger; it now uses
    ``_logger.warning`` like the rest of the module.
    """
    if not stdout:  # force UTF-8 even if the console is configured differently
        if hasattr(sys.stdout, "encoding") and sys.stdout.encoding.upper() not in (
            "UTF-8",
            "UTF8",
        ):
            if hasattr(sys.stdout, "detach"):
                stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
            else:
                stdout = getwriter("utf-8")(sys.stdout)  # type: ignore
        else:
            stdout = sys.stdout

    _logger.removeHandler(defaultStreamHandler)
    stderr_handler = logger_handler
    if stderr_handler is not None:
        _logger.addHandler(stderr_handler)
    else:
        coloredlogs.install(logger=_logger, stream=stderr)
        stderr_handler = _logger.handlers[-1]
    workflowobj = None
    prov_log_handler: Optional[logging.StreamHandler[ProvOut]] = None
    try:
        if args is None:
            if argsl is None:
                argsl = sys.argv[1:]
            addl = []  # type: List[str]
            if "CWLTOOL_OPTIONS" in os.environ:
                addl = os.environ["CWLTOOL_OPTIONS"].split(" ")
            parser = arg_parser()
            argcomplete.autocomplete(parser)
            args = parser.parse_args(addl + argsl)
            if args.record_container_id:
                if not args.cidfile_dir:
                    args.cidfile_dir = os.getcwd()
                del args.record_container_id

        if runtimeContext is None:
            runtimeContext = RuntimeContext(vars(args))
        else:
            runtimeContext = runtimeContext.copy()

        # If caller parsed its own arguments, it may not include every
        # cwltool option, so fill in defaults to avoid crashing when
        # dereferencing them in args.
        for key, val in get_default_args().items():
            if not hasattr(args, key):
                setattr(args, key, val)

        configure_logging(
            stderr_handler,
            args.quiet,
            runtimeContext.debug,
            args.enable_color,
            args.timestamps,
        )
        if args.version:
            print(versionfunc(), file=stdout)
            return 0
        _logger.info(versionfunc())

        if args.print_supported_versions:
            print("\n".join(supported_cwl_versions(args.enable_dev)), file=stdout)
            return 0

        if not args.workflow:
            if os.path.isfile("CWLFile"):
                args.workflow = "CWLFile"
            else:
                _logger.error("CWL document required, no input file was provided")
                parser.print_help(stderr)
                return 1

        if args.ga4gh_tool_registries:
            ga4gh_tool_registries[:] = args.ga4gh_tool_registries
        if not args.enable_ga4gh_tool_registry:
            del ga4gh_tool_registries[:]

        if args.mpi_config_file is not None:
            runtimeContext.mpi_config = MpiConfig.load(args.mpi_config_file)

        setup_schema(args, custom_schema_callback)

        prov_log_stream: Optional[Union[io.TextIOWrapper, WritableBagFile]] = None
        if args.provenance:
            if argsl is None:
                raise Exception("argsl cannot be None")
            try:
                prov_log_stream, prov_log_handler = setup_provenance(
                    args, argsl, runtimeContext
                )
            except ArgumentException:
                return 1

        loadingContext = setup_loadingContext(loadingContext, runtimeContext, args)

        uri, tool_file_uri = resolve_tool_uri(
            args.workflow,
            resolver=loadingContext.resolver,
            fetcher_constructor=loadingContext.fetcher_constructor,
        )

        try_again_msg = (
            "" if args.debug else ", try again with --debug for more information"
        )

        try:
            job_order_object, input_basedir, jobloader = load_job_order(
                args,
                stdin,
                loadingContext.fetcher_constructor,
                loadingContext.overrides_list,
                tool_file_uri,
            )

            if args.overrides:
                loadingContext.overrides_list.extend(
                    load_overrides(
                        file_uri(os.path.abspath(args.overrides)), tool_file_uri
                    )
                )

            loadingContext, workflowobj, uri = fetch_document(uri, loadingContext)

            if args.print_deps and loadingContext.loader:
                printdeps(
                    workflowobj, loadingContext.loader, stdout, args.relative_deps, uri
                )
                return 0

            loadingContext, uri = resolve_and_validate_document(
                loadingContext,
                workflowobj,
                uri,
                preprocess_only=(args.print_pre or args.pack),
                skip_schemas=args.skip_schemas,
            )

            if loadingContext.loader is None:
                raise Exception("Impossible code path.")
            processobj, metadata = loadingContext.loader.resolve_ref(uri)
            processobj = cast(Union[CommentedMap, CommentedSeq], processobj)
            if args.pack:
                print(print_pack(loadingContext, uri), file=stdout)
                return 0

            if args.provenance and runtimeContext.research_obj:
                # Can't really be combined with args.pack at same time
                runtimeContext.research_obj.packed_workflow(
                    print_pack(loadingContext, uri)
                )

            if args.print_pre:
                print(
                    json_dumps(
                        processobj,
                        indent=4,
                        sort_keys=True,
                        separators=(",", ": "),
                        default=str,
                    ),
                    file=stdout,
                )
                return 0

            try:
                tool = make_tool(uri, loadingContext)
            except GraphTargetMissingException as main_missing_exc:
                if args.validate:
                    _logger.warning(
                        "File contains $graph of multiple objects and no default "
                        "process (#main). Validating all objects:"
                    )
                    for entry in workflowobj["$graph"]:
                        entry_id = entry["id"]
                        make_tool(entry_id, loadingContext)
                        print(f"{entry_id} is valid CWL.", file=stdout)
                else:
                    raise main_missing_exc

            if args.make_template:
                make_template(tool)
                return 0

            if args.validate:
                print(f"{args.workflow} is valid CWL.", file=stdout)
                return 0

            if args.print_rdf:
                print(
                    printrdf(tool, loadingContext.loader.ctx, args.rdf_serializer),
                    file=stdout,
                )
                return 0

            if args.print_dot:
                printdot(tool, loadingContext.loader.ctx, stdout)
                return 0

            if args.print_targets:
                print_targets(tool, stdout, loadingContext)
                return 0

            if args.target:
                ctool = choose_target(args, tool, loadingContext)
                if ctool is None:
                    return 1
                else:
                    tool = ctool
            elif args.single_step:
                ctool = choose_step(args, tool, loadingContext)
                if ctool is None:
                    return 1
                else:
                    tool = ctool
            elif args.single_process:
                ctool = choose_process(args, tool, loadingContext)
                if ctool is None:
                    return 1
                else:
                    tool = ctool

            if args.print_subgraph:
                if "name" in tool.tool:
                    del tool.tool["name"]
                print(
                    json_dumps(
                        tool.tool,
                        indent=4,
                        sort_keys=True,
                        separators=(",", ": "),
                        default=str,
                    ),
                    file=stdout,
                )
                return 0

        except (ValidationException) as exc:
            _logger.error(
                "Tool definition failed validation:\n%s", str(exc), exc_info=args.debug
            )
            return 1
        except (RuntimeError, WorkflowException) as exc:
            _logger.error(
                "Tool definition failed initialization:\n%s",
                str(exc),
                exc_info=args.debug,
            )
            return 1
        except Exception as exc:
            _logger.error(
                "I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
                try_again_msg,
                str(exc) if not args.debug else "",
                exc_info=args.debug,
            )
            return 1

        if isinstance(tool, int):
            return tool

        # If on MacOS platform, TMPDIR must be set to be under one of the
        # shared volumes in Docker for Mac
        # More info: https://dockstore.org/docs/faq
        if sys.platform == "darwin":
            default_mac_path = "/private/tmp/docker_tmp"
            if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
                runtimeContext.tmp_outdir_prefix = default_mac_path
            if runtimeContext.tmpdir_prefix == DEFAULT_TMP_PREFIX:
                runtimeContext.tmpdir_prefix = default_mac_path

        if check_working_directories(runtimeContext) is not None:
            return 1

        if args.cachedir:
            if args.move_outputs == "move":
                runtimeContext.move_outputs = "copy"
            runtimeContext.tmp_outdir_prefix = args.cachedir

        runtimeContext.log_dir = args.log_dir

        runtimeContext.secret_store = getdefault(
            runtimeContext.secret_store, SecretStore()
        )
        runtimeContext.make_fs_access = getdefault(
            runtimeContext.make_fs_access, StdFsAccess
        )

        if not executor:
            if args.parallel:
                temp_executor = MultithreadedJobExecutor()
                runtimeContext.select_resources = temp_executor.select_resources
                real_executor = temp_executor  # type: JobExecutor
            else:
                real_executor = SingleJobExecutor()
        else:
            real_executor = executor

        try:
            runtimeContext.basedir = input_basedir

            if isinstance(tool, ProcessGenerator):
                # Run the generator first; it produces the real workflow and
                # a transformed job order.
                tfjob_order = {}  # type: CWLObjectType
                if loadingContext.jobdefaults:
                    tfjob_order.update(loadingContext.jobdefaults)
                if job_order_object:
                    tfjob_order.update(job_order_object)
                tfout, tfstatus = real_executor(
                    tool.embedded_tool, tfjob_order, runtimeContext
                )
                if not tfout or tfstatus != "success":
                    raise WorkflowException(
                        "ProcessGenerator failed to generate workflow"
                    )
                tool, job_order_object = tool.result(tfjob_order, tfout, runtimeContext)
                if not job_order_object:
                    job_order_object = None

            try:
                initialized_job_order_object = init_job_order(
                    job_order_object,
                    args,
                    tool,
                    jobloader,
                    stdout,
                    print_input_deps=args.print_input_deps,
                    relative_deps=args.relative_deps,
                    make_fs_access=runtimeContext.make_fs_access,
                    input_basedir=input_basedir,
                    secret_store=runtimeContext.secret_store,
                    input_required=input_required,
                    runtime_context=runtimeContext,
                )
            except SystemExit as err:
                return err.code

            del args.workflow
            del args.job_order

            conf_file = getattr(
                args, "beta_dependency_resolvers_configuration", None
            )  # str
            use_conda_dependencies = getattr(
                args, "beta_conda_dependencies", None
            )  # str

            if conf_file or use_conda_dependencies:
                runtimeContext.job_script_provider = DependenciesConfiguration(args)
            else:
                runtimeContext.find_default_container = functools.partial(
                    find_default_container,
                    default_container=runtimeContext.default_container,
                    use_biocontainers=args.beta_use_biocontainers,
                )

            (out, status) = real_executor(
                tool, initialized_job_order_object, runtimeContext, logger=_logger
            )

            if out is not None:
                if runtimeContext.research_obj is not None:
                    runtimeContext.research_obj.create_job(out, True)

                    def remove_at_id(doc: CWLObjectType) -> None:
                        # Strip JSON-LD @id fields before recording outputs.
                        for key in list(doc.keys()):
                            if key == "@id":
                                del doc[key]
                            else:
                                value = doc[key]
                                if isinstance(value, MutableMapping):
                                    remove_at_id(value)
                                elif isinstance(value, MutableSequence):
                                    for entry in value:
                                        if isinstance(entry, MutableMapping):
                                            remove_at_id(entry)

                    remove_at_id(out)
                    visit_class(
                        out,
                        ("File",),
                        functools.partial(add_sizes, runtimeContext.make_fs_access("")),
                    )

                def loc_to_path(obj: CWLObjectType) -> None:
                    # Derive "path" from "location" for local files and drop
                    # the computed name fields from the final output.
                    for field in ("path", "nameext", "nameroot", "dirname"):
                        if field in obj:
                            del obj[field]
                    if cast(str, obj["location"]).startswith("file://"):
                        obj["path"] = uri_file_path(cast(str, obj["location"]))

                visit_class(out, ("File", "Directory"), loc_to_path)

                # Unsetting the Generation from final output object
                visit_class(out, ("File",), MutationManager().unset_generation)

                print(
                    json_dumps(out, indent=4, ensure_ascii=False, default=str),
                    file=stdout,
                )
                if hasattr(stdout, "flush"):
                    stdout.flush()

            if status != "success":
                _logger.warning("Final process status is %s", status)
                return 1
            _logger.info("Final process status is %s", status)
            return 0

        except (ValidationException) as exc:
            _logger.error(
                "Input object failed validation:\n%s", str(exc), exc_info=args.debug
            )
            return 1
        except UnsupportedRequirement as exc:
            _logger.error(
                "Workflow or tool uses unsupported feature:\n%s",
                str(exc),
                exc_info=args.debug,
            )
            return 33
        except WorkflowException as exc:
            _logger.error(
                "Workflow error%s:\n%s",
                try_again_msg,
                strip_dup_lineno(str(exc)),
                exc_info=args.debug,
            )
            return 1
        except Exception as exc:  # pylint: disable=broad-except
            _logger.error(
                "Unhandled error%s:\n %s",
                try_again_msg,
                str(exc),
                exc_info=args.debug,
            )
            return 1

    finally:
        if (
            args
            and runtimeContext
            and runtimeContext.research_obj
            and workflowobj
            and loadingContext
        ):
            research_obj = runtimeContext.research_obj
            if loadingContext.loader is not None:
                research_obj.generate_snapshot(
                    prov_deps(workflowobj, loadingContext.loader, uri)
                )
            else:
                _logger.warning(
                    "Unable to generate provenance snapshot "
                    " due to missing loadingContext.loader."
                )
            if prov_log_handler is not None:
                # Stop logging so we won't half-log adding ourself to RO
                _logger.debug(
                    "[provenance] Closing provenance log file %s", prov_log_handler
                )
                _logger.removeHandler(prov_log_handler)
                # Ensure last log lines are written out
                prov_log_handler.flush()
                # Underlying WritableBagFile will add the tagfile to the manifest
                if prov_log_stream:
                    prov_log_stream.close()
                # Why not use prov_log_handler.stream ? That is not part of the
                # public API for logging.StreamHandler
                prov_log_handler.close()
            research_obj.close(args.provenance)

        _logger.removeHandler(stderr_handler)
        _logger.addHandler(defaultStreamHandler)
def find_default_container(
    builder: HasReqsHints,
    default_container: Optional[str] = None,
    use_biocontainers: Optional[bool] = None,
) -> Optional[str]:
    """Find a container."""
    if default_container or not use_biocontainers:
        return default_container
    # No explicit container: try to derive one from the tool's
    # SoftwareRequirement hints via the biocontainers registry.
    return get_container_from_software_requirements(use_biocontainers, builder)
def windows_check() -> None:
    """See if we are running on MS Windows and warn about the lack of support.

    Fix: corrected the grammar of the warning text ("If don't need" ->
    "If you don't need").
    """
    if os.name == "nt":
        warnings.warn(
            "The CWL reference runner (cwltool) no longer supports running "
            "CWL workflows natively on MS Windows as its previous MS Windows "
            "support was incomplete and untested. Instead, please see "
            "https://pypi.org/project/cwltool/#ms-windows-users "
            "for instructions on running cwltool via "
            "Windows Subsystem for Linux 2 (WSL2). If you don't need to execute "
            "CWL documents, then you can ignore this warning, but please "
            "consider migrating to https://pypi.org/project/cwl-utils/ "
            "for your CWL document processing needs."
        )
def run(*args: Any, **kwargs: Any) -> None:
    """Run cwltool."""
    windows_check()
    # Ensure spawned child processes are cleaned up if we receive SIGTERM.
    signal.signal(signal.SIGTERM, _signal_handler)
    try:
        # main() returns an exit code; sys.exit() propagates it to the shell.
        sys.exit(main(*args, **kwargs))
    finally:
        # Always reap leftover child processes, even if main() raised.
        _terminate_processes()
# Script entry point: forward the CLI arguments (minus argv[0]) to run().
if __name__ == "__main__":
    run(sys.argv[1:])
#!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
"""Entry point for cwltool."""
import argparse
import copy
import functools
import io
import logging
import os
import signal
import subprocess # nosec
import sys
import time
import urllib
import warnings
from codecs import StreamWriter, getwriter
from collections.abc import MutableMapping, MutableSequence
from typing import (
IO,
Any,
Callable,
Dict,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sized,
TextIO,
Tuple,
Union,
cast,
)
import argcomplete
import coloredlogs
import pkg_resources # part of setuptools
import ruamel.yaml
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ruamel.yaml.main import YAML
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import Loader, file_uri, uri_file_path
from schema_salad.sourceline import cmap, strip_dup_lineno
from schema_salad.utils import ContextType, FetcherCallableType, json_dumps, yaml_no_ts
from . import CWL_CONTENT_TYPES, workflow
from .argparser import arg_parser, generate_parser, get_default_args
from .context import LoadingContext, RuntimeContext, getdefault
from .cwlrdf import printdot, printrdf
from .errors import (
ArgumentException,
GraphTargetMissingException,
UnsupportedRequirement,
WorkflowException,
)
from .executors import JobExecutor, MultithreadedJobExecutor, SingleJobExecutor
from .load_tool import (
default_loader,
fetch_document,
jobloaderctx,
load_overrides,
make_tool,
resolve_and_validate_document,
resolve_overrides,
resolve_tool_uri,
)
from .loghandler import _logger, configure_logging, defaultStreamHandler
from .mpi import MpiConfig
from .mutation import MutationManager
from .pack import pack
from .process import (
CWL_IANA,
Process,
add_sizes,
mergedirs,
scandeps,
shortname,
use_custom_schema,
use_standard_schema,
)
from .procgenerator import ProcessGenerator
from .provenance import ResearchObject, WritableBagFile
from .resolver import ga4gh_tool_registries, tool_resolver
from .secrets import SecretStore
from .software_requirements import (
DependenciesConfiguration,
get_container_from_software_requirements,
)
from .stdfsaccess import StdFsAccess
from .subgraph import get_process, get_step, get_subgraph
from .update import ALLUPDATES, UPDATES
from .utils import (
DEFAULT_TMP_PREFIX,
CWLObjectType,
CWLOutputAtomType,
CWLOutputType,
HasReqsHints,
adjustDirObjs,
normalizeFilesDirs,
processes_to_kill,
trim_listing,
versionstring,
visit_class,
)
from .workflow import Workflow
def _terminate_processes() -> None:
    """Kill all spawned processes.

    Processes to be killed must be appended to `utils.processes_to_kill`
    as they are spawned.

    An important caveat: since there's no supported way to kill another
    thread in Python, this function cannot stop other threads from
    continuing to execute while it kills the processes that they've
    spawned. This may occasionally lead to unexpected behaviour.
    """
    # It's possible that another thread will spawn a new task while
    # we're executing, so it's not safe to use a for loop here.
    while processes_to_kill:
        process = processes_to_kill.popleft()
        # subprocess.Popen.args may be a single string or a sequence;
        # normalize to a list so we can scan it either way.
        if isinstance(process.args, MutableSequence):
            args = process.args
        else:
            args = [process.args]
        # Extract the container-id file path from a "--cidfile=<path>"
        # argument, if the process was started with one.
        # NOTE(review): assumes the "--cidfile=PATH" form; a split
        # "--cidfile PATH" pair would raise IndexError here -- confirm.
        cidfile = [str(arg).split("=")[1] for arg in args if "--cidfile" in str(arg)]
        if cidfile: # Try to be nice: ask Docker to stop the container first
            try:
                with open(cidfile[0]) as inp_stream:
                    p = subprocess.Popen( # nosec
                        ["docker", "kill", inp_stream.read()], shell=False # nosec
                    )
                    try:
                        p.wait(timeout=10)
                    except subprocess.TimeoutExpired:
                        p.kill()
            except FileNotFoundError:
                # cidfile was never written; fall through to killing the
                # process directly below.
                pass
        if process.stdin:
            process.stdin.close()
        try:
            process.wait(10)
        except subprocess.TimeoutExpired:
            pass
        process.kill() # Always kill, even if we tried with the cidfile
def _signal_handler(signum: int, _: Any) -> None:
    """Terminate every tracked child process, then exit with the signal number.

    A process spawned by another thread after the cleanup pass but before
    the interpreter exits can still escape; see the caveats documented on
    _terminate_processes().
    """
    _terminate_processes()
    sys.exit(signum)
def generate_example_input(
    inptype: Optional[CWLOutputType],
    default: Optional[CWLOutputType],
) -> Tuple[Any, str]:
    """Convert a single input schema into an example.

    :param inptype: a CWL type: a plain type name, a union (list of types),
        or a mapping describing an array, enum, or record.
    :param default: the declared default value for this input, if any.
    :return: a (value, comment) pair: an example value for the input plus a
        human-readable comment describing its type.
    """
    example = None
    comment = ""
    # Placeholder example values for the primitive CWL types.
    defaults = {
        "null": "null",
        "Any": "null",
        "boolean": False,
        "int": 0,
        "long": 0,
        "float": 0.1,
        "double": 0.1,
        "string": "a_string",
        "File": ruamel.yaml.comments.CommentedMap(
            [("class", "File"), ("path", "a/file/path")]
        ),
        "Directory": ruamel.yaml.comments.CommentedMap(
            [("class", "Directory"), ("path", "a/directory/path")]
        ),
    } # type: CWLObjectType
    if isinstance(inptype, MutableSequence):
        # A list of types is a union; "null" in the union means optional.
        optional = False
        if "null" in inptype:
            # NOTE(review): this mutates the caller's schema list in place
            # by removing "null" -- confirm callers do not reuse it.
            inptype.remove("null")
            optional = True
        if len(inptype) == 1:
            # Union collapsed to a single type: recurse on it.
            example, comment = generate_example_input(inptype[0], default)
            if optional:
                if comment:
                    comment = f"{comment} (optional)"
                else:
                    comment = "optional"
        else:
            # Genuine multi-type union: emit one example per alternative,
            # each annotated with its own end-of-line comment.
            example = CommentedSeq()
            for index, entry in enumerate(inptype):
                value, e_comment = generate_example_input(entry, default)
                example.append(value)
                example.yaml_add_eol_comment(e_comment, index)
            if optional:
                comment = "optional"
    elif isinstance(inptype, Mapping) and "type" in inptype:
        if inptype["type"] == "array":
            first_item = cast(MutableSequence[CWLObjectType], inptype["items"])[0]
            items_len = len(cast(Sized, inptype["items"]))
            if items_len == 1 and "type" in first_item and first_item["type"] == "enum":
                # array of just an enum then list all the options
                example = first_item["symbols"]
                if "name" in first_item:
                    comment = 'array of type "{}".'.format(first_item["name"])
            else:
                # General array: build an example for the item type.
                value, comment = generate_example_input(inptype["items"], None)
                comment = "array of " + comment
                if items_len == 1:
                    example = [value]
                else:
                    example = value
            if default is not None:
                # An explicit default always wins over the synthesized example.
                example = default
        elif inptype["type"] == "enum":
            symbols = cast(List[str], inptype["symbols"])
            # Preference order: caller-supplied default, schema default,
            # the only symbol (if unambiguous), then a placeholder name.
            if default is not None:
                example = default
            elif "default" in inptype:
                example = inptype["default"]
            elif len(cast(Sized, inptype["symbols"])) == 1:
                example = symbols[0]
            else:
                example = "{}_enum_value".format(inptype.get("name", "valid"))
            comment = 'enum; valid values: "{}"'.format('", "'.join(symbols))
        elif inptype["type"] == "record":
            example = ruamel.yaml.comments.CommentedMap()
            if "name" in inptype:
                comment = '"{}" record type.'.format(inptype["name"])
            else:
                comment = "Anonymous record type."
            # One example entry per record field, each with its own comment.
            for field in cast(List[CWLObjectType], inptype["fields"]):
                value, f_comment = generate_example_input(field["type"], None)
                example.insert(0, shortname(cast(str, field["name"])), value, f_comment)
        elif "default" in inptype:
            example = inptype["default"]
            comment = 'default value of type "{}".'.format(inptype["type"])
        else:
            example = defaults.get(cast(str, inptype["type"]), str(inptype))
            comment = 'type "{}".'.format(inptype["type"])
    else:
        # Plain type name (or anything unrecognized): fall back to the
        # primitive placeholder table, or the type's own string form.
        if not default:
            example = defaults.get(str(inptype), str(inptype))
            comment = f'type "{inptype}"'
        else:
            example = default
            comment = f'default value of type "{inptype}".'
    return example, comment
def realize_input_schema(
    input_types: MutableSequence[Union[str, CWLObjectType]],
    schema_defs: MutableMapping[str, CWLObjectType],
) -> MutableSequence[Union[str, CWLObjectType]]:
    """Replace references to named typed with the actual types.

    Entries of *input_types* that are names (or "#"-fragment references)
    found in *schema_defs* are replaced in place with the referenced
    definitions; nested "type", "items", and "fields" structures are
    resolved recursively. The (mutated) input sequence is returned.
    """
    for index, entry in enumerate(input_types):
        if isinstance(entry, str):
            # A bare string may be "uri#name" or just "name".
            if "#" in entry:
                _, input_type_name = entry.split("#")
            else:
                input_type_name = entry
            if input_type_name in schema_defs:
                # Substitute the named definition in place.
                entry = input_types[index] = schema_defs[input_type_name]
        if isinstance(entry, MutableMapping):
            # Resolve a string "type" field that references a schema def.
            if isinstance(entry["type"], str) and "#" in entry["type"]:
                _, input_type_name = entry["type"].split("#")
                if input_type_name in schema_defs:
                    entry["type"] = cast(
                        CWLOutputAtomType,
                        realize_input_schema(
                            cast(
                                MutableSequence[Union[str, CWLObjectType]],
                                schema_defs[input_type_name],
                            ),
                            schema_defs,
                        ),
                    )
            # A union of types: resolve each alternative.
            if isinstance(entry["type"], MutableSequence):
                entry["type"] = cast(
                    CWLOutputAtomType,
                    realize_input_schema(
                        cast(MutableSequence[Union[str, CWLObjectType]], entry["type"]),
                        schema_defs,
                    ),
                )
            # A nested mapping type: resolve it as a one-element sequence.
            if isinstance(entry["type"], Mapping):
                entry["type"] = cast(
                    CWLOutputAtomType,
                    realize_input_schema(
                        [cast(CWLObjectType, entry["type"])], schema_defs
                    ),
                )
            if entry["type"] == "array":
                # "items" may be a single name or a list of types;
                # normalize to a list before recursing.
                items = (
                    entry["items"]
                    if not isinstance(entry["items"], str)
                    else [entry["items"]]
                )
                entry["items"] = cast(
                    CWLOutputAtomType,
                    realize_input_schema(
                        cast(MutableSequence[Union[str, CWLObjectType]], items),
                        schema_defs,
                    ),
                )
            if entry["type"] == "record":
                # Resolve the type of each record field.
                entry["fields"] = cast(
                    CWLOutputAtomType,
                    realize_input_schema(
                        cast(
                            MutableSequence[Union[str, CWLObjectType]], entry["fields"]
                        ),
                        schema_defs,
                    ),
                )
    return input_types
def generate_input_template(tool: Process) -> CWLObjectType:
    """Build a commented example input object for the given CWL process."""
    resolved_inputs = cast(
        List[MutableMapping[str, str]],
        realize_input_schema(tool.tool["inputs"], tool.schemaDefs),
    )
    template = ruamel.yaml.comments.CommentedMap()
    for input_schema in resolved_inputs:
        example, note = generate_example_input(
            input_schema["type"], input_schema.get("default", None)
        )
        # Each entry carries a comment describing the expected type.
        template.insert(0, shortname(input_schema["id"]), example, note)
    return template
def load_job_order(
    args: argparse.Namespace,
    stdin: IO[Any],
    fetcher_constructor: Optional[FetcherCallableType],
    overrides_list: List[CWLObjectType],
    tool_file_uri: str,
) -> Tuple[Optional[CWLObjectType], str, Loader]:
    """Load the job-order (input object) from a file, stdin, or nothing.

    :param args: parsed command line; ``args.job_order`` selects the source
        (a single path, "-" for stdin, or anything else for "no input file").
    :param overrides_list: extended in place with any
        ``cwltool:overrides`` found inside the input object.
    :return: (job order object or None, input base directory, the Loader
        used to resolve it).
    :raises SystemExit: if the input object is not a JSON/YAML mapping.
    """
    job_order_object = None
    job_order_file = None
    _jobloaderctx = jobloaderctx.copy()
    loader = Loader(_jobloaderctx, fetcher_constructor=fetcher_constructor)
    if len(args.job_order) == 1 and args.job_order[0][0] != "-":
        # A single non-dash argument is a path/URI to the input object.
        job_order_file = args.job_order[0]
    elif len(args.job_order) == 1 and args.job_order[0] == "-":
        # "-" means read the input object from stdin.
        yaml = yaml_no_ts()
        job_order_object = yaml.load(stdin)
        job_order_object, _ = loader.resolve_all(
            job_order_object, file_uri(os.getcwd()) + "/"
        )
    else:
        job_order_file = None
    if job_order_object is not None:
        input_basedir = args.basedir if args.basedir else os.getcwd()
    elif job_order_file is not None:
        # Paths inside a file-based input object are relative to that file.
        input_basedir = (
            args.basedir
            if args.basedir
            else os.path.abspath(os.path.dirname(job_order_file))
        )
        job_order_object, _ = loader.resolve_ref(
            job_order_file,
            checklinks=False,
            content_types=CWL_CONTENT_TYPES,
        )
    if (
        job_order_object is not None
        and "http://commonwl.org/cwltool#overrides" in job_order_object
    ):
        # Lift embedded overrides out of the input object into the
        # caller-supplied list, then remove them from the object itself.
        ov_uri = file_uri(job_order_file or input_basedir)
        overrides_list.extend(
            resolve_overrides(job_order_object, ov_uri, tool_file_uri)
        )
        del job_order_object["http://commonwl.org/cwltool#overrides"]
    if job_order_object is None:
        input_basedir = args.basedir if args.basedir else os.getcwd()
    if job_order_object is not None and not isinstance(
        job_order_object, MutableMapping
    ):
        _logger.error(
            "CWL input object at %s is not formatted correctly, it should be a "
            "JSON/YAML dictionary, not %s.\n"
            "Raw input object:\n%s",
            job_order_file or "stdin",
            type(job_order_object),
            job_order_object,
        )
        sys.exit(1)
    return (job_order_object, input_basedir, loader)
def init_job_order(
    job_order_object: Optional[CWLObjectType],
    args: argparse.Namespace,
    process: Process,
    loader: Loader,
    stdout: Union[TextIO, StreamWriter],
    print_input_deps: bool = False,
    relative_deps: str = "primary",
    make_fs_access: Callable[[str], StdFsAccess] = StdFsAccess,
    input_basedir: str = "",
    secret_store: Optional[SecretStore] = None,
    input_required: bool = True,
    runtime_context: Optional[RuntimeContext] = None,
) -> CWLObjectType:
    """Finalize the job order: fill from the command line, apply defaults, normalize.

    If *job_order_object* is None, a tool-specific argument parser is
    generated and the remaining command-line arguments are parsed into an
    input object. Declared input defaults are then merged in, File/Directory
    entries are normalized (location, sizes, formats), secrets are stored,
    and bookkeeping keys ("cwl:tool", "id") are stripped.

    :raises SystemExit: for --tool-help, --print-input-deps, or when a
        referenced job order cannot be resolved.
    """
    secrets_req, _ = process.get_requirement("http://commonwl.org/cwltool#Secrets")
    if job_order_object is None:
        # No input object yet: build one from the command line using a
        # parser generated from the tool's input schema.
        namemap = {}  # type: Dict[str, str]
        records = []  # type: List[str]
        toolparser = generate_parser(
            argparse.ArgumentParser(prog=args.workflow),
            process,
            namemap,
            records,
            input_required,
            loader.fetcher.urljoin,
            file_uri(os.getcwd()) + "/",
        )
        if args.tool_help:
            toolparser.print_help(cast(IO[str], stdout))
            # Use sys.exit: the bare exit() builtin comes from the site
            # module and is not guaranteed to exist (e.g. under python -S).
            sys.exit(0)
        cmd_line = vars(toolparser.parse_args(args.job_order))
        for record_name in records:
            # Regroup flattened "recordname_field" options back into a
            # nested record object.
            record = {}
            record_items = {
                k: v for k, v in cmd_line.items() if k.startswith(record_name)
            }
            for key, value in record_items.items():
                record[key[len(record_name) + 1 :]] = value
                del cmd_line[key]
            cmd_line[str(record_name)] = record
        if "job_order" in cmd_line and cmd_line["job_order"]:
            try:
                job_order_object = cast(
                    CWLObjectType,
                    loader.resolve_ref(cmd_line["job_order"])[0],
                )
            except Exception:
                _logger.exception(
                    "Failed to resolve job_order: %s", cmd_line["job_order"]
                )
                sys.exit(1)
        else:
            job_order_object = {"id": args.workflow}
        del cmd_line["job_order"]
        # Map parser destination names back to the CWL input identifiers.
        job_order_object.update({namemap[k]: v for k, v in cmd_line.items()})
        if secret_store and secrets_req:
            secret_store.store(
                [shortname(sc) for sc in cast(List[str], secrets_req["secrets"])],
                job_order_object,
            )
        if _logger.isEnabledFor(logging.DEBUG):
            _logger.debug(
                "Parsed job order from command line: %s",
                json_dumps(job_order_object, indent=4, default=str),
            )
    # Fill in declared defaults for any inputs not supplied by the user.
    for inp in process.tool["inputs"]:
        if "default" in inp and (
            not job_order_object or shortname(inp["id"]) not in job_order_object
        ):
            if not job_order_object:
                job_order_object = {}
            job_order_object[shortname(inp["id"])] = inp["default"]

    def path_to_loc(p: CWLObjectType) -> None:
        """Rename a legacy 'path' field to 'location'."""
        if "location" not in p and "path" in p:
            p["location"] = p["path"]
            del p["path"]

    # Combine namespaces from the input object and the process metadata so
    # that short-form format identifiers can be expanded.
    ns = {}  # type: ContextType
    ns.update(cast(ContextType, job_order_object.get("$namespaces", {})))
    ns.update(cast(ContextType, process.metadata.get("$namespaces", {})))
    ld = Loader(ns)

    def expand_formats(p: CWLObjectType) -> None:
        """Expand a File's 'format' to its full URI using the namespaces."""
        if "format" in p:
            p["format"] = ld.expand_url(cast(str, p["format"]), "")

    visit_class(job_order_object, ("File", "Directory"), path_to_loc)
    visit_class(
        job_order_object,
        ("File",),
        functools.partial(add_sizes, make_fs_access(input_basedir)),
    )
    visit_class(job_order_object, ("File",), expand_formats)
    adjustDirObjs(job_order_object, trim_listing)
    normalizeFilesDirs(job_order_object)
    if print_input_deps:
        if not runtime_context:
            raise RuntimeError("runtime_context is required for print_input_deps.")
        # Bind the inputs so secondaryFiles get discovered before printing.
        runtime_context.toplevel = True
        builder = process._init_job(job_order_object, runtime_context)
        builder.loadListing = "no_listing"
        builder.bind_input(
            process.inputs_record_schema, job_order_object, discover_secondaryFiles=True
        )
        basedir: Optional[str] = None
        uri = cast(str, job_order_object["id"])
        if uri == args.workflow:
            basedir = os.path.dirname(uri)
            uri = ""
        printdeps(
            job_order_object,
            loader,
            stdout,
            relative_deps,
            uri,
            basedir=basedir,
            nestdirs=False,
        )
        sys.exit(0)
    if secret_store and secrets_req:
        secret_store.store(
            [shortname(sc) for sc in cast(List[str], secrets_req["secrets"])],
            job_order_object,
        )
    # Strip bookkeeping keys that are not real inputs.
    if "cwl:tool" in job_order_object:
        del job_order_object["cwl:tool"]
    if "id" in job_order_object:
        del job_order_object["id"]
    return job_order_object
def make_relative(base: str, obj: CWLObjectType) -> None:
    """Relativize the location URI of a File or Directory object."""
    uri = cast(str, obj.get("location", obj.get("path")))
    has_scheme = ":" in uri.split("/")[0]
    if has_scheme and not uri.startswith("file://"):
        # Leave non-file URIs (http://, s3://, ...) untouched.
        return
    if uri.startswith("file://"):
        uri = uri_file_path(uri)
    obj["location"] = os.path.relpath(uri, base)
def printdeps(
    obj: CWLObjectType,
    document_loader: Loader,
    stdout: Union[TextIO, StreamWriter],
    relative_deps: str,
    uri: str,
    basedir: Optional[str] = None,
    nestdirs: bool = True,
) -> None:
    """Print a JSON representation of the dependencies of the CWL document.

    :param relative_deps: "primary" (paths relative to the primary document)
        or "cwd" (relative to the current working directory).
    :raises RuntimeError: if *relative_deps* is neither of those values
        (previously this fell through and crashed with a NameError on
        the unbound ``base``).
    """
    deps = find_deps(obj, document_loader, uri, basedir=basedir, nestdirs=nestdirs)
    if relative_deps == "primary":
        base = basedir if basedir else os.path.dirname(uri_file_path(str(uri)))
    elif relative_deps == "cwd":
        base = os.getcwd()
    else:
        # Fail loudly rather than referencing an unbound local below.
        raise RuntimeError(f"Unknown relative_deps value: {relative_deps!r}")
    visit_class(deps, ("File", "Directory"), functools.partial(make_relative, base))
    print(json_dumps(deps, indent=4, default=str), file=stdout)
def prov_deps(
    obj: CWLObjectType,
    document_loader: Loader,
    uri: str,
    basedir: Optional[str] = None,
) -> CWLObjectType:
    """Return the dependency tree of the document, pruned to CWL files only.

    Secondary files whose format is not CWL_IANA are removed recursively
    from the result of find_deps().
    """
    deps = find_deps(obj, document_loader, uri, basedir=basedir)

    def remove_non_cwl(deps: CWLObjectType) -> None:
        """Recursively drop secondaryFiles entries that are not CWL documents."""
        if "secondaryFiles" in deps:
            sec_files = cast(List[CWLObjectType], deps["secondaryFiles"])
            # Build the filtered list first: the previous implementation
            # deleted from sec_files while enumerating it, which skipped
            # the element immediately after each deletion.
            kept: List[CWLObjectType] = []
            for entry in sec_files:
                if "format" in entry and entry["format"] == CWL_IANA:
                    remove_non_cwl(entry)
                    kept.append(entry)
            sec_files[:] = kept

    remove_non_cwl(deps)
    return deps
def find_deps(
    obj: CWLObjectType,
    document_loader: Loader,
    uri: str,
    basedir: Optional[str] = None,
    nestdirs: bool = True,
) -> CWLObjectType:
    """Find the dependencies of the CWL document."""
    # The document itself is the root of the dependency tree.
    deps: CWLObjectType = {
        "class": "File",
        "location": uri,
        "format": CWL_IANA,
    }

    def loadref(base: str, uri: str) -> Union[CommentedMap, CommentedSeq, str, None]:
        """Fetch a referenced document, resolved relative to *base*."""
        return document_loader.fetch(document_loader.fetcher.urljoin(base, uri))

    secondary = scandeps(
        basedir if basedir else uri,
        obj,
        {"$import", "run"},
        {"$include", "$schemas", "location"},
        loadref,
        nestdirs=nestdirs,
    )
    if secondary is not None:
        deps["secondaryFiles"] = cast(
            MutableSequence[CWLOutputAtomType], mergedirs(secondary)
        )
    return deps
def print_pack(
    loadingContext: LoadingContext,
    uri: str,
) -> str:
    """Return a CWL serialization of the CWL document in JSON."""
    packed = pack(loadingContext, uri)
    graph = cast(MutableSequence[CWLObjectType], packed["$graph"])
    if len(graph) > 1:
        # Multiple processes: keep the whole $graph wrapper.
        return json_dumps(packed, indent=4, default=str)
    # A single process is emitted bare, without the $graph wrapper.
    return json_dumps(graph[0], indent=4, default=str)
def supported_cwl_versions(enable_dev: bool) -> List[str]:
    """Return the supported CWL version strings in ascending order."""
    # ALLUPDATES and UPDATES are dicts keyed by version; ALLUPDATES
    # additionally includes the development versions.
    source = ALLUPDATES if enable_dev else UPDATES
    return sorted(source)
def setup_schema(
    args: argparse.Namespace, custom_schema_callback: Optional[Callable[[], None]]
) -> None:
    """Configure CWL schemas: custom callback, cwltool extensions, or standard."""
    all_versions = (
        "v1.0",
        "v1.1",
        "v1.2",
        "v1.2.0-dev1",
        "v1.2.0-dev2",
        "v1.2.0-dev3",
    )
    if custom_schema_callback is not None:
        custom_schema_callback()
    elif args.enable_ext:
        with pkg_resources.resource_stream(__name__, "extensions.yml") as res:
            ext10 = res.read().decode("utf-8")
        with pkg_resources.resource_stream(__name__, "extensions-v1.1.yml") as res:
            ext11 = res.read().decode("utf-8")
        # v1.0 has its own extensions file; all later versions share the
        # v1.1 extensions.
        use_custom_schema("v1.0", "http://commonwl.org/cwltool", ext10)
        for version in all_versions[1:]:
            use_custom_schema(version, "http://commonwl.org/cwltool", ext11)
    else:
        for version in all_versions:
            use_standard_schema(version)
class ProvLogFormatter(logging.Formatter):
    """Enforce ISO8601 timestamps with both the 'T' separator and a 'Z' suffix."""

    def __init__(self) -> None:
        """Use the default formatter with our custom formatstring."""
        super().__init__("[%(asctime)sZ] %(message)s")

    def formatTime(
        self, record: logging.LogRecord, datefmt: Optional[str] = None
    ) -> str:
        """Return the record's creation time as UTC ISO8601 with milliseconds.

        The previous format spec ``{record.msecs:03f}`` rendered the
        milliseconds with six decimal places (e.g. ``7.500000``); ISO8601
        expects a zero-padded three-digit integer (``007``).
        """
        formatted_time = time.strftime(
            "%Y-%m-%dT%H:%M:%S", time.gmtime(float(record.created))
        )
        return f"{formatted_time},{int(record.msecs):03d}"
# Destination type for the provenance log: either a plain text stream or a
# file stored inside the research object's bag.
ProvOut = Union[io.TextIOWrapper, WritableBagFile]
def setup_provenance(
    args: argparse.Namespace,
    argsl: List[str],
    runtimeContext: RuntimeContext,
) -> Tuple[ProvOut, "logging.StreamHandler[ProvOut]"]:
    """Create the research object and attach a provenance log handler.

    Side effects: sets ``runtimeContext.research_obj`` and adds a new
    handler to the module logger so subsequent log records are captured
    in the provenance log.

    :return: the open provenance log stream and the handler writing to it.
    :raises ArgumentException: if checksum computation is disabled, since
        provenance requires checksums.
    """
    if not args.compute_checksum:
        _logger.error("--provenance incompatible with --no-compute-checksum")
        raise ArgumentException()
    ro = ResearchObject(
        getdefault(runtimeContext.make_fs_access, StdFsAccess)(""),
        temp_prefix_ro=args.tmpdir_prefix,
        orcid=args.orcid,
        full_name=args.cwl_full_name,
    )
    runtimeContext.research_obj = ro
    log_file_io = ro.open_log_file_for_activity(ro.engine_uuid)
    prov_log_handler = logging.StreamHandler(log_file_io)
    prov_log_handler.setFormatter(ProvLogFormatter())
    _logger.addHandler(prov_log_handler)
    _logger.debug("[provenance] Logging to %s", log_file_io)
    if argsl is not None:
        # Log cwltool command line options to provenance file
        _logger.info("[cwltool] %s %s", sys.argv[0], " ".join(argsl))
    _logger.debug("[cwltool] Arguments: %s", args)
    return log_file_io, prov_log_handler
def setup_loadingContext(
    loadingContext: Optional[LoadingContext],
    runtimeContext: RuntimeContext,
    args: argparse.Namespace,
) -> LoadingContext:
    """Prepare a LoadingContext from the given arguments."""
    if loadingContext is not None:
        loadingContext = loadingContext.copy()
    else:
        loadingContext = LoadingContext(vars(args))
        loadingContext.singularity = runtimeContext.singularity
        loadingContext.podman = runtimeContext.podman
    loadingContext.loader = default_loader(
        loadingContext.fetcher_constructor,
        enable_dev=args.enable_dev,
        doc_cache=args.doc_cache,
    )
    loadingContext.research_obj = runtimeContext.research_obj
    # JS validation is skipped when explicitly disabled or when validation
    # as a whole is turned off.
    loadingContext.disable_js_validation = args.disable_js_validation or (
        not args.do_validate
    )
    loadingContext.construct_tool_object = getdefault(
        loadingContext.construct_tool_object, workflow.default_make_tool
    )
    loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver)
    if loadingContext.do_update is None:
        # --pack and --print-subgraph need the document kept at its
        # original CWL version.
        loadingContext.do_update = not (args.pack or args.print_subgraph)
    return loadingContext
def make_template(
    tool: Process,
) -> None:
    """Write a commented YAML input template for the given Process to stdout."""

    def _represent_null(
        self: Any, data: Any
    ) -> Any:  # pylint: disable=unused-argument
        """Represent None as a clean 'null' scalar."""
        return self.represent_scalar("tag:yaml.org,2002:null", "null")

    # Registered globally so the dump below emits clean nulls.
    ruamel.yaml.representer.RoundTripRepresenter.add_representer(
        type(None), _represent_null
    )
    yaml = YAML()
    yaml.default_flow_style = False
    yaml.indent = 4
    yaml.block_seq_indent = 2
    yaml.dump(generate_input_template(tool), sys.stdout)
def inherit_reqshints(tool: Process, parent: Process) -> None:
    """Copy down requirements and hints from ancestors of a given process."""
    # Track the requirement classes already present so each parent
    # requirement is only appended once.
    req_classes = {req["class"] for req in tool.requirements}
    for parent_req in parent.requirements:
        if parent_req["class"] not in req_classes:
            tool.requirements.append(parent_req)
            req_classes.add(parent_req["class"])
    # A parent hint is inherited only if no requirement or hint of the
    # same class already applies (requirements take precedence over hints).
    hint_classes = {hint["class"] for hint in tool.hints}
    for parent_hint in parent.hints:
        hint_class = parent_hint["class"]
        if hint_class not in req_classes and hint_class not in hint_classes:
            tool.hints.append(parent_hint)
            hint_classes.add(hint_class)
def choose_target(
    args: argparse.Namespace,
    tool: Process,
    loading_context: LoadingContext,
) -> Optional[Process]:
    """Walk the Workflow, extract the subset matches all the args.targets."""
    if loading_context.loader is None:
        raise Exception("loading_context.loader cannot be None")
    if not isinstance(tool, Workflow):
        _logger.error("Can only use --target on Workflows")
        return None
    tool_id = tool.tool["id"]
    if urllib.parse.urlparse(tool_id).fragment:
        # The document id already carries a fragment: append each target
        # as a path segment.
        target_ids = [tool_id + "/" + target for target in args.target]
    else:
        urljoin = loading_context.loader.fetcher.urljoin
        target_ids = [urljoin(tool_id, "#" + target) for target in args.target]
    extracted = get_subgraph(target_ids, tool, loading_context)
    if not isinstance(loading_context.loader.idx, MutableMapping):
        raise Exception("Missing loading_context.loader.idx!")
    # Register the extracted subgraph so make_tool can resolve it by id.
    loading_context.loader.idx[extracted["id"]] = extracted
    return make_tool(extracted["id"], loading_context)
def choose_step(
    args: argparse.Namespace,
    tool: Process,
    loading_context: LoadingContext,
) -> Optional[Process]:
    """Walk the given Workflow and extract just args.single_step."""
    if loading_context.loader is None:
        raise Exception("loading_context.loader cannot be None")
    if not isinstance(tool, Workflow):
        _logger.error("Can only use --single-step on Workflows")
        return None
    tool_id = tool.tool["id"]
    if urllib.parse.urlparse(tool_id).fragment:
        step_id = tool_id + "/" + args.single_step
    else:
        step_id = loading_context.loader.fetcher.urljoin(
            tool_id, "#" + args.single_step
        )
    extracted = get_step(tool, step_id, loading_context)
    if not isinstance(loading_context.loader.idx, MutableMapping):
        raise Exception("Missing loading_context.loader.idx!")
    # Register the extracted step so make_tool can resolve it by id.
    loading_context.loader.idx[extracted["id"]] = cast(
        Union[CommentedMap, CommentedSeq, str, None], cmap(extracted)
    )
    return make_tool(extracted["id"], loading_context)
def choose_process(
    args: argparse.Namespace,
    tool: Process,
    loadingContext: LoadingContext,
) -> Optional[Process]:
    """Walk the given Workflow and extract just args.single_process."""
    if loadingContext.loader is None:
        raise Exception("loadingContext.loader cannot be None")
    if not isinstance(tool, Workflow):
        _logger.error("Can only use --single-process on Workflows")
        return None
    tool_id = tool.tool["id"]
    if urllib.parse.urlparse(tool_id).fragment:
        step_id = tool_id + "/" + args.single_process
    else:
        step_id = loadingContext.loader.fetcher.urljoin(
            tool_id, "#" + args.single_process
        )
    extracted, workflow_step = get_process(
        tool,
        step_id,
        loadingContext,
    )
    if not isinstance(loadingContext.loader.idx, MutableMapping):
        raise Exception("Missing loadingContext.loader.idx!")
    loadingContext.loader.idx[extracted["id"]] = extracted
    new_tool = make_tool(extracted["id"], loadingContext)
    # The extracted process must still honor the requirements/hints
    # declared on its enclosing workflow step.
    inherit_reqshints(new_tool, workflow_step)
    return new_tool
def check_working_directories(
    runtimeContext: RuntimeContext,
) -> Optional[int]:
    """Make any needed working directories.

    :return: 1 if a directory could not be created, None on success.
    """
    for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"):
        prefix = getattr(runtimeContext, dirprefix)
        if not prefix or prefix == DEFAULT_TMP_PREFIX:
            continue
        # cachedir names a directory proper; the *_prefix values only name
        # a directory when they already end with a slash.
        suffix = (
            "/" if prefix.endswith("/") or dirprefix == "cachedir" else ""
        )
        absolute = os.path.abspath(prefix) + suffix
        setattr(runtimeContext, dirprefix, absolute)
        parent = os.path.dirname(absolute)
        if not os.path.exists(parent):
            try:
                os.makedirs(parent)
            except Exception:
                _logger.exception("Failed to create directory.")
                return 1
    return None
def print_targets(
    tool: Process,
    stdout: Union[TextIO, StreamWriter],
    loading_context: LoadingContext,
    prefix: str = "",
) -> None:
    """Recursively find targets for --subgraph and friends.

    Prints the ids of the tool's outputs, inputs, and (for Workflows)
    steps, each prefixed with the path of enclosing steps; recurses into
    each step's `run` process.
    """
    for f in ("outputs", "inputs"):
        if tool.tool[f]:
            # prefix ends with "/" (or is empty at top level); prefix[:-1]
            # drops that trailing slash for the log line.
            # f[0].upper() + f[1:-1] turns e.g. "outputs" into "Output".
            _logger.info("%s %s%s targets:", prefix[:-1], f[0].upper(), f[1:-1])
            print(
                "  "
                + "\n  ".join([f"{prefix}{shortname(t['id'])}" for t in tool.tool[f]]),
                file=stdout,
            )
    if "steps" in tool.tool:
        # Shallow-copy the context so requirement/hint inheritance set up
        # for this workflow does not leak to siblings.
        loading_context = copy.copy(loading_context)
        loading_context.requirements = tool.requirements
        loading_context.hints = tool.hints
        _logger.info("%s steps targets:", prefix[:-1])
        for t in tool.tool["steps"]:
            print(f"  {prefix}{shortname(t['id'])}", file=stdout)
            run: Union[str, Process, Dict[str, Any]] = t["run"]
            # "run" may be a reference, an inline document, or an already
            # constructed Process.
            if isinstance(run, str):
                process = make_tool(run, loading_context)
            elif isinstance(run, dict):
                process = make_tool(cast(CommentedMap, cmap(run)), loading_context)
            else:
                process = run
            print_targets(
                process, stdout, loading_context, f"{prefix}{shortname(t['id'])}/"
            )
def main(
argsl: Optional[List[str]] = None,
args: Optional[argparse.Namespace] = None,
job_order_object: Optional[CWLObjectType] = None,
stdin: IO[Any] = sys.stdin,
stdout: Optional[Union[TextIO, StreamWriter]] = None,
stderr: IO[Any] = sys.stderr,
versionfunc: Callable[[], str] = versionstring,
logger_handler: Optional[logging.Handler] = None,
custom_schema_callback: Optional[Callable[[], None]] = None,
executor: Optional[JobExecutor] = None,
loadingContext: Optional[LoadingContext] = None,
runtimeContext: Optional[RuntimeContext] = None,
input_required: bool = True,
) -> int:
if not stdout: # force UTF-8 even if the console is configured differently
if hasattr(sys.stdout, "encoding") and sys.stdout.encoding.upper() not in (
"UTF-8",
"UTF8",
):
if hasattr(sys.stdout, "detach"):
stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
else:
stdout = getwriter("utf-8")(sys.stdout) # type: ignore
else:
stdout = sys.stdout
_logger.removeHandler(defaultStreamHandler)
stderr_handler = logger_handler
if stderr_handler is not None:
_logger.addHandler(stderr_handler)
else:
coloredlogs.install(logger=_logger, stream=stderr)
stderr_handler = _logger.handlers[-1]
workflowobj = None
prov_log_handler: Optional[logging.StreamHandler[ProvOut]] = None
try:
if args is None:
if argsl is None:
argsl = sys.argv[1:]
addl = [] # type: List[str]
if "CWLTOOL_OPTIONS" in os.environ:
addl = os.environ["CWLTOOL_OPTIONS"].split(" ")
parser = arg_parser()
argcomplete.autocomplete(parser)
args = parser.parse_args(addl + argsl)
if args.record_container_id:
if not args.cidfile_dir:
args.cidfile_dir = os.getcwd()
del args.record_container_id
if runtimeContext is None:
runtimeContext = RuntimeContext(vars(args))
else:
runtimeContext = runtimeContext.copy()
# If caller parsed its own arguments, it may not include every
# cwltool option, so fill in defaults to avoid crashing when
# dereferencing them in args.
for key, val in get_default_args().items():
if not hasattr(args, key):
setattr(args, key, val)
configure_logging(
stderr_handler,
args.quiet,
runtimeContext.debug,
args.enable_color,
args.timestamps,
)
if args.version:
print(versionfunc(), file=stdout)
return 0
_logger.info(versionfunc())
if args.print_supported_versions:
print("\n".join(supported_cwl_versions(args.enable_dev)), file=stdout)
return 0
if not args.workflow:
if os.path.isfile("CWLFile"):
args.workflow = "CWLFile"
else:
_logger.error("CWL document required, no input file was provided")
parser.print_help(stderr)
return 1
if args.ga4gh_tool_registries:
ga4gh_tool_registries[:] = args.ga4gh_tool_registries
if not args.enable_ga4gh_tool_registry:
del ga4gh_tool_registries[:]
if args.mpi_config_file is not None:
runtimeContext.mpi_config = MpiConfig.load(args.mpi_config_file)
setup_schema(args, custom_schema_callback)
prov_log_stream: Optional[Union[io.TextIOWrapper, WritableBagFile]] = None
if args.provenance:
if argsl is None:
raise Exception("argsl cannot be None")
try:
prov_log_stream, prov_log_handler = setup_provenance(
args, argsl, runtimeContext
)
except ArgumentException:
return 1
loadingContext = setup_loadingContext(loadingContext, runtimeContext, args)
uri, tool_file_uri = resolve_tool_uri(
args.workflow,
resolver=loadingContext.resolver,
fetcher_constructor=loadingContext.fetcher_constructor,
)
try_again_msg = (
"" if args.debug else ", try again with --debug for more information"
)
try:
job_order_object, input_basedir, jobloader = load_job_order(
args,
stdin,
loadingContext.fetcher_constructor,
loadingContext.overrides_list,
tool_file_uri,
)
if args.overrides:
loadingContext.overrides_list.extend(
load_overrides(
file_uri(os.path.abspath(args.overrides)), tool_file_uri
)
)
loadingContext, workflowobj, uri = fetch_document(uri, loadingContext)
if args.print_deps and loadingContext.loader:
printdeps(
workflowobj, loadingContext.loader, stdout, args.relative_deps, uri
)
return 0
loadingContext, uri = resolve_and_validate_document(
loadingContext,
workflowobj,
uri,
preprocess_only=(args.print_pre or args.pack),
skip_schemas=args.skip_schemas,
)
if loadingContext.loader is None:
raise Exception("Impossible code path.")
processobj, metadata = loadingContext.loader.resolve_ref(uri)
processobj = cast(Union[CommentedMap, CommentedSeq], processobj)
if args.pack:
print(print_pack(loadingContext, uri), file=stdout)
return 0
if args.provenance and runtimeContext.research_obj:
# Can't really be combined with args.pack at same time
runtimeContext.research_obj.packed_workflow(
print_pack(loadingContext, uri)
)
if args.print_pre:
print(
json_dumps(
processobj,
indent=4,
sort_keys=True,
separators=(",", ": "),
default=str,
),
file=stdout,
)
return 0
try:
tool = make_tool(uri, loadingContext)
except GraphTargetMissingException as main_missing_exc:
if args.validate:
logging.warn(
"File contains $graph of multiple objects and no default "
"process (#main). Validating all objects:"
)
for entry in workflowobj["$graph"]:
entry_id = entry["id"]
make_tool(entry_id, loadingContext)
print(f"{entry_id} is valid CWL.", file=stdout)
else:
raise main_missing_exc
if args.make_template:
make_template(tool)
return 0
if args.validate:
print(f"{args.workflow} is valid CWL.", file=stdout)
return 0
if args.print_rdf:
print(
printrdf(tool, loadingContext.loader.ctx, args.rdf_serializer),
file=stdout,
)
return 0
if args.print_dot:
printdot(tool, loadingContext.loader.ctx, stdout)
return 0
if args.print_targets:
print_targets(tool, stdout, loadingContext)
return 0
if args.target:
ctool = choose_target(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_step:
ctool = choose_step(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_process:
ctool = choose_process(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
if args.print_subgraph:
if "name" in tool.tool:
del tool.tool["name"]
print(
json_dumps(
tool.tool,
indent=4,
sort_keys=True,
separators=(",", ": "),
default=str,
),
file=stdout,
)
return 0
except (ValidationException) as exc:
_logger.error(
"Tool definition failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except (RuntimeError, WorkflowException) as exc:
_logger.error(
"Tool definition failed initialization:\n%s",
str(exc),
exc_info=args.debug,
)
return 1
except Exception as exc:
_logger.error(
"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
try_again_msg,
str(exc) if not args.debug else "",
exc_info=args.debug,
)
return 1
if isinstance(tool, int):
return tool
# If on MacOS platform, TMPDIR must be set to be under one of the
# shared volumes in Docker for Mac
# More info: https://dockstore.org/docs/faq
if sys.platform == "darwin":
default_mac_path = "/private/tmp/docker_tmp"
if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmp_outdir_prefix = default_mac_path
if runtimeContext.tmpdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmpdir_prefix = default_mac_path
if check_working_directories(runtimeContext) is not None:
return 1
if args.cachedir:
if args.move_outputs == "move":
runtimeContext.move_outputs = "copy"
runtimeContext.tmp_outdir_prefix = args.cachedir
runtimeContext.log_dir = args.log_dir
runtimeContext.secret_store = getdefault(
runtimeContext.secret_store, SecretStore()
)
runtimeContext.make_fs_access = getdefault(
runtimeContext.make_fs_access, StdFsAccess
)
if not executor:
if args.parallel:
temp_executor = MultithreadedJobExecutor()
runtimeContext.select_resources = temp_executor.select_resources
real_executor = temp_executor # type: JobExecutor
else:
real_executor = SingleJobExecutor()
else:
real_executor = executor
try:
runtimeContext.basedir = input_basedir
if isinstance(tool, ProcessGenerator):
tfjob_order = {} # type: CWLObjectType
if loadingContext.jobdefaults:
tfjob_order.update(loadingContext.jobdefaults)
if job_order_object:
tfjob_order.update(job_order_object)
tfout, tfstatus = real_executor(
tool.embedded_tool, tfjob_order, runtimeContext
)
if not tfout or tfstatus != "success":
raise WorkflowException(
"ProcessGenerator failed to generate workflow"
)
tool, job_order_object = tool.result(tfjob_order, tfout, runtimeContext)
if not job_order_object:
job_order_object = None
try:
initialized_job_order_object = init_job_order(
job_order_object,
args,
tool,
jobloader,
stdout,
print_input_deps=args.print_input_deps,
relative_deps=args.relative_deps,
make_fs_access=runtimeContext.make_fs_access,
input_basedir=input_basedir,
secret_store=runtimeContext.secret_store,
input_required=input_required,
runtime_context=runtimeContext,
)
except SystemExit as err:
return err.code
del args.workflow
del args.job_order
conf_file = getattr(
args, "beta_dependency_resolvers_configuration", None
) # str
use_conda_dependencies = getattr(
args, "beta_conda_dependencies", None
) # str
if conf_file or use_conda_dependencies:
runtimeContext.job_script_provider = DependenciesConfiguration(args)
else:
runtimeContext.find_default_container = functools.partial(
find_default_container,
default_container=runtimeContext.default_container,
use_biocontainers=args.beta_use_biocontainers,
)
(out, status) = real_executor(
tool, initialized_job_order_object, runtimeContext, logger=_logger
)
if out is not None:
if runtimeContext.research_obj is not None:
runtimeContext.research_obj.create_job(out, True)
def remove_at_id(doc: CWLObjectType) -> None:
for key in list(doc.keys()):
if key == "@id":
del doc[key]
else:
value = doc[key]
if isinstance(value, MutableMapping):
remove_at_id(value)
elif isinstance(value, MutableSequence):
for entry in value:
if isinstance(entry, MutableMapping):
remove_at_id(entry)
remove_at_id(out)
visit_class(
out,
("File",),
functools.partial(add_sizes, runtimeContext.make_fs_access("")),
)
def loc_to_path(obj: CWLObjectType) -> None:
for field in ("path", "nameext", "nameroot", "dirname"):
if field in obj:
del obj[field]
if cast(str, obj["location"]).startswith("file://"):
obj["path"] = uri_file_path(cast(str, obj["location"]))
visit_class(out, ("File", "Directory"), loc_to_path)
# Unsetting the Generation from final output object
visit_class(out, ("File",), MutationManager().unset_generation)
print(
json_dumps(out, indent=4, ensure_ascii=False, default=str),
file=stdout,
)
if hasattr(stdout, "flush"):
stdout.flush()
if status != "success":
_logger.warning("Final process status is %s", status)
return 1
_logger.info("Final process status is %s", status)
return 0
except (ValidationException) as exc:
_logger.error(
"Input object failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except UnsupportedRequirement as exc:
_logger.error(
"Workflow or tool uses unsupported feature:\n%s",
str(exc),
exc_info=args.debug,
)
return 33
except WorkflowException as exc:
_logger.error(
"Workflow error%s:\n%s",
try_again_msg,
strip_dup_lineno(str(exc)),
exc_info=args.debug,
)
return 1
except Exception as exc: # pylint: disable=broad-except
_logger.error(
"Unhandled error%s:\n %s",
try_again_msg,
str(exc),
exc_info=args.debug,
)
return 1
finally:
if (
args
and runtimeContext
and runtimeContext.research_obj
and workflowobj
and loadingContext
):
research_obj = runtimeContext.research_obj
if loadingContext.loader is not None:
research_obj.generate_snapshot(
prov_deps(workflowobj, loadingContext.loader, uri)
)
else:
_logger.warning(
"Unable to generate provenance snapshot "
" due to missing loadingContext.loader."
)
if prov_log_handler is not None:
# Stop logging so we won't half-log adding ourself to RO
_logger.debug(
"[provenance] Closing provenance log file %s", prov_log_handler
)
_logger.removeHandler(prov_log_handler)
# Ensure last log lines are written out
prov_log_handler.flush()
# Underlying WritableBagFile will add the tagfile to the manifest
if prov_log_stream:
prov_log_stream.close()
# Why not use prov_log_handler.stream ? That is not part of the
# public API for logging.StreamHandler
prov_log_handler.close()
research_obj.close(args.provenance)
_logger.removeHandler(stderr_handler)
_logger.addHandler(defaultStreamHandler)
def find_default_container(
    builder: HasReqsHints,
    default_container: Optional[str] = None,
    use_biocontainers: Optional[bool] = None,
) -> Optional[str]:
    """Resolve which container image to use for a tool.

    If *default_container* is already set it wins; otherwise, when
    biocontainers lookup is enabled, derive an image from the tool's
    SoftwareRequirement hints. Returns ``None`` when neither applies.
    """
    if use_biocontainers and not default_container:
        return get_container_from_software_requirements(
            use_biocontainers, builder
        )
    return default_container
def windows_check() -> None:
    """See if we are running on MS Windows and warn about the lack of support.

    No-op on any other operating system. Fix: the warning text previously
    read "If don't need" (missing "you").
    """
    # os.name is "nt" only on native Windows; WSL reports "posix".
    if os.name == "nt":
        warnings.warn(
            "The CWL reference runner (cwltool) no longer supports running "
            "CWL workflows natively on MS Windows as its previous MS Windows "
            "support was incomplete and untested. Instead, please see "
            "https://pypi.org/project/cwltool/#ms-windows-users "
            "for instructions on running cwltool via "
            "Windows Subsystem for Linux 2 (WSL2). If you don't need to "
            "execute CWL documents, then you can ignore this warning, but "
            "please consider migrating to https://pypi.org/project/cwl-utils/ "
            "for your CWL document processing needs."
        )
def run(*args: Any, **kwargs: Any) -> None:
    """Run cwltool.

    Console-script entry point: warns on unsupported platforms, installs a
    SIGTERM handler, then exits the process with main()'s return code.
    All positional/keyword arguments are forwarded to main().
    """
    windows_check()
    # Route SIGTERM through our handler so child processes get cleaned up.
    signal.signal(signal.SIGTERM, _signal_handler)
    try:
        # sys.exit raises SystemExit; the finally block still runs first.
        sys.exit(main(*args, **kwargs))
    finally:
        # Always reap any still-running subprocesses, even on error/exit.
        _terminate_processes()
# Allow invoking this module directly (``python -m`` / script execution)
# in addition to the installed console-script entry point.
if __name__ == "__main__":
    run(sys.argv[1:])
|
import builtins
import os
import sys
from array import array
from collections import Counter, defaultdict, deque
from dataclasses import dataclass, fields, is_dataclass
from itertools import islice
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from rich.highlighter import ReprHighlighter
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
def install(
    console: Optional["Console"] = None,
    overflow: "OverflowMethod" = "ignore",
    crop: bool = False,
    indent_guides: bool = False,
    max_length: Optional[int] = None,
    max_string: Optional[int] = None,
    expand_all: bool = False,
) -> None:
    """Install automatic pretty printing in the Python REPL.
    Args:
        console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
        overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
        crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
        indent_guides (bool, optional): Enable indentation guides. Defaults to False.
        max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
            Defaults to None.
        max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
        expand_all (bool, optional): Expand all containers. Defaults to False
    """
    from rich import get_console
    from .console import ConsoleRenderable  # needed here to prevent circular import
    console = console or get_console()
    assert console is not None
    def display_hook(value: Any) -> None:
        """Replacement sys.displayhook which prettifies objects with Rich."""
        if value is not None:
            assert console is not None
            # Clear builtins._ first so the value being printed never sees
            # itself via "_" while its repr is being computed.
            builtins._ = None  # type: ignore
            console.print(
                value
                if isinstance(value, RichRenderable)
                else Pretty(
                    value,
                    overflow=overflow,
                    indent_guides=indent_guides,
                    max_length=max_length,
                    max_string=max_string,
                    expand_all=expand_all,
                ),
                crop=crop,
            )
            # Mirror the stock REPL behavior of binding the result to "_".
            builtins._ = value  # type: ignore
    def ipy_display_hook(value: Any) -> None:  # pragma: no cover
        """IPython formatter callback: pretty print unless IPython/Jupyter
        already has a richer representation for the value."""
        assert console is not None
        # always skip rich generated jupyter renderables or None values
        if isinstance(value, JupyterRenderable) or value is None:
            return
        # on jupyter rich display, if using one of the special representations dont use rich
        if console.is_jupyter and any(attr.startswith("_repr_") for attr in dir(value)):
            return
        if hasattr(value, "_repr_mimebundle_"):
            return
        # certain renderables should start on a new line
        if isinstance(value, ConsoleRenderable):
            console.line()
        console.print(
            value
            if isinstance(value, RichRenderable)
            else Pretty(
                value,
                overflow=overflow,
                indent_guides=indent_guides,
                max_length=max_length,
                max_string=max_string,
                expand_all=expand_all,
                margin=12,
            ),
            crop=crop,
        )
    try:  # pragma: no cover
        # get_ipython is injected into builtins by IPython; NameError here
        # means we are in a plain REPL and fall through to sys.displayhook.
        ip = get_ipython()  # type: ignore
        from IPython.core.formatters import BaseFormatter
        # replace plain text formatter with rich formatter
        rich_formatter = BaseFormatter()
        rich_formatter.for_type(object, func=ipy_display_hook)
        ip.display_formatter.formatters["text/plain"] = rich_formatter
    except Exception:
        sys.displayhook = display_hook
class Pretty(JupyterMixin):
    """A rich renderable that pretty prints an object.
    Args:
        _object (Any): An object to pretty print.
        highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
        indent_size (int, optional): Number of spaces in indent. Defaults to 4.
        justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
        overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
        no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
        indent_guides (bool, optional): Enable indentation guides. Defaults to False.
        max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
            Defaults to None.
        max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
        expand_all (bool, optional): Expand all containers. Defaults to False.
        margin (int, optional): Subtrace a margin from width to force containers to expand earlier. Defaults to 0.
        insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
    """
    def __init__(
        self,
        _object: Any,
        highlighter: Optional["HighlighterType"] = None,
        *,
        indent_size: int = 4,
        justify: Optional["JustifyMethod"] = None,
        overflow: Optional["OverflowMethod"] = None,
        no_wrap: Optional[bool] = False,
        indent_guides: bool = False,
        max_length: Optional[int] = None,
        max_string: Optional[int] = None,
        expand_all: bool = False,
        margin: int = 0,
        insert_line: bool = False,
    ) -> None:
        self._object = _object
        self.highlighter = highlighter or ReprHighlighter()
        self.indent_size = indent_size
        self.justify = justify
        self.overflow = overflow
        self.no_wrap = no_wrap
        self.indent_guides = indent_guides
        self.max_length = max_length
        self.max_string = max_string
        self.expand_all = expand_all
        self.margin = margin
        self.insert_line = insert_line
    def __rich_console__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> "RenderResult":
        # Build the repr string first, narrowing the width by the margin so
        # containers expand a little earlier than the full console width.
        pretty_str = pretty_repr(
            self._object,
            max_width=options.max_width - self.margin,
            indent_size=self.indent_size,
            max_length=self.max_length,
            max_string=self.max_string,
            expand_all=self.expand_all,
        )
        pretty_text = Text(
            pretty_str,
            justify=self.justify or options.justify,
            overflow=self.overflow or options.overflow,
            no_wrap=pick_bool(self.no_wrap, options.no_wrap),
            style="pretty",
        )
        # An object whose __repr__ returns "" would otherwise render nothing;
        # substitute an explanatory placeholder instead.
        pretty_text = (
            self.highlighter(pretty_text)
            if pretty_text
            else Text(
                f"{type(self._object)}.__repr__ returned empty string",
                style="dim italic",
            )
        )
        if self.indent_guides and not options.ascii_only:
            pretty_text = pretty_text.with_indent_guides(
                self.indent_size, style="repr.indent"
            )
        # NOTE(review): relies on Text supporting the ``in`` operator for
        # substring search — confirm against rich.text.Text.
        if self.insert_line and "\n" in pretty_text:
            yield ""
        yield pretty_text
    def __rich_measure__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> "Measurement":
        # Measurement reflects the widest rendered line of the repr.
        pretty_str = pretty_repr(
            self._object,
            max_width=options.max_width,
            indent_size=self.indent_size,
            max_length=self.max_length,
            max_string=self.max_string,
        )
        text_width = (
            max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
        )
        return Measurement(text_width, text_width)
def _get_braces_for_defaultdict(_object: defaultdict) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: array) -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
# Map container type -> callable returning the (open_brace, close_brace,
# empty_repr) fragments used when rendering that container type.
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
    os._Environ: lambda _object: ("environ({", "})", "environ({})"),
    array: _get_braces_for_array,
    defaultdict: _get_braces_for_defaultdict,
    Counter: lambda _object: ("Counter({", "})", "Counter()"),
    deque: lambda _object: ("deque([", "])", "deque()"),
    dict: lambda _object: ("{", "}", "{}"),
    frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
    list: lambda _object: ("[", "]", "[]"),
    set: lambda _object: ("{", "}", "set()"),
    tuple: lambda _object: ("(", ")", "()"),
}
# Every container type the traversal knows how to expand.
_CONTAINERS = tuple(_BRACES.keys())
# Containers iterated as key/value mappings rather than plain sequences.
_MAPPING_CONTAINERS = (dict, os._Environ)
def is_expandable(obj: Any) -> bool:
    """Return True if pretty printing may expand *obj* across lines."""
    if isinstance(obj, _CONTAINERS):
        return True
    # Dataclass *instances* (not the class objects themselves) expand too.
    if is_dataclass(obj) and not isinstance(obj, type):
        return True
    return hasattr(obj, "__rich_repr__")
@dataclass
class Node:
    """A node in a repr tree. May be atomic or a container."""
    # Repr of the mapping key / attribute name; "" for a plain value node.
    key_repr: str = ""
    # Repr of an atomic value; empty for container nodes.
    value_repr: str = ""
    # Opening/closing delimiters for container nodes, e.g. "[" / "]".
    open_brace: str = ""
    close_brace: str = ""
    # Repr used when the container has no children, e.g. "set()".
    empty: str = ""
    # True when this is the final child of its parent (suppresses comma).
    last: bool = False
    # 1-tuples need special trailing-comma handling, so tuples are flagged.
    is_tuple: bool = False
    children: Optional[List["Node"]] = None
    # Separator between key and value; _traverse swaps in "=" for kwargs.
    key_separator = ": "
    @property
    def separator(self) -> str:
        """Get separator between items."""
        return "" if self.last else ","
    def iter_tokens(self) -> Iterable[str]:
        """Generate tokens for this node."""
        if self.key_repr:
            yield self.key_repr
            yield self.key_separator
        if self.value_repr:
            yield self.value_repr
        elif self.children is not None:
            if self.children:
                yield self.open_brace
                if self.is_tuple and len(self.children) == 1:
                    # Single-element tuple keeps its trailing comma: "(x,)".
                    yield from self.children[0].iter_tokens()
                    yield ","
                else:
                    for child in self.children:
                        yield from child.iter_tokens()
                        if not child.last:
                            yield ", "
                yield self.close_brace
            else:
                yield self.empty
    def check_length(self, start_length: int, max_length: int) -> bool:
        """Check the length fits within a limit.
        Args:
            start_length (int): Starting length of the line (indent, prefix, suffix).
            max_length (int): Maximum length.
        Returns:
            bool: True if the node can be rendered within max length, otherwise False.
        """
        total_length = start_length
        # Bail out as soon as the running total exceeds the limit.
        for token in self.iter_tokens():
            total_length += cell_len(token)
            if total_length > max_length:
                return False
        return True
    def __str__(self) -> str:
        repr_text = "".join(self.iter_tokens())
        return repr_text
    def render(
        self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
    ) -> str:
        """Render the node to a pretty repr.
        Args:
            max_width (int, optional): Maximum width of the repr. Defaults to 80.
            indent_size (int, optional): Size of indents. Defaults to 4.
            expand_all (bool, optional): Expand all levels. Defaults to False.
        Returns:
            str: A repr string of the original object.
        """
        lines = [_Line(node=self, is_root=True)]
        line_no = 0
        # Expand lines in place; expansion inserts child lines, so the list
        # grows while we iterate by index.
        while line_no < len(lines):
            line = lines[line_no]
            if line.expandable and not line.expanded:
                if expand_all or not line.check_length(max_width):
                    lines[line_no : line_no + 1] = line.expand(indent_size)
            line_no += 1
        repr_str = "\n".join(str(line) for line in lines)
        return repr_str
@dataclass
class _Line:
"""A line in repr output."""
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for child in node.children:
separator = "," if tuple_of_one else child.separator
line = _Line(
node=child,
whitespace=child_whitespace,
suffix=separator,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix="," if (tuple_of_one and not self.is_root) else node.separator,
)
def __str__(self) -> str:
return f"{self.whitespace}{self.text}{self.node or ""}{self.suffix}"
def traverse(_object: Any, max_length: int = None, max_string: int = None) -> Node:
    """Traverse object and generate a tree.
    Args:
        _object (Any): Object to be traversed.
        max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
            Defaults to None.
        max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
            Defaults to None.
    Returns:
        Node: The root of a tree structure which can be used to render a pretty repr.
    """
    def to_repr(obj: Any) -> str:
        """Get repr string for an object, but catch errors."""
        if (
            max_string is not None
            and isinstance(obj, (bytes, str))
            and len(obj) > max_string
        ):
            # Truncate long strings and note how many characters were cut.
            truncated = len(obj) - max_string
            obj_repr = f"{obj[:max_string]!r}+{truncated}"
        else:
            try:
                obj_repr = repr(obj)
            except Exception as error:
                obj_repr = f"<repr-error '{error}'>"
        return obj_repr
    # ids of containers currently on the traversal stack, for cycle detection.
    visited_ids: Set[int] = set()
    push_visited = visited_ids.add
    pop_visited = visited_ids.remove
    def _traverse(obj: Any, root: bool = False) -> Node:
        """Walk the object depth first."""
        obj_type = type(obj)
        py_version = (sys.version_info.major, sys.version_info.minor)
        children: List[Node]
        def iter_rich_args(rich_args) -> Iterable[Union[Any, Tuple[str, Any]]]:
            # Normalize __rich_repr__ output: (key, value, default) triples
            # are skipped when the value equals its default.
            for arg in rich_args:
                if isinstance(arg, tuple):
                    if len(arg) == 3:
                        key, child, default = arg
                        if default == child:
                            continue
                        yield key, child
                    elif len(arg) == 2:
                        key, child = arg
                        yield key, child
                    elif len(arg) == 1:
                        yield arg[0]
                else:
                    yield arg
        if hasattr(obj, "__rich_repr__"):
            args = list(iter_rich_args(obj.__rich_repr__()))
            if args:
                children = []
                append = children.append
                node = Node(
                    open_brace=f"{obj.__class__.__name__}(",
                    close_brace=")",
                    children=children,
                    last=root,
                )
                for last, arg in loop_last(args):
                    if isinstance(arg, tuple):
                        # Keyword-style argument: render as key=value.
                        # (Fix: removed a duplicated `child_node.last = last`.)
                        key, child = arg
                        child_node = _traverse(child)
                        child_node.last = last
                        child_node.key_repr = key
                        child_node.key_separator = "="
                        append(child_node)
                    else:
                        child_node = _traverse(arg)
                        child_node.last = last
                        append(child_node)
            else:
                node = Node(
                    value_repr=f"{obj.__class__.__name__}()", children=[], last=root
                )
        elif (
            is_dataclass(obj)
            and not isinstance(obj, type)
            and (
                "__create_fn__" in obj.__repr__.__qualname__ or py_version == (3, 6)
            )  # Check if __repr__ wasn't overriden
        ):
            obj_id = id(obj)
            if obj_id in visited_ids:
                # Recursion detected
                return Node(value_repr="...")
            push_visited(obj_id)
            children = []
            append = children.append
            node = Node(
                open_brace=f"{obj.__class__.__name__}(",
                close_brace=")",
                children=children,
                last=root,
            )
            for last, field in loop_last(fields(obj)):
                if field.repr:
                    child_node = _traverse(getattr(obj, field.name))
                    child_node.key_repr = field.name
                    child_node.last = last
                    child_node.key_separator = "="
                    append(child_node)
            pop_visited(obj_id)
        elif obj_type in _CONTAINERS:
            obj_id = id(obj)
            if obj_id in visited_ids:
                # Recursion detected
                return Node(value_repr="...")
            push_visited(obj_id)
            open_brace, close_brace, empty = _BRACES[obj_type](obj)
            if obj:
                children = []
                node = Node(
                    open_brace=open_brace,
                    close_brace=close_brace,
                    children=children,
                    last=root,
                )
                append = children.append
                num_items = len(obj)
                last_item_index = num_items - 1
                if isinstance(obj, _MAPPING_CONTAINERS):
                    iter_items = iter(obj.items())
                    if max_length is not None:
                        iter_items = islice(iter_items, max_length)
                    for index, (key, child) in enumerate(iter_items):
                        child_node = _traverse(child)
                        child_node.key_repr = to_repr(key)
                        child_node.last = index == last_item_index
                        append(child_node)
                else:
                    iter_values = iter(obj)
                    if max_length is not None:
                        iter_values = islice(iter_values, max_length)
                    for index, child in enumerate(iter_values):
                        child_node = _traverse(child)
                        child_node.last = index == last_item_index
                        append(child_node)
                if max_length is not None and num_items > max_length:
                    # Abbreviation marker, e.g. "... +17".
                    append(Node(value_repr=f"... +{num_items-max_length}", last=True))
            else:
                node = Node(empty=empty, children=[], last=root)
            pop_visited(obj_id)
        else:
            # Atomic value: store its (possibly truncated) repr directly.
            node = Node(value_repr=to_repr(obj), last=root)
            node.is_tuple = isinstance(obj, tuple)
        return node
    node = _traverse(_object, root=True)
    return node
def pretty_repr(
    _object: Any,
    *,
    max_width: int = 80,
    indent_size: int = 4,
    max_length: int = None,
    max_string: int = None,
    expand_all: bool = False,
) -> str:
    """Prettify repr string by expanding on to new lines to fit within a given width.
    Args:
        _object (Any): Object to repr.
        max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
        indent_size (int, optional): Number of spaces to indent. Defaults to 4.
        max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
            Defaults to None.
        max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
            Defaults to None.
        expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
    Returns:
        str: A possibly multi-line representation of the object.
    """
    # A pre-built Node is rendered directly; anything else is traversed first.
    node = (
        _object
        if isinstance(_object, Node)
        else traverse(_object, max_length=max_length, max_string=max_string)
    )
    return node.render(
        max_width=max_width, indent_size=indent_size, expand_all=expand_all
    )
def pprint(
    _object: Any,
    *,
    console: "Console" = None,
    indent_guides: bool = True,
    max_length: int = None,
    max_string: int = None,
    expand_all: bool = False,
):
    """A convenience function for pretty printing.
    Args:
        _object (Any): Object to pretty print.
        console (Console, optional): Console instance, or None to use default. Defaults to None.
        max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
            Defaults to None.
        max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
        indent_guides (bool, optional): Enable indentation guides. Defaults to True.
        expand_all (bool, optional): Expand all containers. Defaults to False.
    """
    target_console = console if console is not None else get_console()
    renderable = Pretty(
        _object,
        max_length=max_length,
        max_string=max_string,
        indent_guides=indent_guides,
        expand_all=expand_all,
        overflow="ignore",
    )
    # soft_wrap avoids hard-wrapping: the repr already fits its width budget.
    target_console.print(renderable, soft_wrap=True)
# Demo / manual test: renders a data structure exercising nested containers,
# a recursive reference, string truncation, and a repr that raises.
if __name__ == "__main__":  # pragma: no cover
    class BrokenRepr:
        # Deliberately faulty __repr__ to exercise the <repr-error> path.
        def __repr__(self):
            1 / 0
    d = defaultdict(int)
    d["foo"] = 5
    data = {
        "foo": [
            1,
            "Hello World!",
            100.123,
            323.232,
            432324.0,
            {5, 6, 7, (1, 2, 3, 4), 8},
        ],
        "bar": frozenset({1, 2, 3}),
        "defaultdict": defaultdict(
            list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
        ),
        "counter": Counter(
            [
                "apple",
                "orange",
                "pear",
                "kumquat",
                "kumquat",
                "durian" * 100,
            ]
        ),
        "atomic": (False, True, None),
        "Broken": BrokenRepr(),
    }
    # Create a cycle to exercise the recursion ("...") detection.
    data["foo"].append(data)  # type: ignore
    from rich import print
    print(Pretty(data, indent_guides=True, max_string=20))
| import builtins
import os
import sys
from array import array
from collections import Counter, defaultdict, deque
from dataclasses import dataclass, fields, is_dataclass
from itertools import islice
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from rich.highlighter import ReprHighlighter
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
def install(
    console: Optional["Console"] = None,
    overflow: "OverflowMethod" = "ignore",
    crop: bool = False,
    indent_guides: bool = False,
    max_length: Optional[int] = None,
    max_string: Optional[int] = None,
    expand_all: bool = False,
) -> None:
    """Install automatic pretty printing in the Python REPL.
    Args:
        console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
        overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
        crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
        indent_guides (bool, optional): Enable indentation guides. Defaults to False.
        max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
            Defaults to None.
        max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
        expand_all (bool, optional): Expand all containers. Defaults to False
    """
    from rich import get_console
    from .console import ConsoleRenderable  # needed here to prevent circular import
    console = console or get_console()
    assert console is not None
    def display_hook(value: Any) -> None:
        """Replacement sys.displayhook which prettifies objects with Rich."""
        if value is not None:
            assert console is not None
            # Clear builtins._ first so the value being printed never sees
            # itself via "_" while its repr is being computed.
            builtins._ = None  # type: ignore
            console.print(
                value
                if isinstance(value, RichRenderable)
                else Pretty(
                    value,
                    overflow=overflow,
                    indent_guides=indent_guides,
                    max_length=max_length,
                    max_string=max_string,
                    expand_all=expand_all,
                ),
                crop=crop,
            )
            # Mirror the stock REPL behavior of binding the result to "_".
            builtins._ = value  # type: ignore
    def ipy_display_hook(value: Any) -> None:  # pragma: no cover
        """IPython formatter callback: pretty print unless IPython/Jupyter
        already has a richer representation for the value."""
        assert console is not None
        # always skip rich generated jupyter renderables or None values
        if isinstance(value, JupyterRenderable) or value is None:
            return
        # on jupyter rich display, if using one of the special representations dont use rich
        if console.is_jupyter and any(attr.startswith("_repr_") for attr in dir(value)):
            return
        if hasattr(value, "_repr_mimebundle_"):
            return
        # certain renderables should start on a new line
        if isinstance(value, ConsoleRenderable):
            console.line()
        console.print(
            value
            if isinstance(value, RichRenderable)
            else Pretty(
                value,
                overflow=overflow,
                indent_guides=indent_guides,
                max_length=max_length,
                max_string=max_string,
                expand_all=expand_all,
                margin=12,
            ),
            crop=crop,
        )
    try:  # pragma: no cover
        # get_ipython is injected into builtins by IPython; NameError here
        # means we are in a plain REPL and fall through to sys.displayhook.
        ip = get_ipython()  # type: ignore
        from IPython.core.formatters import BaseFormatter
        # replace plain text formatter with rich formatter
        rich_formatter = BaseFormatter()
        rich_formatter.for_type(object, func=ipy_display_hook)
        ip.display_formatter.formatters["text/plain"] = rich_formatter
    except Exception:
        sys.displayhook = display_hook
class Pretty(JupyterMixin):
    """A rich renderable that pretty prints an object.
    Args:
        _object (Any): An object to pretty print.
        highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
        indent_size (int, optional): Number of spaces in indent. Defaults to 4.
        justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
        overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
        no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
        indent_guides (bool, optional): Enable indentation guides. Defaults to False.
        max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
            Defaults to None.
        max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
        expand_all (bool, optional): Expand all containers. Defaults to False.
        margin (int, optional): Subtrace a margin from width to force containers to expand earlier. Defaults to 0.
        insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
    """
    def __init__(
        self,
        _object: Any,
        highlighter: Optional["HighlighterType"] = None,
        *,
        indent_size: int = 4,
        justify: Optional["JustifyMethod"] = None,
        overflow: Optional["OverflowMethod"] = None,
        no_wrap: Optional[bool] = False,
        indent_guides: bool = False,
        max_length: Optional[int] = None,
        max_string: Optional[int] = None,
        expand_all: bool = False,
        margin: int = 0,
        insert_line: bool = False,
    ) -> None:
        self._object = _object
        self.highlighter = highlighter or ReprHighlighter()
        self.indent_size = indent_size
        self.justify = justify
        self.overflow = overflow
        self.no_wrap = no_wrap
        self.indent_guides = indent_guides
        self.max_length = max_length
        self.max_string = max_string
        self.expand_all = expand_all
        self.margin = margin
        self.insert_line = insert_line
    def __rich_console__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> "RenderResult":
        # Build the repr string first, narrowing the width by the margin so
        # containers expand a little earlier than the full console width.
        pretty_str = pretty_repr(
            self._object,
            max_width=options.max_width - self.margin,
            indent_size=self.indent_size,
            max_length=self.max_length,
            max_string=self.max_string,
            expand_all=self.expand_all,
        )
        pretty_text = Text(
            pretty_str,
            justify=self.justify or options.justify,
            overflow=self.overflow or options.overflow,
            no_wrap=pick_bool(self.no_wrap, options.no_wrap),
            style="pretty",
        )
        # An object whose __repr__ returns "" would otherwise render nothing;
        # substitute an explanatory placeholder instead.
        pretty_text = (
            self.highlighter(pretty_text)
            if pretty_text
            else Text(
                f"{type(self._object)}.__repr__ returned empty string",
                style="dim italic",
            )
        )
        if self.indent_guides and not options.ascii_only:
            pretty_text = pretty_text.with_indent_guides(
                self.indent_size, style="repr.indent"
            )
        # NOTE(review): relies on Text supporting the ``in`` operator for
        # substring search — confirm against rich.text.Text.
        if self.insert_line and "\n" in pretty_text:
            yield ""
        yield pretty_text
    def __rich_measure__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> "Measurement":
        # Measurement reflects the widest rendered line of the repr.
        pretty_str = pretty_repr(
            self._object,
            max_width=options.max_width,
            indent_size=self.indent_size,
            max_length=self.max_length,
            max_string=self.max_string,
        )
        text_width = (
            max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
        )
        return Measurement(text_width, text_width)
def _get_braces_for_defaultdict(_object: defaultdict) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: array) -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
# Map container type -> callable returning the (open_brace, close_brace,
# empty_repr) fragments used when rendering that container type.
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
    os._Environ: lambda _object: ("environ({", "})", "environ({})"),
    array: _get_braces_for_array,
    defaultdict: _get_braces_for_defaultdict,
    Counter: lambda _object: ("Counter({", "})", "Counter()"),
    deque: lambda _object: ("deque([", "])", "deque()"),
    dict: lambda _object: ("{", "}", "{}"),
    frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
    list: lambda _object: ("[", "]", "[]"),
    set: lambda _object: ("{", "}", "set()"),
    tuple: lambda _object: ("(", ")", "()"),
}
# Every container type the traversal knows how to expand.
_CONTAINERS = tuple(_BRACES.keys())
# Containers iterated as key/value mappings rather than plain sequences.
_MAPPING_CONTAINERS = (dict, os._Environ)
def is_expandable(obj: Any) -> bool:
    """Return True if pretty printing may expand *obj* across lines."""
    if isinstance(obj, _CONTAINERS):
        return True
    # Dataclass *instances* (not the class objects themselves) expand too.
    if is_dataclass(obj) and not isinstance(obj, type):
        return True
    return hasattr(obj, "__rich_repr__")
@dataclass
class Node:
    """A node in a repr tree. May be atomic or a container."""
    # Repr of the mapping key / attribute name; "" for a plain value node.
    key_repr: str = ""
    # Repr of an atomic value; empty for container nodes.
    value_repr: str = ""
    # Opening/closing delimiters for container nodes, e.g. "[" / "]".
    open_brace: str = ""
    close_brace: str = ""
    # Repr used when the container has no children, e.g. "set()".
    empty: str = ""
    # True when this is the final child of its parent (suppresses comma).
    last: bool = False
    # 1-tuples need special trailing-comma handling, so tuples are flagged.
    is_tuple: bool = False
    children: Optional[List["Node"]] = None
    # Separator between key and value; _traverse swaps in "=" for kwargs.
    key_separator = ": "
    @property
    def separator(self) -> str:
        """Get separator between items."""
        return "" if self.last else ","
    def iter_tokens(self) -> Iterable[str]:
        """Generate tokens for this node."""
        if self.key_repr:
            yield self.key_repr
            yield self.key_separator
        if self.value_repr:
            yield self.value_repr
        elif self.children is not None:
            if self.children:
                yield self.open_brace
                if self.is_tuple and len(self.children) == 1:
                    # Single-element tuple keeps its trailing comma: "(x,)".
                    yield from self.children[0].iter_tokens()
                    yield ","
                else:
                    for child in self.children:
                        yield from child.iter_tokens()
                        if not child.last:
                            yield ", "
                yield self.close_brace
            else:
                yield self.empty
    def check_length(self, start_length: int, max_length: int) -> bool:
        """Check the length fits within a limit.
        Args:
            start_length (int): Starting length of the line (indent, prefix, suffix).
            max_length (int): Maximum length.
        Returns:
            bool: True if the node can be rendered within max length, otherwise False.
        """
        total_length = start_length
        # Bail out as soon as the running total exceeds the limit.
        for token in self.iter_tokens():
            total_length += cell_len(token)
            if total_length > max_length:
                return False
        return True
    def __str__(self) -> str:
        repr_text = "".join(self.iter_tokens())
        return repr_text
    def render(
        self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
    ) -> str:
        """Render the node to a pretty repr.
        Args:
            max_width (int, optional): Maximum width of the repr. Defaults to 80.
            indent_size (int, optional): Size of indents. Defaults to 4.
            expand_all (bool, optional): Expand all levels. Defaults to False.
        Returns:
            str: A repr string of the original object.
        """
        lines = [_Line(node=self, is_root=True)]
        line_no = 0
        # Expand lines in place; expansion inserts child lines, so the list
        # grows while we iterate by index.
        while line_no < len(lines):
            line = lines[line_no]
            if line.expandable and not line.expanded:
                if expand_all or not line.check_length(max_width):
                    lines[line_no : line_no + 1] = line.expand(indent_size)
            line_no += 1
        repr_str = "\n".join(str(line) for line in lines)
        return repr_str
@dataclass
class _Line:
    """A line in repr output."""
    # True only for the synthetic root line (affects trailing-comma logic).
    is_root: bool = False
    # The Node still to be rendered on this line, or None for literal text.
    node: Optional[Node] = None
    text: str = ""
    suffix: str = ""
    whitespace: str = ""
    expanded: bool = False
    @property
    def expandable(self) -> bool:
        """Check if the line may be expanded."""
        return bool(self.node is not None and self.node.children)
    def check_length(self, max_length: int) -> bool:
        """Check this line fits within a given number of cells."""
        start_length = (
            len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
        )
        assert self.node is not None
        return self.node.check_length(start_length, max_length)
    def expand(self, indent_size: int) -> Iterable["_Line"]:
        """Expand this line by adding children on their own line."""
        node = self.node
        assert node is not None
        whitespace = self.whitespace
        assert node.children
        if node.key_repr:
            yield _Line(
                text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
                whitespace=whitespace,
            )
        else:
            yield _Line(text=node.open_brace, whitespace=whitespace)
        child_whitespace = self.whitespace + " " * indent_size
        tuple_of_one = node.is_tuple and len(node.children) == 1
        for child in node.children:
            separator = "," if tuple_of_one else child.separator
            line = _Line(
                node=child,
                whitespace=child_whitespace,
                suffix=separator,
            )
            yield line
        yield _Line(
            text=node.close_brace,
            whitespace=whitespace,
            # A 1-tuple keeps its trailing comma unless it is the root line.
            suffix="," if (tuple_of_one and not self.is_root) else node.separator,
        )
    def __str__(self) -> str:
        return f"{self.whitespace}{self.text}{self.node or ''}{self.suffix}"
def traverse(
    _object: Any, max_length: Optional[int] = None, max_string: Optional[int] = None
) -> Node:
    """Traverse object and generate a tree.

    Args:
        _object (Any): Object to be traversed.
        max_length (int, optional): Maximum length of containers before abbreviating,
            or None for no abbreviation. Defaults to None.
        max_string (int, optional): Maximum length of string before truncating,
            or None to disable truncating. Defaults to None.

    Returns:
        Node: The root of a tree structure which can be used to render a pretty repr.
    """

    def to_repr(obj: Any) -> str:
        """Get repr string for an object, but catch errors."""
        if (
            max_string is not None
            and isinstance(obj, (bytes, str))
            and len(obj) > max_string
        ):
            # Truncate long strings and note how many characters were cut.
            truncated = len(obj) - max_string
            obj_repr = f"{obj[:max_string]!r}+{truncated}"
        else:
            try:
                obj_repr = repr(obj)
            except Exception as error:
                obj_repr = f"<repr-error '{error}'>"
        return obj_repr

    # IDs of containers on the current traversal path, for cycle detection.
    visited_ids: Set[int] = set()
    push_visited = visited_ids.add
    pop_visited = visited_ids.remove

    def _traverse(obj: Any, root: bool = False) -> Node:
        """Walk the object depth first."""
        obj_type = type(obj)
        py_version = (sys.version_info.major, sys.version_info.minor)
        children: List[Node]

        def iter_rich_args(rich_args) -> Iterable[Union[Any, Tuple[str, Any]]]:
            # __rich_repr__ yields values, (key, value) or (key, value, default).
            for arg in rich_args:
                if isinstance(arg, tuple):
                    if len(arg) == 3:
                        key, child, default = arg
                        if default == child:
                            # Skip arguments equal to their default value.
                            continue
                        yield key, child
                    elif len(arg) == 2:
                        key, child = arg
                        yield key, child
                    elif len(arg) == 1:
                        yield arg[0]
                else:
                    yield arg

        if hasattr(obj, "__rich_repr__"):
            args = list(iter_rich_args(obj.__rich_repr__()))
            if args:
                children = []
                append = children.append
                node = Node(
                    open_brace=f"{obj.__class__.__name__}(",
                    close_brace=")",
                    children=children,
                    last=root,
                )
                for last, arg in loop_last(args):
                    if isinstance(arg, tuple):
                        key, child = arg
                        child_node = _traverse(child)
                        # Fix: `last` was assigned twice in the original.
                        child_node.last = last
                        child_node.key_repr = key
                        child_node.key_separator = "="
                        append(child_node)
                    else:
                        child_node = _traverse(arg)
                        child_node.last = last
                        append(child_node)
            else:
                node = Node(
                    value_repr=f"{obj.__class__.__name__}()", children=[], last=root
                )
        elif (
            is_dataclass(obj)
            and not isinstance(obj, type)
            and (
                "__create_fn__" in obj.__repr__.__qualname__ or py_version == (3, 6)
            )  # Check if __repr__ wasn't overridden
        ):
            obj_id = id(obj)
            if obj_id in visited_ids:
                # Recursion detected
                return Node(value_repr="...")
            push_visited(obj_id)
            children = []
            append = children.append
            node = Node(
                open_brace=f"{obj.__class__.__name__}(",
                close_brace=")",
                children=children,
                last=root,
            )
            for last, field in loop_last(fields(obj)):
                if field.repr:
                    child_node = _traverse(getattr(obj, field.name))
                    child_node.key_repr = field.name
                    child_node.last = last
                    child_node.key_separator = "="
                    append(child_node)
            pop_visited(obj_id)
        elif obj_type in _CONTAINERS:
            obj_id = id(obj)
            if obj_id in visited_ids:
                # Recursion detected
                return Node(value_repr="...")
            push_visited(obj_id)
            open_brace, close_brace, empty = _BRACES[obj_type](obj)
            if obj:
                children = []
                node = Node(
                    open_brace=open_brace,
                    close_brace=close_brace,
                    children=children,
                    last=root,
                )
                append = children.append
                num_items = len(obj)
                last_item_index = num_items - 1
                if isinstance(obj, _MAPPING_CONTAINERS):
                    iter_items = iter(obj.items())
                    if max_length is not None:
                        iter_items = islice(iter_items, max_length)
                    for index, (key, child) in enumerate(iter_items):
                        child_node = _traverse(child)
                        child_node.key_repr = to_repr(key)
                        child_node.last = index == last_item_index
                        append(child_node)
                else:
                    iter_values = iter(obj)
                    if max_length is not None:
                        iter_values = islice(iter_values, max_length)
                    for index, child in enumerate(iter_values):
                        child_node = _traverse(child)
                        child_node.last = index == last_item_index
                        append(child_node)
                if max_length is not None and num_items > max_length:
                    # Record how many items were elided.
                    append(Node(value_repr=f"... +{num_items-max_length}", last=True))
            else:
                node = Node(empty=empty, children=[], last=root)
            pop_visited(obj_id)
        else:
            node = Node(value_repr=to_repr(obj), last=root)
        node.is_tuple = isinstance(obj, tuple)
        return node

    node = _traverse(_object, root=True)
    return node
def pretty_repr(
    _object: Any,
    *,
    max_width: int = 80,
    indent_size: int = 4,
    max_length: Optional[int] = None,
    max_string: Optional[int] = None,
    expand_all: bool = False,
) -> str:
    """Prettify repr string by expanding on to new lines to fit within a given width.

    Args:
        _object (Any): Object to repr.
        max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
        indent_size (int, optional): Number of spaces to indent. Defaults to 4.
        max_length (int, optional): Maximum length of containers before abbreviating,
            or None for no abbreviation. Defaults to None.
        max_string (int, optional): Maximum length of string before truncating,
            or None to disable truncating. Defaults to None.
        expand_all (bool, optional): Expand all containers regardless of available
            width. Defaults to False.

    Returns:
        str: A possibly multi-line representation of the object.
    """
    # A pre-built Node tree may be passed directly to skip traversal.
    if isinstance(_object, Node):
        node = _object
    else:
        node = traverse(_object, max_length=max_length, max_string=max_string)
    repr_str = node.render(
        max_width=max_width, indent_size=indent_size, expand_all=expand_all
    )
    return repr_str
def pprint(
    _object: Any,
    *,
    console: Optional["Console"] = None,
    indent_guides: bool = True,
    max_length: Optional[int] = None,
    max_string: Optional[int] = None,
    expand_all: bool = False,
) -> None:
    """A convenience function for pretty printing.

    Args:
        _object (Any): Object to pretty print.
        console (Console, optional): Console instance, or None to use default.
            Defaults to None.
        indent_guides (bool, optional): Enable indentation guides. Defaults to True.
        max_length (int, optional): Maximum length of containers before abbreviating,
            or None for no abbreviation. Defaults to None.
        max_string (int, optional): Maximum length of strings before truncating,
            or None to disable. Defaults to None.
        expand_all (bool, optional): Expand all containers. Defaults to False.
    """
    _console = get_console() if console is None else console
    _console.print(
        Pretty(
            _object,
            max_length=max_length,
            max_string=max_string,
            indent_guides=indent_guides,
            expand_all=expand_all,
            overflow="ignore",
        ),
        # soft_wrap avoids hard-wrapping inside the already-formatted repr.
        soft_wrap=True,
    )
if __name__ == "__main__":  # pragma: no cover
    # Demo: pretty-print a nested structure exercising most container types.

    class BrokenRepr:
        # An object whose repr raises, to demonstrate <repr-error> handling.
        def __repr__(self):
            1 / 0

    d = defaultdict(int)
    d["foo"] = 5
    data = {
        "foo": [
            1,
            "Hello World!",
            100.123,
            323.232,
            432324.0,
            {5, 6, 7, (1, 2, 3, 4), 8},
        ],
        "bar": frozenset({1, 2, 3}),
        "defaultdict": defaultdict(
            list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
        ),
        "counter": Counter(
            [
                "apple",
                "orange",
                "pear",
                "kumquat",
                "kumquat",
                "durian" * 100,
            ]
        ),
        "atomic": (False, True, None),
        "Broken": BrokenRepr(),
    }
    # Self-reference to exercise the recursion ("...") path.
    data["foo"].append(data)  # type: ignore
    from rich import print

    print(Pretty(data, indent_guides=True, max_string=20))
|
#!/usr/bin/env python3
import asyncio
import logging
from collections import defaultdict
from functools import partial
from box import Box
_l = logging.getLogger(__name__)  # default logger injected into components
_instances = dict()  # (type, id) -> Component; enforces unique IDs per type
_events = defaultdict(asyncio.Event)  # "component:status" -> Event, set once emitted
_event_queues = list()  # listener queues that receive every ComponentEvent
_event_callbacks = defaultdict(list)  # "component:status" -> one-shot callbacks
class Component:
    """A stateful element in a workflow that can be configured, run, and uniquely named."""

    def __init__(self, *args, id=None, workflow=None, parent=None, logger=_l, **kwargs):
        self.id = id
        if id:
            # IDs must be unique per component type.
            key = (type(self), id)
            if key in _instances:
                raise ValueError(
                    f'{key[0].__name__} with ID "{id}" already exists: {_instances[key]}')
            _instances[key] = self
        self.workflow = workflow
        self.parent = parent
        self.children = list()
        if parent:
            parent.children.append(self)
        self.logger = logger
        self.loop = asyncio.get_event_loop()
        self._event_lock = set()  # statuses currently being set (re-entrancy guard)
        self._debug = {'events'}
        self._settings = Box(self.configure(**kwargs) or dict())
        if not workflow:
            workflow = self
        settings = [f'{k}={v}' for k, v in workflow.safe_settings(self._settings).items()]
        # Fix: use double quotes inside the f-string -- nesting the same quote
        # type is a SyntaxError on Python < 3.12.
        self.debug(f'Initialized {" ".join(settings)}')

    def configure(self, **settings):
        """Hook for subclasses; the returned dict becomes the component settings."""
        return settings

    def settings(self, **override):
        """Return a copy of the settings Box with optional overrides applied."""
        return Box(self._settings, **override)

    def safe_settings(self, settings):
        """Hook for subclasses to redact secrets before settings are logged."""
        return settings

    @property
    def type(self):
        """Return the component's class name."""
        return type(self).__name__

    @property
    def status(self):
        return getattr(self, '_status', None)

    @status.setter
    def status(self, status):
        # Ignore statuses that were already emitted or are currently being
        # emitted (prevents infinite parent<->child recursion).
        if not (self.hasstatus(status) or status in self._event_lock):
            self._event_lock.add(status)
            try:
                self._status_setter(status)
            finally:
                self._event_lock.remove(status)

    # Statuses that require every Component child to have reached them first.
    _dependent_statuses = {'processing-finished', 'finished', 'exited'}

    def _status_setter(self, status):
        event = status if isinstance(status, ComponentEvent) else ComponentEvent(status, self)
        if event.status in self._dependent_statuses:
            children = set(filter(lambda c: isinstance(c, Component), self.children))
            ready = set(filter(lambda c: c.hasstatus(event.status), children))
            if len(children) > len(ready):
                if 'events' in self._debug:
                    pending = ", ".join(c.id for c in children.difference(ready))
                    self.debug(f'Status "{event.status}" waiting on {pending}')
                return
        if self.hasstatus('aborted') and event.status != 'exited':
            if 'events' in self._debug:
                self.debug(f'Ignoring status "{event.status}" because the component is '
                           'in aborted state')
            return
        if 'events' in self._debug:
            self.debug(f'Emitting event "{event.id}"')
        self._status = event.status
        _events[event.id].set()
        for queue in _event_queues:
            queue.put_nowait(event)
        # Bubble the status up unless the component keeps its events local.
        if self.parent and event.status != 'aborted' and not isinstance(self, LocalEvents):
            self.parent.status = event.status
        # Callbacks are one-shot: schedule, then clear.
        for callback in _event_callbacks[event.id]:
            asyncio.ensure_future(callback())
        _event_callbacks[event.id].clear()

    def hasstatus(self, status):
        """Return `True` if given status was set."""
        if isinstance(status, ComponentEvent):
            event = status.id
        elif ':' in status:
            event = status  # already a fully qualified "<id>:<status>" string
        else:
            event = ComponentEvent(status, self).id
        return _events[event].is_set()

    async def waiton(self, event):
        """Block until the fully qualified event ID has been emitted."""
        if 'events' in self._debug:
            self.debug(f'Waiting on event "{event}"')
        await _events[event].wait()
        if 'events' in self._debug:
            self.debug(f'Received event "{event}"')

    @property
    def running(self):
        """Return `True` if in one of the running states."""
        if not self.stopped:
            for status in ['started', 'running']:
                if self.hasstatus(status):
                    return True

    @property
    def stopped(self):
        """Return `True` if in one of the stopped states."""
        for status in ['aborted', 'finished']:
            if self.hasstatus(status):
                return True

    @property
    def aborted(self):
        """Return `True` if the aborted event was emitted."""
        return self.hasstatus('aborted')

    def start(self):
        """Emit 'started' and run the component."""
        self.status = 'started'
        return self.run()

    def stop(self):
        self.debug('Stopping')

    def abort(self, exception=None):
        """Emit 'aborted' and propagate it through the tree unless suppressed."""
        if self.hasstatus('aborted'):
            return
        self.status = ComponentEvent('aborted', self, exception)
        for child in self.children:
            if child.settings().get('error-propagation') in ('none', 'up'):
                if 'events' in self._debug:
                    self.debug(f'Suppressing error propagation to child {child.id}')
            elif not child.hasstatus('aborted'):
                if 'events' in self._debug:
                    self.debug(f'Propagating error to child {child.id}')
                child.abort()
        if self.parent:
            if self.parent.settings().get('error-propagation') in ('none', 'down'):
                if 'events' in self._debug:
                    self.debug(f'Suppressing error propagation to parent {self.parent.id}')
            elif not self.parent.hasstatus('aborted'):
                if 'events' in self._debug:
                    self.debug(f'Propagating error to parent {self.parent.id}')
                self.parent.abort(exception)

    def __getattr__(self, name):
        # Proxy the usual logging methods (self.debug/info/...) to the logger.
        if name not in ('critical', 'error', 'warning', 'info', 'debug', 'exception'):
            raise AttributeError(f"'{self.type}' object has no attribute '{name}'")
        return partial(self._proxied_logging_method, name)

    def _proxied_logging_method(self, method, *args, **kwargs):
        if method == 'debug':
            # BUG FIX: test for the *string* key 'logging' -- the original
            # tested the logging module object itself, which can never be a
            # settings key, so the debug filter always saw an empty list.
            if 'logging' in (self.workflow or self).settings():
                debug = (self.workflow or self).settings().logging.debug
            else:
                debug = []
            if not ('all' in debug or self.type in debug or (self.id in debug)):
                # Suppressed: return a no-op callable instead of logging.
                return lambda *a, **kw: None
        return getattr(self.logger, method)(*self._log_formatted(*args), **kwargs)

    def _log_formatted(self, msg, *args):
        """Return the msg prefixed with this component's ID and type."""
        prefix = f'{self.id} ' if self.id else ''
        msg = f'{prefix}({self.type}) {msg}'
        return (msg,) + args

    async def run(self):
        self.status = 'running'

    async def try_while_running(self, callable, timeout=0.5):
        """Return result of `callable`, or raise `ComponentInterrupted` if component is stopped."""
        while self.running:
            coro = callable()
            try:
                return await asyncio.wait_for(coro, timeout)
            except asyncio.TimeoutError:
                pass
        raise ComponentInterrupted
class ComponentEvent:
    """A status change emitted by a component, optionally carrying an exception."""

    def __init__(self, status, component, exception=None):
        self.status = status
        self.component = component
        self.exception = exception

    @property
    def id(self):
        """Return a fully qualified ID string representing this event."""
        return '{}:{}'.format(self.component.id, self.status)
class LocalEvents:
    """Marker mixin: components of this type do not bubble statuses to their parent."""
    pass
class ComponentInterrupted(Exception):
    """Raised when a component stops while a caller is waiting on it."""
    pass
def get_event_listener():
    """Return a new `Queue` object that will see all events."""
    listener = asyncio.Queue()
    # Every registered queue receives a copy of each emitted ComponentEvent.
    _event_queues.append(listener)
    return listener
def add_event_callback(event, callable, *args, **kwargs):
    """Register a callback that will be called upon the given event."""
    callback = partial(callable, *args, **kwargs)
    _event_callbacks[event].append(callback)
| #!/usr/bin/env python3
import asyncio
import logging
from collections import defaultdict
from functools import partial
from box import Box
_l = logging.getLogger(__name__)  # default logger injected into components
_instances = dict()  # (type, id) -> Component; enforces unique IDs per type
_events = defaultdict(asyncio.Event)  # "component:status" -> Event, set once emitted
_event_queues = list()  # listener queues that receive every ComponentEvent
_event_callbacks = defaultdict(list)  # "component:status" -> one-shot callbacks
class Component:
    """A stateful element in a workflow that can be configured, run, and uniquely named."""

    def __init__(self, *args, id=None, workflow=None, parent=None, logger=_l, **kwargs):
        self.id = id
        if id:
            # IDs must be unique per component type.
            key = (type(self), id)
            if key in _instances:
                raise ValueError(
                    f'{key[0].__name__} with ID "{id}" already exists: {_instances[key]}')
            _instances[key] = self
        self.workflow = workflow
        self.parent = parent
        self.children = list()
        if parent:
            parent.children.append(self)
        self.logger = logger
        self.loop = asyncio.get_event_loop()
        self._event_lock = set()  # statuses currently being set (re-entrancy guard)
        self._debug = {'events'}
        self._settings = Box(self.configure(**kwargs) or dict())
        if not workflow:
            workflow = self
        settings = [f'{k}={v}' for k, v in workflow.safe_settings(self._settings).items()]
        self.debug(f'Initialized {" ".join(settings)}')

    def configure(self, **settings):
        """Hook for subclasses; the returned dict becomes the component settings."""
        return settings

    def settings(self, **override):
        """Return a copy of the settings Box with optional overrides applied."""
        return Box(self._settings, **override)

    def safe_settings(self, settings):
        """Hook for subclasses to redact secrets before settings are logged."""
        return settings

    @property
    def type(self):
        """Return the component's class name."""
        return type(self).__name__

    @property
    def status(self):
        return getattr(self, '_status', None)

    @status.setter
    def status(self, status):
        # Ignore statuses that were already emitted or are currently being
        # emitted (prevents infinite parent<->child recursion).
        if not (self.hasstatus(status) or status in self._event_lock):
            self._event_lock.add(status)
            try:
                self._status_setter(status)
            finally:
                self._event_lock.remove(status)

    # Statuses that require every Component child to have reached them first.
    _dependent_statuses = {'processing-finished', 'finished', 'exited'}

    def _status_setter(self, status):
        event = status if isinstance(status, ComponentEvent) else ComponentEvent(status, self)
        if event.status in self._dependent_statuses:
            children = set(filter(lambda c: isinstance(c, Component), self.children))
            ready = set(filter(lambda c: c.hasstatus(event.status), children))
            if len(children) > len(ready):
                if 'events' in self._debug:
                    pending = ", ".join(c.id for c in children.difference(ready))
                    self.debug(f'Status "{event.status}" waiting on {pending}')
                return
        if self.hasstatus('aborted') and event.status != 'exited':
            if 'events' in self._debug:
                self.debug(f'Ignoring status "{event.status}" because the component is '
                           'in aborted state')
            return
        if 'events' in self._debug:
            self.debug(f'Emitting event "{event.id}"')
        self._status = event.status
        _events[event.id].set()
        for queue in _event_queues:
            queue.put_nowait(event)
        # Bubble the status up unless the component keeps its events local.
        if self.parent and event.status != 'aborted' and not isinstance(self, LocalEvents):
            self.parent.status = event.status
        # Callbacks are one-shot: schedule, then clear.
        for callback in _event_callbacks[event.id]:
            asyncio.ensure_future(callback())
        _event_callbacks[event.id].clear()

    def hasstatus(self, status):
        """Return `True` if given status was set."""
        if isinstance(status, ComponentEvent):
            event = status.id
        elif ':' in status:
            event = status  # already a fully qualified "<id>:<status>" string
        else:
            event = ComponentEvent(status, self).id
        return _events[event].is_set()

    async def waiton(self, event):
        """Block until the fully qualified event ID has been emitted."""
        if 'events' in self._debug:
            self.debug(f'Waiting on event "{event}"')
        await _events[event].wait()
        if 'events' in self._debug:
            self.debug(f'Received event "{event}"')

    @property
    def running(self):
        """Return `True` if in one of the running states."""
        if not self.stopped:
            for status in ['started', 'running']:
                if self.hasstatus(status):
                    return True

    @property
    def stopped(self):
        """Return `True` if in one of the stopped states."""
        for status in ['aborted', 'finished']:
            if self.hasstatus(status):
                return True

    @property
    def aborted(self):
        """Return `True` if the aborted event was emitted."""
        return self.hasstatus('aborted')

    def start(self):
        """Emit 'started' and run the component."""
        self.status = 'started'
        return self.run()

    def stop(self):
        self.debug('Stopping')

    def abort(self, exception=None):
        """Emit 'aborted' and propagate it through the tree unless suppressed."""
        if self.hasstatus('aborted'):
            return
        self.status = ComponentEvent('aborted', self, exception)
        for child in self.children:
            if child.settings().get('error-propagation') in ('none', 'up'):
                if 'events' in self._debug:
                    self.debug(f'Suppressing error propagation to child {child.id}')
            elif not child.hasstatus('aborted'):
                if 'events' in self._debug:
                    self.debug(f'Propagating error to child {child.id}')
                child.abort()
        if self.parent:
            if self.parent.settings().get('error-propagation') in ('none', 'down'):
                if 'events' in self._debug:
                    self.debug(f'Suppressing error propagation to parent {self.parent.id}')
            elif not self.parent.hasstatus('aborted'):
                if 'events' in self._debug:
                    self.debug(f'Propagating error to parent {self.parent.id}')
                self.parent.abort(exception)

    def __getattr__(self, name):
        # Proxy the usual logging methods (self.debug/info/...) to the logger.
        if name not in ('critical', 'error', 'warning', 'info', 'debug', 'exception'):
            raise AttributeError(f"'{self.type}' object has no attribute '{name}'")
        return partial(self._proxied_logging_method, name)

    def _proxied_logging_method(self, method, *args, **kwargs):
        if method == 'debug':
            # BUG FIX: test for the *string* key 'logging' -- the original
            # tested the logging module object itself, which can never be a
            # settings key, so the debug filter always saw an empty list.
            if 'logging' in (self.workflow or self).settings():
                debug = (self.workflow or self).settings().logging.debug
            else:
                debug = []
            if not ('all' in debug or self.type in debug or (self.id in debug)):
                # Suppressed: return a no-op callable instead of logging.
                return lambda *a, **kw: None
        return getattr(self.logger, method)(*self._log_formatted(*args), **kwargs)

    def _log_formatted(self, msg, *args):
        """Return the msg prefixed with this component's ID and type."""
        prefix = f'{self.id} ' if self.id else ''
        msg = f'{prefix}({self.type}) {msg}'
        return (msg,) + args

    async def run(self):
        self.status = 'running'

    async def try_while_running(self, callable, timeout=0.5):
        """Return result of `callable`, or raise `ComponentInterrupted` if component is stopped."""
        while self.running:
            coro = callable()
            try:
                return await asyncio.wait_for(coro, timeout)
            except asyncio.TimeoutError:
                pass
        raise ComponentInterrupted
class ComponentEvent:
    """A status change emitted by a component.

    Carries the new status, the emitting component, and an optional
    exception (set when the status is an abort).
    """

    def __init__(self, status, component, exception=None):
        self.status = status
        self.component = component
        self.exception = exception

    @property
    def id(self):
        """Return a fully qualified ID string representing this event."""
        return f'{self.component.id}:{self.status}'
class LocalEvents:
    """Marker mixin: components of this type do not bubble statuses to their parent."""
    pass
class ComponentInterrupted(Exception):
    """Raised when a component stops while a caller is waiting on it."""
    pass
def get_event_listener():
    """Return a new `Queue` object that will see all events."""
    queue = asyncio.Queue()
    # Every registered queue receives a copy of each emitted ComponentEvent.
    _event_queues.append(queue)
    return queue
def add_event_callback(event, callable, *args, **kwargs):
    """Register a callback that will be called upon the given event."""
    # Callbacks are one-shot: they are cleared after the event fires.
    _event_callbacks[event].append(partial(callable, *args, **kwargs))
|
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.system_log import CONF_LOGGER
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant, Event
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.helpers.storage import Store
from .core import logger
from .core.gateway3 import Gateway3
from .core.helpers import DevicesRegistry
from .core.utils import DOMAIN, XiaomiGateway3Debug
from .core.xiaomi_cloud import MiCloud
_LOGGER = logging.getLogger(__name__)

# Entity platforms forwarded for every gateway config entry.
DOMAINS = ['binary_sensor', 'climate', 'cover', 'light', 'remote', 'sensor',
           'switch', 'alarm_control_panel']

CONF_DEVICES = 'devices'
CONF_ATTRIBUTES_TEMPLATE = 'attributes_template'

# YAML schema: optional per-device overrides, logger config and an
# attributes template; extra keys are allowed at every level.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Optional(CONF_DEVICES): {
            cv.string: vol.Schema({
                vol.Optional('occupancy_timeout'): cv.positive_int,
            }, extra=vol.ALLOW_EXTRA),
        },
        CONF_LOGGER: logger.CONFIG_SCHEMA,
        vol.Optional(CONF_ATTRIBUTES_TEMPLATE): cv.template
    }, extra=vol.ALLOW_EXTRA),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass: HomeAssistant, hass_config: dict):
    """Set up the integration from YAML: logger, device defaults, shared state."""
    config = hass_config.get(DOMAIN) or {}

    if CONF_LOGGER in config:
        logger.init(__name__, config[CONF_LOGGER], hass.config.config_dir)

        info = await hass.helpers.system_info.async_get_system_info()
        _LOGGER.debug(f"SysInfo: {info}")

        # update global debug_mode for all gateways
        # NOTE(review): this access assumes CONF_LOGGER is present -- it must
        # stay inside the guard above or it raises KeyError; confirm indentation.
        if 'debug_mode' in config[CONF_LOGGER]:
            setattr(Gateway3, 'debug_mode', config[CONF_LOGGER]['debug_mode'])

    if CONF_DEVICES in config:
        for k, v in config[CONF_DEVICES].items():
            # AA:BB:CC:DD:EE:FF => aabbccddeeff
            k = k.replace(':', '').lower()
            DevicesRegistry.defaults[k] = v

    hass.data[DOMAIN] = {
        CONF_ATTRIBUTES_TEMPLATE: config.get(CONF_ATTRIBUTES_TEMPLATE)
    }

    await _handle_device_remove(hass)

    # utils.migrate_unique_id(hass)

    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up a config entry; supports two kinds of entries - MiCloud and Gateway."""
    # entry for MiCloud login
    if 'servers' in entry.data:
        return await _setup_micloud_entry(hass, entry)

    # migrate data (also after first setup) to options
    if entry.data:
        hass.config_entries.async_update_entry(entry, data={},
                                               options=entry.data)

    await _setup_logger(hass)

    # add options handler
    if not entry.update_listeners:
        entry.add_update_listener(async_update_options)

    hass.data[DOMAIN][entry.entry_id] = Gateway3(**entry.options)

    hass.async_create_task(_setup_domains(hass, entry))

    return True
async def async_update_options(hass: HomeAssistant, entry: ConfigEntry):
    """Reload the config entry whenever its options change."""
    await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Stop the gateway, drop disabled stats entities, and unload all platforms."""
    # check unload cloud integration
    # NOTE(review): returns None (falsy) for cloud entries instead of True --
    # confirm whether cloud entries are meant to report unload failure.
    if entry.entry_id not in hass.data[DOMAIN]:
        return

    # remove all stats entities if disable stats
    if not entry.options.get('stats'):
        suffix = ('_gateway', '_zigbee', '_ble')
        registry: EntityRegistry = hass.data['entity_registry']
        remove = [
            entity.entity_id
            for entity in list(registry.entities.values())
            if (entity.config_entry_id == entry.entry_id and
                entity.unique_id.endswith(suffix))
        ]
        for entity_id in remove:
            registry.async_remove(entity_id)

    gw: Gateway3 = hass.data[DOMAIN][entry.entry_id]
    await gw.stop()

    await asyncio.gather(*[
        hass.config_entries.async_forward_entry_unload(entry, domain)
        for domain in DOMAINS
    ])

    return True
async def _setup_domains(hass: HomeAssistant, entry: ConfigEntry):
    """Forward entry setup to every platform, then start the gateway."""
    # init setup for each supported domains
    await asyncio.gather(*[
        hass.config_entries.async_forward_entry_setup(entry, domain)
        for domain in DOMAINS
    ])

    gw: Gateway3 = hass.data[DOMAIN][entry.entry_id]
    gw.start()

    # Stop the gateway cleanly when Home Assistant shuts down.
    entry.async_on_unload(
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, gw.stop)
    )
async def _setup_micloud_entry(hass: HomeAssistant, config_entry):
    """Set up a MiCloud entry: log in, fetch devices, and cache them in .storage."""
    data: dict = config_entry.data.copy()
    session = async_create_clientsession(hass)
    hass.data[DOMAIN]['cloud'] = cloud = MiCloud(session, data['servers'])

    if 'service_token' in data:
        # load devices with saved MiCloud auth
        cloud.auth = data
        devices = await cloud.get_devices()
    else:
        devices = None

    if devices is None:
        _LOGGER.debug(f"Login to MiCloud for {config_entry.title}")
        if await cloud.login(data['username'], data['password']):
            # update MiCloud auth in .storage
            data.update(cloud.auth)
            hass.config_entries.async_update_entry(config_entry, data=data)
            devices = await cloud.get_devices()
            if devices is None:
                _LOGGER.error("Can't load devices from MiCloud")
        else:
            _LOGGER.error("Can't login to MiCloud")

    # load devices from or save to .storage
    # Fix: single quotes inside the f-string -- nesting the same quote type
    # is a SyntaxError on Python < 3.12.
    store = Store(hass, 1, f"{DOMAIN}/{data['username']}.json")
    if devices is None:
        _LOGGER.debug("Loading a list of devices from the .storage")
        devices = await store.async_load()
    else:
        _LOGGER.debug(f"Loaded from MiCloud {len(devices)} devices")
        await store.async_save(devices)

    if devices is None:
        _LOGGER.debug("No devices in .storage")
        return False

    # TODO: Think about a bunch of devices
    if 'devices' not in hass.data[DOMAIN]:
        hass.data[DOMAIN]['devices'] = devices
    else:
        hass.data[DOMAIN]['devices'] += devices

    for device in devices:
        # key - mac for BLE, and did for others
        # NOTE(review): `not in '6'` is a substring test; presumably pid is a
        # single-character string -- confirm.
        did = device['did'] if device['pid'] not in '6' else \
            device['mac'].replace(':', '').lower()
        DevicesRegistry.defaults.setdefault(did, {})
        # don't override name if exists
        DevicesRegistry.defaults[did].setdefault('device_name', device['name'])

    return True
async def _handle_device_remove(hass: HomeAssistant):
    """Remove device from Hass and Mi Home if the device is renamed to
    `delete`.
    """
    async def device_registry_updated(event: Event):
        if event.data['action'] != 'update':
            return

        registry = hass.data['device_registry']
        hass_device = registry.async_get(event.data['device_id'])

        # check empty identifiers
        if not hass_device or not hass_device.identifiers:
            return

        # handle only our devices
        for hass_did in hass_device.identifiers:
            if hass_did[0] == DOMAIN and hass_device.name_by_user == 'delete':
                break
        else:
            return

        # remove from Mi Home
        for gw in hass.data[DOMAIN].values():
            if not isinstance(gw, Gateway3):
                continue
            gw_device = gw.get_device(hass_did[1])
            if not gw_device:
                continue
            if gw_device['type'] == 'zigbee':
                # Fix: single quotes inside the f-string -- nesting the same
                # quote type is a SyntaxError on Python < 3.12.
                gw.debug(f"Remove device: {gw_device['did']}")
                await gw.miio.send('remove_device', [gw_device['did']])
            break

        # remove from Hass
        registry.async_remove_device(hass_device.id)

    hass.bus.async_listen('device_registry_updated', device_registry_updated)
async def _setup_logger(hass: HomeAssistant):
    """Configure the module logger for optional in-browser debug output."""
    # NOTE(review): attribute name 'defaul_level' is misspelled but used
    # consistently below; renaming could break any external readers -- confirm
    # before fixing.
    if not hasattr(_LOGGER, 'defaul_level'):
        # default level from Hass config
        _LOGGER.defaul_level = _LOGGER.level

    entries = hass.config_entries.async_entries(DOMAIN)
    web_logs = any(e.options.get('debug') for e in entries)

    # only if global logging don't set
    if _LOGGER.defaul_level == logging.NOTSET:
        # disable log to console
        _LOGGER.propagate = web_logs is False
        # set debug if any of integrations has debug
        _LOGGER.setLevel(logging.DEBUG if web_logs else logging.NOTSET)

    # if don't set handler yet
    if web_logs:
        # skip if already added
        if any(isinstance(h, XiaomiGateway3Debug) for h in _LOGGER.handlers):
            return
        handler = XiaomiGateway3Debug(hass)
        _LOGGER.addHandler(handler)

        if _LOGGER.defaul_level == logging.NOTSET:
            info = await hass.helpers.system_info.async_get_system_info()
            _LOGGER.debug(f"SysInfo: {info}")
| import asyncio
import logging
import voluptuous as vol
from homeassistant.components.system_log import CONF_LOGGER
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant, Event
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.helpers.storage import Store
from .core import logger
from .core.gateway3 import Gateway3
from .core.helpers import DevicesRegistry
from .core.utils import DOMAIN, XiaomiGateway3Debug
from .core.xiaomi_cloud import MiCloud
_LOGGER = logging.getLogger(__name__)

# Entity platforms forwarded for every gateway config entry.
DOMAINS = ['binary_sensor', 'climate', 'cover', 'light', 'remote', 'sensor',
           'switch', 'alarm_control_panel']

CONF_DEVICES = 'devices'
CONF_ATTRIBUTES_TEMPLATE = 'attributes_template'

# YAML schema: optional per-device overrides, logger config and an
# attributes template; extra keys are allowed at every level.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Optional(CONF_DEVICES): {
            cv.string: vol.Schema({
                vol.Optional('occupancy_timeout'): cv.positive_int,
            }, extra=vol.ALLOW_EXTRA),
        },
        CONF_LOGGER: logger.CONFIG_SCHEMA,
        vol.Optional(CONF_ATTRIBUTES_TEMPLATE): cv.template
    }, extra=vol.ALLOW_EXTRA),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass: HomeAssistant, hass_config: dict):
    """Set up the integration from YAML: logger, device defaults, shared state."""
    config = hass_config.get(DOMAIN) or {}

    if CONF_LOGGER in config:
        logger.init(__name__, config[CONF_LOGGER], hass.config.config_dir)

        info = await hass.helpers.system_info.async_get_system_info()
        _LOGGER.debug(f"SysInfo: {info}")

        # update global debug_mode for all gateways
        # NOTE(review): this access assumes CONF_LOGGER is present -- it must
        # stay inside the guard above or it raises KeyError; confirm indentation.
        if 'debug_mode' in config[CONF_LOGGER]:
            setattr(Gateway3, 'debug_mode', config[CONF_LOGGER]['debug_mode'])

    if CONF_DEVICES in config:
        for k, v in config[CONF_DEVICES].items():
            # AA:BB:CC:DD:EE:FF => aabbccddeeff
            k = k.replace(':', '').lower()
            DevicesRegistry.defaults[k] = v

    hass.data[DOMAIN] = {
        CONF_ATTRIBUTES_TEMPLATE: config.get(CONF_ATTRIBUTES_TEMPLATE)
    }

    await _handle_device_remove(hass)

    # utils.migrate_unique_id(hass)

    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up a config entry; supports two kinds of entries - MiCloud and Gateway."""
    # entry for MiCloud login
    if 'servers' in entry.data:
        return await _setup_micloud_entry(hass, entry)

    # migrate data (also after first setup) to options
    if entry.data:
        hass.config_entries.async_update_entry(entry, data={},
                                               options=entry.data)

    await _setup_logger(hass)

    # add options handler
    if not entry.update_listeners:
        entry.add_update_listener(async_update_options)

    hass.data[DOMAIN][entry.entry_id] = Gateway3(**entry.options)

    hass.async_create_task(_setup_domains(hass, entry))

    return True
async def async_update_options(hass: HomeAssistant, entry: ConfigEntry):
    """Reload the config entry whenever its options change."""
    await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Stop the gateway, drop disabled stats entities, and unload all platforms."""
    # check unload cloud integration
    # NOTE(review): returns None (falsy) for cloud entries instead of True --
    # confirm whether cloud entries are meant to report unload failure.
    if entry.entry_id not in hass.data[DOMAIN]:
        return

    # remove all stats entities if disable stats
    if not entry.options.get('stats'):
        suffix = ('_gateway', '_zigbee', '_ble')
        registry: EntityRegistry = hass.data['entity_registry']
        remove = [
            entity.entity_id
            for entity in list(registry.entities.values())
            if (entity.config_entry_id == entry.entry_id and
                entity.unique_id.endswith(suffix))
        ]
        for entity_id in remove:
            registry.async_remove(entity_id)

    gw: Gateway3 = hass.data[DOMAIN][entry.entry_id]
    await gw.stop()

    await asyncio.gather(*[
        hass.config_entries.async_forward_entry_unload(entry, domain)
        for domain in DOMAINS
    ])

    return True
async def _setup_domains(hass: HomeAssistant, entry: ConfigEntry):
    """Forward the entry to every supported platform, then start the gateway."""
    # set up all supported platforms in parallel
    setups = (
        hass.config_entries.async_forward_entry_setup(entry, domain)
        for domain in DOMAINS
    )
    await asyncio.gather(*setups)

    gateway: Gateway3 = hass.data[DOMAIN][entry.entry_id]
    gateway.start()

    # make sure the gateway is stopped when Home Assistant shuts down
    entry.async_on_unload(
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, gateway.stop)
    )
async def _setup_micloud_entry(hass: HomeAssistant, config_entry):
    """Log in to MiCloud, fetch the device list, and seed default device names.

    The device list is mirrored to Hass .storage so a later failed login can
    still fall back to the cached copy.
    """
    data: dict = config_entry.data.copy()

    session = async_create_clientsession(hass)
    hass.data[DOMAIN]['cloud'] = cloud = MiCloud(session, data['servers'])

    if 'service_token' in data:
        # load devices with saved MiCloud auth
        cloud.auth = data
        devices = await cloud.get_devices()
    else:
        devices = None

    if devices is None:
        _LOGGER.debug(f"Login to MiCloud for {config_entry.title}")
        if await cloud.login(data['username'], data['password']):
            # update MiCloud auth in .storage
            data.update(cloud.auth)
            hass.config_entries.async_update_entry(config_entry, data=data)
            devices = await cloud.get_devices()
            if devices is None:
                _LOGGER.error("Can't load devices from MiCloud")
        else:
            _LOGGER.error("Can't login to MiCloud")

    # load devices from or save to .storage
    store = Store(hass, 1, f"{DOMAIN}/{data['username']}.json")
    if devices is None:
        _LOGGER.debug("Loading a list of devices from the .storage")
        devices = await store.async_load()
    else:
        _LOGGER.debug(f"Loaded from MiCloud {len(devices)} devices")
        await store.async_save(devices)

    if devices is None:
        _LOGGER.debug("No devices in .storage")
        return False

    # TODO: Think about a bunch of devices
    if 'devices' not in hass.data[DOMAIN]:
        hass.data[DOMAIN]['devices'] = devices
    else:
        hass.data[DOMAIN]['devices'] += devices

    for device in devices:
        # key - mac for BLE, and did for others
        # NOTE(review): `device['pid'] not in '6'` is a substring test on the
        # pid string; presumably pid '6' marks BLE devices — confirm against
        # the MiCloud device model.
        did = device['did'] if device['pid'] not in '6' else \
            device['mac'].replace(':', '').lower()
        DevicesRegistry.defaults.setdefault(did, {})
        # don't override name if exists
        DevicesRegistry.defaults[did].setdefault('device_name', device['name'])

    return True
async def _handle_device_remove(hass: HomeAssistant):
    """Remove device from Hass and Mi Home if the device is renamed to
    `delete`.
    """

    async def device_registry_updated(event: Event):
        # only renames/edits are interesting, not create/remove events
        if event.data['action'] != 'update':
            return

        registry = hass.data['device_registry']
        hass_device = registry.async_get(event.data['device_id'])

        # check empty identifiers
        if not hass_device or not hass_device.identifiers:
            return

        # handle only our devices
        # (for-else: falls through to return when no identifier matched)
        for hass_did in hass_device.identifiers:
            if hass_did[0] == DOMAIN and hass_device.name_by_user == 'delete':
                break
        else:
            return

        # remove from Mi Home
        # walk all gateways until the one that owns this device is found
        for gw in hass.data[DOMAIN].values():
            if not isinstance(gw, Gateway3):
                continue
            gw_device = gw.get_device(hass_did[1])
            if not gw_device:
                continue
            # only zigbee devices can be unpaired via the gateway's miio API
            if gw_device['type'] == 'zigbee':
                gw.debug(f"Remove device: {gw_device['did']}")
                await gw.miio.send('remove_device', [gw_device['did']])
            break

        # remove from Hass
        registry.async_remove_device(hass_device.id)

    hass.bus.async_listen('device_registry_updated', device_registry_updated)
async def _setup_logger(hass: HomeAssistant):
    """Configure the integration logger, optionally attaching the web-debug
    handler when any entry has the 'debug' option enabled.
    """
    # NOTE(review): 'defaul_level' is a typo for 'default_level', but it is
    # used consistently as the attribute name — renaming it would need a
    # whole-file check for other readers.
    if not hasattr(_LOGGER, 'defaul_level'):
        # default level from Hass config
        _LOGGER.defaul_level = _LOGGER.level

    entries = hass.config_entries.async_entries(DOMAIN)
    web_logs = any(e.options.get('debug') for e in entries)

    # only if global logging don't set
    if _LOGGER.defaul_level == logging.NOTSET:
        # disable log to console
        _LOGGER.propagate = web_logs is False
        # set debug if any of integrations has debug
        _LOGGER.setLevel(logging.DEBUG if web_logs else logging.NOTSET)

    # if don't set handler yet
    if web_logs:
        # skip if already added
        if any(isinstance(h, XiaomiGateway3Debug) for h in _LOGGER.handlers):
            return
        handler = XiaomiGateway3Debug(hass)
        _LOGGER.addHandler(handler)

        if _LOGGER.defaul_level == logging.NOTSET:
            info = await hass.helpers.system_info.async_get_system_info()
            _LOGGER.debug(f"SysInfo: {info}")
|
# pylint: disable=too-many-lines
import os
import random
import shutil
import time
import uuid
from retval import RetVal
from pycryptostring import CryptoString
from pymensago.encryption import EncryptionPair
from pymensago.hash import blake2hash
from pymensago.serverconn import ServerConnection
from integration_setup import login_admin, regcode_admin, setup_test, init_server, init_user, \
init_user2, reset_top_dir
from tests.integration.integration_setup import funcname
# JSON schema handed to ServerConnection.read_response() throughout these
# tests: every server reply must carry all four top-level fields.
server_response = {
    'title' : 'Mensago Server Response',
    'type' : 'object',
    'required' : [ 'Code', 'Status', 'Info', 'Data' ],
    'properties' : {
        'Code' : {
            'type' : 'integer'
        },
        'Status' : {
            'type' : 'string'
        },
        'Info' : {
            'type' : 'string'
        },
        'Data' : {
            'type' : 'object'
        }
    }
}
def make_test_file(path: str, file_size=-1, file_name='') -> RetVal:
    '''Generate a test file containing nothing but zeroes.

    If the file size is negative, a random size between 1 and 10 Kb will be
    chosen. If the file name is empty, a random one will be generated.

    Returns:
      name: (str) name of the test file generated
      size: (int) size of the test file generated
    '''
    if file_size < 0:
        file_size = random.randint(1, 10) * 1024

    # empty string and None both mean "pick a name" (the old
    # `file_name == '' or not file_name` test was redundant)
    if not file_name:
        file_name = f"{int(time.time())}.{file_size}.{str(uuid.uuid4())}"

    try:
        # context manager guarantees the handle is closed even if the write
        # fails partway through (the original leaked the handle on error)
        with open(os.path.join(path, file_name), 'w') as fhandle:
            fhandle.write('0' * file_size)
    except Exception as e:
        return RetVal().wrap_exception(e)

    return RetVal().set_values({ 'name':file_name, 'size':file_size })
def setup_testdir(name) -> str:
    '''Create (or recreate) an empty test folder for holding files.

    The folder lives under ``testfiles`` next to this module; any previous
    contents are removed first.

    Returns:
      The path of the freshly created, empty test directory.
    '''
    topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),'testfiles')
    if not os.path.exists(topdir):
        os.mkdir(topdir)

    testdir = os.path.join(topdir, name)
    while os.path.exists(testdir):
        try:
            shutil.rmtree(testdir)
        # only filesystem errors mean "locked, retry"; the original bare
        # except also swallowed KeyboardInterrupt/SystemExit
        except OSError:
            print("Waiting a second for test folder to unlock")
            time.sleep(1.0)

    os.mkdir(testdir)
    return testdir
def test_copy():
    '''Tests the COPY command'''
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)

    # Set up the directory hierarchy
    admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
        dbdata['admin_wid'])
    inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
    os.mkdir(inner_dir)

    # Subtest #1: Nonexistent source file
    conn.send_message({
        'Action': 'COPY',
        'Data': {
            'SourceFile': '/ wsp ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef',
            'DestDir': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, 'test_copy: #1 failed to handle nonexistent source file'

    # Subtest #2: Nonexistent destination directory
    # By making this 1MB + 1byte, the file's mere existence will put us over the limit of the 1MB
    # disk quota
    status = make_test_file(admin_dir, file_size=0x10_0001)
    assert not status.error(), 'test_copy: #2 failed to create a test file'
    testfile1 = status['name']

    conn.send_message({
        'Action': 'COPY',
        'Data': {
            'SourceFile': f"/ wsp {dbdata["admin_wid"]} {testfile1}",
            'DestDir': f"/ wsp {dbdata["admin_wid"]} 22222222-2222-2222-2222-222222222222"
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, 'test_copy: #2 failed to handle nonexistent destination dir'

    # Subtest #3: Source path is a directory
    conn.send_message({
        'Action': 'COPY',
        'Data': {
            'SourceFile': f"/ wsp {dbdata["admin_wid"]}",
            'DestDir': f"/ wsp {dbdata["admin_wid"]} 11111111-1111-1111-1111-111111111111"
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_copy: #3 failed to handle directory as source'

    # Subtest #4: Destination is file path
    # Normally each file on the system has a unique name, but having a duplicate in this case
    # won't matter
    status = make_test_file(inner_dir, 102400, testfile1)
    conn.send_message({
        'Action': 'COPY',
        'Data': {
            'SourceFile': f"/ wsp {dbdata["admin_wid"]} {testfile1}",
            'DestDir': f"/ wsp {dbdata["admin_wid"]} 11111111-1111-1111-1111-111111111111 {testfile1}"
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_copy: #4 failed to handle file as destination'

    # Subtest #5: Insufficient quota remaining
    # The administrator normally can't have a quota. We'll just fix that just for this one test
    # *heh*

    # We actually have to do an update instead of an insert because the quota checks in earlier
    # calls ensure that there is a quota record for admin in the database
    cur = dbconn.cursor()
    cur.execute(f"UPDATE quotas SET quota=1 WHERE wid='{dbdata["admin_wid"]}'")
    dbconn.commit()

    conn.send_message({
        'Action': 'COPY',
        'Data': {
            'SourceFile': f"/ wsp {dbdata["admin_wid"]} {testfile1}",
            'DestDir': f"/ wsp {dbdata["admin_wid"]} 11111111-1111-1111-1111-111111111111"
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 409, 'test_copy: #5 failed to handle quota limit'

    # We need this to be unlimited for later tests
    cur = dbconn.cursor()
    cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata["admin_wid"]}'")
    dbconn.commit()

    # Subtest #6: Actual success
    conn.send_message({
        'Action': 'COPY',
        'Data': {
            'SourceFile': f"/ wsp {dbdata["admin_wid"]} {testfile1}",
            'DestDir': f"/ wsp {dbdata["admin_wid"]} 11111111-1111-1111-1111-111111111111"
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_copy: #6 failed to succeed'

    conn.disconnect()
def test_delete():
    '''Test the DELETE command'''
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)

    # Subtest #1: Bad path
    conn.send_message({
        'Action': 'DELETE',
        'Data': {
            'Path': f"/ wsp {dbdata['admin_wid']} some_dir_name"
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, f"{funcname()}: failed to handle bad path"

    # Subtest #2: Directory doesn't exist
    conn.send_message({
        'Action': 'DELETE',
        'Data': {
            'Path': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111"
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, f"{funcname()}: #2 failed to handle nonexistent file"

    # Subtest #3: Actual success
    admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
        dbdata['admin_wid'])
    status = make_test_file(admin_dir)
    assert not status.error(), f"{funcname()}: #3 failed to create test file"
    filename = status["name"]

    conn.send_message({
        'Action': 'DELETE',
        'Data': {
            # BUG FIX: the path previously contained a literal placeholder
            # instead of the name of the file just created above
            'Path': f"/ wsp {dbdata['admin_wid']} {filename}"
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, f"{funcname()}: #3 failed to delete file"

    # added for consistency with the other tests in this module
    conn.disconnect()
def test_download():
    '''This tests the command DOWNLOAD'''
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)

    init_user(dbdata, conn)

    # Subtest #1: Missing parameters
    conn.send_message({'Action': 'DOWNLOAD','Data': {}})
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_download: #1 failed to handle missing parameter'

    # Subtest #2: Non-existent path
    conn.send_message({
        'Action': 'DOWNLOAD',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222' +
                ' 1000.1000.22222222-2222-2222-2222-222222222222'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, 'test_download: #2 failed to handle non-existent path'

    # Subtest #3: Actual success
    status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
        dbdata['admin_wid']), file_size=1000)
    assert not status.error(), f"test_download: #3 failed to create test file: {status.info}"
    testname = status['name']

    # DOWNLOAD is a two-step protocol: the first request gets a 100 Continue
    # with the file size; echoing the size back starts the actual transfer
    conn.send_message({
        'Action': 'DOWNLOAD',
        'Data': {
            'Path': f"/ wsp {dbdata["admin_wid"]} {testname}"
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 100, 'test_download: #3 failed to proceed to file download'
    assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \
        'test_download: #3 server failed to respond with file size'

    conn.send_message({
        'Action': 'DOWNLOAD',
        'Data': {
            'Path': f"/ wsp {dbdata["admin_wid"]} {testname}",
            'Size': '1000'
        }
    })
    rawdata = conn.read()
    assert len(rawdata) == 1000, 'test_download: #3 downloaded file had wrong length'

    # Set up an 'interrupted' transfer
    status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
        dbdata['admin_wid']), file_size=1000)
    assert not status.error(), f"test_download: #4 failed to create test file: {status.info}"
    testname = status['name']

    # Subtest #7: Resume offset larger than size of data stored server-side
    # NOTE(review): the subtest numbering in the comments and assert messages
    # below (#7/#4, #5/#3, #8) is stale — messages are kept as-is here.
    conn.send_message({
        'Action': 'DOWNLOAD',
        'Data': {
            'Path': f"/ wsp {dbdata["admin_wid"]} {testname}",
            'Offset': '2500'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_download: #4 failed to handle offset > file size'

    # Subtest #5: Resume interrupted transfer - exact match
    conn.send_message({
        'Action': 'DOWNLOAD',
        'Data': {
            'Path': f"/ wsp {dbdata["admin_wid"]} {testname}",
            'Offset': '500'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 100, 'test_download: #3 failed to proceed to file download'
    assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \
        'test_download: #5 server failed to respond with file size'

    conn.send_message({
        'Action': 'DOWNLOAD',
        'Data': {
            'Path': f"/ wsp {dbdata["admin_wid"]} {testname}",
            'Offset': '500',
            'Size': '1000'
        }
    })
    rawdata = conn.read()
    assert len(rawdata) == 500, 'test_download: #5 resumed data had wrong length'
    # the full file is 1000 zeroes, so prefix + resumed tail must hash the same
    assert blake2hash((('0' * 500) + rawdata).encode()) == \
        'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', \
        'test_download: #8 resumed file hash failure'

    conn.disconnect()
def test_getquotainfo():
    '''This tests the command GETQUOTAINFO, which gets both the quota for the workspace and the
    disk usage'''
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)

    init_user(dbdata, conn)

    # a single 1000-byte file makes the expected disk usage deterministic
    status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
        dbdata['admin_wid']), file_size=1000)
    assert not status.error(), f"Failed to create test workspace file: {status.info}"

    conn.send_message({ 'Action': 'GETQUOTAINFO', 'Data': {} })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_getquotainfo: failed to get quota information'

    assert response['Data']['DiskUsage'] == '1000', 'test_getquotainfo: disk usage was incorrect'
    # quota '0' means unlimited
    assert response['Data']['QuotaSize'] == '0', \
        "test_getquotainfo: admin quota wasn't unlimited"

    conn.disconnect()
def test_list():
    '''Tests the LIST command'''
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)

    # Subtest #1: Nonexistent path
    conn.send_message({
        'Action': 'LIST',
        'Data': {
            'Path': '/ 11111111-1111-1111-1111-111111111111'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, 'test_list: #1 failed to handle missing path'

    # Subtest #2: Path is a file
    admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
        dbdata['admin_wid'])
    status = make_test_file(admin_dir)
    assert not status.error(), "test_list: #2 failed to create test file"

    conn.send_message({
        'Action': 'LIST',
        'Data': {
            'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_list: #2 failed to handle path as file'

    # Subtest #3: Empty directory
    os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
    conn.send_message({
        'Action': 'LIST',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_list: #3 failed to handle empty directory'
    assert 'Files' in response['Data'] and len(response['Data']['Files']) == 0, \
        'test_list: #3 failed to have empty response for empty directory'

    # Subtest #4: A list of files
    # server file names are '<timestamp>.<size>.<uuid>'; staggered timestamps
    # (1000..5000) let subtest #5 filter on time
    for i in range(1,6):
        tempname = '.'.join([str(1000 * i), '500', str(uuid.uuid4())])
        try:
            # context manager closes the handle even if the write fails
            with open(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111',
                    tempname), 'w') as fhandle:
                fhandle.write('0' * 500)
        except Exception as e:
            # BUG FIX: the original concatenated str + Exception, which raises
            # TypeError instead of reporting the real failure
            assert False, f"test_list: #4 failed to create test files: {e}"

    conn.send_message({
        'Action': 'LIST',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_list: #4 failed to handle non-empty directory'
    assert 'Files' in response['Data'] and len(response['Data']['Files']) == 5, \
        'test_list: #4 failed to list all files in directory'

    # Subtest #5: A list of files with time specifier
    conn.send_message({
        'Action': 'LIST',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111',
            'Time': '3000'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_list: #5 failed to handle non-empty directory'
    assert 'Files' in response['Data'] and len(response['Data']['Files']) == 3, \
        'test_list: #5 failed to filter files'

    conn.disconnect()
def test_listdirs():
    '''Tests the LISTDIRS command'''
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)

    # Subtest #1: Nonexistent path
    conn.send_message({
        'Action': 'LISTDIRS',
        'Data': {
            'Path': '/ 11111111-1111-1111-1111-111111111111'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, 'test_listdirs: #1 failed to handle missing path'

    # Subtest #2: Path is a file
    admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
        dbdata['admin_wid'])
    status = make_test_file(admin_dir)
    assert not status.error(), "test_listdirs: #2 failed to create test file"

    conn.send_message({
        # BUG FIX: this subtest previously sent 'LIST' (copy-paste from
        # test_list) even though it is meant to exercise LISTDIRS
        'Action': 'LISTDIRS',
        'Data': {
            'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_listdirs: #2 failed to handle path as file'

    # Subtest #3: Empty directory
    os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
    conn.send_message({
        'Action': 'LISTDIRS',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_listdirs: #3 failed to handle empty directory'
    assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 0, \
        'test_listdirs: #3 failed to have empty response for empty directory'

    # Subtest #4: A list of directories
    for i in range(2,7):
        tempname = '-'.join([(str(i) * 8), (str(i) * 4), (str(i) * 4), (str(i) * 4), (str(i) * 12)])
        try:
            os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111', tempname))
        except Exception as e:
            # BUG FIX: the original concatenated str + Exception, which raises
            # TypeError instead of reporting the real failure
            assert False, f"test_listdirs: #4 failed to create test directories: {e}"

    # a stray file in the directory must NOT show up in the listing
    make_test_file(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))

    conn.send_message({
        'Action': 'LISTDIRS',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_listdirs: #4 failed to handle non-empty directory'
    assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 5, \
        'test_list: #4 failed to list all subdirectories'

    conn.disconnect()
def test_mkdir():
    '''Tests the MKDIR command'''
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)

    # Subtest #1: Bad directory name
    conn.send_message({
        'Action': 'MKDIR',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_mkdir: #1 failed to handle bad path'

    # Subtest #2: Actual success - 1 directory
    conn.send_message({
        'Action': 'MKDIR',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_mkdir: #2 failed to create legitimate directory'

    # Subtest #3: Directory already exists
    conn.send_message({
        'Action': 'MKDIR',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 408, 'test_mkdir: #3 failed to handle existing directory'

    # Subtest #4: Actual success - nested directories
    # NOTE(review): unlike the other paths in this test, this one starts with
    # '/' + wid without the 'wsp' segment (test_rmdir uses '/ wsp') — confirm
    # the server accepts both forms.
    multipath = ' '.join(['/', dbdata['admin_wid'],
        '22222222-2222-2222-2222-222222222222',
        '33333333-3333-3333-3333-333333333333',
        '44444444-4444-4444-4444-444444444444',
        '55555555-5555-5555-5555-555555555555'
    ])
    conn.send_message({
        'Action': 'MKDIR',
        'Data': {
            'Path': multipath
        }
    })
    response = conn.read_response(server_response)
    # NOTE(review): message says '#2' — stale copy of the subtest #2 assert
    assert response['Code'] == 200, 'test_mkdir: #2 failed to create legitimate directory'

    conn.disconnect()
def test_move():
    '''Tests the MOVE command'''
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)

    # Set up the directory hierarchy
    admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
        dbdata['admin_wid'])
    inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
    os.mkdir(inner_dir)

    # Subtest #1: Nonexistent source file
    conn.send_message({
        'Action': 'MOVE',
        'Data': {
            'SourceFile': '/ ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef',
            'DestDir': '/ ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, 'test_move: #1 failed to handle nonexistent source file'

    # Subtest #2: Nonexistent destination directory
    status = make_test_file(admin_dir)
    assert not status.error(), 'test_move: #2 failed to create a test file'
    testfile1 = status['name']

    conn.send_message({
        'Action': 'MOVE',
        'Data': {
            'SourceFile': f"/ wsp {dbdata["admin_wid"]} {testfile1}",
            'DestDir': f"/ wsp {dbdata["admin_wid"]} 22222222-2222-2222-2222-222222222222"
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, 'test_move: #2 failed to handle nonexistent destination dir'

    # Subtest #3: Source path is a directory
    conn.send_message({
        'Action': 'MOVE',
        'Data': {
            'SourceFile': f"/ wsp {dbdata["admin_wid"]}",
            'DestDir': f"/ wsp {dbdata["admin_wid"]} 11111111-1111-1111-1111-111111111111"
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_move: #3 failed to handle directory as source'

    # Subtest #4: Destination is file path
    # Normally each file on the system has a unique name, but having a duplicate in this case
    # won't matter
    status = make_test_file(inner_dir, 102400, testfile1)
    conn.send_message({
        'Action': 'MOVE',
        'Data': {
            'SourceFile': f"/ wsp {dbdata["admin_wid"]} {testfile1}",
            'DestDir': f"/ wsp {dbdata["admin_wid"]} 11111111-1111-1111-1111-111111111111 {testfile1}"
        }
    })
    response = conn.read_response(server_response)
    # NOTE(review): message says 'test_copy' — copied from test_copy #4
    assert response['Code'] == 400, 'test_copy: #4 failed to handle file as destination'

    # remove the duplicate so the real move below can succeed
    os.remove(os.path.join(inner_dir, status['name']))

    # Subtest #5: Actual success
    conn.send_message({
        'Action': 'MOVE',
        'Data': {
            'SourceFile': f"/ wsp {dbdata["admin_wid"]} {testfile1}",
            'DestDir': f"/ wsp {dbdata["admin_wid"]} 11111111-1111-1111-1111-111111111111"
        }
    })
    response = conn.read_response(server_response)
    # NOTE(review): message says 'test_copy: #6' — copied from test_copy
    assert response['Code'] == 200, 'test_copy: #6 failed to succeed'

    conn.disconnect()
def test_replace():
    '''Test the REPLACE command'''
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)

    # Subtest #1: Bad old file path
    conn.send_message({
        'Action': 'REPLACE',
        'Data': {
            'OldPath': f"/ wsp {dbdata['admin_wid']} some_dir_name",
            'NewPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111",
            'Size': "1234",
            'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, f"{funcname()}: #1 failed to handle bad old file path"

    admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
        dbdata['admin_wid'])
    status = make_test_file(admin_dir)
    filename = status['name']

    # Subtest #2: Bad new file path
    conn.send_message({
        'Action': 'REPLACE',
        'Data': {
            # BUG FIX: OldPath previously contained a literal placeholder
            # instead of the name of the file created just above
            'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}",
            'NewPath': f"/ wsp {dbdata['admin_wid']} some_dir_name",
            'Size': "1234",
            'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, f"{funcname()}: #2 failed to handle bad new file path"

    # Subtest #4: Destination directory doesn't exist
    conn.send_message({
        'Action': 'REPLACE',
        'Data': {
            'OldPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111",
            'NewPath': "/ wsp 11111111-1111-1111-1111-111111111111",
            'Size': "4321",
            'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, f"{funcname()}: #4 failed to handle nonexistent destination dir"

    # Subtest #5: Actual success
    status = make_test_file(admin_dir)
    assert not status.error(), f"{funcname()}: #3 failed to create test file"
    filename = status["name"]

    conn.send_message({
        'Action': 'REPLACE',
        'Data': {
            # BUG FIX: OldPath previously contained a literal placeholder
            # instead of the freshly created file's name
            'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}",
            'NewPath': f"/ wsp {dbdata['admin_wid']}",
            'Size': "1000",
            'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 100, f'{funcname()}: #6 failed to proceed to file upload'

    conn.write('0' * 1000)

    response = conn.read_response(server_response)
    assert response['Code'] == 200, f'{funcname()}: #6 failed to replace file'

    conn.disconnect()
def test_rmdir():
	'''Tests the RMDIR command

	Covers: bad path syntax, nonexistent target, refusal to remove a
	non-empty directory, and successful removal of an empty directory.
	'''
	# Standard scaffolding: fresh DB, live server connection, clean workspace tree
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	reset_top_dir(dbdata)
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	# Subtest #1: Bad directory name (not a UUID, so the path is malformed)
	conn.send_message({
		'Action': 'RMDIR',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name',
			'Recursive': 'False'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_rmdir: #1 failed to handle bad path'
	# Subtest #2: Directory doesn't exist
	conn.send_message({
		'Action': 'RMDIR',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111',
			'Recursive': 'False'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_rmdir: #2 failed to handle nonexistent directory'
	# Subtest #3: Call fails because of non-empty directory
	# Build a 4-deep nested hierarchy so directory 2222... is guaranteed non-empty
	multipath = ' '.join(['/ wsp', dbdata['admin_wid'],
		'22222222-2222-2222-2222-222222222222',
		'33333333-3333-3333-3333-333333333333',
		'44444444-4444-4444-4444-444444444444',
		'55555555-5555-5555-5555-555555555555'
	])
	conn.send_message({
		'Action': 'MKDIR',
		'Data': {
			'Path': multipath
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_rmdir: #3 failed to create test hierarchy'
	conn.send_message({
		'Action': 'RMDIR',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222',
			'Recursive': 'False'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 408, 'test_rmdir: #3 failed to handle non-empty directory'
	# Subtest #4: Actual success - non-recursively remove an empty directory
	# multipath resolves to the deepest directory of the hierarchy, which is empty
	conn.send_message({
		'Action': 'RMDIR',
		'Data': {
			'Path': multipath
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_rmdir: #4 failed to remove an empty directory'
	# NOTE(review): unlike the sibling tests there is no conn.disconnect() here --
	# confirm whether the connection should be closed explicitly
def test_select():
	'''Tests the SELECT command

	Covers: nonexistent path, a path that points at a file instead of a
	directory, and a successful selection of a freshly created directory.
	'''
	# Standard scaffolding: fresh DB, live server connection, clean workspace tree
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	reset_top_dir(dbdata)
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	# Subtest #1: Nonexistent path
	conn.send_message({
		'Action': 'SELECT',
		'Data': {
			'Path': '/ 11111111-1111-1111-1111-111111111111'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_select: #1 failed to handle missing path'
	# Subtest #2: Path is a file -- SELECT must only accept directories
	admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid'])
	status = make_test_file(admin_dir)
	assert not status.error(), "test_select: #2 failed to create test file"
	conn.send_message({
		'Action': 'SELECT',
		'Data': {
			'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_select: #2 failed to handle path as file'
	# Subtest #3: Actual success
	innerpath = ' '.join(['/ wsp', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222'])
	conn.send_message({
		'Action': 'MKDIR',
		'Data': {
			'Path': innerpath
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_select: #3 failed to create test directory'
	conn.send_message({
		'Action': 'SELECT',
		'Data': {
			'Path': innerpath
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_select: #3 failed to work correctly'
	conn.disconnect()
def test_setquota():
	'''Tests the SETQUOTA command

	Covers: rejected size values (zero and non-numeric), a malformed
	workspace list, and a successful quota update for two workspaces.
	'''
	# Standard scaffolding; no filesystem reset needed since SETQUOTA is DB-only
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	# Two regular users so there are real workspaces to set quotas on
	init_user(dbdata, conn)
	init_user2(dbdata, conn)
	# Subtest #1: Bad sizes
	conn.send_message({
		'Action': 'SETQUOTA',
		'Data': {
			'Size': '0',
			'Workspaces': '33333333-3333-3333-3333-333333333333'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_setquota: failed to handle bad size value'
	conn.send_message({
		'Action': 'SETQUOTA',
		'Data': {
			'Size': "Real programmers don't eat quiche ;)",
			'Workspaces': '33333333-3333-3333-3333-333333333333'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_setquota: failed to handle bad size data type'
	# Subtest #2: Bad workspace list (trailing comma leaves an empty entry)
	conn.send_message({
		'Action': 'SETQUOTA',
		'Data': {
			'Size': "4096",
			'Workspaces': '33333333-3333-3333-3333-333333333333,'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_setquota: failed to handle bad workspace list'
	# Subtest #3: Actual success
	conn.send_message({
		'Action': 'SETQUOTA',
		'Data': {
			'Size': "4096",
			'Workspaces': '33333333-3333-3333-3333-333333333333, ' \
				'44444444-4444-4444-4444-444444444444'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_setquota: failed to handle actual success'
	conn.disconnect()
def test_upload():
	'''Tests the UPLOAD command

	Covers: missing parameters, nonexistent destination, oversized files,
	quota enforcement, hash mismatch detection, a successful upload, and
	resuming interrupted transfers (bad offset, exact offset, overlap).
	'''
	# Standard scaffolding: fresh DB, live server connection, clean workspace tree
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	reset_top_dir(dbdata)
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	init_user(dbdata, conn)
	# Subtest #1: Missing parameters
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': '1000',
			# Hash parameter is missing
			'Path': '/ wsp ' + dbdata['admin_wid']
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_upload: #1 failed to handle missing parameter'
	# Subtest #2: Non-existent path
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': '1000',
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_upload: #2 failed to handle non-existent path'
	# Subtest #3: Size too big
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(0x4000_0000 * 200), # 200GiB isn't all that big :P
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid']
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 414, 'test_upload: #3 failed to handle file too big'
	# Subtest #4: Insufficient quota remaining
	# The administrator normally can't have a quota. We'll just fix that just for this one test
	# *heh*
	# Normally in Python direct string substitution is a recipe for SQL injection. We're not
	# bringing in any insecure code here, so it's only a little bit bad.
	cur = dbconn.cursor()
	cur.execute("INSERT INTO quotas(wid, usage, quota) "
		f"VALUES('{dbdata['admin_wid']}', 5100 , 5120)")
	dbconn.commit()
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(0x10_0000 * 30), # 30MiB
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid']
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 409, 'test_upload: #4 quota check failed'
	# We need this to be unlimited for later tests
	cur = dbconn.cursor()
	cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
	dbconn.commit()
	# Subtest #5: Hash mismatch -- advertised hash doesn't match the sent data
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(1000),
			'Hash': r'BLAKE2B-256:5(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid']
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_upload: #5 failed to proceed to file upload'
	conn.write('0' * 1000)
	response = conn.read_response(server_response)
	assert response['Code'] == 410, 'test_upload: #5 failed to handle file hash mismatch'
	# Subtest #6: Actual success
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(1000),
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid']
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_upload: #6 failed to proceed to file upload'
	conn.write('0' * 1000)
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_upload: #6 failed to upload file'
	# Set up an interrupted transfer: send only half the promised data, then
	# drop the connection so the server keeps the temp file around for resuming.
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(1000),
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid']
		}
	})
	response = conn.read_response(server_response)
	# Check the code before touching Data so a failure surfaces as an assert,
	# not as a KeyError
	assert response['Code'] == 100, 'test_upload: resume setup failed to proceed to file upload'
	temp_name = response['Data']['TempName']
	assert temp_name != '', 'test_upload: resume setup - server failed to return temp file name'
	conn.write('0' * 500)
	del conn
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	login_admin(dbdata, conn)
	# Subtest #7: Resume offset larger than size of data stored server-side
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(1000),
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid'],
			'TempName': temp_name,
			'Offset': '2000'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_upload: #7 failed to handle offset > file size'
	# Subtest #8: Resume interrupted transfer - exact match
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(1000),
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid'],
			'TempName': temp_name,
			'Offset': '500'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_upload: #8 failed to proceed to file upload'
	conn.write('0' * 500)
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_upload: #8 failed to resume with exact offset match'
	# Set up one last interrupted transfer for the overlapping-resume case
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(1000),
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid']
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_upload: resume setup failed to proceed to file upload'
	temp_name = response['Data']['TempName']
	assert temp_name != '', 'test_upload: resume setup - server failed to return temp file name'
	conn.write('0' * 500)
	del conn
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	login_admin(dbdata, conn)
	# Subtest #9: Overlapping resume -- offset before the end of the stored
	# data, so the first 100 bytes sent are overwritten, not appended
	conn.send_message({
		'Action': 'UPLOAD',
		'Data': {
			'Size': str(1000),
			'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
			'Path': '/ wsp ' + dbdata['admin_wid'],
			'TempName': temp_name,
			'Offset': '400'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_upload: #9 failed to proceed to file upload'
	conn.write('0' * 600)
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_upload: #9 failed to resume with overlapping offset'
	conn.disconnect()
if __name__ == '__main__':
	# Ad-hoc runner for local debugging: uncomment whichever tests should be
	# executed directly without going through pytest.
	# test_copy()
	# test_delete()
	# test_download()
	# test_getquotainfo()
	# test_list()
	# test_listdirs()
	# test_mkdir()
	# test_move()
	test_replace()
	# test_rmdir()
	# test_setquota()
	# test_select()
	# test_upload()
# pylint: disable=too-many-lines
import os
import random
import shutil
import time
import uuid
from retval import RetVal
from pycryptostring import CryptoString
from pymensago.encryption import EncryptionPair
from pymensago.hash import blake2hash
from pymensago.serverconn import ServerConnection
from integration_setup import login_admin, regcode_admin, setup_test, init_server, init_user, \
init_user2, reset_top_dir
from tests.integration.integration_setup import funcname
# JSON schema describing the envelope of every server reply; passed to
# conn.read_response() throughout this module to validate the frame structure
# (all four fields must be present; 'Code' is the numeric status).
server_response = {
	'title' : 'Mensago Server Response',
	'type' : 'object',
	'required' : [ 'Code', 'Status', 'Info', 'Data' ],
	'properties' : {
		'Code' : {
			'type' : 'integer'
		},
		'Status' : {
			'type' : 'string'
		},
		'Info' : {
			'type' : 'string'
		},
		'Data' : {
			'type' : 'object'
		}
	}
}
def make_test_file(path: str, file_size=-1, file_name='') -> RetVal:
	'''Generate a test file containing nothing but zeroes. If the file size is negative, a random
	size between 1 and 10 Kb will be chosen. If the file name is empty, a random one will be
	generated.

	Parameters:
		path: directory in which to create the file
		file_size: size in bytes, or negative for a random 1-10 KiB size
		file_name: name to use, or empty for a generated '<time>.<size>.<uuid>' name

	Returns:
		name: (str) name of the test file generated
		size: (int) size of the test file generated
		On failure, a RetVal wrapping the raised exception.
	'''
	if file_size < 0:
		file_size = random.randint(1, 10) * 1024

	# Generated names follow the server's '<timestamp>.<size>.<uuid>' convention
	if not file_name:
		file_name = f"{int(time.time())}.{file_size}.{str(uuid.uuid4())}"

	# Use a context manager so the handle is closed even if the write fails
	try:
		with open(os.path.join(path, file_name), 'w') as fhandle:
			fhandle.write('0' * file_size)
	except Exception as e:
		return RetVal().wrap_exception(e)

	return RetVal().set_values({ 'name':file_name, 'size':file_size })
def setup_testdir(name) -> str:
	'''Creates an empty test folder for holding files and returns its path.

	The folder lives under 'testfiles' next to this module. Any previous
	folder of the same name is deleted first, retrying once per second while
	the OS keeps it locked (common on Windows).
	'''
	topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),'testfiles')
	if not os.path.exists(topdir):
		os.mkdir(topdir)

	testdir = os.path.join(topdir, name)
	while os.path.exists(testdir):
		try:
			shutil.rmtree(testdir)
		# Only swallow filesystem errors; a bare except would also eat
		# KeyboardInterrupt/SystemExit and make the loop unbreakable
		except OSError:
			print("Waiting a second for test folder to unlock")
			time.sleep(1.0)

	os.mkdir(testdir)
	return testdir
def test_copy():
	'''Tests the COPY command

	Covers: nonexistent source, nonexistent destination, directory given as
	source, file given as destination, quota enforcement, and success.
	'''
	# Standard scaffolding: fresh DB, live server connection, clean workspace tree
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	reset_top_dir(dbdata)
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	# Set up the directory hierarchy
	admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid'])
	inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
	os.mkdir(inner_dir)
	# Subtest #1: Nonexistent source file
	conn.send_message({
		'Action': 'COPY',
		'Data': {
			'SourceFile': '/ wsp ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef',
			'DestDir': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_copy: #1 failed to handle nonexistent source file'
	# Subtest #2: Nonexistent destination directory
	# By making this 1MB + 1byte, the file's mere existence will put us over the limit of the 1MB
	# disk quota
	status = make_test_file(admin_dir, file_size=0x10_0001)
	assert not status.error(), 'test_copy: #2 failed to create a test file'
	testfile1 = status['name']
	conn.send_message({
		'Action': 'COPY',
		'Data': {
			'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
			'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_copy: #2 failed to handle nonexistent destination dir'
	# Subtest #3: Source path is a directory
	conn.send_message({
		'Action': 'COPY',
		'Data': {
			'SourceFile': f"/ wsp {dbdata['admin_wid']}",
			'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_copy: #3 failed to handle directory as source'
	# Subtest #4: Destination is file path
	# Normally each file on the system has a unique name, but having a duplicate in this case
	# won't matter
	status = make_test_file(inner_dir, 102400, testfile1)
	conn.send_message({
		'Action': 'COPY',
		'Data': {
			'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
			'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_copy: #4 failed to handle file as destination'
	# Subtest #5: Insufficient quota remaining
	# The administrator normally can't have a quota. We'll just fix that just for this one test
	# *heh*
	# We actually have to do an update instead of an insert because the quota checks in earlier
	# calls ensure that there is a quota record for admin in the database
	cur = dbconn.cursor()
	cur.execute(f"UPDATE quotas SET quota=1 WHERE wid='{dbdata['admin_wid']}'")
	dbconn.commit()
	conn.send_message({
		'Action': 'COPY',
		'Data': {
			'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
			'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 409, 'test_copy: #5 failed to handle quota limit'
	# We need this to be unlimited for later tests
	cur = dbconn.cursor()
	cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
	dbconn.commit()
	# Subtest #6: Actual success
	conn.send_message({
		'Action': 'COPY',
		'Data': {
			'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
			'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_copy: #6 failed to succeed'
	conn.disconnect()
def test_delete():
	'''Test the DELETE command

	Covers: malformed path, nonexistent file, and successful deletion of a
	freshly created file.
	'''
	# Standard scaffolding: fresh DB, live server connection, clean workspace tree
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	reset_top_dir(dbdata)
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	# Subtest #1: Bad path
	conn.send_message({
		'Action': 'DELETE',
		'Data': {
			'Path': f"/ wsp {dbdata['admin_wid']} some_dir_name"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, f"{funcname()}: failed to handle bad path"
	# Subtest #2: File doesn't exist
	conn.send_message({
		'Action': 'DELETE',
		'Data': {
			'Path': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, f"{funcname()}: #2 failed to handle nonexistent file"
	# Subtest #3: Actual success
	admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid'])
	status = make_test_file(admin_dir)
	assert not status.error(), f"{funcname()}: #3 failed to create test file"
	filename = status["name"]
	conn.send_message({
		'Action': 'DELETE',
		'Data': {
			# Delete the file created above; a placeholder path here would
			# never match an existing file and the 200 check would fail
			'Path': f"/ wsp {dbdata['admin_wid']} {filename}"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, f"{funcname()}: #3 failed to delete file"
	conn.disconnect()
def test_download():
	'''This tests the command DOWNLOAD

	Covers: missing parameters, nonexistent path, a full successful download,
	and resuming an interrupted download (bad offset and exact offset).
	'''
	# Standard scaffolding: fresh DB, live server connection, clean workspace tree
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	reset_top_dir(dbdata)
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	init_user(dbdata, conn)
	# Subtest #1: Missing parameters
	conn.send_message({'Action': 'DOWNLOAD','Data': {}})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_download: #1 failed to handle missing parameter'
	# Subtest #2: Non-existent path
	conn.send_message({
		'Action': 'DOWNLOAD',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222' +
				' 1000.1000.22222222-2222-2222-2222-222222222222'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_download: #2 failed to handle non-existent path'
	# Subtest #3: Actual success
	# The DOWNLOAD protocol is two-step: the first request returns 100 plus the
	# file size, and a second request echoing that size starts the raw transfer.
	status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid']), file_size=1000)
	assert not status.error(), f"test_download: #3 failed to create test file: {status.info}"
	testname = status['name']
	conn.send_message({
		'Action': 'DOWNLOAD',
		'Data': {
			'Path': f"/ wsp {dbdata['admin_wid']} {testname}"
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 100, 'test_download: #3 failed to proceed to file download'
	assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \
		'test_download: #3 server failed to respond with file size'
	conn.send_message({
		'Action': 'DOWNLOAD',
		'Data': {
			'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
			'Size': '1000'
		}
	})
	rawdata = conn.read()
	assert len(rawdata) == 1000, 'test_download: #3 downloaded file had wrong length'
	# Set up an 'interrupted' transfer
	status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid']), file_size=1000)
	assert not status.error(), f"test_download: #4 failed to create test file: {status.info}"
	testname = status['name']
	# Subtest #4: Resume offset larger than size of data stored server-side
	conn.send_message({
		'Action': 'DOWNLOAD',
		'Data': {
			'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
			'Offset': '2500'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_download: #4 failed to handle offset > file size'
	# Subtest #5: Resume interrupted transfer - exact match
	conn.send_message({
		'Action': 'DOWNLOAD',
		'Data': {
			'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
			'Offset': '500'
		}
	})
	response = conn.read_response(server_response)
	# NOTE(review): the message below says #3 but this is subtest #5
	assert response['Code'] == 100, 'test_download: #3 failed to proceed to file download'
	assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \
		'test_download: #5 server failed to respond with file size'
	conn.send_message({
		'Action': 'DOWNLOAD',
		'Data': {
			'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
			'Offset': '500',
			'Size': '1000'
		}
	})
	rawdata = conn.read()
	assert len(rawdata) == 500, 'test_download: #5 resumed data had wrong length'
	# Prepending the 500 bytes "already received" must reproduce the full
	# file's hash (the test file is 1000 zeroes)
	assert blake2hash((('0' * 500) + rawdata).encode()) == \
		'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', \
		'test_download: #8 resumed file hash failure'
	conn.disconnect()
def test_getquotainfo():
	'''This tests the command GETQUOTAINFO, which gets both the quota for the workspace and the
	disk usage'''
	# Standard scaffolding: fresh DB, live server connection, clean workspace tree
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	reset_top_dir(dbdata)
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	init_user(dbdata, conn)
	# One 1000-byte file so the reported disk usage has a known value
	status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid']), file_size=1000)
	assert not status.error(), f"Failed to create test workspace file: {status.info}"
	conn.send_message({ 'Action': 'GETQUOTAINFO', 'Data': {} })
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_getquotainfo: failed to get quota information'
	assert response['Data']['DiskUsage'] == '1000', 'test_getquotainfo: disk usage was incorrect'
	# A quota of '0' means unlimited, which is the admin default
	assert response['Data']['QuotaSize'] == '0', \
		"test_getquotainfo: admin quota wasn't unlimited"
	conn.disconnect()
def test_list():
	'''Tests the LIST command

	Covers: nonexistent path, a file given instead of a directory, an empty
	directory, a populated directory, and the 'Time' filter parameter.
	'''
	# Standard scaffolding: fresh DB, live server connection, clean workspace tree
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	reset_top_dir(dbdata)
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	# Subtest #1: Nonexistent path
	conn.send_message({
		'Action': 'LIST',
		'Data': {
			'Path': '/ 11111111-1111-1111-1111-111111111111'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_list: #1 failed to handle missing path'
	# Subtest #2: Path is a file
	admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid'])
	status = make_test_file(admin_dir)
	assert not status.error(), "test_list: #2 failed to create test file"
	conn.send_message({
		'Action': 'LIST',
		'Data': {
			'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_list: #2 failed to handle path as file'
	# Subtest #3: Empty directory
	os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
	conn.send_message({
		'Action': 'LIST',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_list: #3 failed to handle empty directory'
	assert 'Files' in response['Data'] and len(response['Data']['Files']) == 0, \
		'test_list: #3 failed to have empty response for empty directory'
	# Subtest #4: A list of files
	# File names encode an upload timestamp (1000*i) in the first field, which
	# subtest #5 uses for time-based filtering
	for i in range(1,6):
		tempname = '.'.join([str(1000 * i), '500', str(uuid.uuid4())])
		try:
			fhandle = open(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111',
				tempname), 'w')
		except Exception as e:
			# str+Exception concatenation raises TypeError, so format explicitly
			assert False, f'test_list: #4 failed to create test files: {e}'
		fhandle.write('0' * 500)
		fhandle.close()
	conn.send_message({
		'Action': 'LIST',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_list: #4 failed to handle non-empty directory'
	assert 'Files' in response['Data'] and len(response['Data']['Files']) == 5, \
		'test_list: #4 failed to list all files in directory'
	# Subtest #5: A list of files with time specifier -- only the three files
	# whose name timestamp is >= 3000 should be returned
	conn.send_message({
		'Action': 'LIST',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111',
			'Time': '3000'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_list: #5 failed to handle non-empty directory'
	assert 'Files' in response['Data'] and len(response['Data']['Files']) == 3, \
		'test_list: #5 failed to filter files'
	conn.disconnect()
def test_listdirs():
	'''Tests the LISTDIRS command

	Covers: nonexistent path, a file given instead of a directory, an empty
	directory, and a directory containing subdirectories plus one file (the
	file must not appear in the listing).
	'''
	# Standard scaffolding: fresh DB, live server connection, clean workspace tree
	dbconn = setup_test()
	dbdata = init_server(dbconn)
	conn = ServerConnection()
	assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
	reset_top_dir(dbdata)
	# password is 'SandstoneAgendaTricycle'
	pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
		'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
	devid = '22222222-2222-2222-2222-222222222222'
	devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
		CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
	dbdata['pwhash'] = pwhash
	dbdata['devid'] = devid
	dbdata['devpair'] = devpair
	regcode_admin(dbdata, conn)
	login_admin(dbdata, conn)
	# Subtest #1: Nonexistent path
	conn.send_message({
		'Action': 'LISTDIRS',
		'Data': {
			'Path': '/ 11111111-1111-1111-1111-111111111111'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 404, 'test_listdirs: #1 failed to handle missing path'
	# Subtest #2: Path is a file
	admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
		dbdata['admin_wid'])
	status = make_test_file(admin_dir)
	assert not status.error(), "test_listdirs: #2 failed to create test file"
	conn.send_message({
		# This test exercises LISTDIRS; the original sent 'LIST' here by
		# copy-paste accident
		'Action': 'LISTDIRS',
		'Data': {
			'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 400, 'test_listdirs: #2 failed to handle path as file'
	# Subtest #3: Empty directory
	os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
	conn.send_message({
		'Action': 'LISTDIRS',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_listdirs: #3 failed to handle empty directory'
	assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 0, \
		'test_listdirs: #3 failed to have empty response for empty directory'
	# Subtest #4: A list of directories -- five subdirectories plus one file;
	# only the directories may be returned
	for i in range(2,7):
		tempname = '-'.join([(str(i) * 8), (str(i) * 4), (str(i) * 4), (str(i) * 4), (str(i) * 12)])
		try:
			os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111', tempname))
		except Exception as e:
			# str+Exception concatenation raises TypeError, so format explicitly
			assert False, f'test_listdirs: #4 failed to create test directories: {e}'
	make_test_file(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
	conn.send_message({
		'Action': 'LISTDIRS',
		'Data': {
			'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
		}
	})
	response = conn.read_response(server_response)
	assert response['Code'] == 200, 'test_listdirs: #4 failed to handle non-empty directory'
	assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 5, \
		'test_listdirs: #4 failed to list all subdirectories'
	conn.disconnect()
def test_mkdir():
    '''Tests the MKDIR command'''
    # Standard scaffolding: fresh database, server state, and admin login.
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)

    # Subtest #1: Bad directory name (non-UUID path segment) -> 400
    conn.send_message({
        'Action': 'MKDIR',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_mkdir: #1 failed to handle bad path'

    # Subtest #2: Actual success - 1 directory
    conn.send_message({
        'Action': 'MKDIR',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_mkdir: #2 failed to create legitimate directory'

    # Subtest #3: Directory already exists -> 408
    conn.send_message({
        'Action': 'MKDIR',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 408, 'test_mkdir: #3 failed to handle existing directory'

    # Subtest #4: Actual success - nested directories created in one call
    # NOTE(review): unlike the other subtests, this path omits the 'wsp' segment
    # after '/' -- confirm the server accepts both path forms.
    multipath = ' '.join(['/', dbdata['admin_wid'],
        '22222222-2222-2222-2222-222222222222',
        '33333333-3333-3333-3333-333333333333',
        '44444444-4444-4444-4444-444444444444',
        '55555555-5555-5555-5555-555555555555'
    ])
    conn.send_message({
        'Action': 'MKDIR',
        'Data': {
            'Path': multipath
        }
    })
    response = conn.read_response(server_response)
    # Fix: failure message previously said '#2' for subtest #4.
    assert response['Code'] == 200, 'test_mkdir: #4 failed to create legitimate directory'

    conn.disconnect()
def test_move():
    '''Tests the MOVE command'''
    # Standard scaffolding: fresh database, server state, and admin login.
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)

    # Set up the directory hierarchy used as the move destination
    admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
        dbdata['admin_wid'])
    inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
    os.mkdir(inner_dir)

    # Subtest #1: Nonexistent source file -> 404
    # NOTE(review): these paths omit the 'wsp' segment used in later subtests --
    # confirm the server accepts both path forms.
    conn.send_message({
        'Action': 'MOVE',
        'Data': {
            'SourceFile': '/ ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef',
            'DestDir': '/ ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, 'test_move: #1 failed to handle nonexistent source file'

    # Subtest #2: Nonexistent destination directory -> 404
    status = make_test_file(admin_dir)
    assert not status.error(), 'test_move: #2 failed to create a test file'
    testfile1 = status['name']
    conn.send_message({
        'Action': 'MOVE',
        'Data': {
            'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
            'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, 'test_move: #2 failed to handle nonexistent destination dir'

    # Subtest #3: Source path is a directory -> 400
    conn.send_message({
        'Action': 'MOVE',
        'Data': {
            'SourceFile': f"/ wsp {dbdata['admin_wid']}",
            'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_move: #3 failed to handle directory as source'

    # Subtest #4: Destination is a file path -> 400
    # Normally each file on the system has a unique name, but having a duplicate in this case
    # won't matter
    status = make_test_file(inner_dir, 102400, testfile1)
    conn.send_message({
        'Action': 'MOVE',
        'Data': {
            'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
            'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}"
        }
    })
    response = conn.read_response(server_response)
    # Fix: message previously said 'test_copy' -- copy/paste from the COPY test.
    assert response['Code'] == 400, 'test_move: #4 failed to handle file as destination'
    os.remove(os.path.join(inner_dir, status['name']))

    # Subtest #5: Actual success
    conn.send_message({
        'Action': 'MOVE',
        'Data': {
            'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
            'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
        }
    })
    response = conn.read_response(server_response)
    # Fix: message previously said 'test_copy: #6' for test_move subtest #5.
    assert response['Code'] == 200, 'test_move: #5 failed to succeed'

    conn.disconnect()
def test_replace():
    '''Test the REPLACE command'''
    # Standard scaffolding: fresh database, server state, and admin login.
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)

    # Subtest #1: Bad old file path -> 400
    conn.send_message({
        'Action': 'REPLACE',
        'Data': {
            'OldPath': f"/ wsp {dbdata['admin_wid']} some_dir_name",
            'NewPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111",
            'Size': "1234",
            'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, f"{funcname()}: #1 failed to handle bad old file path"

    admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
        dbdata['admin_wid'])
    status = make_test_file(admin_dir)
    filename = status['name']

    # Subtest #2: Bad new file path -> 400
    # Fix: OldPath previously contained the literal '(unknown)' -- extraction
    # damage; it references the test file created above.
    conn.send_message({
        'Action': 'REPLACE',
        'Data': {
            'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}",
            'NewPath': f"/ wsp {dbdata['admin_wid']} some_dir_name",
            'Size': "1234",
            'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, f"{funcname()}: #2 failed to handle bad new file path"

    # (There is no subtest #3; numbering kept to match historical output.)

    # Subtest #4: Destination directory doesn't exist -> 404
    conn.send_message({
        'Action': 'REPLACE',
        'Data': {
            'OldPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111",
            'NewPath': "/ wsp 11111111-1111-1111-1111-111111111111",
            'Size': "4321",
            'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, f"{funcname()}: #4 failed to handle nonexistent destination dir"

    # Subtest #5: Actual success -- server answers 100 (continue), we stream the
    # replacement bytes, then expect 200.
    status = make_test_file(admin_dir)
    # Fix: message previously said '#3' for subtest #5.
    assert not status.error(), f"{funcname()}: #5 failed to create test file"
    filename = status["name"]
    conn.send_message({
        'Action': 'REPLACE',
        'Data': {
            'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}",
            'NewPath': f"/ wsp {dbdata['admin_wid']}",
            'Size': "1000",
            'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp'
        }
    })
    response = conn.read_response(server_response)
    # Fix: messages previously said '#6' for subtest #5.
    assert response['Code'] == 100, f'{funcname()}: #5 failed to proceed to file upload'
    conn.write('0' * 1000)
    response = conn.read_response(server_response)
    assert response['Code'] == 200, f'{funcname()}: #5 failed to replace file'

    conn.disconnect()
def test_rmdir():
    '''Tests the RMDIR command'''
    # Standard scaffolding: fresh database, server state, and admin login.
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)

    # Subtest #1: Bad directory name (non-UUID segment) -> 400
    conn.send_message({
        'Action': 'RMDIR',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name',
            'Recursive': 'False'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_rmdir: #1 failed to handle bad path'

    # Subtest #2: Directory doesn't exist -> 404
    conn.send_message({
        'Action': 'RMDIR',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111',
            'Recursive': 'False'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, 'test_rmdir: #2 failed to handle nonexistent directory'

    # Subtest #3: Call fails because of non-empty directory -> 408
    multipath = ' '.join(['/ wsp', dbdata['admin_wid'],
        '22222222-2222-2222-2222-222222222222',
        '33333333-3333-3333-3333-333333333333',
        '44444444-4444-4444-4444-444444444444',
        '55555555-5555-5555-5555-555555555555'
    ])
    conn.send_message({
        'Action': 'MKDIR',
        'Data': {
            'Path': multipath
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_rmdir: #3 failed to create test hierarchy'

    conn.send_message({
        'Action': 'RMDIR',
        'Data': {
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222',
            'Recursive': 'False'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 408, 'test_rmdir: #3 failed to handle non-empty directory'

    # Subtest #4: Actual success - non-recursively remove an empty directory
    # (multipath's deepest directory is empty, so RMDIR on the full path works)
    conn.send_message({
        'Action': 'RMDIR',
        'Data': {
            'Path': multipath
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_rmdir: #4 failed to remove an empty directory'

    # Fix: close the connection like every other test in this module does --
    # the original leaked the socket.
    conn.disconnect()
def test_select():
    '''Tests the SELECT command'''
    # Standard scaffolding: fresh database, server state, and admin login.
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)

    # Subtest #1: Nonexistent path -> 404
    conn.send_message({
        'Action': 'SELECT',
        'Data': {
            'Path': '/ 11111111-1111-1111-1111-111111111111'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, 'test_select: #1 failed to handle missing path'

    # Subtest #2: Path is a file -> 400 (SELECT only accepts directories)
    admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
        dbdata['admin_wid'])
    status = make_test_file(admin_dir)
    assert not status.error(), "test_select: #2 failed to create test file"
    conn.send_message({
        'Action': 'SELECT',
        'Data': {
            'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_select: #2 failed to handle path as file'

    # Subtest #3: Actual success -- create a directory, then select it
    innerpath = ' '.join(['/ wsp', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222'])
    conn.send_message({
        'Action': 'MKDIR',
        'Data': {
            'Path': innerpath
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_select: #3 failed to create test directory'

    conn.send_message({
        'Action': 'SELECT',
        'Data': {
            'Path': innerpath
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_select: #3 failed to work correctly'

    conn.disconnect()
def test_setquota():
    '''Tests the SETQUOTA command'''
    # Standard scaffolding: fresh database, server state, and admin login.
    # Note: no reset_top_dir() here -- SETQUOTA does not touch the filesystem.
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)
    # Two regular user workspaces for the quota targets below.
    init_user(dbdata, conn)
    init_user2(dbdata, conn)

    # Subtest #1: Bad sizes -> 400 (zero, then a non-numeric string)
    conn.send_message({
        'Action': 'SETQUOTA',
        'Data': {
            'Size': '0',
            'Workspaces': '33333333-3333-3333-3333-333333333333'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_setquota: failed to handle bad size value'

    conn.send_message({
        'Action': 'SETQUOTA',
        'Data': {
            'Size': "Real programmers don't eat quiche ;)",
            'Workspaces': '33333333-3333-3333-3333-333333333333'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_setquota: failed to handle bad size data type'

    # Subtest #2: Bad workspace list (trailing comma) -> 400
    conn.send_message({
        'Action': 'SETQUOTA',
        'Data': {
            'Size': "4096",
            'Workspaces': '33333333-3333-3333-3333-333333333333,'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_setquota: failed to handle bad workspace list'

    # Subtest #3: Actual success -- set a quota on two workspaces at once
    conn.send_message({
        'Action': 'SETQUOTA',
        'Data': {
            'Size': "4096",
            'Workspaces': '33333333-3333-3333-3333-333333333333, ' \
                '44444444-4444-4444-4444-444444444444'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_setquota: failed to handle actual success'

    conn.disconnect()
def test_upload():
    '''Tests the UPLOAD command'''
    # Standard scaffolding: fresh database, server state, and admin login.
    dbconn = setup_test()
    dbdata = init_server(dbconn)

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"

    reset_top_dir(dbdata)

    # password is 'SandstoneAgendaTricycle'
    pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
        'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
    devid = '22222222-2222-2222-2222-222222222222'
    devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
        CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
    dbdata['pwhash'] = pwhash
    dbdata['devid'] = devid
    dbdata['devpair'] = devpair

    regcode_admin(dbdata, conn)
    login_admin(dbdata, conn)
    init_user(dbdata, conn)

    # Subtest #1: Missing parameters -> 400
    conn.send_message({
        'Action': 'UPLOAD',
        'Data': {
            'Size': '1000',
            # Hash parameter is missing
            'Path': '/ wsp ' + dbdata['admin_wid']
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_upload: #1 failed to handle missing parameter'

    # Subtest #2: Non-existent path -> 404
    conn.send_message({
        'Action': 'UPLOAD',
        'Data': {
            'Size': '1000',
            'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
            'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 404, 'test_upload: #2 failed to handle non-existent path'

    # Subtest #3: Size too big -> 414
    conn.send_message({
        'Action': 'UPLOAD',
        'Data': {
            'Size': str(0x4000_0000 * 200),  # 200GiB isn't all that big :P
            'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
            'Path': '/ wsp ' + dbdata['admin_wid']
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 414, 'test_upload: #3 failed to handle file too big'

    # Subtest #4: Insufficient quota remaining -> 409
    # The administrator normally can't have a quota. We'll just fix that just for this one test
    # *heh*
    # Normally in Python direct string substitution is a recipe for SQL injection. We're not
    # bringing in any insecure code here, so it's only a little bit bad.
    cur = dbconn.cursor()
    cur.execute(f"INSERT INTO quotas(wid, usage, quota) VALUES('{dbdata['admin_wid']}', 5100 , 5120)")
    dbconn.commit()

    conn.send_message({
        'Action': 'UPLOAD',
        'Data': {
            'Size': str(0x10_0000 * 30),  # 30MiB
            'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
            'Path': '/ wsp ' + dbdata['admin_wid']
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 409, 'test_upload: #4 quota check failed'

    # We need this to be unlimited for later tests
    cur = dbconn.cursor()
    cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
    dbconn.commit()

    # Subtest #5: Hash mismatch -- server accepts the upload (100) but rejects
    # the stored data (410) because the declared hash doesn't match.
    conn.send_message({
        'Action': 'UPLOAD',
        'Data': {
            'Size': str(1000),
            'Hash': r'BLAKE2B-256:5(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
            'Path': '/ wsp ' + dbdata['admin_wid']
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 100, 'test_upload: #5 failed to proceed to file upload'
    conn.write('0' * 1000)
    response = conn.read_response(server_response)
    assert response['Code'] == 410, 'test_upload: #5 failed to handle file hash mismatch'

    # Subtest #6: Actual success
    conn.send_message({
        'Action': 'UPLOAD',
        'Data': {
            'Size': str(1000),
            'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
            'Path': '/ wsp ' + dbdata['admin_wid']
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 100, 'test_upload: #6 failed to proceed to file upload'
    conn.write('0' * 1000)
    response = conn.read_response(server_response)
    # Fix: success-path message previously said 'failed to handle file hash mismatch'.
    assert response['Code'] == 200, 'test_upload: #6 failed to complete file upload'

    # Set up an interrupted transfer: send only half the bytes, then drop the
    # connection so the server keeps a partial temp file.
    conn.send_message({
        'Action': 'UPLOAD',
        'Data': {
            'Size': str(1000),
            'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
            'Path': '/ wsp ' + dbdata['admin_wid']
        }
    })
    response = conn.read_response(server_response)
    tempFileName = response['Data']['TempName']
    # Fix: these messages previously said '#6'; they belong to the setup for #7/#8.
    assert response['Code'] == 100, \
        'test_upload: interrupted-transfer setup failed to proceed to file upload'
    assert tempFileName != '', \
        'test_upload: interrupted-transfer setup: server failed to return temp file name'
    conn.write('0' * 500)
    del conn

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
    login_admin(dbdata, conn)

    # Subtest #7: Resume offset larger than size of data stored server-side -> 400
    conn.send_message({
        'Action': 'UPLOAD',
        'Data': {
            'Size': str(1000),
            'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
            'Path': '/ wsp ' + dbdata['admin_wid'],
            'TempName': tempFileName,
            'Offset': '2000'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 400, 'test_upload: #7 failed to handle offset > file size'

    # Subtest #8: Resume interrupted transfer - exact match
    conn.send_message({
        'Action': 'UPLOAD',
        'Data': {
            'Size': str(1000),
            'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
            'Path': '/ wsp ' + dbdata['admin_wid'],
            'TempName': tempFileName,
            'Offset': '500'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 100, 'test_upload: #8 failed to proceed to file upload'
    conn.write('0' * 500)
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_upload: #8 failed to resume with exact offset match'

    # Set up one last interrupted transfer for the overlapping-resume case
    conn.send_message({
        'Action': 'UPLOAD',
        'Data': {
            'Size': str(1000),
            'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
            'Path': '/ wsp ' + dbdata['admin_wid']
        }
    })
    response = conn.read_response(server_response)
    tempFileName = response['Data']['TempName']
    assert response['Code'] == 100, \
        'test_upload: interrupted-transfer setup failed to proceed to file upload'
    assert tempFileName != '', \
        'test_upload: interrupted-transfer setup: server failed to return temp file name'
    conn.write('0' * 500)
    del conn

    conn = ServerConnection()
    assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
    login_admin(dbdata, conn)

    # Subtest #9: Overlapping resume -- resend bytes 400-999 over a 500-byte partial
    conn.send_message({
        'Action': 'UPLOAD',
        'Data': {
            'Size': str(1000),
            'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
            'Path': '/ wsp ' + dbdata['admin_wid'],
            'TempName': tempFileName,
            'Offset': '400'
        }
    })
    response = conn.read_response(server_response)
    assert response['Code'] == 100, 'test_upload: #9 failed to proceed to file upload'
    conn.write('0' * 600)
    response = conn.read_response(server_response)
    assert response['Code'] == 200, 'test_upload: #9 failed to resume with overlapping offset'

    conn.disconnect()
# Ad-hoc runner: uncomment the tests to execute when running this file directly.
# NOTE(review): only test_replace() is currently enabled -- presumably leftover
# from a debugging session; confirm before treating a direct run as a full pass.
if __name__ == '__main__':
    # test_copy()
    # test_delete()
    # test_download()
    # test_getquotainfo()
    # test_list()
    # test_listdirs()
    # test_mkdir()
    # test_move()
    test_replace()
    # test_rmdir()
    # test_setquota()
    # test_select()
    # test_upload()
|
# -*- coding: utf-8 -*-
import requests
from webs.api.exceptions.customs import ServerError, InvalidAPIRequest, RecordNotFound, RecordAlreadyExists
class RequestMixin(object):
    """Thin wrapper around a shared requests.Session that converts transport
    failures and non-2xx responses into the project's custom API exceptions."""

    # Maps an HTTP status code to the exception class raised for it.
    CODE_EXCEPTION_MSG = {
        400: InvalidAPIRequest,
        404: RecordNotFound,
        409: RecordAlreadyExists,
        422: InvalidAPIRequest,
        500: ServerError,
    }

    def __init__(self):
        self.session = requests.Session()

    @property
    def _headers(self):
        # All requests are sent as JSON.
        return {
            "Content-Type": "application/json",
        }

    def request(self, server, method, url, json=None, params=None, timeout=60):
        """Issue an HTTP request to *server* and return the decoded JSON body.

        Raises a mapped exception on connect failure, an undecodable body, or
        a non-2xx status code (unmapped codes fall back to the 400 exception).
        """
        try:
            response = self.session.request(
                method, url, json=json, params=params,
                timeout=timeout, headers=self._headers
            )
        except requests.exceptions.ConnectTimeout:
            raise self.CODE_EXCEPTION_MSG[500](f"{server}服务器连接超时!")
        except requests.exceptions.ConnectionError:
            raise self.CODE_EXCEPTION_MSG[500](f"{server}服务器连接错误!")

        try:
            response_data = response.json()
        except Exception as e:
            # Chain the original decode error instead of discarding it.
            raise ServerError(f"{server}服务器参数解析失败!") from e

        if not (200 <= response.status_code < 300):
            # dict.get with a default replaces the membership-test conditional.
            exception = self.CODE_EXCEPTION_MSG.get(response.status_code,
                                                    self.CODE_EXCEPTION_MSG[400])
            # Fix: nested double quotes inside an f-string are a SyntaxError on
            # Python < 3.12 (PEP 701 only permits quote reuse from 3.12).
            raise exception(f"{server} Response:{response_data.get('error').get('message')}")

        return response_data
# Shared module-level client instance used by importers of this module.
web_client = RequestMixin()
| # -*- coding: utf-8 -*-
import requests
from webs.api.exceptions.customs import ServerError, InvalidAPIRequest, RecordNotFound, RecordAlreadyExists
class RequestMixin(object):
    """Thin wrapper around a shared requests.Session that converts transport
    failures and non-2xx responses into the project's custom API exceptions."""

    # Maps an HTTP status code to the exception class raised for it.
    CODE_EXCEPTION_MSG = {
        400: InvalidAPIRequest,
        404: RecordNotFound,
        409: RecordAlreadyExists,
        422: InvalidAPIRequest,
        500: ServerError,
    }

    def __init__(self):
        self.session = requests.Session()

    @property
    def _headers(self):
        # All requests are sent as JSON.
        return {
            "Content-Type": "application/json",
        }

    def request(self, server, method, url, json=None, params=None, timeout=60):
        """Issue an HTTP request to *server* and return the decoded JSON body.

        Raises a mapped exception on connect failure, an undecodable body, or
        a non-2xx status code (unmapped codes fall back to the 400 exception).
        """
        try:
            response = self.session.request(
                method, url, json=json, params=params,
                timeout=timeout, headers=self._headers
            )
        except requests.exceptions.ConnectTimeout:
            raise self.CODE_EXCEPTION_MSG[500](f"{server}服务器连接超时!")
        except requests.exceptions.ConnectionError:
            raise self.CODE_EXCEPTION_MSG[500](f"{server}服务器连接错误!")

        try:
            response_data = response.json()
        except Exception as e:
            # NOTE(review): the caught exception 'e' is discarded here.
            raise ServerError(f"{server}服务器参数解析失败!")

        if not (200 <= response.status_code < 300):
            # Unmapped status codes fall back to the 400 exception class.
            exception = self.CODE_EXCEPTION_MSG[response.status_code] \
                if response.status_code in self.CODE_EXCEPTION_MSG else self.CODE_EXCEPTION_MSG[400]
            raise exception(f"{server} Response:{response_data.get('error').get('message')}")

        return response_data
# Shared module-level client instance used by importers of this module.
web_client = RequestMixin()
|
import contextlib
import ipaddress
import json
import os
import random
import re
import time
import warnings
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Union
import requests
import test_infra.utils.waiting
import waiting
import yaml
from assisted_service_client import models
from assisted_service_client.models.operator_type import OperatorType
from junit_report import JunitTestCase
from netaddr import IPAddress, IPNetwork
from test_infra import consts, utils
from test_infra.assisted_service_api import InventoryClient
from test_infra.controllers.load_balancer_controller import LoadBalancerController
from test_infra.controllers.node_controllers import Node
from test_infra.helper_classes.cluster_host import ClusterHost
from test_infra.helper_classes.config import BaseClusterConfig, BaseInfraEnvConfig
from test_infra.helper_classes.entity import Entity
from test_infra.helper_classes.events_handler import EventsHandler
from test_infra.helper_classes.infra_env import InfraEnv
from test_infra.helper_classes.nodes import Nodes
from test_infra.tools import static_network, terraform_utils
from test_infra.utils import Path, log, logs_utils, network_utils, operators_utils
from test_infra.utils.entity_name import ClusterName
class Cluster(Entity):
MINIMUM_NODES_TO_WAIT = 1
EVENTS_THRESHOLD = 500 # TODO - remove EVENTS_THRESHOLD after removing it from kni-assisted-installer-auto
_config: BaseClusterConfig
def __init__(
    self,
    api_client: InventoryClient,
    config: BaseClusterConfig,
    infra_env_config: BaseInfraEnvConfig,
    nodes: Optional[Nodes] = None,
):
    """Bind this Cluster helper to an API client and seed the infra-env
    configuration from the cluster configuration so the two stay consistent."""
    super().__init__(api_client, config, nodes)

    self._infra_env_config = infra_env_config
    self._infra_env = None  # created lazily by generate_infra_env()

    # Update infraEnv configurations
    self._infra_env_config.cluster_id = config.cluster_id
    self._infra_env_config.openshift_version = self._config.openshift_version
    self._infra_env_config.pull_secret = self._config.pull_secret

    self._high_availability_mode = config.high_availability_mode
    self.name = config.cluster_name.get()
@property
def kubeconfig_path(self):
    """Filesystem path where this cluster's kubeconfig is written."""
    return self._config.kubeconfig_path
@property
def iso_download_path(self):
    """Default filesystem path for the downloaded discovery ISO."""
    return self._config.iso_download_path
@property
def enable_image_download(self):
    """Whether the discovery image should be downloaded (from cluster config)."""
    return self._config.download_image
def _update_day2_config(self, api_client: InventoryClient, cluster_id: str):
    """Refresh the local config from an already-existing (day-2) cluster
    fetched by *cluster_id*, so later operations match its actual settings."""
    day2_cluster: models.cluster.Cluster = api_client.cluster_get(cluster_id)

    self.update_config(
        **dict(
            openshift_version=day2_cluster.openshift_version,
            cluster_name=ClusterName(day2_cluster.name),
            additional_ntp_source=day2_cluster.additional_ntp_source,
            user_managed_networking=day2_cluster.user_managed_networking,
            high_availability_mode=day2_cluster.high_availability_mode,
            olm_operators=day2_cluster.monitored_operators,
            base_dns_domain=day2_cluster.base_dns_domain,
            vip_dhcp_allocation=day2_cluster.vip_dhcp_allocation,
        )
    )
def _create(self) -> str:
    """Create the cluster via the API and return its id.

    If the config already carries a cluster_id, treat it as an existing day-2
    cluster: fetch it, sync the local config, and return that id instead.
    """
    if self._config.cluster_id:
        log.info(f"Fetching day2 cluster with id {self._config.cluster_id}")
        self._update_day2_config(self.api_client, self._config.cluster_id)
        return self._config.cluster_id

    cluster = self.api_client.create_cluster(
        self._config.cluster_name.get(),
        ssh_public_key=self._config.ssh_public_key,
        openshift_version=self._config.openshift_version,
        pull_secret=self._config.pull_secret,
        base_dns_domain=self._config.base_dns_domain,
        vip_dhcp_allocation=self._config.vip_dhcp_allocation,
        additional_ntp_source=self._config.additional_ntp_source,
        user_managed_networking=self._config.user_managed_networking,
        high_availability_mode=self._config.high_availability_mode,
        olm_operators=[{"name": name} for name in self._config.olm_operators],
        network_type=self._config.network_type,
    )

    # Remember the freshly-assigned id for all later API calls.
    self._config.cluster_id = cluster.id
    return cluster.id
def delete(self):
    """Delete this cluster on the service."""
    self.api_client.delete_cluster(self.id)
def get_details(self):
    """Fetch the cluster's current state from the service."""
    return self.api_client.cluster_get(self.id)
def get_cluster_name(self):
    """Return the cluster name as currently known to the service."""
    return self.get_details().name
def get_hosts(self):
    """Return the raw host records registered to this cluster."""
    return self.api_client.get_cluster_hosts(self.id)
def get_host_ids(self):
    """Return the ids of all hosts registered to this cluster."""
    return [host["id"] for host in self.get_hosts()]
def get_host_ids_names_mapping(self):
    """Map each host id to its requested hostname."""
    return {host["id"]: host["requested_hostname"] for host in self.get_hosts()}
def get_host_assigned_roles(self):
    """Map each host id to its currently assigned role."""
    return {host["id"]: host["role"] for host in self.get_hosts()}
def get_operators(self):
    """Return the operators monitored on this cluster."""
    return self.api_client.get_cluster_operators(self.id)
# TODO remove in favor of generate_infra_env
def generate_image(self):
    """Deprecated: generate a discovery image for the cluster.

    Use generate_infra_env() instead.
    """
    warnings.warn("generate_image is deprecated. Use generate_infra_env instead.", DeprecationWarning)
    self.api_client.generate_image(cluster_id=self.id, ssh_key=self._config.ssh_public_key)
def generate_infra_env(
    self, static_network_config=None, iso_image_type=None, ssh_key=None, ignition_info=None, proxy=None
) -> InfraEnv:
    """Create an InfraEnv for this cluster; explicit args override the stored
    config defaults. The instance is cached on self._infra_env."""
    self._infra_env_config.ssh_public_key = ssh_key or self._config.ssh_public_key
    self._infra_env_config.iso_image_type = iso_image_type or self._config.iso_image_type
    self._infra_env_config.static_network_config = static_network_config
    self._infra_env_config.ignition_config_override = ignition_info
    self._infra_env_config.proxy = proxy or self._config.proxy

    infra_env = InfraEnv(api_client=self.api_client, config=self._infra_env_config)
    self._infra_env = infra_env
    return infra_env
def update_infra_env_proxy(self, proxy: models.Proxy) -> None:
    """Update the proxy both in the stored config and on the live infra-env.

    NOTE(review): assumes generate_infra_env() was called first -- otherwise
    self._infra_env is None and this raises AttributeError.
    """
    self._infra_env_config.proxy = proxy
    self._infra_env.update_proxy(proxy=proxy)
def download_infra_env_image(self, iso_download_path=None) -> Path:
    """Download the infra-env discovery image; defaults to the configured path.

    NOTE(review): assumes generate_infra_env() was called first.
    """
    iso_download_path = iso_download_path or self._config.iso_download_path
    return self._infra_env.download_image(iso_download_path=iso_download_path)
@JunitTestCase()
def generate_and_download_infra_env(
    self,
    iso_download_path=None,
    static_network_config=None,
    iso_image_type=None,
    ssh_key=None,
    ignition_info=None,
    proxy=None,
) -> Path:
    """Create the infra-env and download its discovery image in one step.

    When static IPs are configured and no static network config was supplied,
    it is derived from the controller's Terraform folder.
    """
    if self._config.is_static_ip and static_network_config is None:
        static_network_config = static_network.generate_static_network_data_from_tf(self.nodes.controller.tf_folder)

    self.generate_infra_env(
        static_network_config=static_network_config,
        iso_image_type=iso_image_type,
        ssh_key=ssh_key,
        ignition_info=ignition_info,
        proxy=proxy,
    )
    return self.download_infra_env_image(iso_download_path=iso_download_path or self._config.iso_download_path)
@JunitTestCase()
def generate_and_download_image(
    self, iso_download_path=None, static_network_config=None, iso_image_type=None, ssh_key=None
):
    """Deprecated: generate and download the discovery image via the cluster API.

    Use generate_and_download_infra_env() instead.
    """
    warnings.warn(
        "generate_and_download_image is deprecated. Use generate_and_download_infra_env instead.",
        DeprecationWarning,
    )
    iso_download_path = iso_download_path or self._config.iso_download_path

    # ensure file path exists before downloading
    if not os.path.exists(iso_download_path):
        utils.recreate_folder(os.path.dirname(iso_download_path), force_recreate=False)

    self.api_client.generate_and_download_image(
        cluster_id=self.id,
        ssh_key=ssh_key or self._config.ssh_public_key,
        image_path=iso_download_path,
        image_type=iso_image_type or self._config.iso_image_type,
        static_network_config=static_network_config,
    )
def wait_until_hosts_are_disconnected(self, nodes_count: int = None):
    """Block until all hosts (or *nodes_count* of them) report DISCONNECTED."""
    statuses = [consts.NodesStatus.DISCONNECTED]
    test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
        client=self.api_client,
        cluster_id=self.id,
        nodes_count=nodes_count or self.nodes.nodes_count,
        statuses=statuses,
        timeout=consts.DISCONNECTED_TIMEOUT,
    )
@JunitTestCase()
def wait_until_hosts_are_discovered(self, allow_insufficient=False, nodes_count: int = None):
    """Block until all hosts (or *nodes_count*) are discovered.

    Accepts PENDING_FOR_INPUT/KNOWN, plus INSUFFICIENT when
    *allow_insufficient* is set.
    """
    statuses = [consts.NodesStatus.PENDING_FOR_INPUT, consts.NodesStatus.KNOWN]
    if allow_insufficient:
        statuses.append(consts.NodesStatus.INSUFFICIENT)
    test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
        client=self.api_client,
        cluster_id=self.id,
        nodes_count=nodes_count or self.nodes.nodes_count,
        statuses=statuses,
        timeout=consts.NODES_REGISTERED_TIMEOUT,
    )
def _get_matching_hosts(self, host_type, count):
    """Pick up to *count* hosts whose requested hostname contains *host_type*
    and pair each with that role."""
    matching = []
    for host in self.get_hosts():
        if host_type in host["requested_hostname"]:
            matching.append({"id": host["id"], "role": host_type})
    return matching[:count]
def set_cluster_name(self, cluster_name: str):
    """Rename the cluster both locally (config) and on the service."""
    log.info(f"Setting Cluster Name:{cluster_name} for cluster: {self.id}")
    self.update_config(cluster_name=ClusterName(prefix=cluster_name, suffix=None))
    self.api_client.update_cluster(self.id, {"name": cluster_name})
    def select_installation_disk(self, host_id: str, disk_paths: List[dict]) -> None:
        """Choose the installation disk(s) for *host_id* through the infra-env."""
        self._infra_env.select_host_installation_disk(host_id=host_id, disk_paths=disk_paths)
    def set_ocs(self, properties=None):
        """Enable the OCS OLM operator on the cluster."""
        self.set_olm_operator(consts.OperatorType.OCS, properties=properties)
    def set_cnv(self, properties=None):
        """Enable the CNV OLM operator on the cluster."""
        self.set_olm_operator(consts.OperatorType.CNV, properties=properties)
    def unset_ocs(self):
        """Disable the OCS OLM operator on the cluster."""
        self.unset_olm_operator(consts.OperatorType.OCS)
    def unset_cnv(self):
        """Disable the CNV OLM operator on the cluster."""
        self.unset_olm_operator(consts.OperatorType.CNV)
def unset_olm_operator(self, operator_name):
log.info(f"Unsetting {operator_name} for cluster: {self.id}")
cluster = self.api_client.cluster_get(self.id)
olm_operators = []
for operator in cluster.monitored_operators:
if operator.name == operator_name or operator.operator_type == OperatorType.BUILTIN:
continue
olm_operators.append({"name": operator.name, "properties": operator.properties})
self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})
    def set_olm_operator(self, operator_name, properties=None):
        """Add *operator_name* to the cluster's OLM operators; no-op if already monitored."""
        log.info(f"Setting {operator_name} for cluster: {self.id}")
        cluster = self.api_client.cluster_get(self.id)
        # Already enabled (as OLM or built-in) -> nothing to do.
        if operator_name in [o.name for o in cluster.monitored_operators]:
            return

        olm_operators = []
        for operator in cluster.monitored_operators:
            # Built-ins are implicit; only resend the OLM ones.
            if operator.operator_type == OperatorType.BUILTIN:
                continue
            olm_operators.append({"name": operator.name, "properties": operator.properties})
        olm_operators.append({"name": operator_name, "properties": properties})

        self._config.olm_operators = olm_operators
        self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})
    def set_host_roles(self, num_masters: int = None, num_workers: int = None, requested_roles=None):
        """Assign master/worker roles by matching role names against requested hostnames.

        Returns the list of ``{"id", "role"}`` assignments that were applied.
        """
        if requested_roles is None:
            requested_roles = Counter(
                master=num_masters or self.nodes.masters_count, worker=num_workers or self.nodes.workers_count
            )
        assigned_roles = self._get_matching_hosts(host_type=consts.NodeRoles.MASTER, count=requested_roles["master"])
        assigned_roles.extend(
            self._get_matching_hosts(host_type=consts.NodeRoles.WORKER, count=requested_roles["worker"])
        )

        for role in assigned_roles:
            self._infra_env.update_host(host_id=role["id"], host_role=role["role"])

        return assigned_roles
    def set_specific_host_role(self, host, role):
        """Assign *role* to a single *host* via the infra-env."""
        self._infra_env.update_host(host_id=host["id"], host_role=role)
    def set_network_params(self, controller=None):
        """Configure cluster networking (VIPs / machine networks) per the platform and HA mode."""
        # Controller argument is here only for backward compatibility TODO - Remove after QE refactor all e2e tests
        controller = controller or self.nodes.controller  # TODO - Remove after QE refactor all e2e tests

        if self._config.platform == consts.Platforms.NONE:
            # User-managed networking: nothing is pushed to the service.
            log.info("On None platform, leaving network management to the user")
            api_vip = ingress_vip = machine_networks = None

        elif self._config.vip_dhcp_allocation or self._high_availability_mode == consts.HighAvailabilityMode.NONE:
            # DHCP-allocated VIPs (or SNO): only machine networks are supplied.
            log.info("Letting access VIPs be deducted from machine networks")
            api_vip = ingress_vip = None
            machine_networks = self.get_machine_networks()

        else:
            # Static VIPs obtained from the controller.
            log.info("Assigning VIPs statically")
            access_vips = controller.get_ingress_and_api_vips()
            api_vip = access_vips["api_vip"]
            ingress_vip = access_vips["ingress_vip"]
            machine_networks = None

        self.set_advanced_networking(
            vip_dhcp_allocation=self._config.vip_dhcp_allocation,
            cluster_networks=self._config.cluster_networks,
            service_networks=self._config.service_networks,
            machine_networks=machine_networks,
            api_vip=api_vip,
            ingress_vip=ingress_vip,
        )

        # TODO: when assisted-service supports configuring dual-stack networks on one go,
        #       change it so that we call set_advanced_networking only once
        if self._config.is_ipv4 and self._config.is_ipv6:
            machine_networks = controller.get_all_machine_addresses()
            self.set_advanced_networking(machine_networks=machine_networks)
    def get_primary_machine_cidr(self):
        """Return the primary machine CIDR, deriving it from host inventories when the controller has none.

        Raises:
            RuntimeError: if no CIDR matches all cluster hosts.
        """
        cidr = self.nodes.controller.get_primary_machine_cidr()

        if not cidr:
            # Support controllers which the machine cidr is not configurable. taking it from the AI instead
            matching_cidrs = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))

            if not matching_cidrs:
                raise RuntimeError("No matching cidr for DHCP")

            cidr = next(iter(matching_cidrs))

        return cidr
    def get_machine_networks(self):
        """Return primary (and secondary, if any) machine CIDRs, falling back to host inventories.

        Raises:
            RuntimeError: if no CIDR can be determined at all.
        """
        networks = []

        primary_machine_cidr = self.nodes.controller.get_primary_machine_cidr()
        if primary_machine_cidr:
            networks.append(primary_machine_cidr)

        secondary_machine_cidr = self.nodes.controller.get_provisioning_cidr()
        if secondary_machine_cidr:
            networks.append(secondary_machine_cidr)

        if not networks:
            # Support controllers which the machine cidr is not configurable. taking it from the AI instead
            networks = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))

            if not networks:
                raise RuntimeError("No matching cidr for DHCP")

        return networks
def set_ingress_and_api_vips(self, vips):
log.info(f"Setting API VIP:{vips["api_vip"]} and ingress VIP:{vips["ingress_vip"]} for cluster: {self.id}")
self.api_client.update_cluster(self.id, vips)
    def set_ssh_key(self, ssh_key: str):
        """Set the cluster's SSH public key in the local config and via the REST API."""
        log.info(f"Setting SSH key:{ssh_key} for cluster: {self.id}")
        self.update_config(ssh_public_key=ssh_key)
        self.api_client.update_cluster(self.id, {"ssh_public_key": ssh_key})
    def set_base_dns_domain(self, base_dns_domain: str):
        """Set the cluster's base DNS domain in the local config and via the REST API."""
        log.info(f"Setting base DNS domain:{base_dns_domain} for cluster: {self.id}")
        self.update_config(base_dns_domain=base_dns_domain)
        self.api_client.update_cluster(self.id, {"base_dns_domain": base_dns_domain})
    def set_advanced_networking(
        self,
        vip_dhcp_allocation: Optional[bool] = None,
        cluster_networks: Optional[List[models.ClusterNetwork]] = None,
        service_networks: Optional[List[models.ServiceNetwork]] = None,
        machine_networks: Optional[List[models.MachineNetwork]] = None,
        api_vip: Optional[str] = None,
        ingress_vip: Optional[str] = None,
    ):
        """Update advanced networking settings; any argument left as None falls back to self._config.

        ``machine_networks`` is accepted as plain CIDR strings and wrapped into
        ``models.MachineNetwork`` objects here.
        """
        if machine_networks is None:
            machine_networks = self._config.machine_networks
        else:
            # Callers pass raw CIDR strings; the API expects model objects.
            machine_networks = [models.MachineNetwork(cidr=cidr) for cidr in machine_networks]

        if vip_dhcp_allocation is None:
            vip_dhcp_allocation = self._config.vip_dhcp_allocation

        advanced_networking = {
            "vip_dhcp_allocation": vip_dhcp_allocation,
            "cluster_networks": cluster_networks if cluster_networks is not None else self._config.cluster_networks,
            "service_networks": service_networks if service_networks is not None else self._config.service_networks,
            "machine_networks": machine_networks,
            "api_vip": api_vip if api_vip is not None else self._config.api_vip,
            "ingress_vip": ingress_vip if ingress_vip is not None else self._config.ingress_vip,
        }

        log.info(f"Updating advanced networking with {advanced_networking} for cluster: {self.id}")

        self.update_config(**advanced_networking)
        self.api_client.update_cluster(self.id, advanced_networking)
    def set_pull_secret(self, pull_secret: str):
        """Set the cluster's pull secret in the local config and via the REST API."""
        log.info(f"Setting pull secret:{pull_secret} for cluster: {self.id}")
        self.update_config(pull_secret=pull_secret)
        self.api_client.update_cluster(self.id, {"pull_secret": pull_secret})
    def set_host_name(self, host_id, requested_name):
        """Set the requested hostname for a single host via the infra-env."""
        log.info(f"Setting Required Host Name:{requested_name}, for Host ID: {host_id}")
        self._infra_env.update_host(host_id=host_id, host_name=requested_name)
def set_additional_ntp_source(self, ntp_source: List[str]):
log.info(f"Setting Additional NTP source:{ntp_source}")
if isinstance(ntp_source, List):
ntp_source_string = ",".join(ntp_source)
elif isinstance(ntp_source, str):
ntp_source_string = ntp_source
else:
raise TypeError(
f"ntp_source must be a string or a list of strings, got: {ntp_source}," f" type: {type(ntp_source)}"
)
self.update_config(additional_ntp_source=ntp_source_string)
self.api_client.update_cluster(self.id, {"additional_ntp_source": ntp_source_string})
    def patch_discovery_ignition(self, ignition):
        """Patch the discovery ignition config through the infra-env."""
        self._infra_env.patch_discovery_ignition(ignition_info=ignition)
    def set_proxy_values(self, proxy_values: models.Proxy) -> None:
        """Store proxy settings in the config and apply them to the cluster.

        Note: the API call reads back from self._config.proxy, i.e. it relies on
        update_config() having stored *proxy_values* first.
        """
        log.info(f"Setting proxy values {proxy_values} for cluster: {self.id}")
        self.update_config(proxy=proxy_values)
        self.api_client.set_cluster_proxy(
            self.id,
            http_proxy=self._config.proxy.http_proxy,
            https_proxy=self._config.proxy.https_proxy,
            no_proxy=self._config.proxy.no_proxy,
        )
    @JunitTestCase()
    def start_install(self):
        """Trigger cluster installation via the REST API (does not wait)."""
        self.api_client.install_cluster(cluster_id=self.id)
    def wait_for_logs_complete(self, timeout, interval=60, check_host_logs_only=False):
        """Block until cluster (or host-only) log collection has completed."""
        logs_utils.wait_for_logs_complete(
            client=self.api_client,
            cluster_id=self.id,
            timeout=timeout,
            interval=interval,
            check_host_logs_only=check_host_logs_only,
        )
    def wait_for_installing_in_progress(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
        """Block until at least *nodes_count* hosts are INSTALLING_IN_PROGRESS."""
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS],
            nodes_count=nodes_count,
            timeout=consts.INSTALLING_IN_PROGRESS_TIMEOUT,
        )
    def wait_for_write_image_to_disk(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
        """Block until at least *nodes_count* hosts reached the write-to-disk or rebooting stage."""
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
            client=self.api_client,
            cluster_id=self.id,
            stages=[consts.HostsProgressStages.WRITE_IMAGE_TO_DISK, consts.HostsProgressStages.REBOOTING],
            nodes_count=nodes_count,
        )
    def wait_for_host_status(self, statuses, fall_on_error_status=True, nodes_count: int = MINIMUM_NODES_TO_WAIT):
        """Block until at least *nodes_count* hosts are in one of *statuses*."""
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=statuses,
            nodes_count=nodes_count,
            fall_on_error_status=fall_on_error_status,
        )
    def wait_for_specific_host_status(self, host, statuses, nodes_count: int = MINIMUM_NODES_TO_WAIT):
        """Block until the given *host* (by requested hostname) is in one of *statuses*."""
        test_infra.utils.waiting.wait_till_specific_host_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            host_name=host.get("requested_hostname"),
            statuses=statuses,
            nodes_count=nodes_count,
        )
    def wait_for_specific_host_stage(self, host: dict, stage: str, inclusive: bool = True):
        """Block until *host* reached *stage* (or any later stage; *inclusive* includes the stage itself)."""
        index = consts.all_host_stages.index(stage)
        test_infra.utils.waiting.wait_till_specific_host_is_in_stage(
            client=self.api_client,
            cluster_id=self.id,
            host_name=host.get("requested_hostname"),
            stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :],
        )
    def wait_for_cluster_in_error_status(self):
        """Block until the cluster reaches the ERROR status."""
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.ERROR],
            timeout=consts.ERROR_TIMEOUT,
        )
    def wait_for_pending_for_input_status(self):
        """Block until the cluster reaches the PENDING_FOR_INPUT status."""
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.PENDING_FOR_INPUT],
            timeout=consts.PENDING_USER_ACTION_TIMEOUT,
        )
    def wait_for_at_least_one_host_to_boot_during_install(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
        """Block until at least *nodes_count* hosts reached the REBOOTING stage."""
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
            client=self.api_client,
            cluster_id=self.id,
            stages=[consts.HostsProgressStages.REBOOTING],
            nodes_count=nodes_count,
        )
    def wait_for_non_bootstrap_masters_to_reach_configuring_state_during_install(self, num_masters: int = None):
        """Block until all non-bootstrap masters (num_masters - 1) reached the CONFIGURING stage."""
        num_masters = num_masters if num_masters is not None else self.nodes.masters_count
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
            client=self.api_client,
            cluster_id=self.id,
            stages=[consts.HostsProgressStages.CONFIGURING],
            nodes_count=num_masters - 1,
        )
    def wait_for_non_bootstrap_masters_to_reach_joined_state_during_install(self, num_masters: int = None):
        """Block until all non-bootstrap masters (num_masters - 1) reached the JOINED stage."""
        num_masters = num_masters if num_masters is not None else self.nodes.masters_count
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
            client=self.api_client,
            cluster_id=self.id,
            stages=[consts.HostsProgressStages.JOINED],
            nodes_count=num_masters - 1,
        )
    def wait_for_hosts_stage(self, stage: str, inclusive: bool = True):
        """Block until every host reached *stage* (or a later one; *inclusive* includes the stage itself)."""
        index = consts.all_host_stages.index(stage)
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
            client=self.api_client,
            cluster_id=self.id,
            stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :],
            nodes_count=self.nodes.nodes_count,
        )
    @JunitTestCase()
    def start_install_and_wait_for_installed(
        self,
        wait_for_hosts=True,
        wait_for_operators=True,
        wait_for_cluster_install=True,
        download_kubeconfig=True,
    ):
        """Kick off installation and (optionally) wait for hosts, operators and the
        cluster to finish, then download the kubeconfig."""
        self.start_install()
        if wait_for_hosts:
            self.wait_for_hosts_to_install()
        if wait_for_operators:
            self.wait_for_operators_to_finish()
        if wait_for_cluster_install:
            self.wait_for_install()
        if download_kubeconfig:
            self.download_kubeconfig()
def disable_worker_hosts(self):
hosts = self.get_hosts_by_role(consts.NodeRoles.WORKER)
for host in hosts:
self.disable_host(host)
    def disable_host(self, host):
        """Unbind *host* from the cluster ("disable" is implemented as infra-env unbind)."""
        host_name = host["requested_hostname"]
        log.info(f"Going to disable host: {host_name} in cluster: {self.id}")
        self._infra_env.unbind_host(host_id=host["id"])
    def enable_host(self, host):
        """Re-bind *host* to this cluster ("enable" is implemented as infra-env bind)."""
        host_name = host["requested_hostname"]
        log.info(f"Going to enable host: {host_name} in cluster: {self.id}")
        self._infra_env.bind_host(host_id=host["id"], cluster_id=self.id)
    def delete_host(self, host):
        """Delete *host* from the infra-env."""
        host_id = host["id"]
        log.info(f"Going to delete host: {host_id} in cluster: {self.id}")
        self._infra_env.delete_host(host_id=host_id)
    def cancel_install(self):
        """Cancel a running cluster installation."""
        self.api_client.cancel_cluster_install(cluster_id=self.id)
def get_bootstrap_hostname(self):
hosts = self.get_hosts_by_role(consts.NodeRoles.MASTER)
for host in hosts:
if host.get("bootstrap"):
log.info("Bootstrap node is: %s", host["requested_hostname"])
return host["requested_hostname"]
def get_hosts_by_role(self, role, hosts=None):
hosts = hosts or self.api_client.get_cluster_hosts(self.id)
nodes_by_role = []
for host in hosts:
if host["role"] == role:
nodes_by_role.append(host)
log.info(f"Found hosts: {nodes_by_role}, that has the role: {role}")
return nodes_by_role
    def get_random_host_by_role(self, role):
        """Return one randomly chosen host that has *role*."""
        return random.choice(self.get_hosts_by_role(role))
    def get_reboot_required_hosts(self):
        """Return hosts stuck in RESETING_PENDING_USER_ACTION (they need a manual reboot into the ISO)."""
        return self.api_client.get_hosts_in_statuses(
            cluster_id=self.id, statuses=[consts.NodesStatus.RESETING_PENDING_USER_ACTION]
        )
    def reboot_required_nodes_into_iso_after_reset(self):
        """Reset (reboot into the discovery ISO) every host that requires it after a cluster reset."""
        hosts_to_reboot = self.get_reboot_required_hosts()
        self.nodes.run_for_given_nodes_by_cluster_hosts(cluster_hosts=hosts_to_reboot, func_name="reset")
    def wait_for_one_host_to_be_in_wrong_boot_order(self, fall_on_error_status=True):
        """Block until at least one host reports WRONG_BOOT_ORDER (pending user action)."""
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
            status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER,
            fall_on_error_status=fall_on_error_status,
            timeout=consts.PENDING_USER_ACTION_TIMEOUT,
        )
    def wait_for_at_least_one_host_to_be_in_reboot_timeout(self, fall_on_error_status=True, nodes_count=1):
        """Block until at least *nodes_count* hosts report REBOOT_TIMEOUT (pending user action)."""
        test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
            status_info=consts.HostStatusInfo.REBOOT_TIMEOUT,
            nodes_count=nodes_count,
            fall_on_error_status=fall_on_error_status,
            timeout=consts.PENDING_USER_ACTION_TIMEOUT,
        )
    def wait_for_hosts_to_be_in_wrong_boot_order(
        self, nodes_count, timeout=consts.PENDING_USER_ACTION_TIMEOUT, fall_on_error_status=True
    ):
        """Block until *nodes_count* hosts all report WRONG_BOOT_ORDER (pending user action)."""
        test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
            status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER,
            nodes_count=nodes_count,
            timeout=timeout,
            fall_on_error_status=fall_on_error_status,
        )
    def wait_for_ready_to_install(self):
        """Block until the cluster reaches READY, then re-check once after a short delay."""
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.READY],
            timeout=consts.READY_TIMEOUT,
        )
        # This code added due to BZ:1909997, temporarily checking if help to prevent unexpected failure
        time.sleep(10)
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.READY],
            timeout=consts.READY_TIMEOUT,
        )
def is_in_cancelled_status(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.CANCELLED]
)
def is_in_error(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.ERROR]
)
def is_finalizing(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.FINALIZING]
)
def is_installing(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLING]
)
    def reset_install(self):
        """Reset a (failed/cancelled) cluster installation via the REST API."""
        self.api_client.reset_cluster_install(cluster_id=self.id)
def is_in_insufficient_status(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSUFFICIENT]
)
    def wait_for_hosts_to_install(
        self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True, nodes_count: int = None
    ):
        """Block until all hosts reach the installed status.

        NOTE(review): this passes ``consts.ClusterStatus.INSTALLED`` to a *host*
        status waiter — presumably both enums share the same "installed" string;
        confirm against consts.
        """
        test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.INSTALLED],
            nodes_count=nodes_count or self.nodes.nodes_count,
            timeout=timeout,
            fall_on_error_status=fall_on_error_status,
        )
    def wait_for_operators_to_finish(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True):
        """Block until all monitored operators settle.

        Built-in operators must become AVAILABLE (FAILED also accepted when
        *fall_on_error_status* is False); OLM operators may end AVAILABLE or FAILED,
        and *fall_on_error_status* controls whether a FAILED OLM operator raises.
        """
        operators = self.get_operators()

        if fall_on_error_status:
            statuses = [consts.OperatorStatus.AVAILABLE]
        else:
            statuses = [consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED]

        operators_utils.wait_till_all_operators_are_in_status(
            client=self.api_client,
            cluster_id=self.id,
            operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.BUILTIN)),
            operator_types=[OperatorType.BUILTIN],
            statuses=statuses,
            timeout=timeout,
            fall_on_error_status=False,
        )
        operators_utils.wait_till_all_operators_are_in_status(
            client=self.api_client,
            cluster_id=self.id,
            operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.OLM)),
            operator_types=[OperatorType.OLM],
            statuses=[consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED],
            timeout=timeout,
            fall_on_error_status=fall_on_error_status,
        )
    def is_operator_in_status(self, operator_name, status):
        """Return True when the named monitored operator currently reports *status*."""
        return operators_utils.is_operator_in_status(
            operators=self.get_operators(), operator_name=operator_name, status=status
        )
    def wait_for_install(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT):
        """Block until the cluster reaches the INSTALLED status."""
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.INSTALLED],
            timeout=timeout,
        )
    def _set_hostnames_and_roles(self):
        """Match each discovered host to its libvirt node by MAC and assign hostname + role.

        Role is derived from the node name (contains "master" -> master, else worker);
        single-node clusters get no explicit role.
        """
        cluster_id = self.id
        hosts = self.to_cluster_hosts(self.api_client.get_cluster_hosts(cluster_id))
        nodes = self.nodes.get_nodes(refresh=True)

        for host in hosts:
            # Skip hosts that already have a hostname assigned.
            if host.has_hostname():
                continue

            name = self.find_matching_node_name(host, nodes)
            assert name is not None, (
                f"Failed to find matching node for host with mac address {host.macs()}"
                f" nodes: {[(n.name, n.ips, n.macs) for n in nodes]}"
            )

            if self.nodes.nodes_count == 1:
                role = None
            else:
                role = consts.NodeRoles.MASTER if consts.NodeRoles.MASTER in name else consts.NodeRoles.WORKER

            self._infra_env.update_host(host_id=host.get_id(), host_role=role, host_name=name)
def _ha_not_none(self):
return (
self._high_availability_mode != consts.HighAvailabilityMode.NONE
and self._config.platform != consts.Platforms.NONE
)
    def download_image(self, iso_download_path: str = None) -> Path:
        """Download the discovery ISO, generating an infra-env first when none exists."""
        if self._infra_env is None:
            log.warning("No infra_env found. Generating infra_env and downloading ISO")
            return self.generate_and_download_infra_env(
                iso_download_path=iso_download_path or self._config.iso_download_path,
                iso_image_type=self._config.iso_image_type,
            )
        return self._infra_env.download_image(iso_download_path)
    @JunitTestCase()
    def prepare_for_installation(self, **kwargs):
        """Bring the cluster to the ready-to-install state: hostnames, roles, networking and DNS."""
        super(Cluster, self).prepare_for_installation(**kwargs)

        self.nodes.wait_for_networking()
        self._set_hostnames_and_roles()
        if self._high_availability_mode != consts.HighAvailabilityMode.NONE:
            self.set_host_roles(len(self.nodes.get_masters()), len(self.nodes.get_workers()))

        self.set_network_params(controller=self.nodes.controller)

        # in case of None platform we need to specify dns records before hosts are ready
        if self._config.platform == consts.Platforms.NONE:
            self._configure_load_balancer()
            self.nodes.controller.set_dns_for_user_managed_network()
        elif self._high_availability_mode == consts.HighAvailabilityMode.NONE:
            # Single node: API and ingress share the node's own IP.
            main_cidr = self.get_primary_machine_cidr()
            ip = Cluster.get_ip_for_single_node(self.api_client, self.id, main_cidr)
            self.nodes.controller.set_single_node_ip(ip)
            self.nodes.controller.set_dns(api_vip=ip, ingress_vip=ip)

        self.wait_for_ready_to_install()

        # in case of regular cluster, need to set dns after vips exits
        # in our case when nodes are ready, vips will be there for sure
        if self._ha_not_none():
            vips_info = self.__class__.get_vips_from_cluster(self.api_client, self.id)
            self.nodes.controller.set_dns(api_vip=vips_info["api_vip"], ingress_vip=vips_info["ingress_vip"])
    def download_kubeconfig_no_ingress(self, kubeconfig_path: str = None):
        """Download the pre-ingress kubeconfig to *kubeconfig_path* (defaults from config)."""
        self.api_client.download_kubeconfig_no_ingress(self.id, kubeconfig_path or self._config.kubeconfig_path)
    def download_kubeconfig(self, kubeconfig_path: str = None):
        """Download the final kubeconfig to *kubeconfig_path* (defaults from config)."""
        self.api_client.download_kubeconfig(self.id, kubeconfig_path or self._config.kubeconfig_path)
    def download_installation_logs(self, cluster_tar_path):
        """Download the cluster's installation logs archive to *cluster_tar_path*."""
        self.api_client.download_cluster_logs(self.id, cluster_tar_path)
    def get_install_config(self):
        """Return the cluster's install-config parsed from YAML into a dict."""
        return yaml.safe_load(self.api_client.get_cluster_install_config(self.id))
    def get_admin_credentials(self):
        """Return the cluster's admin credentials from the API."""
        return self.api_client.get_cluster_admin_credentials(self.id)
    def register_dummy_host(self):
        """Register a host with a fixed dummy UUID (test helper)."""
        dummy_host_id = "b164df18-0ff1-4b85-9121-059f10f58f71"
        self.api_client.register_host(self.id, dummy_host_id)
    def host_get_next_step(self, host_id):
        """Fetch the next agent step for *host_id*."""
        return self.api_client.host_get_next_step(self.id, host_id)
    def host_post_step_result(self, host_id, step_type, step_id, exit_code, output):
        """Post an agent step result for *host_id*."""
        self.api_client.host_post_step_result(
            self.id, host_id, step_type=step_type, step_id=step_id, exit_code=exit_code, output=output
        )
    def host_update_install_progress(self, host_id, current_stage, progress_info=None):
        """Report installation progress for *host_id*."""
        self.api_client.host_update_progress(self.id, host_id, current_stage, progress_info=progress_info)
    def host_complete_install(self):
        """Mark the cluster installation as successfully completed."""
        self.api_client.complete_cluster_installation(cluster_id=self.id, is_success=True)
    def setup_nodes(self, nodes, infra_env_config: BaseInfraEnvConfig):
        """Generate an infra-env, boot *nodes* from its ISO and wait for discovery.

        Returns the node -> cluster-host mapping.
        """
        self._infra_env = InfraEnv.generate(
            self.api_client, infra_env_config, iso_image_type=self._config.iso_image_type
        )
        self._infra_env.download_image(iso_download_path=self._config.iso_download_path)

        nodes.start_all()
        self.wait_until_hosts_are_discovered()
        return nodes.create_nodes_cluster_hosts_mapping(cluster=self)
    def wait_for_cluster_validation(
        self, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2
    ):
        """Poll until the given cluster validation reaches one of *statuses*; log and re-raise on timeout."""
        log.info("Wait until cluster %s validation %s is in status %s", self.id, validation_id, statuses)
        try:
            waiting.wait(
                lambda: self.is_cluster_validation_in_status(
                    validation_section=validation_section, validation_id=validation_id, statuses=statuses
                ),
                timeout_seconds=timeout,
                sleep_seconds=interval,
                waiting_for=f"Cluster validation to be in status {statuses}",
            )
        except BaseException:
            # Capture the last-seen validation value for diagnosis before propagating.
            log.error(
                "Cluster validation status is: %s",
                utils.get_cluster_validation_value(
                    self.api_client.cluster_get(self.id), validation_section, validation_id
                ),
            )
            raise
    def is_cluster_validation_in_status(self, validation_section, validation_id, statuses):
        """Return True when the cluster validation value is in *statuses*.

        Returns None (falsy) when fetching the validation info fails — the broad
        BaseException catch keeps polling loops alive across transient API errors.
        """
        log.info("Is cluster %s validation %s in status %s", self.id, validation_id, statuses)
        try:
            return (
                utils.get_cluster_validation_value(
                    self.api_client.cluster_get(self.id), validation_section, validation_id
                )
                in statuses
            )
        except BaseException:
            log.exception("Failed to get cluster %s validation info", self.id)
    def wait_for_host_validation(
        self, host_id, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2
    ):
        """Poll until the given host validation reaches one of *statuses*; log and re-raise on timeout."""
        log.info("Wait until host %s validation %s is in status %s", host_id, validation_id, statuses)
        try:
            waiting.wait(
                lambda: self.is_host_validation_in_status(
                    host_id=host_id,
                    validation_section=validation_section,
                    validation_id=validation_id,
                    statuses=statuses,
                ),
                timeout_seconds=timeout,
                sleep_seconds=interval,
                waiting_for=f"Host validation to be in status {statuses}",
            )
        except BaseException:
            # Capture the last-seen validation value for diagnosis before propagating.
            log.error(
                "Host validation status is: %s",
                utils.get_host_validation_value(
                    self.api_client.cluster_get(self.id), host_id, validation_section, validation_id
                ),
            )
            raise
    def is_host_validation_in_status(self, host_id, validation_section, validation_id, statuses):
        """Return True when the host validation value is in *statuses*.

        Returns None (falsy) when fetching the validation info fails — the broad
        BaseException catch keeps polling loops alive across transient API errors.
        """
        log.info("Is host %s validation %s in status %s", host_id, validation_id, statuses)
        try:
            return (
                utils.get_host_validation_value(
                    self.api_client.cluster_get(self.id), host_id, validation_section, validation_id
                )
                in statuses
            )
        except BaseException:
            log.exception("Failed to get cluster %s validation info", self.id)
    def wait_for_cluster_to_be_in_installing_pending_user_action_status(self):
        """Block until the cluster reaches INSTALLING_PENDING_USER_ACTION."""
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.INSTALLING_PENDING_USER_ACTION],
            timeout=consts.PENDING_USER_ACTION_TIMEOUT,
        )
    def wait_for_cluster_to_be_in_installing_status(self):
        """Block until the cluster reaches INSTALLING."""
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.INSTALLING],
            timeout=consts.START_CLUSTER_INSTALLATION_TIMEOUT,
        )
    def wait_for_cluster_to_be_in_finalizing_status(self):
        """Block until the cluster reaches FINALIZING (or INSTALLED); stop early on ERROR."""
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.FINALIZING, consts.ClusterStatus.INSTALLED],
            timeout=consts.CLUSTER_INSTALLATION_TIMEOUT,
            break_statuses=[consts.ClusterStatus.ERROR],
        )
    def wait_for_cluster_to_be_in_status(self, statuses, timeout=consts.ERROR_TIMEOUT):
        """Block until the cluster reaches one of *statuses*."""
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=statuses,
            timeout=timeout,
        )
    @classmethod
    def reset_cluster_and_wait_for_ready(cls, cluster):
        """Reset *cluster*'s installation, reboot its hosts into the ISO and wait until READY again."""
        # Reset cluster install
        cluster.reset_install()
        assert cluster.is_in_insufficient_status()
        # Reboot required nodes into ISO
        cluster.reboot_required_nodes_into_iso_after_reset()
        # Wait for hosts to be rediscovered
        cluster.wait_until_hosts_are_discovered()
        cluster.wait_for_ready_to_install()
    def get_events(self, host_id="", infra_env_id=""):
        """Deprecated: fetch cluster events; use EventsHandler.get_events instead."""
        warnings.warn(
            "Cluster.get_events is now deprecated, use EventsHandler.get_events instead",
            PendingDeprecationWarning,
        )
        handler = EventsHandler(self.api_client)
        return handler.get_events(host_id, self.id, infra_env_id)
    def _configure_load_balancer(self):
        """Configure the Terraform-managed load balancer with master/worker IPs (None-platform flow).

        The load balancer IP is the first usable address of the primary machine CIDR.
        """
        main_cidr = self.get_primary_machine_cidr()
        secondary_cidr = self.nodes.controller.get_provisioning_cidr()

        master_ips = self.get_master_ips(self.api_client, self.id, main_cidr) + self.get_master_ips(
            self.api_client, self.id, secondary_cidr
        )
        worker_ips = self.get_worker_ips(self.api_client, self.id, main_cidr)

        load_balancer_ip = str(IPNetwork(main_cidr).ip + 1)

        tf = terraform_utils.TerraformUtils(working_dir=self.nodes.controller.tf_folder)
        lb_controller = LoadBalancerController(tf)
        lb_controller.set_load_balancing_config(load_balancer_ip, master_ips, worker_ips)
@classmethod
def _get_namespace_index(cls, libvirt_network_if):
# Hack to retrieve namespace index - does not exist in tests
matcher = re.match(r"^tt(\d+)$", libvirt_network_if)
return int(matcher.groups()[0]) if matcher is not None else 0
    def wait_for_event(self, event_to_find, reference_time, params_list=None, host_id="", infra_env_id="", timeout=10):
        """Deprecated: wait for a cluster event; use EventsHandler.wait_for_event instead."""
        warnings.warn(
            "Cluster.wait_for_event is now deprecated, use EventsHandler.wait_for_event instead",
            PendingDeprecationWarning,
        )
        handler = EventsHandler(self.api_client)
        return handler.wait_for_event(
            event_to_find, reference_time, params_list, host_id, infra_env_id, self.id, timeout
        )
    @staticmethod
    def get_inventory_host_nics_data(host: dict, ipv4_first=True):
        """Parse a host's inventory JSON and return one dict per NIC (name/model/mac/ip/speed).

        'ip' is the first address of the preferred family (IPv4 first by default),
        stripped of its prefix length; None when the NIC has no addresses.
        """
        def get_network_interface_ip(interface):
            addresses = (
                interface.ipv4_addresses + interface.ipv6_addresses
                if ipv4_first
                else interface.ipv6_addresses + interface.ipv4_addresses
            )
            # Addresses come as CIDR strings ("a.b.c.d/nn"); keep only the address part.
            return addresses[0].split("/")[0] if len(addresses) > 0 else None

        inventory = models.Inventory(**json.loads(host["inventory"]))
        interfaces_list = [models.Interface(**interface) for interface in inventory.interfaces]
        return [
            {
                "name": interface.name,
                "model": interface.product,
                "mac": interface.mac_address,
                "ip": get_network_interface_ip(interface),
                "speed": interface.speed_mbps,
            }
            for interface in interfaces_list
        ]
    @staticmethod
    def get_hosts_nics_data(hosts: list, ipv4_first=True):
        """Return per-host NIC data lists for every host dict in *hosts*."""
        return [Cluster.get_inventory_host_nics_data(h, ipv4_first=ipv4_first) for h in hosts]
    @staticmethod
    def get_cluster_hosts(cluster: models.cluster.Cluster) -> List[ClusterHost]:
        """Wrap a cluster model's host objects in ClusterHost helpers."""
        return [ClusterHost(h) for h in cluster.hosts]
    @staticmethod
    def to_cluster_hosts(hosts: List[Dict[str, Any]]) -> List[ClusterHost]:
        """Convert raw host dicts (REST payloads) into ClusterHost helpers."""
        return [ClusterHost(models.Host(**h)) for h in hosts]
def get_cluster_cidrs(self, hosts: List[ClusterHost]) -> Set[str]:
cidrs = set()
for host in hosts:
ips = []
if self.nodes.is_ipv4:
ips += host.ipv4_addresses()
if self.nodes.is_ipv6:
ips += host.ipv6_addresses()
for host_ip in ips:
cidr = network_utils.get_cidr_by_interface(host_ip)
cidrs.add(cidr)
return cidrs
    def get_cluster_matching_cidrs(self, hosts: List[ClusterHost]) -> Set[str]:
        """Return the CIDRs in which every host has at least one interface.

        NOTE(review): with indentation lost in transit, ``matching_cidrs.add`` is
        reproduced inside the inner loop (set semantics make repeated adds
        harmless); a host that fails the check breaks out before any further add.
        """
        cluster_cidrs = self.get_cluster_cidrs(hosts)
        matching_cidrs = set()

        for cidr in cluster_cidrs:
            for host in hosts:
                interfaces = []
                if self.nodes.is_ipv4:
                    interfaces += host.ipv4_addresses()
                if self.nodes.is_ipv6:
                    interfaces += host.ipv6_addresses()

                if not network_utils.any_interface_in_cidr(interfaces, cidr):
                    break

                matching_cidrs.add(cidr)

        return matching_cidrs
    @staticmethod
    def get_ip_for_single_node(client, cluster_id, machine_cidr, ipv4_first=True):
        """Return the single node's IP that falls inside *machine_cidr*.

        Raises:
            Exception: when the cluster has no hosts or no NIC IP is in the CIDR.
        """
        cluster_info = client.cluster_get(cluster_id).to_dict()
        if len(cluster_info["hosts"]) == 0:
            raise Exception("No host found")
        network = IPNetwork(machine_cidr)
        interfaces = Cluster.get_inventory_host_nics_data(cluster_info["hosts"][0], ipv4_first=ipv4_first)
        for intf in interfaces:
            ip = intf["ip"]
            if IPAddress(ip) in network:
                return ip
        raise Exception("IP for single node not found")
@staticmethod
def get_ips_for_role(client, cluster_id, network, role):
cluster_info = client.cluster_get(cluster_id).to_dict()
ret = []
net = IPNetwork(network)
hosts_interfaces = Cluster.get_hosts_nics_data([h for h in cluster_info["hosts"] if h["role"] == role])
for host_interfaces in hosts_interfaces:
for intf in host_interfaces:
ip = IPAddress(intf["ip"])
if ip in net:
ret = ret + [intf["ip"]]
return ret
    @staticmethod
    def get_master_ips(client, cluster_id, network):
        """Return all master-host IPs inside *network*."""
        return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.MASTER)
    @staticmethod
    def get_worker_ips(client, cluster_id, network):
        """Return all worker-host IPs inside *network*."""
        return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.WORKER)
@staticmethod
def get_vips_from_cluster(client, cluster_id):
cluster_info = client.cluster_get(cluster_id)
return dict(api_vip=cluster_info.api_vip, ingress_vip=cluster_info.ingress_vip)
def get_host_disks(self, host, filter=None):
hosts = self.get_hosts()
selected_host = [h for h in hosts if h["id"] == host["id"]]
disks = json.loads(selected_host[0]["inventory"])["disks"]
if not filter:
return [disk for disk in disks]
else:
return [disk for disk in disks if filter(disk)]
    def get_inventory_host_ips_data(self, host: dict):
        """Return the preferred IP of each NIC in *host*'s inventory."""
        nics = self.get_inventory_host_nics_data(host)
        return [nic["ip"] for nic in nics]
    # needed for None platform and single node
    # we need to get ip where api is running
    def get_kube_api_ip(self, hosts):
        """Return the first host IP on which kube-api answers /readyz; None if none do."""
        for host in hosts:
            for ip in self.get_inventory_host_ips_data(host):
                if self.is_kubeapi_service_ready(ip):
                    return ip
    def get_api_vip(self, cluster):
        """Return the cluster's API VIP; for user-managed networking, probe the masters for it."""
        cluster = cluster or self.get_details()
        api_vip = cluster.api_vip

        if not api_vip and cluster.user_managed_networking:
            log.info("API VIP is not set, searching for api ip on masters")
            masters = self.get_hosts_by_role(consts.NodeRoles.MASTER, hosts=cluster.to_dict()["hosts"])
            api_vip = self._wait_for_api_vip(masters)

        log.info("api vip is %s", api_vip)
        return api_vip
def _wait_for_api_vip(self, hosts, timeout=180):
    """Enable some grace time for waiting for API's availability."""

    def probe():
        return self.get_kube_api_ip(hosts=hosts)

    return waiting.wait(probe, timeout_seconds=timeout, sleep_seconds=5, waiting_for="API's IP")
def find_matching_node_name(self, host: ClusterHost, nodes: List[Node]) -> Union[str, None]:
    """Find the node whose MAC address matches `host` (MACs are unique).

    MACs are compared case-insensitively on both sides; the original code
    lowercased only the node side, which silently missed matches when the
    two sources reported different letter cases.

    :return: the matching node name, or None when nothing matches.
    """
    # Looking for node matches the given host by its mac address (which is unique)
    host_macs = {m.lower() for m in host.macs()}
    for node in nodes:
        for mac in node.macs:
            if mac.lower() in host_macs:
                return node.name
    # IPv6 static ips
    if self._config.is_static_ip:
        mappings = static_network.get_name_to_mac_addresses_mapping(self.nodes.controller.tf_folder)
        for mac in host_macs:
            for name, macs in mappings.items():
                # normalize case on the mapping side as well before comparing
                if mac in {m.lower() for m in macs}:
                    return name
    return None
@staticmethod
def is_kubeapi_service_ready(ip_or_dns):
    """Validate if kube-api is ready on given address."""
    target = ip_or_dns
    with contextlib.suppress(ValueError):
        # IPv6 addresses need to be surrounded with square-brackets
        # to differentiate them from domain names
        if ipaddress.ip_address(target).version == 6:
            target = f"[{target}]"
    try:
        return requests.get(f"https://{target}:6443/readyz", verify=False, timeout=1).ok
    except BaseException:
        return False
def wait_and_kill_installer(self, host):
    """Wait until `host` starts installing, then kill its installer to simulate a host error."""
    # Wait for specific host to be in installing in progress
    self.wait_for_specific_host_status(host=host, statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS])
    # Kill installer to simulate host error
    node = self.nodes.get_node_from_cluster_host(host)
    node.kill_installer()
def get_api_vip_from_cluster(api_client, cluster_info: Union[dict, models.cluster.Cluster], pull_secret):
    """Resolve the API VIP of an existing cluster (deprecated helper).

    Wraps `cluster_info` in a transient, node-less Cluster object and
    delegates to Cluster.get_api_vip. See MGMT-4975 for the replacement plan.
    """
    import warnings

    from tests.config import ClusterConfig, InfraEnvConfig

    warnings.warn(
        "Soon get_api_vip_from_cluster will be deprecated. Avoid using or adding new functionality to "
        "this function. The function and solution for that case have not been determined yet. It might be "
        "on another module, or as a classmethod within Cluster class."
        " For more information see https://issues.redhat.com/browse/MGMT-4975",
        PendingDeprecationWarning,
    )
    if isinstance(cluster_info, dict):
        cluster_info = models.cluster.Cluster(**cluster_info)
    cluster = Cluster(
        api_client=api_client,
        infra_env_config=InfraEnvConfig(),
        config=ClusterConfig(
            cluster_name=ClusterName(cluster_info.name),
            pull_secret=pull_secret,
            ssh_public_key=cluster_info.ssh_public_key,
            cluster_id=cluster_info.id,
        ),
        nodes=None,
    )
    return cluster.get_api_vip(cluster=cluster_info)
| import contextlib
import ipaddress
import json
import os
import random
import re
import time
import warnings
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Union
import requests
import test_infra.utils.waiting
import waiting
import yaml
from assisted_service_client import models
from assisted_service_client.models.operator_type import OperatorType
from junit_report import JunitTestCase
from netaddr import IPAddress, IPNetwork
from test_infra import consts, utils
from test_infra.assisted_service_api import InventoryClient
from test_infra.controllers.load_balancer_controller import LoadBalancerController
from test_infra.controllers.node_controllers import Node
from test_infra.helper_classes.cluster_host import ClusterHost
from test_infra.helper_classes.config import BaseClusterConfig, BaseInfraEnvConfig
from test_infra.helper_classes.entity import Entity
from test_infra.helper_classes.events_handler import EventsHandler
from test_infra.helper_classes.infra_env import InfraEnv
from test_infra.helper_classes.nodes import Nodes
from test_infra.tools import static_network, terraform_utils
from test_infra.utils import Path, log, logs_utils, network_utils, operators_utils
from test_infra.utils.entity_name import ClusterName
class Cluster(Entity):
MINIMUM_NODES_TO_WAIT = 1
EVENTS_THRESHOLD = 500 # TODO - remove EVENTS_THRESHOLD after removing it from kni-assisted-installer-auto
_config: BaseClusterConfig
def __init__(
    self,
    api_client: InventoryClient,
    config: BaseClusterConfig,
    infra_env_config: BaseInfraEnvConfig,
    nodes: Optional[Nodes] = None,
):
    """Initialize the cluster wrapper and propagate shared settings to the infra-env config."""
    super().__init__(api_client, config, nodes)
    self._infra_env_config = infra_env_config
    self._infra_env = None
    # Update infraEnv configurations
    self._infra_env_config.cluster_id = config.cluster_id
    self._infra_env_config.openshift_version = self._config.openshift_version
    self._infra_env_config.pull_secret = self._config.pull_secret
    self._high_availability_mode = config.high_availability_mode
    self.name = config.cluster_name.get()
@property
def kubeconfig_path(self):
    """Path where the cluster kubeconfig is written (from config)."""
    return self._config.kubeconfig_path
@property
def iso_download_path(self):
    """Path where the discovery ISO is downloaded (from config)."""
    return self._config.iso_download_path
@property
def enable_image_download(self):
    """Whether the discovery image should be downloaded (from config)."""
    return self._config.download_image
def _update_day2_config(self, api_client: InventoryClient, cluster_id: str):
    """Copy settings of an existing (day2) cluster into this object's config."""
    day2_cluster: models.cluster.Cluster = api_client.cluster_get(cluster_id)
    self.update_config(
        **dict(
            openshift_version=day2_cluster.openshift_version,
            cluster_name=ClusterName(day2_cluster.name),
            additional_ntp_source=day2_cluster.additional_ntp_source,
            user_managed_networking=day2_cluster.user_managed_networking,
            high_availability_mode=day2_cluster.high_availability_mode,
            olm_operators=day2_cluster.monitored_operators,
            base_dns_domain=day2_cluster.base_dns_domain,
            vip_dhcp_allocation=day2_cluster.vip_dhcp_allocation,
        )
    )
def _create(self) -> str:
    """Create the cluster via the API, or adopt an existing one when a cluster_id is configured.

    :return: the cluster id.
    """
    if self._config.cluster_id:
        log.info(f"Fetching day2 cluster with id {self._config.cluster_id}")
        self._update_day2_config(self.api_client, self._config.cluster_id)
        return self._config.cluster_id
    cluster = self.api_client.create_cluster(
        self._config.cluster_name.get(),
        ssh_public_key=self._config.ssh_public_key,
        openshift_version=self._config.openshift_version,
        pull_secret=self._config.pull_secret,
        base_dns_domain=self._config.base_dns_domain,
        vip_dhcp_allocation=self._config.vip_dhcp_allocation,
        additional_ntp_source=self._config.additional_ntp_source,
        user_managed_networking=self._config.user_managed_networking,
        high_availability_mode=self._config.high_availability_mode,
        olm_operators=[{"name": name} for name in self._config.olm_operators],
        network_type=self._config.network_type,
    )
    # remember the freshly created id so later calls address the same cluster
    self._config.cluster_id = cluster.id
    return cluster.id
def delete(self):
    """Delete this cluster via the API client."""
    self.api_client.delete_cluster(self.id)
def get_details(self):
    """Fetch the full cluster object from the API."""
    return self.api_client.cluster_get(self.id)
def get_cluster_name(self):
    """Return the cluster's current name as reported by the API."""
    return self.get_details().name
def get_hosts(self):
    """Return all host records registered to this cluster."""
    return self.api_client.get_cluster_hosts(self.id)
def get_host_ids(self):
    """Return the ids of all hosts registered to this cluster."""
    ids = []
    for host in self.get_hosts():
        ids.append(host["id"])
    return ids
def get_host_ids_names_mapping(self):
    """Map each host id to its requested hostname."""
    mapping = {}
    for host in self.get_hosts():
        mapping[host["id"]] = host["requested_hostname"]
    return mapping
def get_host_assigned_roles(self):
    """Map each host id to its currently assigned role."""
    return {host["id"]: host["role"] for host in self.get_hosts()}
def get_operators(self):
    """Return the cluster's monitored operators from the API."""
    return self.api_client.get_cluster_operators(self.id)
# TODO remove in favor of generate_infra_env
def generate_image(self):
    """Deprecated: generate a discovery image directly; use generate_infra_env instead."""
    warnings.warn("generate_image is deprecated. Use generate_infra_env instead.", DeprecationWarning)
    self.api_client.generate_image(cluster_id=self.id, ssh_key=self._config.ssh_public_key)
def generate_infra_env(
    self, static_network_config=None, iso_image_type=None, ssh_key=None, ignition_info=None, proxy=None
) -> InfraEnv:
    """Create (and cache on self) an InfraEnv built from this cluster's config.

    Explicit arguments override the corresponding config values.
    """
    self._infra_env_config.ssh_public_key = ssh_key or self._config.ssh_public_key
    self._infra_env_config.iso_image_type = iso_image_type or self._config.iso_image_type
    self._infra_env_config.static_network_config = static_network_config
    self._infra_env_config.ignition_config_override = ignition_info
    self._infra_env_config.proxy = proxy or self._config.proxy
    infra_env = InfraEnv(api_client=self.api_client, config=self._infra_env_config)
    self._infra_env = infra_env
    return infra_env
def update_infra_env_proxy(self, proxy: models.Proxy) -> None:
    """Update both the cached infra-env config and the live infra-env with `proxy`."""
    self._infra_env_config.proxy = proxy
    self._infra_env.update_proxy(proxy=proxy)
def download_infra_env_image(self, iso_download_path=None) -> Path:
    """Download the current infra-env's discovery ISO to `iso_download_path` (config default)."""
    target = iso_download_path if iso_download_path else self._config.iso_download_path
    return self._infra_env.download_image(iso_download_path=target)
@JunitTestCase()
def generate_and_download_infra_env(
    self,
    iso_download_path=None,
    static_network_config=None,
    iso_image_type=None,
    ssh_key=None,
    ignition_info=None,
    proxy=None,
) -> Path:
    """Generate an infra-env and download its discovery ISO in one step.

    For static-IP setups without an explicit config, the static network
    data is derived from the controller's terraform folder.
    """
    if self._config.is_static_ip and static_network_config is None:
        static_network_config = static_network.generate_static_network_data_from_tf(self.nodes.controller.tf_folder)
    self.generate_infra_env(
        static_network_config=static_network_config,
        iso_image_type=iso_image_type,
        ssh_key=ssh_key,
        ignition_info=ignition_info,
        proxy=proxy,
    )
    return self.download_infra_env_image(iso_download_path=iso_download_path or self._config.iso_download_path)
@JunitTestCase()
def generate_and_download_image(
    self, iso_download_path=None, static_network_config=None, iso_image_type=None, ssh_key=None
):
    """Deprecated: generate and download a discovery image via the legacy API path."""
    warnings.warn(
        "generate_and_download_image is deprecated. Use generate_and_download_infra_env instead.",
        DeprecationWarning,
    )
    iso_download_path = iso_download_path or self._config.iso_download_path
    # ensure file path exists before downloading
    if not os.path.exists(iso_download_path):
        utils.recreate_folder(os.path.dirname(iso_download_path), force_recreate=False)
    self.api_client.generate_and_download_image(
        cluster_id=self.id,
        ssh_key=ssh_key or self._config.ssh_public_key,
        image_path=iso_download_path,
        image_type=iso_image_type or self._config.iso_image_type,
        static_network_config=static_network_config,
    )
def wait_until_hosts_are_disconnected(self, nodes_count: int = None):
    """Block until all (or `nodes_count`) hosts report DISCONNECTED."""
    statuses = [consts.NodesStatus.DISCONNECTED]
    test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
        client=self.api_client,
        cluster_id=self.id,
        nodes_count=nodes_count or self.nodes.nodes_count,
        statuses=statuses,
        timeout=consts.DISCONNECTED_TIMEOUT,
    )
@JunitTestCase()
def wait_until_hosts_are_discovered(self, allow_insufficient=False, nodes_count: int = None):
    """Block until all hosts are discovered (PENDING_FOR_INPUT/KNOWN, optionally INSUFFICIENT)."""
    statuses = [consts.NodesStatus.PENDING_FOR_INPUT, consts.NodesStatus.KNOWN]
    if allow_insufficient:
        statuses.append(consts.NodesStatus.INSUFFICIENT)
    test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
        client=self.api_client,
        cluster_id=self.id,
        nodes_count=nodes_count or self.nodes.nodes_count,
        statuses=statuses,
        timeout=consts.NODES_REGISTERED_TIMEOUT,
    )
def _get_matching_hosts(self, host_type, count):
    """Return up to `count` {id, role} entries for hosts whose hostname contains `host_type`."""
    matching = []
    for host in self.get_hosts():
        if host_type in host["requested_hostname"]:
            matching.append({"id": host["id"], "role": host_type})
    return matching[:count]
def set_cluster_name(self, cluster_name: str):
    """Rename the cluster both locally (config) and server-side."""
    log.info(f"Setting Cluster Name:{cluster_name} for cluster: {self.id}")
    self.update_config(cluster_name=ClusterName(prefix=cluster_name, suffix=None))
    self.api_client.update_cluster(self.id, {"name": cluster_name})
def select_installation_disk(self, host_id: str, disk_paths: List[dict]) -> None:
    """Choose the installation disk(s) for a host via the infra-env."""
    self._infra_env.select_host_installation_disk(host_id=host_id, disk_paths=disk_paths)
def set_ocs(self, properties=None):
    """Enable the OCS OLM operator on the cluster."""
    self.set_olm_operator(consts.OperatorType.OCS, properties=properties)
def set_cnv(self, properties=None):
    """Enable the CNV OLM operator on the cluster."""
    self.set_olm_operator(consts.OperatorType.CNV, properties=properties)
def unset_ocs(self):
    """Disable the OCS OLM operator on the cluster."""
    self.unset_olm_operator(consts.OperatorType.OCS)
def unset_cnv(self):
    """Disable the CNV OLM operator on the cluster."""
    self.unset_olm_operator(consts.OperatorType.CNV)
def unset_olm_operator(self, operator_name):
    """Remove `operator_name` from the cluster's OLM operators (builtins are never sent)."""
    log.info(f"Unsetting {operator_name} for cluster: {self.id}")
    cluster = self.api_client.cluster_get(self.id)
    olm_operators = [
        {"name": op.name, "properties": op.properties}
        for op in cluster.monitored_operators
        if op.name != operator_name and op.operator_type != OperatorType.BUILTIN
    ]
    self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})
def set_olm_operator(self, operator_name, properties=None):
    """Add `operator_name` to the cluster's OLM operators (no-op when already monitored)."""
    log.info(f"Setting {operator_name} for cluster: {self.id}")
    cluster = self.api_client.cluster_get(self.id)
    if operator_name in [o.name for o in cluster.monitored_operators]:
        return
    olm_operators = []
    for operator in cluster.monitored_operators:
        # builtin operators are managed by the service and must not be re-sent
        if operator.operator_type == OperatorType.BUILTIN:
            continue
        olm_operators.append({"name": operator.name, "properties": operator.properties})
    olm_operators.append({"name": operator_name, "properties": properties})
    self._config.olm_operators = olm_operators
    self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})
def set_host_roles(self, num_masters: int = None, num_workers: int = None, requested_roles=None):
    """Assign master/worker roles to hosts by matching role names in their hostnames.

    :return: the list of {id, role} assignments that were applied.
    """
    if requested_roles is None:
        requested_roles = Counter(
            master=num_masters or self.nodes.masters_count, worker=num_workers or self.nodes.workers_count
        )
    assigned_roles = self._get_matching_hosts(host_type=consts.NodeRoles.MASTER, count=requested_roles["master"])
    assigned_roles.extend(
        self._get_matching_hosts(host_type=consts.NodeRoles.WORKER, count=requested_roles["worker"])
    )
    for role in assigned_roles:
        self._infra_env.update_host(host_id=role["id"], host_role=role["role"])
    return assigned_roles
def set_specific_host_role(self, host, role):
    """Assign `role` to a single host via the infra-env."""
    self._infra_env.update_host(host_id=host["id"], host_role=role)
def set_network_params(self, controller=None):
    """Configure cluster networking (VIPs / machine networks) per platform and HA mode.

    None platform: leave everything unset. DHCP VIP allocation or single-node:
    send machine networks and let VIPs be deduced. Otherwise: assign VIPs
    statically from the controller.
    """
    # Controller argument is here only for backward compatibility TODO - Remove after QE refactor all e2e tests
    controller = controller or self.nodes.controller  # TODO - Remove after QE refactor all e2e tests
    if self._config.platform == consts.Platforms.NONE:
        log.info("On None platform, leaving network management to the user")
        api_vip = ingress_vip = machine_networks = None
    elif self._config.vip_dhcp_allocation or self._high_availability_mode == consts.HighAvailabilityMode.NONE:
        log.info("Letting access VIPs be deducted from machine networks")
        api_vip = ingress_vip = None
        machine_networks = self.get_machine_networks()
    else:
        log.info("Assigning VIPs statically")
        access_vips = controller.get_ingress_and_api_vips()
        api_vip = access_vips["api_vip"]
        ingress_vip = access_vips["ingress_vip"]
        machine_networks = None
    self.set_advanced_networking(
        vip_dhcp_allocation=self._config.vip_dhcp_allocation,
        cluster_networks=self._config.cluster_networks,
        service_networks=self._config.service_networks,
        machine_networks=machine_networks,
        api_vip=api_vip,
        ingress_vip=ingress_vip,
    )
    # TODO: when assisted-service supports configuring dual-stack networks on one go,
    # change it so that we call set_advanced_networking only once
    if self._config.is_ipv4 and self._config.is_ipv6:
        machine_networks = controller.get_all_machine_addresses()
        self.set_advanced_networking(machine_networks=machine_networks)
def get_primary_machine_cidr(self):
    """Return the primary machine CIDR, falling back to CIDRs matched from host inventories."""
    cidr = self.nodes.controller.get_primary_machine_cidr()
    if not cidr:
        # Support controllers which the machine cidr is not configurable. taking it from the AI instead
        matching_cidrs = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))
        if not matching_cidrs:
            raise RuntimeError("No matching cidr for DHCP")
        cidr = next(iter(matching_cidrs))
    return cidr
def get_machine_networks(self):
    """Return primary (and optional provisioning) machine CIDRs, with an inventory-based fallback.

    :raises RuntimeError: when no CIDR can be determined at all.
    """
    networks = []
    primary_machine_cidr = self.nodes.controller.get_primary_machine_cidr()
    if primary_machine_cidr:
        networks.append(primary_machine_cidr)
    secondary_machine_cidr = self.nodes.controller.get_provisioning_cidr()
    if secondary_machine_cidr:
        networks.append(secondary_machine_cidr)
    if not networks:
        # Support controllers which the machine cidr is not configurable. taking it from the AI instead
        networks = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))
        if not networks:
            raise RuntimeError("No matching cidr for DHCP")
    return networks
def set_ingress_and_api_vips(self, vips):
    """Push api_vip/ingress_vip values to the cluster."""
    log.info(f"Setting API VIP:{vips['api_vip']} and ingress VIP:{vips['ingress_vip']} for cluster: {self.id}")
    self.api_client.update_cluster(self.id, vips)
def set_ssh_key(self, ssh_key: str):
    """Set the cluster SSH public key locally and server-side."""
    log.info(f"Setting SSH key:{ssh_key} for cluster: {self.id}")
    self.update_config(ssh_public_key=ssh_key)
    self.api_client.update_cluster(self.id, {"ssh_public_key": ssh_key})
def set_base_dns_domain(self, base_dns_domain: str):
    """Set the cluster base DNS domain locally and server-side."""
    log.info(f"Setting base DNS domain:{base_dns_domain} for cluster: {self.id}")
    self.update_config(base_dns_domain=base_dns_domain)
    self.api_client.update_cluster(self.id, {"base_dns_domain": base_dns_domain})
def set_advanced_networking(
    self,
    vip_dhcp_allocation: Optional[bool] = None,
    cluster_networks: Optional[List[models.ClusterNetwork]] = None,
    service_networks: Optional[List[models.ServiceNetwork]] = None,
    machine_networks: Optional[List[models.MachineNetwork]] = None,
    api_vip: Optional[str] = None,
    ingress_vip: Optional[str] = None,
):
    """Update the cluster's advanced networking; None arguments fall back to config values.

    NOTE: explicitly passed `machine_networks` are CIDR strings and are
    wrapped into models.MachineNetwork objects here.
    """
    if machine_networks is None:
        machine_networks = self._config.machine_networks
    else:
        machine_networks = [models.MachineNetwork(cidr=cidr) for cidr in machine_networks]
    if vip_dhcp_allocation is None:
        vip_dhcp_allocation = self._config.vip_dhcp_allocation
    advanced_networking = {
        "vip_dhcp_allocation": vip_dhcp_allocation,
        "cluster_networks": cluster_networks if cluster_networks is not None else self._config.cluster_networks,
        "service_networks": service_networks if service_networks is not None else self._config.service_networks,
        "machine_networks": machine_networks,
        "api_vip": api_vip if api_vip is not None else self._config.api_vip,
        "ingress_vip": ingress_vip if ingress_vip is not None else self._config.ingress_vip,
    }
    log.info(f"Updating advanced networking with {advanced_networking} for cluster: {self.id}")
    self.update_config(**advanced_networking)
    self.api_client.update_cluster(self.id, advanced_networking)
def set_pull_secret(self, pull_secret: str):
    """Set the cluster pull secret locally and server-side."""
    log.info(f"Setting pull secret:{pull_secret} for cluster: {self.id}")
    self.update_config(pull_secret=pull_secret)
    self.api_client.update_cluster(self.id, {"pull_secret": pull_secret})
def set_host_name(self, host_id, requested_name):
    """Set a host's requested hostname via the infra-env."""
    log.info(f"Setting Required Host Name:{requested_name}, for Host ID: {host_id}")
    self._infra_env.update_host(host_id=host_id, host_name=requested_name)
def set_additional_ntp_source(self, ntp_source: List[str]):
    """Set the cluster's additional NTP sources.

    :param ntp_source: a single source string or a list of source strings
        (joined with commas before being sent to the service).
    :raises TypeError: for any other type.
    """
    log.info(f"Setting Additional NTP source:{ntp_source}")
    # isinstance() must use the builtin `list`, not typing.List
    if isinstance(ntp_source, list):
        ntp_source_string = ",".join(ntp_source)
    elif isinstance(ntp_source, str):
        ntp_source_string = ntp_source
    else:
        raise TypeError(
            f"ntp_source must be a string or a list of strings, got: {ntp_source}," f" type: {type(ntp_source)}"
        )
    self.update_config(additional_ntp_source=ntp_source_string)
    self.api_client.update_cluster(self.id, {"additional_ntp_source": ntp_source_string})
def patch_discovery_ignition(self, ignition):
    """Apply an ignition override to the discovery image via the infra-env."""
    self._infra_env.patch_discovery_ignition(ignition_info=ignition)
def set_proxy_values(self, proxy_values: models.Proxy) -> None:
    """Store `proxy_values` in the config, then push them to the cluster."""
    log.info(f"Setting proxy values {proxy_values} for cluster: {self.id}")
    self.update_config(proxy=proxy_values)
    # values are read back from config, which update_config just populated
    self.api_client.set_cluster_proxy(
        self.id,
        http_proxy=self._config.proxy.http_proxy,
        https_proxy=self._config.proxy.https_proxy,
        no_proxy=self._config.proxy.no_proxy,
    )
@JunitTestCase()
def start_install(self):
    """Trigger cluster installation."""
    self.api_client.install_cluster(cluster_id=self.id)
def wait_for_logs_complete(self, timeout, interval=60, check_host_logs_only=False):
    """Block until cluster (or host-only) log collection completes."""
    logs_utils.wait_for_logs_complete(
        client=self.api_client,
        cluster_id=self.id,
        timeout=timeout,
        interval=interval,
        check_host_logs_only=check_host_logs_only,
    )
def wait_for_installing_in_progress(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
    """Block until at least `nodes_count` hosts are INSTALLING_IN_PROGRESS."""
    test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
        client=self.api_client,
        cluster_id=self.id,
        statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS],
        nodes_count=nodes_count,
        timeout=consts.INSTALLING_IN_PROGRESS_TIMEOUT,
    )
def wait_for_write_image_to_disk(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
    """Block until at least `nodes_count` hosts reached write-to-disk or rebooting stage."""
    test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
        client=self.api_client,
        cluster_id=self.id,
        stages=[consts.HostsProgressStages.WRITE_IMAGE_TO_DISK, consts.HostsProgressStages.REBOOTING],
        nodes_count=nodes_count,
    )
def wait_for_host_status(self, statuses, fall_on_error_status=True, nodes_count: int = MINIMUM_NODES_TO_WAIT):
    """Block until at least `nodes_count` hosts are in one of `statuses`."""
    test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
        client=self.api_client,
        cluster_id=self.id,
        statuses=statuses,
        nodes_count=nodes_count,
        fall_on_error_status=fall_on_error_status,
    )
def wait_for_specific_host_status(self, host, statuses, nodes_count: int = MINIMUM_NODES_TO_WAIT):
    """Block until the given host (by requested hostname) is in one of `statuses`."""
    test_infra.utils.waiting.wait_till_specific_host_is_in_status(
        client=self.api_client,
        cluster_id=self.id,
        host_name=host.get("requested_hostname"),
        statuses=statuses,
        nodes_count=nodes_count,
    )
def wait_for_specific_host_stage(self, host: dict, stage: str, inclusive: bool = True):
    """Block until the given host reaches `stage` (or any later stage)."""
    index = consts.all_host_stages.index(stage)
    test_infra.utils.waiting.wait_till_specific_host_is_in_stage(
        client=self.api_client,
        cluster_id=self.id,
        host_name=host.get("requested_hostname"),
        # inclusive=True accepts `stage` itself; otherwise only stages after it
        stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :],
    )
def wait_for_cluster_in_error_status(self):
    """Block until the cluster reaches ERROR status."""
    utils.wait_till_cluster_is_in_status(
        client=self.api_client,
        cluster_id=self.id,
        statuses=[consts.ClusterStatus.ERROR],
        timeout=consts.ERROR_TIMEOUT,
    )
def wait_for_pending_for_input_status(self):
    """Block until the cluster reaches PENDING_FOR_INPUT status."""
    utils.wait_till_cluster_is_in_status(
        client=self.api_client,
        cluster_id=self.id,
        statuses=[consts.ClusterStatus.PENDING_FOR_INPUT],
        timeout=consts.PENDING_USER_ACTION_TIMEOUT,
    )
def wait_for_at_least_one_host_to_boot_during_install(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
    """Block until at least `nodes_count` hosts reach the REBOOTING stage."""
    test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
        client=self.api_client,
        cluster_id=self.id,
        stages=[consts.HostsProgressStages.REBOOTING],
        nodes_count=nodes_count,
    )
def wait_for_non_bootstrap_masters_to_reach_configuring_state_during_install(self, num_masters: int = None):
    """Block until all non-bootstrap masters (num_masters - 1) reach CONFIGURING."""
    num_masters = num_masters if num_masters is not None else self.nodes.masters_count
    test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
        client=self.api_client,
        cluster_id=self.id,
        stages=[consts.HostsProgressStages.CONFIGURING],
        nodes_count=num_masters - 1,
    )
def wait_for_non_bootstrap_masters_to_reach_joined_state_during_install(self, num_masters: int = None):
    """Block until all non-bootstrap masters (num_masters - 1) reach JOINED."""
    num_masters = num_masters if num_masters is not None else self.nodes.masters_count
    test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
        client=self.api_client,
        cluster_id=self.id,
        stages=[consts.HostsProgressStages.JOINED],
        nodes_count=num_masters - 1,
    )
def wait_for_hosts_stage(self, stage: str, inclusive: bool = True):
    """Block until every host reaches `stage` (or any later stage)."""
    index = consts.all_host_stages.index(stage)
    test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
        client=self.api_client,
        cluster_id=self.id,
        stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :],
        nodes_count=self.nodes.nodes_count,
    )
@JunitTestCase()
def start_install_and_wait_for_installed(
    self,
    wait_for_hosts=True,
    wait_for_operators=True,
    wait_for_cluster_install=True,
    download_kubeconfig=True,
):
    """Start installation and (optionally) wait for hosts, operators, cluster, and kubeconfig."""
    self.start_install()
    if wait_for_hosts:
        self.wait_for_hosts_to_install()
    if wait_for_operators:
        self.wait_for_operators_to_finish()
    if wait_for_cluster_install:
        self.wait_for_install()
    if download_kubeconfig:
        self.download_kubeconfig()
def disable_worker_hosts(self):
    """Unbind every worker host from the cluster."""
    for worker_host in self.get_hosts_by_role(consts.NodeRoles.WORKER):
        self.disable_host(worker_host)
def disable_host(self, host):
    """Unbind a single host from the cluster (disables it for installation)."""
    host_name = host["requested_hostname"]
    log.info(f"Going to disable host: {host_name} in cluster: {self.id}")
    self._infra_env.unbind_host(host_id=host["id"])
def enable_host(self, host):
    """Bind a previously disabled host back to this cluster."""
    host_name = host["requested_hostname"]
    log.info(f"Going to enable host: {host_name} in cluster: {self.id}")
    self._infra_env.bind_host(host_id=host["id"], cluster_id=self.id)
def delete_host(self, host):
    """Remove a host from the infra-env entirely."""
    host_id = host["id"]
    log.info(f"Going to delete host: {host_id} in cluster: {self.id}")
    self._infra_env.delete_host(host_id=host_id)
def cancel_install(self):
    """Cancel an in-progress cluster installation."""
    self.api_client.cancel_cluster_install(cluster_id=self.id)
def get_bootstrap_hostname(self):
    """Return the requested hostname of the bootstrap master, or None when absent."""
    masters = self.get_hosts_by_role(consts.NodeRoles.MASTER)
    bootstrap = next((h for h in masters if h.get("bootstrap")), None)
    if bootstrap is not None:
        log.info("Bootstrap node is: %s", bootstrap["requested_hostname"])
        return bootstrap["requested_hostname"]
    return None
def get_hosts_by_role(self, role, hosts=None):
    """Return the subset of `hosts` (or all cluster hosts) whose role equals `role`."""
    candidates = hosts or self.api_client.get_cluster_hosts(self.id)
    nodes_by_role = [host for host in candidates if host["role"] == role]
    log.info(f"Found hosts: {nodes_by_role}, that has the role: {role}")
    return nodes_by_role
def get_random_host_by_role(self, role):
    """Return one randomly chosen host having `role`."""
    return random.choice(self.get_hosts_by_role(role))
def get_reboot_required_hosts(self):
    """Return hosts stuck in RESETING_PENDING_USER_ACTION (need a manual reboot)."""
    return self.api_client.get_hosts_in_statuses(
        cluster_id=self.id, statuses=[consts.NodesStatus.RESETING_PENDING_USER_ACTION]
    )
def reboot_required_nodes_into_iso_after_reset(self):
    """Reset (reboot into the ISO) every host that requires it after a cluster reset."""
    hosts_to_reboot = self.get_reboot_required_hosts()
    self.nodes.run_for_given_nodes_by_cluster_hosts(cluster_hosts=hosts_to_reboot, func_name="reset")
def wait_for_one_host_to_be_in_wrong_boot_order(self, fall_on_error_status=True):
    """Block until at least one host reports wrong boot order."""
    test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
        client=self.api_client,
        cluster_id=self.id,
        statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
        status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER,
        fall_on_error_status=fall_on_error_status,
        timeout=consts.PENDING_USER_ACTION_TIMEOUT,
    )
def wait_for_at_least_one_host_to_be_in_reboot_timeout(self, fall_on_error_status=True, nodes_count=1):
    """Block until at least `nodes_count` hosts hit the reboot-timeout user action."""
    test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
        client=self.api_client,
        cluster_id=self.id,
        statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
        status_info=consts.HostStatusInfo.REBOOT_TIMEOUT,
        nodes_count=nodes_count,
        fall_on_error_status=fall_on_error_status,
        timeout=consts.PENDING_USER_ACTION_TIMEOUT,
    )
def wait_for_hosts_to_be_in_wrong_boot_order(
    self, nodes_count, timeout=consts.PENDING_USER_ACTION_TIMEOUT, fall_on_error_status=True
):
    """Block until `nodes_count` hosts report wrong boot order."""
    test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
        client=self.api_client,
        cluster_id=self.id,
        statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
        status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER,
        nodes_count=nodes_count,
        timeout=timeout,
        fall_on_error_status=fall_on_error_status,
    )
def wait_for_ready_to_install(self):
    """Block until the cluster reaches READY; re-check once after a grace delay."""
    utils.wait_till_cluster_is_in_status(
        client=self.api_client,
        cluster_id=self.id,
        statuses=[consts.ClusterStatus.READY],
        timeout=consts.READY_TIMEOUT,
    )
    # This code added due to BZ:1909997, temporarily checking if help to prevent unexpected failure
    time.sleep(10)
    utils.wait_till_cluster_is_in_status(
        client=self.api_client,
        cluster_id=self.id,
        statuses=[consts.ClusterStatus.READY],
        timeout=consts.READY_TIMEOUT,
    )
def is_in_cancelled_status(self):
    """True when the cluster is in CANCELLED status."""
    return utils.is_cluster_in_status(
        client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.CANCELLED]
    )
def is_in_error(self):
    """True when the cluster is in ERROR status."""
    return utils.is_cluster_in_status(
        client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.ERROR]
    )
def is_finalizing(self):
    """True when the cluster is in FINALIZING status."""
    return utils.is_cluster_in_status(
        client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.FINALIZING]
    )
def is_installing(self):
    """True when the cluster is in INSTALLING status."""
    return utils.is_cluster_in_status(
        client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLING]
    )
def reset_install(self):
    """Reset the cluster's installation."""
    self.api_client.reset_cluster_install(cluster_id=self.id)
def is_in_insufficient_status(self):
    """True when the cluster is in INSUFFICIENT status."""
    return utils.is_cluster_in_status(
        client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSUFFICIENT]
    )
def wait_for_hosts_to_install(
    self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True, nodes_count: int = None
):
    """Block until all hosts report installed.

    NOTE(review): passes ClusterStatus.INSTALLED as a *host* status —
    presumably the string values coincide; verify against consts.
    """
    test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
        client=self.api_client,
        cluster_id=self.id,
        statuses=[consts.ClusterStatus.INSTALLED],
        nodes_count=nodes_count or self.nodes.nodes_count,
        timeout=timeout,
        fall_on_error_status=fall_on_error_status,
    )
def wait_for_operators_to_finish(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True):
    """Block until builtin and OLM operators finish.

    Builtins must become AVAILABLE (FAILED also accepted when
    fall_on_error_status is False); OLM operators may end AVAILABLE or
    FAILED, with failures raised per `fall_on_error_status`.
    """
    operators = self.get_operators()
    if fall_on_error_status:
        statuses = [consts.OperatorStatus.AVAILABLE]
    else:
        statuses = [consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED]
    operators_utils.wait_till_all_operators_are_in_status(
        client=self.api_client,
        cluster_id=self.id,
        operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.BUILTIN)),
        operator_types=[OperatorType.BUILTIN],
        statuses=statuses,
        timeout=timeout,
        fall_on_error_status=False,
    )
    operators_utils.wait_till_all_operators_are_in_status(
        client=self.api_client,
        cluster_id=self.id,
        operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.OLM)),
        operator_types=[OperatorType.OLM],
        statuses=[consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED],
        timeout=timeout,
        fall_on_error_status=fall_on_error_status,
    )
def is_operator_in_status(self, operator_name, status):
    """True when the named operator currently has `status`."""
    return operators_utils.is_operator_in_status(
        operators=self.get_operators(), operator_name=operator_name, status=status
    )
def wait_for_install(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT):
    """Block until the cluster reaches INSTALLED status."""
    utils.wait_till_cluster_is_in_status(
        client=self.api_client,
        cluster_id=self.id,
        statuses=[consts.ClusterStatus.INSTALLED],
        timeout=timeout,
    )
def _set_hostnames_and_roles(self):
    """Name each discovered host after its matching node and derive its role from the name.

    Single-node clusters get no explicit role. Raises AssertionError when a
    host cannot be matched to any node by MAC.
    """
    cluster_id = self.id
    hosts = self.to_cluster_hosts(self.api_client.get_cluster_hosts(cluster_id))
    nodes = self.nodes.get_nodes(refresh=True)
    for host in hosts:
        if host.has_hostname():
            continue
        name = self.find_matching_node_name(host, nodes)
        assert name is not None, (
            f"Failed to find matching node for host with mac address {host.macs()}"
            f" nodes: {[(n.name, n.ips, n.macs) for n in nodes]}"
        )
        if self.nodes.nodes_count == 1:
            role = None
        else:
            # role is inferred from the node name ("master" in name => master)
            role = consts.NodeRoles.MASTER if consts.NodeRoles.MASTER in name else consts.NodeRoles.WORKER
        self._infra_env.update_host(host_id=host.get_id(), host_role=role, host_name=name)
def _ha_not_none(self):
    """True when neither the HA mode nor the platform is 'None'."""
    if self._high_availability_mode == consts.HighAvailabilityMode.NONE:
        return False
    return self._config.platform != consts.Platforms.NONE
def download_image(self, iso_download_path: str = None) -> Path:
    """Download the discovery ISO, generating an infra-env first if none exists yet."""
    if self._infra_env is None:
        log.warning("No infra_env found. Generating infra_env and downloading ISO")
        return self.generate_and_download_infra_env(
            iso_download_path=iso_download_path or self._config.iso_download_path,
            iso_image_type=self._config.iso_image_type,
        )
    return self._infra_env.download_image(iso_download_path)
@JunitTestCase()
def prepare_for_installation(self, **kwargs):
    """Bring the cluster to ready-to-install: hostnames, roles, networking, and DNS."""
    super(Cluster, self).prepare_for_installation(**kwargs)
    self.nodes.wait_for_networking()
    self._set_hostnames_and_roles()
    if self._high_availability_mode != consts.HighAvailabilityMode.NONE:
        self.set_host_roles(len(self.nodes.get_masters()), len(self.nodes.get_workers()))
    self.set_network_params(controller=self.nodes.controller)
    # in case of None platform we need to specify dns records before hosts are ready
    if self._config.platform == consts.Platforms.NONE:
        self._configure_load_balancer()
        self.nodes.controller.set_dns_for_user_managed_network()
    elif self._high_availability_mode == consts.HighAvailabilityMode.NONE:
        main_cidr = self.get_primary_machine_cidr()
        ip = Cluster.get_ip_for_single_node(self.api_client, self.id, main_cidr)
        self.nodes.controller.set_single_node_ip(ip)
        self.nodes.controller.set_dns(api_vip=ip, ingress_vip=ip)
    self.wait_for_ready_to_install()
    # in case of regular cluster, need to set dns after vips exits
    # in our case when nodes are ready, vips will be there for sure
    if self._ha_not_none():
        vips_info = self.__class__.get_vips_from_cluster(self.api_client, self.id)
        self.nodes.controller.set_dns(api_vip=vips_info["api_vip"], ingress_vip=vips_info["ingress_vip"])
def download_kubeconfig_no_ingress(self, kubeconfig_path: str = None):
    """Download the kubeconfig that bypasses ingress (pre-install variant)."""
    self.api_client.download_kubeconfig_no_ingress(self.id, kubeconfig_path or self._config.kubeconfig_path)
def download_kubeconfig(self, kubeconfig_path: str = None):
    """Download the cluster kubeconfig."""
    self.api_client.download_kubeconfig(self.id, kubeconfig_path or self._config.kubeconfig_path)
def download_installation_logs(self, cluster_tar_path):
    """Download the cluster's installation logs as a tarball."""
    self.api_client.download_cluster_logs(self.id, cluster_tar_path)
    def get_install_config(self):
        """Return the cluster install-config, parsed from YAML into a dict."""
        return yaml.safe_load(self.api_client.get_cluster_install_config(self.id))
    def get_admin_credentials(self):
        """Return the cluster admin credentials from the API."""
        return self.api_client.get_cluster_admin_credentials(self.id)
    def register_dummy_host(self):
        """Register a host with a fixed fake ID — used by tests that need a placeholder host."""
        dummy_host_id = "b164df18-0ff1-4b85-9121-059f10f58f71"
        self.api_client.register_host(self.id, dummy_host_id)
    def host_get_next_step(self, host_id):
        """Return the next agent step instruction for *host_id*."""
        return self.api_client.host_get_next_step(self.id, host_id)
    def host_post_step_result(self, host_id, step_type, step_id, exit_code, output):
        """Post an agent step result (exit code + output) for *host_id*."""
        self.api_client.host_post_step_result(
            self.id, host_id, step_type=step_type, step_id=step_id, exit_code=exit_code, output=output
        )
    def host_update_install_progress(self, host_id, current_stage, progress_info=None):
        """Report installation progress for *host_id* at *current_stage*."""
        self.api_client.host_update_progress(self.id, host_id, current_stage, progress_info=progress_info)
    def host_complete_install(self):
        """Mark the cluster installation as successfully completed."""
        self.api_client.complete_cluster_installation(cluster_id=self.id, is_success=True)
    def setup_nodes(self, nodes, infra_env_config: BaseInfraEnvConfig):
        """Generate an infra-env, boot *nodes* from its image, and wait for discovery.

        :return: mapping of node names to cluster hosts (see
            ``nodes.create_nodes_cluster_hosts_mapping``).
        """
        self._infra_env = InfraEnv.generate(
            self.api_client, infra_env_config, iso_image_type=self._config.iso_image_type
        )
        self._infra_env.download_image(iso_download_path=self._config.iso_download_path)
        nodes.start_all()
        self.wait_until_hosts_are_discovered()
        return nodes.create_nodes_cluster_hosts_mapping(cluster=self)
    def wait_for_cluster_validation(
        self, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2
    ):
        """Poll until the given cluster validation reaches one of *statuses*.

        Logs the last observed validation value and re-raises on timeout or
        any other failure.
        """
        log.info("Wait until cluster %s validation %s is in status %s", self.id, validation_id, statuses)
        try:
            waiting.wait(
                lambda: self.is_cluster_validation_in_status(
                    validation_section=validation_section, validation_id=validation_id, statuses=statuses
                ),
                timeout_seconds=timeout,
                sleep_seconds=interval,
                waiting_for=f"Cluster validation to be in status {statuses}",
            )
        except BaseException:
            # Surface the current validation value before propagating the failure.
            log.error(
                "Cluster validation status is: %s",
                utils.get_cluster_validation_value(
                    self.api_client.cluster_get(self.id), validation_section, validation_id
                ),
            )
            raise
    def is_cluster_validation_in_status(self, validation_section, validation_id, statuses):
        """Return True if the cluster validation value is in *statuses*.

        On any failure to fetch the value, the exception is logged and the
        method falls through to an implicit None (falsy), so pollers treat it
        as "not there yet" rather than aborting.
        """
        log.info("Is cluster %s validation %s in status %s", self.id, validation_id, statuses)
        try:
            return (
                utils.get_cluster_validation_value(
                    self.api_client.cluster_get(self.id), validation_section, validation_id
                )
                in statuses
            )
        except BaseException:
            log.exception("Failed to get cluster %s validation info", self.id)
    def wait_for_host_validation(
        self, host_id, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2
    ):
        """Poll until the given host validation reaches one of *statuses*.

        Logs the last observed validation value and re-raises on timeout or
        any other failure.
        """
        log.info("Wait until host %s validation %s is in status %s", host_id, validation_id, statuses)
        try:
            waiting.wait(
                lambda: self.is_host_validation_in_status(
                    host_id=host_id,
                    validation_section=validation_section,
                    validation_id=validation_id,
                    statuses=statuses,
                ),
                timeout_seconds=timeout,
                sleep_seconds=interval,
                waiting_for=f"Host validation to be in status {statuses}",
            )
        except BaseException:
            # Surface the current validation value before propagating the failure.
            log.error(
                "Host validation status is: %s",
                utils.get_host_validation_value(
                    self.api_client.cluster_get(self.id), host_id, validation_section, validation_id
                ),
            )
            raise
    def is_host_validation_in_status(self, host_id, validation_section, validation_id, statuses):
        """Return True if the host validation value is in *statuses*.

        Mirrors ``is_cluster_validation_in_status``: fetch failures are logged
        and yield an implicit None (falsy) instead of raising.
        """
        log.info("Is host %s validation %s in status %s", host_id, validation_id, statuses)
        try:
            return (
                utils.get_host_validation_value(
                    self.api_client.cluster_get(self.id), host_id, validation_section, validation_id
                )
                in statuses
            )
        except BaseException:
            log.exception("Failed to get cluster %s validation info", self.id)
    def wait_for_cluster_to_be_in_installing_pending_user_action_status(self):
        """Wait until the cluster reaches INSTALLING_PENDING_USER_ACTION."""
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.INSTALLING_PENDING_USER_ACTION],
            timeout=consts.PENDING_USER_ACTION_TIMEOUT,
        )
    def wait_for_cluster_to_be_in_installing_status(self):
        """Wait until the cluster reaches INSTALLING."""
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.INSTALLING],
            timeout=consts.START_CLUSTER_INSTALLATION_TIMEOUT,
        )
    def wait_for_cluster_to_be_in_finalizing_status(self):
        """Wait until the cluster reaches FINALIZING (or INSTALLED); abort early on ERROR."""
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=[consts.ClusterStatus.FINALIZING, consts.ClusterStatus.INSTALLED],
            timeout=consts.CLUSTER_INSTALLATION_TIMEOUT,
            break_statuses=[consts.ClusterStatus.ERROR],
        )
    def wait_for_cluster_to_be_in_status(self, statuses, timeout=consts.ERROR_TIMEOUT):
        """Wait until the cluster reaches any of the given *statuses*."""
        utils.wait_till_cluster_is_in_status(
            client=self.api_client,
            cluster_id=self.id,
            statuses=statuses,
            timeout=timeout,
        )
    @classmethod
    def reset_cluster_and_wait_for_ready(cls, cluster):
        """Reset a failed/partial install and bring *cluster* back to ready-to-install."""
        # Reset cluster install
        cluster.reset_install()
        assert cluster.is_in_insufficient_status()
        # Reboot required nodes into ISO
        cluster.reboot_required_nodes_into_iso_after_reset()
        # Wait for hosts to be rediscovered
        cluster.wait_until_hosts_are_discovered()
        cluster.wait_for_ready_to_install()
    def get_events(self, host_id="", infra_env_id=""):
        """Deprecated shim: delegate to ``EventsHandler.get_events``."""
        warnings.warn(
            "Cluster.get_events is now deprecated, use EventsHandler.get_events instead",
            PendingDeprecationWarning,
        )
        handler = EventsHandler(self.api_client)
        return handler.get_events(host_id, self.id, infra_env_id)
    def _configure_load_balancer(self):
        """Point the terraform-managed load balancer at the cluster's master/worker IPs.

        The LB address is the second IP of the primary machine CIDR
        (network address + 1).
        """
        main_cidr = self.get_primary_machine_cidr()
        secondary_cidr = self.nodes.controller.get_provisioning_cidr()

        # Masters may have addresses on both the primary and provisioning networks.
        master_ips = self.get_master_ips(self.api_client, self.id, main_cidr) + self.get_master_ips(
            self.api_client, self.id, secondary_cidr
        )
        worker_ips = self.get_worker_ips(self.api_client, self.id, main_cidr)

        load_balancer_ip = str(IPNetwork(main_cidr).ip + 1)

        tf = terraform_utils.TerraformUtils(working_dir=self.nodes.controller.tf_folder)
        lb_controller = LoadBalancerController(tf)
        lb_controller.set_load_balancing_config(load_balancer_ip, master_ips, worker_ips)
@classmethod
def _get_namespace_index(cls, libvirt_network_if):
# Hack to retrieve namespace index - does not exist in tests
matcher = re.match(r"^tt(\d+)$", libvirt_network_if)
return int(matcher.groups()[0]) if matcher is not None else 0
    def wait_for_event(self, event_to_find, reference_time, params_list=None, host_id="", infra_env_id="", timeout=10):
        """Deprecated shim: delegate to ``EventsHandler.wait_for_event``."""
        warnings.warn(
            "Cluster.wait_for_event is now deprecated, use EventsHandler.wait_for_event instead",
            PendingDeprecationWarning,
        )
        handler = EventsHandler(self.api_client)
        return handler.wait_for_event(
            event_to_find, reference_time, params_list, host_id, infra_env_id, self.id, timeout
        )
    @staticmethod
    def get_inventory_host_nics_data(host: dict, ipv4_first=True):
        """Flatten a host's inventory NICs into dicts of name/model/mac/ip/speed.

        :param host: host dict with a JSON-encoded "inventory" field.
        :param ipv4_first: when True, prefer the first IPv4 address of each
            interface; otherwise prefer IPv6. Interfaces with no address get
            ``"ip": None``.
        """
        def get_network_interface_ip(interface):
            addresses = (
                interface.ipv4_addresses + interface.ipv6_addresses
                if ipv4_first
                else interface.ipv6_addresses + interface.ipv4_addresses
            )
            # Addresses are CIDR strings ("1.2.3.4/24"); strip the prefix length.
            return addresses[0].split("/")[0] if len(addresses) > 0 else None

        inventory = models.Inventory(**json.loads(host["inventory"]))
        interfaces_list = [models.Interface(**interface) for interface in inventory.interfaces]
        return [
            {
                "name": interface.name,
                "model": interface.product,
                "mac": interface.mac_address,
                "ip": get_network_interface_ip(interface),
                "speed": interface.speed_mbps,
            }
            for interface in interfaces_list
        ]
    @staticmethod
    def get_hosts_nics_data(hosts: list, ipv4_first=True):
        """Return per-host NIC data lists; see ``get_inventory_host_nics_data``."""
        return [Cluster.get_inventory_host_nics_data(h, ipv4_first=ipv4_first) for h in hosts]
    @staticmethod
    def get_cluster_hosts(cluster: models.cluster.Cluster) -> List[ClusterHost]:
        """Wrap each host of an API cluster model in a ``ClusterHost``."""
        return [ClusterHost(h) for h in cluster.hosts]
    @staticmethod
    def to_cluster_hosts(hosts: List[Dict[str, Any]]) -> List[ClusterHost]:
        """Convert raw host dicts into ``ClusterHost`` wrappers via ``models.Host``."""
        return [ClusterHost(models.Host(**h)) for h in hosts]
    def get_cluster_cidrs(self, hosts: List[ClusterHost]) -> Set[str]:
        """Collect the set of CIDRs covering any host IP (v4 and/or v6 per node config)."""
        cidrs = set()

        for host in hosts:
            ips = []
            if self.nodes.is_ipv4:
                ips += host.ipv4_addresses()
            if self.nodes.is_ipv6:
                ips += host.ipv6_addresses()

            for host_ip in ips:
                cidr = network_utils.get_cidr_by_interface(host_ip)
                cidrs.add(cidr)

        return cidrs
    def get_cluster_matching_cidrs(self, hosts: List[ClusterHost]) -> Set[str]:
        """Return the CIDRs in which *every* given host has at least one interface.

        A CIDR is added once per host that matches; the inner loop breaks on
        the first host without an interface in the CIDR, leaving that CIDR out
        only if the non-matching host comes before the matchers — NOTE(review):
        this looks like it intends "all hosts match" semantics; confirm the
        break/add placement against the original indentation.
        """
        cluster_cidrs = self.get_cluster_cidrs(hosts)
        matching_cidrs = set()

        for cidr in cluster_cidrs:
            for host in hosts:
                interfaces = []
                if self.nodes.is_ipv4:
                    interfaces += host.ipv4_addresses()
                if self.nodes.is_ipv6:
                    interfaces += host.ipv6_addresses()

                if not network_utils.any_interface_in_cidr(interfaces, cidr):
                    break

                matching_cidrs.add(cidr)

        return matching_cidrs
    @staticmethod
    def get_ip_for_single_node(client, cluster_id, machine_cidr, ipv4_first=True):
        """Return the first IP of the single host that falls inside *machine_cidr*.

        :raises Exception: if the cluster has no hosts or no NIC IP is in the CIDR.
        """
        cluster_info = client.cluster_get(cluster_id).to_dict()
        if len(cluster_info["hosts"]) == 0:
            raise Exception("No host found")
        network = IPNetwork(machine_cidr)
        interfaces = Cluster.get_inventory_host_nics_data(cluster_info["hosts"][0], ipv4_first=ipv4_first)
        for intf in interfaces:
            ip = intf["ip"]
            if IPAddress(ip) in network:
                return ip
        raise Exception("IP for single node not found")
@staticmethod
def get_ips_for_role(client, cluster_id, network, role):
cluster_info = client.cluster_get(cluster_id).to_dict()
ret = []
net = IPNetwork(network)
hosts_interfaces = Cluster.get_hosts_nics_data([h for h in cluster_info["hosts"] if h["role"] == role])
for host_interfaces in hosts_interfaces:
for intf in host_interfaces:
ip = IPAddress(intf["ip"])
if ip in net:
ret = ret + [intf["ip"]]
return ret
    @staticmethod
    def get_master_ips(client, cluster_id, network):
        """Return master-node IPs that fall inside *network*."""
        return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.MASTER)
    @staticmethod
    def get_worker_ips(client, cluster_id, network):
        """Return worker-node IPs that fall inside *network*."""
        return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.WORKER)
    @staticmethod
    def get_vips_from_cluster(client, cluster_id):
        """Return the cluster's API and ingress VIPs as a dict."""
        cluster_info = client.cluster_get(cluster_id)
        return dict(api_vip=cluster_info.api_vip, ingress_vip=cluster_info.ingress_vip)
def get_host_disks(self, host, filter=None):
hosts = self.get_hosts()
selected_host = [h for h in hosts if h["id"] == host["id"]]
disks = json.loads(selected_host[0]["inventory"])["disks"]
if not filter:
return [disk for disk in disks]
else:
return [disk for disk in disks if filter(disk)]
    def get_inventory_host_ips_data(self, host: dict):
        """Return just the IPs of the host's inventory NICs (may contain None)."""
        nics = self.get_inventory_host_nics_data(host)
        return [nic["ip"] for nic in nics]
    # needed for None platform and single node
    # we need to get ip where api is running
    def get_kube_api_ip(self, hosts):
        """Return the first host IP on which kube-api answers /readyz, else None."""
        for host in hosts:
            for ip in self.get_inventory_host_ips_data(host):
                if self.is_kubeapi_service_ready(ip):
                    return ip
    def get_api_vip(self, cluster):
        """Return the cluster's API VIP, probing master nodes when none is set.

        For user-managed networking (no VIP assigned), the masters are scanned
        for a live kube-api endpoint instead.
        """
        cluster = cluster or self.get_details()
        api_vip = cluster.api_vip

        if not api_vip and cluster.user_managed_networking:
            log.info("API VIP is not set, searching for api ip on masters")
            masters = self.get_hosts_by_role(consts.NodeRoles.MASTER, hosts=cluster.to_dict()["hosts"])
            api_vip = self._wait_for_api_vip(masters)

        log.info("api vip is %s", api_vip)
        return api_vip
    def _wait_for_api_vip(self, hosts, timeout=180):
        """Enable some grace time for waiting for API's availability."""
        return waiting.wait(
            lambda: self.get_kube_api_ip(hosts=hosts), timeout_seconds=timeout, sleep_seconds=5, waiting_for="API's IP"
        )
    def find_matching_node_name(self, host: ClusterHost, nodes: List[Node]) -> Union[str, None]:
        """Return the name of the node matching *host*, or None.

        Primary match is by MAC address (unique). For static-IP setups the
        terraform-generated name-to-MACs mapping is consulted as a fallback.
        """
        # Looking for node matches the given host by its mac address (which is unique)
        for node in nodes:
            for mac in node.macs:
                if mac.lower() in host.macs():
                    return node.name

        # IPv6 static ips
        if self._config.is_static_ip:
            mappings = static_network.get_name_to_mac_addresses_mapping(self.nodes.controller.tf_folder)
            for mac in host.macs():
                for name, macs in mappings.items():
                    if mac in macs:
                        return name

        return None
    @staticmethod
    def is_kubeapi_service_ready(ip_or_dns):
        """Validate if kube-api is ready on given address.

        Returns False for any connection/HTTP failure; TLS verification is
        intentionally disabled (self-signed bootstrap certificates).
        """
        with contextlib.suppress(ValueError):
            # IPv6 addresses need to be surrounded with square-brackets
            # to differentiate them from domain names
            if ipaddress.ip_address(ip_or_dns).version == 6:
                ip_or_dns = f"[{ip_or_dns}]"

        try:
            response = requests.get(f"https://{ip_or_dns}:6443/readyz", verify=False, timeout=1)
            return response.ok
        except BaseException:
            return False
    def wait_and_kill_installer(self, host):
        """Wait for *host* to start installing, then kill its installer to simulate an error."""
        # Wait for specific host to be in installing in progress
        self.wait_for_specific_host_status(host=host, statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS])
        # Kill installer to simulate host error
        selected_node = self.nodes.get_node_from_cluster_host(host)
        selected_node.kill_installer()
def get_api_vip_from_cluster(api_client, cluster_info: Union[dict, models.cluster.Cluster], pull_secret):
    """Resolve the API VIP of an existing cluster by wrapping it in a throwaway Cluster.

    Deprecated — see the warning below. Accepts either a raw dict or a
    ``models.cluster.Cluster``.
    """
    import warnings

    from tests.config import ClusterConfig, InfraEnvConfig

    warnings.warn(
        "Soon get_api_vip_from_cluster will be deprecated. Avoid using or adding new functionality to "
        "this function. The function and solution for that case have not been determined yet. It might be "
        "on another module, or as a classmethod within Cluster class."
        " For more information see https://issues.redhat.com/browse/MGMT-4975",
        PendingDeprecationWarning,
    )
    if isinstance(cluster_info, dict):
        cluster_info = models.cluster.Cluster(**cluster_info)
    cluster = Cluster(
        api_client=api_client,
        infra_env_config=InfraEnvConfig(),
        config=ClusterConfig(
            cluster_name=ClusterName(cluster_info.name),
            pull_secret=pull_secret,
            ssh_public_key=cluster_info.ssh_public_key,
            cluster_id=cluster_info.id,
        ),
        nodes=None,
    )
    return cluster.get_api_vip(cluster=cluster_info)
|
import codecs
import csv
import datetime
import logging
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from todo.models import Task, TaskList
log = logging.getLogger(__name__)
class CSVImporter:
    """Core upsert functionality for CSV import, for re-use by `import_csv` management command, web UI and tests.

    Supplies a detailed log of what was and was not imported at the end. See README for usage notes.
    """

    def __init__(self):
        # Result accumulators returned by `upsert` and inspected by callers/tests.
        self.errors = []
        self.upserts = []
        self.summaries = []
        self.line_count = 0
        self.upsert_count = 0

    def upsert(self, fileobj, as_string_obj=False):
        """Expects a file *object*, not a file path. This is important because this has to work for both
        the management command and the web uploader; the web uploader will pass in in-memory file
        with no path!

        Header row is:
        Title, Group, Task List, Created Date, Due Date, Completed, Created By, Assigned To, Note, Priority
        """
        if as_string_obj:
            # fileobj comes from mgmt command
            csv_reader = csv.DictReader(fileobj)
        else:
            # fileobj comes from browser upload (in-memory)
            csv_reader = csv.DictReader(codecs.iterdecode(fileobj, "utf-8"))

        # DI check: Do we have expected header row?
        header = csv_reader.fieldnames
        expected = [
            "Title",
            "Group",
            "Task List",
            "Created By",
            "Created Date",
            "Due Date",
            "Completed",
            "Assigned To",
            "Note",
            "Priority",
        ]
        if header != expected:
            self.errors.append(
                f"Inbound data does not have expected columns.\nShould be: {expected}"
            )
            return

        for row in csv_reader:
            self.line_count += 1

            newrow = self.validate_row(row)
            if newrow:
                # newrow at this point is fully validated, and all FK relations exist,
                # e.g. `newrow.get("Assigned To")`, is a Django User instance.
                assignee = newrow.get("Assigned To") if newrow.get("Assigned To") else None
                created_at = (
                    newrow.get("Created Date")
                    if newrow.get("Created Date")
                    else datetime.datetime.today()
                )
                due_date = newrow.get("Due Date") if newrow.get("Due Date") else None
                priority = newrow.get("Priority") if newrow.get("Priority") else None

                obj, created = Task.objects.update_or_create(
                    created_by=newrow.get("Created By"),
                    task_list=newrow.get("Task List"),
                    title=newrow.get("Title"),
                    defaults={
                        "assigned_to": assignee,
                        "completed": newrow.get("Completed"),
                        "created_at": created_at,
                        "due_date": due_date,
                        "note": newrow.get("Note"),
                        "priority": priority,
                    },
                )
                self.upsert_count += 1
                msg = (
                    f'Upserted task {obj.id}: "{obj.title}"'
                    f' in list "{obj.task_list}" (group "{obj.task_list.group}")'
                )
                self.upserts.append(msg)

        self.summaries.append(f"Processed {self.line_count} CSV rows")
        self.summaries.append(f"Upserted {self.upsert_count} rows")
        self.summaries.append(f"Skipped {self.line_count - self.upsert_count} rows")

        return {"summaries": self.summaries, "upserts": self.upserts, "errors": self.errors}

    def validate_row(self, row):
        """Perform data integrity checks and set default values. Returns a valid object for insertion, or False.
        Errors are stored for later display. Intentionally not broken up into separate validator functions because
        there are interdependencies, such as checking for existing `creator` in one place and then using
        that creator for group membership check in others."""

        row_errors = []

        # FIX: the f-string expressions below previously reused the outer double
        # quote (e.g. f"... {row.get("Created By")}"), which is a SyntaxError on
        # Python < 3.12 (quote reuse is only allowed by PEP 701). Inner quotes
        # are now single, matching the rest of the codebase.

        # #######################
        # Task creator must exist
        if not row.get("Created By"):
            # Plain string — no placeholders, so no f-prefix needed.
            msg = "Missing required task creator."
            row_errors.append(msg)

        creator = get_user_model().objects.filter(username=row.get("Created By")).first()
        if not creator:
            msg = f"Invalid task creator {row.get('Created By')}"
            row_errors.append(msg)

        # #######################
        # If specified, Assignee must exist
        assignee = None  # Perfectly valid
        if row.get("Assigned To"):
            assigned = get_user_model().objects.filter(username=row.get("Assigned To"))
            if assigned.exists():
                assignee = assigned.first()
            else:
                msg = f"Missing or invalid task assignee {row.get('Assigned To')}"
                row_errors.append(msg)

        # #######################
        # Group must exist
        try:
            target_group = Group.objects.get(name=row.get("Group"))
        except Group.DoesNotExist:
            msg = f"Could not find group {row.get('Group')}."
            row_errors.append(msg)
            target_group = None

        # #######################
        # Task creator must be in the target group
        if creator and target_group not in creator.groups.all():
            msg = f"{creator} is not in group {target_group}"
            row_errors.append(msg)

        # #######################
        # Assignee must be in the target group
        if assignee and target_group not in assignee.groups.all():
            msg = f"{assignee} is not in group {target_group}"
            row_errors.append(msg)

        # #######################
        # Task list must exist in the target group
        try:
            tasklist = TaskList.objects.get(name=row.get("Task List"), group=target_group)
            row["Task List"] = tasklist
        except TaskList.DoesNotExist:
            msg = f"Task list {row.get('Task List')} in group {target_group} does not exist"
            row_errors.append(msg)

        # #######################
        # Validate Dates
        datefields = ["Due Date", "Created Date"]
        for datefield in datefields:
            datestring = row.get(datefield)
            if datestring:
                valid_date = self.validate_date(datestring)
                if valid_date:
                    row[datefield] = valid_date
                else:
                    msg = f"Could not convert {datefield} {datestring} to valid date instance"
                    row_errors.append(msg)

        # #######################
        # Group membership checks have passed
        row["Created By"] = creator
        row["Group"] = target_group
        if assignee:
            row["Assigned To"] = assignee

        # Set Completed
        row["Completed"] = row["Completed"] == "Yes"

        # #######################
        if row_errors:
            self.errors.append({self.line_count: row_errors})
            return False

        # No errors:
        return row

    def validate_date(self, datestring):
        """Inbound date string from CSV translates to a valid python date(time), or False."""
        try:
            date_obj = datetime.datetime.strptime(datestring, "%Y-%m-%d")
            return date_obj
        except ValueError:
            return False
| import codecs
import csv
import datetime
import logging
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from todo.models import Task, TaskList
log = logging.getLogger(__name__)
class CSVImporter:
    """Core upsert functionality for CSV import, for re-use by `import_csv` management command, web UI and tests.

    Supplies a detailed log of what was and was not imported at the end. See README for usage notes.
    """

    def __init__(self):
        # Result accumulators returned by `upsert` and inspected by callers/tests.
        self.errors = []
        self.upserts = []
        self.summaries = []
        self.line_count = 0
        self.upsert_count = 0

    def upsert(self, fileobj, as_string_obj=False):
        """Expects a file *object*, not a file path. This is important because this has to work for both
        the management command and the web uploader; the web uploader will pass in in-memory file
        with no path!

        Header row is:
        Title, Group, Task List, Created Date, Due Date, Completed, Created By, Assigned To, Note, Priority
        """
        if as_string_obj:
            # fileobj comes from mgmt command
            csv_reader = csv.DictReader(fileobj)
        else:
            # fileobj comes from browser upload (in-memory)
            csv_reader = csv.DictReader(codecs.iterdecode(fileobj, "utf-8"))

        # DI check: Do we have expected header row?
        header = csv_reader.fieldnames
        expected = [
            "Title",
            "Group",
            "Task List",
            "Created By",
            "Created Date",
            "Due Date",
            "Completed",
            "Assigned To",
            "Note",
            "Priority",
        ]
        if header != expected:
            self.errors.append(
                f"Inbound data does not have expected columns.\nShould be: {expected}"
            )
            return

        for row in csv_reader:
            self.line_count += 1

            newrow = self.validate_row(row)
            if newrow:
                # newrow at this point is fully validated, and all FK relations exist,
                # e.g. `newrow.get("Assigned To")`, is a Django User instance.
                assignee = newrow.get("Assigned To") if newrow.get("Assigned To") else None
                created_at = (
                    newrow.get("Created Date")
                    if newrow.get("Created Date")
                    else datetime.datetime.today()
                )
                due_date = newrow.get("Due Date") if newrow.get("Due Date") else None
                priority = newrow.get("Priority") if newrow.get("Priority") else None

                # Upsert keyed on (creator, task list, title); everything else is updated.
                obj, created = Task.objects.update_or_create(
                    created_by=newrow.get("Created By"),
                    task_list=newrow.get("Task List"),
                    title=newrow.get("Title"),
                    defaults={
                        "assigned_to": assignee,
                        "completed": newrow.get("Completed"),
                        "created_at": created_at,
                        "due_date": due_date,
                        "note": newrow.get("Note"),
                        "priority": priority,
                    },
                )
                self.upsert_count += 1
                msg = (
                    f'Upserted task {obj.id}: "{obj.title}"'
                    f' in list "{obj.task_list}" (group "{obj.task_list.group}")'
                )
                self.upserts.append(msg)

        self.summaries.append(f"Processed {self.line_count} CSV rows")
        self.summaries.append(f"Upserted {self.upsert_count} rows")
        self.summaries.append(f"Skipped {self.line_count - self.upsert_count} rows")

        return {"summaries": self.summaries, "upserts": self.upserts, "errors": self.errors}

    def validate_row(self, row):
        """Perform data integrity checks and set default values. Returns a valid object for insertion, or False.
        Errors are stored for later display. Intentionally not broken up into separate validator functions because
        there are interdependencies, such as checking for existing `creator` in one place and then using
        that creator for group membership check in others."""

        row_errors = []

        # #######################
        # Task creator must exist
        if not row.get("Created By"):
            msg = f"Missing required task creator."
            row_errors.append(msg)

        creator = get_user_model().objects.filter(username=row.get("Created By")).first()
        if not creator:
            msg = f"Invalid task creator {row.get('Created By')}"
            row_errors.append(msg)

        # #######################
        # If specified, Assignee must exist
        assignee = None  # Perfectly valid
        if row.get("Assigned To"):
            assigned = get_user_model().objects.filter(username=row.get("Assigned To"))
            if assigned.exists():
                assignee = assigned.first()
            else:
                msg = f"Missing or invalid task assignee {row.get('Assigned To')}"
                row_errors.append(msg)

        # #######################
        # Group must exist
        try:
            target_group = Group.objects.get(name=row.get("Group"))
        except Group.DoesNotExist:
            msg = f"Could not find group {row.get('Group')}."
            row_errors.append(msg)
            target_group = None

        # #######################
        # Task creator must be in the target group
        if creator and target_group not in creator.groups.all():
            msg = f"{creator} is not in group {target_group}"
            row_errors.append(msg)

        # #######################
        # Assignee must be in the target group
        if assignee and target_group not in assignee.groups.all():
            msg = f"{assignee} is not in group {target_group}"
            row_errors.append(msg)

        # #######################
        # Task list must exist in the target group
        try:
            tasklist = TaskList.objects.get(name=row.get("Task List"), group=target_group)
            row["Task List"] = tasklist
        except TaskList.DoesNotExist:
            msg = f"Task list {row.get('Task List')} in group {target_group} does not exist"
            row_errors.append(msg)

        # #######################
        # Validate Dates
        datefields = ["Due Date", "Created Date"]
        for datefield in datefields:
            datestring = row.get(datefield)
            if datestring:
                valid_date = self.validate_date(datestring)
                if valid_date:
                    row[datefield] = valid_date
                else:
                    msg = f"Could not convert {datefield} {datestring} to valid date instance"
                    row_errors.append(msg)

        # #######################
        # Group membership checks have passed
        row["Created By"] = creator
        row["Group"] = target_group
        if assignee:
            row["Assigned To"] = assignee

        # Set Completed
        row["Completed"] = row["Completed"] == "Yes"

        # #######################
        if row_errors:
            self.errors.append({self.line_count: row_errors})
            return False

        # No errors:
        return row

    def validate_date(self, datestring):
        """Inbound date string from CSV translates to a valid python date."""
        try:
            date_obj = datetime.datetime.strptime(datestring, "%Y-%m-%d")
            return date_obj
        except ValueError:
            return False
|
import sys
import math
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../../")
from sdc_etl_libs.sdc_dataframe.Dataframe import *
import pandas as pd
import numpy as np
import json
import pytest
def test_generate_insert_query_ddl(mocker):
    """generate_insert_query_ddl emits the column list + SELECT aliases, wrapping JSON columns in PARSE_JSON.

    NOTE(review): `mocker` (pytest-mock) is requested but unused — presumably a leftover fixture.
    """
    test_schema = """
    {
    "namespace": "TimeControl",
    "type": "object",
    "name": "languages",
    "country_code": "USA",
    "data_sink": {"type":"snowflake", "database": "HRIS_DATA", "table_name": "LANGUAGES", "schema": "TIMECONTROL"},
    "data_source": {"type": "api", "base_url": "https://smiledirectclub.timecontrol.net/api/v1"},
    "fields": [
    {"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
    {"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
    {"name":"NAME","type":{"type":"string"}},
    {"name":"DESCRIPTION","type":{"type":"string"}},
    {"name":"CULTURE","type":{"type":"string"}},
    {"name":"_SF_INSERTEDDATETIME","type":{"type":"string","logical_type":"datetime", "add_column": true }}
    ]
    }"""

    test_data = """
    [{"_metadata": {"links": [{"id": "9",
    "rel": "self",
    "href": "/api/v1/languages/9",
    "code": "Ceština"}]},
    "Key": 9,
    "Name": "Ceština",
    "Description": "Czech",
    "Culture": "cs"},
    {"_metadata": {"links": [{"id": "10",
    "rel": "self",
    "href": "/api/v1/languages/10",
    "code": "This"}]},
    "Key": 9,
    "Name": "This",
    "Description": "Is",
    "Culture": "ze"}]
    """

    df = Dataframe(SDCDFTypes.PANDAS, test_schema)
    df.load_data(json.loads(test_data))
    query = df.generate_insert_query_ddl(df.df)
    # Columns come out alphabetically sorted; _METADATA is JSON so it is PARSE_JSON-wrapped.
    assert query == '("CULTURE", "DESCRIPTION", "KEY", "NAME", "_METADATA", "_SF_INSERTEDDATETIME") select Column1 as "CULTURE", Column2 as "DESCRIPTION", Column3 as "KEY", Column4 as "NAME", PARSE_JSON(Column5) as "_METADATA", Column6 as "_SF_INSERTEDDATETIME" from values '
def test_generate_insert_query_values(mocker):
    """generate_insert_query_values renders one parenthesized value tuple per row.

    NOTE(review): `mocker` is unused; also the expected string shows embedded
    single quotes in the JSON value that are not escaped — presumably the
    implementation does not quote-escape; confirm against Dataframe.
    """
    test_schema = """
    {
    "namespace": "TimeControl",
    "type": "object",
    "name": "languages",
    "country_code": "USA",
    "data_sink": {"type":"snowflake", "database": "HRIS_DATA", "table_name": "LANGUAGES", "schema": "TIMECONTROL"},
    "data_source": {"type": "api", "base_url": "https://smiledirectclub.timecontrol.net/api/v1"},
    "fields": [
    {"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
    {"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
    {"name":"NAME","type":{"type":"string"}},
    {"name":"DESCRIPTION","type":{"type":"string"}},
    {"name":"CULTURE","type":{"type":"string"}}
    ]
    }"""

    test_data = """
    [{"_metadata": {"links": [{"id": "9",
    "rel": "self",
    "href": "/api/v1/languages/9",
    "code": "Ceština"}]},
    "Key": 9,
    "Name": "Ceština",
    "Description": "Czech",
    "Culture": "cs"},
    {"_metadata": {"links": [{"id": "10",
    "rel": "self",
    "href": "/api/v1/languages/10",
    "code": "This"}]},
    "Key": 9,
    "Name": "This",
    "Description": "Is",
    "Culture": "ze"}]
    """

    df = Dataframe(SDCDFTypes.PANDAS, test_schema)
    df.load_data(json.loads(test_data))
    query = df.generate_insert_query_values(df.df)
    assert query == "('cs', 'Czech', '9', 'Ceština', '{'links': [{'id': '9', 'rel': 'self', 'href': '/api/v1/languages/9', 'code': 'Ceština'}]}'), ('ze', 'Is', '9', 'This', '{'links': [{'id': '10', 'rel': 'self', 'href': '/api/v1/languages/10', 'code': 'This'}]}'), "
def test_convert_columns_to_json(mocker):
    """convert_columns_to_json turns Python-repr dict strings into valid JSON (non-ASCII escaped).

    NOTE(review): `pytest.assume` requires the pytest-assume plugin; `mocker` is unused.
    """
    test_schema = """
    {
    "namespace": "TimeControl",
    "type": "object",
    "name": "languages",
    "country_code": "USA",
    "data_sink": {"type":"snowflake", "database": "HRIS_DATA",
    "table_name": "LANGUAGES", "schema": "TIMECONTROL"},
    "data_source": {"type": "api", "base_url":
    "https://smiledirectclub.timecontrol.net/api/v1"},
    "fields": [
    {"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
    {"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
    {"name":"NAME","type":{"type":"string"}},
    {"name":"DESCRIPTION","type":{"type":"string"}},
    {"name":"CULTURE","type":{"type":"string"}}
    ]
    }"""

    test_data = """
    [{"_metadata": {"links": [{"id": "9",
    "rel": "self",
    "href": "/api/v1/languages/9",
    "code": "Ceština"}]},
    "Key": 9,
    "Name": "Ceština",
    "Description": "Czech",
    "Culture": "cs"},
    {"_metadata": {"links": [{"id": "10",
    "rel": "self",
    "href": "/api/v1/languages/10",
    "code": "This"}]},
    "Key": 9,
    "Name": "This",
    "Description": "Is",
    "Culture": "ze"}]
    """

    df = Dataframe(SDCDFTypes.PANDAS, test_schema)
    df.load_data(json.loads(test_data))
    data_before = df.df["_METADATA"][0]
    df.convert_columns_to_json()
    data_after = df.df["_METADATA"][0]
    pytest.assume(data_before == "{'links': [{'id': '9', 'rel': 'self', 'href': '/api/v1/languages/9', 'code': 'Ceština'}]}")
pytest.assume(data_after == '{"links": [{"id": "9", "rel": "self", "href": "/api/v1/languages/9", "code": "Ce\\u0161tina"}]}') | import sys
import math
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../../")
from sdc_etl_libs.sdc_dataframe.Dataframe import *
import pandas as pd
import numpy as np
import json
import pytest
def test_generate_insert_query_ddl(mocker):
    """generate_insert_query_ddl emits the column list + SELECT aliases, wrapping JSON columns in PARSE_JSON.

    NOTE(review): `mocker` (pytest-mock) is requested but unused — presumably a leftover fixture.
    """
    test_schema = """
    {
    "namespace": "TimeControl",
    "type": "object",
    "name": "languages",
    "country_code": "USA",
    "data_sink": {"type":"snowflake", "database": "HRIS_DATA", "table_name": "LANGUAGES", "schema": "TIMECONTROL"},
    "data_source": {"type": "api", "base_url": "https://smiledirectclub.timecontrol.net/api/v1"},
    "fields": [
    {"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
    {"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
    {"name":"NAME","type":{"type":"string"}},
    {"name":"DESCRIPTION","type":{"type":"string"}},
    {"name":"CULTURE","type":{"type":"string"}},
    {"name":"_SF_INSERTEDDATETIME","type":{"type":"string","logical_type":"datetime", "add_column": true }}
    ]
    }"""

    test_data = """
    [{"_metadata": {"links": [{"id": "9",
    "rel": "self",
    "href": "/api/v1/languages/9",
    "code": "Ceština"}]},
    "Key": 9,
    "Name": "Ceština",
    "Description": "Czech",
    "Culture": "cs"},
    {"_metadata": {"links": [{"id": "10",
    "rel": "self",
    "href": "/api/v1/languages/10",
    "code": "This"}]},
    "Key": 9,
    "Name": "This",
    "Description": "Is",
    "Culture": "ze"}]
    """

    df = Dataframe(SDCDFTypes.PANDAS, test_schema)
    df.load_data(json.loads(test_data))
    query = df.generate_insert_query_ddl(df.df)
    # Columns come out alphabetically sorted; _METADATA is JSON so it is PARSE_JSON-wrapped.
    assert query == '("CULTURE", "DESCRIPTION", "KEY", "NAME", "_METADATA", "_SF_INSERTEDDATETIME") select Column1 as "CULTURE", Column2 as "DESCRIPTION", Column3 as "KEY", Column4 as "NAME", PARSE_JSON(Column5) as "_METADATA", Column6 as "_SF_INSERTEDDATETIME" from values '
def test_generate_insert_query_values(mocker):
    """generate_insert_query_values renders one parenthesized value tuple per row.

    NOTE(review): `mocker` is unused; also the expected string shows embedded
    single quotes in the JSON value that are not escaped — presumably the
    implementation does not quote-escape; confirm against Dataframe.
    """
    test_schema = """
    {
    "namespace": "TimeControl",
    "type": "object",
    "name": "languages",
    "country_code": "USA",
    "data_sink": {"type":"snowflake", "database": "HRIS_DATA", "table_name": "LANGUAGES", "schema": "TIMECONTROL"},
    "data_source": {"type": "api", "base_url": "https://smiledirectclub.timecontrol.net/api/v1"},
    "fields": [
    {"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
    {"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
    {"name":"NAME","type":{"type":"string"}},
    {"name":"DESCRIPTION","type":{"type":"string"}},
    {"name":"CULTURE","type":{"type":"string"}}
    ]
    }"""

    test_data = """
    [{"_metadata": {"links": [{"id": "9",
    "rel": "self",
    "href": "/api/v1/languages/9",
    "code": "Ceština"}]},
    "Key": 9,
    "Name": "Ceština",
    "Description": "Czech",
    "Culture": "cs"},
    {"_metadata": {"links": [{"id": "10",
    "rel": "self",
    "href": "/api/v1/languages/10",
    "code": "This"}]},
    "Key": 9,
    "Name": "This",
    "Description": "Is",
    "Culture": "ze"}]
    """

    df = Dataframe(SDCDFTypes.PANDAS, test_schema)
    df.load_data(json.loads(test_data))
    query = df.generate_insert_query_values(df.df)
    assert query == "('cs', 'Czech', '9', 'Ceština', '{'links': [{'id': '9', 'rel': 'self', 'href': '/api/v1/languages/9', 'code': 'Ceština'}]}'), ('ze', 'Is', '9', 'This', '{'links': [{'id': '10', 'rel': 'self', 'href': '/api/v1/languages/10', 'code': 'This'}]}'), "
def test_convert_columns_to_json(mocker):
    """convert_columns_to_json rewrites JSON-typed columns in place.

    Before conversion a _METADATA cell holds the Python-dict repr (single
    quotes); afterwards it is valid JSON (double quotes, non-ASCII characters
    escaped as \\uXXXX).
    """
    # Same schema as the other Dataframe tests: _METADATA is the only column
    # with logical_type "json", so it is the only one that should change.
    test_schema = """
{
"namespace": "TimeControl",
"type": "object",
"name": "languages",
"country_code": "USA",
"data_sink": {"type":"snowflake", "database": "HRIS_DATA",
"table_name": "LANGUAGES", "schema": "TIMECONTROL"},
"data_source": {"type": "api", "base_url":
"https://smiledirectclub.timecontrol.net/api/v1"},
"fields": [
{"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
{"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
{"name":"NAME","type":{"type":"string"}},
{"name":"DESCRIPTION","type":{"type":"string"}},
{"name":"CULTURE","type":{"type":"string"}}
]
}"""
    test_data = """
[{"_metadata": {"links": [{"id": "9",
"rel": "self",
"href": "/api/v1/languages/9",
"code": "Ceština"}]},
"Key": 9,
"Name": "Ceština",
"Description": "Czech",
"Culture": "cs"},
{"_metadata": {"links": [{"id": "10",
"rel": "self",
"href": "/api/v1/languages/10",
"code": "This"}]},
"Key": 9,
"Name": "This",
"Description": "Is",
"Culture": "ze"}]
"""
    df = Dataframe(SDCDFTypes.PANDAS, test_schema)
    df.load_data(json.loads(test_data))
    # Snapshot the first _METADATA cell before and after the conversion.
    data_before = df.df["_METADATA"][0]
    df.convert_columns_to_json()
    data_after = df.df["_METADATA"][0]
    # pytest.assume (soft assert) so both comparisons are reported even if
    # the first one fails.
    pytest.assume(data_before == "{'links': [{'id': '9', 'rel': 'self', 'href': '/api/v1/languages/9', 'code': 'Ceština'}]}")
    pytest.assume(data_after == '{"links": [{"id": "9", "rel": "self", "href": "/api/v1/languages/9", "code": "Ce\\u0161tina"}]}')
"""BERT Training Script."""
import functools
from typing import Any, Callable, Dict, Tuple, Optional, Type
from absl import logging
from clu import metric_writers
from clu import periodic_actions
from flax import jax_utils
import flax.linen as nn
import jax
from jax.experimental import optimizers as jax_optimizers
import jax.numpy as jnp
import jax.profiler
import ml_collections
import numpy as np
from scenic.dataset_lib import dataset_utils
from scenic.projects.baselines.bert import bert_base_model
from scenic.projects.baselines.bert import train_utils as bert_train_utils
from scenic.train_lib import lr_schedules
from scenic.train_lib import optimizers
from scenic.train_lib import pretrain_utils
from scenic.train_lib import train_utils
def train_step(
    *,
    flax_model: nn.Module,
    train_state: train_utils.TrainState,
    batch: bert_base_model.Batch,
    learning_rate_fn: Callable[[int], float],
    loss_fn: bert_base_model.LossFn,
    metrics_fn: bert_base_model.MetricFn,
    config: ml_collections.ConfigDict,
    debug: Optional[bool] = False
) -> Tuple[train_utils.TrainState, Dict[str, Tuple[float, int]], float]:
  """Runs a single step of training.

  Given the state of the training and a batch of data, computes
  the loss and updates the parameters of the model.

  Note that in this code, the buffers of the first (train_state) and second
  (batch) arguments are donated to the computation.

  Args:
    flax_model: A Flax model.
    train_state: The state of training including the current global_step,
      model_state, rng, and optimizer. The buffer of this argument can be
      donated to the computation.
    batch: A single batch of data. The buffer of this argument can be donated
      to the computation.
    learning_rate_fn: Learning rate scheduler which given the global_step
      generates the learning rate.
    loss_fn: A loss function that given logits, a batch, and parameters of the
      model calculates the loss.
    metrics_fn: A metrics function that given logits and batch of data,
      calculates the metrics as well as the loss.
    config: Configurations of the experiment.
    debug: Whether the debug mode is enabled during training. `debug=True`
      enables model specific logging/storing some values using
      jax.host_callback.

  Returns:
    Updated state of training, computed metrics, and learning rate for logging.
  """
  # Split off the rng carried into the next step, and bind this step's rng to
  # the local device so e.g. dropout masks differ across devices.
  next_rng, step_rng = jax.random.split(train_state.rng)
  dropout_rng = train_utils.bind_rng_to_host_device(
      step_rng, axis_name='batch', bind_to='device')

  def loss_with_aux(params):
    # Forward pass with a mutable 'batch_stats' collection; returns the loss
    # plus the updated model state and the raw model output.
    variables = {'params': params, **train_state.model_state}
    output, updated_model_state = flax_model.apply(
        variables,
        batch,
        mutable=['batch_stats'],
        train=True,
        rngs={'dropout': dropout_rng},
        debug=debug)
    return loss_fn(output, batch, variables['params']), (updated_model_state,
                                                         output)

  step = train_state.global_step
  lr = learning_rate_fn(step)
  grad_fn = jax.value_and_grad(loss_with_aux, has_aux=True)
  # The loss value itself is unused here (metrics_fn recomputes whatever is
  # logged), so it is discarded.
  (_, (new_model_state, output)), grad = grad_fn(train_state.optimizer.target)
  # BERT clips gradients *before* the cross-device mean.
  if config.get('max_grad_norm', None) is not None:
    grad = jax_optimizers.clip_grads(grad, config.max_grad_norm)
  # Average gradients across devices; axis_name matches the enclosing pmap.
  grad = jax.lax.pmean(grad, axis_name='batch')
  new_optimizer = train_state.optimizer.apply_gradient(grad, learning_rate=lr)
  # Optional decoupled weight decay, applied to kernel parameters only.
  if config.get('explicit_weight_decay', None) is not None:
    decay_fn = functools.partial(
        optimizers.decay_weight_fn, lr=lr, decay=config.explicit_weight_decay)
    new_optimizer = new_optimizer.replace(
        target=optimizers.tree_map_with_names(
            decay_fn,
            new_optimizer.target,
            match_name_fn=lambda name: 'kernel' in name))
  metrics = metrics_fn(output, batch)
  new_train_state = train_state.replace(  # pytype: disable=attribute-error
      global_step=step + 1,
      optimizer=new_optimizer,
      model_state=new_model_state,
      rng=next_rng)
  return new_train_state, metrics, lr
def eval_step(
    *,
    flax_model: nn.Module,
    train_state: train_utils.TrainState,
    batch: bert_base_model.Batch,
    metrics_fn: bert_base_model.MetricFn,
    all_gather: bool = False,
    debug: Optional[bool] = False
) -> Tuple[Dict[str, Tuple[float, int]], Optional[jnp.ndarray],
           Optional[jnp.ndarray]]:
  """Runs a single step of evaluation.

  Note that in this code, the buffer of the second argument (batch) is donated
  to the computation.

  Assumed API of metrics_fn is:
  `metrics = metrics_fn(logits, batch)`
  where batch is yielded by the batch iterator, and metrics is a dictionary
  mapping metric name to a vector of per example measurements. eval_step will
  aggregate (by summing) all per example measurements and divide by the
  aggregated normalizers. For each given metric we compute:
  1/N sum_{b in batch_iter} metric(b), where N is the sum of normalizer
  over all batches.

  Args:
    flax_model: A Flax model.
    train_state: TrainState, the state of training including the current
      global_step, model_state, rng, and optimizer. The buffer of this argument
      can be donated to the computation.
    batch: A single batch of data.
    metrics_fn: A metrics function, that given logits and batch of data,
      calculates the metrics as well as the loss.
    all_gather: If True, the function gathers the batch and the model output
      from all hosts, using `jax.lax.all_gather`, and returns them, e.g., for
      computing global metrics on CPU.
    debug: Whether the debug mode is enabled during evaluation. `debug=True`
      enables model specific logging/storing some values using
      jax.host_callback.

  Returns:
    Calculated metrics and optionally output, and batch after all_gather.
  """
  # Evaluation runs on the current optimizer target (params); nothing is
  # mutated (mutable=False) and no dropout rng is supplied.
  variables = {
      'params': train_state.optimizer.target,
      **train_state.model_state
  }
  output = flax_model.apply(
      variables, batch, train=False, mutable=False, debug=debug)
  metrics = metrics_fn(output, batch)
  if all_gather:
    # Collect output and batch from every device so the host can compute
    # dataset-level (global) metrics.
    output = jax.lax.all_gather(output, 'batch')
    batch = jax.lax.all_gather(batch, 'batch')
    return metrics, output, batch
  else:
    return metrics, None, None
def representation_fn(
    *,
    flax_model: nn.Module,
    train_state: train_utils.TrainState,
    batch: bert_base_model.Batch,
    representation_layer: str,
    gather_to_host: bool = True
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
  """Feeds the inputs to the model and returns their representations.

  Args:
    flax_model: A Flax model.
    train_state: TrainState, the state of training including the current
      global_step, model_state, rng, and optimizer. The buffer of this argument
      can be donated to the computation.
    batch: A single batch of data from the dataset.
    representation_layer: The name of the layer to use as the representation,
      given as a '/'-separated module path; only its last component is used
      to match the module by name.
    gather_to_host: Whether to gather results from all devices to the host,
      rather than leaving them distributed.

  Returns:
    Representation learned by the model for the given inputs and the labels and
    masks. If `gather_to_host` is True, these are collected from all hosts.
  """
  variables = {
      'params': train_state.optimizer.target,
      **train_state.model_state
  }
  representation_layer_parts = representation_layer.split('/')
  # Capture only intermediates of modules whose name matches the last path
  # component, to avoid storing every layer's output.
  filter_rep = lambda mdl, _: mdl.name == representation_layer_parts[-1]
  _, model_state = flax_model.apply(
      variables,
      batch,
      train=False,
      capture_intermediates=filter_rep,
      mutable=['intermediates'],
      transfer_mode=True,
      debug=False)
  if 'intermediates' not in model_state:
    raise ValueError(f'Layer with name "{representation_layer}"'
                     ' does not exist in your model.')
  # Walk the captured tree down the requested path; Flax stores a module's
  # captured outputs under the '__call__' key as a tuple, so take element 0.
  representation = model_state['intermediates']
  for rep_layer in representation_layer_parts:
    if rep_layer:
      representation = representation[rep_layer]
  representation = representation['__call__'][0]
  if gather_to_host:
    representation = jax.lax.all_gather(representation, 'batch')
    batch = jax.lax.all_gather(batch, 'batch')
  return representation, batch['label'], batch['batch_mask']
def train(
    *,
    rng: jnp.ndarray,
    config: ml_collections.ConfigDict,
    model_cls: Type[bert_base_model.BERTBaseModel],
    dataset: dataset_utils.Dataset,
    workdir: str,
    writer: metric_writers.MetricWriter,
) -> Tuple[train_utils.TrainState, Dict[str, Any], Dict[str, Any]]:
  """Main training loop lives in this function.

  Given the model class and dataset, it prepares the items needed to run the
  training, including the TrainState.

  Args:
    rng: Jax rng key.
    config: Configurations of the experiment.
    model_cls: Model class; A model has a flax_module, a loss_fn, and a
      metrics_fn associated with it.
    dataset: The dataset that has train_iter, eval_iter, meta_data, and
      optionally, test_iter.
    workdir: Directory for checkpointing.
    writer: CLU metrics writer instance.

  Returns:
    train_state that has the state of training (including current
    global_step, model_state, rng, and the optimizer), train_summary
    and eval_summary which are dict of metrics. These outputs are used for
    regression testing.
  """
  lead_host = jax.process_index() == 0
  # Build the loss_fn, metrics, and flax_model.
  model = model_cls(config, dataset.meta_data)

  # Initialize model.
  rng, init_rng = jax.random.split(rng)
  (params, model_state, num_trainable_params,
   gflops) = bert_train_utils.initialize_bert_model(
       model_def=model.flax_model,
       input_spec=dataset.meta_data['input_spec'],
       config=config,
       rngs=init_rng)

  # Create optimizer.
  # We jit this, such that the arrays that are created are created on the same
  # device as the input is, in this case the CPU. Else they'd be on device[0].
  optimizer = jax.jit(
      optimizers.get_optimizer(config).create, backend='cpu')(
          params)
  rng, train_rng = jax.random.split(rng)
  train_state = train_utils.TrainState(
      global_step=0,
      optimizer=optimizer,
      model_state=model_state,
      rng=train_rng,
      accum_train_time=0)
  start_step = train_state.global_step
  if config.checkpoint:
    train_state, start_step = train_utils.restore_checkpoint(
        workdir, train_state)

  if (start_step == 0  # Which means "no" checkpoint is restored!
      and config.get('init_from') is not None):
    restored_model_cfg = config.init_from.get('model_config')
    init_checkpoint_path = config.init_from.get('checkpoint_path')
    restored_train_state = pretrain_utils.restore_pretrained_checkpoint(
        init_checkpoint_path, train_state, assert_exist=True)
    # Load params from the init_model.
    train_state = model.init_from_train_state(  # pytype: disable=attribute-error
        train_state, restored_train_state, restored_model_cfg)
    del restored_train_state

  # Replicate the optimizer, state, and rng across devices.
  train_state = jax_utils.replicate(train_state)
  del params  # Do not keep a copy of the initial params.

  # Calculate the total number of training steps.
  total_steps, steps_per_epoch = train_utils.get_num_training_steps(
      config, dataset.meta_data)
  # Get learning rate scheduler.
  learning_rate_fn = lr_schedules.get_learning_rate_fn(config)

  train_step_pmapped = jax.pmap(
      functools.partial(
          train_step,
          flax_model=model.flax_model,
          learning_rate_fn=learning_rate_fn,
          loss_fn=model.loss_function,
          metrics_fn=model.get_metrics_fn('train'),
          config=config,
          debug=config.debug_train),
      axis_name='batch',
      # We can donate both buffers of train_state and train_batch.
      donate_argnums=(0, 1),
  )
  eval_step_pmapped = jax.pmap(
      functools.partial(
          eval_step,
          flax_model=model.flax_model,
          metrics_fn=model.get_metrics_fn('validation'),
          all_gather=config.get('global_metrics', False),
          debug=config.debug_eval),
      axis_name='batch',
      # We can donate the eval_batch's buffer.
      donate_argnums=(1,),
  )
  if 'fewshot' in config:
    representation_fn_pmapped = jax.pmap(
        functools.partial(
            representation_fn,
            flax_model=model.flax_model,
            representation_layer=config.fewshot.representation_layer),
        # We can donate the batch's buffer.
        donate_argnums=(1,),
        axis_name='batch')
    fewshotter = bert_train_utils.BERTFewShotEvaluator(
        representation_fn_pmapped, config.fewshot)

  log_eval_steps = config.get('log_eval_steps') or steps_per_epoch
  if not log_eval_steps:
    raise ValueError("'log_eval_steps' should be specified in the config.")
  checkpoint_steps = config.get('checkpoint_steps') or log_eval_steps
  log_summary_steps = config.get('log_summary_steps') or log_eval_steps

  # Ceil rounding such that we include the last incomplete batch.
  total_eval_steps = int(
      np.ceil(dataset.meta_data['num_eval_examples'] / config.batch_size))
  steps_per_eval = config.get('steps_per_eval') or total_eval_steps

  # Global metrics are only computed if requested in the config AND we are
  # the lead host.
  compute_global_metrics = False
  if config.get('global_metrics', False) and lead_host:
    compute_global_metrics = True
  if compute_global_metrics:
    global_metrics_evaluator = bert_train_utils.BERTGlobalEvaluator(
        config.global_metrics)

  train_metrics, extra_training_logs = [], []
  train_summary, eval_summary = None, None

  chrono = train_utils.Chrono(
      first_step=start_step,
      total_steps=total_steps,
      steps_per_epoch=steps_per_epoch,
      global_bs=config.batch_size,
      accum_train_time=int(jax_utils.unreplicate(train_state.accum_train_time)),
      example_type='example')

  logging.info('Starting training loop at step %d.', start_step + 1)
  report_progress = periodic_actions.ReportProgress(
      num_train_steps=total_steps, writer=writer)
  hooks = [report_progress]
  if config.get('xprof', True) and lead_host:
    hooks.append(periodic_actions.Profile(num_profile_steps=5, logdir=workdir))

  if start_step == 0:
    step0_log = {'num_trainable_params': num_trainable_params}
    if gflops:
      step0_log['gflops'] = gflops
    writer.write_scalars(1, step0_log)

  for step in range(start_step + 1, total_steps + 1):
    with jax.profiler.StepTraceContext('train', step_num=step):
      train_batch = next(dataset.train_iter)
      train_state, t_metrics, lr = train_step_pmapped(
          train_state=train_state, batch=train_batch)
      # This will accumulate metrics in TPU memory up to the point that we log
      # them. This is no problem for small metrics but may be a problem for
      # large (e.g. segmentation) metrics. An alternative is to set
      # `log_summary_steps` to a small number, or to use
      # `train_utils.unreplicate_and_get` here instead of right before writing
      # summaries, but that means in each step, we have data transfer between
      # tpu and host, which might slow down the training.
      train_metrics.append(t_metrics)
      # Additional training logs: learning rate:
      extra_training_logs.append({'learning_rate': lr})
    for h in hooks:
      h(step)
    chrono.pause()  # Below are once-in-a-while ops -> pause.
    ###################### LOG TRAIN SUMMARY ########################
    if (step % log_summary_steps == 1) or (step == total_steps):
      if lead_host:
        chrono.tick(step, writer=writer)
      # train_metrics is list of a dictionaries of metrics, where the shape of
      # the metrics[key] is [n_local_devices]. However, because metric functions
      # have a psum, we have already summed across the whole sharded batch, and
      # what's returned is n_local_devices copies of the same summed metric.
      # So we do unreplicate and fetch them to host using `unreplicate_and_get`.
      train_summary = train_utils.log_train_summary(
          step=step,
          train_metrics=jax.tree_map(train_utils.unreplicate_and_get,
                                     train_metrics),
          extra_training_logs=jax.tree_map(train_utils.unreplicate_and_get,
                                           extra_training_logs),
          writer=writer)
      # Reset metric accumulation for next evaluation cycle.
      train_metrics, extra_training_logs = [], []
    ################### EVALUATION #######################
    if (step % log_eval_steps == 1) or (step == total_steps):
      with report_progress.timed('eval'):
        eval_metrics = []
        # Sync model state across replicas.
        train_state = train_utils.sync_model_state_across_replicas(
            train_state)
        for _ in range(steps_per_eval):
          eval_batch = next(dataset.valid_iter)
          e_metrics, e_output, e_batch = eval_step_pmapped(
              train_state=train_state, batch=eval_batch)
          eval_metrics.append(train_utils.unreplicate_and_get(e_metrics))
          if compute_global_metrics:
            # Unreplicate outputs of eval_step_pmapped that are coming from
            # `lax.all_gather`, fetch to the host and add to the evaluator:
            e_batch_mask = train_utils.unreplicate_and_get(
                e_batch['batch_mask']).astype(bool)
            # Classification: 'label', regression: 'target'
            t_key = 'label' if 'label' in e_batch else 'targets'
            global_metrics_evaluator.add_batch_of_examples(
                target=train_utils.unreplicate_and_get(
                    e_batch[t_key])[e_batch_mask],
                output=train_utils.unreplicate_and_get(e_output)
                [e_batch_mask])
            del e_batch, e_output, e_batch_mask
        eval_global_metrics_summary = None
        if compute_global_metrics:
          if (len(global_metrics_evaluator) !=
              dataset.meta_data['num_eval_examples']):
            # Make sure no example is lost (specially in multi-host setup).
            # BUGFIX: the f-string below previously reused the outer single
            # quote for the dict key, which is a SyntaxError before Py 3.12.
            raise ValueError('Number of eval examples should be '
                             f"{dataset.meta_data['num_eval_examples']}, "
                             f'but it is {len(global_metrics_evaluator)}.')
          eval_global_metrics_summary = (
              global_metrics_evaluator.compute_metrics(
                  clear_annotations=True))
        eval_summary = train_utils.log_eval_summary(
            step=step,
            eval_metrics=eval_metrics,
            extra_eval_summary=eval_global_metrics_summary,
            writer=writer)
      writer.flush()
      del eval_metrics, eval_global_metrics_summary
    ##################### CHECKPOINTING ###################
    if ((step % checkpoint_steps == 0 and step > 0) or
        (step == total_steps)) and config.checkpoint:
      with report_progress.timed('checkpoint'):
        # Sync model state across replicas.
        train_state = train_utils.sync_model_state_across_replicas(train_state)
        if lead_host:
          # NOTE(review): the result of `replace` is discarded, so the
          # accumulated train time is never actually written into the
          # checkpointed state. Assigning it back naively would mix an
          # unreplicated leaf into a replicated pytree, so this is flagged
          # rather than silently changed — TODO confirm intended behavior.
          train_state.replace(  # pytype: disable=attribute-error
              accum_train_time=chrono.accum_train_time)
          train_utils.save_checkpoint(workdir, train_state)
    ##################### FEWSHOT EVALUATION ############################
    if 'fewshot' in config:
      # Compute few-shot on-the-fly evaluation.
      if (step % config.fewshot.log_eval_steps == 1) or (step == total_steps):
        with report_progress.timed('fewshot'):
          results = fewshotter.run_all(train_state, config.fewshot.datasets)
          fewshotter.log_fewshot_summary(
              writer=writer, step=step, results=results)
          del results
          writer.write_scalars(step, {'zz/epoch': step / steps_per_epoch})
        writer.flush()
    chrono.resume()  # Un-pause now.
  # Wait until computations are done before exiting.
  jax.random.normal(jax.random.PRNGKey(0), ()).block_until_ready()
  # Return the train and eval summary after last step for regression testing.
  return train_state, train_summary, eval_summary
| """BERT Training Script."""
import functools
from typing import Any, Callable, Dict, Tuple, Optional, Type
from absl import logging
from clu import metric_writers
from clu import periodic_actions
from flax import jax_utils
import flax.linen as nn
import jax
from jax.experimental import optimizers as jax_optimizers
import jax.numpy as jnp
import jax.profiler
import ml_collections
import numpy as np
from scenic.dataset_lib import dataset_utils
from scenic.projects.baselines.bert import bert_base_model
from scenic.projects.baselines.bert import train_utils as bert_train_utils
from scenic.train_lib import lr_schedules
from scenic.train_lib import optimizers
from scenic.train_lib import pretrain_utils
from scenic.train_lib import train_utils
def train_step(
    *,
    flax_model: nn.Module,
    train_state: train_utils.TrainState,
    batch: bert_base_model.Batch,
    learning_rate_fn: Callable[[int], float],
    loss_fn: bert_base_model.LossFn,
    metrics_fn: bert_base_model.MetricFn,
    config: ml_collections.ConfigDict,
    debug: Optional[bool] = False
) -> Tuple[train_utils.TrainState, Dict[str, Tuple[float, int]], float]:
  """Runs a single step of training.

  Given the state of the training and a batch of data, computes
  the loss and updates the parameters of the model.

  Note that in this code, the buffers of the first (train_state) and second
  (batch) arguments are donated to the computation.

  Args:
    flax_model: A Flax model.
    train_state: The state of training including the current global_step,
      model_state, rng, and optimizer. The buffer of this argument can be
      donated to the computation.
    batch: A single batch of data. The buffer of this argument can be donated to
      the computation.
    learning_rate_fn: Learning rate scheduler which given the global_step
      generates the learning rate.
    loss_fn: A loss function that given logits, a batch, and parameters of the
      model calculates the loss.
    metrics_fn: A metrics function that given logits and batch of data,
      calculates the metrics as well as the loss.
    config: Configurations of the experiment.
    debug: Whether the debug mode is enabled during training. `debug=True`
      enables model specific logging/storing some values using
      jax.host_callback.

  Returns:
    Updated state of training, computed metrics, and learning rate for logging.
  """
  # Split off a fresh rng for the next step; the current one is bound below.
  new_rng, rng = jax.random.split(train_state.rng)
  # Bind the rng to the host/device we are on.
  dropout_rng = train_utils.bind_rng_to_host_device(
      rng, axis_name='batch', bind_to='device')

  def training_loss_fn(params):
    # Forward pass; the 'batch_stats' collection is mutable so it can be
    # updated during training.
    variables = {'params': params, **train_state.model_state}
    output, new_model_state = flax_model.apply(
        variables,
        batch,
        mutable=['batch_stats'],
        train=True,
        rngs={'dropout': dropout_rng},
        debug=debug)
    loss = loss_fn(output, batch, variables['params'])
    return loss, (new_model_state, output)

  compute_gradient_fn = jax.value_and_grad(training_loss_fn, has_aux=True)
  step = train_state.global_step
  lr = learning_rate_fn(step)
  (train_cost,
   (new_model_state,
    output)), grad = compute_gradient_fn(train_state.optimizer.target)
  del train_cost  # Only the gradient is used; the loss is not logged here.
  # We clip gradients before pmean in BERT.
  if config.get('max_grad_norm', None) is not None:
    grad = jax_optimizers.clip_grads(grad, config.max_grad_norm)
  # Re-use same axis_name as in the call to `pmap(...train_step...)` below.
  grad = jax.lax.pmean(grad, axis_name='batch')
  new_optimizer = train_state.optimizer.apply_gradient(grad, learning_rate=lr)
  # Explicit weight decay, if necessary; applied only to 'kernel' parameters.
  if config.get('explicit_weight_decay', None) is not None:
    new_optimizer = new_optimizer.replace(
        target=optimizers.tree_map_with_names(
            functools.partial(
                optimizers.decay_weight_fn,
                lr=lr,
                decay=config.explicit_weight_decay),
            new_optimizer.target,
            match_name_fn=lambda name: 'kernel' in name))
  metrics = metrics_fn(output, batch)
  new_train_state = train_state.replace(  # pytype: disable=attribute-error
      global_step=step + 1,
      optimizer=new_optimizer,
      model_state=new_model_state,
      rng=new_rng)
  return new_train_state, metrics, lr
def eval_step(
    *,
    flax_model: nn.Module,
    train_state: train_utils.TrainState,
    batch: bert_base_model.Batch,
    metrics_fn: bert_base_model.MetricFn,
    all_gather: bool = False,
    debug: Optional[bool] = False
) -> Tuple[Dict[str, Tuple[float, int]], Optional[jnp.ndarray],
           Optional[jnp.ndarray]]:
  """Runs a single step of evaluation.

  Note that in this code, the buffer of the second argument (batch) is donated
  to the computation.

  Assumed API of metrics_fn is:
  `metrics = metrics_fn(logits, batch)`
  where batch is yielded by the batch iterator, and metrics is a dictionary
  mapping metric name to a vector of per example measurements. eval_step will
  aggregate (by summing) all per example measurements and divide by the
  aggregated normalizers. For each given metric we compute:
  1/N sum_{b in batch_iter} metric(b), where N is the sum of normalizer
  over all batches.

  Args:
    flax_model: A Flax model.
    train_state: TrainState, the state of training including the current
      global_step, model_state, rng, and optimizer. The buffer of this argument
      can be donated to the computation.
    batch: A single batch of data.
    metrics_fn: A metrics function, that given logits and batch of data,
      calculates the metrics as well as the loss.
    all_gather: If True, the function gathers the batch and the model output
      from all hosts, using `jax.lax.all_gather`, and returns them, e.g., for
      computing global metrics on CPU.
    debug: Whether the debug mode is enabled during evaluation. `debug=True`
      enables model specific logging/storing some values using
      jax.host_callback.

  Returns:
    Calculated metrics and optionally output, and batch after all_gather.
  """
  # Evaluation runs on the current optimizer target (params); nothing is
  # mutated (mutable=False) and no dropout rng is supplied.
  variables = {
      'params': train_state.optimizer.target,
      **train_state.model_state
  }
  output = flax_model.apply(
      variables, batch, train=False, mutable=False, debug=debug)
  metrics = metrics_fn(output, batch)
  if all_gather:
    # Collect output and batch from every device so the host can compute
    # dataset-level (global) metrics.
    output = jax.lax.all_gather(output, 'batch')
    batch = jax.lax.all_gather(batch, 'batch')
    return metrics, output, batch
  else:
    return metrics, None, None
def representation_fn(
    *,
    flax_model: nn.Module,
    train_state: train_utils.TrainState,
    batch: bert_base_model.Batch,
    representation_layer: str,
    gather_to_host: bool = True
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
  """Feeds the inputs to the model and returns their representations.

  Args:
    flax_model: A Flax model.
    train_state: TrainState, the state of training including the current
      global_step, model_state, rng, and optimizer. The buffer of this argument
      can be donated to the computation.
    batch: A single batch of data from the dataset.
    representation_layer: The name of the layer to use as the representation,
      given as a '/'-separated module path; only its last component is used
      to match the module by name.
    gather_to_host: Whether to gather results from all devices to the host,
      rather than leaving them distributed.

  Returns:
    Representation learned by the model for the given inputs and the labels and
    masks. If `gather_to_host` is True, these are collected from all hosts.
  """
  variables = {
      'params': train_state.optimizer.target,
      **train_state.model_state
  }
  representation_layer_parts = representation_layer.split('/')
  # Capture only intermediates of modules whose name matches the last path
  # component, to avoid storing every layer's output.
  filter_rep = lambda mdl, _: mdl.name == representation_layer_parts[-1]
  _, model_state = flax_model.apply(
      variables,
      batch,
      train=False,
      capture_intermediates=filter_rep,
      mutable=['intermediates'],
      transfer_mode=True,
      debug=False)
  if 'intermediates' not in model_state:
    raise ValueError(f'Layer with name "{representation_layer}"'
                     ' does not exist in your model.')
  # Walk the captured tree down the requested path; Flax stores a module's
  # captured outputs under the '__call__' key as a tuple, so take element 0.
  representation = model_state['intermediates']
  for rep_layer in representation_layer_parts:
    if rep_layer:
      representation = representation[rep_layer]
  representation = representation['__call__'][0]
  if gather_to_host:
    representation = jax.lax.all_gather(representation, 'batch')
    batch = jax.lax.all_gather(batch, 'batch')
  return representation, batch['label'], batch['batch_mask']
def train(
*,
rng: jnp.ndarray,
config: ml_collections.ConfigDict,
model_cls: Type[bert_base_model.BERTBaseModel],
dataset: dataset_utils.Dataset,
workdir: str,
writer: metric_writers.MetricWriter,
) -> Tuple[train_utils.TrainState, Dict[str, Any], Dict[str, Any]]:
"""Main training loop lives in this function.
Given the model class and dataset, it prepares the items needed to run the
training, including the TrainState.
Args:
rng: Jax rng key.
config: Configurations of the experiment.
model_cls: Model class; A model has a flax_module, a loss_fn, and a
metrics_fn associated with it.
dataset: The dataset that has train_iter, eval_iter, meta_data, and
optionally, test_iter.
workdir: Directory for checkpointing.
writer: CLU metrics writer instance.
Returns:
train_state that has the state of training (including current
global_step, model_state, rng, and the optimizer), train_summary
and eval_summary which are dict of metrics. These outputs are used for
regression testing.
"""
lead_host = jax.process_index() == 0
# Build the loss_fn, metrics, and flax_model.
model = model_cls(config, dataset.meta_data)
# Initialize model.
rng, init_rng = jax.random.split(rng)
(params, model_state, num_trainable_params,
gflops) = bert_train_utils.initialize_bert_model(
model_def=model.flax_model,
input_spec=dataset.meta_data['input_spec'],
config=config,
rngs=init_rng)
# Create optimizer.
# We jit this, such that the arrays that are created are created on the same
# device as the input is, in this case the CPU. Else they'd be on device[0].
optimizer = jax.jit(
optimizers.get_optimizer(config).create, backend='cpu')(
params)
rng, train_rng = jax.random.split(rng)
train_state = train_utils.TrainState(
global_step=0,
optimizer=optimizer,
model_state=model_state,
rng=train_rng,
accum_train_time=0)
start_step = train_state.global_step
if config.checkpoint:
train_state, start_step = train_utils.restore_checkpoint(
workdir, train_state)
if (start_step == 0 # Which means "no" checkpoint is restored!
and config.get('init_from') is not None):
restored_model_cfg = config.init_from.get('model_config')
init_checkpoint_path = config.init_from.get('checkpoint_path')
restored_train_state = pretrain_utils.restore_pretrained_checkpoint(
init_checkpoint_path, train_state, assert_exist=True)
# Load params from the init_model.
train_state = model.init_from_train_state( # pytype: disable=attribute-error
train_state, restored_train_state, restored_model_cfg)
del restored_train_state
# Replicate the optimzier, state, and rng.
train_state = jax_utils.replicate(train_state)
del params # Do not keep a copy of the initial params.
# Calculate the total number of training steps.
total_steps, steps_per_epoch = train_utils.get_num_training_steps(
config, dataset.meta_data)
# Get learning rate scheduler.
learning_rate_fn = lr_schedules.get_learning_rate_fn(config)
train_step_pmapped = jax.pmap(
functools.partial(
train_step,
flax_model=model.flax_model,
learning_rate_fn=learning_rate_fn,
loss_fn=model.loss_function,
metrics_fn=model.get_metrics_fn('train'),
config=config,
debug=config.debug_train),
axis_name='batch',
# We can donate both buffers of train_state and train_batch.
donate_argnums=(0, 1),
)
eval_step_pmapped = jax.pmap(
functools.partial(
eval_step,
flax_model=model.flax_model,
metrics_fn=model.get_metrics_fn('validation'),
all_gather=config.get('global_metrics', False),
debug=config.debug_eval),
axis_name='batch',
# We can donate the eval_batch's buffer.
donate_argnums=(1,),
)
if 'fewshot' in config:
representation_fn_pmaped = jax.pmap(
functools.partial(
representation_fn,
flax_model=model.flax_model,
representation_layer=config.fewshot.representation_layer),
# We can donate the batch's buffer.
donate_argnums=(1,),
axis_name='batch')
fewshotter = bert_train_utils.BERTFewShotEvaluator(representation_fn_pmaped,
config.fewshot)
log_eval_steps = config.get('log_eval_steps') or steps_per_epoch
if not log_eval_steps:
raise ValueError("'log_eval_steps' should be specified in the config.")
checkpoint_steps = config.get('checkpoint_steps') or log_eval_steps
log_summary_steps = config.get('log_summary_steps') or log_eval_steps
# Ceil rounding such that we include the last incomplete batch.
total_eval_steps = int(
np.ceil(dataset.meta_data['num_eval_examples'] / config.batch_size))
steps_per_eval = config.get('steps_per_eval') or total_eval_steps
# If `global_metrics` are set in the config and we are the the lead host
compute_global_metrics = False
if config.get('global_metrics', False) and lead_host:
compute_global_metrics = True
if compute_global_metrics:
global_metrics_evaluator = bert_train_utils.BERTGlobalEvaluator(
config.global_metrics)
train_metrics, extra_training_logs = [], []
train_summary, eval_summary = None, None
chrono = train_utils.Chrono(
first_step=start_step,
total_steps=total_steps,
steps_per_epoch=steps_per_epoch,
global_bs=config.batch_size,
accum_train_time=int(jax_utils.unreplicate(train_state.accum_train_time)),
example_type='example')
logging.info('Starting training loop at step %d.', start_step + 1)
report_progress = periodic_actions.ReportProgress(
num_train_steps=total_steps, writer=writer)
hooks = [report_progress]
if config.get('xprof', True) and lead_host:
hooks.append(periodic_actions.Profile(num_profile_steps=5, logdir=workdir))
if start_step == 0:
step0_log = {'num_trainable_params': num_trainable_params}
if gflops:
step0_log['gflops'] = gflops
writer.write_scalars(1, step0_log)
for step in range(start_step + 1, total_steps + 1):
with jax.profiler.StepTraceContext('train', step_num=step):
train_batch = next(dataset.train_iter)
train_state, t_metrics, lr = train_step_pmapped(
train_state=train_state, batch=train_batch)
# This will accumulate metrics in TPU memory up to the point that we log
# them. This is no problem for small metrics but may be a problem for
# large (e.g. segmentation) metrics. An alternative is to set
# `log_summary_steps` to a small number, or to use
# `train_utils.unreplicate_and_get` here instead of right before writing
# summaries, but that means in each step, we have data transfer between
# tpu and host, which might slow down the training.
train_metrics.append(t_metrics)
# Additional training logs: learning rate:
extra_training_logs.append({'learning_rate': lr})
for h in hooks:
h(step)
chrono.pause() # Below are once-in-a-while ops -> pause.
###################### LOG TRAIN SUMMARY ########################
if (step % log_summary_steps == 1) or (step == total_steps):
if lead_host:
chrono.tick(step, writer=writer)
# train_metrics is list of a dictionaries of metrics, where the shape of
# the metrics[key] is [n_local_devices]. However, because metric functions
# have a psum, we have already summed across the whole sharded batch, and
# what's returned is n_local_devices copies of the same summed metric.
# So we do unreplicate and fetch them to host using `unreplicate_and_get`.
train_summary = train_utils.log_train_summary(
step=step,
train_metrics=jax.tree_map(train_utils.unreplicate_and_get,
train_metrics),
extra_training_logs=jax.tree_map(train_utils.unreplicate_and_get,
extra_training_logs),
writer=writer)
# Reset metric accumulation for next evaluation cycle.
train_metrics, extra_training_logs = [], []
################### EVALUATION #######################
if (step % log_eval_steps == 1) or (step == total_steps):
with report_progress.timed('eval'):
eval_metrics = []
# Sync model state across replicas.
train_state = train_utils.sync_model_state_across_replicas(
train_state)
for _ in range(steps_per_eval):
eval_batch = next(dataset.valid_iter)
e_metrics, e_output, e_batch = eval_step_pmapped(
train_state=train_state, batch=eval_batch)
eval_metrics.append(train_utils.unreplicate_and_get(e_metrics))
if compute_global_metrics:
# Unreplicate outputs of eval_step_pmapped that are coming from
# `lax.all_gather`, fetch to the host and add to the Evaluator:
e_batch_mask = train_utils.unreplicate_and_get(
e_batch['batch_mask']).astype(bool)
# Classification: 'label', regression: 'target'
t_key = 'label' if 'label' in e_batch else 'targets'
global_metrics_evaluator.add_batch_of_examples(
target=train_utils.unreplicate_and_get(
e_batch[t_key])[e_batch_mask],
output=train_utils.unreplicate_and_get(e_output)
[e_batch_mask])
del e_batch, e_output, e_batch_mask
eval_global_metrics_summary = None
if compute_global_metrics:
if (len(global_metrics_evaluator) !=
dataset.meta_data['num_eval_examples']):
# Make sure no example is lost (specially in multi-host setup).
raise ValueError(f'Number of eval examples should be '
f'{dataset.meta_data["num_eval_examples"]}, '
f'but it is {len(global_metrics_evaluator)}.')
eval_global_metrics_summary = (
global_metrics_evaluator.compute_metrics(
clear_annotations=True))
eval_summary = train_utils.log_eval_summary(
step=step,
eval_metrics=eval_metrics,
extra_eval_summary=eval_global_metrics_summary,
writer=writer)
writer.flush()
del eval_metrics, eval_global_metrics_summary
##################### CHECKPOINTING ###################
if ((step % checkpoint_steps == 0 and step > 0) or
(step == total_steps)) and config.checkpoint:
with report_progress.timed('checkpoint'):
# Sync model state across replicas.
train_state = train_utils.sync_model_state_across_replicas(train_state)
if lead_host:
train_state.replace( # pytype: disable=attribute-error
accum_train_time=chrono.accum_train_time)
train_utils.save_checkpoint(workdir, train_state)
##################### FEWSHOT EVALUATION ############################
if 'fewshot' in config:
# Compute few-shot on-the-fly evaluation.
if (step % config.fewshot.log_eval_steps == 1) or (step == total_steps):
with report_progress.timed('fewshot'):
results = fewshotter.run_all(train_state, config.fewshot.datasets)
fewshotter.log_fewshot_summary(
writer=writer, step=step, results=results)
del results
writer.write_scalars(step, {'zz/epoch': step / steps_per_epoch})
writer.flush()
chrono.resume() # un-pause now
# Wait until computations are done before exiting.
jax.random.normal(jax.random.PRNGKey(0), ()).block_until_ready()
# Return the train and eval summary after last step for regresesion testing.
return train_state, train_summary, eval_summary
|
"""Config flow to configure the Netgear integration."""
from __future__ import annotations
import logging
from typing import cast
from urllib.parse import urlparse
from pynetgear import DEFAULT_HOST, DEFAULT_PORT, DEFAULT_USER
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.util.network import is_ipv4_address
from .const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
DEFAULT_NAME,
DOMAIN,
MODELS_PORT_80,
MODELS_PORT_5555,
PORT_80,
PORT_5555,
)
from .errors import CannotLoginException
from .router import get_api
_LOGGER = logging.getLogger(__name__)
def _discovery_schema_with_defaults(discovery_info):
    """Build the discovery-flow form schema (credentials only), pre-filled."""
    shared_fields = _ordered_shared_schema(discovery_info)
    return vol.Schema(shared_fields)
def _user_schema_with_defaults(user_input):
    """Build the manual-setup form schema: host field plus shared credentials."""
    fields = {vol.Optional(CONF_HOST, default=user_input.get(CONF_HOST, "")): str}
    fields.update(_ordered_shared_schema(user_input))
    return vol.Schema(fields)
def _ordered_shared_schema(schema_input):
    """Return the username/password fields shared by both setup forms."""
    username_default = schema_input.get(CONF_USERNAME, "")
    password_default = schema_input.get(CONF_PASSWORD, "")
    return {
        vol.Optional(CONF_USERNAME, default=username_default): str,
        vol.Required(CONF_PASSWORD, default=password_default): str,
    }
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Options for the component."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Init object."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Manage the options."""
        if user_input is not None:
            # Form submitted: store the options unchanged.
            return self.async_create_entry(title="", data=user_input)
        # First visit: pre-fill with the stored option, falling back to the
        # default consider-home interval (in seconds).
        current_consider_home = self.config_entry.options.get(
            CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()
        )
        settings_schema = vol.Schema(
            {vol.Optional(CONF_CONSIDER_HOME, default=current_consider_home): int}
        )
        return self.async_show_form(step_id="init", data_schema=settings_schema)
class NetgearFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow."""

    VERSION = 1

    def __init__(self):
        """Initialize the netgear config flow."""
        # Defaults shown in the setup form; SSDP discovery may override them.
        self.placeholders = {
            CONF_HOST: DEFAULT_HOST,
            CONF_PORT: DEFAULT_PORT,
            CONF_USERNAME: DEFAULT_USER,
            CONF_SSL: False,
        }
        # Set once async_step_ssdp supplied the host, so the form omits it.
        self.discovered = False

    @staticmethod
    @callback
    def async_get_options_flow(
        config_entry: config_entries.ConfigEntry,
    ) -> OptionsFlowHandler:
        """Get the options flow."""
        return OptionsFlowHandler(config_entry)

    async def _show_setup_form(self, user_input=None, errors=None):
        """Show the setup form to the user."""
        if not user_input:
            user_input = {}
        if self.discovered:
            # Host already known from discovery -- only ask for credentials.
            data_schema = _discovery_schema_with_defaults(user_input)
        else:
            data_schema = _user_schema_with_defaults(user_input)
        return self.async_show_form(
            step_id="user",
            data_schema=data_schema,
            errors=errors or {},
            description_placeholders=self.placeholders,
        )

    async def async_step_ssdp(self, discovery_info: ssdp.SsdpServiceInfo) -> FlowResult:
        """Initialize flow from ssdp."""
        updated_data: dict[str, str | int | bool] = {}
        device_url = urlparse(discovery_info.ssdp_location)
        if hostname := device_url.hostname:
            hostname = cast(str, hostname)
            updated_data[CONF_HOST] = hostname
        # Only IPv4 devices are supported by this flow.
        if not is_ipv4_address(str(hostname)):
            return self.async_abort(reason="not_ipv4_address")
        _LOGGER.debug("Netgear ssdp discovery info: %s", discovery_info)
        # The UPnP serial number is used as the unique id for this router.
        await self.async_set_unique_id(discovery_info.upnp[ssdp.ATTR_UPNP_SERIAL])
        self._abort_if_unique_id_configured(updates=updated_data)
        updated_data[CONF_SSL] = device_url.scheme == "https"
        updated_data[CONF_PORT] = DEFAULT_PORT
        # Model-specific overrides: some models answer on port 80, ...
        for model in MODELS_PORT_80:
            if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
                model
            ) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
                model
            ):
                updated_data[CONF_PORT] = PORT_80
        # ... others on port 5555 with SSL forced on.
        for model in MODELS_PORT_5555:
            if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
                model
            ) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
                model
            ):
                updated_data[CONF_PORT] = PORT_5555
                updated_data[CONF_SSL] = True
        self.placeholders.update(updated_data)
        self.discovered = True
        return await self.async_step_user()

    async def async_step_user(self, user_input=None):
        """Handle a flow initiated by the user."""
        errors = {}
        if user_input is None:
            return await self._show_setup_form()
        # Fall back to discovered/default placeholders for anything not entered.
        host = user_input.get(CONF_HOST, self.placeholders[CONF_HOST])
        port = self.placeholders[CONF_PORT]
        ssl = self.placeholders[CONF_SSL]
        username = user_input.get(CONF_USERNAME, self.placeholders[CONF_USERNAME])
        password = user_input[CONF_PASSWORD]
        if not username:
            username = self.placeholders[CONF_USERNAME]
        # Open connection and check authentication
        try:
            api = await self.hass.async_add_executor_job(
                get_api, password, host, username, port, ssl
            )
        except CannotLoginException:
            errors["base"] = "config"
        if errors:
            return await self._show_setup_form(user_input, errors)
        # Check if already configured
        info = await self.hass.async_add_executor_job(api.get_info)
        await self.async_set_unique_id(info["SerialNumber"], raise_on_progress=False)
        self._abort_if_unique_id_configured()
        config_data = {
            CONF_USERNAME: username,
            CONF_PASSWORD: password,
            CONF_HOST: host,
            CONF_PORT: api.port,
            CONF_SSL: api.ssl,
        }
        # Bug fix: the original reused double quotes inside a double-quoted
        # f-string (f"{info["ModelName"]}..."), which is a SyntaxError on
        # Python < 3.12; use single quotes for the subscripts instead.
        if info.get("ModelName") is not None and info.get("DeviceName") is not None:
            name = f"{info['ModelName']} - {info['DeviceName']}"
        else:
            name = info.get("ModelName", DEFAULT_NAME)
        return self.async_create_entry(
            title=name,
            data=config_data,
        )
| """Config flow to configure the Netgear integration."""
from __future__ import annotations
import logging
from typing import cast
from urllib.parse import urlparse
from pynetgear import DEFAULT_HOST, DEFAULT_PORT, DEFAULT_USER
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.util.network import is_ipv4_address
from .const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
DEFAULT_NAME,
DOMAIN,
MODELS_PORT_80,
MODELS_PORT_5555,
PORT_80,
PORT_5555,
)
from .errors import CannotLoginException
from .router import get_api
_LOGGER = logging.getLogger(__name__)
def _discovery_schema_with_defaults(discovery_info):
    """Return the form schema for a discovered device: credentials only."""
    credential_fields = _ordered_shared_schema(discovery_info)
    return vol.Schema(credential_fields)
def _user_schema_with_defaults(user_input):
    """Return the manual-setup schema: a host field followed by credentials."""
    host_field = vol.Optional(CONF_HOST, default=user_input.get(CONF_HOST, ""))
    schema_fields = {host_field: str, **_ordered_shared_schema(user_input)}
    return vol.Schema(schema_fields)
def _ordered_shared_schema(schema_input):
    """Return the credential fields common to the user and discovery forms."""
    name_default = schema_input.get(CONF_USERNAME, "")
    secret_default = schema_input.get(CONF_PASSWORD, "")
    return {
        vol.Optional(CONF_USERNAME, default=name_default): str,
        vol.Required(CONF_PASSWORD, default=secret_default): str,
    }
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Options for the component."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Init object."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Manage the options."""
        if user_input is not None:
            # Form submitted: persist the options as given.
            return self.async_create_entry(title="", data=user_input)
        # First visit: show the form pre-filled with the stored option, or the
        # default consider-home interval converted to seconds.
        settings_schema = vol.Schema(
            {
                vol.Optional(
                    CONF_CONSIDER_HOME,
                    default=self.config_entry.options.get(
                        CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()
                    ),
                ): int,
            }
        )
        return self.async_show_form(step_id="init", data_schema=settings_schema)
class NetgearFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow."""

    VERSION = 1

    def __init__(self):
        """Initialize the netgear config flow."""
        # Defaults shown in the setup form; SSDP discovery may override them.
        self.placeholders = {
            CONF_HOST: DEFAULT_HOST,
            CONF_PORT: DEFAULT_PORT,
            CONF_USERNAME: DEFAULT_USER,
            CONF_SSL: False,
        }
        # Set once async_step_ssdp supplied the host, so the form omits it.
        self.discovered = False

    @staticmethod
    @callback
    def async_get_options_flow(
        config_entry: config_entries.ConfigEntry,
    ) -> OptionsFlowHandler:
        """Get the options flow."""
        return OptionsFlowHandler(config_entry)

    async def _show_setup_form(self, user_input=None, errors=None):
        """Show the setup form to the user."""
        if not user_input:
            user_input = {}
        if self.discovered:
            # Host already known from discovery -- only ask for credentials.
            data_schema = _discovery_schema_with_defaults(user_input)
        else:
            data_schema = _user_schema_with_defaults(user_input)
        return self.async_show_form(
            step_id="user",
            data_schema=data_schema,
            errors=errors or {},
            description_placeholders=self.placeholders,
        )

    async def async_step_ssdp(self, discovery_info: ssdp.SsdpServiceInfo) -> FlowResult:
        """Initialize flow from ssdp."""
        updated_data: dict[str, str | int | bool] = {}
        device_url = urlparse(discovery_info.ssdp_location)
        if hostname := device_url.hostname:
            hostname = cast(str, hostname)
            updated_data[CONF_HOST] = hostname
        # Only IPv4 devices are accepted by this flow.
        if not is_ipv4_address(str(hostname)):
            return self.async_abort(reason="not_ipv4_address")
        _LOGGER.debug("Netgear ssdp discovery info: %s", discovery_info)
        # The UPnP serial number is used as the config entry's unique id.
        await self.async_set_unique_id(discovery_info.upnp[ssdp.ATTR_UPNP_SERIAL])
        self._abort_if_unique_id_configured(updates=updated_data)
        if device_url.scheme == "https":
            updated_data[CONF_SSL] = True
        else:
            updated_data[CONF_SSL] = False
        updated_data[CONF_PORT] = DEFAULT_PORT
        # Model-specific overrides: some models answer on port 80, ...
        for model in MODELS_PORT_80:
            if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
                model
            ) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
                model
            ):
                updated_data[CONF_PORT] = PORT_80
        # ... others on port 5555 with SSL forced on.
        for model in MODELS_PORT_5555:
            if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
                model
            ) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
                model
            ):
                updated_data[CONF_PORT] = PORT_5555
                updated_data[CONF_SSL] = True
        self.placeholders.update(updated_data)
        self.discovered = True
        return await self.async_step_user()

    async def async_step_user(self, user_input=None):
        """Handle a flow initiated by the user."""
        errors = {}
        if user_input is None:
            return await self._show_setup_form()
        # Fall back to discovered/default placeholders for anything not entered.
        host = user_input.get(CONF_HOST, self.placeholders[CONF_HOST])
        port = self.placeholders[CONF_PORT]
        ssl = self.placeholders[CONF_SSL]
        username = user_input.get(CONF_USERNAME, self.placeholders[CONF_USERNAME])
        password = user_input[CONF_PASSWORD]
        if not username:
            username = self.placeholders[CONF_USERNAME]
        # Open connection and check authentication
        try:
            api = await self.hass.async_add_executor_job(
                get_api, password, host, username, port, ssl
            )
        except CannotLoginException:
            errors["base"] = "config"
        if errors:
            return await self._show_setup_form(user_input, errors)
        # Check if already configured
        info = await self.hass.async_add_executor_job(api.get_info)
        await self.async_set_unique_id(info["SerialNumber"], raise_on_progress=False)
        self._abort_if_unique_id_configured()
        # Store the port/ssl actually negotiated by the api, not the placeholders.
        config_data = {
            CONF_USERNAME: username,
            CONF_PASSWORD: password,
            CONF_HOST: host,
            CONF_PORT: api.port,
            CONF_SSL: api.ssl,
        }
        if info.get("ModelName") is not None and info.get("DeviceName") is not None:
            name = f"{info['ModelName']} - {info['DeviceName']}"
        else:
            name = info.get("ModelName", DEFAULT_NAME)
        return self.async_create_entry(
            title=name,
            data=config_data,
        )
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=not-callable
import re
import unittest
from unittest import mock
import pytest
from google.cloud.bigquery import DEFAULT_RETRY, DatasetReference, Table, TableReference
from google.cloud.bigquery.dataset import AccessEntry, Dataset, DatasetListItem
from google.cloud.exceptions import NotFound
from parameterized import parameterized
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.bigquery import (
BigQueryCursor,
BigQueryHook,
_api_resource_configs_duplication_check,
_cleanse_time_partitioning,
_split_tablename,
_validate_src_fmt_configs,
_validate_value,
)
# Shared fixture constants reused across the BigQuery hook tests below.
PROJECT_ID = "bq-project"
CREDENTIALS = "bq-credentials"
DATASET_ID = "bq_dataset"
TABLE_ID = "bq_table"
PARTITION_ID = "20200101"  # partition id used by the partition-existence tests
VIEW_ID = 'bq_view'
JOB_ID = "1234"
LOCATION = 'europe-north1'
# API representation of the test table reference, and its parsed counterpart.
TABLE_REFERENCE_REPR = {
    'tableId': TABLE_ID,
    'datasetId': DATASET_ID,
    'projectId': PROJECT_ID,
}
TABLE_REFERENCE = TableReference.from_api_repr(TABLE_REFERENCE_REPR)
class _BigQueryBaseTestClass(unittest.TestCase):
    """Shared setup: a BigQueryHook whose credential lookup is stubbed out."""

    def setUp(self) -> None:
        class _StubbedCredentialsHook(BigQueryHook):
            # Short-circuit Google auth so tests never touch real credentials.
            def _get_credentials_and_project_id(self):
                return CREDENTIALS, PROJECT_ID

        self.hook = _StubbedCredentialsHook()
class TestBigQueryHookMethods(_BigQueryBaseTestClass):
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryConnection")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook._authorize")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.build")
    def test_bigquery_client_creation(self, mock_build, mock_authorize, mock_bigquery_connection):
        """get_conn() builds the discovery client and wraps it in a BigQueryConnection."""
        result = self.hook.get_conn()
        # The discovery build must use the hook's authorized http transport.
        mock_build.assert_called_once_with(
            'bigquery', 'v2', http=mock_authorize.return_value, cache_discovery=False
        )
        # The connection is constructed with the hook's own settings passed through.
        mock_bigquery_connection.assert_called_once_with(
            service=mock_build.return_value,
            project_id=PROJECT_ID,
            hook=self.hook,
            use_legacy_sql=self.hook.use_legacy_sql,
            location=self.hook.location,
            num_retries=self.hook.num_retries,
        )
        assert mock_bigquery_connection.return_value == result
    @mock.patch("airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__")
    def test_bigquery_bigquery_conn_id_deprecation_warning(
        self,
        mock_base_hook_init,
    ):
        """Passing the legacy bigquery_conn_id kwarg warns and maps to gcp_conn_id."""
        bigquery_conn_id = "bigquery conn id"
        warning_message = (
            "The bigquery_conn_id parameter has been deprecated. "
            "You should pass the gcp_conn_id parameter."
        )
        with pytest.warns(DeprecationWarning) as warnings:
            BigQueryHook(bigquery_conn_id=bigquery_conn_id)
        # The deprecated kwarg must be forwarded to the base hook as gcp_conn_id.
        mock_base_hook_init.assert_called_once_with(
            delegate_to=None,
            gcp_conn_id='bigquery conn id',
            impersonation_chain=None,
        )
        assert warning_message == str(warnings[0].message)
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_location_propagates_properly(self, run_with_config, _):
        """run_query(location=...) submits the job and stores the location on the hook."""
        # TODO: this creates side effect
        assert self.hook.location is None
        self.hook.run_query(sql='select 1', location='US')
        assert run_with_config.call_count == 1
        # The explicit location sticks to the hook instance (the documented side effect).
        assert self.hook.location == 'US'
def test_bigquery_insert_rows_not_implemented(self):
with pytest.raises(NotImplementedError):
self.hook.insert_rows(table="table", rows=[1, 2])
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
    def test_bigquery_table_exists_true(self, mock_client):
        """table_exists returns True when the client resolves the table."""
        result = self.hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
        mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
        mock_client.assert_called_once_with(project_id=PROJECT_ID)
        assert result is True
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
    def test_bigquery_table_exists_false(self, mock_client):
        """table_exists returns False when the client raises NotFound."""
        mock_client.return_value.get_table.side_effect = NotFound("Dataset not found")
        result = self.hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
        mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
        mock_client.assert_called_once_with(project_id=PROJECT_ID)
        assert result is False
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
    def test_bigquery_table_partition_exists_true(self, mock_client):
        """table_partition_exists returns True when the partition id is listed."""
        mock_client.return_value.list_partitions.return_value = [PARTITION_ID]
        result = self.hook.table_partition_exists(
            project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
        )
        mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
        mock_client.assert_called_once_with(project_id=PROJECT_ID)
        assert result is True
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
    def test_bigquery_table_partition_exists_false_no_table(self, mock_client):
        """table_partition_exists returns False when the table itself is missing."""
        # NOTE(review): this configures get_table to raise, but the assertion below
        # checks list_partitions -- verify the mocked method matches what
        # table_partition_exists actually calls on the missing-table path.
        mock_client.return_value.get_table.side_effect = NotFound("Dataset not found")
        result = self.hook.table_partition_exists(
            project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
        )
        mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
        mock_client.assert_called_once_with(project_id=PROJECT_ID)
        assert result is False
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
    def test_bigquery_table_partition_exists_false_no_partition(self, mock_client):
        """table_partition_exists returns False when the partition is not listed."""
        mock_client.return_value.list_partitions.return_value = []
        result = self.hook.table_partition_exists(
            project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
        )
        mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
        mock_client.assert_called_once_with(project_id=PROJECT_ID)
        assert result is False
    @mock.patch('airflow.providers.google.cloud.hooks.bigquery.read_gbq')
    def test_get_pandas_df(self, mock_read_gbq):
        """get_pandas_df delegates to pandas-gbq with the hook's credentials."""
        self.hook.get_pandas_df('select 1')
        mock_read_gbq.assert_called_once_with(
            'select 1', credentials=CREDENTIALS, dialect='legacy', project_id=PROJECT_ID, verbose=False
        )
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_invalid_schema_update_options(self, mock_get_service):
        """run_load rejects schema_update_options outside the allowed set."""
        with pytest.raises(
            Exception,
            match=(
                r"\['THIS IS NOT VALID'\] contains invalid schema update options."
                r"Please only use one or more of the following options: "
                r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]"
            ),
        ):
            self.hook.run_load(
                "test.test",
                "test_schema.json",
                ["test_data.json"],
                schema_update_options=["THIS IS NOT VALID"],
            )
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_invalid_schema_update_and_write_disposition(self, mock_get_service):
        """schema_update_options require an append or truncate write disposition."""
        with pytest.raises(
            Exception,
            match="schema_update_options is only allowed if"
            " write_disposition is 'WRITE_APPEND' or 'WRITE_TRUNCATE'.",
        ):
            self.hook.run_load(
                "test.test",
                "test_schema.json",
                ["test_data.json"],
                schema_update_options=['ALLOW_FIELD_ADDITION'],
                write_disposition='WRITE_EMPTY',
            )
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete",
side_effect=[False, True],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_cancel_queries(self, mock_client, mock_poll_job_complete):
running_job_id = 3
self.hook.running_job_id = running_job_id
self.hook.cancel_query()
mock_poll_job_complete.has_calls(mock.call(running_job_id), mock.call(running_job_id))
mock_client.assert_called_once_with(project_id=PROJECT_ID, location=None)
mock_client.return_value.cancel_job.assert_called_once_with(job_id=running_job_id)
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_query_sql_dialect_default(
        self,
        mock_insert,
        _,
    ):
        """run_query defaults to legacy SQL in the generated job configuration."""
        self.hook.run_query('query')
        _, kwargs = mock_insert.call_args
        assert kwargs['configuration']['query']['useLegacySql'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect(self, mock_insert, _):
self.hook.run_query('query', use_legacy_sql=False)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is False
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_query_sql_dialect_legacy_with_query_params(self, mock_insert, _):
        """Query parameters are accepted when standard (non-legacy) SQL is used."""
        # NOTE(review): despite "legacy" in the test name, this passes
        # use_legacy_sql=False -- query params are only valid with standard SQL.
        params = [
            {
                'name': "param_name",
                'parameterType': {'type': "STRING"},
                'parameterValue': {'value': "param_value"},
            }
        ]
        self.hook.run_query('query', use_legacy_sql=False, query_params=params)
        _, kwargs = mock_insert.call_args
        assert kwargs['configuration']['query']['useLegacySql'] is False
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_run_query_sql_dialect_legacy_with_query_params_fails(self, _):
        """Query parameters combined with legacy SQL raise a ValueError."""
        params = [
            {
                'name': "param_name",
                'parameterType': {'type': "STRING"},
                'parameterValue': {'value': "param_value"},
            }
        ]
        with pytest.raises(ValueError, match="Query parameters are not allowed when using legacy SQL"):
            self.hook.run_query('query', use_legacy_sql=True, query_params=params)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_without_sql_fails(self, _):
with pytest.raises(
TypeError, match=r"`BigQueryBaseCursor.run_query` missing 1 required positional argument: `sql`"
):
self.hook.run_query(sql=None)
    @parameterized.expand(
        [
            (['ALLOW_FIELD_ADDITION'], 'WRITE_APPEND'),
            (['ALLOW_FIELD_RELAXATION'], 'WRITE_APPEND'),
            (['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'], 'WRITE_APPEND'),
            (['ALLOW_FIELD_ADDITION'], 'WRITE_TRUNCATE'),
            (['ALLOW_FIELD_RELAXATION'], 'WRITE_TRUNCATE'),
            (['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'], 'WRITE_TRUNCATE'),
        ]
    )
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_query_schema_update_options(
        self,
        schema_update_options,
        write_disposition,
        mock_insert,
        mock_get_service,
    ):
        """Every valid schema_update_options/write_disposition pair lands in the job config."""
        self.hook.run_query(
            sql='query',
            destination_dataset_table='my_dataset.my_table',
            schema_update_options=schema_update_options,
            write_disposition=write_disposition,
        )
        _, kwargs = mock_insert.call_args
        assert kwargs['configuration']['query']['schemaUpdateOptions'] == schema_update_options
        assert kwargs['configuration']['query']['writeDisposition'] == write_disposition
    @parameterized.expand(
        [
            (
                ['INCORRECT_OPTION'],
                None,
                r"\['INCORRECT_OPTION'\] contains invalid schema update options\. "
                r"Please only use one or more of the following options: "
                r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]",
            ),
            (
                ['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION', 'INCORRECT_OPTION'],
                None,
                r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION', 'INCORRECT_OPTION'\] contains invalid "
                r"schema update options\. Please only use one or more of the following options: "
                r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]",
            ),
            (
                ['ALLOW_FIELD_ADDITION'],
                None,
                r"schema_update_options is only allowed if write_disposition is "
                r"'WRITE_APPEND' or 'WRITE_TRUNCATE'",
            ),
        ]
    )
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_run_query_schema_update_options_incorrect(
        self,
        schema_update_options,
        write_disposition,
        expected_regex,
        mock_get_service,
    ):
        """Invalid option names, or options without append/truncate, raise ValueError."""
        with pytest.raises(ValueError, match=expected_regex):
            self.hook.run_query(
                sql='query',
                destination_dataset_table='my_dataset.my_table',
                schema_update_options=schema_update_options,
                write_disposition=write_disposition,
            )
    @parameterized.expand([(True,), (False,)])
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_api_resource_configs(
        self,
        bool_val,
        mock_insert,
        _,
    ):
        """api_resource_configs entries are merged into the query configuration."""
        self.hook.run_query('query', api_resource_configs={'query': {'useQueryCache': bool_val}})
        _, kwargs = mock_insert.call_args
        assert kwargs["configuration"]['query']['useQueryCache'] is bool_val
        # Defaults are still applied alongside the user-supplied config.
        assert kwargs["configuration"]['query']['useLegacySql'] is True
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_api_resource_configs_duplication_warning(self, mock_get_service):
        """Supplying useLegacySql both as a kwarg and in api_resource_configs raises."""
        with pytest.raises(
            ValueError,
            match=(
                r"Values of useLegacySql param are duplicated\. api_resource_configs "
                r"contained useLegacySql param in `query` config and useLegacySql was "
                r"also provided with arg to run_query\(\) method\. Please remove duplicates\."
            ),
        ):
            self.hook.run_query(
                'query', use_legacy_sql=True, api_resource_configs={'query': {'useLegacySql': False}}
            )
def test_validate_value(self):
with pytest.raises(
TypeError, match="case_1 argument must have a type <class 'dict'> not <class 'str'>"
):
_validate_value("case_1", "a", dict)
assert _validate_value("case_2", 0, int) is None
    def test_duplication_check(self):
        """_api_resource_configs_duplication_check raises only on conflicting values."""
        with pytest.raises(
            ValueError,
            match=r"Values of key_one param are duplicated. api_resource_configs contained key_one param in"
            r" `query` config and key_one was also provided with arg to run_query\(\) method. "
            r"Please remove duplicates.",
        ):
            key_one = True
            _api_resource_configs_duplication_check("key_one", key_one, {"key_one": False})
        # Identical values on both sides are not considered a duplication.
        assert _api_resource_configs_duplication_check("key_one", key_one, {"key_one": True}) is None
def test_validate_src_fmt_configs(self):
source_format = "test_format"
valid_configs = ["test_config_known", "compatibility_val"]
backward_compatibility_configs = {"compatibility_val": "val"}
with pytest.raises(
ValueError, match="test_config_unknown is not a valid src_fmt_configs for type test_format."
):
# This config should raise a value error.
src_fmt_configs = {"test_config_unknown": "val"}
_validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
src_fmt_configs = {"test_config_known": "val"}
src_fmt_configs = _validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
assert (
"test_config_known" in src_fmt_configs
), "src_fmt_configs should contain al known src_fmt_configs"
assert (
"compatibility_val" in src_fmt_configs
), "_validate_src_fmt_configs should add backward_compatibility config"
    @parameterized.expand([("AVRO",), ("PARQUET",), ("NEWLINE_DELIMITED_JSON",), ("DATASTORE_BACKUP",)])
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_load_with_non_csv_as_src_fmt(self, fmt, _):
        """run_load accepts every supported non-CSV source format without error."""
        try:
            self.hook.run_load(
                destination_project_dataset_table='my_dataset.my_table',
                source_uris=[],
                source_format=fmt,
                autodetect=True,
            )
        except ValueError:
            self.fail("run_load() raised ValueError unexpectedly!")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_extract(self, mock_insert):
    """run_extract should build an 'extract' job configuration with the CSV
    defaults shown below and hand it to insert_job for the hook's project."""
    source_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
    destination_cloud_storage_uris = ["gs://bucket/file.csv"]
    # Defaults expected when no compression/format/delimiter/header overrides
    # are passed to run_extract.
    expected_configuration = {
        "extract": {
            "sourceTable": {
                "projectId": PROJECT_ID,
                "datasetId": DATASET_ID,
                "tableId": TABLE_ID,
            },
            "compression": "NONE",
            "destinationUris": destination_cloud_storage_uris,
            "destinationFormat": "CSV",
            "fieldDelimiter": ",",
            "printHeader": True,
        }
    }
    self.hook.run_extract(
        source_project_dataset_table=source_project_dataset_table,
        destination_cloud_storage_uris=destination_cloud_storage_uris,
    )
    mock_insert.assert_called_once_with(configuration=expected_configuration, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.SchemaField")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_list_rows(self, mock_client, mock_schema, mock_table):
    """list_rows should build SchemaField objects for selected_fields and
    forward the paging arguments to Client.list_rows."""
    self.hook.list_rows(
        dataset_id=DATASET_ID,
        table_id=TABLE_ID,
        max_results=10,
        selected_fields=["field_1", "field_2"],
        page_token="page123",
        start_index=5,
        location=LOCATION,
    )
    mock_table.from_api_repr.assert_called_once_with({"tableReference": TABLE_REFERENCE_REPR})
    # BUG FIX: Mock.has_calls() is not an assertion method -- it just creates a
    # child mock and always "succeeds", so the original line verified nothing.
    # assert_has_calls actually checks the recorded calls.
    mock_schema.assert_has_calls([mock.call(x, "") for x in ["field_1", "field_2"]])
    mock_client.return_value.list_rows.assert_called_once_with(
        table=mock_table.from_api_repr.return_value,
        max_results=10,
        selected_fields=mock.ANY,
        page_token='page123',
        start_index=5,
    )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_list_rows_with_empty_selected_fields(self, mock_client, mock_table):
    """An empty selected_fields list must be normalized to None before being
    passed to Client.list_rows (i.e. "all fields"), not forwarded as []."""
    self.hook.list_rows(
        dataset_id=DATASET_ID,
        table_id=TABLE_ID,
        max_results=10,
        page_token="page123",
        selected_fields=[],
        start_index=5,
        location=LOCATION,
    )
    mock_table.from_api_repr.assert_called_once_with({"tableReference": TABLE_REFERENCE_REPR})
    mock_client.return_value.list_rows.assert_called_once_with(
        table=mock_table.from_api_repr.return_value,
        max_results=10,
        page_token='page123',
        selected_fields=None,  # [] collapses to None
        start_index=5,
    )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_run_table_delete(self, mock_client, mock_table):
    """run_table_delete resolves the dotted table string via Table.from_string
    and deletes it with not_found_ok mapped from ignore_if_missing."""
    table_ref_str = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"

    self.hook.run_table_delete(table_ref_str, ignore_if_missing=False)

    mock_table.from_string.assert_called_once_with(table_ref_str)
    resolved_table = mock_table.from_string.return_value
    mock_client.return_value.delete_table.assert_called_once_with(
        table=resolved_table, not_found_ok=False
    )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables")
def test_table_upsert_create_new_table(self, mock_get, mock_create):
    """When the dataset lists no existing tables, run_table_upsert must create
    the table rather than update it."""
    resource = {"tableReference": {"tableId": TABLE_ID}}
    mock_get.return_value = []  # dataset is empty -> "create" branch

    self.hook.run_table_upsert(dataset_id=DATASET_ID, table_resource=resource)

    mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
    mock_create.assert_called_once_with(table_resource=resource, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables")
def test_table_upsert_already_exists(self, mock_get, mock_update):
    """When a table with the same tableId already exists, run_table_upsert must
    take the "update" branch instead of creating a duplicate."""
    resource = {"tableReference": {"tableId": TABLE_ID}}
    mock_get.return_value = [{"tableId": TABLE_ID}]  # table present -> "update" branch

    self.hook.run_table_upsert(dataset_id=DATASET_ID, table_resource=resource)

    mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
    mock_update.assert_called_once_with(table_resource=resource)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset")
def test_run_grant_dataset_view_access_granting(self, mock_update, mock_get):
    """If the view is not yet in the dataset's access entries, the hook must
    append a matching AccessEntry and push it via update_dataset(fields=['access'])."""
    view_table = f"{TABLE_ID}_view"
    view_dataset = f"{DATASET_ID}_view"
    # The entry the hook is expected to add for the view.
    view_access = AccessEntry(
        role=None,
        entity_type="view",
        entity_id={'projectId': PROJECT_ID, 'datasetId': view_dataset, 'tableId': view_table},
    )
    dataset = Dataset(DatasetReference.from_string(DATASET_ID, PROJECT_ID))
    dataset.access_entries = []  # start with no grants so the grant path runs
    mock_get.return_value = dataset
    self.hook.run_grant_dataset_view_access(
        source_dataset=DATASET_ID, view_dataset=view_dataset, view_table=view_table
    )
    mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
    assert view_access in dataset.access_entries
    mock_update.assert_called_once_with(
        fields=["access"],
        dataset_resource=dataset.to_api_repr(),
        project_id=PROJECT_ID,
    )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset")
def test_run_grant_dataset_view_access_already_granted(self, mock_update, mock_get):
    """If the view is already among the dataset's access entries, the hook must
    not call update_dataset at all."""
    view_table = f"{TABLE_ID}_view"
    view_dataset = f"{DATASET_ID}_view"
    view_access = AccessEntry(
        role=None,
        entity_type="view",
        entity_id={'projectId': PROJECT_ID, 'datasetId': view_dataset, 'tableId': view_table},
    )
    dataset = Dataset(DatasetReference.from_string(DATASET_ID, PROJECT_ID))
    dataset.access_entries = [view_access]  # grant already present
    mock_get.return_value = dataset
    self.hook.run_grant_dataset_view_access(
        source_dataset=DATASET_ID, view_dataset=view_dataset, view_table=view_table
    )
    mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
    # BUG FIX: `assert len(mock_update.calls) == 0` was vacuous -- `.calls` is
    # an auto-created child MagicMock and len() of a MagicMock is 0, so the
    # assertion could never fail. assert_not_called() performs the real check.
    mock_update.assert_not_called()
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_dataset_tables_list(self, mock_client):
    """get_dataset_tables_list should list tables for the DatasetReference and
    return plain tableReference dicts in the client's order."""
    table_list = [
        {"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "a-1"},
        {"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "b-1"},
        {"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "a-2"},
        {"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "b-2"},
    ]
    # The client yields Table objects; the hook is expected to unwrap them
    # back into the reference dicts above.
    table_list_response = [Table.from_api_repr({"tableReference": t}) for t in table_list]
    mock_client.return_value.list_tables.return_value = table_list_response
    dataset_reference = DatasetReference(PROJECT_ID, DATASET_ID)
    result = self.hook.get_dataset_tables_list(dataset_id=DATASET_ID, project_id=PROJECT_ID)
    mock_client.return_value.list_tables.assert_called_once_with(
        dataset=dataset_reference, max_results=None
    )
    assert table_list == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_poll_job_complete(self, mock_client):
    """poll_job_complete gets a client scoped to the location/project, fetches
    the job by id, and asks it whether it is done (with the default retry)."""
    self.hook.poll_job_complete(job_id=JOB_ID, location=LOCATION, project_id=PROJECT_ID)

    mock_client.assert_called_once_with(location=LOCATION, project_id=PROJECT_ID)
    get_job = mock_client.return_value.get_job
    get_job.assert_called_once_with(job_id=JOB_ID)
    get_job.return_value.done.assert_called_once_with(retry=DEFAULT_RETRY)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("logging.Logger.info")
def test_cancel_query_jobs_to_cancel(
    self,
    mock_logger_info,
    poll_job_complete,
):
    """If the tracked job already reports completion, cancel_query only polls
    once and logs that there is nothing to cancel."""
    poll_job_complete.return_value = True
    self.hook.running_job_id = JOB_ID
    self.hook.cancel_query()
    poll_job_complete.assert_called_once_with(job_id=JOB_ID)
    # BUG FIX: Mock.has_call() is not an assertion API (it is an auto-created
    # child mock, so the original line always "passed"). assert_any_call
    # genuinely verifies the message was logged.
    mock_logger_info.assert_any_call("No running BigQuery jobs to cancel.")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("time.sleep")
@mock.patch("logging.Logger.info")
def test_cancel_query_cancel_timeout(
    self,
    mock_logger_info,
    mock_sleep,
    poll_job_complete,
    mock_client,
):
    """If the job never reports completion, cancel_query issues one cancel_job
    call and then gives up after exhausting its polling budget (13 polls,
    11 sleeps, as pinned by the assertions below)."""
    poll_job_complete.side_effect = [False] * 13
    self.hook.running_job_id = JOB_ID
    self.hook.cancel_query()
    mock_client.return_value.cancel_job.assert_called_once_with(job_id=JOB_ID)
    assert poll_job_complete.call_count == 13
    assert mock_sleep.call_count == 11
    # NOTE(review): Mock.has_call() is NOT an assertion method -- it creates a
    # child mock and always succeeds, so this line verifies nothing. It should
    # presumably become assert_any_call(...), but the exact message text and
    # whether the hook logs with lazy %-args must be confirmed against the
    # hook implementation before tightening it. TODO confirm.
    mock_logger_info.has_call(
        mock.call(
            f"Stopping polling due to timeout. Job with id {JOB_ID} "
            "has not completed cancel and may or may not finish."
        )
    )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("time.sleep")
@mock.patch("logging.Logger.info")
def test_cancel_query_cancel_completed(
    self,
    mock_logger_info,
    mock_sleep,
    poll_job_complete,
    mock_client,
):
    """If the job reports completion on the final poll, cancel_query stops
    polling (13 polls, 11 sleeps) after the single cancel_job call."""
    poll_job_complete.side_effect = [False] * 12 + [True]
    self.hook.running_job_id = JOB_ID
    self.hook.cancel_query()
    mock_client.return_value.cancel_job.assert_called_once_with(job_id=JOB_ID)
    assert poll_job_complete.call_count == 13
    assert mock_sleep.call_count == 11
    # NOTE(review): Mock.has_call() is NOT an assertion method, so this line
    # verifies nothing. Also suspicious: the expected message interpolates
    # PROJECT_ID twice -- the second value looks like it should be the job id.
    # Both need confirming against the hook's actual log call before this can
    # be turned into a real assertion. TODO confirm.
    mock_logger_info.has_call(mock.call(f"Job successfully canceled: {PROJECT_ID}, {PROJECT_ID}"))
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_schema(self, mock_client):
    """get_schema must fetch the table by reference and return the 'schema'
    section of its API representation."""
    schema_fields = [
        {'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
        {'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
    ]
    table_repr = {
        "tableReference": TABLE_REFERENCE_REPR,
        "schema": {"fields": schema_fields},
    }
    mock_client.return_value.get_table.return_value = Table.from_api_repr(table_repr)

    result = self.hook.get_schema(dataset_id=DATASET_ID, table_id=TABLE_ID)

    mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
    assert "fields" in result
    assert len(result["fields"]) == 2
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema')
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table')
def test_update_table_schema_with_policy_tags(self, mock_update, mock_get_schema):
    """update_table_schema with include_policy_tags=True must merge the update
    dicts into the existing schema -- including nested RECORD fields -- apply
    policyTags from the updates, and keep untouched fields as-is."""
    # Existing schema as returned by get_schema.
    mock_get_schema.return_value = {
        "fields": [
            {'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED'},
            {
                'name': 'salary',
                'type': 'INTEGER',
                'mode': 'REQUIRED',
                'policyTags': {'names': ['sensitive']},
            },
            {'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
            {
                'name': 'subrecord',
                'type': 'RECORD',
                'mode': 'REQUIRED',
                'fields': [
                    {
                        'name': 'field_1',
                        'type': 'STRING',
                        'mode': 'REQUIRED',
                        'policyTags': {'names': ['sensitive']},
                    },
                ],
            },
        ]
    }
    # Partial updates: descriptions everywhere, a new policyTags on emp_name,
    # and an explicit empty policyTags on salary (clearing the existing one).
    schema_fields_updates = [
        {'name': 'emp_name', 'description': 'Name of employee', 'policyTags': {'names': ['sensitive']}},
        {
            'name': 'salary',
            'description': 'Monthly salary in USD',
            'policyTags': {},
        },
        {
            'name': 'subrecord',
            'description': 'Some Desc',
            'fields': [
                {'name': 'field_1', 'description': 'Some nested desc'},
            ],
        },
    ]
    # The merged schema expected to be sent to update_table.
    expected_result_schema = {
        'fields': [
            {
                'name': 'emp_name',
                'type': 'STRING',
                'mode': 'REQUIRED',
                'description': 'Name of employee',
                'policyTags': {'names': ['sensitive']},
            },
            {
                'name': 'salary',
                'type': 'INTEGER',
                'mode': 'REQUIRED',
                'description': 'Monthly salary in USD',
                'policyTags': {},
            },
            {'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
            {
                'name': 'subrecord',
                'type': 'RECORD',
                'mode': 'REQUIRED',
                'description': 'Some Desc',
                'fields': [
                    {
                        'name': 'field_1',
                        'type': 'STRING',
                        'mode': 'REQUIRED',
                        'description': 'Some nested desc',
                        'policyTags': {'names': ['sensitive']},
                    }
                ],
            },
        ]
    }
    self.hook.update_table_schema(
        schema_fields_updates=schema_fields_updates,
        include_policy_tags=True,
        dataset_id=DATASET_ID,
        table_id=TABLE_ID,
    )
    mock_update.assert_called_once_with(
        dataset_id=DATASET_ID,
        table_id=TABLE_ID,
        project_id=PROJECT_ID,
        table_resource={'schema': expected_result_schema},
        fields=['schema'],
    )
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema')
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table')
def test_update_table_schema_without_policy_tags(self, mock_update, mock_get_schema):
    """update_table_schema with include_policy_tags=False must merge the field
    updates but strip any policyTags present in the update dicts, so the
    resulting schema sent to update_table carries no policyTags at all."""
    # Existing schema without any policyTags.
    mock_get_schema.return_value = {
        "fields": [
            {'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED'},
            {'name': 'salary', 'type': 'INTEGER', 'mode': 'REQUIRED'},
            {'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
            {
                'name': 'subrecord',
                'type': 'RECORD',
                'mode': 'REQUIRED',
                'fields': [
                    {'name': 'field_1', 'type': 'STRING', 'mode': 'REQUIRED'},
                ],
            },
        ]
    }
    # Note: salary's update DOES carry policyTags; it must be dropped below.
    schema_fields_updates = [
        {'name': 'emp_name', 'description': 'Name of employee'},
        {
            'name': 'salary',
            'description': 'Monthly salary in USD',
            'policyTags': {'names': ['sensitive']},
        },
        {
            'name': 'subrecord',
            'description': 'Some Desc',
            'fields': [
                {'name': 'field_1', 'description': 'Some nested desc'},
            ],
        },
    ]
    # Expected merge: descriptions applied, no policyTags anywhere.
    expected_result_schema = {
        'fields': [
            {'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED', 'description': 'Name of employee'},
            {
                'name': 'salary',
                'type': 'INTEGER',
                'mode': 'REQUIRED',
                'description': 'Monthly salary in USD',
            },
            {'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
            {
                'name': 'subrecord',
                'type': 'RECORD',
                'mode': 'REQUIRED',
                'description': 'Some Desc',
                'fields': [
                    {
                        'name': 'field_1',
                        'type': 'STRING',
                        'mode': 'REQUIRED',
                        'description': 'Some nested desc',
                    }
                ],
            },
        ]
    }
    self.hook.update_table_schema(
        schema_fields_updates=schema_fields_updates,
        include_policy_tags=False,
        dataset_id=DATASET_ID,
        table_id=TABLE_ID,
    )
    mock_update.assert_called_once_with(
        dataset_id=DATASET_ID,
        table_id=TABLE_ID,
        project_id=PROJECT_ID,
        table_resource={'schema': expected_result_schema},
        fields=['schema'],
    )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_source_format(self, mock_get_service):
    """A source_format outside the supported set must be rejected with a
    message listing every valid type."""
    expected_msg = (
        r"JSON is not a valid source format. Please use one of the following types: \['CSV', "
        r"'NEWLINE_DELIMITED_JSON', 'AVRO', 'GOOGLE_SHEETS', 'DATASTORE_BACKUP', 'PARQUET'\]"
    )
    with pytest.raises(Exception, match=expected_msg):
        self.hook.run_load("test.test", "test_schema.json", ["test_data.json"], source_format="json")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_insert_all_succeed(self, mock_client):
    """insert_all fetches the destination table once and streams the rows with
    the ignore/skip flags forwarded verbatim."""
    rows = [{"json": {"a_key": "a_value_0"}}]

    self.hook.insert_all(
        project_id=PROJECT_ID,
        dataset_id=DATASET_ID,
        table_id=TABLE_ID,
        rows=rows,
        ignore_unknown_values=True,
        skip_invalid_rows=True,
    )

    client = mock_client.return_value
    client.get_table.assert_called_once_with(TABLE_REFERENCE)
    client.insert_rows.assert_called_once_with(
        table=client.get_table.return_value,
        rows=rows,
        ignore_unknown_values=True,
        skip_invalid_rows=True,
    )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_insert_all_fail(self, mock_client):
    """When insert_rows reports errors and fail_on_error is set, insert_all
    must surface them as an AirflowException."""
    rows = [{"json": {"a_key": "a_value_0"}}]
    mock_client.return_value.insert_rows.return_value = ["some", "errors"]

    with pytest.raises(AirflowException, match="insert error"):
        self.hook.insert_all(
            project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, rows=rows, fail_on_error=True
        )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
    """Labels passed to run_query must end up inside the insert_job
    configuration payload."""
    labels = {'label1': 'test1', 'label2': 'test2'}

    self.hook.run_query(
        sql='select 1',
        destination_dataset_table='my_dataset.my_table',
        labels=labels,
    )

    _, kwargs = mock_insert.call_args
    assert kwargs["configuration"]['labels'] == labels
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.QueryJob")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_insert_job(self, mock_client, mock_query_job):
    """insert_job should pick the job class by configuration key ('query' here),
    build it from an API representation embedding the jobReference, and block
    on job.result()."""
    job_conf = {
        "query": {
            "query": "SELECT * FROM test",
            "useLegacySql": "False",
        }
    }
    # The hook dispatches on the job class's _JOB_TYPE; make the patched class
    # match the 'query' configuration key.
    mock_query_job._JOB_TYPE = "query"
    self.hook.insert_job(
        configuration=job_conf,
        job_id=JOB_ID,
        project_id=PROJECT_ID,
        location=LOCATION,
    )
    mock_client.assert_called_once_with(
        project_id=PROJECT_ID,
        location=LOCATION,
    )
    mock_query_job.from_api_repr.assert_called_once_with(
        {
            'configuration': job_conf,
            'jobReference': {'jobId': JOB_ID, 'projectId': PROJECT_ID, 'location': LOCATION},
        },
        mock_client.return_value,
    )
    # insert_job waits for the job to finish.
    mock_query_job.from_api_repr.return_value.result.assert_called_once_with()
class TestBigQueryTableSplitter(unittest.TestCase):
    """Tests for _split_tablename: parsing '<project>.<dataset>.<table>' (and
    ':'-separated variants) into its three components."""

    def test_internal_need_default_project(self):
        """A bare 'dataset.table' with no default project must raise."""
        with pytest.raises(Exception, match="INTERNAL: No default project is specified"):
            _split_tablename("dataset.table", None)

    @parameterized.expand(
        [
            # (expected_project, expected_dataset, expected_table, input_string)
            ("project", "dataset", "table", "dataset.table"),
            ("alternative", "dataset", "table", "alternative:dataset.table"),
            ("alternative", "dataset", "table", "alternative.dataset.table"),
            ("alt1:alt", "dataset", "table", "alt1:alt.dataset.table"),
            ("alt1:alt", "dataset", "table", "alt1:alt:dataset.table"),
        ]
    )
    def test_split_tablename(self, project_expected, dataset_expected, table_expected, table_input):
        """Valid inputs split into (project, dataset, table); a missing project
        falls back to the default ('project')."""
        default_project_id = "project"
        project, dataset, table = _split_tablename(table_input, default_project_id)
        assert project_expected == project
        assert dataset_expected == dataset
        assert table_expected == table

    @parameterized.expand(
        [
            # (input_string, var_name, expected_error_template)
            ("alt1:alt2:alt3:dataset.table", None, "Use either : or . to specify project got {}"),
            (
                "alt1.alt.dataset.table",
                None,
                r"Expect format of \(<project\.\|<project\:\)<dataset>\.<table>, got {}",
            ),
            (
                "alt1:alt2:alt.dataset.table",
                "var_x",
                "Format exception for var_x: Use either : or . to specify project got {}",
            ),
            (
                "alt1:alt2:alt:dataset.table",
                "var_x",
                "Format exception for var_x: Use either : or . to specify project got {}",
            ),
            (
                "alt1.alt.dataset.table",
                "var_x",
                r"Format exception for var_x: Expect format of "
                r"\(<project\.\|<project:\)<dataset>.<table>, got {}",
            ),
        ]
    )
    def test_invalid_syntax(self, table_input, var_name, exception_message):
        """Malformed inputs raise; passing var_name prefixes the error message."""
        default_project_id = "project"
        with pytest.raises(Exception, match=exception_message.format(table_input)):
            _split_tablename(table_input, default_project_id, var_name)
class TestTableOperations(_BigQueryBaseTestClass):
    """Tests for table-level hook operations: create (plain/view/materialized
    view), patch, and listing dataset tables."""

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_create_view(self, mock_bq_client, mock_table):
        """create_empty_table with a view definition embeds it under 'view' in
        the table body and creates the table with exists_ok=True."""
        view = {
            'query': 'SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*`',
            "useLegacySql": False,
        }
        self.hook.create_empty_table(
            project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, view=view, retry=DEFAULT_RETRY
        )
        body = {'tableReference': TABLE_REFERENCE_REPR, 'view': view}
        mock_table.from_api_repr.assert_called_once_with(body)
        mock_bq_client.return_value.create_table.assert_called_once_with(
            table=mock_table.from_api_repr.return_value,
            exists_ok=True,
            retry=DEFAULT_RETRY,
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_patch_table(self, mock_client, mock_table):
        """patch_table should translate every keyword into the matching camelCase
        API field and call update_table with exactly those field names."""
        description_patched = 'Test description.'
        expiration_time_patched = 2524608000000
        friendly_name_patched = 'Test friendly name.'
        labels_patched = {'label1': 'test1', 'label2': 'test2'}
        schema_patched = [
            {'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
            {'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name': 'balance', 'type': 'FLOAT', 'mode': 'NULLABLE'},
            {'name': 'new_field', 'type': 'STRING', 'mode': 'NULLABLE'},
        ]
        time_partitioning_patched = {'expirationMs': 10000000}
        require_partition_filter_patched = True
        view_patched = {
            'query': "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
            'useLegacySql': False,
        }
        self.hook.patch_table(
            dataset_id=DATASET_ID,
            table_id=TABLE_ID,
            project_id=PROJECT_ID,
            description=description_patched,
            expiration_time=expiration_time_patched,
            friendly_name=friendly_name_patched,
            labels=labels_patched,
            schema=schema_patched,
            time_partitioning=time_partitioning_patched,
            require_partition_filter=require_partition_filter_patched,
            view=view_patched,
        )
        body = {
            "description": description_patched,
            "expirationTime": expiration_time_patched,
            "friendlyName": friendly_name_patched,
            "labels": labels_patched,
            "schema": {"fields": schema_patched},
            "timePartitioning": time_partitioning_patched,
            "view": view_patched,
            "requirePartitionFilter": require_partition_filter_patched,
        }
        # The update-field list is captured BEFORE tableReference is added,
        # since tableReference identifies the table rather than being patched.
        fields = list(body.keys())
        body["tableReference"] = TABLE_REFERENCE_REPR
        mock_table.from_api_repr.assert_called_once_with(body)
        mock_client.return_value.update_table.assert_called_once_with(
            table=mock_table.from_api_repr.return_value, fields=fields
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_create_empty_table_succeed(self, mock_bq_client, mock_table):
        """With no extras, create_empty_table sends only the tableReference."""
        self.hook.create_empty_table(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
        body = {
            'tableReference': {
                'tableId': TABLE_ID,
                'projectId': PROJECT_ID,
                'datasetId': DATASET_ID,
            }
        }
        mock_table.from_api_repr.assert_called_once_with(body)
        mock_bq_client.return_value.create_table.assert_called_once_with(
            table=mock_table.from_api_repr.return_value, exists_ok=True, retry=DEFAULT_RETRY
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_create_empty_table_with_extras_succeed(self, mock_bq_client, mock_table):
        """Schema, time partitioning and clustering are mapped into the table
        body under 'schema', 'timePartitioning' and 'clustering'."""
        schema_fields = [
            {'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
            {'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name': 'created', 'type': 'DATE', 'mode': 'REQUIRED'},
        ]
        time_partitioning = {"field": "created", "type": "DAY"}
        cluster_fields = ['name']
        self.hook.create_empty_table(
            project_id=PROJECT_ID,
            dataset_id=DATASET_ID,
            table_id=TABLE_ID,
            schema_fields=schema_fields,
            time_partitioning=time_partitioning,
            cluster_fields=cluster_fields,
        )
        body = {
            'tableReference': {
                'tableId': TABLE_ID,
                'projectId': PROJECT_ID,
                'datasetId': DATASET_ID,
            },
            'schema': {'fields': schema_fields},
            'timePartitioning': time_partitioning,
            'clustering': {'fields': cluster_fields},
        }
        mock_table.from_api_repr.assert_called_once_with(body)
        mock_bq_client.return_value.create_table.assert_called_once_with(
            table=mock_table.from_api_repr.return_value, exists_ok=True, retry=DEFAULT_RETRY
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_get_tables_list(self, mock_client):
        """get_dataset_tables lists the dataset's tables (with DEFAULT_RETRY) and
        yields their tableReference contents in the client's order."""
        table_list = [
            {
                "kind": "bigquery#table",
                "id": "your-project:your_dataset.table1",
                "tableReference": {
                    "projectId": "your-project",
                    "datasetId": "your_dataset",
                    "tableId": "table1",
                },
                "type": "TABLE",
                "creationTime": "1565781859261",
            },
            {
                "kind": "bigquery#table",
                "id": "your-project:your_dataset.table2",
                "tableReference": {
                    "projectId": "your-project",
                    "datasetId": "your_dataset",
                    "tableId": "table2",
                },
                "type": "TABLE",
                "creationTime": "1565782713480",
            },
        ]
        table_list_response = [Table.from_api_repr(t) for t in table_list]
        mock_client.return_value.list_tables.return_value = table_list_response
        dataset_reference = DatasetReference(PROJECT_ID, DATASET_ID)
        result = self.hook.get_dataset_tables(dataset_id=DATASET_ID, project_id=PROJECT_ID)
        mock_client.return_value.list_tables.assert_called_once_with(
            dataset=dataset_reference,
            max_results=None,
            retry=DEFAULT_RETRY,
        )
        for res, exp in zip(result, table_list):
            assert res["tableId"] == exp["tableReference"]["tableId"]

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_create_materialized_view(self, mock_bq_client, mock_table):
        """A materialized view definition is embedded under 'materializedView'
        in the table body passed to the client."""
        query = """
        SELECT product, SUM(amount)
        FROM `test-project-id.test_dataset_id.test_table_prefix*`
        GROUP BY product
        """
        materialized_view = {
            'query': query,
            'enableRefresh': True,
            'refreshIntervalMs': 2000000,
        }
        self.hook.create_empty_table(
            project_id=PROJECT_ID,
            dataset_id=DATASET_ID,
            table_id=TABLE_ID,
            materialized_view=materialized_view,
            retry=DEFAULT_RETRY,
        )
        body = {'tableReference': TABLE_REFERENCE_REPR, 'materializedView': materialized_view}
        mock_table.from_api_repr.assert_called_once_with(body)
        mock_bq_client.return_value.create_table.assert_called_once_with(
            table=mock_table.from_api_repr.return_value,
            exists_ok=True,
            retry=DEFAULT_RETRY,
        )
class TestBigQueryCursor(_BigQueryBaseTestClass):
    """Tests for the DB-API-style BigQueryCursor (execute, fetch*, next,
    buffering and flushing)."""

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_execute_with_parameters(self, mock_insert, _):
        """execute interpolates %(...)s parameters into the SQL and submits a
        query job with the cursor defaults."""
        bq_cursor = self.hook.get_cursor()
        bq_cursor.execute("SELECT %(foo)s", {"foo": "bar"})
        conf = {
            'query': {
                'query': "SELECT 'bar'",
                'priority': 'INTERACTIVE',
                'useLegacySql': True,
                'schemaUpdateOptions': [],
            }
        }
        mock_insert.assert_called_once_with(configuration=conf, project_id=PROJECT_ID)

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_execute_many(self, mock_insert, _):
        """executemany submits one query job per parameter set, in order."""
        bq_cursor = self.hook.get_cursor()
        bq_cursor.executemany("SELECT %(foo)s", [{"foo": "bar"}, {"foo": "baz"}])
        assert mock_insert.call_count == 2
        # BUG FIX: the original `assert mock_insert.has_calls(...)` asserted a
        # truthy child Mock and could never fail. assert_has_calls (with a
        # list) performs the real verification.
        mock_insert.assert_has_calls(
            [
                mock.call(
                    configuration={
                        'query': {
                            'query': "SELECT 'bar'",
                            'priority': 'INTERACTIVE',
                            'useLegacySql': True,
                            'schemaUpdateOptions': [],
                        }
                    },
                    project_id=PROJECT_ID,
                ),
                mock.call(
                    configuration={
                        'query': {
                            'query': "SELECT 'baz'",
                            'priority': 'INTERACTIVE',
                            'useLegacySql': True,
                            'schemaUpdateOptions': [],
                        }
                    },
                    project_id=PROJECT_ID,
                ),
            ]
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_description(self, mock_get_service):
        """The DB-API description attribute is not implemented."""
        bq_cursor = self.hook.get_cursor()
        with pytest.raises(NotImplementedError):
            bq_cursor.description

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_close(self, mock_get_service):
        """close() is a no-op returning None."""
        bq_cursor = self.hook.get_cursor()
        result = bq_cursor.close()  # pylint: disable=assignment-from-no-return
        assert result is None

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_rowcount(self, mock_get_service):
        """rowcount is always -1 (unknown), per the DB-API default."""
        bq_cursor = self.hook.get_cursor()
        result = bq_cursor.rowcount
        assert -1 == result

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.next")
    def test_fetchone(self, mock_next, mock_get_service):
        """fetchone delegates to next() exactly once."""
        bq_cursor = self.hook.get_cursor()
        result = bq_cursor.fetchone()
        # BUG FIX: the original line was a bare comparison expression
        # (`mock_next.call_count == 1`) with no `assert`, so it checked nothing.
        assert mock_next.call_count == 1
        assert mock_next.return_value == result

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch(
        "airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.fetchone", side_effect=[1, 2, 3, None]
    )
    def test_fetchall(self, mock_fetchone, mock_get_service):
        """fetchall drains fetchone until it returns None."""
        bq_cursor = self.hook.get_cursor()
        result = bq_cursor.fetchall()
        assert [1, 2, 3] == result

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.fetchone")
    def test_fetchmany(self, mock_fetchone, mock_get_service):
        """fetchmany returns up to `size` rows (default 1) and stops early when
        fetchone is exhausted."""
        side_effect_values = [1, 2, 3, None]
        bq_cursor = self.hook.get_cursor()
        mock_fetchone.side_effect = side_effect_values
        result = bq_cursor.fetchmany()
        assert [1] == result
        mock_fetchone.side_effect = side_effect_values
        result = bq_cursor.fetchmany(2)
        assert [1, 2] == result
        mock_fetchone.side_effect = side_effect_values
        result = bq_cursor.fetchmany(5)
        assert [1, 2, 3] == result

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_next_no_jobid(self, mock_get_service):
        """With no job_id there is nothing to page through; next() yields None."""
        bq_cursor = self.hook.get_cursor()
        bq_cursor.job_id = None
        result = bq_cursor.next()
        assert result is None

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_next_buffer(self, mock_get_service):
        """next() serves buffered rows first; once the buffer is empty and all
        pages are loaded, it yields None."""
        bq_cursor = self.hook.get_cursor()
        bq_cursor.job_id = JOB_ID
        bq_cursor.buffer = [1, 2]
        result = bq_cursor.next()
        assert 1 == result
        result = bq_cursor.next()
        assert 2 == result
        bq_cursor.all_pages_loaded = True
        result = bq_cursor.next()
        assert result is None

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_next(self, mock_get_service):
        """next() fetches a page via jobs().getQueryResults, converts the raw
        row cells using the schema, and then serves rows from the buffer."""
        mock_get_query_results = mock_get_service.return_value.jobs.return_value.getQueryResults
        mock_execute = mock_get_query_results.return_value.execute
        mock_execute.return_value = {
            "rows": [
                {"f": [{"v": "one"}, {"v": 1}]},
                {"f": [{"v": "two"}, {"v": 2}]},
            ],
            "pageToken": None,
            "schema": {
                "fields": [
                    {"name": "field_1", "type": "STRING"},
                    {"name": "field_2", "type": "INTEGER"},
                ]
            },
        }
        bq_cursor = self.hook.get_cursor()
        bq_cursor.job_id = JOB_ID
        bq_cursor.location = LOCATION
        result = bq_cursor.next()
        assert ['one', 1] == result
        result = bq_cursor.next()
        assert ['two', 2] == result
        # Both rows came from a single API page.
        mock_get_query_results.assert_called_once_with(
            jobId=JOB_ID, location=LOCATION, pageToken=None, projectId='bq-project'
        )
        mock_execute.assert_called_once_with(num_retries=bq_cursor.num_retries)

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.flush_results")
    def test_next_no_rows(self, mock_flush_results, mock_get_service):
        """An empty API response makes next() flush state and return None."""
        mock_get_query_results = mock_get_service.return_value.jobs.return_value.getQueryResults
        mock_execute = mock_get_query_results.return_value.execute
        mock_execute.return_value = {}
        bq_cursor = self.hook.get_cursor()
        bq_cursor.job_id = JOB_ID
        result = bq_cursor.next()
        assert result is None
        mock_get_query_results.assert_called_once_with(
            jobId=JOB_ID, location=None, pageToken=None, projectId='bq-project'
        )
        mock_execute.assert_called_once_with(num_retries=bq_cursor.num_retries)
        assert mock_flush_results.call_count == 1

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.flush_results")
    def test_flush_cursor_in_execute(self, _, mock_insert, mock_get_service):
        """execute() submits exactly one job (flush_results is patched out)."""
        bq_cursor = self.hook.get_cursor()
        bq_cursor.execute("SELECT %(foo)s", {"foo": "bar"})
        assert mock_insert.call_count == 1

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_flush_cursor(self, mock_get_service):
        """flush_results resets paging token, job id, page flag and buffer."""
        bq_cursor = self.hook.get_cursor()
        bq_cursor.page_token = '456dcea9-fcbf-4f02-b570-83f5297c685e'
        bq_cursor.job_id = 'c0a79ae4-0e72-4593-a0d0-7dbbf726f193'
        bq_cursor.all_pages_loaded = True
        bq_cursor.buffer = [('a', 100, 200), ('b', 200, 300)]
        bq_cursor.flush_results()
        assert bq_cursor.page_token is None
        assert bq_cursor.job_id is None
        assert not bq_cursor.all_pages_loaded
        assert bq_cursor.buffer == []

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_arraysize(self, mock_get_service):
        """set_arraysize updates both buffersize and arraysize together."""
        bq_cursor = self.hook.get_cursor()
        assert bq_cursor.buffersize is None
        assert bq_cursor.arraysize == 1
        bq_cursor.set_arraysize(10)
        assert bq_cursor.buffersize == 10
        assert bq_cursor.arraysize == 10
class TestDatasetsOperations(_BigQueryBaseTestClass):
def test_create_empty_dataset_no_dataset_id_err(self):
    """Calling create_empty_dataset with neither id nor reference must raise."""
    expected_error = r"Please specify `datasetId`"
    with pytest.raises(ValueError, match=expected_error):
        self.hook.create_empty_dataset(dataset_id=None, project_id=None)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_with_params(self, mock_client, mock_dataset):
    """Explicit project/dataset/location arguments are assembled into a
    dataset body and created with exists_ok=True."""
    self.hook.create_empty_dataset(project_id=PROJECT_ID, dataset_id=DATASET_ID, location=LOCATION)

    repr_factory = mock_dataset.from_api_repr
    repr_factory.assert_called_once_with(
        {
            "location": LOCATION,
            "datasetReference": {"datasetId": DATASET_ID, "projectId": PROJECT_ID},
        }
    )
    mock_client.return_value.create_dataset.assert_called_once_with(
        dataset=repr_factory.return_value, exists_ok=True
    )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_with_object(self, mock_client, mock_dataset):
    """A complete dataset_reference dict is forwarded to the API untouched."""
    reference = {
        "location": "LOCATION",
        "datasetReference": {"datasetId": "DATASET_ID", "projectId": "PROJECT_ID"},
    }

    self.hook.create_empty_dataset(dataset_reference=reference)

    repr_factory = mock_dataset.from_api_repr
    repr_factory.assert_called_once_with(reference)
    mock_client.return_value.create_dataset.assert_called_once_with(
        dataset=repr_factory.return_value, exists_ok=True
    )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_use_values_from_object(self, mock_client, mock_dataset):
dataset = {
"location": "LOCATION",
"datasetReference": {"datasetId": "DATASET_ID", "projectId": "PROJECT_ID"},
}
self.hook.create_empty_dataset(
dataset_reference=dataset,
location="Unknown location",
dataset_id="Fashionable Dataset",
project_id="Amazing Project",
)
api_repr = mock_dataset.from_api_repr
api_repr.assert_called_once_with(dataset)
mock_client.return_value.create_dataset.assert_called_once_with(
dataset=api_repr.return_value, exists_ok=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_dataset(self, mock_client):
_expected_result = {
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
}
expected_result = Dataset.from_api_repr(_expected_result)
mock_client.return_value.get_dataset.return_value = expected_result
result = self.hook.get_dataset(dataset_id=DATASET_ID, project_id=PROJECT_ID)
mock_client.return_value.get_dataset.assert_called_once_with(
dataset_ref=DatasetReference(PROJECT_ID, DATASET_ID)
)
assert result == expected_result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_datasets_list(self, mock_client):
datasets = [
{
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
},
{
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_1_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_1_test"},
},
]
return_value = [DatasetListItem(d) for d in datasets]
mock_client.return_value.list_datasets.return_value = return_value
result = self.hook.get_datasets_list(project_id=PROJECT_ID)
mock_client.return_value.list_datasets.assert_called_once_with(
project=PROJECT_ID,
include_all=False,
filter=None,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
)
for exp, res in zip(datasets, result):
assert res.full_dataset_id == exp["id"]
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_delete_dataset(self, mock_client):
delete_contents = True
self.hook.delete_dataset(
project_id=PROJECT_ID, dataset_id=DATASET_ID, delete_contents=delete_contents
)
mock_client.return_value.delete_dataset.assert_called_once_with(
dataset=DatasetReference(PROJECT_ID, DATASET_ID),
delete_contents=delete_contents,
retry=DEFAULT_RETRY,
not_found_ok=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_patch_dataset(self, mock_get_service):
dataset_resource = {"access": [{"role": "WRITER", "groupByEmail": "cloud-logs@google.com"}]}
method = mock_get_service.return_value.datasets.return_value.patch
self.hook.patch_dataset(
dataset_id=DATASET_ID, project_id=PROJECT_ID, dataset_resource=dataset_resource
)
method.assert_called_once_with(projectId=PROJECT_ID, datasetId=DATASET_ID, body=dataset_resource)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_update_dataset(self, mock_client, mock_dataset):
dataset_resource = {
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
}
method = mock_client.return_value.update_dataset
dataset = Dataset.from_api_repr(dataset_resource)
mock_dataset.from_api_repr.return_value = dataset
method.return_value = dataset
result = self.hook.update_dataset(
dataset_id=DATASET_ID,
project_id=PROJECT_ID,
dataset_resource=dataset_resource,
fields=["location"],
)
mock_dataset.from_api_repr.assert_called_once_with(dataset_resource)
method.assert_called_once_with(
dataset=dataset,
fields=["location"],
retry=DEFAULT_RETRY,
)
assert result == dataset
class TestTimePartitioningInRunJob(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_default(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load'].get('timePartitioning') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_with_auto_detect(self, mock_insert):
destination_project_dataset_table = "autodetect.table"
self.hook.run_load(destination_project_dataset_table, [], [], autodetect=True)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['autodetect'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_arg(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table=f"{DATASET_ID}.{TABLE_ID}",
schema_fields=[],
source_uris=[],
time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
)
configuration = {
'load': {
'autodetect': False,
'createDisposition': 'CREATE_IF_NEEDED',
'destinationTable': {'projectId': PROJECT_ID, 'datasetId': DATASET_ID, 'tableId': TABLE_ID},
'sourceFormat': 'CSV',
'sourceUris': [],
'writeDisposition': 'WRITE_EMPTY',
'ignoreUnknownValues': False,
'timePartitioning': {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
'skipLeadingRows': 0,
'fieldDelimiter': ',',
'quote': None,
'allowQuotedNewlines': False,
'encoding': 'UTF-8',
}
}
mock_insert.assert_called_once_with(configuration=configuration, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table=f"{DATASET_ID}.{TABLE_ID}",
time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
)
configuration = {
'query': {
'query': 'select 1',
'priority': 'INTERACTIVE',
'useLegacySql': True,
'timePartitioning': {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
'schemaUpdateOptions': [],
'destinationTable': {'projectId': PROJECT_ID, 'datasetId': DATASET_ID, 'tableId': TABLE_ID},
'allowLargeResults': False,
'flattenResults': None,
'writeDisposition': 'WRITE_EMPTY',
'createDisposition': 'CREATE_IF_NEEDED',
}
}
mock_insert.assert_called_once_with(configuration=configuration, project_id=PROJECT_ID)
def test_dollar_makes_partition(self):
tp_out = _cleanse_time_partitioning('test.teast$20170101', {})
expect = {'type': 'DAY'}
assert tp_out == expect
def test_extra_time_partitioning_options(self):
tp_out = _cleanse_time_partitioning(
'test.teast', {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
)
expect = {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
assert tp_out == expect
class TestClusteringInRunJob(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_default(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load'].get('clustering') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_arg(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
cluster_fields=['field1', 'field2'],
time_partitioning={'type': 'DAY'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['clustering'] == {'fields': ['field1', 'field2']}
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_default(self, mock_insert):
self.hook.run_query(sql='select 1')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query'].get('clustering') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
cluster_fields=['field1', 'field2'],
time_partitioning={'type': 'DAY'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['clustering'] == {'fields': ['field1', 'field2']}
class TestBigQueryHookLegacySql(_BigQueryBaseTestClass):
"""Ensure `use_legacy_sql` param in `BigQueryHook` propagates properly."""
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_hook_uses_legacy_sql_by_default(self, mock_insert, _):
self.hook.get_first('query')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useLegacySql'] is True
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_legacy_sql_override_propagates_properly(
self, mock_insert, mock_get_service, mock_get_creds_and_proj_id
):
bq_hook = BigQueryHook(use_legacy_sql=False)
bq_hook.get_first('query')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useLegacySql'] is False
class TestBigQueryHookRunWithConfiguration(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.LoadJob")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_run_with_configuration_location(self, mock_client, mock_job):
running_job_id = 'job_vjdi28vskdui2onru23'
location = 'asia-east1'
mock_job._JOB_TYPE = "load"
conf = {"load": {}}
self.hook.running_job_id = running_job_id
self.hook.location = location
self.hook.run_with_configuration(conf)
mock_client.assert_called_once_with(project_id=PROJECT_ID, location=location)
mock_job.from_api_repr.assert_called_once_with(
{
"configuration": conf,
"jobReference": {"jobId": mock.ANY, "projectId": PROJECT_ID, "location": location},
},
mock_client.return_value,
)
class TestBigQueryWithKMS(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_table_with_kms(self, mock_bq_client, mock_table):
schema_fields = [{"name": "id", "type": "STRING", "mode": "REQUIRED"}]
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.create_empty_table(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
schema_fields=schema_fields,
encryption_configuration=encryption_configuration,
)
body = {
"tableReference": {"tableId": TABLE_ID, 'projectId': PROJECT_ID, 'datasetId': DATASET_ID},
"schema": {"fields": schema_fields},
"encryptionConfiguration": encryption_configuration,
}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
exists_ok=True,
retry=DEFAULT_RETRY,
)
# pylint: disable=too-many-locals
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_with_kms(self, mock_create):
external_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
source_uris = ['test_data.csv']
source_format = 'CSV'
autodetect = False
compression = 'NONE'
ignore_unknown_values = False
max_bad_records = 10
skip_leading_rows = 1
field_delimiter = ','
quote_character = None
allow_quoted_newlines = False
allow_jagged_rows = False
encoding = "UTF-8"
labels = {'label1': 'test1', 'label2': 'test2'}
schema_fields = [{'mode': 'REQUIRED', 'name': 'id', 'type': 'STRING', 'description': None}]
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.create_external_table(
external_project_dataset_table=external_project_dataset_table,
source_uris=source_uris,
source_format=source_format,
autodetect=autodetect,
compression=compression,
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records,
skip_leading_rows=skip_leading_rows,
field_delimiter=field_delimiter,
quote_character=quote_character,
allow_jagged_rows=allow_jagged_rows,
encoding=encoding,
allow_quoted_newlines=allow_quoted_newlines,
labels=labels,
schema_fields=schema_fields,
encryption_configuration=encryption_configuration,
)
body = {
'externalDataConfiguration': {
'autodetect': autodetect,
'sourceFormat': source_format,
'sourceUris': source_uris,
'compression': compression,
'ignoreUnknownValues': ignore_unknown_values,
'schema': {'fields': schema_fields},
'maxBadRecords': max_bad_records,
'csvOptions': {
'skipLeadingRows': skip_leading_rows,
'fieldDelimiter': field_delimiter,
'quote': quote_character,
'allowQuotedNewlines': allow_quoted_newlines,
'allowJaggedRows': allow_jagged_rows,
'encoding': encoding,
},
},
'tableReference': {
'projectId': PROJECT_ID,
'datasetId': DATASET_ID,
'tableId': TABLE_ID,
},
'labels': labels,
"encryptionConfiguration": encryption_configuration,
}
mock_create.assert_called_once_with(
table_resource=body,
project_id=PROJECT_ID,
location=None,
exists_ok=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_update_table(self, mock_client, mock_table):
description_patched = 'Test description.'
expiration_time_patched = 2524608000000
friendly_name_patched = 'Test friendly name.'
labels_patched = {'label1': 'test1', 'label2': 'test2'}
schema_patched = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'balance', 'type': 'FLOAT', 'mode': 'NULLABLE'},
{'name': 'new_field', 'type': 'STRING', 'mode': 'NULLABLE'},
]
time_partitioning_patched = {'expirationMs': 10000000}
require_partition_filter_patched = True
view_patched = {
'query': "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
'useLegacySql': False,
}
body = {
"tableReference": {
"projectId": PROJECT_ID,
"datasetId": DATASET_ID,
"tableId": TABLE_ID,
},
"description": description_patched,
"expirationTime": expiration_time_patched,
"friendlyName": friendly_name_patched,
"labels": labels_patched,
"schema": {"fields": schema_patched},
"timePartitioning": time_partitioning_patched,
"view": view_patched,
"requirePartitionFilter": require_partition_filter_patched,
}
fields = list(body.keys())
self.hook.update_table(
table_resource=body,
fields=fields,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
)
mock_table.from_api_repr.assert_called_once_with(body)
mock_client.return_value.update_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, fields=fields
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_query(sql='query', encryption_configuration=encryption_configuration)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['query']['destinationEncryptionConfiguration'] is encryption_configuration
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_copy_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_copy(
source_project_dataset_tables='p.d.st',
destination_project_dataset_table='p.d.dt',
encryption_configuration=encryption_configuration,
)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['copy']['destinationEncryptionConfiguration'] is encryption_configuration
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_load(
destination_project_dataset_table='p.d.dt',
source_uris=['abc.csv'],
autodetect=True,
encryption_configuration=encryption_configuration,
)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['load']['destinationEncryptionConfiguration'] is encryption_configuration
)
class TestBigQueryBaseCursorMethodsDeprecationWarning(unittest.TestCase):
@parameterized.expand(
[
("create_empty_table",),
("create_empty_dataset",),
("get_dataset_tables",),
("delete_dataset",),
("create_external_table",),
("patch_table",),
("insert_all",),
("update_dataset",),
("patch_dataset",),
("get_dataset_tables_list",),
("get_datasets_list",),
("get_dataset",),
("run_grant_dataset_view_access",),
("run_table_upsert",),
("run_table_delete",),
("get_tabledata",),
("get_schema",),
("poll_job_complete",),
("cancel_query",),
("run_with_configuration",),
("run_load",),
("run_copy",),
("run_extract",),
("run_query",),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook")
def test_deprecation_warning(self, func_name, mock_bq_hook):
args, kwargs = [1], {"param1": "val1"}
new_path = re.escape(f"`airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.{func_name}`")
message_pattern = fr"This method is deprecated\.\s+Please use {new_path}"
message_regex = re.compile(message_pattern, re.MULTILINE)
mocked_func = getattr(mock_bq_hook, func_name)
bq_cursor = BigQueryCursor(mock.MagicMock(), PROJECT_ID, mock_bq_hook)
func = getattr(bq_cursor, func_name)
with pytest.warns(DeprecationWarning, match=message_regex):
_ = func(*args, **kwargs)
mocked_func.assert_called_once_with(*args, **kwargs)
assert re.search(f".*{new_path}.*", func.__doc__)
class TestBigQueryWithLabelsAndDescription(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_labels(self, mock_insert):
labels = {'label1': 'test1', 'label2': 'test2'}
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
labels=labels,
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['destinationTableProperties']['labels'] is labels
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_description(self, mock_insert):
description = "Test Description"
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
description=description,
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['destinationTableProperties']['description'] is description
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_labels(self, mock_create):
labels = {'label1': 'test1', 'label2': 'test2'}
self.hook.create_external_table(
external_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
labels=labels,
)
_, kwargs = mock_create.call_args
self.assertDictEqual(kwargs['table_resource']['labels'], labels)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_description(self, mock_create):
description = "Test Description"
self.hook.create_external_table(
external_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
description=description,
)
_, kwargs = mock_create.call_args
assert kwargs['table_resource']['description'] is description
| #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=not-callable
import re
import unittest
from unittest import mock
import pytest
from google.cloud.bigquery import DEFAULT_RETRY, DatasetReference, Table, TableReference
from google.cloud.bigquery.dataset import AccessEntry, Dataset, DatasetListItem
from google.cloud.exceptions import NotFound
from parameterized import parameterized
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.bigquery import (
BigQueryCursor,
BigQueryHook,
_api_resource_configs_duplication_check,
_cleanse_time_partitioning,
_split_tablename,
_validate_src_fmt_configs,
_validate_value,
)
PROJECT_ID = "bq-project"
CREDENTIALS = "bq-credentials"
DATASET_ID = "bq_dataset"
TABLE_ID = "bq_table"
PARTITION_ID = "20200101"
VIEW_ID = 'bq_view'
JOB_ID = "1234"
LOCATION = 'europe-north1'
TABLE_REFERENCE_REPR = {
'tableId': TABLE_ID,
'datasetId': DATASET_ID,
'projectId': PROJECT_ID,
}
TABLE_REFERENCE = TableReference.from_api_repr(TABLE_REFERENCE_REPR)
class _BigQueryBaseTestClass(unittest.TestCase):
def setUp(self) -> None:
class MockedBigQueryHook(BigQueryHook):
def _get_credentials_and_project_id(self):
return CREDENTIALS, PROJECT_ID
self.hook = MockedBigQueryHook()
class TestBigQueryHookMethods(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryConnection")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.build")
def test_bigquery_client_creation(self, mock_build, mock_authorize, mock_bigquery_connection):
result = self.hook.get_conn()
mock_build.assert_called_once_with(
'bigquery', 'v2', http=mock_authorize.return_value, cache_discovery=False
)
mock_bigquery_connection.assert_called_once_with(
service=mock_build.return_value,
project_id=PROJECT_ID,
hook=self.hook,
use_legacy_sql=self.hook.use_legacy_sql,
location=self.hook.location,
num_retries=self.hook.num_retries,
)
assert mock_bigquery_connection.return_value == result
@mock.patch("airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__")
def test_bigquery_bigquery_conn_id_deprecation_warning(
self,
mock_base_hook_init,
):
bigquery_conn_id = "bigquery conn id"
warning_message = (
"The bigquery_conn_id parameter has been deprecated. "
"You should pass the gcp_conn_id parameter."
)
with pytest.warns(DeprecationWarning) as warnings:
BigQueryHook(bigquery_conn_id=bigquery_conn_id)
mock_base_hook_init.assert_called_once_with(
delegate_to=None,
gcp_conn_id='bigquery conn id',
impersonation_chain=None,
)
assert warning_message == str(warnings[0].message)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_location_propagates_properly(self, run_with_config, _):
# TODO: this creates side effect
assert self.hook.location is None
self.hook.run_query(sql='select 1', location='US')
assert run_with_config.call_count == 1
assert self.hook.location == 'US'
def test_bigquery_insert_rows_not_implemented(self):
with pytest.raises(NotImplementedError):
self.hook.insert_rows(table="table", rows=[1, 2])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_exists_true(self, mock_client):
result = self.hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_exists_false(self, mock_client):
mock_client.return_value.get_table.side_effect = NotFound("Dataset not found")
result = self.hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_true(self, mock_client):
mock_client.return_value.list_partitions.return_value = [PARTITION_ID]
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_false_no_table(self, mock_client):
mock_client.return_value.get_table.side_effect = NotFound("Dataset not found")
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_false_no_partition(self, mock_client):
mock_client.return_value.list_partitions.return_value = []
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.read_gbq')
def test_get_pandas_df(self, mock_read_gbq):
self.hook.get_pandas_df('select 1')
mock_read_gbq.assert_called_once_with(
'select 1', credentials=CREDENTIALS, dialect='legacy', project_id=PROJECT_ID, verbose=False
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_schema_update_options(self, mock_get_service):
with pytest.raises(
Exception,
match=(
r"\['THIS IS NOT VALID'\] contains invalid schema update options."
r"Please only use one or more of the following options: "
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]"
),
):
self.hook.run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=["THIS IS NOT VALID"],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_schema_update_and_write_disposition(self, mock_get_service):
with pytest.raises(
Exception,
match="schema_update_options is only allowed if"
" write_disposition is 'WRITE_APPEND' or 'WRITE_TRUNCATE'.",
):
self.hook.run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=['ALLOW_FIELD_ADDITION'],
write_disposition='WRITE_EMPTY',
)
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete",
side_effect=[False, True],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_cancel_queries(self, mock_client, mock_poll_job_complete):
running_job_id = 3
self.hook.running_job_id = running_job_id
self.hook.cancel_query()
mock_poll_job_complete.has_calls(mock.call(running_job_id), mock.call(running_job_id))
mock_client.assert_called_once_with(project_id=PROJECT_ID, location=None)
mock_client.return_value.cancel_job.assert_called_once_with(job_id=running_job_id)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect_default(
self,
mock_insert,
_,
):
self.hook.run_query('query')
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect(self, mock_insert, _):
self.hook.run_query('query', use_legacy_sql=False)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect_legacy_with_query_params(self, mock_insert, _):
params = [
{
'name': "param_name",
'parameterType': {'type': "STRING"},
'parameterValue': {'value': "param_value"},
}
]
self.hook.run_query('query', use_legacy_sql=False, query_params=params)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_sql_dialect_legacy_with_query_params_fails(self, _):
params = [
{
'name': "param_name",
'parameterType': {'type': "STRING"},
'parameterValue': {'value': "param_value"},
}
]
with pytest.raises(ValueError, match="Query parameters are not allowed when using legacy SQL"):
self.hook.run_query('query', use_legacy_sql=True, query_params=params)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_without_sql_fails(self, _):
with pytest.raises(
TypeError, match=r"`BigQueryBaseCursor.run_query` missing 1 required positional argument: `sql`"
):
self.hook.run_query(sql=None)
@parameterized.expand(
[
(['ALLOW_FIELD_ADDITION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_RELAXATION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_ADDITION'], 'WRITE_TRUNCATE'),
(['ALLOW_FIELD_RELAXATION'], 'WRITE_TRUNCATE'),
(['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'], 'WRITE_TRUNCATE'),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_schema_update_options(
self,
schema_update_options,
write_disposition,
mock_insert,
mock_get_service,
):
self.hook.run_query(
sql='query',
destination_dataset_table='my_dataset.my_table',
schema_update_options=schema_update_options,
write_disposition=write_disposition,
)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['schemaUpdateOptions'] == schema_update_options
assert kwargs['configuration']['query']['writeDisposition'] == write_disposition
    @parameterized.expand(
        [
            (
                ['INCORRECT_OPTION'],
                None,
                r"\['INCORRECT_OPTION'\] contains invalid schema update options\. "
                r"Please only use one or more of the following options: "
                r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]",
            ),
            (
                ['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION', 'INCORRECT_OPTION'],
                None,
                r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION', 'INCORRECT_OPTION'\] contains invalid "
                r"schema update options\. Please only use one or more of the following options: "
                r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]",
            ),
            (
                ['ALLOW_FIELD_ADDITION'],
                None,
                r"schema_update_options is only allowed if write_disposition is "
                r"'WRITE_APPEND' or 'WRITE_TRUNCATE'",
            ),
        ]
    )
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_run_query_schema_update_options_incorrect(
        self,
        schema_update_options,
        write_disposition,
        expected_regex,
        mock_get_service,
    ):
        """Unknown schema_update_options, or options without an allowed write_disposition,
        must be rejected with a ValueError whose message matches ``expected_regex``."""
        with pytest.raises(ValueError, match=expected_regex):
            self.hook.run_query(
                sql='query',
                destination_dataset_table='my_dataset.my_table',
                schema_update_options=schema_update_options,
                write_disposition=write_disposition,
            )
@parameterized.expand([(True,), (False,)])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_api_resource_configs(
self,
bool_val,
mock_insert,
_,
):
self.hook.run_query('query', api_resource_configs={'query': {'useQueryCache': bool_val}})
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useQueryCache'] is bool_val
assert kwargs["configuration"]['query']['useLegacySql'] is True
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_api_resource_configs_duplication_warning(self, mock_get_service):
        """Passing useLegacySql both as an argument and inside api_resource_configs
        must raise a ValueError flagging the duplication."""
        with pytest.raises(
            ValueError,
            match=(
                r"Values of useLegacySql param are duplicated\. api_resource_configs "
                r"contained useLegacySql param in `query` config and useLegacySql was "
                r"also provided with arg to run_query\(\) method\. Please remove duplicates\."
            ),
        ):
            self.hook.run_query(
                'query', use_legacy_sql=True, api_resource_configs={'query': {'useLegacySql': False}}
            )
def test_validate_value(self):
with pytest.raises(
TypeError, match="case_1 argument must have a type <class 'dict'> not <class 'str'>"
):
_validate_value("case_1", "a", dict)
assert _validate_value("case_2", 0, int) is None
    def test_duplication_check(self):
        """_api_resource_configs_duplication_check raises ValueError when the same key
        carries conflicting values, and returns None when the values agree."""
        with pytest.raises(
            ValueError,
            match=r"Values of key_one param are duplicated. api_resource_configs contained key_one param in"
            r" `query` config and key_one was also provided with arg to run_query\(\) method. "
            r"Please remove duplicates.",
        ):
            key_one = True
            _api_resource_configs_duplication_check("key_one", key_one, {"key_one": False})
        # Identical values are not considered a duplication.
        assert _api_resource_configs_duplication_check("key_one", key_one, {"key_one": True}) is None
def test_validate_src_fmt_configs(self):
source_format = "test_format"
valid_configs = ["test_config_known", "compatibility_val"]
backward_compatibility_configs = {"compatibility_val": "val"}
with pytest.raises(
ValueError, match="test_config_unknown is not a valid src_fmt_configs for type test_format."
):
# This config should raise a value error.
src_fmt_configs = {"test_config_unknown": "val"}
_validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
src_fmt_configs = {"test_config_known": "val"}
src_fmt_configs = _validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
assert (
"test_config_known" in src_fmt_configs
), "src_fmt_configs should contain al known src_fmt_configs"
assert (
"compatibility_val" in src_fmt_configs
), "_validate_src_fmt_configs should add backward_compatibility config"
    @parameterized.expand([("AVRO",), ("PARQUET",), ("NEWLINE_DELIMITED_JSON",), ("DATASTORE_BACKUP",)])
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_load_with_non_csv_as_src_fmt(self, fmt, _):
        """Non-CSV source formats must be accepted by run_load without raising ValueError.

        NOTE(review): the try/self.fail wrapper is redundant under pytest (an unexpected
        ValueError would fail the test anyway), but it is kept for the explicit message.
        """
        try:
            self.hook.run_load(
                destination_project_dataset_table='my_dataset.my_table',
                source_uris=[],
                source_format=fmt,
                autodetect=True,
            )
        except ValueError:
            self.fail("run_load() raised ValueError unexpectedly!")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_extract(self, mock_insert):
source_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
destination_cloud_storage_uris = ["gs://bucket/file.csv"]
expected_configuration = {
"extract": {
"sourceTable": {
"projectId": PROJECT_ID,
"datasetId": DATASET_ID,
"tableId": TABLE_ID,
},
"compression": "NONE",
"destinationUris": destination_cloud_storage_uris,
"destinationFormat": "CSV",
"fieldDelimiter": ",",
"printHeader": True,
}
}
self.hook.run_extract(
source_project_dataset_table=source_project_dataset_table,
destination_cloud_storage_uris=destination_cloud_storage_uris,
)
mock_insert.assert_called_once_with(configuration=expected_configuration, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.SchemaField")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_list_rows(self, mock_client, mock_schema, mock_table):
self.hook.list_rows(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
max_results=10,
selected_fields=["field_1", "field_2"],
page_token="page123",
start_index=5,
location=LOCATION,
)
mock_table.from_api_repr.assert_called_once_with({"tableReference": TABLE_REFERENCE_REPR})
mock_schema.has_calls([mock.call(x, "") for x in ["field_1", "field_2"]])
mock_client.return_value.list_rows.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
max_results=10,
selected_fields=mock.ANY,
page_token='page123',
start_index=5,
)
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_list_rows_with_empty_selected_fields(self, mock_client, mock_table):
        """An empty selected_fields list must be normalized to None before being
        passed to Client.list_rows."""
        self.hook.list_rows(
            dataset_id=DATASET_ID,
            table_id=TABLE_ID,
            max_results=10,
            page_token="page123",
            selected_fields=[],
            start_index=5,
            location=LOCATION,
        )
        mock_table.from_api_repr.assert_called_once_with({"tableReference": TABLE_REFERENCE_REPR})
        mock_client.return_value.list_rows.assert_called_once_with(
            table=mock_table.from_api_repr.return_value,
            max_results=10,
            page_token='page123',
            selected_fields=None,
            start_index=5,
        )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_run_table_delete(self, mock_client, mock_table):
source_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
self.hook.run_table_delete(source_project_dataset_table, ignore_if_missing=False)
mock_table.from_string.assert_called_once_with(source_project_dataset_table)
mock_client.return_value.delete_table.assert_called_once_with(
table=mock_table.from_string.return_value, not_found_ok=False
)
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables")
    def test_table_upsert_create_new_table(self, mock_get, mock_create):
        """When the table is absent from the dataset, run_table_upsert creates it."""
        table_resource = {"tableReference": {"tableId": TABLE_ID}}
        mock_get.return_value = []  # dataset contains no tables
        self.hook.run_table_upsert(dataset_id=DATASET_ID, table_resource=table_resource)
        mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
        mock_create.assert_called_once_with(table_resource=table_resource, project_id=PROJECT_ID)
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables")
    def test_table_upsert_already_exists(self, mock_get, mock_update):
        """When the table already exists in the dataset, run_table_upsert updates it."""
        table_resource = {"tableReference": {"tableId": TABLE_ID}}
        mock_get.return_value = [{"tableId": TABLE_ID}]  # table already present
        self.hook.run_table_upsert(dataset_id=DATASET_ID, table_resource=table_resource)
        mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
        mock_update.assert_called_once_with(table_resource=table_resource)
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset")
    def test_run_grant_dataset_view_access_granting(self, mock_update, mock_get):
        """When the view is not yet authorized, run_grant_dataset_view_access adds the
        view AccessEntry and pushes the updated `access` field to the dataset."""
        view_table = f"{TABLE_ID}_view"
        view_dataset = f"{DATASET_ID}_view"
        view_access = AccessEntry(
            role=None,
            entity_type="view",
            entity_id={'projectId': PROJECT_ID, 'datasetId': view_dataset, 'tableId': view_table},
        )
        # Source dataset starts with no access entries, so the grant must be applied.
        dataset = Dataset(DatasetReference.from_string(DATASET_ID, PROJECT_ID))
        dataset.access_entries = []
        mock_get.return_value = dataset
        self.hook.run_grant_dataset_view_access(
            source_dataset=DATASET_ID, view_dataset=view_dataset, view_table=view_table
        )
        mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
        assert view_access in dataset.access_entries
        mock_update.assert_called_once_with(
            fields=["access"],
            dataset_resource=dataset.to_api_repr(),
            project_id=PROJECT_ID,
        )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset")
def test_run_grant_dataset_view_access_already_granted(self, mock_update, mock_get):
view_table = f"{TABLE_ID}_view"
view_dataset = f"{DATASET_ID}_view"
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={'projectId': PROJECT_ID, 'datasetId': view_dataset, 'tableId': view_table},
)
dataset = Dataset(DatasetReference.from_string(DATASET_ID, PROJECT_ID))
dataset.access_entries = [view_access]
mock_get.return_value = dataset
self.hook.run_grant_dataset_view_access(
source_dataset=DATASET_ID, view_dataset=view_dataset, view_table=view_table
)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
assert len(mock_update.calls) == 0
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_dataset_tables_list(self, mock_client):
table_list = [
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "a-1"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "b-1"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "a-2"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "b-2"},
]
table_list_response = [Table.from_api_repr({"tableReference": t}) for t in table_list]
mock_client.return_value.list_tables.return_value = table_list_response
dataset_reference = DatasetReference(PROJECT_ID, DATASET_ID)
result = self.hook.get_dataset_tables_list(dataset_id=DATASET_ID, project_id=PROJECT_ID)
mock_client.return_value.list_tables.assert_called_once_with(
dataset=dataset_reference, max_results=None
)
assert table_list == result
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
    def test_poll_job_complete(self, mock_client):
        """poll_job_complete fetches the job via a location-scoped client and checks
        completion with the default retry policy."""
        self.hook.poll_job_complete(job_id=JOB_ID, location=LOCATION, project_id=PROJECT_ID)
        mock_client.assert_called_once_with(location=LOCATION, project_id=PROJECT_ID)
        mock_client.return_value.get_job.assert_called_once_with(job_id=JOB_ID)
        mock_client.return_value.get_job.return_value.done.assert_called_once_with(retry=DEFAULT_RETRY)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("logging.Logger.info")
def test_cancel_query_jobs_to_cancel(
self,
mock_logger_info,
poll_job_complete,
):
poll_job_complete.return_value = True
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
poll_job_complete.assert_called_once_with(job_id=JOB_ID)
mock_logger_info.has_call(mock.call("No running BigQuery jobs to cancel."))
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("time.sleep")
@mock.patch("logging.Logger.info")
def test_cancel_query_cancel_timeout(
self,
mock_logger_info,
mock_sleep,
poll_job_complete,
mock_client,
):
poll_job_complete.side_effect = [False] * 13
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
mock_client.return_value.cancel_job.assert_called_once_with(job_id=JOB_ID)
assert poll_job_complete.call_count == 13
assert mock_sleep.call_count == 11
mock_logger_info.has_call(
mock.call(
f"Stopping polling due to timeout. Job with id {JOB_ID} "
"has not completed cancel and may or may not finish."
)
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("time.sleep")
@mock.patch("logging.Logger.info")
def test_cancel_query_cancel_completed(
self,
mock_logger_info,
mock_sleep,
poll_job_complete,
mock_client,
):
poll_job_complete.side_effect = [False] * 12 + [True]
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
mock_client.return_value.cancel_job.assert_called_once_with(job_id=JOB_ID)
assert poll_job_complete.call_count == 13
assert mock_sleep.call_count == 11
mock_logger_info.has_call(mock.call(f"Job successfully canceled: {PROJECT_ID}, {PROJECT_ID}"))
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_get_schema(self, mock_client):
        """get_schema fetches the table and returns only its `schema` section."""
        table = {
            "tableReference": TABLE_REFERENCE_REPR,
            "schema": {
                "fields": [
                    {'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
                    {'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
                ]
            },
        }
        mock_client.return_value.get_table.return_value = Table.from_api_repr(table)
        result = self.hook.get_schema(dataset_id=DATASET_ID, table_id=TABLE_ID)
        mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
        # Result is the schema dict itself, with both fields preserved.
        assert "fields" in result
        assert len(result["fields"]) == 2
    @mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema')
    @mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table')
    def test_update_table_schema_with_policy_tags(self, mock_update, mock_get_schema):
        """update_table_schema merges description/policyTags updates (including nested
        RECORD fields) into the current schema when include_policy_tags=True, and
        untouched fields pass through unchanged."""
        # Current schema as returned by the service.
        mock_get_schema.return_value = {
            "fields": [
                {'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED'},
                {
                    'name': 'salary',
                    'type': 'INTEGER',
                    'mode': 'REQUIRED',
                    'policyTags': {'names': ['sensitive']},
                },
                {'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
                {
                    'name': 'subrecord',
                    'type': 'RECORD',
                    'mode': 'REQUIRED',
                    'fields': [
                        {
                            'name': 'field_1',
                            'type': 'STRING',
                            'mode': 'REQUIRED',
                            'policyTags': {'names': ['sensitive']},
                        },
                    ],
                },
            ]
        }
        # Partial updates: add descriptions, set tags on emp_name, clear tags on salary.
        schema_fields_updates = [
            {'name': 'emp_name', 'description': 'Name of employee', 'policyTags': {'names': ['sensitive']}},
            {
                'name': 'salary',
                'description': 'Monthly salary in USD',
                'policyTags': {},
            },
            {
                'name': 'subrecord',
                'description': 'Some Desc',
                'fields': [
                    {'name': 'field_1', 'description': 'Some nested desc'},
                ],
            },
        ]
        # Expected merge result: updates applied, existing attributes preserved.
        expected_result_schema = {
            'fields': [
                {
                    'name': 'emp_name',
                    'type': 'STRING',
                    'mode': 'REQUIRED',
                    'description': 'Name of employee',
                    'policyTags': {'names': ['sensitive']},
                },
                {
                    'name': 'salary',
                    'type': 'INTEGER',
                    'mode': 'REQUIRED',
                    'description': 'Monthly salary in USD',
                    'policyTags': {},
                },
                {'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
                {
                    'name': 'subrecord',
                    'type': 'RECORD',
                    'mode': 'REQUIRED',
                    'description': 'Some Desc',
                    'fields': [
                        {
                            'name': 'field_1',
                            'type': 'STRING',
                            'mode': 'REQUIRED',
                            'description': 'Some nested desc',
                            'policyTags': {'names': ['sensitive']},
                        }
                    ],
                },
            ]
        }
        self.hook.update_table_schema(
            schema_fields_updates=schema_fields_updates,
            include_policy_tags=True,
            dataset_id=DATASET_ID,
            table_id=TABLE_ID,
        )
        mock_update.assert_called_once_with(
            dataset_id=DATASET_ID,
            table_id=TABLE_ID,
            project_id=PROJECT_ID,
            table_resource={'schema': expected_result_schema},
            fields=['schema'],
        )
    @mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema')
    @mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table')
    def test_update_table_schema_without_policy_tags(self, mock_update, mock_get_schema):
        """With include_policy_tags=False, update_table_schema applies description
        updates but drops any policyTags supplied in the updates."""
        # Current schema as returned by the service (no policy tags present).
        mock_get_schema.return_value = {
            "fields": [
                {'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED'},
                {'name': 'salary', 'type': 'INTEGER', 'mode': 'REQUIRED'},
                {'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
                {
                    'name': 'subrecord',
                    'type': 'RECORD',
                    'mode': 'REQUIRED',
                    'fields': [
                        {'name': 'field_1', 'type': 'STRING', 'mode': 'REQUIRED'},
                    ],
                },
            ]
        }
        # The salary update tries to set policyTags; it must be ignored below.
        schema_fields_updates = [
            {'name': 'emp_name', 'description': 'Name of employee'},
            {
                'name': 'salary',
                'description': 'Monthly salary in USD',
                'policyTags': {'names': ['sensitive']},
            },
            {
                'name': 'subrecord',
                'description': 'Some Desc',
                'fields': [
                    {'name': 'field_1', 'description': 'Some nested desc'},
                ],
            },
        ]
        # Expected result: descriptions merged, no policyTags anywhere.
        expected_result_schema = {
            'fields': [
                {'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED', 'description': 'Name of employee'},
                {
                    'name': 'salary',
                    'type': 'INTEGER',
                    'mode': 'REQUIRED',
                    'description': 'Monthly salary in USD',
                },
                {'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
                {
                    'name': 'subrecord',
                    'type': 'RECORD',
                    'mode': 'REQUIRED',
                    'description': 'Some Desc',
                    'fields': [
                        {
                            'name': 'field_1',
                            'type': 'STRING',
                            'mode': 'REQUIRED',
                            'description': 'Some nested desc',
                        }
                    ],
                },
            ]
        }
        self.hook.update_table_schema(
            schema_fields_updates=schema_fields_updates,
            include_policy_tags=False,
            dataset_id=DATASET_ID,
            table_id=TABLE_ID,
        )
        mock_update.assert_called_once_with(
            dataset_id=DATASET_ID,
            table_id=TABLE_ID,
            project_id=PROJECT_ID,
            table_resource={'schema': expected_result_schema},
            fields=['schema'],
        )
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_invalid_source_format(self, mock_get_service):
        """run_load rejects a source_format outside the allowed set (case-sensitive:
        "json" is not accepted even though "NEWLINE_DELIMITED_JSON" is)."""
        with pytest.raises(
            Exception,
            match=r"JSON is not a valid source format. Please use one of the following types: \['CSV', "
            r"'NEWLINE_DELIMITED_JSON', 'AVRO', 'GOOGLE_SHEETS', 'DATASTORE_BACKUP', 'PARQUET'\]",
        ):
            self.hook.run_load("test.test", "test_schema.json", ["test_data.json"], source_format="json")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_insert_all_succeed(self, mock_client):
rows = [{"json": {"a_key": "a_value_0"}}]
self.hook.insert_all(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
rows=rows,
ignore_unknown_values=True,
skip_invalid_rows=True,
)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.return_value.insert_rows.assert_called_once_with(
table=mock_client.return_value.get_table.return_value,
rows=rows,
ignore_unknown_values=True,
skip_invalid_rows=True,
)
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_insert_all_fail(self, mock_client):
        """When insert_rows reports errors and fail_on_error is set, insert_all must
        raise AirflowException mentioning the insert error."""
        rows = [{"json": {"a_key": "a_value_0"}}]
        mock_client.return_value.insert_rows.return_value = ["some", "errors"]
        with pytest.raises(AirflowException, match="insert error"):
            self.hook.insert_all(
                project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, rows=rows, fail_on_error=True
            )
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
labels={'label1': 'test1', 'label2': 'test2'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['labels'] == {'label1': 'test1', 'label2': 'test2'}
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.QueryJob")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
    def test_insert_job(self, mock_client, mock_query_job):
        """insert_job builds the API job representation (configuration + jobReference),
        dispatches to the job class matching the configuration key, and waits on result()."""
        job_conf = {
            "query": {
                "query": "SELECT * FROM test",
                "useLegacySql": "False",
            }
        }
        # Make the mocked QueryJob class match the "query" configuration key.
        mock_query_job._JOB_TYPE = "query"
        self.hook.insert_job(
            configuration=job_conf,
            job_id=JOB_ID,
            project_id=PROJECT_ID,
            location=LOCATION,
        )
        mock_client.assert_called_once_with(
            project_id=PROJECT_ID,
            location=LOCATION,
        )
        mock_query_job.from_api_repr.assert_called_once_with(
            {
                'configuration': job_conf,
                'jobReference': {'jobId': JOB_ID, 'projectId': PROJECT_ID, 'location': LOCATION},
            },
            mock_client.return_value,
        )
        # The hook blocks on job completion.
        mock_query_job.from_api_repr.return_value.result.assert_called_once_with()
class TestBigQueryTableSplitter(unittest.TestCase):
    """Tests for _split_tablename parsing of `[project[:|.]]dataset.table` strings."""

    def test_internal_need_default_project(self):
        """Without a default project, a bare dataset.table string must be rejected."""
        with pytest.raises(Exception, match="INTERNAL: No default project is specified"):
            _split_tablename("dataset.table", None)

    @parameterized.expand(
        [
            ("project", "dataset", "table", "dataset.table"),
            ("alternative", "dataset", "table", "alternative:dataset.table"),
            ("alternative", "dataset", "table", "alternative.dataset.table"),
            ("alt1:alt", "dataset", "table", "alt1:alt.dataset.table"),
            ("alt1:alt", "dataset", "table", "alt1:alt:dataset.table"),
        ]
    )
    def test_split_tablename(self, project_expected, dataset_expected, table_expected, table_input):
        """Valid inputs split into (project, dataset, table); a missing project falls
        back to the default project id."""
        default_project_id = "project"
        project, dataset, table = _split_tablename(table_input, default_project_id)
        assert project_expected == project
        assert dataset_expected == dataset
        assert table_expected == table

    @parameterized.expand(
        [
            ("alt1:alt2:alt3:dataset.table", None, "Use either : or . to specify project got {}"),
            (
                "alt1.alt.dataset.table",
                None,
                r"Expect format of \(<project\.\|<project\:\)<dataset>\.<table>, got {}",
            ),
            (
                "alt1:alt2:alt.dataset.table",
                "var_x",
                "Format exception for var_x: Use either : or . to specify project got {}",
            ),
            (
                "alt1:alt2:alt:dataset.table",
                "var_x",
                "Format exception for var_x: Use either : or . to specify project got {}",
            ),
            (
                "alt1.alt.dataset.table",
                "var_x",
                r"Format exception for var_x: Expect format of "
                r"\(<project\.\|<project:\)<dataset>.<table>, got {}",
            ),
        ]
    )
    def test_invalid_syntax(self, table_input, var_name, exception_message):
        """Malformed table strings raise; the message names var_name when provided."""
        default_project_id = "project"
        with pytest.raises(Exception, match=exception_message.format(table_input)):
            _split_tablename(table_input, default_project_id, var_name)
class TestTableOperations(_BigQueryBaseTestClass):
    """Tests for table-level hook operations (create, patch, list, views)."""

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_create_view(self, mock_bq_client, mock_table):
        """create_empty_table with a `view` definition builds a view resource and
        creates it with exists_ok=True."""
        view = {
            'query': 'SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*`',
            "useLegacySql": False,
        }
        self.hook.create_empty_table(
            project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, view=view, retry=DEFAULT_RETRY
        )
        body = {'tableReference': TABLE_REFERENCE_REPR, 'view': view}
        mock_table.from_api_repr.assert_called_once_with(body)
        mock_bq_client.return_value.create_table.assert_called_once_with(
            table=mock_table.from_api_repr.return_value,
            exists_ok=True,
            retry=DEFAULT_RETRY,
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_patch_table(self, mock_client, mock_table):
        """patch_table maps each provided attribute to its API field name and updates
        exactly those fields on the table."""
        description_patched = 'Test description.'
        expiration_time_patched = 2524608000000
        friendly_name_patched = 'Test friendly name.'
        labels_patched = {'label1': 'test1', 'label2': 'test2'}
        schema_patched = [
            {'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
            {'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name': 'balance', 'type': 'FLOAT', 'mode': 'NULLABLE'},
            {'name': 'new_field', 'type': 'STRING', 'mode': 'NULLABLE'},
        ]
        time_partitioning_patched = {'expirationMs': 10000000}
        require_partition_filter_patched = True
        view_patched = {
            'query': "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
            'useLegacySql': False,
        }
        self.hook.patch_table(
            dataset_id=DATASET_ID,
            table_id=TABLE_ID,
            project_id=PROJECT_ID,
            description=description_patched,
            expiration_time=expiration_time_patched,
            friendly_name=friendly_name_patched,
            labels=labels_patched,
            schema=schema_patched,
            time_partitioning=time_partitioning_patched,
            require_partition_filter=require_partition_filter_patched,
            view=view_patched,
        )
        # Expected API body; the update must touch only the keys listed here.
        body = {
            "description": description_patched,
            "expirationTime": expiration_time_patched,
            "friendlyName": friendly_name_patched,
            "labels": labels_patched,
            "schema": {"fields": schema_patched},
            "timePartitioning": time_partitioning_patched,
            "view": view_patched,
            "requirePartitionFilter": require_partition_filter_patched,
        }
        # Capture the field list before adding the reference (which is not a field).
        fields = list(body.keys())
        body["tableReference"] = TABLE_REFERENCE_REPR
        mock_table.from_api_repr.assert_called_once_with(body)
        mock_client.return_value.update_table.assert_called_once_with(
            table=mock_table.from_api_repr.return_value, fields=fields
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_create_empty_table_succeed(self, mock_bq_client, mock_table):
        """create_empty_table with no extras builds a bare tableReference body."""
        self.hook.create_empty_table(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
        body = {
            'tableReference': {
                'tableId': TABLE_ID,
                'projectId': PROJECT_ID,
                'datasetId': DATASET_ID,
            }
        }
        mock_table.from_api_repr.assert_called_once_with(body)
        mock_bq_client.return_value.create_table.assert_called_once_with(
            table=mock_table.from_api_repr.return_value, exists_ok=True, retry=DEFAULT_RETRY
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_create_empty_table_with_extras_succeed(self, mock_bq_client, mock_table):
        """Schema, time partitioning, and clustering are all carried into the body."""
        schema_fields = [
            {'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
            {'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name': 'created', 'type': 'DATE', 'mode': 'REQUIRED'},
        ]
        time_partitioning = {"field": "created", "type": "DAY"}
        cluster_fields = ['name']
        self.hook.create_empty_table(
            project_id=PROJECT_ID,
            dataset_id=DATASET_ID,
            table_id=TABLE_ID,
            schema_fields=schema_fields,
            time_partitioning=time_partitioning,
            cluster_fields=cluster_fields,
        )
        body = {
            'tableReference': {
                'tableId': TABLE_ID,
                'projectId': PROJECT_ID,
                'datasetId': DATASET_ID,
            },
            'schema': {'fields': schema_fields},
            'timePartitioning': time_partitioning,
            'clustering': {'fields': cluster_fields},
        }
        mock_table.from_api_repr.assert_called_once_with(body)
        mock_bq_client.return_value.create_table.assert_called_once_with(
            table=mock_table.from_api_repr.return_value, exists_ok=True, retry=DEFAULT_RETRY
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_get_tables_list(self, mock_client):
        """get_dataset_tables lists tables for the dataset reference and preserves
        per-table ids in order."""
        table_list = [
            {
                "kind": "bigquery#table",
                "id": "your-project:your_dataset.table1",
                "tableReference": {
                    "projectId": "your-project",
                    "datasetId": "your_dataset",
                    "tableId": "table1",
                },
                "type": "TABLE",
                "creationTime": "1565781859261",
            },
            {
                "kind": "bigquery#table",
                "id": "your-project:your_dataset.table2",
                "tableReference": {
                    "projectId": "your-project",
                    "datasetId": "your_dataset",
                    "tableId": "table2",
                },
                "type": "TABLE",
                "creationTime": "1565782713480",
            },
        ]
        table_list_response = [Table.from_api_repr(t) for t in table_list]
        mock_client.return_value.list_tables.return_value = table_list_response
        dataset_reference = DatasetReference(PROJECT_ID, DATASET_ID)
        result = self.hook.get_dataset_tables(dataset_id=DATASET_ID, project_id=PROJECT_ID)
        mock_client.return_value.list_tables.assert_called_once_with(
            dataset=dataset_reference,
            max_results=None,
            retry=DEFAULT_RETRY,
        )
        for res, exp in zip(result, table_list):
            assert res["tableId"] == exp["tableReference"]["tableId"]

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_create_materialized_view(self, mock_bq_client, mock_table):
        """create_empty_table with a materialized_view builds the corresponding body."""
        query = """
            SELECT product, SUM(amount)
            FROM `test-project-id.test_dataset_id.test_table_prefix*`
            GROUP BY product
            """
        materialized_view = {
            'query': query,
            'enableRefresh': True,
            'refreshIntervalMs': 2000000,
        }
        self.hook.create_empty_table(
            project_id=PROJECT_ID,
            dataset_id=DATASET_ID,
            table_id=TABLE_ID,
            materialized_view=materialized_view,
            retry=DEFAULT_RETRY,
        )
        body = {'tableReference': TABLE_REFERENCE_REPR, 'materializedView': materialized_view}
        mock_table.from_api_repr.assert_called_once_with(body)
        mock_bq_client.return_value.create_table.assert_called_once_with(
            table=mock_table.from_api_repr.return_value,
            exists_ok=True,
            retry=DEFAULT_RETRY,
        )
class TestBigQueryCursor(_BigQueryBaseTestClass):
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_execute_with_parameters(self, mock_insert, _):
        """execute interpolates %-style parameters into the SQL and submits a single
        INTERACTIVE legacy-SQL query job."""
        bq_cursor = self.hook.get_cursor()
        bq_cursor.execute("SELECT %(foo)s", {"foo": "bar"})
        conf = {
            'query': {
                'query': "SELECT 'bar'",
                'priority': 'INTERACTIVE',
                'useLegacySql': True,
                'schemaUpdateOptions': [],
            }
        }
        mock_insert.assert_called_once_with(configuration=conf, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_execute_many(self, mock_insert, _):
bq_cursor = self.hook.get_cursor()
bq_cursor.executemany("SELECT %(foo)s", [{"foo": "bar"}, {"foo": "baz"}])
assert mock_insert.call_count == 2
assert mock_insert.has_calls(
mock.call(
configuration={
'query': {
'query': "SELECT 'bar'",
'priority': 'INTERACTIVE',
'useLegacySql': True,
'schemaUpdateOptions': [],
}
},
project_id=PROJECT_ID,
),
mock.call(
configuration={
'query': {
'query': "SELECT 'baz'",
'priority': 'INTERACTIVE',
'useLegacySql': True,
'schemaUpdateOptions': [],
}
},
project_id=PROJECT_ID,
),
)
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_description(self, mock_get_service):
        """The cursor's `description` property is not implemented and must raise."""
        bq_cursor = self.hook.get_cursor()
        with pytest.raises(NotImplementedError):
            bq_cursor.description
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_close(self, mock_get_service):
        """close() is a no-op returning None (DB-API compliance)."""
        bq_cursor = self.hook.get_cursor()
        result = bq_cursor.close()  # pylint: disable=assignment-from-no-return
        assert result is None
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_rowcount(self, mock_get_service):
        """rowcount is always -1 (unknown), per the DB-API convention."""
        bq_cursor = self.hook.get_cursor()
        result = bq_cursor.rowcount
        assert -1 == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.next")
def test_fetchone(self, mock_next, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.fetchone()
mock_next.call_count == 1
assert mock_next.return_value == result
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch(
        "airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.fetchone", side_effect=[1, 2, 3, None]
    )
    def test_fetchall(self, mock_fetchone, mock_get_service):
        """fetchall drains fetchone until it returns None and collects the rows."""
        bq_cursor = self.hook.get_cursor()
        result = bq_cursor.fetchall()
        assert [1, 2, 3] == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.fetchone")
def test_fetchmany(self, mock_fetchone, mock_get_service):
side_effect_values = [1, 2, 3, None]
bq_cursor = self.hook.get_cursor()
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany()
assert [1] == result
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany(2)
assert [1, 2] == result
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany(5)
assert [1, 2, 3] == result
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_next_no_jobid(self, mock_get_service):
        """next() returns None when no job id is associated with the cursor."""
        bq_cursor = self.hook.get_cursor()
        bq_cursor.job_id = None
        result = bq_cursor.next()
        assert result is None
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_next_buffer(self, mock_get_service):
        """next() drains the local buffer first; once empty with all pages loaded,
        it returns None without hitting the service."""
        bq_cursor = self.hook.get_cursor()
        bq_cursor.job_id = JOB_ID
        bq_cursor.buffer = [1, 2]
        result = bq_cursor.next()
        assert 1 == result
        result = bq_cursor.next()
        assert 2 == result
        # Empty buffer + all pages loaded -> exhausted.
        bq_cursor.all_pages_loaded = True
        result = bq_cursor.next()
        assert result is None
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_next(self, mock_get_service):
        """next() fetches a page via jobs().getQueryResults once, converts rows using
        the schema (INTEGER -> int), and serves subsequent rows from the buffer."""
        mock_get_query_results = mock_get_service.return_value.jobs.return_value.getQueryResults
        mock_execute = mock_get_query_results.return_value.execute
        # One page with two rows and a two-column schema; no further pages.
        mock_execute.return_value = {
            "rows": [
                {"f": [{"v": "one"}, {"v": 1}]},
                {"f": [{"v": "two"}, {"v": 2}]},
            ],
            "pageToken": None,
            "schema": {
                "fields": [
                    {"name": "field_1", "type": "STRING"},
                    {"name": "field_2", "type": "INTEGER"},
                ]
            },
        }
        bq_cursor = self.hook.get_cursor()
        bq_cursor.job_id = JOB_ID
        bq_cursor.location = LOCATION
        result = bq_cursor.next()
        assert ['one', 1] == result
        result = bq_cursor.next()
        assert ['two', 2] == result
        # Only one API round-trip for both rows.
        mock_get_query_results.assert_called_once_with(
            jobId=JOB_ID, location=LOCATION, pageToken=None, projectId='bq-project'
        )
        mock_execute.assert_called_once_with(num_retries=bq_cursor.num_retries)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.flush_results")
def test_next_no_rows(self, mock_flush_results, mock_get_service):
mock_get_query_results = mock_get_service.return_value.jobs.return_value.getQueryResults
mock_execute = mock_get_query_results.return_value.execute
mock_execute.return_value = {}
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = JOB_ID
result = bq_cursor.next()
assert result is None
mock_get_query_results.assert_called_once_with(
jobId=JOB_ID, location=None, pageToken=None, projectId='bq-project'
)
mock_execute.assert_called_once_with(num_retries=bq_cursor.num_retries)
assert mock_flush_results.call_count == 1
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.flush_results")
def test_flush_cursor_in_execute(self, _, mock_insert, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.execute("SELECT %(foo)s", {"foo": "bar"})
assert mock_insert.call_count == 1
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_flush_cursor(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.page_token = '456dcea9-fcbf-4f02-b570-83f5297c685e'
bq_cursor.job_id = 'c0a79ae4-0e72-4593-a0d0-7dbbf726f193'
bq_cursor.all_pages_loaded = True
bq_cursor.buffer = [('a', 100, 200), ('b', 200, 300)]
bq_cursor.flush_results()
assert bq_cursor.page_token is None
assert bq_cursor.job_id is None
assert not bq_cursor.all_pages_loaded
assert bq_cursor.buffer == []
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_arraysize(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
assert bq_cursor.buffersize is None
assert bq_cursor.arraysize == 1
bq_cursor.set_arraysize(10)
assert bq_cursor.buffersize == 10
assert bq_cursor.arraysize == 10
class TestDatasetsOperations(_BigQueryBaseTestClass):
    """Dataset-level CRUD behavior of BigQueryHook."""

    def test_create_empty_dataset_no_dataset_id_err(self):
        """A dataset id must be supplied one way or another."""
        with pytest.raises(ValueError, match=r"Please specify `datasetId`"):
            self.hook.create_empty_dataset(dataset_id=None, project_id=None)

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_create_empty_dataset_with_params(self, mock_client, mock_dataset):
        """Explicit ids and location are assembled into the Dataset API body."""
        self.hook.create_empty_dataset(project_id=PROJECT_ID, dataset_id=DATASET_ID, location=LOCATION)
        from_repr = mock_dataset.from_api_repr
        from_repr.assert_called_once_with(
            {
                "location": LOCATION,
                "datasetReference": {"datasetId": DATASET_ID, "projectId": PROJECT_ID},
            }
        )
        mock_client.return_value.create_dataset.assert_called_once_with(
            dataset=from_repr.return_value, exists_ok=True
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_create_empty_dataset_with_object(self, mock_client, mock_dataset):
        """A full dataset_reference dict is passed through untouched."""
        reference = {
            "location": "LOCATION",
            "datasetReference": {"datasetId": "DATASET_ID", "projectId": "PROJECT_ID"},
        }
        self.hook.create_empty_dataset(dataset_reference=reference)
        from_repr = mock_dataset.from_api_repr
        from_repr.assert_called_once_with(reference)
        mock_client.return_value.create_dataset.assert_called_once_with(
            dataset=from_repr.return_value, exists_ok=True
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_create_empty_dataset_use_values_from_object(self, mock_client, mock_dataset):
        """Values inside dataset_reference win over conflicting keyword args."""
        reference = {
            "location": "LOCATION",
            "datasetReference": {"datasetId": "DATASET_ID", "projectId": "PROJECT_ID"},
        }
        self.hook.create_empty_dataset(
            dataset_reference=reference,
            location="Unknown location",
            dataset_id="Fashionable Dataset",
            project_id="Amazing Project",
        )
        from_repr = mock_dataset.from_api_repr
        from_repr.assert_called_once_with(reference)
        mock_client.return_value.create_dataset.assert_called_once_with(
            dataset=from_repr.return_value, exists_ok=True
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_get_dataset(self, mock_client):
        """get_dataset resolves the reference and returns the client's Dataset."""
        expected_result = Dataset.from_api_repr(
            {
                "kind": "bigquery#dataset",
                "location": "US",
                "id": "your-project:dataset_2_test",
                "datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
            }
        )
        mock_client.return_value.get_dataset.return_value = expected_result
        result = self.hook.get_dataset(dataset_id=DATASET_ID, project_id=PROJECT_ID)
        mock_client.return_value.get_dataset.assert_called_once_with(
            dataset_ref=DatasetReference(PROJECT_ID, DATASET_ID)
        )
        assert result == expected_result

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_get_datasets_list(self, mock_client):
        """get_datasets_list forwards defaults and keeps the client's order."""
        datasets = [
            {
                "kind": "bigquery#dataset",
                "location": "US",
                "id": "your-project:dataset_2_test",
                "datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
            },
            {
                "kind": "bigquery#dataset",
                "location": "US",
                "id": "your-project:dataset_1_test",
                "datasetReference": {"projectId": "your-project", "datasetId": "dataset_1_test"},
            },
        ]
        mock_client.return_value.list_datasets.return_value = [DatasetListItem(d) for d in datasets]
        result = self.hook.get_datasets_list(project_id=PROJECT_ID)
        mock_client.return_value.list_datasets.assert_called_once_with(
            project=PROJECT_ID,
            include_all=False,
            filter=None,
            max_results=None,
            page_token=None,
            retry=DEFAULT_RETRY,
        )
        assert [item.full_dataset_id for item in result] == [d["id"] for d in datasets]

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_delete_dataset(self, mock_client):
        """delete_dataset forwards delete_contents and tolerates missing datasets."""
        self.hook.delete_dataset(project_id=PROJECT_ID, dataset_id=DATASET_ID, delete_contents=True)
        mock_client.return_value.delete_dataset.assert_called_once_with(
            dataset=DatasetReference(PROJECT_ID, DATASET_ID),
            delete_contents=True,
            retry=DEFAULT_RETRY,
            not_found_ok=True,
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    def test_patch_dataset(self, mock_get_service):
        """patch_dataset issues a datasets().patch call with the raw resource."""
        dataset_resource = {"access": [{"role": "WRITER", "groupByEmail": "cloud-logs@google.com"}]}
        patch_method = mock_get_service.return_value.datasets.return_value.patch
        self.hook.patch_dataset(
            dataset_id=DATASET_ID, project_id=PROJECT_ID, dataset_resource=dataset_resource
        )
        patch_method.assert_called_once_with(
            projectId=PROJECT_ID, datasetId=DATASET_ID, body=dataset_resource
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_update_dataset(self, mock_client, mock_dataset):
        """update_dataset converts the resource and returns the updated Dataset."""
        dataset_resource = {
            "kind": "bigquery#dataset",
            "location": "US",
            "id": "your-project:dataset_2_test",
            "datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
        }
        update_method = mock_client.return_value.update_dataset
        dataset = Dataset.from_api_repr(dataset_resource)
        mock_dataset.from_api_repr.return_value = dataset
        update_method.return_value = dataset
        result = self.hook.update_dataset(
            dataset_id=DATASET_ID,
            project_id=PROJECT_ID,
            dataset_resource=dataset_resource,
            fields=["location"],
        )
        mock_dataset.from_api_repr.assert_called_once_with(dataset_resource)
        update_method.assert_called_once_with(
            dataset=dataset,
            fields=["location"],
            retry=DEFAULT_RETRY,
        )
        assert result == dataset
class TestTimePartitioningInRunJob(_BigQueryBaseTestClass):
    """Time-partitioning handling in run_load / run_query."""

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_load_default(self, mock_insert):
        """Without an explicit setting, no timePartitioning key is emitted."""
        self.hook.run_load(
            destination_project_dataset_table='my_dataset.my_table',
            schema_fields=[],
            source_uris=[],
        )
        config = mock_insert.call_args[1]["configuration"]
        assert config['load'].get('timePartitioning') is None

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_with_auto_detect(self, mock_insert):
        """autodetect=True is propagated into the load configuration."""
        self.hook.run_load("autodetect.table", [], [], autodetect=True)
        config = mock_insert.call_args[1]["configuration"]
        assert config['load']['autodetect'] is True

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_load_with_arg(self, mock_insert):
        """An explicit time_partitioning dict is copied into the load config."""
        self.hook.run_load(
            destination_project_dataset_table=f"{DATASET_ID}.{TABLE_ID}",
            schema_fields=[],
            source_uris=[],
            time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
        )
        expected_configuration = {
            'load': {
                'autodetect': False,
                'createDisposition': 'CREATE_IF_NEEDED',
                'destinationTable': {'projectId': PROJECT_ID, 'datasetId': DATASET_ID, 'tableId': TABLE_ID},
                'sourceFormat': 'CSV',
                'sourceUris': [],
                'writeDisposition': 'WRITE_EMPTY',
                'ignoreUnknownValues': False,
                'timePartitioning': {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
                'skipLeadingRows': 0,
                'fieldDelimiter': ',',
                'quote': None,
                'allowQuotedNewlines': False,
                'encoding': 'UTF-8',
            }
        }
        mock_insert.assert_called_once_with(configuration=expected_configuration, project_id=PROJECT_ID)

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_query_with_arg(self, mock_insert):
        """An explicit time_partitioning dict is copied into the query config."""
        self.hook.run_query(
            sql='select 1',
            destination_dataset_table=f"{DATASET_ID}.{TABLE_ID}",
            time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
        )
        expected_configuration = {
            'query': {
                'query': 'select 1',
                'priority': 'INTERACTIVE',
                'useLegacySql': True,
                'timePartitioning': {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
                'schemaUpdateOptions': [],
                'destinationTable': {'projectId': PROJECT_ID, 'datasetId': DATASET_ID, 'tableId': TABLE_ID},
                'allowLargeResults': False,
                'flattenResults': None,
                'writeDisposition': 'WRITE_EMPTY',
                'createDisposition': 'CREATE_IF_NEEDED',
            }
        }
        mock_insert.assert_called_once_with(configuration=expected_configuration, project_id=PROJECT_ID)

    def test_dollar_makes_partition(self):
        """A `$partition` suffix on the table name implies DAY partitioning."""
        assert _cleanse_time_partitioning('test.teast$20170101', {}) == {'type': 'DAY'}

    def test_extra_time_partitioning_options(self):
        """User-supplied partitioning options are preserved verbatim."""
        cleansed = _cleanse_time_partitioning(
            'test.teast', {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
        )
        assert cleansed == {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
class TestClusteringInRunJob(_BigQueryBaseTestClass):
    """Clustering handling in run_load / run_query."""

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_load_default(self, mock_insert):
        """No clustering key is emitted unless cluster_fields is given."""
        self.hook.run_load(
            destination_project_dataset_table='my_dataset.my_table',
            schema_fields=[],
            source_uris=[],
        )
        config = mock_insert.call_args[1]["configuration"]
        assert config['load'].get('clustering') is None

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_load_with_arg(self, mock_insert):
        """cluster_fields becomes a clustering spec in the load config."""
        self.hook.run_load(
            destination_project_dataset_table='my_dataset.my_table',
            schema_fields=[],
            source_uris=[],
            cluster_fields=['field1', 'field2'],
            time_partitioning={'type': 'DAY'},
        )
        config = mock_insert.call_args[1]["configuration"]
        assert config['load']['clustering'] == {'fields': ['field1', 'field2']}

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_query_default(self, mock_insert):
        """Plain queries carry no clustering spec."""
        self.hook.run_query(sql='select 1')
        config = mock_insert.call_args[1]["configuration"]
        assert config['query'].get('clustering') is None

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_query_with_arg(self, mock_insert):
        """cluster_fields becomes a clustering spec in the query config."""
        self.hook.run_query(
            sql='select 1',
            destination_dataset_table='my_dataset.my_table',
            cluster_fields=['field1', 'field2'],
            time_partitioning={'type': 'DAY'},
        )
        config = mock_insert.call_args[1]["configuration"]
        assert config['query']['clustering'] == {'fields': ['field1', 'field2']}
class TestBigQueryHookLegacySql(_BigQueryBaseTestClass):
    """Ensure `use_legacy_sql` param in `BigQueryHook` propagates properly."""

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_hook_uses_legacy_sql_by_default(self, mock_insert, _):
        """Legacy SQL is the default dialect for query jobs."""
        self.hook.get_first('query')
        config = mock_insert.call_args[1]["configuration"]
        assert config['query']['useLegacySql'] is True

    @mock.patch(
        'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
        return_value=(CREDENTIALS, PROJECT_ID),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_legacy_sql_override_propagates_properly(
        self, mock_insert, mock_get_service, mock_get_creds_and_proj_id
    ):
        """use_legacy_sql=False at construction reaches the job configuration."""
        hook_with_standard_sql = BigQueryHook(use_legacy_sql=False)
        hook_with_standard_sql.get_first('query')
        config = mock_insert.call_args[1]["configuration"]
        assert config['query']['useLegacySql'] is False
class TestBigQueryHookRunWithConfiguration(_BigQueryBaseTestClass):
    """Job submission details of run_with_configuration."""

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.LoadJob")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
    def test_run_with_configuration_location(self, mock_client, mock_job):
        """The hook's location reaches both the client and the job reference."""
        job_location = 'asia-east1'
        mock_job._JOB_TYPE = "load"
        conf = {"load": {}}
        self.hook.running_job_id = 'job_vjdi28vskdui2onru23'
        self.hook.location = job_location
        self.hook.run_with_configuration(conf)
        mock_client.assert_called_once_with(project_id=PROJECT_ID, location=job_location)
        mock_job.from_api_repr.assert_called_once_with(
            {
                "configuration": conf,
                # The job id is generated internally, so only its presence matters.
                "jobReference": {"jobId": mock.ANY, "projectId": PROJECT_ID, "location": job_location},
            },
            mock_client.return_value,
        )
class TestBigQueryWithKMS(_BigQueryBaseTestClass):
    """KMS encryption configuration propagation, plus table update plumbing."""

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_create_empty_table_with_kms(self, mock_bq_client, mock_table):
        """The KMS key is embedded in the table body sent to create_table."""
        schema_fields = [{"name": "id", "type": "STRING", "mode": "REQUIRED"}]
        encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
        self.hook.create_empty_table(
            project_id=PROJECT_ID,
            dataset_id=DATASET_ID,
            table_id=TABLE_ID,
            schema_fields=schema_fields,
            encryption_configuration=encryption_configuration,
        )
        mock_table.from_api_repr.assert_called_once_with(
            {
                "tableReference": {"tableId": TABLE_ID, 'projectId': PROJECT_ID, 'datasetId': DATASET_ID},
                "schema": {"fields": schema_fields},
                "encryptionConfiguration": encryption_configuration,
            }
        )
        mock_bq_client.return_value.create_table.assert_called_once_with(
            table=mock_table.from_api_repr.return_value,
            exists_ok=True,
            retry=DEFAULT_RETRY,
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
    def test_create_external_table_with_kms(self, mock_create):
        """create_external_table forwards the KMS key inside the table resource."""
        schema_fields = [{'mode': 'REQUIRED', 'name': 'id', 'type': 'STRING', 'description': None}]
        encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
        labels = {'label1': 'test1', 'label2': 'test2'}
        # Gather every option in one mapping to keep the call site compact.
        params = dict(
            external_project_dataset_table=f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}",
            source_uris=['test_data.csv'],
            source_format='CSV',
            autodetect=False,
            compression='NONE',
            ignore_unknown_values=False,
            max_bad_records=10,
            skip_leading_rows=1,
            field_delimiter=',',
            quote_character=None,
            allow_jagged_rows=False,
            encoding="UTF-8",
            allow_quoted_newlines=False,
            labels=labels,
            schema_fields=schema_fields,
            encryption_configuration=encryption_configuration,
        )
        self.hook.create_external_table(**params)
        expected_resource = {
            'externalDataConfiguration': {
                'autodetect': False,
                'sourceFormat': 'CSV',
                'sourceUris': ['test_data.csv'],
                'compression': 'NONE',
                'ignoreUnknownValues': False,
                'schema': {'fields': schema_fields},
                'maxBadRecords': 10,
                'csvOptions': {
                    'skipLeadingRows': 1,
                    'fieldDelimiter': ',',
                    'quote': None,
                    'allowQuotedNewlines': False,
                    'allowJaggedRows': False,
                    'encoding': 'UTF-8',
                },
            },
            'tableReference': {
                'projectId': PROJECT_ID,
                'datasetId': DATASET_ID,
                'tableId': TABLE_ID,
            },
            'labels': labels,
            "encryptionConfiguration": encryption_configuration,
        }
        mock_create.assert_called_once_with(
            table_resource=expected_resource,
            project_id=PROJECT_ID,
            location=None,
            exists_ok=True,
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
    def test_update_table(self, mock_client, mock_table):
        """update_table converts the resource and forwards the field mask."""
        resource = {
            "tableReference": {
                "projectId": PROJECT_ID,
                "datasetId": DATASET_ID,
                "tableId": TABLE_ID,
            },
            "description": 'Test description.',
            "expirationTime": 2524608000000,
            "friendlyName": 'Test friendly name.',
            "labels": {'label1': 'test1', 'label2': 'test2'},
            "schema": {
                "fields": [
                    {'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
                    {'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
                    {'name': 'balance', 'type': 'FLOAT', 'mode': 'NULLABLE'},
                    {'name': 'new_field', 'type': 'STRING', 'mode': 'NULLABLE'},
                ]
            },
            "timePartitioning": {'expirationMs': 10000000},
            "view": {
                'query': "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
                'useLegacySql': False,
            },
            "requirePartitionFilter": True,
        }
        # Patch every top-level key that appears in the resource.
        fields = list(resource.keys())
        self.hook.update_table(
            table_resource=resource,
            fields=fields,
            dataset_id=DATASET_ID,
            table_id=TABLE_ID,
            project_id=PROJECT_ID,
        )
        mock_table.from_api_repr.assert_called_once_with(resource)
        mock_client.return_value.update_table.assert_called_once_with(
            table=mock_table.from_api_repr.return_value, fields=fields
        )

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_query_with_kms(self, mock_insert):
        """run_query forwards the encryption configuration by reference."""
        encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
        self.hook.run_query(sql='query', encryption_configuration=encryption_configuration)
        config = mock_insert.call_args[1]["configuration"]
        assert config['query']['destinationEncryptionConfiguration'] is encryption_configuration

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_copy_with_kms(self, mock_insert):
        """run_copy forwards the encryption configuration by reference."""
        encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
        self.hook.run_copy(
            source_project_dataset_tables='p.d.st',
            destination_project_dataset_table='p.d.dt',
            encryption_configuration=encryption_configuration,
        )
        config = mock_insert.call_args[1]["configuration"]
        assert config['copy']['destinationEncryptionConfiguration'] is encryption_configuration

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_load_with_kms(self, mock_insert):
        """run_load forwards the encryption configuration by reference."""
        encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
        self.hook.run_load(
            destination_project_dataset_table='p.d.dt',
            source_uris=['abc.csv'],
            autodetect=True,
            encryption_configuration=encryption_configuration,
        )
        config = mock_insert.call_args[1]["configuration"]
        assert config['load']['destinationEncryptionConfiguration'] is encryption_configuration
class TestBigQueryBaseCursorMethodsDeprecationWarning(unittest.TestCase):
    """Deprecated BigQueryBaseCursor methods must warn and delegate to the hook."""

    @parameterized.expand(
        [
            ("create_empty_table",),
            ("create_empty_dataset",),
            ("get_dataset_tables",),
            ("delete_dataset",),
            ("create_external_table",),
            ("patch_table",),
            ("insert_all",),
            ("update_dataset",),
            ("patch_dataset",),
            ("get_dataset_tables_list",),
            ("get_datasets_list",),
            ("get_dataset",),
            ("run_grant_dataset_view_access",),
            ("run_table_upsert",),
            ("run_table_delete",),
            ("get_tabledata",),
            ("get_schema",),
            ("poll_job_complete",),
            ("cancel_query",),
            ("run_with_configuration",),
            ("run_load",),
            ("run_copy",),
            ("run_extract",),
            ("run_query",),
        ]
    )
    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook")
    def test_deprecation_warning(self, func_name, mock_bq_hook):
        """Each cursor method warns, forwards its args, and documents the new path."""
        args, kwargs = [1], {"param1": "val1"}
        new_path = re.escape(f"`airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.{func_name}`")
        message_regex = re.compile(fr"This method is deprecated\.\s+Please use {new_path}", re.MULTILINE)
        cursor = BigQueryCursor(mock.MagicMock(), PROJECT_ID, mock_bq_hook)
        cursor_method = getattr(cursor, func_name)
        with pytest.warns(DeprecationWarning, match=message_regex):
            _ = cursor_method(*args, **kwargs)
        # The call must be forwarded verbatim to the hook implementation.
        getattr(mock_bq_hook, func_name).assert_called_once_with(*args, **kwargs)
        # And the docstring must point users at the replacement.
        assert re.search(f".*{new_path}.*", cursor_method.__doc__)
class TestBigQueryWithLabelsAndDescription(_BigQueryBaseTestClass):
    """Propagation of labels/description through load jobs and external tables."""

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_load_labels(self, mock_insert):
        """Labels end up under destinationTableProperties of the load config."""
        labels = {'label1': 'test1', 'label2': 'test2'}
        self.hook.run_load(
            destination_project_dataset_table='my_dataset.my_table',
            schema_fields=[],
            source_uris=[],
            labels=labels,
        )
        _, kwargs = mock_insert.call_args
        assert kwargs["configuration"]['load']['destinationTableProperties']['labels'] is labels

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
    def test_run_load_description(self, mock_insert):
        """Description ends up under destinationTableProperties of the load config."""
        description = "Test Description"
        self.hook.run_load(
            destination_project_dataset_table='my_dataset.my_table',
            schema_fields=[],
            source_uris=[],
            description=description,
        )
        _, kwargs = mock_insert.call_args
        assert kwargs["configuration"]['load']['destinationTableProperties']['description'] is description

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
    def test_create_external_table_labels(self, mock_create):
        """Labels are copied into the external table resource."""
        labels = {'label1': 'test1', 'label2': 'test2'}
        self.hook.create_external_table(
            external_project_dataset_table='my_dataset.my_table',
            schema_fields=[],
            source_uris=[],
            labels=labels,
        )
        _, kwargs = mock_create.call_args
        # FIX: replaced self.assertDictEqual with a plain assert — every sibling
        # test in these pytest-style classes uses bare asserts, and
        # assertDictEqual is only available if _BigQueryBaseTestClass derives
        # from unittest.TestCase (NOTE(review): base class not visible here —
        # if it does not, the original line would raise AttributeError).
        assert kwargs['table_resource']['labels'] == labels

    @mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
    def test_create_external_table_description(self, mock_create):
        """Description is copied into the external table resource."""
        description = "Test Description"
        self.hook.create_external_table(
            external_project_dataset_table='my_dataset.my_table',
            schema_fields=[],
            source_uris=[],
            description=description,
        )
        _, kwargs = mock_create.call_args
        assert kwargs['table_resource']['description'] is description
|
from typing import Dict
from flask_babel import _
from anyway.backend_constants import InjurySeverity
from anyway.infographics_dictionaries import segment_dictionary
from anyway.models import InvolvedMarkerView
from anyway.request_params import RequestParams
from anyway.widgets.suburban_widgets.sub_urban_widget import SubUrbanWidget
from anyway.widgets.widget import register
from anyway.widgets.widget_utils import (
get_accidents_stats,
gen_entity_labels,
get_injured_filters,
format_2_level_items,
sort_and_fill_gaps_for_stacked_bar,
)
@register
class InjuredCountByAccidentYearWidget(SubUrbanWidget):
    """Stacked-bar widget: injured count per accident year, split by severity."""

    name: str = "injured_count_by_accident_year"

    def __init__(self, request_params: RequestParams):
        super().__init__(request_params, type(self).name)
        self.rank = 9
        self.information = (
            "Fatal, severe and light injured count in the specified years, split by injury severity"
        )

    def generate_items(self) -> None:
        """Query injury stats grouped by (year, severity) and shape them for a stacked bar."""
        res1 = get_accidents_stats(
            table_obj=InvolvedMarkerView,
            filters=get_injured_filters(self.request_params.location_info),
            group_by=("accident_year", "injury_severity"),
            count="injury_severity",
            start_time=self.request_params.start_time,
            end_time=self.request_params.end_time,
        )
        # Zero-fill every severity for every year in range so the bars align.
        res2 = sort_and_fill_gaps_for_stacked_bar(
            res1,
            range(self.request_params.start_time.year, self.request_params.end_time.year + 1),
            {
                InjurySeverity.KILLED.value: 0,
                InjurySeverity.SEVERE_INJURED.value: 0,
                InjurySeverity.LIGHT_INJURED.value: 0,
            },
        )
        self.items = format_2_level_items(res2, None, InjurySeverity)

    @staticmethod
    def localize_items(request_params: RequestParams, items: Dict) -> Dict:
        """Attach a localized title and severity label map to the widget payload."""
        # BUG FIX: the title f-string nested double quotes inside a double-quoted
        # literal ("...{d["k"]}..."), which is a SyntaxError on Python < 3.12;
        # use single quotes for the subscription key instead.
        items["data"]["text"] = {
            "title": _("Number of injured in accidents, per year, split by severity")
            + f" - {segment_dictionary[request_params.location_info['road_segment_name']]}",
            "labels_map": gen_entity_labels(InjurySeverity),
        }
        return items
_("Fatal, severe and light injured count in the specified years, split by injury severity")
| from typing import Dict
from flask_babel import _
from anyway.backend_constants import InjurySeverity
from anyway.infographics_dictionaries import segment_dictionary
from anyway.models import InvolvedMarkerView
from anyway.request_params import RequestParams
from anyway.widgets.suburban_widgets.sub_urban_widget import SubUrbanWidget
from anyway.widgets.widget import register
from anyway.widgets.widget_utils import (
get_accidents_stats,
gen_entity_labels,
get_injured_filters,
format_2_level_items,
sort_and_fill_gaps_for_stacked_bar,
)
@register
class InjuredCountByAccidentYearWidget(SubUrbanWidget):
    """Widget showing yearly injured counts stacked by injury severity."""

    name: str = "injured_count_by_accident_year"

    def __init__(self, request_params: RequestParams):
        super().__init__(request_params, type(self).name)
        self.rank = 9
        self.information = (
            "Fatal, severe and light injured count in the specified years, split by injury severity"
        )

    def generate_items(self) -> None:
        """Fetch (year, severity) injury counts and shape them for a stacked-bar chart."""
        params = self.request_params
        raw_stats = get_accidents_stats(
            table_obj=InvolvedMarkerView,
            filters=get_injured_filters(params.location_info),
            group_by=("accident_year", "injury_severity"),
            count="injury_severity",
            start_time=params.start_time,
            end_time=params.end_time,
        )
        # Zero-fill every severity for every year in range so the bars align.
        zero_defaults = {
            InjurySeverity.KILLED.value: 0,
            InjurySeverity.SEVERE_INJURED.value: 0,
            InjurySeverity.LIGHT_INJURED.value: 0,
        }
        years = range(params.start_time.year, params.end_time.year + 1)
        filled = sort_and_fill_gaps_for_stacked_bar(raw_stats, years, zero_defaults)
        self.items = format_2_level_items(filled, None, InjurySeverity)

    @staticmethod
    def localize_items(request_params: RequestParams, items: Dict) -> Dict:
        """Attach a localized title and severity label map to the widget payload."""
        segment = segment_dictionary[request_params.location_info['road_segment_name']]
        title = _("Number of injured in accidents, per year, split by severity") + f" - {segment}"
        items["data"]["text"] = {
            "title": title,
            "labels_map": gen_entity_labels(InjurySeverity),
        }
        return items
_("Fatal, severe and light injured count in the specified years, split by injury severity")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 15 10:38:14 2021
@author: kunal001
"""
import logging

logger = logging.getLogger(__name__)


class CreateDatabase:
    """Builds a dictionary of circuit-graph hierarchies from an input graph."""

    def __init__(self, hier_graph, const_parse):
        # Maps hierarchy name -> {"graph", "ports", "ports_weight", "const"}.
        self.hier_graph_dict = {}
        self.const_parse = const_parse
        self.G = hier_graph

    def read_inputs(self, name: str):
        """
        Read circuit graphs and flatten all hierarchies into ``hier_graph_dict``.

        Returns the populated dictionary, keyed first by ``name`` for the top
        graph, then by sub-circuit ``inst_type`` for nested hierarchies.
        """
        top_ports = []
        ports_weight = {}
        for node, attr in self.G.nodes(data=True):
            if 'source' in attr['inst_type']:
                # Every net attached to a source is treated as a top-level port.
                for source_nets in self.G.neighbors(node):
                    top_ports.append(source_nets)
            elif 'net_type' in attr:
                if attr['net_type'] == "external":
                    top_ports.append(node)
                    ports_weight[node] = []
                    for nbr in list(self.G.neighbors(node)):
                        ports_weight[node].append(self.G.get_edge_data(node, nbr)['weight'])
        logger.debug("Merging nested graph hierarchies to dictionary: ")
        const = self.const_parse.read_user_const(name)
        self.hier_graph_dict[name] = {
            "graph": self.G,
            "ports": top_ports,
            "ports_weight": ports_weight,
            "const": const
        }
        self._traverse_hier_in_graph(self.G)
        logger.debug(f"read graph {self.hier_graph_dict}")
        return self.hier_graph_dict

    def _traverse_hier_in_graph(self, G):
        """
        Recursively read all hierarchies in the graph and record them as entries.
        """
        for node, attr in G.nodes(data=True):
            if "sub_graph" in attr and attr["sub_graph"]:
                # BUG FIX: these debug f-strings previously nested single quotes
                # inside single-quoted literals ('...{attr['k']}...'), which is
                # a SyntaxError on Python < 3.12; use double quotes inside.
                logger.debug(f'Traversing sub graph: {node} {attr["inst_type"]} {attr["ports"]}')
                sub_ports = []
                ports_weight = {}
                for sub_node, sub_attr in attr["sub_graph"].nodes(data=True):
                    if 'net_type' in sub_attr:
                        if sub_attr['net_type'] == "external":
                            sub_ports.append(sub_node)
                            ports_weight[sub_node] = []
                            for nbr in list(attr["sub_graph"].neighbors(sub_node)):
                                ports_weight[sub_node].append(
                                    attr["sub_graph"].get_edge_data(sub_node, nbr)['weight']
                                )
                logger.debug(f'external ports: {sub_ports}, {attr["connection"]}, {ports_weight}')
                const = self.const_parse.read_user_const(attr["inst_type"])
                self.hier_graph_dict[attr["inst_type"]] = {
                    "graph": attr["sub_graph"],
                    "ports": sub_ports,
                    "const": const,
                    "ports_weight": ports_weight
                }
                self._traverse_hier_in_graph(attr["sub_graph"])
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 15 10:38:14 2021
@author: kunal001
"""
import logging

logger = logging.getLogger(__name__)


class CreateDatabase:
    """Collects a circuit graph and its nested hierarchies into one dictionary."""

    def __init__(self, hier_graph, const_parse):
        # Maps hierarchy name -> {"graph", "ports", "ports_weight", "const"}.
        self.hier_graph_dict = {}
        self.const_parse = const_parse
        self.G = hier_graph

    def read_inputs(self, name: str):
        """Read the top-level circuit graph and flatten all nested hierarchies."""
        top_ports = []
        ports_weight = {}
        for node, attr in self.G.nodes(data=True):
            if 'source' in attr['inst_type']:
                # Nets touching a source become top-level ports.
                top_ports.extend(self.G.neighbors(node))
            elif attr.get('net_type') == "external":
                top_ports.append(node)
                ports_weight[node] = [
                    self.G.get_edge_data(node, nbr)['weight'] for nbr in self.G.neighbors(node)
                ]
        logger.debug("Merging nested graph hierarchies to dictionary: ")
        self.hier_graph_dict[name] = {
            "graph": self.G,
            "ports": top_ports,
            "ports_weight": ports_weight,
            "const": self.const_parse.read_user_const(name),
        }
        self._traverse_hier_in_graph(self.G)
        logger.debug(f"read graph {self.hier_graph_dict}")
        return self.hier_graph_dict

    def _traverse_hier_in_graph(self, G):
        """Recursively record every sub-hierarchy found in the graph."""
        for node, attr in G.nodes(data=True):
            sub_graph = attr.get("sub_graph")
            if not sub_graph:
                continue
            logger.debug(f'Traversing sub graph: {node} {attr["inst_type"]} {attr["ports"]}')
            sub_ports = []
            ports_weight = {}
            for sub_node, sub_attr in sub_graph.nodes(data=True):
                if sub_attr.get('net_type') == "external":
                    sub_ports.append(sub_node)
                    ports_weight[sub_node] = [
                        sub_graph.get_edge_data(sub_node, nbr)['weight']
                        for nbr in sub_graph.neighbors(sub_node)
                    ]
            logger.debug(f'external ports: {sub_ports}, {attr["connection"]}, {ports_weight}')
            self.hier_graph_dict[attr["inst_type"]] = {
                "graph": sub_graph,
                "ports": sub_ports,
                "const": self.const_parse.read_user_const(attr["inst_type"]),
                "ports_weight": ports_weight,
            }
            self._traverse_hier_in_graph(sub_graph)
|
# <editor-fold desc="Basic Imports">
import os
import os.path as p
import requests
from time import time
from argparse import ArgumentParser
import sys
sys.path.append(p.join(p.dirname(__file__), '..'))
sys.path.append(p.join(p.dirname(__file__), '../..'))
# </editor-fold>
# <editor-fold desc="Parse Command Line Args">
# Default locations for the progress ledger and the pre-trained base index.
prog_file_path = p.join(p.dirname(__file__), 'progress.txt')
relative_base_path = '../../base_indexes/USE_lite_base_IVF16K.index'
base_index_path = p.abspath(p.join(p.dirname(__file__), relative_base_path))
arp = ArgumentParser(description='Vectorize Sentences for Searchable Index.')
arp.add_argument('input_dir', help='Path to raw news dir.')
arp.add_argument('output_dir', help='Path to saved index dir.')
arp.add_argument('-p', '--progress_file', default=prog_file_path,
                 help='For keeping track of news that has been preprocessed. '
                      'Default: dig-text-similarity-search/progress.txt')
arp.add_argument('-b', '--base_index_path', default=base_index_path,
                 help='Path to pre-trained empty faiss index. '
                      'Default: dig-text-similarity-search/base_indexes/*.index')
arp.add_argument('-l', '--large', action='store_true',
                 help='Toggle large Universal Sentence Encoder (Transformer NN).')
arp.add_argument('-m', '--m_per_batch', type=int, default=512*128,
                 help='Sentences per batch.')
arp.add_argument('-n', '--n_per_minibatch', type=int, default=64,
                 help='Sentences per mini-batch.')
arp.add_argument('-v', '--verbose', action='store_true',
                 help='Shows progress of batch vectorization.')
arp.add_argument('-t', '--num_threads', default='2',
                 help='Set CPU thread budget for numpy.')
# NOTE: store_false — passing -d KEEPS the per-batch sub.index files.
arp.add_argument('-d', '--no_delete', action='store_false', default=True,
                 help='Keeps faiss indexes for each batch after merging on-disk.')
arp.add_argument('-a', '--add_shard', action='store_true',
                 help='Adds shard to running similarity server.')
arp.add_argument('-u', '--url', default='http://localhost:5954/faiss',
                 help='Port handling similarity server.')
# NOTE: store_false — passing -T disables the log suppression below.
arp.add_argument('-T', '--TF_logging', action='store_false', default=True,
                 help='Increase verbosity of TensorFlow.')
opts = arp.parse_args()
# </editor-fold>
# Thread caps must be exported before numpy is loaded by the dt_sim imports below.
if opts.num_threads:
    print(f'\nRestricting numpy to {opts.num_threads} thread(s)\n')
    os.environ['OPENBLAS_NUM_THREADS'] = opts.num_threads
    os.environ['NUMEXPR_NUM_THREADS'] = opts.num_threads
    os.environ['MKL_NUM_THREADS'] = opts.num_threads
    os.environ['OMP_NUM_THREADS'] = opts.num_threads
# Deliberately deferred imports: they follow the env-var setup above.
from dt_sim.data_reader.jl_io_funcs import check_all_docs, get_all_docs
from dt_sim.data_reader.misc_io_funcs import check_unique, clear_dir
from dt_sim.vectorizer.sentence_vectorizer import SentenceVectorizer
from dt_sim.indexer.index_builder import OnDiskIVFBuilder
from dt_sim.processor.corpus_processor import CorpusProcessor
# Suppress TF logging
if opts.TF_logging:
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Init
sv = SentenceVectorizer(large=opts.large)
idx_bdr = OnDiskIVFBuilder(path_to_base_index=opts.base_index_path)
cp = CorpusProcessor(vectorizer=sv, index_builder=idx_bdr,
                     progress_file=opts.progress_file)
# Track progress
prepped_news = cp.track_preprocessing(cp.progress_file, verbose=opts.verbose)
raw_news = cp.get_news_paths(opts.input_dir, verbose=opts.verbose)
candidates = cp.candidate_files(prepped_news, raw_news, verbose=opts.verbose)
file_to_process = candidates[:1]  # Preprocesses one news.jl per call
def main(raw_jl, output_dir: str = opts.output_dir,
         m_per_batch: int = opts.m_per_batch, n_per_minibatch: int = opts.n_per_minibatch,
         no_delete: bool = opts.no_delete, verbose: bool = opts.verbose,
         add_shard: bool = opts.add_shard, url: str = opts.url):
    """Vectorize one news .jl file batch-by-batch, build a faiss subindex per
    batch, then merge the subindexes into a single on-disk IVF shard.

    Args:
        raw_jl: Path to the raw news jsonlines file to process.
        output_dir: Directory receiving the merged .index/.ivfdata shard.
        m_per_batch: Sentences per document batch.
        n_per_minibatch: Sentences per vectorizer mini-batch.
        no_delete: When True (default), per-batch sub.index files are deleted
            after a successful merge.
        verbose: Print progress/timing information.
        add_shard: Notify a running similarity server of the new shard.
        url: Similarity-server endpoint used when ``add_shard`` is set.
    """
    subidx_dir, shard_date = cp.init_paths(raw_jl)
    if verbose:
        print(f'Will process: {raw_jl}\n')
    # Check File Content
    if verbose:
        print(f'\nReading file: {raw_jl}')
    jl_stats = check_all_docs(raw_jl, batch_size=m_per_batch)
    (doc_count, line_count, junk, n_batches) = jl_stats
    if verbose:
        print(f'* Found {doc_count} good documents with {line_count} total sentences\n'
              f'* Will skip {junk} junk documents\n'
              f'* Processing {n_batches} batches\n')
    # Preprocess
    t_start = time()
    doc_batch_gen = get_all_docs(raw_jl, batch_size=m_per_batch)
    for i, (batched_sents, batched_ids) in enumerate(doc_batch_gen):
        t_0 = time()
        if verbose:
            print(f' Starting doc batch: {i+1:3d}')
        subidx = str(raw_jl.split('/')[-1]).replace('.jl', f'_{i:03d}_sub.index')
        subidx_path = p.join(subidx_dir, subidx)
        if p.exists(subidx_path):
            # Batch was already vectorized on a previous run; reuse it in the merge.
            print(f' File exists: {subidx_path} \n Skipping... ')
            cp.index_builder.include_subidx_path(subidx_path)
        else:
            # Vectorize
            emb_batch, id_batch = cp.batch_vectorize(
                text_batch=batched_sents, id_batch=batched_ids,
                n_minibatch=n_per_minibatch, very_verbose=False
            )
            t_vect = time()
            if verbose:
                print(f' * Vectorized in {t_vect - t_0:6.2f}s')
            # Make faiss subindex
            subidx_path = check_unique(subidx_path)
            cp.index_builder.generate_subindex(subidx_path, emb_batch, id_batch)
            t_subidx = time()
            if verbose:
                print(f' * Subindexed in {t_subidx - t_vect:6.2f}s')
            # Clear graph: free the batch and tear down the TF session to cap memory.
            del emb_batch, batched_sents, id_batch
            cp.vectorizer.close_session()
            t_reset = time()
            if verbose:
                print(f' * Cleared TF in {t_reset - t_subidx:6.2f}s')
            # Restart TF session if necessary (skip after the final batch).
            if i < n_batches - 1:
                cp.vectorizer.start_session()
                if verbose:
                    print(f' * Started TF in {time() - t_reset:6.2f}s')
        if verbose:
            mp, sp = divmod(time() - t_start, 60)
            print(f' Completed doc batch: {i+1:3d}/{n_batches} '
                  f' Total time passed: {int(mp):3d}m{sp:0.2f}s\n')
    # Merge
    # TODO: Title indexes
    t_merge = time()
    merged_index_path = shard_date + '_all.index'
    merged_index_path = p.join(output_dir, merged_index_path)
    merged_index_path = check_unique(merged_index_path)
    merged_ivfdata_path = shard_date + '_all.ivfdata'
    merged_ivfdata_path = p.join(output_dir, merged_ivfdata_path)
    merged_ivfdata_path = check_unique(merged_ivfdata_path)
    if verbose:
        # Fix: the original reused single quotes inside a single-quoted f-string
        # ({...split('/')[-1]}), which is a SyntaxError before Python 3.12.
        print(f'\n Merging {merged_index_path.split("/")[-1]} on-disk')
    assert cp.index_builder.index_path_clear(merged_index_path)
    assert cp.index_builder.index_path_clear(merged_ivfdata_path, '.ivfdata')
    n_vect = cp.index_builder.merge_IVFs(index_path=merged_index_path,
                                         ivfdata_path=merged_ivfdata_path)
    if verbose:
        mm, sm = divmod(time() - t_merge, 60)
        print(f' Merged subindexes ({n_vect} vectors) in: {int(mm):3d}m{sm:0.2f}s')
    # Record progress
    cp.record_progress(raw_jl)
    # Clear sub.index files after merge
    if no_delete:
        clear_dir(subidx_dir)
        if verbose:
            print('\n Cleared sub.index files')
    if add_shard:
        try:
            payload = {'path': merged_index_path}
            r = requests.put(url, params=payload)
            print(r.text)
        except Exception as e:
            print(f'Shard was not added because an exception occurred: {e}')
if __name__ == '__main__':
    # At most one candidate news.jl file is preprocessed per invocation.
    if not file_to_process:
        print('Nothing to process.')
    else:
        main(raw_jl=file_to_process[0])
| # <editor-fold desc="Basic Imports">
import os
import os.path as p
import requests
from time import time
from argparse import ArgumentParser
import sys
sys.path.append(p.join(p.dirname(__file__), '..'))
sys.path.append(p.join(p.dirname(__file__), '../..'))
# </editor-fold>
# <editor-fold desc="Parse Command Line Args">
# Default locations for the progress ledger and the pre-trained base index.
prog_file_path = p.join(p.dirname(__file__), 'progress.txt')
relative_base_path = '../../base_indexes/USE_lite_base_IVF16K.index'
base_index_path = p.abspath(p.join(p.dirname(__file__), relative_base_path))
arp = ArgumentParser(description='Vectorize Sentences for Searchable Index.')
arp.add_argument('input_dir', help='Path to raw news dir.')
arp.add_argument('output_dir', help='Path to saved index dir.')
arp.add_argument('-p', '--progress_file', default=prog_file_path,
                 help='For keeping track of news that has been preprocessed. '
                      'Default: dig-text-similarity-search/progress.txt')
arp.add_argument('-b', '--base_index_path', default=base_index_path,
                 help='Path to pre-trained empty faiss index. '
                      'Default: dig-text-similarity-search/base_indexes/*.index')
arp.add_argument('-l', '--large', action='store_true',
                 help='Toggle large Universal Sentence Encoder (Transformer NN).')
arp.add_argument('-m', '--m_per_batch', type=int, default=512*128,
                 help='Sentences per batch.')
arp.add_argument('-n', '--n_per_minibatch', type=int, default=64,
                 help='Sentences per mini-batch.')
arp.add_argument('-v', '--verbose', action='store_true',
                 help='Shows progress of batch vectorization.')
arp.add_argument('-t', '--num_threads', default='2',
                 help='Set CPU thread budget for numpy.')
# NOTE: store_false — passing -d KEEPS the per-batch sub.index files.
arp.add_argument('-d', '--no_delete', action='store_false', default=True,
                 help='Keeps faiss indexes for each batch after merging on-disk.')
arp.add_argument('-a', '--add_shard', action='store_true',
                 help='Adds shard to running similarity server.')
arp.add_argument('-u', '--url', default='http://localhost:5954/faiss',
                 help='Port handling similarity server.')
# NOTE: store_false — passing -T disables the log suppression below.
arp.add_argument('-T', '--TF_logging', action='store_false', default=True,
                 help='Increase verbosity of TensorFlow.')
opts = arp.parse_args()
# </editor-fold>
# Thread caps must be exported before numpy is loaded by the dt_sim imports below.
if opts.num_threads:
    print(f'\nRestricting numpy to {opts.num_threads} thread(s)\n')
    os.environ['OPENBLAS_NUM_THREADS'] = opts.num_threads
    os.environ['NUMEXPR_NUM_THREADS'] = opts.num_threads
    os.environ['MKL_NUM_THREADS'] = opts.num_threads
    os.environ['OMP_NUM_THREADS'] = opts.num_threads
# Deliberately deferred imports: they follow the env-var setup above.
from dt_sim.data_reader.jl_io_funcs import check_all_docs, get_all_docs
from dt_sim.data_reader.misc_io_funcs import check_unique, clear_dir
from dt_sim.vectorizer.sentence_vectorizer import SentenceVectorizer
from dt_sim.indexer.index_builder import OnDiskIVFBuilder
from dt_sim.processor.corpus_processor import CorpusProcessor
# Suppress TF logging
if opts.TF_logging:
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Init
sv = SentenceVectorizer(large=opts.large)
idx_bdr = OnDiskIVFBuilder(path_to_base_index=opts.base_index_path)
cp = CorpusProcessor(vectorizer=sv, index_builder=idx_bdr,
                     progress_file=opts.progress_file)
# Track progress
prepped_news = cp.track_preprocessing(cp.progress_file, verbose=opts.verbose)
raw_news = cp.get_news_paths(opts.input_dir, verbose=opts.verbose)
candidates = cp.candidate_files(prepped_news, raw_news, verbose=opts.verbose)
file_to_process = candidates[:1]  # Preprocesses one news.jl per call
def main(raw_jl, output_dir: str = opts.output_dir,
         m_per_batch: int = opts.m_per_batch, n_per_minibatch: int = opts.n_per_minibatch,
         no_delete: bool = opts.no_delete, verbose: bool = opts.verbose,
         add_shard: bool = opts.add_shard, url: str = opts.url):
    """Vectorize one news .jl file batch-by-batch, build a faiss subindex per
    batch, then merge the subindexes into a single on-disk IVF shard.

    Args:
        raw_jl: Path to the raw news jsonlines file to process.
        output_dir: Directory receiving the merged .index/.ivfdata shard.
        m_per_batch: Sentences per document batch.
        n_per_minibatch: Sentences per vectorizer mini-batch.
        no_delete: When True (default), per-batch sub.index files are deleted
            after a successful merge.
        verbose: Print progress/timing information.
        add_shard: Notify a running similarity server of the new shard.
        url: Similarity-server endpoint used when ``add_shard`` is set.
    """
    subidx_dir, shard_date = cp.init_paths(raw_jl)
    if verbose:
        print(f'Will process: {raw_jl}\n')
    # Check File Content
    if verbose:
        print(f'\nReading file: {raw_jl}')
    jl_stats = check_all_docs(raw_jl, batch_size=m_per_batch)
    (doc_count, line_count, junk, n_batches) = jl_stats
    if verbose:
        print(f'* Found {doc_count} good documents with {line_count} total sentences\n'
              f'* Will skip {junk} junk documents\n'
              f'* Processing {n_batches} batches\n')
    # Preprocess
    t_start = time()
    doc_batch_gen = get_all_docs(raw_jl, batch_size=m_per_batch)
    for i, (batched_sents, batched_ids) in enumerate(doc_batch_gen):
        t_0 = time()
        if verbose:
            print(f' Starting doc batch: {i+1:3d}')
        subidx = str(raw_jl.split('/')[-1]).replace('.jl', f'_{i:03d}_sub.index')
        subidx_path = p.join(subidx_dir, subidx)
        if p.exists(subidx_path):
            # Batch was already vectorized on a previous run; reuse it in the merge.
            print(f' File exists: {subidx_path} \n Skipping... ')
            cp.index_builder.include_subidx_path(subidx_path)
        else:
            # Vectorize
            emb_batch, id_batch = cp.batch_vectorize(
                text_batch=batched_sents, id_batch=batched_ids,
                n_minibatch=n_per_minibatch, very_verbose=False
            )
            t_vect = time()
            if verbose:
                print(f' * Vectorized in {t_vect - t_0:6.2f}s')
            # Make faiss subindex
            subidx_path = check_unique(subidx_path)
            cp.index_builder.generate_subindex(subidx_path, emb_batch, id_batch)
            t_subidx = time()
            if verbose:
                print(f' * Subindexed in {t_subidx - t_vect:6.2f}s')
            # Clear graph: free the batch and tear down the TF session to cap memory.
            del emb_batch, batched_sents, id_batch
            cp.vectorizer.close_session()
            t_reset = time()
            if verbose:
                print(f' * Cleared TF in {t_reset - t_subidx:6.2f}s')
            # Restart TF session if necessary (skip after the final batch).
            if i < n_batches - 1:
                cp.vectorizer.start_session()
                if verbose:
                    print(f' * Started TF in {time() - t_reset:6.2f}s')
        if verbose:
            mp, sp = divmod(time() - t_start, 60)
            print(f' Completed doc batch: {i+1:3d}/{n_batches} '
                  f' Total time passed: {int(mp):3d}m{sp:0.2f}s\n')
    # Merge
    # TODO: Title indexes
    t_merge = time()
    merged_index_path = shard_date + '_all.index'
    merged_index_path = p.join(output_dir, merged_index_path)
    merged_index_path = check_unique(merged_index_path)
    merged_ivfdata_path = shard_date + '_all.ivfdata'
    merged_ivfdata_path = p.join(output_dir, merged_ivfdata_path)
    merged_ivfdata_path = check_unique(merged_ivfdata_path)
    if verbose:
        print(f'\n Merging {merged_index_path.split("/")[-1]} on-disk')
    assert cp.index_builder.index_path_clear(merged_index_path)
    assert cp.index_builder.index_path_clear(merged_ivfdata_path, '.ivfdata')
    n_vect = cp.index_builder.merge_IVFs(index_path=merged_index_path,
                                         ivfdata_path=merged_ivfdata_path)
    if verbose:
        mm, sm = divmod(time() - t_merge, 60)
        print(f' Merged subindexes ({n_vect} vectors) in: {int(mm):3d}m{sm:0.2f}s')
    # Record progress
    cp.record_progress(raw_jl)
    # Clear sub.index files after merge
    if no_delete:
        clear_dir(subidx_dir)
        if verbose:
            print('\n Cleared sub.index files')
    if add_shard:
        try:
            # No-op reassignment kept verbatim; candidate for removal.
            url = url
            payload = {'path': merged_index_path}
            r = requests.put(url, params=payload)
            print(r.text)
        except Exception as e:
            print(f'Shard was not added because an exception occurred: {e}')
if __name__ == '__main__':
    # At most one candidate news.jl file is preprocessed per invocation.
    if not file_to_process:
        print('Nothing to process.')
    else:
        main(raw_jl=file_to_process[0])
|
import json
import time
from dataclasses import dataclass
from logging import Logger
import requests
from insightconnect_plugin_runtime.exceptions import PluginException
from insightconnect_plugin_runtime.helper import clean
from requests.auth import HTTPBasicAuth
@dataclass
class AlertParams:
    """Filter parameters for the IntSights alert-list endpoint."""

    # NOTE: the `[str]` annotations are list-literal shorthand; at runtime any
    # object is a valid annotation, so the dataclass fields work unchanged.
    alert_type: [str]
    severity: [str]
    source_type: [str]
    network_type: [str]
    # NOTE(review): annotated str but comma-joined in to_dict() like the list
    # fields above — confirm callers pass a list of asset values.
    matched_asset_value: str
    remediation_status: [str]
    # Date bounds; converted with int() in to_dict(), so epoch-style strings.
    source_date_from: str
    source_date_to: str
    found_date_from: str
    found_date_to: str
    # Tri-state string flags; compared against "Assigned"/"Flagged"/"Closed".
    assigned: str
    is_flagged: str
    is_closed: str
    has_ioc: bool
    def to_dict(self) -> dict:
        """Build the API query dict; clean() drops empty/None entries."""
        return clean(
            {
                "alertType": ",".join(self.alert_type) if self.alert_type else None,
                "severity": ",".join(self.severity) if self.severity else None,
                "sourceType": ",".join(self.source_type) if self.source_type else None,
                "networkType": ",".join(self.network_type) if self.network_type else None,
                "matchedAssetValue": ",".join(self.matched_asset_value) if self.matched_asset_value else None,
                "remediationStatus": ",".join(self.remediation_status) if self.remediation_status else None,
                "sourceDateFrom": int(self.source_date_from) if self.source_date_from else None,
                "sourceDateTo": int(self.source_date_to) if self.source_date_to else None,
                "foundDateFrom": int(self.found_date_from) if self.found_date_from else None,
                "foundDateTo": int(self.found_date_to) if self.found_date_to else None,
                "assigned": self.assigned == "Assigned" if self.assigned else None,
                "isFlagged": self.is_flagged == "Flagged" if self.is_flagged else None,
                "isClosed": self.is_closed == "Closed" if self.is_closed else None,
                "hasIoc": self.has_ioc,
            }
        )
@dataclass
class Image:
    """An image attachment for a manual alert."""
    type: str  # image MIME/type token, re-keyed as "Type" in the payload
    data: str  # image payload, re-keyed as "Data" in the payload
@dataclass
class ManualAlertParams:
    """Payload parameters for manually creating an IntSights alert."""

    title: str
    found_date: str
    description: str
    type: str
    sub_type: str
    severity: str
    # NOTE(review): the four fields below are annotated int but feed string-ish
    # payload slots (e.g. source_url) — confirm the intended types.
    source_type: int
    source_network_type: int
    source_url: int
    source_date: int
    # List-literal annotation shorthand for a list of Image-like dicts.
    images: [Image]
    def to_dict(self) -> dict:
        """Build the add-alert request body; clean() drops empty entries.

        Raises:
            PluginException: If an image dict lacks a "type" or "data" key.
        """
        images = []
        if self.images:
            for image in self.images:
                if not image:
                    continue
                try:
                    # Each image is accessed as a dict, not an Image instance.
                    images.append({"Type": image["type"], "Data": image["data"]})
                except KeyError as e:
                    raise PluginException(cause="Wrong input parameter.", assistance=f"Wrong image: {e}.")
        return clean(
            {
                "FoundDate": self.found_date,
                "Details": {
                    "Title": self.title,
                    "Description": self.description,
                    "Type": self.type,
                    "SubType": self.sub_type,
                    "Severity": self.severity,
                    "Source": {
                        "Type": self.source_type,
                        "NetworkType": self.source_network_type,
                        "URL": self.source_url,
                        "Date": self.source_date,
                    },
                    "Images": images,
                },
            }
        )
class IntSightsAPI:
    """Minimal client for the IntSights REST API using HTTP basic auth."""

    def __init__(self, account_id: str, api_key: str, logger: Logger):
        """Store credentials and the base URL; no request is made here."""
        self.account_id = account_id
        self.api_key = api_key
        self.url = "https://api.intsights.com"
        self.logger = logger

    def get_indicator_by_value(self, ioc_value: str) -> dict:
        """Fetch a single IOC record by its value."""
        return self.make_json_request("GET", f"public/v2/iocs/ioc-by-value?iocValue={ioc_value}")

    def enrich_indicator(self, ioc_value: str) -> dict:
        """Trigger IOC enrichment and poll every 5 s until Done/Failed."""
        response = {}
        for _ in range(0, 9999):
            response = self.make_json_request("GET", f"public/v1/iocs/enrich/{ioc_value}")
            if response.get("Status", "InProgress") in ["Done", "Failed"]:
                break
            time.sleep(5)
        return response

    def rescan_indicator(self, indicator_file_hash: str) -> dict:
        """Request a rescan of a file-hash IOC; returns the task info."""
        return self.make_json_request("POST", "public/v1/iocs/rescan", json_data={"IocValue": indicator_file_hash})

    def get_scan_status(self, task_id: str) -> dict:
        """Fetch the status of a previously requested rescan task."""
        return self.make_json_request("GET", f"public/v1/iocs/rescan/status/{task_id}")

    def get_complete_alert_by_id(self, alert_id: str) -> dict:
        """Fetch the full alert record for the given alert ID."""
        return self.make_json_request("GET", f"public/v1/data/alerts/get-complete-alert/{alert_id}")

    def takedown_request(self, alert_id: str, target: str) -> dict:
        """Submit a takedown request for an alert against the given target."""
        return self.make_json_request(
            "PATCH", f"public/v1/data/alerts/takedown-request/{alert_id}", json_data={"Target": target}
        )

    def get_alerts(self, alert_params: AlertParams) -> list:
        """List alert IDs matching the given filter parameters."""
        return self.make_request("GET", "public/v1/data/alerts/alerts-list", params=alert_params.to_dict()).json()

    def add_manual_alert(self, manual_alert_params: ManualAlertParams) -> str:
        """Create a manual alert; returns the new alert ID as raw text."""
        return self.make_request("PUT", "public/v1/data/alerts/add-alert", json_data=manual_alert_params.to_dict()).text

    def test_credentials(self) -> bool:
        """Return True if the stored credentials are accepted by the API."""
        return self.make_request("HEAD", "public/v1/test-credentials").status_code == 200

    def make_json_request(self, method: str, path: str, json_data: dict = None, params: dict = None) -> dict:
        """Issue a request and decode the JSON body ({} for HTTP 204).

        Raises:
            PluginException: On an "Invalid" API status or undecodable JSON.
        """
        try:
            response = self.make_request(method=method, path=path, json_data=json_data, params=params)
            if response.status_code == 204:
                return {}
            json_response = response.json()
            if json_response.get("Status") == "Invalid":
                raise PluginException(
                    # Fix: the original reused double quotes inside a
                    # double-quoted f-string — a SyntaxError before Python 3.12.
                    cause="IntSights returned an error response: ",
                    assistance=f"{json_response.get('FailedReason')}.",
                )
            return json_response
        except json.decoder.JSONDecodeError as e:
            raise PluginException(preset=PluginException.Preset.INVALID_JSON, data=e)

    def make_request(self, method: str, path: str, json_data: dict = None, params: dict = None) -> requests.Response:
        """Issue an authenticated HTTP request and map error codes to PluginException."""
        try:
            response = requests.request(
                method=method,
                url=f"{self.url}/{path}",
                headers={"Content-Type": "application/json"},
                verify=True,
                params=params,
                json=json_data,
                auth=HTTPBasicAuth(self.account_id, self.api_key),
            )
            if response.status_code == 401:
                raise PluginException(preset=PluginException.Preset.USERNAME_PASSWORD, data=response.text)
            if response.status_code == 403:
                raise PluginException(preset=PluginException.Preset.API_KEY, data=response.text)
            if response.status_code == 404:
                raise PluginException(preset=PluginException.Preset.NOT_FOUND, data=response.text)
            if 400 <= response.status_code < 500:
                raise PluginException(
                    preset=PluginException.Preset.UNKNOWN,
                    data=response.text,
                )
            if response.status_code >= 500:
                raise PluginException(preset=PluginException.Preset.SERVER_ERROR, data=response.text)
            if 200 <= response.status_code < 300:
                return response
            raise PluginException(preset=PluginException.Preset.UNKNOWN, data=response.text)
        except requests.exceptions.HTTPError as e:
            raise PluginException(preset=PluginException.Preset.UNKNOWN, data=e)
| import json
import time
from dataclasses import dataclass
from logging import Logger
import requests
from insightconnect_plugin_runtime.exceptions import PluginException
from insightconnect_plugin_runtime.helper import clean
from requests.auth import HTTPBasicAuth
@dataclass
class AlertParams:
    """Filter parameters for the IntSights alert-list endpoint."""

    # NOTE: the `[str]` annotations are list-literal shorthand; at runtime any
    # object is a valid annotation, so the dataclass fields work unchanged.
    alert_type: [str]
    severity: [str]
    source_type: [str]
    network_type: [str]
    # NOTE(review): annotated str but comma-joined in to_dict() like the list
    # fields above — confirm callers pass a list of asset values.
    matched_asset_value: str
    remediation_status: [str]
    # Date bounds; converted with int() in to_dict(), so epoch-style strings.
    source_date_from: str
    source_date_to: str
    found_date_from: str
    found_date_to: str
    # Tri-state string flags; compared against "Assigned"/"Flagged"/"Closed".
    assigned: str
    is_flagged: str
    is_closed: str
    has_ioc: bool
    def to_dict(self) -> dict:
        """Build the API query dict; clean() drops empty/None entries."""
        return clean(
            {
                "alertType": ",".join(self.alert_type) if self.alert_type else None,
                "severity": ",".join(self.severity) if self.severity else None,
                "sourceType": ",".join(self.source_type) if self.source_type else None,
                "networkType": ",".join(self.network_type) if self.network_type else None,
                "matchedAssetValue": ",".join(self.matched_asset_value) if self.matched_asset_value else None,
                "remediationStatus": ",".join(self.remediation_status) if self.remediation_status else None,
                "sourceDateFrom": int(self.source_date_from) if self.source_date_from else None,
                "sourceDateTo": int(self.source_date_to) if self.source_date_to else None,
                "foundDateFrom": int(self.found_date_from) if self.found_date_from else None,
                "foundDateTo": int(self.found_date_to) if self.found_date_to else None,
                "assigned": self.assigned == "Assigned" if self.assigned else None,
                "isFlagged": self.is_flagged == "Flagged" if self.is_flagged else None,
                "isClosed": self.is_closed == "Closed" if self.is_closed else None,
                "hasIoc": self.has_ioc,
            }
        )
@dataclass
class Image:
    """An image attachment for a manual alert."""
    type: str  # image MIME/type token, re-keyed as "Type" in the payload
    data: str  # image payload, re-keyed as "Data" in the payload
@dataclass
class ManualAlertParams:
    """Payload parameters for manually creating an IntSights alert."""

    title: str
    found_date: str
    description: str
    type: str
    sub_type: str
    severity: str
    # NOTE(review): the four fields below are annotated int but feed string-ish
    # payload slots (e.g. source_url) — confirm the intended types.
    source_type: int
    source_network_type: int
    source_url: int
    source_date: int
    # List-literal annotation shorthand for a list of Image-like dicts.
    images: [Image]
    def to_dict(self) -> dict:
        """Build the add-alert request body; clean() drops empty entries.

        Raises:
            PluginException: If an image dict lacks a "type" or "data" key.
        """
        images = []
        if self.images:
            for image in self.images:
                if not image:
                    continue
                try:
                    # Each image is accessed as a dict, not an Image instance.
                    images.append({"Type": image["type"], "Data": image["data"]})
                except KeyError as e:
                    raise PluginException(cause="Wrong input parameter.", assistance=f"Wrong image: {e}.")
        return clean(
            {
                "FoundDate": self.found_date,
                "Details": {
                    "Title": self.title,
                    "Description": self.description,
                    "Type": self.type,
                    "SubType": self.sub_type,
                    "Severity": self.severity,
                    "Source": {
                        "Type": self.source_type,
                        "NetworkType": self.source_network_type,
                        "URL": self.source_url,
                        "Date": self.source_date,
                    },
                    "Images": images,
                },
            }
        )
class IntSightsAPI:
    """Minimal client for the IntSights REST API using HTTP basic auth."""
    def __init__(self, account_id: str, api_key: str, logger: Logger):
        # Credentials are only stored here; no request is made at init time.
        self.account_id = account_id
        self.api_key = api_key
        self.url = "https://api.intsights.com"
        self.logger = logger
    def get_indicator_by_value(self, ioc_value: str) -> dict:
        """Fetch a single IOC record by its value."""
        return self.make_json_request("GET", f"public/v2/iocs/ioc-by-value?iocValue={ioc_value}")
    def enrich_indicator(self, ioc_value: str) -> dict:
        """Trigger IOC enrichment and poll every 5 s until Done/Failed."""
        response = {}
        for _ in range(0, 9999):
            response = self.make_json_request("GET", f"public/v1/iocs/enrich/{ioc_value}")
            if response.get("Status", "InProgress") in ["Done", "Failed"]:
                break
            time.sleep(5)
        return response
    def rescan_indicator(self, indicator_file_hash: str) -> dict:
        """Request a rescan of a file-hash IOC; returns the task info."""
        return self.make_json_request("POST", "public/v1/iocs/rescan", json_data={"IocValue": indicator_file_hash})
    def get_scan_status(self, task_id: str) -> dict:
        """Fetch the status of a previously requested rescan task."""
        return self.make_json_request("GET", f"public/v1/iocs/rescan/status/{task_id}")
    def get_complete_alert_by_id(self, alert_id: str) -> dict:
        """Fetch the full alert record for the given alert ID."""
        return self.make_json_request("GET", f"public/v1/data/alerts/get-complete-alert/{alert_id}")
    def takedown_request(self, alert_id: str, target: str) -> dict:
        """Submit a takedown request for an alert against the given target."""
        return self.make_json_request(
            "PATCH", f"public/v1/data/alerts/takedown-request/{alert_id}", json_data={"Target": target}
        )
    def get_alerts(self, alert_params: AlertParams) -> list:
        """List alert IDs matching the given filter parameters."""
        return self.make_request("GET", "public/v1/data/alerts/alerts-list", params=alert_params.to_dict()).json()
    def add_manual_alert(self, manual_alert_params: ManualAlertParams) -> str:
        """Create a manual alert; returns the response body as raw text."""
        return self.make_request("PUT", "public/v1/data/alerts/add-alert", json_data=manual_alert_params.to_dict()).text
    def test_credentials(self) -> bool:
        """Return True if the stored credentials are accepted by the API."""
        return self.make_request("HEAD", "public/v1/test-credentials").status_code == 200
    def make_json_request(self, method: str, path: str, json_data: dict = None, params: dict = None) -> dict:
        """Issue a request and decode the JSON body ({} for HTTP 204).

        Raises:
            PluginException: On an "Invalid" API status or undecodable JSON.
        """
        try:
            response = self.make_request(method=method, path=path, json_data=json_data, params=params)
            if response.status_code == 204:
                return {}
            json_response = response.json()
            if json_response.get("Status") == "Invalid":
                raise PluginException(
                    cause="IntSights returned an error response: ", assistance=f"{json_response.get('FailedReason')}."
                )
            return json_response
        except json.decoder.JSONDecodeError as e:
            raise PluginException(preset=PluginException.Preset.INVALID_JSON, data=e)
    def make_request(self, method: str, path: str, json_data: dict = None, params: dict = None) -> requests.Response:
        """Issue an authenticated HTTP request, mapping error codes to PluginException."""
        try:
            response = requests.request(
                method=method,
                url=f"{self.url}/{path}",
                headers={"Content-Type": "application/json"},
                verify=True,
                params=params,
                json=json_data,
                auth=HTTPBasicAuth(self.account_id, self.api_key),
            )
            if response.status_code == 401:
                raise PluginException(preset=PluginException.Preset.USERNAME_PASSWORD, data=response.text)
            if response.status_code == 403:
                raise PluginException(preset=PluginException.Preset.API_KEY, data=response.text)
            if response.status_code == 404:
                raise PluginException(preset=PluginException.Preset.NOT_FOUND, data=response.text)
            if 400 <= response.status_code < 500:
                raise PluginException(
                    preset=PluginException.Preset.UNKNOWN,
                    data=response.text,
                )
            if response.status_code >= 500:
                raise PluginException(preset=PluginException.Preset.SERVER_ERROR, data=response.text)
            if 200 <= response.status_code < 300:
                return response
            raise PluginException(preset=PluginException.Preset.UNKNOWN, data=response.text)
        except requests.exceptions.HTTPError as e:
            raise PluginException(preset=PluginException.Preset.UNKNOWN, data=e)
|
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
    """Process exit codes used by the c8ylp CLI."""
    OK = 0
    NO_SESSION = 2
    NOT_AUTHORIZED = 3
    # Device/configuration lookup failures
    DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
    DEVICE_NO_PASSTHROUGH_CONFIG = 6
    DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
    MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
    UNKNOWN = 9
    # Local-environment failures
    SSH_NOT_FOUND = 10
    TIMEOUT_WAIT_FOR_PORT = 11
    COMMAND_NOT_FOUND = 12
    # Plugin failures
    PLUGIN_EXECUTION_ERROR = 20
    PLUGIN_INVALID_FORMAT = 21
    PLUGIN_NOT_FOUND = 22
    # Interrupted by signal (see signal_handler below)
    TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
    """Local proxy context"""
    # NOTE(review): these class attributes carry no type annotations, so
    # @dataclass generates no fields from them; they act as plain class-level
    # defaults and the hand-written __init__ below is the real constructor.
    host = ""
    device = ""
    external_type = ""
    config = ""
    tenant = ""
    user = ""
    token = ""
    password = ""
    tfa_code = ""
    port = 0
    ping_interval = 0
    kill = False
    tcp_size = 0
    tcp_timeout = 0
    verbose = False
    ignore_ssl_validate = False
    reconnects = 0
    ssh_user = ""
    additional_args = None
    disable_prompts = False
    env_file = None
    store_token = False
    wait_port_timeout = 60.0
    def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
        """Create the context, optionally seeding settings from *src_dict*."""
        self._ctx = ctx
        if src_dict is not None:
            self.fromdict(src_dict)
        # Logging is configured as a side effect of constructing the context.
        configure_logger(CliLogger.log_path(), self.verbose)
    @property
    def _root_context(self) -> click.Context:
        # Shared dict stored on the root click context (survives sub-commands).
        return self._ctx.find_root().ensure_object(dict)
    @property
    def used_port(self) -> int:
        """Get the port used by the local proxy
        Returns:
            int: Port number
        """
        return self._root_context.get("used_port", self.port)
    @used_port.setter
    def used_port(self, value: int):
        """Store the port used by the local proxy for later reference
        Args:
            value (int): Port number
        """
        self._root_context["used_port"] = value
    def exit_server_not_ready(self) -> NoReturn:
        """Exit with a server not ready error
        Returns:
            NoReturn: The function does not return
        """
        self.show_error(
            "Timed out waiting for local port to open: "
            f"port={self.used_port}, timeout={self.wait_port_timeout}s"
        )
        self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
    def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
        """Load proxy settings from a dictionary
        Args:
            src_dict (Dict[str, Any]): Mapping of attribute name to value;
                unknown keys are ignored.
        Returns:
            ProxyContext: Proxy options after the values have been set
            via the dictionary
        """
        logging.info("Loading from dictionary")
        assert isinstance(src_dict, dict)
        for key, value in src_dict.items():
            logging.info("reading key: %s=%s", key, value)
            if hasattr(self, key):
                setattr(self, key, value)
        return self
    def start_background(self, ctx: click.Context = None) -> "ProxyContext":
        """Start the local proxy in the background
        Returns:
            ProxyContext: Reference to the proxy context so it can be chained
            with other commands or used after the initialization of the class.
        """
        cur_ctx = ctx or self._ctx
        connection_data = pre_start_checks(cur_ctx, self)
        ready_signal = threading.Event()
        run_proxy_in_background(
            cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
        )
        # Block until the proxy signals readiness or the timeout elapses.
        if not ready_signal.wait(self.wait_port_timeout):
            self.exit_server_not_ready()
        return self
    def start(self, ctx: click.Context = None) -> None:
        """Run pre-start checks and start the local proxy (blocking variant
        of :meth:`start_background`).
        """
        cur_ctx = ctx or self._ctx
        connection_data = pre_start_checks(cur_ctx, self)
        start_proxy(cur_ctx, self, connection_data=connection_data)
    @classmethod
    def show_message(cls, msg: str, *args, **kwargs):
        """Show a message to the user and log it
        Args:
            msg (str): User message to print on the console
        """
        click.secho(msg, fg="green")
        logging.info(msg, *args, **kwargs)
    def show_error(self, msg: str, *args, **kwargs):
        """Show an error to the user and log it
        Args:
            msg (str): User message to print on the console
        """
        # In verbose mode the console log handler already echoes the message.
        if not self.verbose:
            click.secho(msg, fg="red")
        logging.warning(msg, *args, **kwargs)
    def show_info(self, msg: str, *args, **kwargs):
        """Show an info message to the user and log it
        Args:
            msg (str): User message to print on the console
        """
        if not self.verbose:
            click.secho(msg)
        # NOTE(review): logs at WARNING despite being the "info" helper —
        # confirm whether logging.info was intended.
        logging.warning(msg, *args, **kwargs)
    def show_warning(self, msg: str, *args, **kwargs):
        """Show a warning to the user and log it
        Args:
            msg (str): User message to print on the console
        """
        if not self.verbose:
            click.secho(msg, fg="yellow")
        logging.warning(msg, *args, **kwargs)
    def set_env(self):
        """Set environment variables so information about the proxy can
        be access by plugins
        """
        os.environ["C8Y_HOST"] = str(self.host)
        os.environ["PORT"] = str(self.used_port)
        os.environ["DEVICE"] = self.device
        # Support WSL environments: expose these variables to WSL processes
        os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
    """Remote access connection data"""
    client: CumulocityClient     # authenticated Cumulocity REST client
    managed_object_id: str       # id of the device's managed object
    remote_config_id: str        # id of the selected remote-access config
# Remote-access config type that tunnels raw TCP, and the fragment name on the
# managed object that lists the available remote-access configurations.
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
    """Helper for locating the CLI's persistent log file."""

    # pylint: disable=too-few-public-methods

    @classmethod
    def log_path(cls) -> pathlib.Path:
        """Return the log file path (directory overridable via C8YLP_LOG_DIR)."""
        log_dir = os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")
        return pathlib.Path(log_dir).expanduser() / "localproxy.log"
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# ignore console log messages
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
# Retry logging so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
'Provided config name "%s" for "%s" was not found or none with protocal set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
# Start the proxy in a background thread so the user can
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
# Only set ready signal once the whole env include env variables has
# been setup
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ""}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ""}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for reference to later
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# Plugins start in a background thread so don't display it
# as the plugins should do their own thing
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
| #
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
src_dict (Dict[str, Any]): [description]
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
be access by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
# Support WSL environments and expose variables to be explosed to WSL
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# ignore console log messages
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
# Retry logging so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
'Provided config name "%s" for "%s" was not found or none with protocal set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
# Start the proxy in a background thread so the user can
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
# Only set ready signal once the whole env include env variables has
# been setup
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
tcp_server = None
background = None
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
exit_code = ExitCodes.OK
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for reference to later
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# Plugins start in a background thread so don't display it
# as the plugins should do their own thing
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
|
from math import log, exp
from numpy import inf, zeros, zeros_like as np_zeros_like, arange, asarray, empty
from pandas import concat
from anndata import AnnData
from torch import cat, no_grad, randn, zeros_like, zeros as torch_zeros, ones, argmax
from torch.nn import Module, Linear, Sequential, RNNCell, Softplus, Parameter, Softmax
from torch.optim import Adam
from torch.optim.lr_scheduler import StepLR
from .Layers import Input_Block, FF_Block, LambdaLayer, Dual_Forward
class sciPENN_Model(Module):
def __init__(self, p_mod1, p_mod2, loss1, loss2, quantiles, categories):
super(sciPENN_Model, self).__init__()
h_size, drop_rate = 512, 0.25
self.RNNCell = RNNCell(h_size, h_size)
self.input_block = Input_Block(p_mod1, h_size, drop_rate, drop_rate)
self.skip_1 = FF_Block(h_size, drop_rate)
self.skip_2 = FF_Block(h_size, drop_rate)
self.skip_3 = FF_Block(h_size, drop_rate)
MSE_output = Linear(h_size, p_mod2)
if len(quantiles) > 0:
quantile_layer = []
quantile_layer.append(Linear(h_size, p_mod2 * len(quantiles)))
quantile_layer.append(LambdaLayer(lambda x: x.view(-1, p_mod2, len(quantiles))))
quantile_layer = Sequential(*quantile_layer)
self.mod2_out = Dual_Forward(MSE_output, quantile_layer)
else:
self.mod2_out = MSE_output
if categories is not None:
self.celltype_out = Sequential(Linear(h_size, len(categories)), Softmax(1))
self.forward = self.forward_transfer
self.categories_arr = empty((len(categories), ), dtype = 'object')
for cat in categories:
self.categories_arr[categories[cat]] = cat
else:
self.forward = self.forward_simple
self.categories_arr = None
self.quantiles = quantiles
self.loss1, self.loss2 = loss1, loss2
def forward_transfer(self, x):
x = self.input_block(x)
h = self.RNNCell(x, zeros_like(x))
x = self.skip_1(x)
h = self.RNNCell(x, h)
x = self.skip_2(x)
h = self.RNNCell(x, h)
x = self.skip_3(x)
h = self.RNNCell(x, h)
return {'celltypes': self.celltype_out(h.detach()), 'modality 2': self.mod2_out(h), 'embedding': h}
def forward_simple(self, x):
x = self.input_block(x)
h = self.RNNCell(x, zeros_like(x))
x = self.skip_1(x)
h = self.RNNCell(x, h)
x = self.skip_2(x)
h = self.RNNCell(x, h)
x = self.skip_3(x)
h = self.RNNCell(x, h)
return {'celltypes': None, 'modality 2': self.mod2_out(h), 'embedding': h}
def train_backprop(self, train_loader, val_loader,
n_epoch = 10000, ES_max = 30, decay_max = 10, decay_step = 0.1, lr = 10**(-3)):
optimizer = Adam(self.parameters(), lr = lr)
scheduler = StepLR(optimizer, step_size = 1, gamma = decay_step)
patience = 0
bestloss = inf
if self.categories_arr is None:
get_correct = lambda x: 0
else:
get_correct = lambda outputs: (argmax(outputs['celltypes'], axis = 1) == celltypes).sum()
for epoch in range(n_epoch):
with no_grad():
running_loss, rtype_acc = 0., 0.
self.eval()
for batch, inputs in enumerate(val_loader):
mod1, mod2, protein_bools, celltypes = inputs
outputs = self(mod1)
n_correct = get_correct(outputs)
mod2_loss = self.loss2(outputs['modality 2'], mod2, protein_bools)
rtype_acc += n_correct
running_loss += mod2_loss.item() * len(mod2)
if self.categories_arr is None:
print(f"Epoch {epoch} prediction loss = {running_loss/len(val_loader):.3f}")
else:
print(f"Epoch {epoch} prediction loss = {running_loss/len(val_loader):.3f}, validation accuracy = {rtype_acc/len(val_loader):.3f}")
patience += 1
if bestloss/1.005 > running_loss:
bestloss, patience = running_loss, 0
if (patience + 1) % decay_max == 0:
scheduler.step()
print(f"Decaying loss to {optimizer.param_groups[0]["lr"]}")
if (patience + 1) > ES_max:
break
self.train()
for batch, inputs in enumerate(train_loader):
optimizer.zero_grad()
mod1, mod2, protein_bools, celltypes = inputs
outputs = self(mod1)
mod1_loss = self.loss1(outputs['celltypes'], celltypes)
mod2_loss = self.loss2(outputs['modality 2'], mod2, protein_bools)
loss = mod1_loss + mod2_loss
loss.backward()
optimizer.step()
    def impute(self, impute_loader, requested_quantiles, denoise_genes, proteins):
        """Fill unobserved protein values in a copy of ``proteins``.

        Observed entries keep their measured values; unobserved entries (per
        the loader's boolean mask) take the network prediction.  Requested
        quantile predictions are written into layers named ``q<percent>``.

        NOTE(review): ``denoise_genes`` is unused here -- presumably consumed
        by a caller or a sibling method; confirm before removing.
        """
        imputed_test = proteins.copy()
        # Pre-allocate one layer per requested quantile.
        for quantile in requested_quantiles:
            imputed_test.layers['q' + str(round(100 * quantile))] = np_zeros_like(imputed_test.X)
        self.eval()
        start = 0
        # Batches arrive in order, so [start:end) tracks row offsets in X.
        for mod1, bools, celltypes in impute_loader:
            end = start + mod1.shape[0]
            with no_grad():
                outputs = self(mod1)
                # With quantile heads, 'modality 2' is a (point, quantile) pair.
                if len(self.quantiles) > 0:
                    mod2_impute, mod2_quantile = outputs['modality 2']
                else:
                    mod2_impute = outputs['modality 2']
                imputed_test.X[start:end] = self.fill_predicted(imputed_test.X[start:end], mod2_impute, bools)
                for quantile in requested_quantiles:
                    # Map the requested quantile back to its head index.
                    index = [i for i, q in enumerate(self.quantiles) if quantile == q][0]
                    q_name = 'q' + str(round(100 * quantile))
                    imputed_test.layers[q_name][start:end] = mod2_quantile[:, : , index].cpu().numpy()
            start = end
        return imputed_test
    def embed(self, impute_loader, test_loader, cells_train, cells_test):
        """Collect hidden-state embeddings for train (and optionally test) cells.

        Returns an AnnData of shape (n_cells, 512) whose rows are filled batch
        by batch; train cells come first, then test cells when provided.

        NOTE(review): 512 is hard-coded -- presumably it must match the model's
        hidden size; confirm against the constructor.
        NOTE(review): unlike impute/predict, these loops run without no_grad();
        .detach() limits memory growth but forward still builds graphs --
        consider wrapping in no_grad().
        """
        if cells_test is not None:
            embedding = AnnData(zeros(shape = (len(cells_train) + len(cells_test), 512)))
            embedding.obs = concat((cells_train, cells_test), join = 'inner')
        else:
            embedding = AnnData(zeros(shape = (len(cells_train), 512)))
            embedding.obs = cells_train
        self.eval()
        start = 0
        for mod1, bools, celltypes in impute_loader:
            end = start + mod1.shape[0]
            outputs = self(mod1)
            embedding[start:end] = outputs['embedding'].detach().cpu().numpy()
            start = end
        # Continue filling rows after the training cells.
        if cells_test is not None:
            for mod1 in test_loader:
                end = start + mod1.shape[0]
                outputs = self(mod1)
                embedding[start:end] = outputs['embedding'].detach().cpu().numpy()
                start = end
        return embedding
def fill_predicted(self, array, predicted, bools):
bools = bools.cpu().numpy()
return (1. - bools) * predicted.cpu().numpy() + array
def predict(self, test_loader, requested_quantiles, denoise_genes, proteins, cells):
imputed_test = AnnData(zeros(shape = (len(cells), len(proteins.var))))
imputed_test.obs = cells
imputed_test.var.index = proteins.var.index
if self.categories_arr is not None:
celltypes = ['None'] * len(cells)
for quantile in requested_quantiles:
imputed_test.layers['q' + str(round(100 * quantile))] = np_zeros_like(imputed_test.X)
self.eval()
start = 0
for mod1 in test_loader:
end = start + mod1.shape[0]
with no_grad():
outputs = self(mod1)
if self.categories_arr is not None:
predicted_types = argmax(outputs['celltypes'], axis = 1).cpu().numpy()
celltypes[start:end] = self.categories_arr[predicted_types].tolist()
if len(self.quantiles) > 0:
mod2_impute, mod2_quantile = outputs['modality 2']
else:
mod2_impute = outputs['modality 2']
imputed_test.X[start:end] = mod2_impute.cpu().numpy()
for quantile in requested_quantiles:
index = [i for i, q in enumerate(self.quantiles) if quantile == q][0]
q_name = 'q' + str(round(100 * quantile))
imputed_test.layers[q_name][start:end] = mod2_quantile[:, : , index].cpu().numpy()
start = end
if self.categories_arr is not None:
imputed_test.obs['transfered cell labels'] = celltypes
return imputed_test | from math import log, exp
from numpy import inf, zeros, zeros_like as np_zeros_like, arange, asarray, empty
from pandas import concat
from anndata import AnnData
from torch import cat, no_grad, randn, zeros_like, zeros as torch_zeros, ones, argmax
from torch.nn import Module, Linear, Sequential, RNNCell, Softplus, Parameter, Softmax
from torch.optim import Adam
from torch.optim.lr_scheduler import StepLR
from .Layers import Input_Block, FF_Block, LambdaLayer, Dual_Forward
class sciPENN_Model(Module):
    """Recurrent multi-task model predicting modality 2 (proteins) from
    modality 1 (RNA), with optional quantile heads and an optional
    cell-type classifier ("transfer" mode).
    """
    def __init__(self, p_mod1, p_mod2, loss1, loss2, quantiles, categories):
        """Build the network.

        Parameters
        ----------
        p_mod1, p_mod2 : input / output feature dimensions.
        loss1 : classification loss (used only when ``categories`` is given).
        loss2 : modality-2 prediction loss.
        quantiles : list of quantile levels; empty disables quantile heads.
        categories : mapping category name -> index, or None.
        """
        super(sciPENN_Model, self).__init__()
        h_size, drop_rate = 512, 0.25
        # One RNNCell shared across all refinement steps.
        self.RNNCell = RNNCell(h_size, h_size)
        self.input_block = Input_Block(p_mod1, h_size, drop_rate, drop_rate)
        self.skip_1 = FF_Block(h_size, drop_rate)
        self.skip_2 = FF_Block(h_size, drop_rate)
        self.skip_3 = FF_Block(h_size, drop_rate)
        MSE_output = Linear(h_size, p_mod2)
        if len(quantiles) > 0:
            # Extra head: one output per (feature, quantile), reshaped to
            # (batch, p_mod2, n_quantiles); Dual_Forward returns both heads.
            quantile_layer = []
            quantile_layer.append(Linear(h_size, p_mod2 * len(quantiles)))
            quantile_layer.append(LambdaLayer(lambda x: x.view(-1, p_mod2, len(quantiles))))
            quantile_layer = Sequential(*quantile_layer)
            self.mod2_out = Dual_Forward(MSE_output, quantile_layer)
        else:
            self.mod2_out = MSE_output
        if categories is not None:
            # Transfer mode: classifier head + inverse mapping index -> name.
            self.celltype_out = Sequential(Linear(h_size, len(categories)), Softmax(1))
            self.forward = self.forward_transfer
            self.categories_arr = empty((len(categories), ), dtype = 'object')
            for cat in categories:
                self.categories_arr[categories[cat]] = cat
        else:
            self.forward = self.forward_simple
            self.categories_arr = None
        self.quantiles = quantiles
        self.loss1, self.loss2 = loss1, loss2
    def forward_transfer(self, x):
        """Forward pass with cell-type prediction; the classifier sees a
        detached hidden state so its gradients do not reach the trunk."""
        x = self.input_block(x)
        h = self.RNNCell(x, zeros_like(x))
        x = self.skip_1(x)
        h = self.RNNCell(x, h)
        x = self.skip_2(x)
        h = self.RNNCell(x, h)
        x = self.skip_3(x)
        h = self.RNNCell(x, h)
        return {'celltypes': self.celltype_out(h.detach()), 'modality 2': self.mod2_out(h), 'embedding': h}
    def forward_simple(self, x):
        """Forward pass without cell-type prediction ('celltypes' is None)."""
        x = self.input_block(x)
        h = self.RNNCell(x, zeros_like(x))
        x = self.skip_1(x)
        h = self.RNNCell(x, h)
        x = self.skip_2(x)
        h = self.RNNCell(x, h)
        x = self.skip_3(x)
        h = self.RNNCell(x, h)
        return {'celltypes': None, 'modality 2': self.mod2_out(h), 'embedding': h}
    def train_backprop(self, train_loader, val_loader,
                 n_epoch = 10000, ES_max = 30, decay_max = 10, decay_step = 0.1, lr = 10**(-3)):
        """Train with Adam, relative-improvement early stopping (ES_max
        stale epochs) and StepLR decay every ``decay_max`` stale epochs."""
        optimizer = Adam(self.parameters(), lr = lr)
        scheduler = StepLR(optimizer, step_size = 1, gamma = decay_step)
        patience = 0
        bestloss = inf
        if self.categories_arr is None:
            get_correct = lambda x: 0
        else:
            # Late binding: ``celltypes`` refers to the validation-loop batch
            # variable at call time.
            get_correct = lambda outputs: (argmax(outputs['celltypes'], axis = 1) == celltypes).sum()
        for epoch in range(n_epoch):
            with no_grad():
                running_loss, rtype_acc = 0., 0.
                self.eval()
                for batch, inputs in enumerate(val_loader):
                    mod1, mod2, protein_bools, celltypes = inputs
                    outputs = self(mod1)
                    n_correct = get_correct(outputs)
                    mod2_loss = self.loss2(outputs['modality 2'], mod2, protein_bools)
                    rtype_acc += n_correct
                    running_loss += mod2_loss.item() * len(mod2)
                # NOTE(review): loss is accumulated per sample but divided by
                # the number of batches -- confirm intended normalization.
                if self.categories_arr is None:
                    print(f"Epoch {epoch} prediction loss = {running_loss/len(val_loader):.3f}")
                else:
                    print(f"Epoch {epoch} prediction loss = {running_loss/len(val_loader):.3f}, validation accuracy = {rtype_acc/len(val_loader):.3f}")
                patience += 1
                # ~0.5% relative improvement required to reset patience.
                if bestloss/1.005 > running_loss:
                    bestloss, patience = running_loss, 0
                if (patience + 1) % decay_max == 0:
                    scheduler.step()
                    print(f"Decaying loss to {optimizer.param_groups[0]['lr']}")
                if (patience + 1) > ES_max:
                    break
            self.train()
            for batch, inputs in enumerate(train_loader):
                optimizer.zero_grad()
                mod1, mod2, protein_bools, celltypes = inputs
                outputs = self(mod1)
                mod1_loss = self.loss1(outputs['celltypes'], celltypes)
                mod2_loss = self.loss2(outputs['modality 2'], mod2, protein_bools)
                loss = mod1_loss + mod2_loss
                loss.backward()
                optimizer.step()
    def impute(self, impute_loader, requested_quantiles, denoise_genes, proteins):
        """Fill unobserved protein values in a copy of ``proteins``; write
        requested quantile predictions into ``q<percent>`` layers.
        NOTE(review): ``denoise_genes`` is unused here -- confirm callers."""
        imputed_test = proteins.copy()
        for quantile in requested_quantiles:
            imputed_test.layers['q' + str(round(100 * quantile))] = np_zeros_like(imputed_test.X)
        self.eval()
        start = 0
        # Batches arrive in order; [start:end) tracks row offsets in X.
        for mod1, bools, celltypes in impute_loader:
            end = start + mod1.shape[0]
            with no_grad():
                outputs = self(mod1)
                if len(self.quantiles) > 0:
                    mod2_impute, mod2_quantile = outputs['modality 2']
                else:
                    mod2_impute = outputs['modality 2']
                imputed_test.X[start:end] = self.fill_predicted(imputed_test.X[start:end], mod2_impute, bools)
                for quantile in requested_quantiles:
                    index = [i for i, q in enumerate(self.quantiles) if quantile == q][0]
                    q_name = 'q' + str(round(100 * quantile))
                    imputed_test.layers[q_name][start:end] = mod2_quantile[:, : , index].cpu().numpy()
            start = end
        return imputed_test
    def embed(self, impute_loader, test_loader, cells_train, cells_test):
        """Collect (n_cells, 512) embeddings; train cells first, then test.
        NOTE(review): no no_grad() here, unlike impute/predict."""
        if cells_test is not None:
            embedding = AnnData(zeros(shape = (len(cells_train) + len(cells_test), 512)))
            embedding.obs = concat((cells_train, cells_test), join = 'inner')
        else:
            embedding = AnnData(zeros(shape = (len(cells_train), 512)))
            embedding.obs = cells_train
        self.eval()
        start = 0
        for mod1, bools, celltypes in impute_loader:
            end = start + mod1.shape[0]
            outputs = self(mod1)
            embedding[start:end] = outputs['embedding'].detach().cpu().numpy()
            start = end
        if cells_test is not None:
            for mod1 in test_loader:
                end = start + mod1.shape[0]
                outputs = self(mod1)
                embedding[start:end] = outputs['embedding'].detach().cpu().numpy()
                start = end
        return embedding
    def fill_predicted(self, array, predicted, bools):
        """Keep observed entries of ``array`` (bools==1); take predictions
        where unobserved (bools==0)."""
        bools = bools.cpu().numpy()
        return (1. - bools) * predicted.cpu().numpy() + array
    def predict(self, test_loader, requested_quantiles, denoise_genes, proteins, cells):
        """Predict protein values (and quantiles / cell types when enabled)
        for unlabeled cells; returns a fresh AnnData."""
        imputed_test = AnnData(zeros(shape = (len(cells), len(proteins.var))))
        imputed_test.obs = cells
        imputed_test.var.index = proteins.var.index
        if self.categories_arr is not None:
            celltypes = ['None'] * len(cells)
        for quantile in requested_quantiles:
            imputed_test.layers['q' + str(round(100 * quantile))] = np_zeros_like(imputed_test.X)
        self.eval()
        start = 0
        for mod1 in test_loader:
            end = start + mod1.shape[0]
            with no_grad():
                outputs = self(mod1)
                if self.categories_arr is not None:
                    # Decode predicted class indices back to category names.
                    predicted_types = argmax(outputs['celltypes'], axis = 1).cpu().numpy()
                    celltypes[start:end] = self.categories_arr[predicted_types].tolist()
                if len(self.quantiles) > 0:
                    mod2_impute, mod2_quantile = outputs['modality 2']
                else:
                    mod2_impute = outputs['modality 2']
                imputed_test.X[start:end] = mod2_impute.cpu().numpy()
                for quantile in requested_quantiles:
                    index = [i for i, q in enumerate(self.quantiles) if quantile == q][0]
                    q_name = 'q' + str(round(100 * quantile))
                    imputed_test.layers[q_name][start:end] = mod2_quantile[:, : , index].cpu().numpy()
            start = end
        if self.categories_arr is not None:
            imputed_test.obs['transfered cell labels'] = celltypes
        return imputed_test
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses Modin configs originated from environment variables."""
import os
import sys
from textwrap import dedent
import warnings
from packaging import version
import secrets
from .pubsub import Parameter, _TYPE_PARAMS, ExactStr, ValueSource
class EnvironmentVariable(Parameter, type=str, abstract=True):
    """Base class for environment variables-based configuration."""
    # Name of the environment variable backing a concrete subclass.
    varname: str = None
    @classmethod
    def _get_raw_from_config(cls) -> str:
        """
        Read the value from environment variable.
        Returns
        -------
        str
            Config raw value.
        Raises
        ------
        KeyError
            If value is absent.
        """
        return os.environ[cls.varname]
    @classmethod
    def get_help(cls) -> str:
        """
        Generate user-presentable help for the config.
        Returns
        -------
        str
        """
        # Fix: use single quotes inside f-string expressions -- reusing the
        # enclosing double quote is only legal on Python 3.12+ (PEP 701) and
        # is a SyntaxError on older interpreters.
        help = f"{cls.varname}: {dedent(cls.__doc__ or 'Unknown').strip()}\n\tProvide {_TYPE_PARAMS[cls.type].help}"
        if cls.choices:
            help += f" (valid examples are: {', '.join(str(c) for c in cls.choices)})"
        return help
class IsDebug(EnvironmentVariable, type=bool):
    """Force Modin engine to be "Python" unless specified by $MODIN_ENGINE."""
    # Docstring doubles as user-facing help via get_help(), so it is kept as is.
    varname = "MODIN_DEBUG"
class Engine(EnvironmentVariable, type=str):
    """Distribution engine to run queries by."""
    varname = "MODIN_ENGINE"
    choices = ("Ray", "Dask", "Python", "Native")
    @classmethod
    def _get_default(cls):
        """
        Get default value of the config.

        Probes installed engines in preference order Ray > Dask > Native;
        an installed-but-too-old engine raises rather than being skipped.
        Returns
        -------
        str
        """
        # Debug mode forces the serial Python engine.
        if IsDebug.get():
            return "Python"
        try:
            import ray
        except ImportError:
            pass
        else:
            if version.parse(ray.__version__) < version.parse("1.4.0"):
                raise ImportError(
                    "Please `pip install modin[ray]` to install compatible Ray version."
                )
            return "Ray"
        # Dask needs both dask and distributed at >= 2.22.0.
        try:
            import dask
            import distributed
        except ImportError:
            pass
        else:
            if version.parse(dask.__version__) < version.parse(
                "2.22.0"
            ) or version.parse(distributed.__version__) < version.parse("2.22.0"):
                raise ImportError(
                    "Please `pip install modin[dask]` to install compatible Dask version."
                )
            return "Dask"
        # OmniSci embedded engine may be importable under either name.
        try:
            import omniscidbe # noqa
        except ImportError:
            try:
                import dbe # noqa
            except ImportError:
                pass
            else:
                return "Native"
        else:
            return "Native"
        raise ImportError(
            "Please refer to installation documentation page to install an engine"
        )
class Backend(EnvironmentVariable, type=str):
    """Engine to run on a single node of distribution."""
    varname = "MODIN_BACKEND"
    # Pandas is the default single-node execution backend.
    default = "Pandas"
    choices = ("Pandas", "OmniSci", "Pyarrow", "Cudf")
class IsExperimental(EnvironmentVariable, type=bool):
    """Whether to Turn on experimental features."""
    # Backing environment variable; docstring is user-visible help text.
    varname = "MODIN_EXPERIMENTAL"
class IsRayCluster(EnvironmentVariable, type=bool):
    """Whether Modin is running on pre-initialized Ray cluster."""
    # Backing environment variable.
    varname = "MODIN_RAY_CLUSTER"
class RayRedisAddress(EnvironmentVariable, type=ExactStr):
    """Redis address to connect to when running in Ray cluster."""
    # ExactStr: the raw value is used verbatim (no normalization).
    varname = "MODIN_REDIS_ADDRESS"
class RayRedisPassword(EnvironmentVariable, type=ExactStr):
    """What password to use for connecting to Redis."""
    varname = "MODIN_REDIS_PASSWORD"
    # A fresh random token per process, generated once at import time.
    default = secrets.token_hex(32)
class CpuCount(EnvironmentVariable, type=int):
    """How many CPU cores to use during initialization of the Modin engine."""
    varname = "MODIN_CPUS"
    @classmethod
    def _get_default(cls):
        """
        Get default value of the config.
        Returns
        -------
        int
        """
        # Imported locally so merely importing this module stays lightweight.
        import multiprocessing
        return multiprocessing.cpu_count()
class GpuCount(EnvironmentVariable, type=int):
    """How may GPU devices to utilize across the whole distribution."""
    # Backing environment variable.
    varname = "MODIN_GPUS"
class Memory(EnvironmentVariable, type=int):
    """
    How much memory (in bytes) give to an execution engine.
    Notes
    -----
    * In Ray case: the amount of memory to start the Plasma object store with.
    * In Dask case: the amount of memory that is given to each worker depending on CPUs used.
    """
    # Backing environment variable.
    varname = "MODIN_MEMORY"
class NPartitions(EnvironmentVariable, type=int):
    """How many partitions to use for a Modin DataFrame (along each axis)."""
    varname = "MODIN_NPARTITIONS"
    @classmethod
    def _put(cls, value):
        """
        Put specific value if NPartitions wasn't set by a user yet.
        Parameters
        ----------
        value : int
            Config value to set.
        Notes
        -----
        This method is used to set NPartitions from cluster resources internally
        and should not be called by a user.
        """
        # Only override the built-in default; an explicit user setting wins.
        if cls.get_value_source() == ValueSource.DEFAULT:
            cls.put(value)
    @classmethod
    def _get_default(cls):
        """
        Get default value of the config.
        Returns
        -------
        int
        """
        # One partition per processing unit: GPUs on Cudf, CPU cores otherwise.
        if Backend.get() == "Cudf":
            return GpuCount.get()
        else:
            return CpuCount.get()
class SocksProxy(EnvironmentVariable, type=ExactStr):
    """SOCKS proxy address if it is needed for SSH to work."""
    # Backing environment variable.
    varname = "MODIN_SOCKS_PROXY"
class DoLogRpyc(EnvironmentVariable, type=bool):
    """Whether to gather RPyC logs (applicable for remote context)."""
    # Backing environment variable.
    varname = "MODIN_LOG_RPYC"
class DoTraceRpyc(EnvironmentVariable, type=bool):
    """Whether to trace RPyC calls (applicable for remote context)."""
    # Backing environment variable.
    varname = "MODIN_TRACE_RPYC"
class OmnisciFragmentSize(EnvironmentVariable, type=int):
    """How big a fragment in OmniSci should be when creating a table (in rows)."""
    # Backing environment variable.
    varname = "MODIN_OMNISCI_FRAGMENT_SIZE"
class DoUseCalcite(EnvironmentVariable, type=bool):
    """Whether to use Calcite for OmniSci queries execution."""
    varname = "MODIN_USE_CALCITE"
    # Calcite-based query lowering is on by default.
    default = True
class TestDatasetSize(EnvironmentVariable, type=str):
    """Dataset size for running some tests."""
    # Backing environment variable.
    varname = "MODIN_TEST_DATASET_SIZE"
    choices = ("Small", "Normal", "Big")
class TestRayClient(EnvironmentVariable, type=bool):
    """Set to true to start and connect Ray client before a testing session starts."""
    # Backing environment variable.
    varname = "MODIN_TEST_RAY_CLIENT"
    default = False
class TrackFileLeaks(EnvironmentVariable, type=bool):
    """Whether to track for open file handles leakage during testing."""
    varname = "MODIN_TEST_TRACK_FILE_LEAKS"
    # Turn off tracking on Windows by default because
    # psutil's open_files() can be extremely slow on Windows (up to adding a few hours).
    # see https://github.com/giampaolo/psutil/pull/597
    default = sys.platform != "win32"
class AsvImplementation(EnvironmentVariable, type=ExactStr):
    """Allows to select a library that we will use for testing performance."""
    # Backing environment variable for the ASV benchmarking harness.
    varname = "MODIN_ASV_USE_IMPL"
    choices = ("modin", "pandas")
    default = "modin"
class AsvDataSizeConfig(EnvironmentVariable, type=ExactStr):
    """Allows to override default size of data (shapes)."""
    # Backing environment variable; None means "use built-in shapes".
    varname = "MODIN_ASV_DATASIZE_CONFIG"
    default = None
class ProgressBar(EnvironmentVariable, type=bool):
    """Whether or not to show the progress bar."""
    varname = "MODIN_PROGRESS_BAR"
    default = False
    @classmethod
    def enable(cls):
        """Enable ``ProgressBar`` feature."""
        cls.put(True)
    @classmethod
    def disable(cls):
        """Disable ``ProgressBar`` feature."""
        cls.put(False)
    @classmethod
    def put(cls, value):
        """
        Set ``ProgressBar`` value only if synchronous benchmarking is disabled.
        Parameters
        ----------
        value : bool
            Config value to set.
        """
        # Progress bars and synchronous BenchmarkMode are mutually exclusive;
        # BenchmarkMode.put enforces the symmetric check.
        if value and BenchmarkMode.get():
            raise ValueError("ProgressBar isn't compatible with BenchmarkMode")
        super().put(value)
class BenchmarkMode(EnvironmentVariable, type=bool):
    """Whether or not to perform computations synchronously."""
    varname = "MODIN_BENCHMARK_MODE"
    default = False
    @classmethod
    def put(cls, value):
        """
        Set ``BenchmarkMode`` value only if progress bar feature is disabled.
        Parameters
        ----------
        value : bool
            Config value to set.
        """
        # Mirror of the guard in ProgressBar.put: the two settings are
        # mutually exclusive.
        if value and ProgressBar.get():
            raise ValueError("BenchmarkMode isn't compatible with ProgressBar")
        super().put(value)
class PersistentPickle(EnvironmentVariable, type=bool):
    """Wheather serialization should be persistent."""
    # NOTE(review): "Wheather" typo above is user-visible help text via
    # get_help(); left untouched here.
    varname = "MODIN_PERSISTENT_PICKLE"
    # When set to off, it allows faster serialization which is only
    # valid in current run (i.e. useless for saving to disk).
    # When set to on, Modin objects could be saved to disk and loaded
    # but serialization/deserialization could take more time.
    default = False
class OmnisciLaunchParameters(EnvironmentVariable, type=dict):
    """
    Additional command line options for the OmniSci engine.
    Please visit OmniSci documentation for the description of available parameters:
    https://docs.omnisci.com/installation-and-configuration/config-parameters#configuration-parameters-for-omniscidb
    """
    varname = "MODIN_OMNISCI_LAUNCH_PARAMETERS"
    # Defaults are merged with (and overridden by) user-provided parameters.
    default = {
        "enable_union": 1,
        "enable_columnar_output": 1,
        "enable_lazy_fetch": 0,
        "null_div_by_zero": 1,
        "enable_watchdog": 0,
    }
    @classmethod
    def get(cls):
        """
        Get the resulted command-line options.
        Decode and merge specified command-line options with the default one.
        Returns
        -------
        dict
            Decoded and verified config value.
        """
        # Fix: this is a classmethod, so its first argument is the class --
        # name it ``cls`` instead of the misleading ``self``.
        custom_parameters = super().get()
        result = cls.default.copy()
        # Accept both CLI-style (dashes) and Python-style (underscores) keys;
        # user values win over defaults.
        result.update(
            {key.replace("-", "_"): value for key, value in custom_parameters.items()}
        )
        return result
def _check_vars():
    """
    Check validity of environment variables.
    Look out for any environment variables that start with "MODIN_" prefix
    that are unknown - they might be a typo, so warn a user.
    """
    valid_names = {
        obj.varname
        for obj in globals().values()
        if isinstance(obj, type)
        and issubclass(obj, EnvironmentVariable)
        and not obj.is_abstract
    }
    found_names = {name for name in os.environ if name.startswith("MODIN_")}
    unknown = found_names - valid_names
    if unknown:
        # Fix: use single quotes inside the f-string expressions -- reusing
        # the enclosing double quote is only legal on Python 3.12+ (PEP 701)
        # and is a SyntaxError on older interpreters.
        warnings.warn(
            f"Found unknown environment variable{'s' if len(unknown) > 1 else ''},"
            f" please check {'their' if len(unknown) > 1 else 'its'} spelling: "
            + ", ".join(sorted(unknown))
        )
_check_vars()
| # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses Modin configs originated from environment variables."""
import os
import sys
from textwrap import dedent
import warnings
from packaging import version
import secrets
from .pubsub import Parameter, _TYPE_PARAMS, ExactStr, ValueSource
class EnvironmentVariable(Parameter, type=str, abstract=True):
    """Base class for environment variables-based configuration."""
    # Name of the environment variable backing a concrete subclass.
    varname: str = None
    @classmethod
    def _get_raw_from_config(cls) -> str:
        """
        Read the value from environment variable.
        Returns
        -------
        str
            Config raw value.
        Raises
        ------
        KeyError
            If value is absent.
        """
        return os.environ[cls.varname]
    @classmethod
    def get_help(cls) -> str:
        """
        Generate user-presentable help for the config.
        Returns
        -------
        str
        """
        # NOTE: keep single quotes inside the f-string expressions; reusing
        # the enclosing double quote requires Python 3.12+ (PEP 701).
        help = f"{cls.varname}: {dedent(cls.__doc__ or 'Unknown').strip()}\n\tProvide {_TYPE_PARAMS[cls.type].help}"
        if cls.choices:
            help += f" (valid examples are: {', '.join(str(c) for c in cls.choices)})"
        return help
class IsDebug(EnvironmentVariable, type=bool):
    """Force Modin engine to be "Python" unless specified by $MODIN_ENGINE."""
    # Docstring doubles as user-facing help via get_help(), so it is kept as is.
    varname = "MODIN_DEBUG"
class Engine(EnvironmentVariable, type=str):
    """Distribution engine to run queries by."""
    varname = "MODIN_ENGINE"
    choices = ("Ray", "Dask", "Python", "Native")
    @classmethod
    def _get_default(cls):
        """
        Get default value of the config.

        Probes installed engines in preference order Ray > Dask > Native;
        an installed-but-too-old engine raises rather than being skipped.
        Returns
        -------
        str
        """
        # Debug mode forces the serial Python engine.
        if IsDebug.get():
            return "Python"
        try:
            import ray
        except ImportError:
            pass
        else:
            if version.parse(ray.__version__) < version.parse("1.4.0"):
                raise ImportError(
                    "Please `pip install modin[ray]` to install compatible Ray version."
                )
            return "Ray"
        # Dask needs both dask and distributed at >= 2.22.0.
        try:
            import dask
            import distributed
        except ImportError:
            pass
        else:
            if version.parse(dask.__version__) < version.parse(
                "2.22.0"
            ) or version.parse(distributed.__version__) < version.parse("2.22.0"):
                raise ImportError(
                    "Please `pip install modin[dask]` to install compatible Dask version."
                )
            return "Dask"
        # OmniSci embedded engine may be importable under either name.
        try:
            import omniscidbe # noqa
        except ImportError:
            try:
                import dbe # noqa
            except ImportError:
                pass
            else:
                return "Native"
        else:
            return "Native"
        raise ImportError(
            "Please refer to installation documentation page to install an engine"
        )
class Backend(EnvironmentVariable, type=str):
    """Engine to run on a single node of distribution."""
    varname = "MODIN_BACKEND"
    # Pandas is the default single-node execution backend.
    default = "Pandas"
    choices = ("Pandas", "OmniSci", "Pyarrow", "Cudf")
class IsExperimental(EnvironmentVariable, type=bool):
    """Whether to Turn on experimental features."""
    # Backing environment variable; docstring is user-visible help text.
    varname = "MODIN_EXPERIMENTAL"
class IsRayCluster(EnvironmentVariable, type=bool):
    """Whether Modin is running on pre-initialized Ray cluster."""
    # Backing environment variable.
    varname = "MODIN_RAY_CLUSTER"
class RayRedisAddress(EnvironmentVariable, type=ExactStr):
    """Redis address to connect to when running in Ray cluster."""
    # ExactStr: the raw value is used verbatim (no normalization).
    varname = "MODIN_REDIS_ADDRESS"
class RayRedisPassword(EnvironmentVariable, type=ExactStr):
    """What password to use for connecting to Redis."""
    varname = "MODIN_REDIS_PASSWORD"
    # A fresh random token per process, generated once at import time.
    default = secrets.token_hex(32)
class CpuCount(EnvironmentVariable, type=int):
    """How many CPU cores to use during initialization of the Modin engine."""
    varname = "MODIN_CPUS"
    @classmethod
    def _get_default(cls):
        """
        Get default value of the config.
        Returns
        -------
        int
        """
        # Imported locally so merely importing this module stays lightweight.
        import multiprocessing
        return multiprocessing.cpu_count()
class GpuCount(EnvironmentVariable, type=int):
    """How may GPU devices to utilize across the whole distribution."""
    # Backing environment variable.
    varname = "MODIN_GPUS"
class Memory(EnvironmentVariable, type=int):
    """
    How much memory (in bytes) give to an execution engine.
    Notes
    -----
    * In Ray case: the amount of memory to start the Plasma object store with.
    * In Dask case: the amount of memory that is given to each worker depending on CPUs used.
    """
    # Backing environment variable.
    varname = "MODIN_MEMORY"
class NPartitions(EnvironmentVariable, type=int):
    """How many partitions to use for a Modin DataFrame (along each axis)."""
    varname = "MODIN_NPARTITIONS"
    @classmethod
    def _put(cls, value):
        """
        Put specific value if NPartitions wasn't set by a user yet.
        Parameters
        ----------
        value : int
            Config value to set.
        Notes
        -----
        This method is used to set NPartitions from cluster resources internally
        and should not be called by a user.
        """
        # Only override the built-in default; an explicit user setting wins.
        if cls.get_value_source() == ValueSource.DEFAULT:
            cls.put(value)
    @classmethod
    def _get_default(cls):
        """
        Get default value of the config.
        Returns
        -------
        int
        """
        # One partition per processing unit: GPUs on Cudf, CPU cores otherwise.
        if Backend.get() == "Cudf":
            return GpuCount.get()
        else:
            return CpuCount.get()
class SocksProxy(EnvironmentVariable, type=ExactStr):
    """SOCKS proxy address if it is needed for SSH to work."""
    # Backing environment variable.
    varname = "MODIN_SOCKS_PROXY"
class DoLogRpyc(EnvironmentVariable, type=bool):
    """Whether to gather RPyC logs (applicable for remote context)."""
    # Backing environment variable.
    varname = "MODIN_LOG_RPYC"
class DoTraceRpyc(EnvironmentVariable, type=bool):
    """Whether to trace RPyC calls (applicable for remote context)."""
    # Backing environment variable.
    varname = "MODIN_TRACE_RPYC"
class OmnisciFragmentSize(EnvironmentVariable, type=int):
    """How big a fragment in OmniSci should be when creating a table (in rows)."""
    # Backing environment variable.
    varname = "MODIN_OMNISCI_FRAGMENT_SIZE"
class DoUseCalcite(EnvironmentVariable, type=bool):
    """Whether to use Calcite for OmniSci queries execution."""
    varname = "MODIN_USE_CALCITE"
    # Calcite-based query lowering is on by default.
    default = True
class TestDatasetSize(EnvironmentVariable, type=str):
    """Dataset size for running some tests."""
    # Backing environment variable.
    varname = "MODIN_TEST_DATASET_SIZE"
    choices = ("Small", "Normal", "Big")
class TestRayClient(EnvironmentVariable, type=bool):
    """Set to true to start and connect Ray client before a testing session starts."""
    # Backing environment variable.
    varname = "MODIN_TEST_RAY_CLIENT"
    default = False
class TrackFileLeaks(EnvironmentVariable, type=bool):
    """Whether to track for open file handles leakage during testing."""
    varname = "MODIN_TEST_TRACK_FILE_LEAKS"
    # Turn off tracking on Windows by default because
    # psutil's open_files() can be extremely slow on Windows (up to adding a few hours).
    # see https://github.com/giampaolo/psutil/pull/597
    default = sys.platform != "win32"
class AsvImplementation(EnvironmentVariable, type=ExactStr):
    """Allows to select a library that we will use for testing performance."""
    # Backing environment variable for the ASV benchmarking harness.
    varname = "MODIN_ASV_USE_IMPL"
    choices = ("modin", "pandas")
    default = "modin"
class AsvDataSizeConfig(EnvironmentVariable, type=ExactStr):
    """Allows to override default size of data (shapes)."""
    # Backing environment variable; None means "use built-in shapes".
    varname = "MODIN_ASV_DATASIZE_CONFIG"
    default = None
class ProgressBar(EnvironmentVariable, type=bool):
    """Whether or not to show the progress bar."""
    varname = "MODIN_PROGRESS_BAR"
    default = False
    @classmethod
    def enable(cls):
        """Enable ``ProgressBar`` feature."""
        cls.put(True)
    @classmethod
    def disable(cls):
        """Disable ``ProgressBar`` feature."""
        cls.put(False)
    @classmethod
    def put(cls, value):
        """
        Set ``ProgressBar`` value only if synchronous benchmarking is disabled.
        Parameters
        ----------
        value : bool
            Config value to set.
        """
        # Progress bars and synchronous BenchmarkMode are mutually exclusive;
        # BenchmarkMode.put enforces the symmetric check.
        if value and BenchmarkMode.get():
            raise ValueError("ProgressBar isn't compatible with BenchmarkMode")
        super().put(value)
class BenchmarkMode(EnvironmentVariable, type=bool):
    """Whether or not to perform computations synchronously."""
    varname = "MODIN_BENCHMARK_MODE"
    default = False
    @classmethod
    def put(cls, value):
        """
        Set ``BenchmarkMode`` value only if progress bar feature is disabled.
        Parameters
        ----------
        value : bool
            Config value to set.
        """
        # Mirror of the guard in ProgressBar.put: the two settings are
        # mutually exclusive.
        if value and ProgressBar.get():
            raise ValueError("BenchmarkMode isn't compatible with ProgressBar")
        super().put(value)
class PersistentPickle(EnvironmentVariable, type=bool):
    """Wheather serialization should be persistent."""
    # NOTE(review): "Wheather" typo above is user-visible help text via
    # get_help(); left untouched here.
    varname = "MODIN_PERSISTENT_PICKLE"
    # When set to off, it allows faster serialization which is only
    # valid in current run (i.e. useless for saving to disk).
    # When set to on, Modin objects could be saved to disk and loaded
    # but serialization/deserialization could take more time.
    default = False
class OmnisciLaunchParameters(EnvironmentVariable, type=dict):
    """
    Additional command line options for the OmniSci engine.
    Please visit OmniSci documentation for the description of available parameters:
    https://docs.omnisci.com/installation-and-configuration/config-parameters#configuration-parameters-for-omniscidb
    """
    varname = "MODIN_OMNISCI_LAUNCH_PARAMETERS"
    # Defaults are merged with (and overridden by) user-provided parameters.
    default = {
        "enable_union": 1,
        "enable_columnar_output": 1,
        "enable_lazy_fetch": 0,
        "null_div_by_zero": 1,
        "enable_watchdog": 0,
    }
    @classmethod
    def get(cls):
        """
        Get the resulted command-line options.
        Decode and merge specified command-line options with the default one.
        Returns
        -------
        dict
            Decoded and verified config value.
        """
        # Fix: this is a classmethod, so its first argument is the class --
        # name it ``cls`` instead of the misleading ``self``.
        custom_parameters = super().get()
        result = cls.default.copy()
        # Accept both CLI-style (dashes) and Python-style (underscores) keys;
        # user values win over defaults.
        result.update(
            {key.replace("-", "_"): value for key, value in custom_parameters.items()}
        )
        return result
def _check_vars():
    """
    Check validity of environment variables.
    Look out for any environment variables that start with "MODIN_" prefix
    that are unknown - they might be a typo, so warn a user.
    """
    # Collect the varnames of every concrete EnvironmentVariable subclass
    # defined in this module.
    valid_names = {
        obj.varname
        for obj in globals().values()
        if isinstance(obj, type)
        and issubclass(obj, EnvironmentVariable)
        and not obj.is_abstract
    }
    found_names = {name for name in os.environ if name.startswith("MODIN_")}
    unknown = found_names - valid_names
    if unknown:
        # NOTE: keep single quotes inside the f-string expressions; reusing
        # the enclosing double quote requires Python 3.12+ (PEP 701).
        warnings.warn(
            f"Found unknown environment variable{'s' if len(unknown) > 1 else ''},"
            f" please check {'their' if len(unknown) > 1 else 'its'} spelling: "
            + ", ".join(sorted(unknown))
        )
# Run the check once at import time.
_check_vars()
|
import pandas as pd
import tweepy
from textblob import TextBlob
from wordcloud import WordCloud
import plotly.graph_objs as go
import os
import re
import pystan
import numpy as np
import streamlit as st
import matplotlib.pyplot as plt
import yfinance as yf
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from GoogleNews import GoogleNews
from ta.volatility import BollingerBands
from ta.trend import MACD
from ta.momentum import RSIIndicator
import datetime as datetime
import base64
import pandas as pd
import plotly.express as px
import datetime
import requests
from bs4 import BeautifulSoup
from datetime import date
from plotly import graph_objs
st.set_page_config(
layout="wide",
initial_sidebar_state="auto",
page_title= "Finance-Forcasting-Dashboard",
page_icon= "Images/growth.png",
)
col1, col2, col3 = st.beta_columns([1,2,1])
col1.write("")
col2.image("Images/LL.png", width = 500)
col3.write("")
st.set_option('deprecation.showPyplotGlobalUse', False)
main_bg = "Images/BACK.png"
main_bg_ext = "Images/BACK.png"
st.markdown(
f"""
<style>
.reportview-container {{
background: url(data:image/{main_bg_ext};base64,{base64.b64encode(open(main_bg, "rb").read()).decode()})
}}
</style>
""",
unsafe_allow_html=True
)
###############################Funtions############################
# load data from yahoo finance
def load_data(ticker):
    """Download daily price history for ``ticker`` from Yahoo Finance.

    Fetches data from 2020-01-01 through today and returns a DataFrame
    with ``Date`` as a regular column (index reset in place).
    """
    start_date = "2020-01-01"
    end_date = date.today().strftime("%Y-%m-%d")
    frame = yf.download(ticker, start_date, end_date)
    frame.reset_index(inplace=True)
    return frame
# Plot raw data
def plot_raw_data():
    """Render open/close price traces with a range slider.

    NOTE(review): reads the module-level global ``data``, which must have been
    assigned (presumably via load_data) before this is called -- confirm the
    call site, which is outside this chunk.
    """
    fig = graph_objs.Figure()
    fig.add_trace(graph_objs.Scatter(x=data['Date'], y=data['Open'], name="stock_open"))
    fig.add_trace(graph_objs.Scatter(x=data['Date'], y=data['Close'], name="stock_close"))
    fig.layout.update(title_text='Time Series data with Rangeslider', xaxis_rangeslider_visible=True)
    st.plotly_chart(fig)
def get_forecast(data):
    """Fit a Prophet model on ``data`` and forecast 7 days ahead.

    ``data`` must already be in Prophet's expected format (``ds``/``y``
    columns) -- presumably prepared by the caller; confirm.

    Returns the fitted model and the forecast frame (history + 7 days).
    """
    model = Prophet()
    model.fit(data)
    future = model.make_future_dataframe(periods=7)
    forecast = model.predict(future)
    return model, forecast
@st.cache
def read_data():
    """Load the USD forex rate table from GitHub.

    Returns the full DataFrame and the currency column labels
    (every column except the leading date column).
    """
    url = "https://raw.githubusercontent.com/emrecanaltinsoy/forex_data/main/forex_usd_data.csv"
    frame = pd.read_csv(url)
    return frame, frame.columns[1:]
@st.cache
def get_range(data, date_range):
    """Slice ``data`` to the rows between two dates (inclusive).

    Parameters
    ----------
    data : DataFrame with a "date(y-m-d)" column of ISO date strings.
    date_range : two-element sequence; ``str()`` of each element must match
        the column's date format.

    Returns
    -------
    (DataFrame, Series)
        The sliced frame and its date column.

    Raises
    ------
    IndexError
        If either date is not present in the data.
    """
    start_index = data.index[data["date(y-m-d)"] == str(date_range[0])].tolist()[0]
    end_index = data.index[data["date(y-m-d)"] == str(date_range[1])].tolist()[0]
    data = data.iloc[start_index : end_index + 1]
    # Removed dead local (`cols = data.columns`) that was never used.
    dates = data["date(y-m-d)"]
    return data, dates
@st.cache
def scrape_currency():
    """Scrape today's USD exchange-rate table from x-rates.com.

    Returns a one-row DataFrame (date column + one column per currency,
    with USD itself fixed at "1") and the currency column names.
    """
    today = datetime.date.today()
    base_url = "https://www.x-rates.com/historical/?from=USD&amount=1&date"
    # Zero-pad month/day with a format spec instead of the original's
    # manual `if > 9` string padding (identical output).
    date_str = f"{today.year}-{today.month:02d}-{today.day:02d}"
    URL = f"{base_url}={date_str}"
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, "html.parser")
    # The first 12 <tr> rows are page chrome; the rest are rate rows.
    table = soup.find_all("tr")[12:]
    currencies = [row.text.split("\n")[1:3][0] for row in table]
    rates = [row.text.split("\n")[1:3][1] for row in table]
    # Prepend the date column and the USD base entry.
    currencies[:0] = ["date(y-m-d)", "American Dollar"]
    rates[:0] = [date_str, "1"]
    # Pair names with rates directly rather than indexing by position.
    curr_data = pd.DataFrame(dict(zip(currencies, rates)), index=[0])
    return curr_data, curr_data.columns[1:]
@st.cache
def train_model(data, currency, period):
    """Fit Prophet on the last two years of *currency* rates and
    forecast *period* days ahead.  Returns ``(forecast, model)``.
    """
    history = data[["date(y-m-d)", currency]].iloc[-365 * 2 :]
    history = history.rename(columns={"date(y-m-d)": "ds", currency: "y"})
    model = Prophet()
    model.fit(history)
    horizon = model.make_future_dataframe(periods=period)
    return model.predict(horizon), model
# Load the cached forex dataset once; `columns` holds the currency names.
df_all, columns = read_data()
################################################################################
# Sidebar navigation: `choice` selects which page branch renders below.
st.sidebar.image("Images/Menu.png", width = 330)
menu = ["Home","STOCKS Live Forcasting", "Crypto-Live Forcasting","View Historical Currency Charts", "Check Live Currency Exchange rates", "Forecast Currency Live Prices"]
choice = st.sidebar.selectbox("Menu", menu)
if choice == "Home":
# Landing page: static description of the dashboard plus a "how it
# works" illustration and an info image in the sidebar.
st.write("")
st.write(""" <p style=" font-size: 15px; font-weight:normal; font-family:verdana"> Finance Dashboard is a special web service that allows you to view Cryptocurrencies,Stocks,and Live Currency Values by many useful methods (technical indicators, graphical patterns, sentimental analysis, and more). Trading and crypto investing requires constant analysis and monitoring. Traders need to track all their trades in order to improve results and find errors. If you don't use additional instruments, then trading will be unsystematic, and the results will be uncertain. Such a service will be useful and even extremely necessary for those who trade and invest in cryptocurrencies and Stocks. Competent selection of cryptocurrencies is at least half of investment success. Finance Dashboard has a simple interface and is great for quick analysis of the Stock market. </p>
""", unsafe_allow_html=True)
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
st.write(""" <p style=" color:#E75480; font-size: 30px; font-weight:bold"> How does it work? </p>
""", unsafe_allow_html=True)
st.write("")
st.image("Images/How.png", width = 1300)
st.sidebar.write(" ")
st.sidebar.write(" ")
st.sidebar.image("Images/info.png", width = 300)
elif choice == "STOCKS Live Forcasting":
st.title('Stocks Weekly Forecast')
st.subheader('Enter the stock ticker:')
ticker = st.text_input('example: GOOG')
ticket = ticker.upper()
if len(ticker)>0:
data_load_state = st.text('Loading data...')
data = load_data(ticker)
if data.empty:
data_load_state.text(f'No ticker named {ticker}')
ticker = ''
else:
data_load_state.text('Loading data... done!')
st.subheader(f'Company: {yf.Ticker(ticker).info['longName']}')
st.write(data.head())
plot_raw_data()
# prepare data for forecasting
df_train = data[['Date','Close']]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
# train and forecast
model, forecast = get_forecast(df_train)
st.subheader('Forecast')
# plot forecast
st.write(f'Forecast plot for the next week')
fig = plot_plotly(model, forecast)
st.plotly_chart(fig)
elif choice == "View Historical Currency Charts":
st.write("This app can be used to view historical **currency** charts!")
# Date window is bounded by the dataset: it starts 2011-01-01 with one
# row per day, so the last valid date is start + (rows - 1) days.
date_range = st.date_input(
"Choose date range",
value=(
datetime.date(2011, 1, 1),
datetime.date(2011, 1, 1) + datetime.timedelta(df_all.shape[0] - 1),
),
min_value=datetime.date(2011, 1, 1),
max_value=datetime.date(2011, 1, 1) + datetime.timedelta(df_all.shape[0] - 1),
)
df, dates = get_range(df_all, date_range)
selected_curr = st.multiselect("Select currencies", columns)
ok = st.button("View")
if ok:
if selected_curr:
# st.write(df[selected_curr])
# One line chart per selected currency over the chosen window.
for curr in selected_curr:
fig = px.line(
x=dates,
y=df[curr],
)
fig.update_layout(
xaxis_title="Date",
yaxis_title=curr,
)
st.write(fig)
elif choice == "Check Live Currency Exchange rates":
st.write("This app can be used to check current **currency** data!")
# Scrape today's USD-based rate table, then re-base the selected
# currencies by dividing through the chosen base currency's USD rate.
daily_df, columns = scrape_currency()
base_curr = st.selectbox("Select the base currency", columns)
selected_curr = st.multiselect("Select currencies", columns)
if selected_curr:
base = daily_df[base_curr].astype(float)
selected = daily_df[selected_curr].astype(float)
converted = selected / float(base)
st.write(converted)
elif choice == "Forecast Currency Live Prices":
currency = st.selectbox("Select the currency for prediction", columns)
# The slider is in weeks; train_model takes a horizon in days.
n_weeks = st.slider("Weeks of prediction", 4, 20, 8, 1)
ok = st.button("Predict")
if ok:
train_state = st.text("Training the model...")
pred, model = train_model(df_all, currency, period=n_weeks * 7)
train_state.text("Model training completed!!")
st.subheader("Forecast data")
fig1 = plot_plotly(model, pred)
st.plotly_chart(fig1)
elif choice == "Crypto-Live Forcasting":
# Coin picker plus a date window defaulting to the last ~4 years.
st.sidebar.header("Please select cryptocurrency")
option = st.sidebar.selectbox("Ticker Symbol",("BTC-USD", "ETH-USD", "XRP-USD", "DOGE-USD", "ADA-USD", "BNB-USD", "LTC-USD",))
today = datetime.date.today()
before = today - datetime.timedelta(days=1400)
start_date = st.sidebar.date_input('Start date', before)
end_date = st.sidebar.date_input('End date', today)
# Guard: the selected window must be non-empty.
if start_date < end_date:
st.sidebar.success("Start date: `%s`\n\nEnd date: `%s` " % (start_date, end_date))
else:
st.sidebar.error("Error: End date must fall after start date.")
@st.cache(allow_output_mutation = True)
def get_data(option, start_date, end_date):
    """Download daily price history for the chosen crypto ticker."""
    return yf.download(option, start=start_date, end=end_date, progress=False)
# Getting API_KEYS
# Twitter credentials come from the environment; both are None when the
# variables are unset, which makes the tweepy auth downstream fail.
api_key = os.environ.get("Key")
api_secret = os.environ.get("Secret")
# Function for getting tweets
# Create authentication
@st.cache(allow_output_mutation = True)
def get_tweets(key, secret, search_term):
    """Fetch up to 100 recent English tweets matching *search_term*.

    Retweets are excluded; returns a one-column ``Tweets`` DataFrame of
    full tweet texts.
    """
    # BUG FIX: authenticate with the `key`/`secret` parameters — the
    # original ignored them and read the module globals api_key /
    # api_secret directly (callers already pass those same globals).
    authentication = tweepy.OAuthHandler(key, secret)
    api = tweepy.API(authentication)
    term = search_term + "-filter:retweets"
    # Create a cursor object, capped at 100 tweets.
    # NOTE(review): `today` is the module-level date set in the crypto
    # branch — confirm this function is only called from there.
    tweets = tweepy.Cursor(api.search, q = term, lang = "en",
    since = today, tweet_mode = "extended").items(100)
    tweets_text = [tweet.full_text for tweet in tweets]
    return pd.DataFrame(tweets_text, columns = ["Tweets"])
# Clean text
@st.cache(allow_output_mutation = True)
def Clean(twt):
    """Strip hashtags, retweet markers, newlines, links, HTML remnants
    and digits from a tweet, and expand two common contractions.
    """
    # Raw strings throughout: the original's "RT[\s]+", "\d" and
    # "https?\://" literals contain invalid escape sequences that raise
    # DeprecationWarning/SyntaxWarning on modern Python.  The compiled
    # patterns are identical.
    twt = re.sub(r"#cryptocurrency", "cryptocurrency", twt)
    twt = re.sub(r"#Cryptocurrency", "Cryptocurrency", twt)
    twt = re.sub(r"#[A-Za-z0-9]+", "", twt)
    twt = re.sub(r"RT[\s]+", "", twt)
    twt = re.sub(r"\n", "", twt)
    twt = re.sub(r"https?://\S+", '', twt)
    twt = re.sub(r"<br />", "", twt)
    twt = re.sub(r"\d", "", twt)
    twt = re.sub(r"it's", "it is", twt)
    twt = re.sub(r"can't", "cannot", twt)
    twt = re.sub(r"<(?:a\b[^>]*>|/a>)", "", twt)
    return twt
# Subjectivity and Polarity
# TextBlob subjectivity score: 0.0 (objective) .. 1.0 (subjective).
@st.cache(allow_output_mutation = True)
def subjectivity(text):
return TextBlob(text).sentiment.subjectivity
# TextBlob polarity score: -1.0 (negative) .. 1.0 (positive).
@st.cache(allow_output_mutation = True)
def polarity(text):
return TextBlob(text).sentiment.polarity
# Create a function to get sentiment text
@st.cache(allow_output_mutation = True)
def sentiment(score):
    """Map a polarity score to a Negative/Neutral/Positive label."""
    if score < 0:
        return "Negative"
    if score == 0:
        return "Neutral"
    return "Positive"
# --- Per-coin dashboard -------------------------------------------------
# The seven coin branches were ~70 identical lines each, differing only
# in the forecast heading and the news search term, so they are rendered
# by one helper driven by this mapping.
# BUG FIXES folded in:
#   * the XRP branch's heading previously said "DOGE-USD Forecast";
#   * the Ethereum news search previously queried the misspelling
#     "Etherium";
#   * the dead `news = GoogleNews()` assignment (immediately
#     overwritten) is gone.
# Minor normalisation: every coin now gets the same spacer writes (the
# original BTC branch lacked one spacer and DOGE had one extra).
news_topics = {
    "BTC-USD": "Bitcoin",
    "ETH-USD": "Ethereum",
    "XRP-USD": "XRP",
    "DOGE-USD": "Dogecoin",
    "ADA-USD": "cryptocurrency",
    "BNB-USD": "BNB",
    "LTC-USD": "Litecoin",
}

def gold_header(text):
    # Gold <p> section heading used by every section on this page.
    st.write(f""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> {text} </p>
    """, unsafe_allow_html=True)

def render_coin(symbol):
    """Render raw data, indicators, a Prophet forecast and news for one coin."""
    df = get_data(symbol, start_date, end_date)
    gold_header("Raw Data")
    st.write(" ")
    st.write(df)
    st.write(" ")
    gold_header("Close Price")
    st.write(" ")
    st.line_chart(df["Close"])
    st.write(" ")
    # MACD trend indicator
    st.write(" ")
    macd = MACD(df["Close"]).macd()
    gold_header("Moving Average Convergence Divergence")
    st.write(" ")
    st.area_chart(macd)
    # Bollinger Bands around the close price (note: adds bb_h/bb_l
    # columns to df in place, as the original did via `bb = df`)
    bb_bands = BollingerBands(df["Close"])
    bb = df
    bb["bb_h"] = bb_bands.bollinger_hband()
    bb["bb_l"] = bb_bands.bollinger_lband()
    bb = bb[["Close","bb_h","bb_l"]]
    gold_header("Bollinger Bands")
    st.line_chart(bb)
    st.write(" ")
    # Relative strength index (original heading text preserved)
    rsi = RSIIndicator(df["Close"]).rsi()
    gold_header("Resistence Strength Indicator")
    st.write(" ")
    st.line_chart(rsi)
    st.write(" ")
    gold_header(f"{symbol} Forecast using Facebook Prophet")
    st.write(" ")
    data = df.reset_index()
    period = st.slider("Days of prediction:", 1, 365)
    # Predict forecast with Prophet (needs ds/y column names).
    df_train = data[["Date","Close"]]
    df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
    m = Prophet()
    m.fit(df_train)
    future = m.make_future_dataframe(periods=period)
    forecast = m.predict(future)
    st.write(f'Forecast plot for {period} days')
    fig1 = plot_plotly(m, forecast)
    st.plotly_chart(fig1)
    st.write(" ")
    gold_header("Latest News")
    st.write(" ")
    # Google News, English, past day, for this coin's topic.
    news = GoogleNews("en", "d")
    news.search(news_topics[symbol])
    news.get_page(1)
    result = news.result()
    # Top five headlines with links (index 0 skipped, matching the
    # original behaviour).
    for i in range(1, 6):
        st.write(f"{i}. " + result[i]["title"])
        st.info(f"{i}. " + result[i]["link"])

if option in news_topics:
    render_coin(option)
# Sentiment Analysis
# Pull 100 recent #cryptocurrency tweets, score each one with TextBlob,
# then show a word cloud and a sentiment-count bar plot.
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> How generally users feel about cryptocurrency? </p>
""", unsafe_allow_html=True)
st.write(" ")
df = get_tweets(api_key, api_secret, "#cryptocurrency")
df["Tweets"] = df["Tweets"].apply(Clean)
df["Subjectivity"] = df["Tweets"].apply(subjectivity)
df["Polarity"] = df["Tweets"].apply(polarity)
#WordCloud
# Word cloud over the concatenated cleaned tweet text.
words = " ".join([twts for twts in df["Tweets"]])
cloud = WordCloud(random_state = 21, max_font_size = 100).generate(words)
plt.imshow(cloud, interpolation = "bilinear")
plt.axis("off")
st.pyplot()
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Sentiment Bar Plot </p>
""", unsafe_allow_html=True)
st.write(" ")
# Get Sentiment tweets
# Bucket polarity into Negative/Neutral/Positive and plot the counts.
df["Sentiment"] = df["Polarity"].apply(sentiment)
df["Sentiment"].value_counts().plot(kind = "bar", figsize = (10,5))
plt.title("Sentiment Analysis Bar Plot")
plt.xlabel("Sentiment")
plt.ylabel("Number of Tweets")
st.pyplot()
|
import pandas as pd
import tweepy
from textblob import TextBlob
from wordcloud import WordCloud
import plotly.graph_objs as go
import os
import re
import pystan
import numpy as np
import streamlit as st
import matplotlib.pyplot as plt
import yfinance as yf
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from GoogleNews import GoogleNews
from ta.volatility import BollingerBands
from ta.trend import MACD
from ta.momentum import RSIIndicator
import datetime as datetime
import base64
import pandas as pd
import plotly.express as px
import datetime
import requests
from bs4 import BeautifulSoup
from datetime import date
from plotly import graph_objs
# --- Page chrome --------------------------------------------------------
# Wide layout plus custom browser-tab title/icon.
st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto",
    page_title= "Finance-Forcasting-Dashboard",
    page_icon= "Images/growth.png",
)
# Centre the logo by placing it in the middle of three weighted columns.
col1, col2, col3 = st.beta_columns([1,2,1])
col1.write("")
col2.image("Images/LL.png", width = 500)
col3.write("")
# Silence the global-use st.pyplot deprecation warning.
st.set_option('deprecation.showPyplotGlobalUse', False)
main_bg = "Images/BACK.png"
# BUG FIX: the data-URI MIME subtype must be the image format ("png"),
# not a file path.  The original set main_bg_ext = "Images/BACK.png",
# producing the invalid URI "data:image/Images/BACK.png;base64,...",
# so the CSS background never rendered.
main_bg_ext = "png"
# Inline the background image into the page CSS as a base64 data URI.
st.markdown(
    f"""
    <style>
    .reportview-container {{
        background: url(data:image/{main_bg_ext};base64,{base64.b64encode(open(main_bg, "rb").read()).decode()})
    }}
    </style>
    """,
    unsafe_allow_html=True
)
###############################Funtions############################
# load data from yahoo finance
def load_data(ticker):
    """Download daily OHLCV history for *ticker* from Yahoo Finance.

    Fetches everything from 2020-01-01 up to today and returns a
    DataFrame with ``Date`` as a regular column (index reset).
    """
    period_start = "2020-01-01"
    period_end = date.today().strftime("%Y-%m-%d")
    history = yf.download(ticker, period_start, period_end)
    return history.reset_index()
# Plot raw data
def plot_raw_data():
    """Chart the Open/Close series of the module-level ``data`` frame
    with an interactive range slider.

    NOTE(review): depends on a global ``data`` DataFrame assigned by the
    calling page branch — confirm before reusing elsewhere.
    """
    fig = graph_objs.Figure()
    for column, label in (("Open", "stock_open"), ("Close", "stock_close")):
        fig.add_trace(graph_objs.Scatter(x=data["Date"], y=data[column], name=label))
    fig.layout.update(
        title_text="Time Series data with Rangeslider",
        xaxis_rangeslider_visible=True,
    )
    st.plotly_chart(fig)
def get_forecast(data):
    """Fit a Prophet model on *data* (ds/y columns) and forecast one
    week ahead.  Returns ``(model, forecast)``.
    """
    prophet = Prophet()
    prophet.fit(data)
    horizon = prophet.make_future_dataframe(periods=7)
    prediction = prophet.predict(horizon)
    return prophet, prediction
@st.cache
def read_data():
    """Load the historical USD forex dataset from GitHub.

    Returns the full DataFrame plus the currency column names (every
    column except the leading date column).
    """
    csv_url = (
        "https://raw.githubusercontent.com/emrecanaltinsoy/"
        "forex_data/main/forex_usd_data.csv"
    )
    frame = pd.read_csv(csv_url)
    return frame, frame.columns[1:]
@st.cache
def get_range(data, date_range):
    """Slice *data* to the inclusive window given by *date_range*.

    Parameters
    ----------
    data : DataFrame with a "date(y-m-d)" column of ISO date strings.
    date_range : (start, end) pair, e.g. the value of st.date_input.

    Returns the sliced frame and its date column.
    """
    start_index = data.index[data["date(y-m-d)"] == str(date_range[0])].tolist()[0]
    end_index = data.index[data["date(y-m-d)"] == str(date_range[1])].tolist()[0]
    data = data.iloc[start_index : end_index + 1]
    # NOTE: removed the unused local `cols = data.columns` from the original.
    dates = data["date(y-m-d)"]
    return data, dates
@st.cache
def scrape_currency():
    """Scrape today's USD exchange-rate table from x-rates.com.

    Returns a one-row DataFrame (date column + one column per currency,
    with USD itself fixed at "1") and the currency column names.
    """
    today = datetime.date.today()
    base_url = "https://www.x-rates.com/historical/?from=USD&amount=1&date"
    # Zero-pad month/day with a format spec instead of the original's
    # manual `if > 9` string padding (identical output).
    date_str = f"{today.year}-{today.month:02d}-{today.day:02d}"
    URL = f"{base_url}={date_str}"
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, "html.parser")
    # The first 12 <tr> rows are page chrome; the rest are rate rows.
    table = soup.find_all("tr")[12:]
    currencies = [row.text.split("\n")[1:3][0] for row in table]
    rates = [row.text.split("\n")[1:3][1] for row in table]
    # Prepend the date column and the USD base entry.
    currencies[:0] = ["date(y-m-d)", "American Dollar"]
    rates[:0] = [date_str, "1"]
    # Pair names with rates directly rather than indexing by position.
    curr_data = pd.DataFrame(dict(zip(currencies, rates)), index=[0])
    return curr_data, curr_data.columns[1:]
@st.cache
def train_model(data, currency, period):
    """Fit Prophet on the last two years of *currency* rates and
    forecast *period* days ahead.  Returns ``(forecast, model)``.
    """
    history = data[["date(y-m-d)", currency]].iloc[-365 * 2 :]
    history = history.rename(columns={"date(y-m-d)": "ds", currency: "y"})
    model = Prophet()
    model.fit(history)
    horizon = model.make_future_dataframe(periods=period)
    return model.predict(horizon), model
# Load the full historical forex table once at startup (Streamlit-cached).
df_all, columns = read_data()
################################################################################
# Sidebar navigation: ``choice`` selects which page branch below runs.
st.sidebar.image("Images/Menu.png", width = 330)
menu = ["Home","STOCKS Live Forcasting", "Crypto-Live Forcasting","View Historical Currency Charts", "Check Live Currency Exchange rates", "Forecast Currency Live Prices"]
choice = st.sidebar.selectbox("Menu", menu)
# Landing page: static description and "how it works" image only.
if choice == "Home":
    st.write("")
    st.write(""" <p style=" font-size: 15px; font-weight:normal; font-family:verdana"> Finance Dashboard is a special web service that allows you to view Cryptocurrencies,Stocks,and Live Currency Values by many useful methods (technical indicators, graphical patterns, sentimental analysis, and more). Trading and crypto investing requires constant analysis and monitoring. Traders need to track all their trades in order to improve results and find errors. If you don't use additional instruments, then trading will be unsystematic, and the results will be uncertain. Such a service will be useful and even extremely necessary for those who trade and invest in cryptocurrencies and Stocks. Competent selection of cryptocurrencies is at least half of investment success. Finance Dashboard has a simple interface and is great for quick analysis of the Stock market. </p>
""", unsafe_allow_html=True)
    # Vertical spacing before the next section.
    st.write("")
    st.write("")
    st.write("")
    st.write("")
    st.write("")
    st.write(""" <p style=" color:#E75480; font-size: 30px; font-weight:bold"> How does it work? </p>
""", unsafe_allow_html=True)
    st.write("")
    st.image("Images/How.png", width = 1300)
    st.sidebar.write(" ")
    st.sidebar.write(" ")
    st.sidebar.image("Images/info.png", width = 300)
elif choice == "STOCKS Live Forcasting":
st.title('Stocks Weekly Forecast')
st.subheader('Enter the stock ticker:')
ticker = st.text_input('example: GOOG')
ticket = ticker.upper()
if len(ticker)>0:
data_load_state = st.text('Loading data...')
data = load_data(ticker)
if data.empty:
data_load_state.text(f'No ticker named {ticker}')
ticker = ''
else:
data_load_state.text('Loading data... done!')
st.subheader(f'Company: {yf.Ticker(ticker).info["longName"]}')
st.write(data.head())
plot_raw_data()
# prepare data for forecasting
df_train = data[['Date','Close']]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
# train and forecast
model, forecast = get_forecast(df_train)
st.subheader('Forecast')
# plot forecast
st.write(f'Forecast plot for the next week')
fig = plot_plotly(model, forecast)
st.plotly_chart(fig)
# Historical charts: pick a date window and a set of currencies to plot.
elif choice == "View Historical Currency Charts":
    st.write("This app can be used to view historical **currency** charts!")
    # The dataset starts on 2011-01-01 with one row per day, so the last
    # selectable date is start + (row count - 1) days.
    date_range = st.date_input(
        "Choose date range",
        value=(
            datetime.date(2011, 1, 1),
            datetime.date(2011, 1, 1) + datetime.timedelta(df_all.shape[0] - 1),
        ),
        min_value=datetime.date(2011, 1, 1),
        max_value=datetime.date(2011, 1, 1) + datetime.timedelta(df_all.shape[0] - 1),
    )
    df, dates = get_range(df_all, date_range)
    selected_curr = st.multiselect("Select currencies", columns)
    ok = st.button("View")
    if ok:
        if selected_curr:
            # st.write(df[selected_curr])
            # One line chart per selected currency.
            for curr in selected_curr:
                fig = px.line(
                    x=dates,
                    y=df[curr],
                )
                fig.update_layout(
                    xaxis_title="Date",
                    yaxis_title=curr,
                )
                st.write(fig)
# Live rates: scrape today's USD table and re-base on a chosen currency.
elif choice == "Check Live Currency Exchange rates":
    st.write("This app can be used to check current **currency** data!")
    daily_df, columns = scrape_currency()
    base_curr = st.selectbox("Select the base currency", columns)
    selected_curr = st.multiselect("Select currencies", columns)
    if selected_curr:
        # Scraped rates are USD-relative strings; convert to float and
        # divide by the base currency's rate to re-base the quotes.
        base = daily_df[base_curr].astype(float)
        selected = daily_df[selected_curr].astype(float)
        converted = selected / float(base)
        st.write(converted)
# Currency forecast: train Prophet on the historical table on demand.
elif choice == "Forecast Currency Live Prices":
    currency = st.selectbox("Select the currency for prediction", columns)
    n_weeks = st.slider("Weeks of prediction", 4, 20, 8, 1)
    ok = st.button("Predict")
    if ok:
        train_state = st.text("Training the model...")
        # Prophet's horizon is in days, so convert the selected weeks.
        pred, model = train_model(df_all, currency, period=n_weeks * 7)
        train_state.text("Model training completed!!")
        st.subheader("Forecast data")
        fig1 = plot_plotly(model, pred)
        st.plotly_chart(fig1)
# Crypto dashboard: ticker and date-range selection in the sidebar.
elif choice == "Crypto-Live Forcasting":
    st.sidebar.header("Please select cryptocurrency")
    option = st.sidebar.selectbox("Ticker Symbol",("BTC-USD", "ETH-USD", "XRP-USD", "DOGE-USD", "ADA-USD", "BNB-USD", "LTC-USD",))
    today = datetime.date.today()
    # Default window: roughly the last four years of daily data.
    before = today - datetime.timedelta(days=1400)
    start_date = st.sidebar.date_input('Start date', before)
    end_date = st.sidebar.date_input('End date', today)
    if start_date < end_date:
        st.sidebar.success("Start date: `%s`\n\nEnd date: `%s` " % (start_date, end_date))
    else:
        st.sidebar.error("Error: End date must fall after start date.")
@st.cache(allow_output_mutation = True)
def get_data(option, start_date, end_date):
df = yf.download(option,start= start_date,end = end_date, progress=False)
return df
# Getting API_KEYS
api_key = os.environ.get("Key")
api_secret = os.environ.get("Secret")
# Function for getting tweets
# Create authentication
@st.cache(allow_output_mutation = True)
def get_tweets(key, secret, search_term):
authentication = tweepy.OAuthHandler(api_key, api_secret)
api = tweepy.API(authentication)
term = search_term+"-filter:retweets"
# Create a cursor object
tweets = tweepy.Cursor(api.search, q = term, lang = "en",
since = today, tweet_mode = "extended").items(100)
# Store the tweets
tweets_text = [tweet.full_text for tweet in tweets]
df = pd.DataFrame(tweets_text, columns = ["Tweets"])
return df
# Clean text
@st.cache(allow_output_mutation = True)
def Clean(twt):
twt = re.sub("#cryptocurrency", "cryptocurrency", twt)
twt = re.sub("#Cryptocurrency", "Cryptocurrency", twt)
twt = re.sub("#[A-Za-z0-9]+", "", twt)
twt = re.sub("RT[\s]+", "", twt)
twt = re.sub("\\n", "", twt)
twt = re.sub("https?\://\S+", '', twt)
twt = re.sub("<br />", "", twt)
twt = re.sub("\d","", twt)
twt = re.sub("it\'s", "it is", twt)
twt = re.sub("can\'t", "cannot", twt)
twt = re.sub("<(?:a\b[^>]*>|/a>)", "", twt)
return twt
    # Subjectivity and Polarity
    @st.cache(allow_output_mutation = True)
    def subjectivity(text):
        """Return the TextBlob subjectivity score of *text* (0 = objective)."""
        return TextBlob(text).sentiment.subjectivity
    @st.cache(allow_output_mutation = True)
    def polarity(text):
        """Return the TextBlob polarity score of *text* (negative < 0 < positive)."""
        return TextBlob(text).sentiment.polarity
# Create a function to get sentiment text
@st.cache(allow_output_mutation = True)
def sentiment(score):
if score < 0:
return "Negative"
elif score == 0:
return "Neutral"
else:
return "Positive"
if option == "BTC-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Resistence Strength Indicator
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Resistence Strength Indicator </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> BTC-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
#Plot
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Bitcoin")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "ETH-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Resistence Strength Indicator
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Resistence Strength Indicator </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> ETH-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Etherium")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "DOGE-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Resistence Strength Indicator
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Resistence Strength Indicator </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> DOGE-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Dogecoin")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
st.write(" ")
elif option == "XRP-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Resistence Strength Indicator
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Resistence Strength Indicator </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> DOGE-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("XRP")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "ADA-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Resistence Strength Indicator
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Resistence Strength Indicator </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> ADA-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("cryptocurrency")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "BNB-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Resistence Strength Indicator
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Resistence Strength Indicator </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> BNB-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("BNB")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
elif option == "LTC-USD":
df = get_data(option, start_date, end_date)
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Raw Data </p>
""", unsafe_allow_html=True)
st.write(" ")
st.write(df)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Close Price </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(df["Close"])
st.write(" ")
# MACD
st.write(" ")
macd = MACD(df["Close"]).macd()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Moving Average Convergence Divergence </p>
""", unsafe_allow_html=True)
st.write(" ")
st.area_chart(macd)
# Bollinger Bands
bb_bands = BollingerBands(df["Close"])
bb = df
bb["bb_h"] = bb_bands.bollinger_hband()
bb["bb_l"] = bb_bands.bollinger_lband()
bb = bb[["Close","bb_h","bb_l"]]
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Bollinger Bands </p>
""", unsafe_allow_html=True)
st.line_chart(bb)
st.write(" ")
# Resistence Strength Indicator
rsi = RSIIndicator(df["Close"]).rsi()
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Resistence Strength Indicator </p>
""", unsafe_allow_html=True)
st.write(" ")
st.line_chart(rsi)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> LTC-USD Forecast using Facebook Prophet </p>
""", unsafe_allow_html=True)
st.write(" ")
data = df.reset_index()
period = st.slider("Days of prediction:", 1, 365)
# Predict forecast with Prophet.
df_train = data[["Date","Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.write(f'Forecast plot for {period} days')
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Latest News </p>
""", unsafe_allow_html=True)
st.write(" ")
news = GoogleNews()
news = GoogleNews("en", "d")
news.search("Litecoin")
news.get_page(1)
result = news.result()
st.write("1. " + result[1]["title"])
st.info("1. " + result[1]["link"])
st.write("2. " + result[2]["title"])
st.info("2. " + result[2]["link"])
st.write("3. " + result[3]["title"])
st.info("3. " + result[3]["link"])
st.write("4. " + result[4]["title"])
st.info("4. " + result[4]["link"])
st.write("5. " + result[5]["title"])
st.info("5. " + result[5]["link"])
# Sentiment Analysis
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> How generally users feel about cryptocurrency? </p>
""", unsafe_allow_html=True)
st.write(" ")
df = get_tweets(api_key, api_secret, "#cryptocurrency")
df["Tweets"] = df["Tweets"].apply(Clean)
df["Subjectivity"] = df["Tweets"].apply(subjectivity)
df["Polarity"] = df["Tweets"].apply(polarity)
#WordCloud
words = " ".join([twts for twts in df["Tweets"]])
cloud = WordCloud(random_state = 21, max_font_size = 100).generate(words)
plt.imshow(cloud, interpolation = "bilinear")
plt.axis("off")
st.pyplot()
st.write(" ")
st.write(""" <p style=" color:#FFCC00; font-size: 30px; font-weight:bold"> Sentiment Bar Plot </p>
""", unsafe_allow_html=True)
st.write(" ")
# Get Sentiment tweets
df["Sentiment"] = df["Polarity"].apply(sentiment)
df["Sentiment"].value_counts().plot(kind = "bar", figsize = (10,5))
plt.title("Sentiment Analysis Bar Plot")
plt.xlabel("Sentiment")
plt.ylabel("Number of Tweets")
st.pyplot()
|
"""HelloWorld Integration for Cortex XSOAR (aka Demisto)
This integration is a good example of how you can build a Cortex XSOAR Integration
using Python 3. Please follow the documentation links below and make sure that
your integration follows the Code Conventions and passes the Linting phase.
Developer Documentation: https://xsoar.pan.dev/docs/welcome
Code Conventions: https://xsoar.pan.dev/docs/integrations/code-conventions
Linting: https://xsoar.pan.dev/docs/integrations/linting
When building a Cortex XSOAR integration that is reusable, a lot of effort
must be placed in the design. We recommend to fill a Design Document template,
that allows you to capture Use Cases, Requirements and Inputs/Outputs.
Example Design document for this Integration (HelloWorld):
https://docs.google.com/document/d/1wETtBEKg37PHNU8tYeB56M1LE314ux086z3HFeF_cX0
HelloWorld API
--------------
The HelloWorld API is a simple API that shows a realistic use case for an XSOAR
integration. It's actually a real API that is available at the following URL:
https://soar.mastersofhack.com - if you need an API Key to test it out please
reach out to your Cortex XSOAR contacts.
This API has a few basic functions:
- Alerts: the endpoint returns mocked alerts and allows you to search based on
a number of parameters, such as state (ACTIVE or CLOSED), type, timestamp. It
can also return a single alert by ID. This is used to create new Incidents in
XSOAR by using the ``fetch-incidents`` command, which is by default invoked
every minute.
There is also an endpoint that allows to retrieve additional details about a
specific alert by ID, and one to change the alert status to "CLOSED" once
it has been resolved.
- Reputation (ip and domain): these endpoints return, for an IP and
domain respectively, a WHOIS lookup of the entity as well as a reputation score
(from 0 to 100) that is used to determine whether the entity is malicious. This
endpoint is called by XSOAR reputation commands ``ip`` and ``domain`` that
are run automatically every time an indicator is extracted in XSOAR. As a best
practice of design, it is important to map and document the mapping between
a score in the original API format (0 to 100 in this case) to a score in XSOAR
format (0 to 3). This score is called ``DBotScore``, and is returned in the
context to allow automated handling of indicators based on their reputation.
More information: https://xsoar.pan.dev/docs/integrations/dbot
- Scan: to demonstrate how to run commands that are not returning instant data,
the API provides a scan endpoint that simulates scanning a host and generating
a report after the scan is completed. The API has endpoints to start a scan,
which returns a job ID, poll for the scan status and, if the scan is completed,
retrieved the job results.
This function is used in conjunction of the HelloWorld Scan playbook that uses
the GenericPolling mechanism to implement the job polling loop. The results
can be returned in JSON or attachment file format.
Info on GenericPolling: https://xsoar.pan.dev/docs/playbooks/generic-polling
Please check the HelloWorld Design Document referenced above for details about
the raw API responses as well as the design details for this integration.
This integration also has a ``say-hello`` command for backward compatibility,
that doesn't connect to an API and just returns a ``Hello {name}`` string,
where name is the input value provided.
Integration File Structure
--------------------------
An integration usually consists of the following parts:
- Imports
- Constants
- Client Class
- Helper Functions
- Command Functions
- Main Function
- Entry Point
Imports
-------
Here you can import Python module you need for your integration. If you need
a module that is not part of the default XSOAR Docker images, you can add
a custom one. More details: https://xsoar.pan.dev/docs/integrations/docker
There are also internal imports that are used by XSOAR:
- demistomock (imported as demisto): allows your code to work offline for
testing. The actual ``demisto`` module is provided at runtime when the
code runs in XSOAR.
- CommonServerPython.py: contains a set of helper functions, base classes
and other useful components that will make your integration code easier
to maintain.
- CommonServerUserPython.py: includes a set of user defined commands that
are specific to an XSOAR installation. Do not use it for integrations that
are meant to be shared externally.
These imports are automatically loaded at runtime within the XSOAR script
runner, so you shouldn't modify them
Constants
---------
Usually some constants that do not require user parameters or inputs, such
as the default API entry point for your service, or the maximum numbers of
incidents to fetch every time.
Client Class
------------
We recommend to use a Client class to wrap all the code that needs to interact
with your API. Moreover, we recommend, when possible, to inherit from the
BaseClient class, defined in CommonServerPython.py. This class already handles
a lot of the work, such as system proxy settings, SSL certificate verification
and exception handling for HTTP errors.
Note that the Client class should NOT contain any Cortex XSOAR specific code,
i.e. it shouldn't use anything in the ``demisto`` class (functions such as
``demisto.args()`` or ``demisto.results()`` or even ``return_results`` and
``return_error``.
You will use the Command Functions to handle XSOAR inputs and outputs.
When calling an API, you should use the ``_http_request()`` method and you
can return the raw data to the calling function (usually a Command function).
You should usually have one function for each API endpoint.
Look at the code and the comments of this specific class to better understand
the implementation details.
Helper Functions
----------------
Helper functions are usually used as utility functions that are used by several
command functions throughout your code. For example they map arguments to types
or convert severity formats from integration-specific to XSOAR.
Many helper functions are already defined in ``CommonServerPython.py`` and are
often very handy.
Command Functions
-----------------
Command functions perform the mapping between XSOAR inputs and outputs to the
Client class functions inputs and outputs. As a best practice, they shouldn't
contain calls to ``demisto.args()``, ``demisto.results()``, ``return_error``
and ``demisto.command()`` as those should be handled through the ``main()``
function.
However, in command functions, use ``demisto`` or ``CommonServerPython.py``
artifacts, such as ``demisto.debug()`` or the ``CommandResults`` class and the
``Common.*`` classes.
Usually you will have one command function for every specific XSOAR command
you want to implement in your integration, plus ``test-module``,
``fetch-incidents`` and ``fetch-indicators``(if the latter two are supported
by your integration). Each command function should invoke one specific function
of the Client class.
Command functions, when invoked through an XSOAR command usually return data
using the ``CommandResults`` class, that is then passed to ``return_results()``
in the ``main()`` function.
``return_results()`` is defined in ``CommonServerPython.py`` to return
the data to XSOAR. ``return_results()`` actually wraps ``demisto.results()``.
You should never use ``demisto.results()`` directly.
Sometimes you will need to return values in a format that is not compatible
with ``CommandResults`` (for example files): in that case you must return a
data structure that is then passed to ``return_results()`` (i.e.
check the ``scan_results_command`` function in this file that has the option
to return a file to Cortex XSOAR).
In any case you should never call ``return_results()`` directly from the
command functions.
When you create the ``CommandResults`` object in command functions, you
usually pass some types of data:
- Human Readable: usually in Markdown format. This is what is presented to the
analyst in the War Room. You can use ``tableToMarkdown()``, defined in
``CommonServerPython.py``, to convert lists and dicts in Markdown and pass it
to ``return_results()`` using the ``readable_output`` argument, or the
``return_results()`` function will call ``tableToMarkdown()`` automatically for
you.
- Context Output: this is the machine readable data, JSON based, that XSOAR can
parse and manage in the Playbooks or Incident's War Room. The Context Output
fields should be defined in your integration YML file and is important during
the design phase. Make sure you define the format and follow best practices.
You can use ``demisto-sdk json-to-outputs`` to autogenerate the YML file
outputs section. Context output is passed as the ``outputs`` argument of ``CommandResults``,
and the prefix (i.e. ``HelloWorld.Alert``) is passed via the ``outputs_prefix``
argument.
More information on Context Outputs, Standards, DBotScore and demisto-sdk:
https://xsoar.pan.dev/docs/integrations/code-conventions#outputs
https://xsoar.pan.dev/docs/integrations/context-and-outputs
https://xsoar.pan.dev/docs/integrations/context-standards
https://xsoar.pan.dev/docs/integrations/dbot
https://github.com/demisto/demisto-sdk/blob/master/demisto_sdk/commands/json_to_outputs/README.md
Also, when you write data in the Context, you want to make sure that if you
return updated information for an entity, to update it and not append to
the list of entities (i.e. in HelloWorld you want to update the status of an
existing ``HelloWorld.Alert`` in the context when you retrieve it, rather than
adding a new one if you already retrieved it). To update data in the Context,
you can define which is the key attribute to use, such as (using the example):
``outputs_key_field='alert_id'``. This means that you are using the ``alert_id``
key to determine whether adding a new entry in the context or updating an
existing one that has the same ID. You can look at the examples to understand
how it works.
More information here:
https://xsoar.pan.dev/docs/integrations/context-and-outputs
https://xsoar.pan.dev/docs/integrations/code-conventions#outputs
https://xsoar.pan.dev/docs/integrations/dt
- Raw Output: this is usually the raw result from your API and is used for
troubleshooting purposes or for invoking your command from Automation Scripts.
If not specified, ``return_results()`` will use the same data as ``outputs``.
Main Function
-------------
The ``main()`` function takes care of reading the integration parameters via
the ``demisto.params()`` function, initializes the Client class and checks the
different options provided to ``demisto.commands()``, to invoke the correct
command function passing to it ``demisto.args()`` and returning the data to
``return_results()``. If implemented, ``main()`` also invokes the function
``fetch_incidents()`` with the right parameters and passes the outputs to the
``demisto.incidents()`` function. ``main()`` also catches exceptions and
returns an error message via ``return_error()``.
Entry Point
-----------
This is the integration code entry point. It checks whether the ``__name__``
variable is ``__main__`` , ``__builtin__`` (for Python 2) or ``builtins`` (for
Python 3) and then calls the ``main()`` function. Just keep this convention.
"""
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import json
import urllib3
import dateparser
import traceback
from typing import Any, Dict, Tuple, List, Optional, Union, cast
# Disable insecure warnings
# Silence urllib3 warnings (e.g. the ones emitted for unverified HTTPS
# requests) so they don't flood the integration logs.
urllib3.disable_warnings()
''' CONSTANTS '''
# ISO8601 timestamp format (UTC, 'Z' suffix) used when rendering dates.
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Hard upper bound on the number of incidents pulled in a single fetch cycle.
MAX_INCIDENTS_TO_FETCH = 50
# Valid alert severities ordered from least to most severe; the ordering is
# relied upon when building "minimum severity and above" CSV filters.
HELLOWORLD_SEVERITIES = ['Low', 'Medium', 'High', 'Critical']
''' CLIENT CLASS '''
class Client(BaseClient):
    """Client class to interact with the HelloWorld service API.

    Each service endpoint is wrapped by a dedicated method. The class
    inherits from ``BaseClient`` (CommonServerPython), so proxy settings,
    SSL verification and HTTP error handling are taken care of by
    ``_http_request()``. No Cortex XSOAR specific logic lives here: the
    methods only perform requests and return the raw data.
    """

    def _get(self, url_suffix: str, params: Dict[str, Any]) -> Any:
        """Issue a GET request to ``url_suffix`` with the given query params.

        :type url_suffix: ``str``
        :param url_suffix: API endpoint path appended to the base URL
        :type params: ``Dict[str, Any]``
        :param params: query-string parameters for the request
        :return: parsed response body as returned by ``_http_request()``
        :rtype: ``Any``
        """
        return self._http_request(
            method='GET',
            url_suffix=url_suffix,
            params=params,
        )

    def get_ip_reputation(self, ip: str) -> Dict[str, Any]:
        """Get the reputation of an IP via the '/ip' API endpoint.

        :type ip: ``str``
        :param ip: IP address to get the reputation for
        :return: dict containing the IP reputation as returned from the API
        :rtype: ``Dict[str, Any]``
        """
        return self._get('/ip', {'ip': ip})

    def get_domain_reputation(self, domain: str) -> Dict[str, Any]:
        """Get the reputation of a domain via the '/domain' API endpoint.

        :type domain: ``str``
        :param domain: domain name to get the reputation for
        :return: dict containing the domain reputation as returned from the API
        :rtype: ``Dict[str, Any]``
        """
        return self._get('/domain', {'domain': domain})

    def search_alerts(self, alert_status: Optional[str], severity: Optional[str],
                      alert_type: Optional[str], max_results: Optional[int],
                      start_time: Optional[int]) -> List[Dict[str, Any]]:
        """Search HelloWorld alerts via the '/get_alerts' API endpoint.

        Only the filters the caller actually supplied (truthy values) are
        forwarded to the API as query parameters.

        :type alert_status: ``Optional[str]``
        :param alert_status: alert status filter, 'ACTIVE' or 'CLOSED'
        :type severity: ``Optional[str]``
        :param severity:
            comma-separated severity filter; options are
            "Low", "Medium", "High", "Critical"
        :type alert_type: ``Optional[str]``
        :param alert_type: free-form alert type filter
        :type max_results: ``Optional[int]``
        :param max_results: maximum number of results to return
        :type start_time: ``Optional[int]``
        :param start_time: start timestamp (epoch in seconds) for the search
        :return: list of alert dicts as returned from the API
        :rtype: ``List[Dict[str, Any]]``
        """
        candidate_filters = {
            'alert_status': alert_status,
            'alert_type': alert_type,
            'severity': severity,
            'max_results': max_results,
            'start_time': start_time,
        }
        # Drop unset (falsy) filters so they don't reach the API.
        request_params = {name: value for name, value in candidate_filters.items() if value}
        return self._get('/get_alerts', request_params)

    def get_alert(self, alert_id: str) -> Dict[str, Any]:
        """Get a specific HelloWorld alert by id.

        :type alert_id: ``str``
        :param alert_id: id of the alert to return
        :return: dict containing the alert as returned from the API
        :rtype: ``Dict[str, Any]``
        """
        return self._get('/get_alert_details', {'alert_id': alert_id})

    def update_alert_status(self, alert_id: str, alert_status: str) -> Dict[str, Any]:
        """Change the status of a specific HelloWorld alert.

        :type alert_id: ``str``
        :param alert_id: id of the alert to update
        :type alert_status: ``str``
        :param alert_status: new alert status, 'ACTIVE' or 'CLOSED'
        :return: dict containing the updated alert as returned from the API
        :rtype: ``Dict[str, Any]``
        """
        return self._get('/change_alert_status', {
            'alert_id': alert_id,
            'alert_status': alert_status,
        })

    def scan_start(self, hostname: str) -> Dict[str, Any]:
        """Start a HelloWorld scan on a specific hostname.

        :type hostname: ``str``
        :param hostname: hostname of the machine to scan
        :return: dict containing the scan status as returned from the API
        :rtype: ``Dict[str, Any]``
        """
        return self._get('/start_scan', {'hostname': hostname})

    def scan_status(self, scan_id: str) -> Dict[str, Any]:
        """Get the status of a HelloWorld scan.

        :type scan_id: ``str``
        :param scan_id: ID of the scan to retrieve status for
        :return: dict containing the scan status as returned from the API
        :rtype: ``Dict[str, Any]``
        """
        return self._get('/check_scan', {'scan_id': scan_id})

    def scan_results(self, scan_id: str) -> Dict[str, Any]:
        """Get the results of a HelloWorld scan.

        :type scan_id: ``str``
        :param scan_id: ID of the scan to retrieve results for
        :return: dict containing the scan results as returned from the API
        :rtype: ``Dict[str, Any]``
        """
        return self._get('/get_scan_results', {'scan_id': scan_id})

    def say_hello(self, name: str) -> str:
        """Return the string 'Hello {name}'.

        :type name: ``str``
        :param name: name to append to the 'Hello' string
        :return: string containing 'Hello {name}'
        :rtype: ``str``
        """
        return 'Hello {}'.format(name)
''' HELPER FUNCTIONS '''
def parse_domain_date(domain_date: Union[List[str], str], date_format: str = '%Y-%m-%dT%H:%M:%S.000Z') -> Optional[str]:
    """Convert a WHOIS date value to an ISO8601 string.

    Parses the HelloWorld domain WHOIS date ('YYYY-mm-dd HH:MM:SS') into a
    datetime and re-renders it with ``date_format``. When a list is given,
    only its first element is considered.

    :type domain_date: ``Union[List[str], str]``
    :param domain_date:
        raw WHOIS date value; either a date string or a list whose first
        element is a date string
    :type date_format: ``str``
    :param date_format: ``strftime`` format used for the returned string
    :return: parsed time in ISO8601 format, or None if it cannot be parsed
    :rtype: ``Optional[str]``
    """
    # Normalize the input to a single date string (or None when the input
    # has an unexpected shape).
    raw_date: Optional[str] = None
    if isinstance(domain_date, str):
        raw_date = domain_date
    elif isinstance(domain_date, list) and len(domain_date) > 0 and isinstance(domain_date[0], str):
        raw_date = domain_date[0]

    if raw_date is not None:
        domain_date_dt = dateparser.parse(raw_date)
        if domain_date_dt:
            return domain_date_dt.strftime(date_format)
    # Unparseable value or unexpected input shape: return nothing.
    return None
def convert_to_demisto_severity(severity: str) -> int:
    """Map a HelloWorld severity name to a Cortex XSOAR severity level.

    Translates the HelloWorld alert severity ('Low', 'Medium', 'High',
    'Critical') into the corresponding Cortex XSOAR incident severity
    (1 to 4), as needed for incident mapping.

    :type severity: ``str``
    :param severity: severity as returned from the HelloWorld API (str)
    :return: Cortex XSOAR Severity (1 to 4)
    :rtype: ``int``
    """
    # The mapping here is one-to-one, but a dedicated (and documented)
    # function keeps room for more complex mappings in other integrations.
    severity_map: Dict[str, int] = {
        'Low': IncidentSeverity.LOW,
        'Medium': IncidentSeverity.MEDIUM,
        'High': IncidentSeverity.HIGH,
        'Critical': IncidentSeverity.CRITICAL,
    }
    return severity_map[severity]
''' COMMAND FUNCTIONS '''
def test_module(client: Client, first_fetch_time: int) -> str:
    """Test API connectivity and authentication.

    Returning 'ok' indicates that the integration works like it is supposed
    to and the connection to the service is successful. Any other returned
    string (or raised exception) fails the test in the Cortex XSOAR UI.

    :type client: ``Client``
    :param client: HelloWorld client to use
    :type first_fetch_time: ``int``
    :param first_fetch_time: epoch timestamp used as the search start time
    :return: 'ok' if the test passed, anything else will fail the test
    :rtype: ``str``
    """
    # The Client raises on failure; we catch the auth-specific case here to
    # surface a readable message instead of the raw exception text.
    try:
        client.search_alerts(
            max_results=1,
            start_time=first_fetch_time,
            alert_status=None,
            alert_type=None,
            severity=None,
        )
    except DemistoException as e:
        if 'Forbidden' in str(e):
            return 'Authorization Error: make sure API Key is correctly set'
        raise
    return 'ok'
def say_hello_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-say-hello command: Returns Hello {somename}.

    :type client: ``Client``
    :param client: HelloWorld client to use
    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['name']`` is used as input name
    :return:
        A ``CommandResults`` object that is then passed to
        ``return_results``, containing the hello world message
    :rtype: ``CommandResults``
    """
    # 'name' is declared mandatory in the YML, so XSOAR checks it before the
    # code runs; the guard here is defensive.
    name = args.get('name', None)
    if not name:
        raise ValueError('name not specified')

    greeting = client.say_hello(name)

    # Markdown heading shown to the analyst in the War Room.
    # More complex output would go through ``tableToMarkdown()``.
    return CommandResults(
        readable_output=f'## {greeting}',
        outputs_prefix='hello',
        outputs_key_field='',
        outputs=greeting
    )
def fetch_incidents(client: Client, max_results: int, last_run: Dict[str, int],
                    first_fetch_time: Optional[int], alert_status: Optional[str],
                    min_severity: str, alert_type: Optional[str]
                    ) -> Tuple[Dict[str, int], List[dict]]:
    """Retrieve new HelloWorld alerts and convert them to XSOAR incidents.

    Invoked periodically by XSOAR (default every minute). Uses ``last_run``
    to checkpoint the creation time of the newest alert processed so that
    incidents are fetched only once and none are missed. On the very first
    run (no ``last_run``), ``first_fetch_time`` is used as the start point.

    :type client: ``Client``
    :param client: HelloWorld client to use
    :type max_results: ``int``
    :param max_results: maximum number of incidents per fetch
    :type last_run: ``Dict[str, int]``
    :param last_run:
        dict with a 'last_fetch' key holding the creation time of the latest
        incident we got on the previous fetch
    :type first_fetch_time: ``Optional[int]``
    :param first_fetch_time:
        timestamp to start fetching from when ``last_run`` is empty
    :type alert_status: ``Optional[str]``
    :param alert_status: alert status filter, 'ACTIVE' or 'CLOSED'
    :type min_severity: ``str``
    :param min_severity:
        minimum severity of the alerts to fetch; options are
        "Low", "Medium", "High", "Critical"
    :type alert_type: ``Optional[str]``
    :param alert_type: free-form alert type filter
    :return:
        a tuple of ``next_run`` (checkpoint dict for the next fetch) and the
        list of incidents to create in XSOAR
    :rtype: ``Tuple[Dict[str, int], List[dict]]``
    """
    # Resolve the lower time bound: the stored checkpoint if present,
    # otherwise the configured first-fetch timestamp.
    last_fetch = last_run.get('last_fetch', None)
    if last_fetch is None:
        last_fetch = first_fetch_time
    else:
        last_fetch = int(last_fetch)

    # cast() is for the type checker only: from here on we treat it as int.
    latest_created_time = cast(int, last_fetch)

    incidents: List[Dict[str, Any]] = []

    # CSV of every severity at or above min_severity; relies on
    # HELLOWORLD_SEVERITIES being ordered from least to most severe.
    severity = ','.join(HELLOWORLD_SEVERITIES[HELLOWORLD_SEVERITIES.index(min_severity):])

    alerts = client.search_alerts(
        alert_type=alert_type,
        alert_status=alert_status,
        max_results=max_results,
        start_time=last_fetch,
        severity=severity
    )

    for alert in alerts:
        # The API reports creation time in epoch seconds; default to 0 when
        # it is absent.
        incident_created_time = int(alert.get('created', '0'))

        # Skip anything at or before the checkpoint to prevent duplicates.
        if last_fetch and incident_created_time <= last_fetch:
            continue

        # Mandatory incident fields:
        #   name     - a missing alert 'name' raises KeyError on purpose
        #   occurred - ISO8601; timestamp_to_datestring() expects ms,
        #              hence the * 1000
        #   rawJSON  - full alert payload, used later for classification
        #              and mapping inside XSOAR
        # 'severity' is optional but recommended; it must be the XSOAR
        # numeric severity (1 to 4). Additional out-of-the-box or custom
        # fields can be mapped either here or in the mapping phase.
        incidents.append({
            'name': alert['name'],
            'occurred': timestamp_to_datestring(incident_created_time * 1000),
            'rawJSON': json.dumps(alert),
            'severity': convert_to_demisto_severity(alert.get('severity', 'Low')),
        })

        # Advance the checkpoint to the newest alert seen so far.
        if incident_created_time > latest_created_time:
            latest_created_time = incident_created_time

    # The returned dict is stored by XSOAR and handed back as last_run on
    # the next invocation.
    next_run = {'last_fetch': latest_created_time}
    return next_run, incidents
def ip_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> List[CommandResults]:
    """ip command: Returns IP reputation for a list of IPs.

    :type client: ``Client``
    :param client: HelloWorld client to use
    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['ip']`` is a list of IPs or a single IP;
        ``args['threshold']`` optionally overrides the malicious threshold
    :type default_threshold: ``int``
    :param default_threshold:
        threshold used to flag an IP as malicious when ``args['threshold']``
        is not supplied
    :return:
        one ``CommandResults`` per IP, each carrying the vendor context
        (HelloWorld.IP), the IP standard context and a DBotScore; the list
        is then passed to ``return_results``
    :rtype: ``List[CommandResults]``
    """
    # argToList() accepts both a scalar and a CSV/list, so reputation
    # commands can be invoked once for many indicators even though the API
    # takes one IP per call.
    ips = argToList(args.get('ip'))
    if not ips:
        raise ValueError('IP(s) not specified')

    # The threshold is overridable per invocation and should be documented
    # in the integration docs.
    threshold = int(args.get('threshold', default_threshold))

    command_results: List[CommandResults] = []

    for ip in ips:
        ip_data = client.get_ip_reputation(ip)
        ip_data['ip'] = ip

        # Map the vendor score onto DBotScore
        # (https://xsoar.pan.dev/docs/integrations/dbot):
        # 0 -> unknown, >= threshold -> bad, >= threshold/2 -> suspicious,
        # otherwise -> good.
        reputation = int(ip_data.get('score', 0))
        if reputation == 0:
            score = Common.DBotScore.NONE
        elif reputation >= threshold:
            score = Common.DBotScore.BAD
        elif reputation >= threshold / 2:
            score = Common.DBotScore.SUSPICIOUS
        else:
            score = Common.DBotScore.GOOD

        # The context has three parts: the vendor-specific context
        # (HelloWorld.IP), the IP standard context and the DBotScore. See:
        # https://xsoar.pan.dev/docs/integrations/context-standards
        dbot_score = Common.DBotScore(
            indicator=ip,
            indicator_type=DBotScoreType.IP,
            integration_name='HelloWorld',
            score=score,
            malicious_description=f'Hello World returned reputation {reputation}'
        )

        # IP standard context, carrying the DBotScore.
        ip_standard_context = Common.IP(
            ip=ip,
            asn=ip_data.get('asn'),
            dbot_score=dbot_score
        )

        # The raw API response is too verbose for the context: drop the
        # noisiest fields before writing HelloWorld.IP.
        ip_data = {key: value for key, value in ip_data.items()
                   if key not in ('objects', 'nir')}

        # Custom markdown so the War Room table gets an explicit title;
        # output key is HelloWorld.IP keyed on 'ip', with ``indicator``
        # providing the IP standard context.
        command_results.append(CommandResults(
            readable_output=tableToMarkdown('IP', ip_data),
            outputs_prefix='HelloWorld.IP',
            outputs_key_field='ip',
            outputs=ip_data,
            indicator=ip_standard_context
        ))
    return command_results
def domain_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> List[CommandResults]:
    """domain command: Returns domain reputation for a list of domains.

    :type client: ``Client``
    :param client: HelloWorld client to use
    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['domain']`` is a list of domains or a single domain;
        ``args['threshold']`` optionally overrides the malicious threshold
    :type default_threshold: ``int``
    :param default_threshold:
        threshold used to flag a domain as malicious when
        ``args['threshold']`` is not supplied
    :return:
        one ``CommandResults`` per domain, each carrying the vendor context
        (HelloWorld.Domain), the Domain standard context and a DBotScore;
        the list is then passed to ``return_results``
    :rtype: ``List[CommandResults]``
    """
    # argToList() accepts both a scalar and a CSV/list, so the command can
    # be invoked once for many domains even though the API takes one at a
    # time.
    domains = argToList(args.get('domain'))
    if not domains:
        raise ValueError('domain(s) not specified')

    threshold = int(args.get('threshold', default_threshold))

    command_results: List[CommandResults] = []

    for domain in domains:
        domain_data = client.get_domain_reputation(domain)
        domain_data['domain'] = domain

        # Normalize the WHOIS dates to ISO8601, the format Cortex XSOAR
        # customers and integrations use by default.
        for date_field in ('creation_date', 'expiration_date', 'updated_date'):
            if date_field in domain_data:
                domain_data[date_field] = parse_domain_date(domain_data[date_field])

        # Map the vendor score onto DBotScore
        # (https://xsoar.pan.dev/docs/integrations/dbot):
        # 0 -> unknown, >= threshold -> bad, >= threshold/2 -> suspicious,
        # otherwise -> good.
        reputation = int(domain_data.get('score', 0))
        if reputation == 0:
            score = Common.DBotScore.NONE
        elif reputation >= threshold:
            score = Common.DBotScore.BAD
        elif reputation >= threshold / 2:
            score = Common.DBotScore.SUSPICIOUS
        else:
            score = Common.DBotScore.GOOD

        # The context has three parts: the vendor-specific context
        # (HelloWorld.Domain), the Domain standard context and the
        # DBotScore. See:
        # https://xsoar.pan.dev/docs/integrations/context-standards
        dbot_score = Common.DBotScore(
            indicator=domain,
            integration_name='HelloWorld',
            indicator_type=DBotScoreType.DOMAIN,
            score=score,
            malicious_description=f'Hello World returned reputation {reputation}'
        )

        # Domain standard context, carrying the DBotScore.
        domain_standard_context = Common.Domain(
            domain=domain,
            creation_date=domain_data.get('creation_date', None),
            expiration_date=domain_data.get('expiration_date', None),
            updated_date=domain_data.get('updated_date', None),
            organization=domain_data.get('org', None),
            name_servers=domain_data.get('name_servers', None),
            registrant_name=domain_data.get('name', None),
            registrant_country=domain_data.get('country', None),
            registrar_name=domain_data.get('registrar', None),
            dbot_score=dbot_score
        )

        # Custom markdown so the War Room table gets an explicit title;
        # output key is HelloWorld.Domain keyed on 'domain', with
        # ``indicator`` providing the Domain standard context.
        command_results.append(CommandResults(
            readable_output=tableToMarkdown('Domain', domain_data),
            outputs_prefix='HelloWorld.Domain',
            outputs_key_field='domain',
            outputs=domain_data,
            indicator=domain_standard_context
        ))
    return command_results
def search_alerts_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-search-alerts command: Search alerts in HelloWorld.

    :type client: ``Client``
    :param client: HelloWorld client to use
    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['status']`` alert status. Options are 'ACTIVE' or 'CLOSED'
        ``args['severity']`` alert severity CSV
        ``args['alert_type']`` alert type
        ``args['start_time']`` start time as ISO8601 date or seconds since epoch
        ``args['max_results']`` maximum number of results to return
    :return:
        A ``CommandResults`` object that is then passed to
        ``return_results``, containing the matching alerts
    :rtype: ``CommandResults``
    :raises ValueError: if ``args['severity']`` contains an unknown severity
    """
    status = args.get('status')

    # Validate the severity CSV; default to all severities when not given.
    severities: List[str] = HELLOWORLD_SEVERITIES
    severity = args.get('severity', None)
    if severity:
        severities = severity.split(',')
        if not all(s in HELLOWORLD_SEVERITIES for s in severities):
            # The join is computed outside the f-string: nesting the same
            # quote character inside an f-string expression is a
            # SyntaxError before Python 3.12 (PEP 701).
            allowed_severities = ','.join(HELLOWORLD_SEVERITIES)
            raise ValueError(
                f'severity must be a comma-separated value '
                f'with the following options: {allowed_severities}')

    alert_type = args.get('alert_type')

    # Convert start_time (ISO8601 or epoch) to a datetime via the
    # CommonServerPython helper.
    start_time = arg_to_datetime(
        arg=args.get('start_time'),
        arg_name='start_time',
        required=False
    )

    # Convert max_results to an int via the CommonServerPython helper.
    max_results = arg_to_number(
        arg=args.get('max_results'),
        arg_name='max_results',
        required=False
    )

    # Severity is passed to the API as a CSV.
    alerts = client.search_alerts(
        severity=','.join(severities),
        alert_status=status,
        alert_type=alert_type,
        start_time=int(start_time.timestamp()) if start_time else None,
        max_results=max_results
    )

    # Convert the "created" time from epoch seconds to ISO8601, the format
    # Cortex XSOAR customers and integrations use by default.
    for alert in alerts:
        if 'created' not in alert:
            continue
        created_time_ms = int(alert.get('created', '0')) * 1000
        alert['created'] = timestamp_to_datestring(created_time_ms)

    # No custom markdown here: ``CommandResults`` generates the readable
    # output by default.
    return CommandResults(
        outputs_prefix='HelloWorld.Alert',
        outputs_key_field='alert_id',
        outputs=alerts
    )
def get_alert_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-get-alert command: Returns a HelloWorld alert.

    :type client: ``Client``
    :param client: HelloWorld client to use
    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['alert_id']`` alert ID to return
    :return:
        A ``CommandResults`` object that is then passed to
        ``return_results``, containing the alert
    :rtype: ``CommandResults``
    :raises ValueError: if ``alert_id`` is missing
    """
    alert_id = args.get('alert_id', None)
    if not alert_id:
        raise ValueError('alert_id not specified')

    alert = client.get_alert(alert_id=alert_id)

    # Normalize 'created' from epoch seconds to ISO8601, the format XSOAR
    # content expects; timestamp_to_datestring() works in milliseconds.
    if 'created' in alert:
        alert['created'] = timestamp_to_datestring(int(alert.get('created', '0')) * 1000)

    # tableToMarkdown() renders the alert as a human readable markdown table
    # for the War Room.
    return CommandResults(
        readable_output=tableToMarkdown(f'HelloWorld Alert {alert_id}', alert),
        outputs_prefix='HelloWorld.Alert',
        outputs_key_field='alert_id',
        outputs=alert
    )
def update_alert_status_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-update-alert-status command: Change the status of an alert.

    Changes the status of a HelloWorld alert and returns the updated alert
    information.

    :type client: ``Client``
    :param client: HelloWorld client to use.

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['alert_id']`` is the ID of the alert to update.
        ``args['status']`` is the new status, either ACTIVE or CLOSED.

    :return:
        A ``CommandResults`` object that is then passed to ``return_results``
        and contains the updated alert.
    :rtype: ``CommandResults``
    """
    alert_id = args.get('alert_id')
    if not alert_id:
        raise ValueError('alert_id not specified')

    status = args.get('status')
    if status not in ('ACTIVE', 'CLOSED'):
        raise ValueError('status must be either ACTIVE or CLOSED')

    alert = client.update_alert_status(alert_id, status)

    # Convert the "updated" time from an epoch-seconds timestamp to ISO8601,
    # the format Cortex XSOAR customers and integrations use by default.
    if 'updated' in alert:
        alert['updated'] = timestamp_to_datestring(int(alert.get('updated', '0')) * 1000)

    # tableToMarkdown() (defined in CommonServerPython.py) renders lists and
    # dicts as human readable markdown for the War Room.
    return CommandResults(
        readable_output=tableToMarkdown(f'HelloWorld Alert {alert_id}', alert),
        outputs_prefix='HelloWorld.Alert',
        outputs_key_field='alert_id',
        outputs=alert
    )
def scan_start_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-start-scan command: Start a HelloWorld scan.

    :type client: ``Client``
    :param client: HelloWorld client to use.

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['hostname']`` is the hostname to run the scan on.

    :return:
        A ``CommandResults`` object that is then passed to ``return_results``
        and contains the scan job.
    :rtype: ``CommandResults``
    """
    hostname = args.get('hostname')
    if not hostname:
        raise ValueError('hostname not specified')

    scan = client.scan_start(hostname=hostname)

    # The API does not echo back the hostname the scan was started against,
    # even though it is useful to have in the XSOAR context, so it is added
    # here manually from the command input argument.
    scan['hostname'] = hostname

    started_id = scan.get('scan_id')
    return CommandResults(
        readable_output=f'Started scan {started_id}',
        outputs_prefix='HelloWorld.Scan',
        outputs_key_field='scan_id',
        outputs=scan
    )
def scan_status_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-scan-status command: Return the status of HelloWorld scans.

    :type client: ``Client``
    :param client: HelloWorld client to use.

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['scan_id']`` is a single scan ID or a list of scan IDs.

    :return:
        A ``CommandResults`` object that is then passed to ``return_results``
        and contains the scan status.
    :rtype: ``CommandResults``
    """
    scan_id_list = argToList(args.get('scan_id', []))
    if not scan_id_list:
        raise ValueError('scan_id(s) not specified')

    # Query the status of every requested scan.
    scan_list: List[Dict[str, Any]] = [
        client.scan_status(scan_id=single_id) for single_id in scan_id_list
    ]

    return CommandResults(
        readable_output=tableToMarkdown('Scan status', scan_list),
        outputs_prefix='HelloWorld.Scan',
        outputs_key_field='scan_id',
        outputs=scan_list
    )
def scan_results_command(client: Client, args: Dict[str, Any]) -> Union[Dict[str, Any], CommandResults, List[CommandResults]]:
    """helloworld-scan-results command: Return the results of a HelloWorld scan.

    :type client: ``Client``
    :param client: HelloWorld client to use.

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['scan_id']`` is the scan ID to retrieve results for.
        ``args['format']`` is the result format, either 'file' or 'json'.

    :return:
        A ``CommandResults``-compatible value for ``return_results()`` that
        contains the scan result when the json format is selected, or a dict
        of entries (also compatible with ``return_results()``) that contains
        the output file when the file format is selected.
    :rtype: ``Union[Dict[str, Any], CommandResults, List[CommandResults]]``
    """
    scan_id = args.get('scan_id')
    if not scan_id:
        raise ValueError('scan_id not specified')
    scan_format = args.get('format', 'file')

    # This command supports two result formats: json data mapped to a table,
    # or a file attachment. The "file" format bypasses ``CommandResults`` and
    # builds a raw entry with ``fileResult()`` instead. Either way the value
    # goes back to main(), which calls ``return_results()``. Prefer
    # ``CommandResults`` whenever possible; use the raw format only for
    # special returns such as files.
    results = client.scan_results(scan_id=scan_id)

    if scan_format == 'file':
        return fileResult(
            filename=f'{scan_id}.json',
            data=json.dumps(results, indent=4),
            file_type=entryTypes['entryInfoFile']
        )

    if scan_format == 'json':
        # The scan returns CVE information. CVE is part of the XSOAR context
        # standard, so CVE IDs are extracted and returned as indicators too.
        # See: https://xsoar.pan.dev/docs/integrations/context-standards#cve
        entities = results.get('entities', [])
        cve_indicators: List[Common.CVE] = []
        for entity in entities:
            vulns = entity.get('vulns')
            if isinstance(vulns, list):
                cve_indicators.extend(
                    Common.CVE(id=cve_id, cvss=None, published=None, modified=None, description=None)
                    for cve_id in vulns
                )

        # Every CVE indicator gets its own result: the entities result comes
        # first, followed by one CommandResults per unique CVE.
        command_results: List[CommandResults] = [CommandResults(
            readable_output=tableToMarkdown(f'Scan {scan_id} results', entities),
            outputs_prefix='HelloWorld.Scan',
            outputs_key_field='scan_id',
            outputs=results
        )]
        for cve in set(cve_indicators):  # de-duplicate the indicator list
            command_results.append(CommandResults(
                readable_output=f"CVE {cve}",
                indicator=cve
            ))
        return command_results

    raise ValueError('Incorrect format, must be "json" or "file"')
''' MAIN FUNCTION '''
def main() -> None:
    """Parse the integration parameters and dispatch the requested command.

    Reads configuration via ``demisto.params()``, builds the ``Client``, and
    routes ``demisto.command()`` to the matching command function, passing it
    ``demisto.args()`` and handing the result to ``return_results()``.

    :return: None
    :rtype: ``None``
    """
    params = demisto.params()
    command = demisto.command()

    api_key = params.get('apikey')

    # Build the service API base URL.
    base_url = urljoin(params['url'], '/api/v1')

    # A Client inheriting from BaseClient gets SSL verification handling out
    # of the box; just forward ``verify_certificate`` to its constructor.
    verify_certificate = not params.get('insecure', False)

    # How far back in time the very first fetch should look for incidents.
    first_fetch_time = arg_to_datetime(
        arg=params.get('first_fetch', '3 days'),
        arg_name='First fetch time',
        required=True
    )
    first_fetch_timestamp = int(first_fetch_time.timestamp()) if first_fetch_time else None
    # Type guard only: with required=True the value above is always an int.
    assert isinstance(first_fetch_timestamp, int)

    # BaseClient also handles the system proxy; just forward ``proxy``.
    proxy = params.get('proxy', False)

    # TIP: demisto.debug()/demisto.info() etc. write to the XSOAR server log;
    # the log level is configured on the server.
    # See: https://xsoar.pan.dev/docs/integrations/code-conventions#logging
    demisto.debug(f'Command being called is {demisto.command()}')
    try:
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            headers={'Authorization': f'Bearer {api_key}'},
            proxy=proxy)

        if command == 'test-module':
            # Invoked when the integration's Test button is pressed.
            return_results(test_module(client, first_fetch_timestamp))

        elif command == 'fetch-incidents':
            # Runs on the fetch interval once enabled via the integration
            # settings.
            alert_status = params.get('alert_status', None)
            alert_type = params.get('alert_type', None)
            min_severity = params.get('min_severity', None)

            # Convert the argument to an int, capping it at
            # MAX_INCIDENTS_TO_FETCH when missing or too large.
            max_results = arg_to_number(
                arg=params.get('max_fetch'),
                arg_name='max_fetch',
                required=False
            )
            if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
                max_results = MAX_INCIDENTS_TO_FETCH

            next_run, incidents = fetch_incidents(
                client=client,
                max_results=max_results,
                last_run=demisto.getLastRun(),  # the dict persisted from the previous fetch
                first_fetch_time=first_fetch_timestamp,
                alert_status=alert_status,
                min_severity=min_severity,
                alert_type=alert_type
            )

            # Persist next_run for the next fetch-incidents invocation, then
            # hand XSOAR the list of incidents to create.
            demisto.setLastRun(next_run)
            demisto.incidents(incidents)

        elif command == 'ip':
            ip_threshold = int(params.get('threshold_ip', '65'))
            return_results(ip_reputation_command(client, demisto.args(), ip_threshold))
        elif command == 'domain':
            domain_threshold = int(params.get('threshold_domain', '65'))
            return_results(domain_reputation_command(client, demisto.args(), domain_threshold))
        elif command == 'helloworld-say-hello':
            return_results(say_hello_command(client, demisto.args()))
        elif command == 'helloworld-search-alerts':
            return_results(search_alerts_command(client, demisto.args()))
        elif command == 'helloworld-get-alert':
            return_results(get_alert_command(client, demisto.args()))
        elif command == 'helloworld-update-alert-status':
            return_results(update_alert_status_command(client, demisto.args()))
        elif command == 'helloworld-scan-start':
            return_results(scan_start_command(client, demisto.args()))
        elif command == 'helloworld-scan-status':
            return_results(scan_status_command(client, demisto.args()))
        elif command == 'helloworld-scan-results':
            return_results(scan_results_command(client, demisto.args()))

    # Log the traceback and surface the error to the user.
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
# Entry point: run main() when executed by the XSOAR script runner. The check
# covers '__builtin__' (Python 2) and 'builtins' (Python 3) in addition to
# the usual '__main__'.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| """HelloWorld Integration for Cortex XSOAR (aka Demisto)
This integration is a good example of how you can build a Cortex XSOAR Integration
using Python 3. Please follow the documentation links below and make sure that
your integration follows the Code Conventions and passes the Linting phase.
Developer Documentation: https://xsoar.pan.dev/docs/welcome
Code Conventions: https://xsoar.pan.dev/docs/integrations/code-conventions
Linting: https://xsoar.pan.dev/docs/integrations/linting
When building a Cortex XSOAR integration that is reusable, a lot of effort
must be placed in the design. We recommend to fill a Design Document template,
that allows you to capture Use Cases, Requirements and Inputs/Outputs.
Example Design document for this Integration (HelloWorld):
https://docs.google.com/document/d/1wETtBEKg37PHNU8tYeB56M1LE314ux086z3HFeF_cX0
HelloWorld API
--------------
The HelloWorld API is a simple API that shows a realistic use case for an XSOAR
integration. It's actually a real API that is available to the following URL:
https://soar.mastersofhack.com - if you need an API Key to test it out please
reach out to your Cortex XSOAR contacts.
This API has a few basic functions:
- Alerts: the endpoint returns mocked alerts and allows you to search based on
a number of parameters, such as state (ACTIVE or CLOSED), type, timestamp. It
can also return a single alert by ID. This is used to create new Incidents in
XSOAR by using the ``fetch-incidents`` command, which is by default invoked
every minute.
There is also an endpoint that allows to retrieve additional details about a
specific alert by ID, and one to change the alert status to "CLOSED" once
it has been resolved.
- Reputation (ip and domain): these endpoints return, for an IP and
domain respectively, a WHOIS lookup of the entity as well as a reputation score
(from 0 to 100) that is used to determine whether the entity is malicious. This
endpoint is called by XSOAR reputation commands ``ip`` and ``domain`` that
are run automatically every time an indicator is extracted in XSOAR. As a best
practice of design, it is important to map and document the mapping between
a score in the original API format (0 to 100 in this case) to a score in XSOAR
format (0 to 3). This score is called ``DBotScore``, and is returned in the
context to allow automated handling of indicators based on their reputation.
More information: https://xsoar.pan.dev/docs/integrations/dbot
- Scan: to demonstrate how to run commands that are not returning instant data,
the API provides a scan endpoint that simulates scanning a host and generating
a report after the scan is completed. The API has endpoints to start a scan,
which returns a job ID, poll for the scan status and, if the scan is completed,
retrieve the job results.
This function is used in conjunction of the HelloWorld Scan playbook that uses
the GenericPolling mechanism to implement the job polling loop. The results
can be returned in JSON or attachment file format.
Info on GenericPolling: https://xsoar.pan.dev/docs/playbooks/generic-polling
Please check the HelloWorld Design Document referenced above for details about
the raw API responses as well as the design details for this integration.
This integration also has a ``say-hello`` command for backward compatibility,
that doesn't connect to an API and just returns a ``Hello {name}`` string,
where name is the input value provided.
Integration File Structure
--------------------------
An integration usually consists of the following parts:
- Imports
- Constants
- Client Class
- Helper Functions
- Command Functions
- Main Function
- Entry Point
Imports
-------
Here you can import Python module you need for your integration. If you need
a module that is not part of the default XSOAR Docker images, you can add
a custom one. More details: https://xsoar.pan.dev/docs/integrations/docker
There are also internal imports that are used by XSOAR:
- demistomock (imported as demisto): allows your code to work offline for
testing. The actual ``demisto`` module is provided at runtime when the
code runs in XSOAR.
- CommonServerPython.py: contains a set of helper functions, base classes
and other useful components that will make your integration code easier
to maintain.
- CommonServerUserPython.py: includes a set of user defined commands that
are specific to an XSOAR installation. Do not use it for integrations that
are meant to be shared externally.
These imports are automatically loaded at runtime within the XSOAR script
runner, so you shouldn't modify them
Constants
---------
Usually some constants that do not require user parameters or inputs, such
as the default API entry point for your service, or the maximum numbers of
incidents to fetch every time.
Client Class
------------
We recommend to use a Client class to wrap all the code that needs to interact
with your API. Moreover, we recommend, when possible, to inherit from the
BaseClient class, defined in CommonServerPython.py. This class already handles
a lot of the work, such as system proxy settings, SSL certificate verification
and exception handling for HTTP errors.
Note that the Client class should NOT contain any Cortex XSOAR specific code,
i.e. it shouldn't use anything in the ``demisto`` class (functions such as
``demisto.args()`` or ``demisto.results()`` or even ``return_results`` and
``return_error``.
You will use the Command Functions to handle XSOAR inputs and outputs.
When calling an API, you should use the ``_http.request()`` method and you
can return the raw data to the calling function (usually a Command function).
You should usually have one function for each API endpoint.
Look at the code and the commends of this specific class to better understand
the implementation details.
Helper Functions
----------------
Helper functions are usually used as utility functions that are used by several
command functions throughout your code. For example they map arguments to types
or convert severity formats from integration-specific to XSOAR.
Many helper functions are already defined in ``CommonServerPython.py`` and are
often very handy.
Command Functions
-----------------
Command functions perform the mapping between XSOAR inputs and outputs to the
Client class functions inputs and outputs. As a best practice, they shouldn't
contain calls to ``demisto.args()``, ``demisto.results()``, ``return_error``
and ``demisto.command()`` as those should be handled through the ``main()``
function.
However, in command functions, use ``demisto`` or ``CommonServerPython.py``
artifacts, such as ``demisto.debug()`` or the ``CommandResults`` class and the
``Common.*`` classes.
Usually you will have one command function for every specific XSOAR command
you want to implement in your integration, plus ``test-module``,
``fetch-incidents`` and ``fetch-indicators``(if the latter two are supported
by your integration). Each command function should invoke one specific function
of the Client class.
Command functions, when invoked through an XSOAR command usually return data
using the ``CommandResults`` class, that is then passed to ``return_results()``
in the ``main()`` function.
``return_results()`` is defined in ``CommonServerPython.py`` to return
the data to XSOAR. ``return_results()`` actually wraps ``demisto.results()``.
You should never use ``demisto.results()`` directly.
Sometimes you will need to return values in a format that is not compatible
with ``CommandResults`` (for example files): in that case you must return a
data structure that is then passed to ``return_results()``. (i.e.
check the ``scan_results_command`` function in this file that has the option
to return a file to Cortex XSOAR).
In any case you should never call ``return_results()`` directly from the
command functions.
When you create the ``CommandResults`` object in command functions, you
usually pass some types of data:
- Human Readable: usually in Markdown format. This is what is presented to the
analyst in the War Room. You can use ``tableToMarkdown()``, defined in
``CommonServerPython.py``, to convert lists and dicts in Markdown and pass it
to ``return_results()`` using the ``readable_output`` argument, or the
``return_results()`` function will call ``tableToMarkdown()`` automatically for
you.
- Context Output: this is the machine readable data, JSON based, that XSOAR can
parse and manage in the Playbooks or Incident's War Room. The Context Output
fields should be defined in your integration YML file and is important during
the design phase. Make sure you define the format and follow best practices.
You can use ``demisto-sdk json-to-outputs`` to autogenerate the YML file
outputs section. Context output is passed as the ``outputs`` argument in ``demisto_results()``,
and the prefix (i.e. ``HelloWorld.Alert``) is passed via the ``outputs_prefix``
argument.
More information on Context Outputs, Standards, DBotScore and demisto-sdk:
https://xsoar.pan.dev/docs/integrations/code-conventions#outputs
https://xsoar.pan.dev/docs/integrations/context-and-outputs
https://xsoar.pan.dev/docs/integrations/context-standards
https://xsoar.pan.dev/docs/integrations/dbot
https://github.com/demisto/demisto-sdk/blob/master/demisto_sdk/commands/json_to_outputs/README.md
Also, when you write data in the Context, you want to make sure that if you
return updated information for an entity, to update it and not append to
the list of entities (i.e. in HelloWorld you want to update the status of an
existing ``HelloWorld.Alert`` in the context when you retrieve it, rather than
adding a new one if you already retrieved it). To update data in the Context,
you can define which is the key attribute to use, such as (using the example):
``outputs_key_field='alert_id'``. This means that you are using the ``alert_id``
key to determine whether adding a new entry in the context or updating an
existing one that has the same ID. You can look at the examples to understand
how it works.
More information here:
https://xsoar.pan.dev/docs/integrations/context-and-outputs
https://xsoar.pan.dev/docs/integrations/code-conventions#outputs
https://xsoar.pan.dev/docs/integrations/dt
- Raw Output: this is usually the raw result from your API and is used for
troubleshooting purposes or for invoking your command from Automation Scripts.
If not specified, ``return_results()`` will use the same data as ``outputs``.
Main Function
-------------
The ``main()`` function takes care of reading the integration parameters via
the ``demisto.params()`` function, initializes the Client class and checks the
different options provided to ``demisto.commands()``, to invoke the correct
command function passing to it ``demisto.args()`` and returning the data to
``return_results()``. If implemented, ``main()`` also invokes the function
``fetch_incidents()``with the right parameters and passes the outputs to the
``demisto.incidents()`` function. ``main()`` also catches exceptions and
returns an error message via ``return_error()``.
Entry Point
-----------
This is the integration code entry point. It checks whether the ``__name__``
variable is ``__main__`` , ``__builtin__`` (for Python 2) or ``builtins`` (for
Python 3) and then calls the ``main()`` function. Just keep this convention.
"""
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import json
import urllib3
import dateparser
import traceback
from typing import Any, Dict, Tuple, List, Optional, Union, cast
# Disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'  # ISO8601 with UTC 'Z' suffix, the default date format in XSOAR
MAX_INCIDENTS_TO_FETCH = 50  # upper bound applied to the 'max_fetch' integration parameter
HELLOWORLD_SEVERITIES = ['Low', 'Medium', 'High', 'Critical']  # API severity names, ordered lowest to highest
''' CLIENT CLASS '''
class Client(BaseClient):
    """Client class to interact with the HelloWorld service API.

    Implements the raw API calls only and contains no Demisto logic: it
    should just perform requests and return data. It inherits from
    ``BaseClient`` (defined in CommonServerPython), so most calls go through
    ``_http_request()``, which handles proxy settings, SSL verification, etc.

    No special attributes are defined for this HelloWorld implementation.
    """

    def get_ip_reputation(self, ip: str) -> Dict[str, Any]:
        """Get the IP reputation using the '/ip' API endpoint.

        :type ip: ``str``
        :param ip: IP address to get the reputation for.

        :return: dict containing the IP reputation as returned from the API.
        :rtype: ``Dict[str, Any]``
        """
        return self._http_request(method='GET', url_suffix='/ip', params={'ip': ip})

    def get_domain_reputation(self, domain: str) -> Dict[str, Any]:
        """Get the domain reputation using the '/domain' API endpoint.

        :type domain: ``str``
        :param domain: domain name to get the reputation for.

        :return: dict containing the domain reputation as returned from the API.
        :rtype: ``Dict[str, Any]``
        """
        return self._http_request(method='GET', url_suffix='/domain', params={'domain': domain})

    def search_alerts(self, alert_status: Optional[str], severity: Optional[str],
                      alert_type: Optional[str], max_results: Optional[int],
                      start_time: Optional[int]) -> List[Dict[str, Any]]:
        """Search for HelloWorld alerts using the '/get_alerts' API endpoint.

        All parameters are passed directly to the API as HTTP request
        parameters; only the ones actually provided are forwarded.

        :type alert_status: ``Optional[str]``
        :param alert_status: status of the alert to search for, 'ACTIVE' or 'CLOSED'.

        :type severity: ``Optional[str]``
        :param severity:
            severity of the alert to search for, as comma-separated values.
            Options are: "Low", "Medium", "High", "Critical".

        :type alert_type: ``Optional[str]``
        :param alert_type: type of alerts to search for; no predefined list exists.

        :type max_results: ``Optional[int]``
        :param max_results: maximum number of results to return.

        :type start_time: ``Optional[int]``
        :param start_time: start timestamp (epoch in seconds) for the search.

        :return: list containing the found HelloWorld alerts as dicts.
        :rtype: ``List[Dict[str, Any]]``
        """
        # Keep only the parameters that were actually supplied (truthy),
        # preserving the API's expected parameter names.
        request_params: Dict[str, Any] = {
            name: value
            for name, value in (
                ('alert_status', alert_status),
                ('alert_type', alert_type),
                ('severity', severity),
                ('max_results', max_results),
                ('start_time', start_time),
            )
            if value
        }
        return self._http_request(method='GET', url_suffix='/get_alerts', params=request_params)

    def get_alert(self, alert_id: str) -> Dict[str, Any]:
        """Get a specific HelloWorld alert by ID.

        :type alert_id: ``str``
        :param alert_id: ID of the alert to return.

        :return: dict containing the alert as returned from the API.
        :rtype: ``Dict[str, Any]``
        """
        return self._http_request(method='GET', url_suffix='/get_alert_details',
                                  params={'alert_id': alert_id})

    def update_alert_status(self, alert_id: str, alert_status: str) -> Dict[str, Any]:
        """Change the status of a specific HelloWorld alert.

        :type alert_id: ``str``
        :param alert_id: ID of the alert to update.

        :type alert_status: ``str``
        :param alert_status: new alert status, 'ACTIVE' or 'CLOSED'.

        :return: dict containing the alert as returned from the API.
        :rtype: ``Dict[str, Any]``
        """
        return self._http_request(method='GET', url_suffix='/change_alert_status',
                                  params={'alert_id': alert_id, 'alert_status': alert_status})

    def scan_start(self, hostname: str) -> Dict[str, Any]:
        """Start a HelloWorld scan on a specific hostname.

        :type hostname: ``str``
        :param hostname: hostname of the machine to scan.

        :return: dict containing the scan status as returned from the API.
        :rtype: ``Dict[str, Any]``
        """
        return self._http_request(method='GET', url_suffix='/start_scan',
                                  params={'hostname': hostname})

    def scan_status(self, scan_id: str) -> Dict[str, Any]:
        """Get the status of a HelloWorld scan.

        :type scan_id: ``str``
        :param scan_id: ID of the scan to retrieve the status for.

        :return: dict containing the scan status as returned from the API.
        :rtype: ``Dict[str, Any]``
        """
        return self._http_request(method='GET', url_suffix='/check_scan',
                                  params={'scan_id': scan_id})

    def scan_results(self, scan_id: str) -> Dict[str, Any]:
        """Get the results of a HelloWorld scan.

        :type scan_id: ``str``
        :param scan_id: ID of the scan to retrieve the results for.

        :return: dict containing the scan results as returned from the API.
        :rtype: ``Dict[str, Any]``
        """
        return self._http_request(method='GET', url_suffix='/get_scan_results',
                                  params={'scan_id': scan_id})

    def say_hello(self, name: str) -> str:
        """Return 'Hello {name}'.

        :type name: ``str``
        :param name: name to append to the 'Hello' string.

        :return: string containing 'Hello {name}'.
        :rtype: ``str``
        """
        return f'Hello {name}'
''' HELPER FUNCTIONS '''
def parse_domain_date(domain_date: Union[List[str], str], date_format: str = '%Y-%m-%dT%H:%M:%S.000Z') -> Optional[str]:
    """Convert a WHOIS date value to an ISO8601 string.

    Parses the HelloWorld domain WHOIS date ('YYYY-mm-dd HH:MM:SS') into a
    datetime and formats it back out. If a list with multiple elements is
    given, only the first one is used.

    :type domain_date: ``Union[List[str], str]``
    :param domain_date: the WHOIS date string, or a list of such strings.

    :type date_format: ``str``
    :param date_format: strftime format used for the returned string.

    :return: parsed time in ISO8601 format, or None if it cannot be parsed.
    :rtype: ``Optional[str]``
    """
    # Pick the candidate string: the value itself, or the first list element.
    raw_date: Optional[str] = None
    if isinstance(domain_date, str):
        raw_date = domain_date
    elif isinstance(domain_date, list) and domain_date and isinstance(domain_date[0], str):
        raw_date = domain_date[0]

    if raw_date is not None:
        parsed = dateparser.parse(raw_date)
        if parsed:
            return parsed.strftime(date_format)

    # Any other input (or an unparseable date) yields nothing.
    return None
def convert_to_demisto_severity(severity: str) -> int:
    """Map a HelloWorld severity name to a Cortex XSOAR severity level.

    Converts the HelloWorld alert severity ('Low', 'Medium', 'High',
    'Critical') to the Cortex XSOAR incident severity (1 to 4) used for
    mapping.

    :type severity: ``str``
    :param severity: severity as returned from the HelloWorld API.

    :return: Cortex XSOAR severity (1 to 4).
    :rtype: ``int``
    """
    # The mapping is one-to-one here, but keeping it in a dedicated,
    # documented function is recommended since real integrations often need
    # more complex mappings. A missing key raises KeyError, matching the
    # closed set of severities the API documents.
    severity_map = {
        'Low': IncidentSeverity.LOW,
        'Medium': IncidentSeverity.MEDIUM,
        'High': IncidentSeverity.HIGH,
        'Critical': IncidentSeverity.CRITICAL,
    }
    return severity_map[severity]
''' COMMAND FUNCTIONS '''
def test_module(client: Client, first_fetch_time: int) -> str:
    """Test API connectivity and authentication.

    Returning 'ok' indicates that the connection to the service succeeded
    and the integration works as expected; any exception raised here fails
    the test.

    :type client: ``Client``
    :param client: HelloWorld client to use.

    :type first_fetch_time: ``int``
    :param first_fetch_time: first-fetch timestamp used as the search start time.

    :return: 'ok' if the test passed; anything else fails the test.
    :rtype: ``str``
    """
    # TIP: the Client class raises the exceptions, and on failure the
    # exception text is shown in the Cortex XSOAR UI. Catch the specific
    # errors you care about (e.g. auth failures) and return a more readable
    # message; anything different from 'ok' is printed as an error.
    try:
        client.search_alerts(max_results=1, start_time=first_fetch_time, alert_status=None, alert_type=None,
                             severity=None)
    except DemistoException as e:
        if 'Forbidden' in str(e):
            return 'Authorization Error: make sure API Key is correctly set'
        raise
    return 'ok'
def say_hello_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-say-hello command: Return 'Hello {somename}'.

    :type client: ``Client``
    :param client: HelloWorld client to use.

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['name']`` is used as the input name.

    :return:
        A ``CommandResults`` object that is then passed to ``return_results``
        and contains the hello world message.
    :rtype: ``CommandResults``
    """
    # TIP: 'name' is declared mandatory in HelloWorld.yml, so XSOAR checks
    # it before this code runs; the null check here is defensive only.
    name = args.get('name')
    if not name:
        raise ValueError('name not specified')

    # Call the Client function to get the raw response.
    greeting = client.say_hello(name)

    # Human readable output in markdown format
    # (https://www.markdownguide.org/basic-syntax/). More complex output can
    # be formatted with ``tableToMarkDown()`` from ``CommonServerPython.py``.
    # Passing ``readable_output`` explicitly skips the default
    # ``tableToMarkdown()`` rendering ``CommandResults`` would apply.
    # More information about Context:
    # https://xsoar.pan.dev/docs/integrations/context-and-outputs
    return CommandResults(
        readable_output=f'## {greeting}',
        outputs_prefix='hello',
        outputs_key_field='',
        outputs=greeting
    )
def fetch_incidents(client: Client, max_results: int, last_run: Dict[str, int],
                    first_fetch_time: Optional[int], alert_status: Optional[str],
                    min_severity: str, alert_type: Optional[str]
                    ) -> Tuple[Dict[str, int], List[dict]]:
    """This function retrieves new alerts every interval (default is 1 minute).

    This function has to implement the logic of making sure that incidents are
    fetched only once and no incidents are missed. By default it's invoked by
    XSOAR every minute. It will use last_run to save the timestamp of the last
    incident it processed. If last_run is not provided, it should use the
    integration parameter first_fetch_time to determine when to start fetching
    the first time.

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type max_results: ``int``
    :param max_results: Maximum numbers of incidents per fetch

    :type last_run: ``Optional[Dict[str, int]]``
    :param last_run:
        A dict with a key containing the latest incident created time we got
        from last fetch

    :type first_fetch_time: ``Optional[int]``
    :param first_fetch_time:
        If last_run is None (first time we are fetching), it contains
        the timestamp on when to start fetching incidents

    :type alert_status: ``Optional[str]``
    :param alert_status:
        status of the alert to search for. Options are: 'ACTIVE'
        or 'CLOSED'

    :type min_severity: ``str``
    :param min_severity:
        minimum severity of the alert to search for.
        Options are: "Low", "Medium", "High", "Critical"

    :type alert_type: ``Optional[str]``
    :param alert_type:
        type of alerts to search for. There is no list of predefined types

    :return:
        A tuple containing two elements:
            next_run (``Dict[str, int]``): Contains the timestamp that will be
                    used in ``last_run`` on the next fetch.
            incidents (``List[dict]``): List of incidents that will be created in XSOAR

    :rtype: ``Tuple[Dict[str, int], List[dict]]``
    """
    # Get the last fetch time, if exists
    # last_run is a dict with a single key, called last_fetch
    last_fetch = last_run.get('last_fetch', None)
    # Handle first fetch time
    if last_fetch is None:
        # if missing, use what provided via first_fetch_time
        last_fetch = first_fetch_time
    else:
        # otherwise use the stored last fetch
        last_fetch = int(last_fetch)
    # for type checking, making sure that latest_created_time is int
    latest_created_time = cast(int, last_fetch)
    # Initialize an empty list of incidents to return
    # Each incident is a dict with a string as a key
    incidents: List[Dict[str, Any]] = []
    # Get the CSV list of severities from min_severity: everything at or above
    # the requested minimum (HELLOWORLD_SEVERITIES is ordered low -> high).
    severity = ','.join(HELLOWORLD_SEVERITIES[HELLOWORLD_SEVERITIES.index(min_severity):])
    alerts = client.search_alerts(
        alert_type=alert_type,
        alert_status=alert_status,
        max_results=max_results,
        start_time=last_fetch,
        severity=severity
    )
    for alert in alerts:
        # If no created_time set is as epoch (0). We use time in ms so we must
        # convert it from the HelloWorld API response
        incident_created_time = int(alert.get('created', '0'))
        incident_created_time_ms = incident_created_time * 1000
        # to prevent duplicates, we are only adding incidents with creation_time > last fetched incident
        if last_fetch:
            if incident_created_time <= last_fetch:
                continue
        # If no name is present it will throw an exception
        incident_name = alert['name']
        # INTEGRATION DEVELOPER TIP
        # The incident dict is initialized with a few mandatory fields:
        # name: the incident name
        # occurred: the time on when the incident occurred, in ISO8601 format
        # we use timestamp_to_datestring() from CommonServerPython.py to
        # handle the conversion.
        # rawJSON: everything else is packed in a string via json.dumps()
        # and is included in rawJSON. It will be used later for classification
        # and mapping inside XSOAR.
        # severity: it's not mandatory, but is recommended. It must be
        # converted to XSOAR specific severity (int 1 to 4)
        # Note that there are other fields commented out here. You can do some
        # mapping of fields (either out of the box fields, like "details" and
        # "type") or custom fields (like "helloworldid") directly here in the
        # code, or they can be handled in the classification and mapping phase.
        # In either case customers can override them. We leave the values
        # commented out here, but you can use them if you want.
        incident = {
            'name': incident_name,
            # 'details': alert['name'],
            'occurred': timestamp_to_datestring(incident_created_time_ms),
            'rawJSON': json.dumps(alert),
            # 'type': 'Hello World Alert',  # Map to a specific XSOAR incident Type
            'severity': convert_to_demisto_severity(alert.get('severity', 'Low')),
            # 'CustomFields': {  # Map specific XSOAR Custom Fields
            #     'helloworldid': alert.get('alert_id'),
            #     'helloworldstatus': alert.get('alert_status'),
            #     'helloworldtype': alert.get('alert_type')
            # }
        }
        incidents.append(incident)
        # Update last run and add incident if the incident is newer than last fetch
        if incident_created_time > latest_created_time:
            latest_created_time = incident_created_time
    # Save the next_run as a dict with the last_fetch key to be stored
    next_run = {'last_fetch': latest_created_time}
    return next_run, incidents
def ip_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> List[CommandResults]:
    """ip command: Returns IP reputation for a list of IPs

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['ip']`` is a list of IPs or a single IP
        ``args['threshold']`` threshold to determine whether an IP is malicious

    :type default_threshold: ``int``
    :param default_threshold:
        default threshold to determine whether an IP is malicious
        if threshold is not specified in the XSOAR arguments

    :return:
        A list of ``CommandResults`` objects (one per IP) that is then passed
        to ``return_results``, that contains IPs

    :rtype: ``List[CommandResults]``
    """
    # INTEGRATION DEVELOPER TIP
    # Reputation commands usually support multiple inputs (i.e. arrays), so
    # they can be invoked once in XSOAR. In this case the API supports a single
    # IP at a time, so we will cycle this for all the members of the array.
    # We use argToList(), implemented in CommonServerPython.py to automatically
    # return a list of a single element even if the provided input is a scalar.
    ips = argToList(args.get('ip'))
    if len(ips) == 0:
        raise ValueError('IP(s) not specified')
    # It's a good practice to document the threshold you use to determine
    # if a score is malicious in your integration documentation.
    # Thresholds should also be possible to override, as in this case,
    # where threshold is an actual argument of the command.
    threshold = int(args.get('threshold', default_threshold))
    # Initialize an empty list of CommandResults to return
    # each CommandResult will contain context standard for IP
    command_results: List[CommandResults] = []
    for ip in ips:
        ip_data = client.get_ip_reputation(ip)
        ip_data['ip'] = ip
        # HelloWorld score to XSOAR reputation mapping
        # See: https://xsoar.pan.dev/docs/integrations/dbot
        # We are using Common.DBotScore as macros to simplify
        # the mapping.
        score = 0
        reputation = int(ip_data.get('score', 0))
        if reputation == 0:
            score = Common.DBotScore.NONE  # unknown
        elif reputation >= threshold:
            score = Common.DBotScore.BAD  # bad
        elif reputation >= threshold / 2:
            score = Common.DBotScore.SUSPICIOUS  # suspicious
        else:
            score = Common.DBotScore.GOOD  # good
        # The context is bigger here than other commands, as it consists in 3
        # parts: the vendor-specific context (HelloWorld), the standard-context
        # (IP) and the DBotScore.
        # More information:
        # https://xsoar.pan.dev/docs/integrations/context-and-outputs
        # https://xsoar.pan.dev/docs/integrations/context-standards
        # https://xsoar.pan.dev/docs/integrations/dbot
        # Also check the HelloWorld Design Document
        # Create the DBotScore structure first using the Common.DBotScore class.
        dbot_score = Common.DBotScore(
            indicator=ip,
            indicator_type=DBotScoreType.IP,
            integration_name='HelloWorld',
            score=score,
            malicious_description=f'Hello World returned reputation {reputation}'
        )
        # Create the IP Standard Context structure using Common.IP and add
        # dbot_score to it.
        ip_standard_context = Common.IP(
            ip=ip,
            asn=ip_data.get('asn'),
            dbot_score=dbot_score
        )
        # INTEGRATION DEVELOPER TIP
        # In the integration specific Context output (HelloWorld.IP) in this
        # example you want to provide a lot of information as it can be used
        # programmatically from within Cortex XSOAR in playbooks and commands.
        # On the other hand, this API is way to verbose, so we want to select
        # only certain keys to be returned in order not to clog the context
        # with useless information. What to actually return in the context and
        # to define as a command output is subject to design considerations.
        # INTEGRATION DEVELOPER TIP
        # To generate the Context Outputs on the YML use ``demisto-sdk``'s
        # ``json-to-outputs`` option.
        # Define which fields we want to exclude from the context output as
        # they are too verbose.
        ip_context_excluded_fields = ['objects', 'nir']
        ip_data = {k: ip_data[k] for k in ip_data if k not in ip_context_excluded_fields}
        # In this case we want to use an custom markdown to specify the table title,
        # but otherwise ``CommandResults()`` will call ``tableToMarkdown()``
        # automatically
        readable_output = tableToMarkdown('IP', ip_data)
        # INTEGRATION DEVELOPER TIP
        # The output key will be ``HelloWorld.IP``, using ``ip`` as the key field.
        # ``indicator`` is used to provide the context standard (IP)
        command_results.append(CommandResults(
            readable_output=readable_output,
            outputs_prefix='HelloWorld.IP',
            outputs_key_field='ip',
            outputs=ip_data,
            indicator=ip_standard_context
        ))
    return command_results
def domain_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> List[CommandResults]:
    """domain command: Returns domain reputation for a list of domains

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['domain']`` list of domains or a single domain
        ``args['threshold']`` threshold to determine whether a domain is malicious

    :type default_threshold: ``int``
    :param default_threshold:
        default threshold to determine whether a domain is malicious
        if threshold is not specified in the XSOAR arguments

    :return:
        A list of ``CommandResults`` objects (one per domain) that is then
        passed to ``return_results``, that contains Domains

    :rtype: ``List[CommandResults]``
    """
    # INTEGRATION DEVELOPER TIP
    # Reputation commands usually support multiple inputs (i.e. arrays), so
    # they can be invoked once in XSOAR. In this case the API supports a single
    # domain at a time, so we will cycle this for all the members of the array.
    # We use argToList(), implemented in CommonServerPython.py to automatically
    # return a list of a single element even if the provided input is a scalar.
    domains = argToList(args.get('domain'))
    if len(domains) == 0:
        raise ValueError('domain(s) not specified')
    threshold = int(args.get('threshold', default_threshold))
    # Initialize an empty list of CommandResults to return,
    # each CommandResult will contain context standard for Domain
    command_results: List[CommandResults] = []
    for domain in domains:
        domain_data = client.get_domain_reputation(domain)
        domain_data['domain'] = domain
        # INTEGRATION DEVELOPER TIP
        # We want to convert the dates to ISO8601 as
        # Cortex XSOAR customers and integrations use this format by default
        if 'creation_date' in domain_data:
            domain_data['creation_date'] = parse_domain_date(domain_data['creation_date'])
        if 'expiration_date' in domain_data:
            domain_data['expiration_date'] = parse_domain_date(domain_data['expiration_date'])
        if 'updated_date' in domain_data:
            domain_data['updated_date'] = parse_domain_date(domain_data['updated_date'])
        # HelloWorld score to XSOAR reputation mapping
        # See: https://xsoar.pan.dev/docs/integrations/dbot
        # We are using Common.DBotScore as macros to simplify
        # the mapping.
        score = 0
        reputation = int(domain_data.get('score', 0))
        if reputation == 0:
            score = Common.DBotScore.NONE  # unknown
        elif reputation >= threshold:
            score = Common.DBotScore.BAD  # bad
        elif reputation >= threshold / 2:
            score = Common.DBotScore.SUSPICIOUS  # suspicious
        else:
            score = Common.DBotScore.GOOD  # good
        # INTEGRATION DEVELOPER TIP
        # The context is bigger here than other commands, as it consists in 3
        # parts: the vendor-specific context (HelloWorld), the standard-context
        # (Domain) and the DBotScore.
        # More information:
        # https://xsoar.pan.dev/docs/integrations/context-and-outputs
        # https://xsoar.pan.dev/docs/integrations/context-standards
        # https://xsoar.pan.dev/docs/integrations/dbot
        # Also check the sample Design Document
        dbot_score = Common.DBotScore(
            indicator=domain,
            integration_name='HelloWorld',
            indicator_type=DBotScoreType.DOMAIN,
            score=score,
            malicious_description=f'Hello World returned reputation {reputation}'
        )
        # Create the Domain Standard Context structure using Common.Domain and
        # add dbot_score to it.
        domain_standard_context = Common.Domain(
            domain=domain,
            creation_date=domain_data.get('creation_date', None),
            expiration_date=domain_data.get('expiration_date', None),
            updated_date=domain_data.get('updated_date', None),
            organization=domain_data.get('org', None),
            name_servers=domain_data.get('name_servers', None),
            registrant_name=domain_data.get('name', None),
            registrant_country=domain_data.get('country', None),
            registrar_name=domain_data.get('registrar', None),
            dbot_score=dbot_score
        )
        # In this case we want to use an custom markdown to specify the table title,
        # but otherwise ``CommandResults()`` will call ``tableToMarkdown()``
        # automatically
        readable_output = tableToMarkdown('Domain', domain_data)
        # INTEGRATION DEVELOPER TIP
        # The output key will be ``HelloWorld.Domain``, using ``domain`` as the key
        # field.
        # ``indicator`` is used to provide the context standard (Domain)
        command_results.append(CommandResults(
            readable_output=readable_output,
            outputs_prefix='HelloWorld.Domain',
            outputs_key_field='domain',
            outputs=domain_data,
            indicator=domain_standard_context
        ))
    return command_results
def search_alerts_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-search-alerts command: Search alerts in HelloWorld

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['status']`` alert status. Options are 'ACTIVE' or 'CLOSED'
        ``args['severity']`` alert severity CSV
        ``args['alert_type']`` alert type
        ``args['start_time']`` start time as ISO8601 date or seconds since epoch
        ``args['max_results']`` maximum number of results to return

    :return:
        A ``CommandResults`` object that is then passed to ``return_results``,
        that contains alerts

    :rtype: ``CommandResults``
    """
    alert_status = args.get('status')

    # Validate the severity CSV against the allowed values; when the argument
    # is absent, search across all severities.
    requested_severity = args.get('severity', None)
    if requested_severity:
        severity_list = requested_severity.split(',')
        unknown = [s for s in severity_list if s not in HELLOWORLD_SEVERITIES]
        if unknown:
            raise ValueError(
                f'severity must be a comma-separated value '
                f'with the following options: {",".join(HELLOWORLD_SEVERITIES)}')
    else:
        severity_list = HELLOWORLD_SEVERITIES

    # Convert the optional start time / result cap via the helper functions.
    start_dt = arg_to_datetime(
        arg=args.get('start_time'),
        arg_name='start_time',
        required=False
    )
    result_cap = arg_to_number(
        arg=args.get('max_results'),
        arg_name='max_results',
        required=False
    )

    # The API expects severity as a CSV and start time as epoch seconds.
    alerts = client.search_alerts(
        severity=','.join(severity_list),
        alert_status=alert_status,
        alert_type=args.get('alert_type'),
        start_time=int(start_dt.timestamp()) if start_dt else None,
        max_results=result_cap
    )

    # Normalize "created" from epoch seconds to ISO8601, which is what Cortex
    # XSOAR customers and integrations use by default.
    for alert in alerts:
        if 'created' in alert:
            alert['created'] = timestamp_to_datestring(int(alert.get('created', '0')) * 1000)

    # No custom markdown here; ``CommandResults`` generates it by default.
    return CommandResults(
        outputs_prefix='HelloWorld.Alert',
        outputs_key_field='alert_id',
        outputs=alerts
    )
def get_alert_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-get-alert command: Returns a HelloWorld alert

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['alert_id']`` alert ID to return

    :return:
        A ``CommandResults`` object that is then passed to ``return_results``,
        that contains an alert

    :rtype: ``CommandResults``
    """
    alert_id = args.get('alert_id')
    if not alert_id:
        raise ValueError('alert_id not specified')

    alert = client.get_alert(alert_id=alert_id)

    # Normalize "created" from epoch seconds to ISO8601 (the format used by
    # Cortex XSOAR customers and integrations by default).
    if 'created' in alert:
        alert['created'] = timestamp_to_datestring(int(alert.get('created', '0')) * 1000)

    # tableToMarkdown() (from CommonServerPython.py) renders the alert dict as
    # a human readable markdown table.
    return CommandResults(
        readable_output=tableToMarkdown(f'HelloWorld Alert {alert_id}', alert),
        outputs_prefix='HelloWorld.Alert',
        outputs_key_field='alert_id',
        outputs=alert
    )
def update_alert_status_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-update-alert-status command: Changes the status of an alert

    Changes the status of a HelloWorld alert and returns the updated alert info

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['alert_id']`` alert ID to update
        ``args['status']`` new status, either ACTIVE or CLOSED

    :return:
        A ``CommandResults`` object that is then passed to ``return_results``,
        that contains an updated alert

    :rtype: ``CommandResults``
    """
    # Validate inputs before touching the API.
    alert_id = args.get('alert_id')
    if not alert_id:
        raise ValueError('alert_id not specified')
    new_status = args.get('status')
    if new_status not in ('ACTIVE', 'CLOSED'):
        raise ValueError('status must be either ACTIVE or CLOSED')

    alert = client.update_alert_status(alert_id, new_status)

    # Normalize "updated" from epoch seconds to ISO8601 (the format used by
    # Cortex XSOAR customers and integrations by default).
    if 'updated' in alert:
        alert['updated'] = timestamp_to_datestring(int(alert.get('updated', '0')) * 1000)

    # Render the updated alert as a markdown table for the war room.
    return CommandResults(
        readable_output=tableToMarkdown(f'HelloWorld Alert {alert_id}', alert),
        outputs_prefix='HelloWorld.Alert',
        outputs_key_field='alert_id',
        outputs=alert
    )
def scan_start_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-start-scan command: Starts a HelloWorld scan

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['hostname']`` hostname to run the scan on

    :return:
        A ``CommandResults`` object that is then passed to ``return_results``,
        that contains a scan job

    :rtype: ``CommandResults``
    """
    target_host = args.get('hostname')
    if not target_host:
        raise ValueError('hostname not specified')

    scan = client.scan_start(hostname=target_host)

    # The API response does not echo back the hostname the scan was started
    # against, so we inject the input argument into the context ourselves.
    scan['hostname'] = target_host

    return CommandResults(
        readable_output=f'Started scan {scan.get("scan_id")}',
        outputs_prefix='HelloWorld.Scan',
        outputs_key_field='scan_id',
        outputs=scan
    )
def scan_status_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """helloworld-scan-status command: Returns status for HelloWorld scans

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['scan_id']`` list of scan IDs or single scan ID

    :return:
        A ``CommandResults`` object that is then passed to ``return_results``,
        that contains a scan status

    :rtype: ``CommandResults``
    """
    # argToList() accepts both a scalar and a CSV/list input.
    scan_ids = argToList(args.get('scan_id', []))
    if not scan_ids:
        raise ValueError('scan_id(s) not specified')

    # Query the API once per scan ID and collect the statuses.
    statuses: List[Dict[str, Any]] = [client.scan_status(scan_id=sid) for sid in scan_ids]

    return CommandResults(
        readable_output=tableToMarkdown('Scan status', statuses),
        outputs_prefix='HelloWorld.Scan',
        outputs_key_field='scan_id',
        outputs=statuses
    )
def scan_results_command(client: Client, args: Dict[str, Any]) -> Union[Dict[str, Any], CommandResults, List[CommandResults]]:
    """helloworld-scan-results command: Returns results for a HelloWorld scan

    :type client: ``Client``
    :param client: HelloWorld client to use

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['scan_id']`` scan ID to retrieve results
        ``args['format']`` format of the results. Options are 'file' or 'json'

    :return:
        A list of ``CommandResults`` compatible to return ``return_results()``,
        that contains a scan result when json format is selected, or
        A Dict of entries also compatible to ``return_results()`` that
        contains the output file when file format is selected.

    :rtype: ``Union[Dict[str, Any], CommandResults, List[CommandResults]]``
    """
    scan_id = args.get('scan_id', None)
    if not scan_id:
        raise ValueError('scan_id not specified')
    # 'file' is the default output format when the argument is omitted.
    scan_format = args.get('format', 'file')
    # INTEGRATION DEVELOPER TIP
    # This function supports returning data in multiple formats, either in a json
    # format that is then mapped to a table, or as a file attachment.
    # In this case, if the format is "file", the return value is different and
    # uses a raw format and ``fileResult()`` directly instead of
    # ``CommandResults``. In either case you should return data to main and
    # call ``return_results()`` from there.
    # Always use ``CommandResults`` when possible but, if you need to return
    # anything special like a file, you can use this raw format.
    results = client.scan_results(scan_id=scan_id)
    if scan_format == 'file':
        # Attach the raw scan results as a JSON file entry in the war room.
        return (
            fileResult(
                filename=f'{scan_id}.json',
                data=json.dumps(results, indent=4),
                file_type=entryTypes['entryInfoFile']
            )
        )
    elif scan_format == 'json':
        # This scan returns CVE information. CVE is also part of the XSOAR
        # context standard, so we must extract CVE IDs and return them also.
        # See: https://xsoar.pan.dev/docs/integrations/context-standards#cve
        cves: List[Common.CVE] = []
        command_results: List[CommandResults] = []
        entities = results.get('entities', [])
        for e in entities:
            if 'vulns' in e.keys() and isinstance(e['vulns'], list):
                cves.extend([Common.CVE(id=c, cvss=None, published=None, modified=None, description=None) for c in e['vulns']])
        # INTEGRATION DEVELOPER TIP
        # We want to provide a unique result for every CVE indicator.
        # Since every entity may contain several CVE indicators,
        # we will split the entities result and CVE indicator results.
        readable_output = tableToMarkdown(f'Scan {scan_id} results', entities)
        command_results.append(CommandResults(
            readable_output=readable_output,
            outputs_prefix='HelloWorld.Scan',
            outputs_key_field='scan_id',
            outputs=results
        ))
        cves = list(set(cves))  # make the indicator list unique
        for cve in cves:
            command_results.append(CommandResults(
                readable_output=f"CVE {cve}",
                indicator=cve
            ))
        return command_results
    else:
        raise ValueError('Incorrect format, must be "json" or "file"')
''' MAIN FUNCTION '''
def main() -> None:
    """main function, parses params and runs command functions

    Reads the integration instance parameters, builds the API client and
    dispatches to the matching command implementation.

    :return: None
    :rtype: ``None``
    """
    api_key = demisto.params().get('apikey')
    # get the service API url
    base_url = urljoin(demisto.params()['url'], '/api/v1')
    # if your Client class inherits from BaseClient, SSL verification is
    # handled out of the box by it, just pass ``verify_certificate`` to
    # the Client constructor
    verify_certificate = not demisto.params().get('insecure', False)
    # How much time before the first fetch to retrieve incidents
    first_fetch_time = arg_to_datetime(
        arg=demisto.params().get('first_fetch', '3 days'),
        arg_name='First fetch time',
        required=True
    )
    first_fetch_timestamp = int(first_fetch_time.timestamp()) if first_fetch_time else None
    # Using assert as a type guard (since first_fetch_time is always an int when required=True)
    assert isinstance(first_fetch_timestamp, int)
    # if your Client class inherits from BaseClient, system proxy is handled
    # out of the box by it, just pass ``proxy`` to the Client constructor
    proxy = demisto.params().get('proxy', False)
    # INTEGRATION DEVELOPER TIP
    # You can use functions such as ``demisto.debug()``, ``demisto.info()``,
    # etc. to print information in the XSOAR server log. You can set the log
    # level on the server configuration
    # See: https://xsoar.pan.dev/docs/integrations/code-conventions#logging
    demisto.debug(f'Command being called is {demisto.command()}')
    try:
        headers = {
            'Authorization': f'Bearer {api_key}'
        }
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            headers=headers,
            proxy=proxy)
        if demisto.command() == 'test-module':
            # This is the call made when pressing the integration Test button.
            result = test_module(client, first_fetch_timestamp)
            return_results(result)
        elif demisto.command() == 'fetch-incidents':
            # Set and define the fetch incidents command to run after activated via integration settings.
            alert_status = demisto.params().get('alert_status', None)
            alert_type = demisto.params().get('alert_type', None)
            min_severity = demisto.params().get('min_severity', None)
            # Convert the argument to an int using helper function or set to MAX_INCIDENTS_TO_FETCH
            max_results = arg_to_number(
                arg=demisto.params().get('max_fetch'),
                arg_name='max_fetch',
                required=False
            )
            if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
                max_results = MAX_INCIDENTS_TO_FETCH
            next_run, incidents = fetch_incidents(
                client=client,
                max_results=max_results,
                last_run=demisto.getLastRun(),  # getLastRun() gets the last run dict
                first_fetch_time=first_fetch_timestamp,
                alert_status=alert_status,
                min_severity=min_severity,
                alert_type=alert_type
            )
            # saves next_run for the time fetch-incidents is invoked
            demisto.setLastRun(next_run)
            # fetch-incidents calls ``demisto.incidents()`` to provide the list
            # of incidents to create
            demisto.incidents(incidents)
        elif demisto.command() == 'ip':
            default_threshold_ip = int(demisto.params().get('threshold_ip', '65'))
            return_results(ip_reputation_command(client, demisto.args(), default_threshold_ip))
        elif demisto.command() == 'domain':
            default_threshold_domain = int(demisto.params().get('threshold_domain', '65'))
            return_results(domain_reputation_command(client, demisto.args(), default_threshold_domain))
        elif demisto.command() == 'helloworld-say-hello':
            return_results(say_hello_command(client, demisto.args()))
        elif demisto.command() == 'helloworld-search-alerts':
            return_results(search_alerts_command(client, demisto.args()))
        elif demisto.command() == 'helloworld-get-alert':
            return_results(get_alert_command(client, demisto.args()))
        elif demisto.command() == 'helloworld-update-alert-status':
            return_results(update_alert_status_command(client, demisto.args()))
        elif demisto.command() == 'helloworld-scan-start':
            return_results(scan_start_command(client, demisto.args()))
        elif demisto.command() == 'helloworld-scan-status':
            return_results(scan_status_command(client, demisto.args()))
        elif demisto.command() == 'helloworld-scan-results':
            return_results(scan_results_command(client, demisto.args()))
    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
# Run main() only when executed as a script; the extra module names cover the
# environments XSOAR runs integration code under.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
|
from datetime import datetime, timedelta
import requests
from decouple import config
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from .models import Socio
class ModelTest(TestCase):
    """Tests for the Socio model: creation, e-mail notification, date
    arithmetic and the Cheers shop integration endpoints."""

    def setUp(self):
        # Baseline member: active (is_socio=True) with a membership that
        # expires 40 days from now.
        Socio(
            user=User.objects.create_user(
                username='00000000',
                password='000000'
            ),
            nome='João de Souza',
            apelido='João',
            whatsapp='(86) 9 9123-4567',
            cpf='068.008.773-79',
            rg='123456789',
            data_nascimento='2000-01-01',
            data_inicio=timezone.now(),
            data_fim=timezone.now() + timedelta(days=40),
            is_socio=True,
            stripe_customer_id='cus_00000000',).save()

    def test_notificar_email(self):
        """notificar() with metodo='email' reports that an email is sent."""
        socio = Socio.objects.create(
            user=User.objects.create_user(
                username='12345678',
                password='123456',
            ),
            nome='Fulano',
            stripe_customer_id='cus_123456789',
        )
        notificar = socio.notificar(metodo='email', mensagem='teste')
        self.assertEqual(notificar, 'Enviando email...')

    def test_datetime(self):
        """Exercise the period-end clamping logic (first-semester cutoff).

        NOTE(review): this test performs the date arithmetic but asserts
        nothing — consider adding assertions on current_period_end.
        """
        current_period_end = datetime(
            2022, 6, 30, 23, 59, 59
        )
        if current_period_end - datetime.now() > timedelta(days=30):
            if datetime.now().month < 7:
                if current_period_end.month > 6:
                    current_period_end = datetime(
                        datetime.now().year, 6, 30, 23, 59, 59
                    )

    def test_adicionar_socio_cheers(self):
        """Register an active member in the Cheers shop via its HTTP API."""
        socio: Socio = Socio.objects.get(user__username='00000000')
        if socio.data_fim - timezone.now().date() > timedelta(days=30) and socio.is_socio:
            url = 'https://cheersshop.com.br/socio/adicionar'
            obj = {
                "nome": socio.nome,
                "email": socio.email,
                "telefone": socio.whatsapp,
                "matricula": socio.matricula,
                "observacao": "",
                "cpf": socio.cpf,
                "data_fim_plano": socio.data_fim,
                "vendedor": "1874"
            }
            # BUGFIX: use double quotes inside the single-quoted f-string;
            # f'...{config('X')}...' is a SyntaxError on Python < 3.12
            # (quote reuse in f-strings only arrived with PEP 701).
            response = requests.post(url, data=obj, headers={
                'Authorization': f'Bearer {config("CHEERS_TOKEN")}'})
            self.assertEqual(response.status_code, 200)

    def test_adicionar_coupom_cheers(self):
        """Create a single-use discount coupon for an active member."""
        socio: Socio = Socio.objects.get(user__username='00000000')
        if socio.is_socio:
            url = 'https://cheersshop.com.br/codigo'
            obj = {
                "nome": socio.cpf,
                "uso": 1,
                "ativo": True,
                # Athletes get a larger discount.
                "desconto_reais": 70 if socio.is_atleta else 65,
                "maximo_usuario": "1",
                "quantidade": "1",
                "usuario": 192061,
                "vendedor": "1874",
            }
            # BUGFIX: same quote-nesting fix as above.
            response = requests.post(url, data=obj, headers={
                'Authorization': f'Bearer {config("CHEERS_TOKEN")}'})
            self.assertEqual(response.json()['status'], 'Success')
| from datetime import datetime, timedelta
import requests
from decouple import config
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from .models import Socio
class ModelTest(TestCase):
    """Tests for the Socio model: creation, e-mail notification, date
    arithmetic and the Cheers shop integration endpoints."""
    def setUp(self):
        # Baseline member: active (is_socio=True) with a membership that
        # expires 40 days from now.
        Socio(
            user=User.objects.create_user(
                username='00000000',
                password='000000'
            ),
            nome='João de Souza',
            apelido='João',
            whatsapp='(86) 9 9123-4567',
            cpf='068.008.773-79',
            rg='123456789',
            data_nascimento='2000-01-01',
            data_inicio=timezone.now(),
            data_fim=timezone.now() + timedelta(days=40),
            is_socio=True,
            stripe_customer_id='cus_00000000',).save()
    def test_notificar_email(self):
        """notificar() with metodo='email' reports that an email is sent."""
        socio = Socio.objects.create(
            user=User.objects.create_user(
                username='12345678',
                password='123456',
            ),
            nome='Fulano',
            stripe_customer_id='cus_123456789',
        )
        notificar = socio.notificar(metodo='email', mensagem='teste')
        self.assertEqual(notificar, 'Enviando email...')
    def test_datetime(self):
        """Exercise the period-end clamping logic (first-semester cutoff).

        NOTE(review): this test performs the date arithmetic but asserts
        nothing — consider adding assertions on current_period_end.
        """
        current_period_end = datetime(
            2022, 6, 30, 23, 59, 59
        )
        if current_period_end - datetime.now() > timedelta(days=30):
            if datetime.now().month < 7:
                if current_period_end.month > 6:
                    current_period_end = datetime(
                        datetime.now().year, 6, 30, 23, 59, 59
                    )
    def test_adicionar_socio_cheers(self):
        """Register an active member in the Cheers shop via its HTTP API.

        NOTE(review): performs a live HTTP POST to an external service —
        consider mocking requests in CI.
        """
        socio: Socio = Socio.objects.get(user__username='00000000')
        if socio.data_fim - timezone.now().date() > timedelta(days=30) and socio.is_socio:
            url = 'https://cheersshop.com.br/socio/adicionar'
            obj = {
                "nome": socio.nome,
                "email": socio.email,
                "telefone": socio.whatsapp,
                "matricula": socio.matricula,
                "observacao": "",
                "cpf": socio.cpf,
                "data_fim_plano": socio.data_fim,
                "vendedor": "1874"
            }
            response = requests.post(url, data=obj, headers={
                'Authorization': f'Bearer {config("CHEERS_TOKEN")}'})
            self.assertEqual(response.status_code, 200)
    def test_adicionar_coupom_cheers(self):
        """Create a single-use discount coupon for an active member."""
        socio: Socio = Socio.objects.get(user__username='00000000')
        if socio.is_socio:
            url = 'https://cheersshop.com.br/codigo'
            obj = {
                "nome": socio.cpf,
                "uso": 1,
                "ativo": True,
                # Athletes get a larger discount.
                "desconto_reais": 70 if socio.is_atleta else 65,
                "maximo_usuario": "1",
                "quantidade": "1",
                "usuario": 192061,
                "vendedor": "1874",
            }
            response = requests.post(url, data=obj, headers={
                'Authorization': f'Bearer {config("CHEERS_TOKEN")}'})
            self.assertEqual(response.json()['status'], 'Success')
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
平安行动自动打卡
请事先安装好 lxml 和 requests 模块
pip install lxml requests
然后修改 27-31 行为自己的数据,未使用的变量保持原样即可
如有需要请自行配置 149-171 行的 SMTP 发信或 174-177 行的 Server 酱微信提醒
Created on 2020-04-13 20:20
@author: ZhangJiawei & Liu Chongpeng & Liu Lu
"""
import requests
import lxml.html
import re
import json
import random
import time
import smtplib
import traceback
# --- User configuration ---------------------------------------------------
# Student id and CAS password for the campus single sign-on.
myid = "STUDENTID"
mypass = "PASSWORD"
# Raw "boundFields" value posted with the check-in form (copied from a real request).
mybound = "BOUNDFIELDS"
# Raw "formData" payload posted with the check-in form (copied from a real request).
mydata = r'FORMDATA'
# ServerChan key for the optional WeChat notification (disabled by default).
# mysckey = "SCKEY"

# Notification title/body, filled in by the main flow below.
title = ""
msg = ""

# Explicitly disable any system HTTP(S) proxy for all requests.
proxies = {"http": None, "https": None}

# Browser-like request headers; Cookie/Host/Referer/Accept are mutated
# in place as the main flow walks through the CAS and form pages.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "zh-CN",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    "Content-Type": "application/x-www-form-urlencoded",
    "Cookie": "MESSAGE_TICKET=%7B%22times%22%3A0%7D; ",
    "Host": "cas.hrbeu.edu.cn",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362"
}
def findStr(source, target):
    """Return True if *target* occurs as a substring of *source*."""
    # The `in` operator is the idiomatic containment test and is exactly
    # equivalent to `source.find(target) != -1`.
    return target in source
# Main flow: log in through the campus CAS, open the check-in form,
# submit it, then build a notification title/message and report.
if __name__ == '__main__':
    try:
        ## Log in to the campus CAS authentication page
        url_login = 'https://cas.hrbeu.edu.cn/cas/login?'
        print("============================\n[debug] Begin to login ...")
        sesh = requests.session()
        req = sesh.get(url_login, proxies=proxies)
        html_content = req.text
        login_html = lxml.html.fromstring(html_content)
        # Collect the hidden CAS form fields (execution token etc.).
        hidden_inputs = login_html.xpath(r'//div[@id="main"]//input[@type="hidden"]')
        user_form = {x.attrib["name"]: x.attrib["value"] for x in hidden_inputs}
        user_form["username"] = myid
        user_form["password"] = mypass
        user_form["captcha"] = ''
        user_form["submit"] = '登 录'
        # Carry the session cookie issued by the login page into our headers.
        headers['Cookie'] = headers['Cookie'] + req.headers['Set-cookie']
        req.url = 'https://cas.hrbeu.edu.cn/cas/login'
        response302 = sesh.post(req.url, data=user_form, headers=headers, proxies=proxies)

        ## Open the check-in form start page
        jkgc_response = sesh.get("http://jkgc.hrbeu.edu.cn/infoplus/form/JSXNYQSBtest/start", proxies=proxies)
        headers['Accept'] = '*/*'
        headers['Cookie'] = jkgc_response.request.headers['Cookie']
        headers['Host'] = 'jkgc.hrbeu.edu.cn'
        headers['Referer'] = jkgc_response.url
        jkgc_html = lxml.html.fromstring(jkgc_response.text)
        # CSRF token is embedded in a <meta itemscope="csrfToken"> tag.
        csrfToken = jkgc_html.xpath(r'//meta[@itemscope="csrfToken"]')
        csrfToken = csrfToken.pop().attrib["content"]
        jkgc_form = {
            'idc': 'JSXNYQSBtest',
            'release': '',
            'csrfToken': csrfToken,
            'formData': {
                '_VAR_URL': jkgc_response.url,
                '_VAR_URL_Attr': {}
            }
        }
        jkgc_form['formData'] = json.dumps(jkgc_form['formData'])
        jkgc_url = 'http://jkgc.hrbeu.edu.cn/infoplus/interface/start'
        response3 = sesh.post(jkgc_url, data=jkgc_form, headers=headers, proxies=proxies)

        ## Submit the check-in form
        form_url = json.loads(response3.text)['entities'][0]
        form_response = sesh.get(form_url)
        headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        headers['Referer'] = form_url
        headers['X-Requested-With'] = 'XMLHttpRequest'
        submit_url = 'http://jkgc.hrbeu.edu.cn/infoplus/interface/doAction'
        submit_html = lxml.html.fromstring(form_response.text)
        csrfToken2 = submit_html.xpath(r'//meta[@itemscope="csrfToken"]')
        csrfToken2 = csrfToken2.pop().attrib["content"]
        submit_form = {
            'actionId': '1',
            'boundFields': mybound,  # user-configured boundFields value
            'csrfToken': csrfToken2,
            'formData': mydata,  # user-configured formData value
            'lang': 'zh',
            'nextUsers': '{}',
            'rand': str(random.random() * 999),
            'remark': '',
            # The numeric step id is embedded in the form URL.
            'stepId': re.match(r'.*form/(\d*?)/', form_response.url).group(1),
            'timestamp': str(int(time.time()+0.5))
        }
        response_end = sesh.post(submit_url, data=submit_form, headers=headers, proxies=proxies)
        resJson = json.loads(response_end.text)

        ## Form submitted — dump the result
        print('[debug] Form url: ', form_response.url)
        print('[debug] Form Status: ', resJson['ecode'])
        print('[debug] Form stJson: ', resJson)

        ## Build the notification title and message
        if (resJson['errno'] == 0):
            print('[info] Checkin succeed with jsoncode', resJson['ecode'])
            # Double quotes inside the f-string: reusing the same quote
            # character is a syntax error before Python 3.12 (PEP 701).
            title = f'打卡成功 <{submit_form["stepId"]}>'
            msg = '\t表单地址: ' + form_response.url + '\n\n\t表单状态: \n\t\terrno:' + str(resJson['errno']) + '\n\t\tecode:' + str(
                resJson['ecode']) + '\n\t\tentities:' + str(resJson['entities']) + '\n\n\n\t完整返回:' + response_end.text
        else:
            print('[error] Checkin error with jsoncode', resJson['ecode'])
            title = '打卡失败!校网出错'
            msg = '\t表单地址: ' + form_response.url + '\n\n\t错误信息: \n\t\terrno:' + str(resJson['errno']) + '\n\t\tecode:' + str(
                resJson['ecode']) + '\n\t\tentities:' + str(resJson['entities']) + '\n\n\n\t完整返回:' + response_end.text
    # Catch-all so any failure still produces a notification; `except Exception`
    # (not bare `except:`) lets KeyboardInterrupt/SystemExit propagate.
    except Exception:
        print('\n[error] :.:.:.:.: Except return :.:.:.:.:')
        err = traceback.format_exc()
        print('[error] Python Error: \n', err)
        title = '打卡失败!脚本出错'
        msg = '\t脚本报错: \n\n\t' + err + '============================\n'
    finally:
        print(':.:.:.:.: Finally :.:.:.:.:')
        ## Optionally send an email notification
        # from email.mime.text import MIMEText
        # from email.header import Header
        # mail_host = "smtp.qq.com"  # SMTP server address
        # mail_user = "sender@example.com"  # SMTP account user name
        # mail_pass = "emailpassword"  # SMTP account password
        # sender = 'sender@example.com'  # sender address (same as the SMTP user name)
        # receivers = ['receiver@example.com']  # recipient addresses; use a list for several
        # message = MIMEText(msg, 'plain', 'utf-8')
        # message['From'] = Header("1@example.com", 'utf-8')  # sender address, display only
        # message['To'] = Header("2@example.com", 'utf-8')  # recipient address, display only
        # subject = title
        # message['Subject'] = Header(subject, 'utf-8')
        # try:
        #     smtpObj = smtplib.SMTP_SSL(mail_host)  # SSL mail on Python 3.7+
        #     smtpObj.connect(mail_host, 465)  # SMTP over SSL uses port 465
        #     smtpObj.login(mail_user, mail_pass)
        #     smtpObj.sendmail(sender, receivers, message.as_string())
        #     print("[info] Success: The email was sent successfully")
        # except smtplib.SMTPException:
        #     print("[error] Error: Can not send mail")
        ## Or send a WeChat notification via ServerChan
        # wcurl = 'https://sc.ftqq.com/' + mysckey + '.send'
        # wcdata = {'text': title, 'desp': msg}
        # try:
        #     wcresult = requests.post(wcurl, wcdata)
        #     print('[info] Notification sended at', time.strftime("%Y-%m-%d %H:%M:%S %A", time.localtime()))
        # except Exception:
        #     print('[error] Failed to send notification!')
        print('[info] Task Finished at', time.strftime("%Y-%m-%d %H:%M:%S %A", time.localtime()))
        print('============================\n')
| #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
平安行动自动打卡
请事先安装好 lxml 和 requests 模块
pip install lxml requests
然后修改 27-31 行为自己的数据,未使用的变量保持原样即可
如有需要请自行配置 149-171 行的 SMTP 发信或 174-177 行的 Server 酱微信提醒
Created on 2020-04-13 20:20
@author: ZhangJiawei & Liu Chongpeng & Liu Lu
"""
import requests
import lxml.html
import re
import json
import random
import time
import smtplib
import traceback
# --- User configuration ---------------------------------------------------
# Student id and CAS password for the campus single sign-on.
myid = "STUDENTID"
mypass = "PASSWORD"
# Raw "boundFields" value posted with the check-in form (copied from a real request).
mybound = "BOUNDFIELDS"
# Raw "formData" payload posted with the check-in form (copied from a real request).
mydata = r'FORMDATA'
# ServerChan key for the optional WeChat notification (disabled by default).
# mysckey = "SCKEY"

# Notification title/body, filled in by the main flow below.
title = ""
msg = ""

# Explicitly disable any system HTTP(S) proxy for all requests.
proxies = {"http": None, "https": None}

# Browser-like request headers; Cookie/Host/Referer/Accept are mutated
# in place as the main flow walks through the CAS and form pages.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "zh-CN",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    "Content-Type": "application/x-www-form-urlencoded",
    "Cookie": "MESSAGE_TICKET=%7B%22times%22%3A0%7D; ",
    "Host": "cas.hrbeu.edu.cn",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362"
}
def findStr(source, target):
    """Return True if *target* occurs as a substring of *source*."""
    # The `in` operator is the idiomatic containment test and is exactly
    # equivalent to `source.find(target) != -1`.
    return target in source
# Main flow: log in through the campus CAS, open the check-in form,
# submit it, then build a notification title/message and report.
if __name__ == '__main__':
    try:
        ## Log in to the campus CAS authentication page
        url_login = 'https://cas.hrbeu.edu.cn/cas/login?'
        print("============================\n[debug] Begin to login ...")
        sesh = requests.session()
        req = sesh.get(url_login, proxies=proxies)
        html_content = req.text
        login_html = lxml.html.fromstring(html_content)
        # Collect the hidden CAS form fields (execution token etc.).
        hidden_inputs = login_html.xpath(r'//div[@id="main"]//input[@type="hidden"]')
        user_form = {x.attrib["name"]: x.attrib["value"] for x in hidden_inputs}
        user_form["username"] = myid
        user_form["password"] = mypass
        user_form["captcha"] = ''
        user_form["submit"] = '登 录'
        # Carry the session cookie issued by the login page into our headers.
        headers['Cookie'] = headers['Cookie'] + req.headers['Set-cookie']
        req.url = 'https://cas.hrbeu.edu.cn/cas/login'
        response302 = sesh.post(req.url, data=user_form, headers=headers, proxies=proxies)

        ## Open the check-in form start page
        jkgc_response = sesh.get("http://jkgc.hrbeu.edu.cn/infoplus/form/JSXNYQSBtest/start", proxies=proxies)
        headers['Accept'] = '*/*'
        headers['Cookie'] = jkgc_response.request.headers['Cookie']
        headers['Host'] = 'jkgc.hrbeu.edu.cn'
        headers['Referer'] = jkgc_response.url
        jkgc_html = lxml.html.fromstring(jkgc_response.text)
        # CSRF token is embedded in a <meta itemscope="csrfToken"> tag.
        csrfToken = jkgc_html.xpath(r'//meta[@itemscope="csrfToken"]')
        csrfToken = csrfToken.pop().attrib["content"]
        jkgc_form = {
            'idc': 'JSXNYQSBtest',
            'release': '',
            'csrfToken': csrfToken,
            'formData': {
                '_VAR_URL': jkgc_response.url,
                '_VAR_URL_Attr': {}
            }
        }
        jkgc_form['formData'] = json.dumps(jkgc_form['formData'])
        jkgc_url = 'http://jkgc.hrbeu.edu.cn/infoplus/interface/start'
        response3 = sesh.post(jkgc_url, data=jkgc_form, headers=headers, proxies=proxies)

        ## Submit the check-in form
        form_url = json.loads(response3.text)['entities'][0]
        form_response = sesh.get(form_url)
        headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        headers['Referer'] = form_url
        headers['X-Requested-With'] = 'XMLHttpRequest'
        submit_url = 'http://jkgc.hrbeu.edu.cn/infoplus/interface/doAction'
        submit_html = lxml.html.fromstring(form_response.text)
        csrfToken2 = submit_html.xpath(r'//meta[@itemscope="csrfToken"]')
        csrfToken2 = csrfToken2.pop().attrib["content"]
        submit_form = {
            'actionId': '1',
            'boundFields': mybound,  # user-configured boundFields value
            'csrfToken': csrfToken2,
            'formData': mydata,  # user-configured formData value
            'lang': 'zh',
            'nextUsers': '{}',
            'rand': str(random.random() * 999),
            'remark': '',
            # The numeric step id is embedded in the form URL.
            'stepId': re.match(r'.*form/(\d*?)/', form_response.url).group(1),
            'timestamp': str(int(time.time()+0.5))
        }
        response_end = sesh.post(submit_url, data=submit_form, headers=headers, proxies=proxies)
        resJson = json.loads(response_end.text)

        ## Form submitted — dump the result
        print('[debug] Form url: ', form_response.url)
        print('[debug] Form Status: ', resJson['ecode'])
        print('[debug] Form stJson: ', resJson)

        ## Build the notification title and message
        if (resJson['errno'] == 0):
            print('[info] Checkin succeed with jsoncode', resJson['ecode'])
            title = f'打卡成功 <{submit_form["stepId"]}>'
            msg = '\t表单地址: ' + form_response.url + '\n\n\t表单状态: \n\t\terrno:' + str(resJson['errno']) + '\n\t\tecode:' + str(
                resJson['ecode']) + '\n\t\tentities:' + str(resJson['entities']) + '\n\n\n\t完整返回:' + response_end.text
        else:
            print('[error] Checkin error with jsoncode', resJson['ecode'])
            title = '打卡失败!校网出错'
            msg = '\t表单地址: ' + form_response.url + '\n\n\t错误信息: \n\t\terrno:' + str(resJson['errno']) + '\n\t\tecode:' + str(
                resJson['ecode']) + '\n\t\tentities:' + str(resJson['entities']) + '\n\n\n\t完整返回:' + response_end.text
    # Catch-all so any failure still produces a notification; `except Exception`
    # (not bare `except:`) lets KeyboardInterrupt/SystemExit propagate.
    except Exception:
        print('\n[error] :.:.:.:.: Except return :.:.:.:.:')
        err = traceback.format_exc()
        print('[error] Python Error: \n', err)
        title = '打卡失败!脚本出错'
        msg = '\t脚本报错: \n\n\t' + err + '============================\n'
    finally:
        print(':.:.:.:.: Finally :.:.:.:.:')
        ## Optionally send an email notification
        # from email.mime.text import MIMEText
        # from email.header import Header
        # mail_host = "smtp.qq.com"  # SMTP server address
        # mail_user = "sender@example.com"  # SMTP account user name
        # mail_pass = "emailpassword"  # SMTP account password
        # sender = 'sender@example.com'  # sender address (same as the SMTP user name)
        # receivers = ['receiver@example.com']  # recipient addresses; use a list for several
        # message = MIMEText(msg, 'plain', 'utf-8')
        # message['From'] = Header("1@example.com", 'utf-8')  # sender address, display only
        # message['To'] = Header("2@example.com", 'utf-8')  # recipient address, display only
        # subject = title
        # message['Subject'] = Header(subject, 'utf-8')
        # try:
        #     smtpObj = smtplib.SMTP_SSL(mail_host)  # SSL mail on Python 3.7+
        #     smtpObj.connect(mail_host, 465)  # SMTP over SSL uses port 465
        #     smtpObj.login(mail_user, mail_pass)
        #     smtpObj.sendmail(sender, receivers, message.as_string())
        #     print("[info] Success: The email was sent successfully")
        # except smtplib.SMTPException:
        #     print("[error] Error: Can not send mail")
        ## Or send a WeChat notification via ServerChan
        # wcurl = 'https://sc.ftqq.com/' + mysckey + '.send'
        # wcdata = {'text': title, 'desp': msg}
        # try:
        #     wcresult = requests.post(wcurl, wcdata)
        #     print('[info] Notification sended at', time.strftime("%Y-%m-%d %H:%M:%S %A", time.localtime()))
        # except Exception:
        #     print('[error] Failed to send notification!')
        print('[info] Task Finished at', time.strftime("%Y-%m-%d %H:%M:%S %A", time.localtime()))
        print('============================\n')
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Mon Oct 15 12:53:43 2018
# by: The Resource Compiler for PySide (Qt v4.8.7)
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore
qt_resource_data = b"\x00\x006x\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x006\x05IDATx\xda\xec\xddOl-Y~\x17\xf0\xaaIG\xf9G\xe27\xd2\xf0O#\xe2\xfbX0j\x08\xb2G\x10\xd1 F\xbe\x1d!\x116\xb1Gb\x93\xd5\xbb\xbd`\x91\xc5\xf0\xdc+f\xf7\xca\x12\x8b\xd9\xb5\x1f-$$\x16\xefzE\xc4\x22m/C\x82\xfaZ\x83\xa0\xc3\x1f\x8d\x1dF\x0aC \xcfF\x84\x89\x84F\xf3\x9c\x88\x10 \x89\xa9\xd3>\x9e\xf6\xbc\xb6OU\xdd\xbfUu?\x1f\xa9t\xdf\xf3-\xdf?\xa7\xae\xef\xfd\xdes\xea\xfcN~}}\x9d\x01i\x1b_yk\xaf\xbc\x18\x5c\xbd\xff\xd1\xa1\xd6\x00\xa0k>\xa3\x09\xa0\x96\xfd\xb8\x01\x80\xc0\x07}\xb3\xf1\x95\xb7\x06\xe5\xc5N\xb9m\xc6\x9e>\x00\x10\xf8\xa0g\x8a;\xff\xd6\xcb\x07@\xe7\xe4\xce\xe1\x83\x87m|\xe5\xadG\xe5\xc5E\xf8\xe7\x9d\x1f?\xbez\xff\xa3\x0b\xad\x03@W\xe8\xe1\x83\xb4\xbd\xd7\xc2^Ph\x16\x00\x04>\xe8\x8f\xfb\xc2\xdd^\xec\xf9\x03\x00\x81\x0f\xba\xac\x0cu\xc3\xf2b\xf3\xbe\xab\xb2\x9b\x9e?\x00\x10\xf8\xa0\xe3R\x134\x0a\xcd\x03@W\x98\xb4\x01\xf7\x88\xa5X^V\xec\xf6\xf6\xd5\xfb\x1fM\xb4\x16\x00m\xa7\x87\x0f\xee7\xaa\xb1\x8f\x12-\x00\x08|\xd0au\xc2\xdcn\xec\x09\x04\x00\x81\x0f\xba\xa4\x0cq\xa3\xec\xd3\xa5X\x1e2\xd2b\x00\x08|\xd0=\xfb\x0b\xda\x17\x00\x04>X\xb5X\x8ae\xab\xc9\xaf\xc4\x1eA\x00\x10\xf8\xa0#\xa6\x09oz\xf9\x00h5eY 
\x8a\xabg|w\xca_W\xa2\x05\x80\xd6\xd2\xc3\x07\x9f\x98\xa5\xa7n\xa4\xf9\x00\x10\xf8\xa0\xfdf\x09mO\xac\xaf\x0b\x80\xc0\x07-\x16'^l\xcex3\xce\xe5\x03@\xe0\x83\x16\x1b\xb5\xe46\x00@\xe0\x83y\x8b\xabe\xec\xcc\xe1\xa66\x95h\x01@\xe0\x83v*\xe6x[\x02\x1f\x00\xad\xa3,\x0bk-N\xb4\xb8\xc8\xea/\xa5V\xc7\xe3\xab\xf7?\xba\xd0\xba\x00\xb4\x85\x1e>\xd6\xddh\xcea/(4+\x00\x02\x1f\xb4\xc7\x22f\xd6\xee)\xd1\x02\x80\xc0\x07-P\x86\xb2\xbdl\xf6R,\xf7\xdet\xe6\x5c>\x00\x04>h\x85E\x8625\xf9\x00\x10\xf8`\x95b)\x96\xdd\x05\xde\xc5f\xecA\x04\x00\x81\x0fVd\x19=p#\xcd\x0c@\x1b(\xcb\xc2\xdaYP)\x96\x87(\xd1\x02\xc0\xca\xe9\xe1c\x1d\xed-)\xec\x05\xce\xe5\x03@\xe0\x83\x15Xf\x08\x1b)\xd1\x02\x80\xc0\x07KT\x86\xafay\xb1\xb5\xcc\xbb\xccnz\x14\x01@\xe0\x83%\x19\xad\xe0>\x0d\xeb\x02\xb0R&m\xb06b)\x96\x97+\xba\xfb\xb7\xaf\xde\xffh\xe2(\x00\xb0\x0az\xf8X'\xa35\xbdo\x00\x04>\x10\xf8\x96\xe0I\xeca\x04\x00\x81\x0f\x16\xa1\x0c[!\xecm\xae\xf8a\x8c\x1c\x09\x00\x04>\xe8w\xd8\x12\xf8\x00\x10\xf8`\x116\xbe\xf2\xd6vy\xb1\xd3\x82\x87\xb2\x19{\x1a\x01@\xe0\x839kSY\x14\x81\x0f\x80\xa5S\x96\x85^\x8b\xab\x5c|\xb7\xe6\xeeW\xb7\xbf\xd6\xf0n\xc2\xef\xbd\xca\xea\x9f#\xf8\xc5\xab\xf7?:st\x00X\x16=|\xf4\xdd\xa8\xc6>\x97\xe5\xf6N\xb9\x0d\xcam\x9a vV\x06\xb8A\xbc\x8d\xd3\x1a\xfb+\xc4\x0c\x80\xc0\x07s\x94\x0aW'\xd9MA\xe4A\xb9\x8d\xcb\xed\xd5,w\x14ocX\xfe\xf3\x8b\xe5v\x94\xd8\xf5\x89\xf5u\x01\x10\xf8`\x0e\xcaP\x15\xd6\xb0}}\x985\x0c\xbf>/\xb7\xc7e8\xdb[\xc4\xea\x17a\xb8\xb6\xdcF\xe5??[n\x07\xd9M\x0f\xe2\xebF\x8e\x10\x00\xcb\xf2\x86&\xa0\xc7\xee\xf6\xee\x9d\x97\xdba\xe8\x85[\xd6\x9d\xc7\x1e\xc3\x22lqvn\xd8v\xee<\xb6C\x87\x08\x80e\xd0\xc3G/\xc5U-B\xb8\x0aC\xaba\xd8v{\x99a\xef\x9e\xf0w;\xdc\xfb8>\xa6G\xb1\x07\x12\x00\x16N\x0f\x1f}\x16\x86m/\xda\xf4\x80\xe2\xe3\x19\xc5s\xf8\x9c\xc7\x07\x80\xc0\x073\x06\xab6?\xbe0\xdc\xfb\xca\x91\x02`\x19\x0c\xe9\x02\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x02\x1f\x00,I\x9e\xe7\x8f\xcam\xa8%@\xe0\x03\xa0\x9fao\xbb\xbc\xb8\xd0\x12 
\xf0\x01\xd0\xcf\xb0\xb7_^|\xa3\xdc6\xb4\x06,\x96\xb5t\x01Xv\xd0{T^\x8c\xcbm\xf7\xf6g\xd7\xd7\xd7\x13-\x03\x02\x1f\x00\xfd\x08{a\x08\xf7\xb8\xdc6\xb5\x06,\x8f!]\x00\x96\x15\xf6n\x87p_\x0f{\xa7Z\x07\x16K\x0f\x1f\x00\x8b\x0ez\x9f\x1a\xc2\x05\x04>\x00\xfa\x13\xf6\xea\x0c\xe1N\xb4\x14,\x96!]\x00\x16\x15\xf6\x1e\x1a\xc2\x05\x96L\x0f\x1f\x00\xf3\x0ezM\x87p'Z\x0d\x04>\x00\xba\x13\xf6\xcc\xc2\x85\x162\xa4\x0b\xc0\xbc\xc2\xde(\xbb\xe9\xadk\x14\xf6\xd4\xe0\x83\xc5\xd3\xc3\x07\xc0\xacA/\x0c\xe1\x1e\x96\xdb\x13\xad\x01\x02\x1f\x00\xfd\x0b{a\x08w\x5cn[S\xde\x84\x1a|\xb0\x04\x86t\x01\x986\xec\x8d\xb2\x9b!\xdc-\xad\x01\xed\xa6\x87\x0f\x80\xa6Ao\x9eC\xb8\x13-\x0a\x02\x1f\x00\xed\x0a{\xb3\x0e\xe1\x02+`H\x17\x80\xbaao\x94\xcd\x7f\x08w\xa2ea\xf1\xf4\xf0\x01P\x15\xf4\xcc\xc2\x05\x81\x0f\x80\x1e\x87\xbdAvSHy!C\xb8j\xf0\xc1r\x18\xd2\x05\xe0\xa1\xb0\xb7W^\x9ce\xce\xd7\x03\x81\x0f\x80^\x86\xbd0\x84\xfbA\xb9m,\xf0n\xd4\xe0\x83%1\xa4\x0b\xc0\xdd\xa07\xc8\x168\x84\x0b\xac\x86\x1e>\x00n\xc3\xde\xb2\x87p'Z\x1d\x04>\x00\x96\x17\xf6\x961\x84\x0b\xac\x88!]\x80\xf5\x0ez\x83luC\xb8\x13G\x00\x96C\x0f\x1f\xc0\xfa\x86=\xb3pA\xe0\x03\xa0\xc7ao\xe5C\xb8j\xf0\xc1\xf2\x18\xd2\x05X\xaf\xa0\x17V\xcd\x08AK\xaf\x1e\xac\x11=|\x00\xeb\x13\xf6\x86\xe5\xc5EK\xc2\x9e\x1a| \xf0\x010\xe7\xb0W\x94\x17\x1fff\xe1\xc2Z2\xa4\x0b\xd0\xef\xa0\x17\x86p\xc3,\xdc\x9d\x96=\xb4\x89\xa3\x03\x02\x1f\x00\xf31\x88\xe1*\xcc\xc6\xdd\x8e?\xdb\xd1, \xf0\x01\xd0\x13\xd7\xd7\xd7g1\xec}J<\xa7/x\xfdr\x19\x81p\xe2\xe8\x80\xc0\x07\xc0\xe2\xc3\xe0$\x15\xbe^\x0b\x84\xc3L\xcf \x08|\x00\xf47\x10\x96\xe1\xefbA\xb7\x0d,\x81Y\xba\x00$\xc5\x19\xbe\x9bZ\x02\x04>\x00\xfa\x19\xf6\xc2,\xdf\xfd9\xdf\xac\x1a| \xf0\x01\xd0\x22a\x09\xb6T\xed\xbe\x83r\xbb\xd2L 
\xf0\x01\xd0Aq\xd2\xc6\x93\xc4.\xe7\xd7\xd7\xd7Ey9jx\xd3\x13\xad\x0b\x02\x1f\x00\xedPT\x5c\xff\xf1Po\x19\xfaBa\xe7\xe7\x9a\x0b\x04>\x00:$\xcf\xf3Q\x96.\xc3rrw\xa6m\xf9\xef\x10\xfe\xcek\xde\xfcD\x0b\xc3r)\xcb\x02p\x13p\x06\xd9\xcd\xaa\x14\xc3r\x0b\x13\x15\x92\xabR\x94\x01'\xefq[\x84\xe7_T\xecv\xdfD\x8e\xbd\xec\xa6\xc8\xb3\xf5zA\xe0\x03hE\xa8\xd9\xce>)(<lcH\x89\xe7\xd0}\x18\xff{\x1a\xc3\xd4E\xb9M\xe2\x0a\x1a\x8b\x12\xc2\x5c\xaa\x0c\xcbAy\xff\x17\xf7\x84\xe0\x8b\xd83\xf8A\xea\xc6\xd5\xe0\x03\x81\x0f`\xd1!/\x04\x92\xbd\xac\x1bu\xe5\x06w\xfe\xbd\x93\xdd\xe9m,\x9f\xcbm\x08<\x9eg\x00\x8c=\x9d\xa92,\x97\xd9\xcd\xcc\xdd\x87\xc2\xdcqy\x1b\xe1|\xbe\xa7^q \xf0\x01,+\xe4=\x8a\x01/\x84\x98\xad\x8e=\xfcA\xc5\xf5\xdf\x0b\x81\xe5\xf3\x0cAl\x12\x02`\x9cD1\xad\xaa2,Ey\xfb\xafR7\x10\xce\xe7\x8b\xe1\xfa\xbe\xe1p5\xf8`\x05L\xda\x00z\x1b\xf4\xe2\x0a\x11\x17\xe5\xf6\xa2\x83a/\x186\xd87\xf4X\x86\x12*\x1f\x94\xcf\xfbU\xb9\x8d\xcbm\xafa\x9b\x85\xfb\xdbM\xecrZ\x86\xb9q\xcd\x9b\x0b\xf7}_}\xbeW^\x9d \xf0\x01\xcc3\xe8=\xcb\xba=\x81`0\xe5\xefm\xdc\x09\x7f\xe1\xbc\xba\x22\x0e\xd5V9\xac\xb8\xbe\xa8\xfb\x00b/\xe0}\x81\xf3\xcc\xab\x14\x96\xcf\x90.\xd0G\xfb3\x04\xbd\xab\xec\x93\xc9\x11a{\xb5\xc2\x90\xb29\xa7\xdb\x08\xc1\xf7Y\x19\xfaNB\xa8\xbbo\xd2D\x9cl\x91\xea\x05=j:\xd9\x22\xec_\xde\xeeA\xbc\xff[z\xf8@\xe0\x03\x98M\xe8Y*CF8\x87\xedI\xcd_\x09\xe7\x94M\xe2vVu~\xda\x92\xbd\x9d}R\x22f\x18/g\xe9\xb1\x0c\xc3\xb5\xbb\xf1|\xbf\xe2vx6\x9e\xe7xX\x11\x82\xf7\xa7<\x1eE\x1c*\xbe=\x9fO\x0f\x1f\x08|\x00sq\x98\x08|!\xec\xdc\xcel=n\xf3\x93\xb8\xd3\xa3\xf6\xbd\xc7\x19\x87f\x87w\xb6iz\x01\xc3\xef\xbc\x88C\xdfE\x8d y8c\x10\x0eC\xbb\x17\xf1>\xf4\xf0\x81\xc0\x070\x97\xa0tV\x86\x99\xb0\xea\xc3\xdd!\xca\xa3r\x1bw\xbd\x06\x5c\xac\x7f7\x8e\xdb\xddz\x82\xa3\xac\xf9\xc4\x94\x10\xfc\xfea\xb9}>\xb1\xcfe\x5c/w\x96\xc7\xfc*N 
\xf9p\xc1\xf5\x03\x81\x07\x98\xb4\x01\xf4U\xe8\xe5\x0bC\x91\xe1\x1c\xb2\xcf\x96Ac\xd4\xc7\x82\xbf!@\x95[\xe8\x81\x0b\xc1\xefq\xb9\xbd\x9b\xd5_\xe2,\xa8\xeaq\xdb\x9f\xd3\xe3\x9c\xc4\xc7\x06\x08|\x00s\x0bB\xa17\xefQ\xe8\x9dj\xd9yy\x8b|\xce\x17\xaf\x85\xbfP\x00\xf92\xf1+!\x18\xfe\xa5\xc4\xf5\xa7\xf3\x1c\xf6\x0e\x8f\xcd+\x13\x04>\x00\xe6\x1b\xfe\xf6\xcbmP\xfe\xf7\xcb\xe5vr\xcfn\x9f\xab\xb8\x99\x91\x96\x04\x81\x0f\x80n\x84\xbf\xb0\xfaF8\x87\xee\xb6\xd7/\x0cu\x87\xd9\xc9\xa9s\xf7\x9e\xdf\xb7^. \xf0\x01\xd0\xee\xe0\xf7q\xaf_vS\xd0y;\xb1k\x08\x84\x85\x16\x03\x81\x0f\x80\xee\x9ay\xbd\x5c@\xe0\x03\xa0\xa5b\x19\x97TQ\xeas\x93+@\xe0\x03\xa0\xdb\xaa\xc2\xdc\xbe&\x02\x81\x0f\x80\x8e\x8a\xeb\xe5\xee$v9\xe9c\xadB@\xe0\x03X\x97\xb0\x17\xd6\xcb-*v\xd3\xbb\x07\x02\x1f\x00\x1d\x16\xc2\x5cj\xdd\xdd\x03eX@\xe0\x03\xa0\xa3\xf2<\x1fd\xe9\xde\xbb\xb0\x1a\x87\x89\x1a \xf0\x01,$\x88\x1c\x96\xdbY\x1cndq\x94a\x01\x81\x0f`%aoX^<-\xb7\xadr\xbb\x88\xe5BXL;\xef&v\x09\xeb\xe5\x8e\x17p\xbfca\x1e\x04>\x80\xbbC\x88\xa1\xf7\xe9\x1bq\x16)\x8bk\xe7\xfb\x14\x8b\x08{\xd9M\xad\xbf\x10\xe6'\xc2<\x08|\xc0\x1a*\x03\xc0~\x0c\x03\xaf{\x11\xc3\x02\xf3i\xe7\xd1\x03\xed|\xebh\xdeeX\xee\x84\xbd[B\x1f\x08|\xc0\x1a\x86\x90\xaa\xf2 C\xad4\xb7vN\xf5\xee\xcd}\xbd\xdc{\xc2\xde\xad\x0d\xa1\x0f\x04>`\xbd\xecg\xe9\x09\x04#M\xb4\x94v>\x5c@\x19\x96T\xa0\x13\xfa@\xe0\x03\xd6A\xecuJ\x95\x079\xb2\xd2\xc3\x5c\xdayP^<K\xecrY\xb6s\xb1\x80\xbb\x1e\x96\xdb\xb9\xd0\x07\x02\x1f\xb0\xdeR\xbdNW\x99\x95\x1e\xe6e\x5c\xe38\xcc],\xed\x22\xf4\x81\xc0\x07\xac\xb9Q\xe2\xbaC\xb5\xe0f\x17\xcb\xb0\xa4\xd6\xcb\x0deX\x8e\x17u\xff\x0dC\x9f\x92- 
\xf0\x01=\x0b\x22!\xec=\xb4\xb4W\xe8\xdd\xb3\xd2\xc3|\x8c+\xae_x/\xaa\xd0\x07\x02\x1f\xb0\xbe\xf6\x12\xd7\xe9\xdd\x9bO\xa8\xaeZ/\xf7y\xd9\xceg\xcbx,\xf1x\x8eb\x98\x7fH(\xd9r\xec\xc8\x81\xc0\x07\xf4#\x88\x0c\xb2\x87W{\xd0\xbb7\x9f6\xae*w3\xf72,5B_\x08\x97\xc3\x8a\xd0\xb7\xa3\xf6\x22\x08|@?\xa4z\xf7\x8e\xf5\xee\xcdE+\xd7\xcb\x8d\xa1\xafj\x18\xf9I\xec\x9d\x04\x04>\xa0\xa7\x81O\xef\xde\x8c\xe2\x8c\xd7'\x89]B\x19\x96\x95\xb5s\x5c\xab\xf7\xa0b\xb7\xf7\xca\xe7\xb1\xe7h\x82\xc0\x07t3\x8c\x84\xa1\xc6\x87f\x8d\x9e/\xeb\x9c\xb2\x9e\xab\x0as\xa3U?\xc0X\xf7\xef\xa4b\xb7q\x1c\xfe\x07\x04>\xa0c\x86\xa9\x0fx\xcd3s\xa0\xde\xcb\xd2eXNZT\xcc:\x04\xcf\xaa\x99\xbb\xc7f\xee\x82\xc0\x07tO\xf2\xfc=\xcd3S\xd8\xabZ/7h\xcd\xb9q\x0df\xee\x1a\xe6\x07\x81\x0f\xe8\x98\xe1\x03??_\xc0Z\xae\xeb\xa6\xaa\x0c\xcbA\xdb\xda8\x0e\xe1\x17\x15\xbb=\x89u\x1b\x01\x81\x0fh\xbb\xd8\x03\xf5P \x99h\xa1\x99\xdav\x90\xa5{\xefZ[\xee&N \xa9:\x9f\xef\xd0\xf2k \xf0\x01\xdd\x90\xfa\xc0\x16\xf8fSd\xe92,\xfb-/w3\xca\xd2C\xbb\xe1\xb9\x8d\x1df\x10\xf8\x80\xf6\x1b\x0a|\xf3\x17\xd7\xcbM\x95a9\x8d\xa5PZ\xeb\xce\xf9|)\xce\xf1\x04\x81\x0f\xe8\x80\x87z\xf8\xce\x15[\x9eI\xd5Pm\xd1\x85'Q\xbe\x06B\xa0\xbboh7\xcc\xe4\xfdb,\xe5\x02\x08|@\xcb=T^\xe3B\xd3L'Nf\xd8J\xecr\xd4\xa22,u\x84\xf3\x10\xef\x0e\xed\x86\x89&\xdb\xea3\x82\xc0\x07t\xc7C\xf5\xe1|\x98O\x17\xf6\xaa\xca\xb0,}\xbd\xdcY\xc5Y\xc4\xe19\xe9\xd5\x03\x81\x0f\xe8h8\xc9\x04\xbe\xb9\x0a\xbda\xa9\x89\x1a\x87],u\x13B\x9e^=X\xac74\x01\xb0@o\x97\xdb n\xc3x\x19\xca\xb48\x7f\xafy\x80\x0em\xf7,\xb1\xcb\xa5\xde1@\xe0\x03\x96*N\xca\x98h\x89\xb9\x19W\x5c\xbf\xaf\x89\x80\x87\x18\xd2\x05h\xb9X\x86%\xb5^\xeei\x9c\xf1\x0a \xf0\x01t\xd4\xb8\xe2z\xbd{\x80\xc0\x07\xd0Uy\x9eW\xad\x97{d\xb2\x03 \xf0\x01t7\xec\x85\x99\xceEb\x97P\x86E\xef\x1e \xf0\x01tX\x08{\xa92,\x85\x15K\x00\x81\x0f\xa0\xa3\xf2<\x0f\xcb\xd2=M\xec\x12\xca\xb0\x1cj)@\xe0\x03\xe8\xae\xaa07\xd2D\x80\xc0\x07\xd0Qy\x9e\xefe\xd5eX&Z\x0a\x10\xf8\x00\xbaK\xef\x1e 
\xf0\x01\xf4U\x9e\xe7E\x96.\xc3r\xd0\xc5\xf5r\x01\x81\x0f\x80\xec{\xeb\xe5\xa6\xca\xac\x842,&j\x00\x02\x1f@\x87\x15Y\xba\x0c\xcb\xbe2,\x80\xc0\x07\xd0Qq\xbd\xdc'\x89]\xce\xcb\xb07\xd6R\x80\xc0\x07\xd0]E\xc5\xf5V\xd4\x00\x04>\x80\xae\xca\xf3|\x94\xa5\xcb\xb0\x1c)\xc3\x02\x08|\x00\xdd\x0d{a\xbd\xdc\xd4D\x8c0Q\xa3\xd0R\x80\xc0\x07\xd0]a\xa865Q\xe3P\x19\x16@\xe0\x03\xe8\xa8X\x86\xe5Yb\x97\xcbL\x19\x96\xaa6\xdc\x8e\xbd\xa4@\xc2\x1b\x9a\x00`e\xaa\xc2\x9c2,\xdf\x1f\xee\x86\xe5\xc5v\xdcBX\xbe=\xef\xf1\xedr\x9bh!\x10\xf8\x00\xda\x18^v\x13\xbb\x84\xf5r\x8f\xb5\xd4\xf7\xf9\xf0\x81\x9fo\x0b|\x90fH\x17`5\xc6\x15\xd7+\xc3rO\x08~\xe0\xe7\x86tA\xe0\x03h\x97<\xcfC\x98K\xad\x97\x1b\xca\xb0\x9ci\xa9O\xb9x\xe0\xe7CM\x03\x02\x1f@\x9b\xc2^\xe8\x8d*\x12\xbb\x842,z\xf7\x9a\x05>\xa0\x82s\xf8\x80e\x84\x9cavs\x92}\xd8n\xff\x1dz\xb8\xbe\xbc\x86\xe7\xa9\x85\xb0\x97*\xc3R\x98\xa8\xd18\xf0\xedh\x1a\x10\xf8\x80\xd5;~ \xe4\x0c\xe3u\xeb\x12|\xc3\xe4\x82\xa7\x89].\xcb\xb0\xa7\x0cK\xf3\xc0\x07T0\xa4\x0b,+\xf0\xddgo\xcd\xda\xa1*\xcc\x8d\xbcT\x00\x81\x0f\xe8[\xe0\xdb\x8c\xbd^\xbdW>\xcf\x10nSC\x8f\xa7\xd6\xcb\xadt\x91h\xdf\xa1\xe6\x01\x81\x0fX\xa1x\x9e\xde\xd5\x03W\x8f\xd6\xa4\x19\xf4\xee\xcd\xfe:\xba\xd0\x0a 
\xf0\x01\xed6^\xd7\xa0\x93\xe7y\x91\xa5\xcb\xb0<\x17f\x00\x81\x0f\xe8\x83\x87z\xb86\xca@\xd4\xdb\xd0\x17\xcb\xb0\xa4\xca\xac\x84\x9e\xcf\xc2\xcb\x03\x10\xf8\x80\xce\x8b=X\x0f\xad\x94\xd0\xe7\xc0\x13\x82n\xaa\x0c\x8b\xf5r\x01\x81\x0f\xe8]\xf8\xb9\xcff\x1f{\xf9\xe2D\x82'\x89]\xce\xcb\xb07\xf6\xb2h\xf4\xc5!\x7f`\x9bh\x1d\x10\xf8\x80v|X\x87\xc9\x1b\x97\x0f\x5c]\xf4\xf0)W='+j\x00\x02\x1f\xd0K\x0f\x85\xa0^\xf5\xf2\xc5\xe7\x92*\xc3r\xa2W\x0a\x10\xf8\x80^\x8aC\x98\x0f\xf6\xf2\xc5I\x0e]\x0f{\xd6\xcb\x05\x04>`\xed=\x14\x866{\x12\x84\xf6\xb3t\x19\x96CeX\x00\x81\x0f\xe8\xb5\x8a^\xbegy\x9e\x0f\xba\xfa\xdc\xe2c\x7f\x96\xd8%<o\xeb\xe5\x02\x02\x1f\xb0\x16R=y\xe3\x0e?\xaf\xaa0W(\xc3\x02\x08|\xc0Z\x883v\x1f\xaa\xcb\xb7\x93\xe7y\xe7\x86vc\x19\x96\xdd\xc4.\xa7\xca\xb0\x00\x02\x1f\xb0nF\x89\xeb\x8a\x0e\x0e\xedV\xf5\xee\x99\xa8\x01\x08|\xc0z\x89\x13\x17\x0e\x1e\xb8:\xacN1\xee\xcas\x89=\x92[\x89]\x8e\xca\xe7{\xe6\xa8\x03\x02\x1f\xb0\x8e\xa1\xaf(/\xce\x1f\xb8\xba\x13C\xbb\xca\xb0\x00\x02\x1f@\xb5\xbd\x18\x8a\xee\xf3^\x19\xa8\xb6[\xfe\xf8C\xd8K\xad\x97{h\xa2\x06 \xf0\x01k-\x0e\xed\x8e\x12\xbb\x8c\xdb\xfa\xd8\xe3y\x86O\x13\xbb\x5c\xc6^L\x00\x81\x0fX\xfb\xd0\x17f\xed>\x7f\xe0\xea6\x9f\xfbV\x15FG\x8e. 
\xf0\x01|\x12\xfa\xc2ynGw~\x14\x86y\xdf)\x7f\xde\xca\xd0\x94\xe7y\x18\x8aN\xad\x97{j\xbd\x5c\xa0\x0d\xde\xd0\x04@\xcbB\xdf(N\x82\x18\x94\xdb\xa8\xe53[\xab\xca\xb0\x8c\x1cQ@\xe0\x03\xb8?\xf4\xed\xb5\xfd1\x96\xa1\xb4\xc8\xd2\xeb\xe5>\xb7^.\xd0\x16\x86t\x01\x9a\x87\xbd\xd0\x03\x99*\xb3\x12\x86\xa2\x0b-\x05\x08|\x00\xdd\x15\x86rSeX\xf6\x95a\x01\x04>\x80\x8e\x8a\xeb\xe5>I\xecrn\xbd\x5c@\xe0\x03\xe8\xb6\xa2\xe2z+j\x00\x02\x1f@W\xe5y>\xca\xd2eXN\x94a\x01\x04>\x80\xee\x86\xbd\xaa\xf5r\x03\xbd{\x80\xc0\x07\xd0a!\xcc\xa5\xca\xb0\x1c(\xc3\x02\x08|\x00\x1d\x15\xd7\xcbM\xf5\xde]f\xd5E\x98\x01\x04>\x80\x16\xab*\xc3R(\xc3\x02\x08|\x00\x1d\x15\xcb\xb0\xec&v9U\x86\x05\x10\xf8\x00\xba\xadj\xa8\xb6\xd0D\x80\xc0\x07\xd0Q\xb1\x0c\xcbVb\x97#eX\x00\x81\x0f\xa0}!n\x5cng\xe5\xb6]\xb1_(\xc3\x92\xea\xdd\x0b\xeb\xe5*\xc3\x02\x08|\x00-\x0b{\xa3\xecfY\xb4\xd0k7)\xff\x9f\x0alE\x96\x9e\xa8qh\xa2\x06 \xf0\x01\xb4+\xec\x85\x1e\xbd\x17w~\x14\xc2\xdc{\xe5\xcf'\xb1\xec\xca\xdd}\xc3\xff\x9f&n\xee\xb2\x0c{\x85V\x05\xba\xe2\x0dM\x00\xacA\xd8\x0b\xc3\xb3\x93\x07\xae\x0eK\xa5\xbd,\xf79\xc8>\xe9\xb5\x1bW\xdc\xe4\xfe\x94\x8fc/\xde\xf6E\xb9\xbd\xde;xV\xe7g\xce\x19\x04\x04>\x80\xfb\x85\x90\xb4Q\xb1\xcf\xb3\x10\xe4\xcaP\xf6O\xb2\xf4z\xb9\xa1\x0c\xcb\xf14\x0f\x22\xfc^,\xf3r\xdf\xe3\xd9\xa9\x19\x1a\xef}L\xaf\xfd\xffU\x0c\x8bU?\xbb\xb0:\x08\x08|\x00}\x11\x02\xda\xa0F\xe8\x0b\xd7\xff\x83\x8a}F\xb3<\x902`\xddN\x18\x09\x8fikN\xcf\xef\xbe\xb0\xb8;e\x80\x0c\xab\x86\xbc\x1e\x02k\xf5>\x86\x9f9\xaf\x11\x04>\x80\x95\x08\xe7\xdb\x85\xd9\xb9\xd9\xcd\xac\xdb\xdd\x19n\xea\xf9<z\xc4\xc2m\xc4\x9e\xbe\xe3\xacf\xcf\xde\x12mf\x9f^3\xb8\xf6c\xbc'@\x9e\xde\xb3\xdb$\x86\xc3c\xafN\x10\xf8\x00\xe6\x19\xfaBP\xdb\x8bAk|O\xa8\xa9\xf2\xff\xca\xed?\xcf\xf1\xf1\x84\x9e\xb0a\x0c\xa2Oz\xdc\xf4\xf7\x85\xc5\xaa\x927\xc0\x9c\x99\xa5\x0b\xac[\xf0\x9b\x94\xdb\xa0\xfcg\x98\xa4q\xd5\xe0W\x7f\xb0\xdc\xfeq\xac\xe1\xf7h\x8e\x8fgT^\xbc\xbbF\x87 
\xf4\xf8\x0d\x0d\xfd\x82\xc0\x07\xb0\x8c\xe0Wd7\xe7\xf5\x1d5\xfc\xd5\xdb\x1a~\xdbs|,\xa1\xb7\xeb\x9d\x86\x01\xb4\x8b\xc2\xca$\xc2\x1e\x08|\x00K\x0d}\xafb\x0f\xdb\x7f\x98\x22\xf4}\xa3\x0c}\xc5\xbcz\xfb\xca\xc71./\x86=\x0e}\xcfc[\x03\x02\x1f\xc0r\xc5\xd57\xfe\xca\x94\xbf\x1eJ\xb9\x84\x09\x18\xc5\x9cB_\x98\xf9\x1az\x0e\xcf{\xd6\xcc\xef\x94\xcf\xcd2t \xf0\x01\xac$\xec\x85\xde\xb9Y\xc3Z(\xe52\xb70\x13'\x97\x0c\xb3\xfbg\xb7vM\xe8\xad\xfcr\xec\xbd\x04\x04>\x80\x95\x08A-5[\xf7$\xbb\xa9KWe\xae\xe5E\xe2Ps\x08}G\x1dn\xdb\x10\xf6\x86J\xaf\x80\xc0\x07\xb02q\xbd\xdcT\xcf\x5c\x08z\xa38\xa3\xf7\x9d\x8a\xe0\xb7\x90\x12#\x1d\x9e\xc1{\x1e\xc3\xde\x99W\x1a\x08|\x00\xab\x14BZj\xe5\x8d\xe2v6i\x18\x92\x8c\xc1\xef\xed\xec\xd3\xbdn\xe7\x8b\x0c6\x1d\x9c\xc1+\xec\x81\xc0\x07\xb0z\xb1\xf8rj\xc5\x8d\xd3\xfb\xce;\x8b5\xfcF\xe5?\x1fg\x9f\xd4\xf1[x\x01\xe1\x0e\xcd\xe0=\xc9\xd4\xd8\x03\x81\x0f\xa0%\xaaBZQ\x11\xc0.B\x1d\xbfr{\xb4\xac\x09\x09\x1d\x98\xc1\x1bj\xec\xed\x09{ \xf0\x01\xac\x5c,\xc3\xb2U\x11\x5c&m|\xec-\x9e\xc1\xfb\xae\x1a{\xd0n\xd6\xd2\x05\xd6)\xecU\xad\xe1\x1a\x86L\xf7[\xf8\xb8\xc3c\xbe\xed9\x0b=}E\xb9}\xb5\xdc\xfev\x0b\x1e\xde;\xca\xae\x80\xc0\x07\xd0&!\xcc\xa5&j\x1c\xb6tH2\x0c\xe5\xee\xb4\xec1\x85p\xbc\xd7\xd6\xdeP@\xe0\x03\xd6P,\xc3\xf2,\xb1\xcbe\x5c_\xb7\x8d\xb6[\xf6xnk\xec\x99\x89\x0b\x1d\xe1\x1c>`]\x8c+\xaeo\xf3\xd2_\x1b-z,a\xd2\xc8\xb6\xb0\x07\xdd\xa2\x87\x0f\xe8\xbdX\x86%5$z\xda\xd6\x15!\xe2coS\xd8Sv\x05:H\x0f\x1f\xb0\x0e\xc6\x15\xd7\xb7\xb9w\xefQK\x1e\xc7\x91\xb0\x07\xdd\xa5\x87\x0f\xe8\xb5<\xcf\xab\xd6\xcb}\xde\xf2\xe1\xc9\x10\xb0Nb\xf0\x0b\xdb\xd6*\xc2\x9e\xb2+ \xf0\x01\xb45\xec\x85\x80T$v\xb9\xaa\xb8~\xe5\xe2,\xd8I\xc5s\xfc\xee\x02\x1f\x82\xb2+\xd0\x03\x86t\x81>\xab\xbd^n\x87-r\x06\xaf\xb0\x07=\xa1\x87\x0f\xe8\xa5<\xcfC\x10z\x92\xd8%\x94a9\xec\xc1S]D\xe0Sv\x05zF\x0f\x1f\xd0WUan\xd4\x93\xe79\xef\xc0'\xec\x81\xc0\x07\xd0~y\x9e\xefe\xe92,'=Z!b\x9e\x81/\x94]\x19\x08{ \xf0\x01\xb4=\xecU\xad\x97\x1b\xec\xf7\xe8)\xcfk\xd6\xae\x1a{ 
\xf0\x01tFU\x19\x96\x832\xd4\x5c\xf4$\xdc\x0e\xe7tS\xa1\xec\xca\xb6\xb0\x07\x02\x1f@\x17\x02\xd0 K\xf7\xde\x85\xf3\xd3\x0e{\xf4\x94\xe71\x9c\xfb\x5c\x8d=\xe8?\xb3t\x81>)\xb2t\x19\x96\xfd\x9e\xf5b\x0df\xfc}eW`M\xe8\xe1\x03z!\x0eo\xa6\xca\xb0\x9c\xf60\xdcL\xdb\xc3w%\xec\xc1z\xd1\xc3\x07\xf4E\xd5Pm\xd1\xc3\xe7\xbc3e\xd8Sv\x05\xd6\x8c\x1e>\xa0\xf3\xf2<\x1fe\xe9\xd9\xaaG=*\xc3r\xfb\x9c\xa7\xe9\xdd\xbb\x14\xf6`=\xe9\xe1\x03\xba\x1e|\xaa\xca\xb0\xb4~\xbd\xdc)5\x0d|\xca\xae\xc0\x1a\xd3\xc3\x07t]\x98\x95\x9b\x9a\xa8q\xd8\x972,\xaf\x194\xd8\xf7D\xd8\x83\xf5\xa6\x87\x0f\xe8\xacX\x86\xe5Yb\x97\xb0^n\xd1\xd3\xa7?\xac\xb9\xdf\x91\xb2+\x80\x1e>\xa0\xcb\xc6\x15\xd7\xef\xf7\xf8\xb9\xd7\x19\xd2=\x10\xf6\x80@\x0f\x1f\xd0I\xb1\x0cKj\x96j(\xc3r\xdc\xd3\xe7>\xc8\xd2\xc3\xd8\x81\xb2+\x80\xc0\x07t^U\x98Y\xd7\xde\xbd0Ie\xd4\xd7\xb0\x0b\x08|\xc0\x9a\xc8\xf3\xbcj\xbd\xdc\xe7=/=\xb2\x9d\x08{\xca\xae\x00\x9f\xe2\x1c>\xa0ka/\x94a)\x12\xbb\xf4\xb5\x0c\xcb]\xc3{~v.\xec\x01\x0f\xd1\xc3\x07tM\x08s\xa9\xf3\xd7\x8a5(?2x \xec)\xbb\x02\xdcK\x0f\x1f\xd0\x19qu\x89\xa7\x89]B\x19\x96\xc3\x9e\xb7A\xe8\xe1\xbc;\x9c}$\xec\x01U\xf4\xf0\x01]R\x15\xe6Fk\xd0\x06w\xcf\xdfSc\x0f\xa8E\x0f\x1f\xd0\x09y\x9e\xefe\xd5eX&k\xd0\x14\xc3x\xf9\xae\xb0\x07\xd4\xa5\x87\x0f\xe8\x0a\xbd{7B\x0f\x9f\x1a{\x80\xc0\x07\xf4K\x9e\xe7E\x96.\xc3r\xd0\xd3\xf5r\xef\xb3\xbfF\xcf\x15\x98\x13C\xba@\xdb\xc3\xde K\x17Q\x0eeX\x0e\xd7\xa5=\x84=@\xe0\x03\xfa\xa8\xc8\xd2eX\xf6\xcdP\x05\x10\xf8\x80\x8e\x8a\xeb\xe5>I\xecr\xee\x5c6\x00\x81\x0f\xe8\xb6\xa2\xe2\xfa}M\x04 
\xf0\x01\x1d\x95\xe7\xf9(K\x97a9Z\x932,\x00\x02\x1f\xd0\xcb\xb0\x17V\x93HM\xc4X\x87\xf5r\x01\x04>\xa0\xd7\xc2Pmj\xa2\xc6\xa1\xd9\xaa\x00\x02\x1f\xd0Q\xb1\x0c\xcb\xb3\xc4.\x97\xd9\x1a\x95a\x01\x10\xf8\x80>\xaa\x0as\xca\xb0\x00\x08|@W\xc52,\xbb\x89]\xc2z\xb9\xc7Z\x0a@\xe0\x03\xbak\x5cq\xbd2,\x00\x02\x1f\xd0Uy\x9e\x870\x97Z/7\x94a9\xd3R\x00\x02\x1f\xd0\xcd\xb0\x17\xca\xb0\x14\x89]B\x19\x16\xbd{\x00\x02\x1f\xd0a!\xec\xa5\xca\xb0\x14&j\x00\x08|@G\xe5y\xbe]^<M\xecrY\x86=eX\x00\x04>\xa0\xc3\xaa\xc2\xdcH\x13\x01\x08|@G\xe5y\xbe\x97\xa5\xd7\xcb=\xb5^.\x80\xc0\x07t\x9b\xde=\x00\x81\x0f\xe8\xab<\xcf\x8b,]\x86\xe5\xc0z\xb9\x00\x02\x1f\xd0\xdd\xb07\xc8\xd2eVB\x19\x16\x135\x00\x04>\xa0\xc3\x8a,]\x86\xc5z\xb9\x00\x02\x1f\xd0Uq\xbd\xdc'\x89]\xce\xcb\xb07\xd6R\x00\x02\x1f\xd0]E\xc5\xf5V\xd4\x00\x10\xf8\x80\xae\xca\xf3|\x94\xa5\xcb\xb0\x9c(\xc3\x02 \xf0\x01\xdd\x0d{\xd6\xcb\x05\x10\xf8\x80\x9e\x0ba.U\x86\xe5P\x19\x16\x00\x81\x0f\xe8\xa8X\x86\xe5Yb\x97\xcbL\x19\x16\x00\x81\x0f\xe8\xb4\xaa0W(\xc3\x02 \xf0\x01\x1d\x15\xcb\xb0\xec&v9U\x86\x05@\xe0\x03\xba\xad\xaawom&j\x94\xe1wRn\xd7\x1d\xdd&^\xca\xf4\xe0opTng\xaf\xbd\xb6\x8f\xe3\x17S\x81\x0f`\xca7\xd7\x10\xe6\xb6\x12\xbb\x1c]__\x9fi)`\x09\xefG\xe3\xf2\xe2\xc5=\xefIa\x04\xe2\xc3\xb8\xbe\xb7\xc0\x07\xd0\xf0\xcdU\x19\x16\xa0-\xefG\xe1\xbd\xe8I\xc5n\xcf\xfa\xdc\xd3'\xf0\x01\x8b\x12\xde`S\xeb\xe5\x1e\x9a\xa8\x01,I\xdd/\x97\x85\xc0\x07P\xff\xdb\xf4\xa0\xbcx\x9a\xd8\xe5\xb2\x0c{\x85\x96\x02\x96\xf0~4\xac\xf8\xf2y\xd7\x8e\xc0\x07P\xdf\xb8\xe2\xfa\x91&\x02\x10\xf8\x80\xee~\x9b\xde\xab\xf8\x96|j\xbd\xdcN*4\x01t\xd7\x1b\x9a\x00\x98\xb3\xaa2,\xa35n\x9b6\xcdH\x0e\x93j\xb6j\xee\xfb\x5cH\xa7\xc3.\x1a\xec{%\xf0\x01T\x883\xe16+\x82\xc3\xc5\xba\xb6O\xf9\xdc\xf7[t\xac\x8ek\x06\xbe\xf36=n\x98\xe2\xef\xee\xa2|\xbd\x9ff\xf5\xce\xcf\xeb\xed\x12\x8f\x86t\x81y\x05\x88\xd0c\xb4_\xf1\xcd\xb9\xd0R\xad8V\xe18\xed\xd6\xdc}\xa4\xc5\xe8\x81QV\xdd{w.\xf0\x01\xd4\xfbf\x9c\x9a\x09\xb7\xaf\x0cK+\xc2\xde\xa0A\xf0>P\x18\x9b>\x88#\x0b\xc3r;}`\x97\xa3p}\x9f\xdf\xa3\x0c\xe9\x02
\xf3\x08\x11\xe1\x8d4U\xd4\xf4\xdcz\xb9\xad1\xce\xea\x95\xa88W:\x87\x9e\x85\xbe\xf0\xe5eX\xbe_m\x97\x97a\x0b_~&\xe5v\xb6\x0e_F\x05>`\x1e\xaa\x82\x81s\xc0\xda\x11\xcc\xc3q\xa8[gl\xa4\xc5\xe8q\xf0[\xbb\x9ekC\xba\xc0\xac!bT\x11\x22N\xcc\xf0l\xc5q\xda\xce\xea\x0f\xe5\xbek(\x17\x04>\x80\xdb\x10a\xbd\xdc\xee\x18g\xf5\x86rC\x9d\xc4C\xcd\x05\x02\x1f\xc0\xad\x10\xe6ReX\x0e\xd7\xb9\x0cK\x8b\x82y\x08\xe5uJ\xb0\x84\x80>\xd2b \xf0\x01\xdc\x86\x88Ay\xf1,\xb1\xcbe\xd6\xe3\x12\x07\x1d:N\xdb\x15\xc7\xe9\xaeB@\x07\x81\x0f\xe0\xae\xaa0W(\xc3\xd2\x0a\xe3\x9a\xfb\x19\xca\x85\x1e3K\x17h,\x96a\xd9\xad\x08\x0fc-\xb5\xf2\xe3\x14\x02\x5c\xdd\xa1\xdc=-\x06+\xf9;\x1dd7%b\xb2ENp\x13\xf8\x80iT\xf6\xeei\xa2V\x84\xf2\xa75w\x1f\xe9\x8d]\xd9q\x0a\x13\x9f\xc2\xb1\xba\xad\x0d\x17\xfe\xbfS\x11\xce\xcf\xeel\x13\xc3\xf0\x9d:\xd6{\xf1x\x0f\xee;\xce\xe5>\x0f\x1d\xe7\xe3Y\xffF\x05>\xa0\xe9\x9b\xd6(K\xf7\x1a\x1d)\xc3\xd2\x8a\x0f\x96q\xcd\xddC\xd9\x9cc\xad\xb6\xd4\xe3\x13>\xecG\xf1\xc3\x7f\xab\xe1\xafo\xc4\xa0\xb0s\xe7\xf6\xc2\xea\x11\xe3y\xf4\xaa\xc7/\x0a\x1f\xd6\xdc\xfd\xcb\x8bz\xed\xc4\xd7\xf0EVofy\xe5\xe3\xa8\xfb\xbc\xca\xdb\xc9\x17\xf4\xe5+\x1c\xef'3\x1c\xe7\x17\xe5\xed\xdc.\xfd6U\xf8s\x0e\x1f\xd0\xf4M\xf8\xb0\xa2\xf7A\x19\x96\xd5+\xb2\xf4\xec\xe9\xbb\xc7k\xa4\xb9\x96\xf7e\xa9\xdcBo\xcd\xcb\xecf\x22\xcd\xd6\x9cnz'\x06\x82\x8b\x18.\xa6\x16\xbf\xac=\xaf\xb9\xfb8\xbe',\xc2\xb8f\xd8{\xde\xd6/,!\xd8\x97\xdb$\x06\xcd's\xb8\xc9\xf0zy\x11\x82p\x98y\xdf\xb4\xed\x05>\xa0i\x90H\xbd\x09\x1f\x1a\x1a\x5c\xf9\x87L\xe852\x94\xdb\xbe\xa0w\x11?\xac\xb7\x16xW!\xe4\x7f\x18\xcf\xdd\x9c%\xf4\x85/m\xe75v\x0d\xef\x05\xc7\x0bz\x0d\xef\xd6\xd8\xf5<k\xe9\xe9#q$\xe4,\xab\xbf\xb2M\x13\x1b\xf1\x0b\xc3\xb6\xc0\x07,\xe4\xdbjE\x90\xb8\xb4\xf6\xea\xca\x8fQ\x93\xa1\xdc\xe7\x86r\x97rL\x0ec\xd0\xdb\x5c\xe2\xdd>\x0d=K3\xf6\xbe\x8dj\xee\xb7\x13\x97\xec[\xc5k\xb8\x95_X\xee\x1c\xf3\x8d\x05\xde\xcdU\xd3Sg\x04>\xa0\xae\xaa7aC\xb9\xed8Fu>dB\x8dD\xe1|9V\xb5D\xddN\x83\xe0\xf4)qi\xbdwk\xee^\xc4/\x84\xcb|\x0d\xb7r\xf9\xbfX\xe4\xfc\xe9\x12\xee\xaa\xf1\
x975\x81\x0f\xa8\xf3&6\xcc\xd2C\x13\xa7z\x8bV~\x8c\xea\x0e\x83\x05\x86r\x97g\x96\xbf\x8b\xf3\x19\xef{7\x06\x90iC_\xe8\xa9:\xad\xb1\xeb\xc6,\xe1r\x8a\xd7p+kF\xc6\xc7\xff\xac\xad\xaf+\x81\x0f\xa8\xfb\xad;\x19 4\xd1J?h\x9a\x0e\xe5N\xb4\xdar\xc4`}\x94\xd8%L\x9c9\xc9nz\xd3\xde.\xb7\xcf\x86\x99\xa2q\xdb\xbe\xfdw\xf9\xf3\xc7\xe5\xf6\xe5x[W\x0d\x1e\xc2\xb3\xb8\xda\xca\xb4F5\xefo\xa6\xa1\xdd\x06\xaf\xe1VN4\xaa1\xa1\xedS\xa1\xf5\xf6\x98\xdf9\xde\xb7\xc7\xf9\xedx\xddC\xc7\xfaj\x9a/\xd8\xca\xb2\x00Uod\xa1\x87`\xb3\x22@\x5ch\xa9\x95\x0ao\xfe\x86r\xdb}|\x9e\xbc\x16Z\xc2\xcf\x0e\xeb\x0eK\xc6\xbf\xb1\xb0\x1d\xc7p\x11\xc2U\xdd\xde\xa4\x10D\x86S\x06\xd6\x8b8\x01\xe1\x83\x1a\xbb\x87\xa1\xdd\xe3)\xdf\x0f\xc65_\xc3\xa3\x96\xbe\xdfT\xad+~+\xf4\xda\xee?\xf4\xa5\xeb\xceq\x9e\xdcy\x0f\x0e=\x87{w^C\xe3i\x1e\xa0\x1e>\xa0\xea[\xeb~\xc5\xb7m\x01b\xb5\xc7(\x1c\x9f\xba3\x01\xf7\x0c\xe5._\xec\x8d\xb9\x8a\xdbA\xb9\x0d\xca\x9f\x8d\xa6=\x07-\x1c\xc38A\xea\xed\xac~\xef\xdb\xf6\x8c\x8f\xff\xa4\xc6\xaeS\x0d\xed6\x18\xca=j\xf1\xa9#\xa3\x9aao\xd8\xb4\x87=<\xe7\xf0z\xc9nz\xffN\x04>`\x11\x0e+\xbeu[/w\xb5ao\xd0 p\x1f\xb4\xf1$\xf752\x8aAon\x7f318\x8cj\xee\xbe?\x87\xc7?\xf7\xa1\xdd\x06C\xb9\x97YK'\x86\xc5s\x9c\xabz\xf7\xaeb\xd8{5\xc3\xf1\xbe(\xb7\xbdi\xff\x8e\x05>\xe0\xa17\xb1\xd0#\x90*\x16z\xde\xc6\x13\xa7\xd7\xcc8\xab7\x0cv\xaed\xcej\xc5^\x9aW\x8b\xb8\xdd,}\x8e\xe0\xad\xbd\x19\xef\xe7U\x83\xdbh2k\xb7\xeek\xb8\xcd\xbd\xd3\xc3:\xcfs\xd5\x8f_\xe0\x03\x1eR\x15\xe6\x94aYm 
o2\x94;\xd2b\xbdV'\xcco,q\x15\x8eZC\xbb\xf1\xf1\xd4\x19\xcam{\xeft\x9dv]\xf9P\xb4\xc0\x07\xdc\xf7F<\xaa\x08\x13'fz\xae\xf4\xf8\x84\xde\xd7\xf7j\xeen(\xb7\xe7\xe2\x89\xfeu\xce\xb1\x1b\xce\xe1\xeeB\xb8\xacS.&9\xb4\xdb`(\xf7\xb4'\xbd\xd3+\xff\x1b\x14\xf8\x80\xfb\xde\x88\xab\xde`\xf5\xee\xad\xd6\xb8\xe6~\xa7\x86r\xd7F\x9d/`\xdb\xb3\xdeI\x1c\x96\x1c\xd5\x0d\x87\x89\xd5>\xea\xccj\xed\xd3Z\xcf\xdb\xab~\x00\x02\x1f\xd0\xf4\x8d\xf8@\x19\x96\x95\x06\xf2\x10\xe0\xea\xac\xc7\xda\xa7\x0fK\xaa\xd5\xe9Az4\x8f;\x8a=\xc6\x075v\xbdwh7\xf6P\xd7))3\xea\xd1{\xcd@\xe0\x03\xda\x14&\x06Y\xba\xf7.\xcc\x943Qcu\xc7\xa7\xee\x07eP\x08\xe6k\xa5\xce\x84\x80\x9dy\xddY\xec9\xae\xb3\x0a\xc7\xee=\xe7\x0e\xd6y\x0f9\xea\xd9\xea=\x85\xc0\x07\xb4\x892,\xed\x0d{MV\xd385\x83z\xbd\xac\xe8<\xcdQV\xafT\xcb\xf8\xce\xebxT#x\xb6\xb6\x04\xcb\x03\xea\xb4\xfdf\xf9\xdc\xc7\xab|\x90\x02\x1fp\xfbF\x1c\xbe\x85\xefV\x84\x88\xb1\x96Zi\x0f\x81\xa1\x5c\xda\x142/j\x06\xb3\x10v\x8a\x06\xcb\x8fu\xad@\xf8\xa4\xe6~OB\xe8K\x9c\xd7\xb8P\x96V\x03nU\xbd\x11\x17\x9ah\xa5a\xfci\xcd\xdd;q\xdeS\xf9\x9c&K\xb8\x9b\xb3\xb2-\xf6;z\xccC(\xd8\x8e[\xf8\xf70^5\xc8\xea-\xe1\xb5\xac\xd07\xae\xb9R\xc6~|\xecU5\xf7:7\xab<\x0c=\x97mpY\xf3\xb8\x84\xda\xa6\xc3\x10\x80\x97\xfd\x05Z\xe0\x03n\x87YR\xbdGG\xca\xb0\xac\xf4\x83\xbf\xee\x07\xc3I\x87\xce{\xdaqt?u\x9c\xf7b\xb0\x1b\xb6)\xd4\xd5\xf9\x92\x91\xdd\xac\xff\x9a\x0as\x1bY\xba\x90{\xd0\xe5Y\xe5\xe1q\xbf\xa8\xb9o8\xb6/\xe2\x04\xac\xb0\x1d/\xa3G\xd3\x90.\xf8\xa0\xa9\x1af\xb9\xca\x94aY\xf5\x07I\x9d\x0f\x7fC\xb9\x1d\xfd\xb2Un!\xa4\x7f7\x06\x86'\x1d\x0b{MW\xe1\xe8\xe5\xeb7\xf6\xd6\x9d6\xfc\xb5\xcdx\xcc/\xe2P\xefBK\xb7\x08|@Q\xf1\xcd\xfc\xd0D\x8d\x95\x85\x81\xf0!\xdad(\xd7q\xea\xc8\x97\xacxN\xdb\xab\xf8\x81\xbf\xdb\xf5\xe7\xd4`\x15\x8e\x87\xec\xf7`Vy\xf8{=\x9f\xe2\xf7n{?\xbfQ\xbe&\xce\xe2\x97\x80\xb9\x9f\xe7'\xf0\xc1z\x7f\xf0\x0c*\x02\xc5\xa5\xc2\xbd\xab\x0b\x05Y\xfd\xa1\xdc\xbe\x95\xb0\xe8\xf3q\x0d\xbd\xe5!\xd8<\xcb\xea\xad!\xdb\xb5/\x8f\xd3\x04\x9e\x93>L\x08\x8b_\xb8\x86S\xb6\xc1\xad\xad\xec\x93^\xbfb\x9e\x
c1O\xe0\x83\xf5V\xf5&k(w\xb5\xc7\xa6N \xb8t\x9c\xba\x11\xe0\xe3D\x95\xf7z\x18\xf4\xee\x06\x9eQ\xc3_\xbb\xcczt*\xc2\x9d\xd0w:\xe3Mm\xc4/\x05\x17\xf1\x5c\xbf\x99\x99\xb4\x01\xeb\xfb\x01\x14\xde\x94R'\xce\x9f\xea5Z\xd9\xb1\xa93\xeb\xf1\x96\xa1\xdc\xf6\x1f\xcfpn\xd6d\xc6\xa0\x17z\x8d\xc2q>\xcb\xbe\xbf\xc8\xf2\xe4\xce\xbf\xc3\xb9\xb8[\xab|\xaea\x86m\xf9|\x8f\xb2\xea\x09\x1a\xb7\xce\xfa\xf6\xfa\xbd\x0d}1\xa8=\x9b\xf1\xe6>\x0e~\xf1=a4\xcb\x0cf\x81\x0f\xd6\xd7\xb8\xe2z\xbdF\xab\x09\x07M\x86r\x9fwu\xf6t\xf9\xb8sa/)\xf4\x10\x1d\xc7@4\xa9y_\xafZ\xf2|\x9f4\xf8\x95\xb0\x12\xc7^\x1f\xbf\x5c\x86\xd3ab\xb1\xe5\xc3l\xf6\xf34C\x90\x9f\x84S\x02\xa6\x1d\xfe\x16\xf8`=CE\xd5z\xb9\xcf\xbbV\x0b\xabG\x8e\xb3\xfaC\xb9\x85\xe6j}xo\x12\xf6\xaeb8\x18wq\x02C|\xbe\xd3\x04\xb7\x8fg\xa8\xf6q)\xc0\xf8\x9c\xf6\xe2\x88\xca\xa8a\x18~]x\x1d\x85r.\xd94\xa1\xcf9|\xb0\x9e\x1fBE\xc5\x87\x8e \xb1\xba ^\xb7>\x9d\xa1\xdc\xfe\x84\xf7\x8f\xbfd\x95\xdb \xf4\x0au8\xf8\x1cf\xd3\x95\x94\x09m4\xee\xf3\x0b!\xf4\xd2\x96[\x08|\x8f\xcb\xed ~a\x9b\xd6\x8b8\xc4+\xf0\x01\x95o\xca\xd6\xcbm_\xd8\x1b4\x08\xda\x07\x0aa\xb7\xfex\x8ej\x86\xf7\xf0\x05\xeb\xed\xb0\x22H\x97\xff\xeeb\x00\x99\xa5\xf7j'~\xe1\xe9\xb5\x10\xe6c\xa8\x0f\x7f\xef_,\xb7\xa3\xac\xdez\xc4\xaf\x1b\xc7\xf7\x0c\x81\x0f\xb8\xf7M\xb9\xea\xfc\x9aP\x86\xe5PK\xad\xc48\xab\xd7\x1bt\xaeTN'\xd4=F\xc3\xae\x87\xf7\x86\xe7\x9d&\xdb\xaci\x88\xe9x\xf8;\x8b\xbd~\xe19\x1f4\x0c~\x1bY\xc3\x91\x18\x81\x0f\xd6KU\x98\x1bi\xa2\x95|`\x867\xee\xdaC\xb9Z\xac\xf5\xc73\xf4v\xd5\x19\xda<\xe8\xc9\xb9\xb2u\x86\xaeOj\x86\x98\xb5\xab\x0c\x10zv\xe3\x97\xb8\x10\xfc\x8e\x1a\xfc\xea\x93&\x01Y\xe0\x83\xf5\xfa\x10J\x85\x8a\x13\xc3\x84+9.\xa1\xd7\xb5n\xe9\x86\x03\x93i:\xa1\xce\xf9UW}\xe8\xa9\xady\xdei\x189\xd8\xab\x19f\xb6\xe6Uw\xae\xa3\xc1/|\xa1{g\xce\xaf5\x81\x0f\xd6(TT\xad\x97\x1b(\xc3\xb2\x1a\xe3\x9a\xfb\x19\xca\xed\x8e\xe1\x1c\x8f{\xdb\xbf\xac\xd4yM\x8e\xee\xbc\xc7\xd4\x19\xb6|\xb6\xe8ue[\x1e\xfc\xc6\x0dB\x9f\xc0\x07|*\xcc\xa5\x86\x98\x0e\xfaX\x12\xa1\x03\x1f\x98\xe1\xc3\xb2N\xa1\xdc\xabl\xf6\xc5
\xe9Y\xde\x97\xab:\xc3\xb9\x93\x05\xdc\xfd\xce\x92\x9fn\x08&\x95C\xb9\xb7#\x07qRJ\xd1\xe0\xb6\xd7V\x0c}u\x86\xc1k\x07c\x81\x0f\xfa\xff\x014\xc8\xd2\xbdw\xb7\xb5\xbfX\xeeqi2\x94[\x08\xe4\x9dQ\xf7\x03\xf8b\x01\xaf\xa7e\xbe~\xeb\xac\xeaq\xf5\xfa{O\x9c\x14Vg\xad\xd9\xadx\x1f\xeb\xacN8\xae]\xd0[\xe0\x83\xf5x\xd3H\xbd)\xec+\xc3\xb2\xf4\xb0\xd7\xa4@\xed\xa9\x99\xd3\xfd\xb3\x80s1\x07K|\xfd\x0e\xcb\x8b\xa73|Q\x19\xd5\xbc\xab\xa7\xf1\xbe\xbcF\xe6@\xe0\x83~\x07\x8b\xf0f\xf9\xa4\x22L\x8c\xb5\xd4JBx\x9da\xbf\xab\xcc\xac\xdc\xbe\xfem\xce;\xa0\xed-\xe9q\xd7-\xc1r\xfe\xd0\x17\x95\x18d\x9e\xd7\xbc\xcbq\xbc\xcfuu9\xaf\x1b\x12\xf8\xa0\xdf\xaaz\x86\x0aM\xb4\x92\x10\xfe\xb4\xe6\xee\xfb\x86r{k0\xc7\xd7\xd4 \x9b\xad\xe8q\x13\xe3\x9a_V\xaa\xbe\xa8\x145\xc3\xcc\xe6\x9a\xbfO\xcd-\xec\x0a|\xd0\xdf`\x11\xdepS\xe7\xd8\x1c)\xc3\xb2\xf4c\xd2\xa4@\xed\x89\xde\xd7^\x9bg\x8f\xdcxI\xaf\xdf\xf0\x98wk\xecZY>(\x9eF2\xaay\xd7k9\xb4\x1b\xdf/6\xe6u{\x02\x1f\xf4\xf7\x8d\x22\xd5\xbbg\xbd\xdc\xd5\xa8\xbb\xd6\xa8\xa1\xdc\xee\xaa{\xde\xd5\xde\x9c\xfe\xd6\xc3\xdf\xf1N\xc3\xf7\x86i\xeegP3X^f5'\x81\xc5/\x9cu\x87v\x8f\xd7ph\xb7\xcek\xe4T\xe0\x83\xf5\xb6_\xf1\xcd\xf0\xd0P\xe1\xd2Cx\x93\xb5FG&\xd2tS<n\xb5\x86*g];6\xf6\xe2?k\xf8k\xd3\xce\xe6\x1dg\xf5z\x9b\x9a\xbev\x8b\x9a\xed\xb5\x91\xb5\xb4TK\x08\xa2\xf3\xee\x81\x8c\xe1\xb6\xce\x97\xf2\x89\xc0\x07\xeb\x1b,\x06\x15\x1f\x02\x97\x0a\xf8.\xff\x03!k6\x94{\xac\xd5:\xad\xee\xf1+\xa6-\xa7\x12{\xf6^,\xe9\xf5[g5\x8d\xe0y\xd3\xd3D\x1a\x0e\xed\xee\xc6/Nm\x13\x1e\xd3\x87\xe5c\x9b\xcc#\xf8\xc5\xf7\x8b\xd0\x8euF\x03j\x87\xe07\xfc]B\xefT\xbd\x01XQc5\xc7\xa4\xee\xb98\x83\xf0\xc1\xd1\xd2\xe7\xb1oi\xb7Z\xc2\x90f\x9d\x899\xe15\x11B\xc2\xa8n\xc8\x8f\x81\xa2N\x0d\xbcy\x85\xbd\x10H\xdf\xab\xb1\xeb\xd4\xa7\x89\x84\x90X\xde\xcf\xf3\x9am\x16f\xed\x0eZ\xd6\x03~\x1bBwb\xf0;\x8d\x7f\xf3\xc7M\x1fg\xc3\xe3{\xdad\xa4F\xe0\x83\x1e\x89o\x16;\x15o\x10z\x8f\x96{L\xea\x9e\xe8~k\xab\xc5O\xe7\x91#Z+\xc0\x5c\xc4\x0f\xfd:\xbdb!\xf4}\x90\x0a\x091t\x85\xbf\xedQ\xc5\xeb\xe3*\x9b\xe3I\xfe\x0d{\xa6g=\x0d\xa1\x88
\xc1i\xb3F{\x1dg\xf5\x96\xaf[\xc6\xdf\xf7\xa3{\xfe\xbew\xe2\xf6\x22\x1e\xd7I\xdc\xce\xeek\xa3\x06\xc7w\xa6/\xef\x02\x1f\xf4K\xd5\x9b\xb3\xde\xbd\xf6\x1d\x13\xfa)|x\xbfl\xb0\xff\xdd\x900Mx\xbb\x9d\xe8\xf3\xc1\x1c\x9fCQ3\x80\xcc|\x1aB\x08B\xf1\x9c\xc4\x0f\xeb\xb4U\x18fnIA\xf2\xbd\x9a\xc7\xf5Y\x0cw\xb7?\xbf\xcc\xea\x0d\xd9>\xe4\xa0io\xbbs\xf8\xa0'\xe2y6\xa97\x90\xe7\x86\xe3VbC\x13\xac\x9f8\xd4\xf6\xee\x92^7!\xec\x0dk\x86\xaeZ\xe7\x0c6\xa8\x179\xb7\x19\xe5\x0dg\xed\x16\x0b(^\xbd\x88\xc0\xf7\x90Y\xc2\xde\xd14\xe7a\x0b|\xd0\x8f\xb0W5\xa3K\x19\x16X~\xe8\x0b=PG\x0b\xbe\x9b\xdb\xb0W\xf7\xcb\x5c\xe5\xb0|\xc3\xa5\xff\xe6=\xa3\xbc\x88\xcf\xa9N \x1e\xb7\xe0}ww\xc9w\x1b\xc2\xdeT\x01[\xe0\x83~8\xac\xe8\x11(\x94\xf9\x80\x95\x84\xbe\xf0\xe1\xfc|A7\x1f\xce\x0f\x1b\xbc\x16\xf6\xaa\xea\xb2\xd59\x0fs\x9c\xd5\xeba\x9c\xfb9\xc1\x0dg\xed\xee\xc4\xd9\xca\xab2X\xf2\xfd\xbd;m\xd8\x13\xf8\xa0\x07\xe2\x09\xbf\xa9\xfan\x97-9\xd7\x05\xd65\xf4\x85\xd3-\xbe\x9c\xd5\xeb\xb9\xaa#\x9c\xff\xf5Ny\xbb\xc3)\xbe\xc8mW\xbc\x9f\x84@Q\xa7\xd7ja\xc5\xc1c\x88<\xa9\xb9\xfb\xb3iK\xdb\xcc\xe1q\x86\xa0\xfd8[|/n\xb8\xfd\xc7\xb3\xbe\x8f\x0b|\xd0}Uo\x02#M\x04+\x0f}!\xc4\x0c\xca\xed`\x86\xe0w\x1e\x83\xde \xb1\xec^\xd5\xd0\xee 
\x11\xf6\x06Y\xcdU2\xb2\x9bQ\x83\x8b\x056\xd9\xa8A;\x8dWx\x5c/b\xaf\xdbg\xb3\x9bs6O\xe7t\xd3\xe1\xb9?\x8fAo4\x8f\xb6\xce\xcb\x1b\xf1\x97\x08\xd1\xc6W\xde\x9ad\x0d\x96)\x8aN\xaf\xde\xffh\xb8\x8a\xc7\x1bK~\xa4f\xe5\x85!\x97\xa1#\x0b\xed\x12\xffv\xc3\xdf\xe6v\xe2=\xe72\x06\xb8\xf0\xbetlu\x9c\xce\x1c\xdbG\xf1\xb8\xde\x1e\xdfG5>WBP|\x15\x8f\xf5d\x11\x13\xec\x94e\x81n\xd3\xbb\x07\x1d\x14{\xfc\xd4\xc4\xec\xe7\xb1\xfd^pk\xd3\xe32\xa4\x0b\xdd\xfd\x16Yd\xe9\xa9\xfd\x07z\x04\x00\x10\xf8\xa0\xbbao\x90\xa5\x8b(\x87\xf3?L\xd4\x00@\xe0\x83\x0e+\xb2t\xd9\x84}eX\x00\x10\xf8\xa0\xa3b\x05\xfcT\x19\x96\xf3\xc4\x0c>\x00\x04>\xa0\x03\x8a\x8a\xeb\xad\x97\x0b\x80\xc0\x07]\x15\x8b\xa2\xa6\xa6\xf7\x1f\xc5\xf5(\x01@\xe0\x83\x0e\x86\xbdP\xcb)5\x11\xc3z\xb9\x00\x08|\xd0qa\xa865Q\xe3P\x19\x16\x00\x04>\xe8\xa8X\x86\xe5Yb\x97P\x91_\x19\x16\x00\x04>\xe8\xb0\xaa0\xa7\x0c\x0b\x00\x02\x1ftU,\xc3\xb2\x9b\xd8\xe54.\xd3\x04\x00\x02\x1ft\xd4\xb8\xe2zeX\x00\x10\xf8\xa0\xab\xf2<\x0fa.\xb5^n(\xc3r\xa6\xa5\x00\x10\xf8\xa0\x9ba/\x94a)\x12\xbb\x842,z\xf7\x00\x10\xf8\xa0\xc3B\xd8K\x95a)L\xd4\x00@\xe0\x83\x8e\xca\xf3|\xbb\xbcx\x9a\xd8\xe5\xb2\x0c{\xca\xb0\x00 \xf0A\x87U\x85\xb9\x91&\x02@\xe0\x83\x8e\xca\xf3|/K\xaf\x97{j\xbd\x5c\x00\x04>\xe86\xbd{\x00\x08|\xd0Wy\x9e\x17Y\xba\x0c\xcb\x81\xf5r\x01\x10\xf8\xa0\xbbao\x90\xa5\xcb\xac\x842,&j\x00 \xf0A\x87\x15Y\xba\x0c\x8b\xf5r\x01\x10\xf8\xa0\xab\xe2z\xb9O\x12\xbb\x9c\x97ao\xac\xa5\x00\x10\xf8\xa0\xbb\x8a\x8a\xeb\xad\xa8\x01\x80\xc0\x07]\x95\xe7\xf9(K\x97a9R\x86\x05\x00\x81\x0f\xba\x1b\xf6\xea\xac\x97[h)\x00\x04>\xe8\xae0T\x9b*\xc3r\xa8\x0c\x0b\x00\x02\x1ftT,\xc3\xf2,\xb1\xcbe\xa6\x0c\x0b\x00\x02\x1ftZU\x98+\x94a\x01@\xe0\x83\x8e\x8aeXv\x13\xbb\x9c*\xc3\x02\x80\xc0\x07\xddV\xd5\xbb\xa7\x0c\x0b\x00\x02\x1ftU\x9e\xe7!\xccm%v\x09eX\xce\xb4\x14\x00\x02\x1ft3\xec\xd5)\xc3\xa2w\x0f\x00\x81\x0f:,\x84\xbd\xd4z\xb9\x87&j\x00 
\xf0AG\xc52,O\x13\xbb\x5c\x96a\xaf\xd0R\x00\x08|\xd0]\xe3\x8a\xebG\x9a\x08\x00\x81\x0f:*\xcf\xf3\xbd,\xbd^\xee\xa9\xf5r\x01\x10\xf8\xa0\xdb\xaa\xca\xb0\x8c4\x11\x00\x02\x1ftT\x9e\xe7E\x96^/\xf7\xb9\xf5r\x01\x10\xf8\xa0\xbba/\x94aI\x95Y\x09eX\x0a-\x05\x80\xc0\x07\xdd\x15\x86rSeX\xf6\x95a\x01@\xe0\x83\x8e\x8a\xeb\xe5>I\xecrn\xbd\x5c\x00\x04>\xe8\xb6\xa2\xe2z+j\x00 \xf0AW\xe5y>\xca\xd2eXN\x94a\x01@\xe0\x83\xee\x86=\xeb\xe5\x02 \xf0A\xcf\x850\x97*\xc3r\xa8\x0c\x0b\x00\x02\x1ftT\x5c/\xf7Yb\x97\xcb\xac\xba\x083\x00\x08|\xd0bUa\xaeP\x86\x05\x00\x81\x0f:*\x96a\xd9M\xecr\xaa\x0c\x0b\x00\x02\x1ft[U\xef\x9e\x89\x1a\x00\xac\xc4\x1b\x9a\x00f\x17\xcb\xb0l%v9\xba\xbe\xbe>\xd3R\x00\xdd\xf3\xf5\xc7_\x18\xc7\x7f\x16_z\xf9\xad\x8b.>\x07=|0\xa3?\xf9\xc7?\x10\xbe8\xa5z\xf7\x94a\x01\xe8\xb6\xe3\xecf\xe5\xa4\x97e\xf8\x9b\x94\xdbH\xe0\x835\xf3\xf8\xdb?6\xc8\xd2\xeb\xe5\x1e\x9a\xa8\x01\xd0]_z\xf9\xad\x10\xf8.\xe3\x7fCQ\xfd\x17e\xe8\xbb(\xb7\xa2\xdc\x1e\x09|\xd0s\x7f\xee\x0f\xdf\xc8~\xf5\x83_\xf9|b\x97\xcb2\xec\x15Z\x0a\xa0\xf3^\x1f\xc9\x09\xf5VC\x19\xae\xef\x86!\xdfr\xdb\x16\xf8\xa0\xa7>\xf7\xf2\x07\xabv1\x94\x0b\xd0\x0f\xe3\xec\xe6\x14\x9d\xfb\x84\xe1\xdeo\x94\xa1\xef\xac\xad\xc3\xbd\x02\x1fL\xe9\xa7\xfe\xe0G\xb3\xd3_\x9e\xa4v\x09eX\x8e\xb5\x14@\xf7}\xe9\xe5\xb7\xc2\xa99U\xef\xe9a\xf2^\x18\xee}\x15\x87{\x07\x02\x1ft\xdc\xef\xfe\xfb\xefT\xed2\xd2J\x00\xbdRw\xa5\xa4p^w\x18\xee\x0d\x93<\x8e\xcbm(\xf0A\x07\xfd\xb5\xef<\xca\xbey\xf6\xeb\xa9]\x9e[/\x17\xa0_\xbe\xf4\xf2[\xa1\xbc\xd6i\xc3_\x0b\x05\xf9?\x8c\x93<F\xab\x9a\xe4!\xf0AC?\xfe\xc7\x9f\xc9\xbe9I\x86\xbdp\x8eG\xa1\xa5\x00zi<\xe5\xef\x85I\x1e/\xca\xed\x22N\xf2\x18\x08|\xd0bo\xfe\xce\x8fg\xdf\xfe\xf6\xb7S\xbbX/\x17\xa0\xa7\xbe\xf4\xf2[!\xf0]\xcep\x13a\xb8\xf7nM\xbf=\x81\x0fZ\xe6/\xfc\xdf\x1f\xca~\xf5\x97\xfeEj\x97\xf32\xec\x1dj)\x80^\x1b\xcf\xe9vBM\xbf\x0f\xe2p\xef\xfe\x22\x87{\x05>h\xe0\xc7.+\xffd\x94a\x01\xe8\xbfy\x7f\xb1\x0f\xc3\xbd\xefe\x0b\xac\xe9'\xf0AM\x7f\xf5\xf7~\xa2\xaa\x0c\xcb\xc9\xf5\xf5\xf5DK\x01\xf4[,\xd1r\xb4\xa0\x9b\xbf\xad\xe
97\xd7%\xdc\xdep\xd8\xa0Z\x98\xa8\xf1\xea\x9b\xff3\xb9\xcf?\xfa\xb9\xbf\xf9\x8bm\x98z\x0f\xc0R\x9c\xc5p\xb6(a\xb8w\xa7\xfc\x5c\x09\xbd\x89a\x1b\x97A\xf3b\xda\x1b\xcb\xaf\xaf\xaf\x1d2\x886\xbe\xf2\xd6$\xfe\x91}\x9fP\x86\xe5W\xfe\xd9/?\xf8{\xc5\xde\xcfd?s\xf6\xdf5 \x00\x8bt\x14\x83\xdf\xa4\xe9/\x1a\xd2\x85{\xfc\xe8\xef}\xee{\xff\x0e\xeb\xe5\xa6\xca\xb0\xec\xbc\xf5\xd3\xd9\xdf\xf8\xadok4\x00\x16-\xf4(~x\xbb\x84[\x93I\x1e\x02\x1f\xbc\xe6\xc7~\xeb/g\x8f\xbe\xf1S\xd9\x0f}\xe7'?\xfe\xff\x9f\xf9\xed\x1fI\x96a\xf9\xf2\x9f\xfa\xe1\xec\x87\x7f\xf7\x8f4\x1c\x00\xcb\xf2\xf1\x12n\xd9MM\xbf\xc3:5\xfd\x0c\xe9\xc2\x1d\x9f\xff\xd9_\xf8\x9d\x9f\xf8\xf6\xc6\x9f\xbe\xfd\xff\xe7\xfe\xe2\xcb\xec_\xfd\xe2?\x7fp\xff\x9f\xff\xd9\xb7\xb3_\xf8O\xbf\xad\xe1\x00X\xb5\x93\xecf\xb8\xf7\xde\xf5~M\xda\x80\xe8\xcd\xad\xaf\x8e\x7f\x22\xfb$\xec\x05\x9f\xf9\xed\xab\xe4\xef\xfcl\xf6\xbf4\x1c\x00m\x10\x96p\xdb\xfd\xfa\xe3/\x84\xa2\xd0\xb7\x93<\xbe\xb7\x08\x80\x1e>\x88a/\xbbg\xb6\xd5\xc6\x8f\xfcQ\xf6'~\xf47\xb3\x7f\xf9\xe1\xc9\xa7~\xe7\xef\xff\xdc\xdf\xca\xfe\xee\xaf\xff7\x8d\x07@\x1b\x85\x1e\x8b\xd0\xdbw\x18\xd6\x00\x16\xf8\x10\xf6\x1e\x08{w\xfd\xd9\xcf\xfeA\xf6\x7f~\xff\xdfd\xff\xfa\xdf\xfd\xdb\x8f\xff\xbf\xb99\xc8\xfe\xe9\xc6\x8f8w\x0f\x80.\x04\xbf}\x81\x0fa\xafA\x1d\xa5\xbf\xf3\x93\xdf\xca~\xe9?\x9ee\xef|\xf1\xcf+\xc3\x02@\x9b}\xdf\xd0\xae\xc0\x87\xb0\xd7\xd0;?\xf8\xcd\xec\xaf\x7f\xe775 
\x00mt\xef\xe4\x0d\x81\x0fao\x0a\x7f\xef\x0f\x7f=\xfb\xe9\xdf\xff\xaf\x1a\x12\x806\x08\xc3\xb6\xe1s\xed\xf0\xa1\xd58\x04>\x84=\xa1\x0f\x80n:\xcfn\x86m\x8f\xef\xce\xc8\xbd\x8f\xb2,\x08{S\xfa/?\xf0\xb9\xec\xa73\x81\x0f\x80\xa5k\xbc\xc4\x9a\xc0\x87\xb07\x85\xb7\xaf\xffG\xf6\xf3\xbf\xf7k\x1a\x15\x80e\x09\xc3\xb6\xb7\x930.\x9a\xfe\xb2\xc0\x87\xb0\xd7\xd0\xe7\xf3\xff\xfd\xcb?\xff\xbb\xbf\xf65\xad\x0a\xb0\xd6\x86\xe5\xf6l\x09\xf7s\x1aC\xdex\x96\x1b\x11\xf8\x10\xf6\x9a9\xfa\xd5\xb3\xe7#\xad\x0a\xb0\xde\xbe\xfe\xf8\x0b\xfb\x0b\xbe\x8b0l\xfbq\xd1\xe4y\xdc\x98I\x1b\x08{\x0d\xfe\xf8~\xe3\xfck\xc2\x1e\x80\xb07(/^.\xe0\xa6\xef]\x16m\x1e\xf4\xf0!\xec\x09{\x0043\xef\xde\xbd0l{\xf8z\xed\xbcy\xd2\xc3\x87\xb0'\xec\x01P\xd3\xd7\x1f\x7f\xe1QyqQn\x1b3\xde\xd4\xedZ\xb7\xc54\x930\x9a\xd2\xc3\x87\xb0'\xec\x01P\xdf\xde\x8ca/\x0c\xdb\x16Y\x8d\xday\x02\x1f\x08{\x00\xacF1\xe5\xef\x85%\xcf\x0e\x9b\xd4\xce\x13\xf8@\xd8\x03`\xc9\xbe\xfe\xf8\x0b\xc3\xf2b\xb3\xc1\xaf\xccT;O\xe0\x03a\x0f\x80\xe5\xab\xfb\xd9\xf0\xf1\x92g\xb3\xd6\xce\x9b'\x936\x10\xf6\x84=\x00*\xd4,\xc52\xd7\xday\xf3\xa4\x87\x0faO\xd8\x03\xa0\xdaC\x9f\x0fa\x12\xc68\x06\xbdWm}\xf0\x02\x1f\xc2\x9e\xb0\x07@\xb5\xd7k\xef\xcde\xc93\x81\x0f\x84=\x00Z\xe0\xeb\x8f\xbf\x10>#B)\x96\xa5\xd6\xce\x13\xf8\x10\xf6\x84=\x00\x96gXn\xeff\x0bX\xf2lYL\xda@\xd8\x03\x80\x9e\xfb\x8c&@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x04>\x10\xf6\x00@\xe0\x03a\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00@\xe0\x03a\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x0
0\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81O\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\xd3\xaa\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\x08|\xc2\x9e\xb0\x07\x00\x08|\xc2\x9e\xb0\x07\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 
\xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\xb0\xb6\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x16\x05\xbe2\x9c=\x12\xf6\x00\x00z\x1c\xf8J\x87eH\x1b\x08{\x00\x00=\x0c|\xb1w/\x04\xb4\xd1\x1cnK\xd8\x03\x00h[\xe0+\xed\xc7\xcb\x99\xc2\x95\xb0\x07\x00\xd0\xde\xc0w\x1b\xac6\xcb\xd0\xb6'\xec\x01\x00\xf4(\xf0\xc5\x80\xb7yO\xf8\x13\xf6\x00\x00\xfa\x10\xf8\xee\x09x\xbbM&o\x08{\x00\x00-\x0e|1\xd8\xed\xd6\x08\x81\xc2\x1e\x00@\x17\x03_\x22\xd8U\x06/a\x0f\x00\xa0\xdb\x81/9yC\xd8\x03\x00\xe8@\xe0+C[\x08Y\x9b\x89]\xf6\x85=\x00\x80\x0e\x07\xbe\xacz\xd8v\xe7\xf5\xc9\x1b\xc2\x1e\x00@G\x02_\x0cr;5v\xdd\x17\xf6\x00\x00:\x18\xf8\xb2\x07\x86k\xef1\x12\xf6\x00\x00V'\xbf\xbe\xbe\x9e\xea\x17\xcb\x00\xf7\xaa\xbc\xd8\xa8\xb9\xfby\xb9m\x09{\x00\x00\xcb7U\x0f_\x9c\xac\xb1\xd1\xe0W\x84=\x00\x80.\x05\xbel\x8a\xa5\xd3\x84=\x00\x80\xd5h<\xa4\x1b'k\xbc\x14\xf6\x00\x00\xbaa\x9a\x1e\xbeB\xd8\x03\x00\xe8\x8eF=|on}\xf5Qyq\x915;\x7fO\xd8\x03\x00X\xa1\xa6=|{\xc2\x1e\x00@\xbf\x03\xdf\xfe\x92\x1e\x97\xb0\x07\x00\xb0\xec\xc0\xf7\xe6\xd6W\xb7\xb3\xf9\x95W\x11\xf6\x00\x00\xda\x16\xf8\xb2\xe5\xf4\xee\x09{\x00\x00\xab\x08|q\xb2\xc6\xde2\x1ePy_C\x87\x05\x00`~j\xcd\xd2\x8d+k\xbcX\xe2\xe3\xba,\xb7\xc3r\x1b\xff\xc6\xf9\xd7^9L\x00\x00\x8b\x0f|\x17\xe5\xc5\xe6\x0a\x1e\xdfU\xb9\x1d\x87\xf0W\x06\xbf3\x87\x0b\x00`\x01\x81/\x0e\xb1~\xd8\x82\xc7z\x9e\xdd\xf4\xfa\x1d\xeb\xf5\x03\x00\xa8\xaf\xce9|\xa3\x96<\xd60C8\x0c+_\x94!t\x1cg\x0d\x03\x00P!\xd9\xc3\x17'k|\xb7\xc5\x8f?\xf4\xfa\x15\xbfq\xfe\xb5c\x87\x12\x00\xe0~U=|\xa3\x96\x87\xbdCa\x0f\x00 
\xed\x8d\x8a\xeb\xf7[\xf6xM\xe2\x00\x00\x98W\xe0\x8b\x9356[\xf28M\xd8\x00\x00\x98w\xe0\xcb\xda1\x9c{\x94\xe9\xcd\x03\x00\x98\xc9\xbd\x936\xde\xdc\xfa\xea\xa0\xbcx\xb9\xa2\xc7\xa4\xe82\x00\xc0\x1c=\xd4\xc37Z\xc1c9\x8a!o\xe2\xb0\x00\x00\xf4'\xf0\xe9\xcd\x03\x00Xv\xe0{s\xeb\xab{\xd9\xe2'k\x9c\xc4\x90\xa7\xa4\x0a\x00\xc0\xb2\x03_\xb6\xb8\xde\xbd\xd0\x9b7\x8eA\xefB\xd3\x03\x00,\xc7\xf7M\xdaX\xd0d\x8d\xd3L\x81d\x00\x80\x95y\xbd\x87o4\xa7\xdb\x0d\x05\x92\xc71\xe8]hf\x00\x80\xfe\x04\xbe\xd0\x9b\x17\x86l\xc7\x9a\x16\x00\xa0e\x81\xef\xcd\xad\xaf\x86\xb07\xcdd\x0d\xbdy\x00\x00]\x08|Y\xf3\xde=\xcb\x9d\x01\x00t\xc0\xc7\x936\x1aL\xd6\x08\xbdya\xf2\x85\xe5\xce\x00\x00:\xe2\xb6\x87o\xbfb?\xbdy\x00\x00\x1d\x0f|\xa3\x07\xae\xb7\xdc\x19\x00@\xd7\x03_\x9c\xac\xb1q\xe7g\x96;\x03\x00\xe8S\xe0\xcb>\xe9\xdd\xd3\x9b\x07\x00\xd0\xd3\xc0\x17\x02\xde\x9e\xde<\x00\x80~\xfa\xff\x02\x0c\x00\x1eE:\x8bH\x8b\x05S\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x0f\xb6\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00@\x00\x00\x00@\x08\x06\x00\x00\x00\xaaiq\xde\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x12t\x00\x00\x12t\x01\xdef\x1fx\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x00\x0f3IDATx\x9c\xe5\x9by|TU\x96\xc7\xbf\xaf*KU\xaa\x0a\xb2\x90\xa4*\x8e\xa3\x01\x82c\x98\x86@l\xb7@\xc0fQ\x16\xa1\xb5\x05\x5c\x80\x06\x84\xb0((\xc8\x08\xd2\x0dQTF\xa4\x05Y\x14\x08K\x90\xc8\x9anA\x09\x81\x04ADtD\x0d\x04\xdbFIB@gHU\xc0\x90\xa4\xeaU\xaa*\xa9zw\xfe\xa8\xa4$\x92\x04*\x9b\xd3\xce\xef\xf3y\x9fT\xee;\xf7\x9c\xdf9\xef\xdc\xe5\xdd{\x9fD\x03HMM\x0du:\x9d\xcf\x02#%IJhH\xe6\x9f\x05B\x88|\xe0}`\xcb\xd2\xa5K/\xfc\xfc\xbe\xf4\xf3\x82y\xf3\xe6\xbd\x08<\x13\x1e\x1e\x1e\x1a\x1f\x1fO||<\x00\x9d;wn[\xa6\xad\x8c\xe2\xe2b\x00\xce\x9c9\xc3\x993g\xb8r\xe5J\x05\xb0r\xe9\xd2\xa5/^-\xe7\x0b@jjj\xa8\xcb\xe5Z\x01L\x188p 
\x03\x06\x0chO\xbem\x8e\xc3\x87\x0f\xf3\xe1\x87\x1f\x02l\x09\x0e\x0e\x9e\xfd\xd2K/U\x00\x04\xd4\x09\xb8\x5c\xae\x15\x1a\x8dfBJJ\x0a&\x93\xe9\x97\xe2\xd9f\x180`\x00\xf1\xf1\xf1\xa4\xa5\xa5Mp:\x9d\x00\x13\x01\xd4\x00\xf3\xe7\xcf\x7f\x16\x98?c\xc6\x8c_\xa5\xf3u0\x18\x0c\xdcv\xdbm\x9c8q\x22\xa1O\x9f>\x95\xc7\x8f\x1f\xff\x5c\x9d\x9a\x9a\x1a\xeav\xbb\xf7\x0c\x1a4H\xd3\xa3G\x8f_\x9ac\x9b\xc3`0 I\x12\xe7\xce\x9d\xbb{\xe0\xc0\x81\xebU.\x97k\x82V\xab\x0dMJJjW\x22\xa2\xa4\x18w\xe6rDIq\xbb\xda\x05HJJB\xab\xd5\x86\xba\x5c\xae\x09\x01\xc0\xc8\xee\xdd\xbb\xa3\xd1h\xda\xc5\xb8p\xc8\xb8?X\x8f\xe7\xabC\x00\xb8\xbf\xc8E}\xc7 \x02FLE\xd2\xea\xdb\x85\x83F\xa3\xa1{\xf7\xee\xe4\xe5\xe5\x8d\x0c\x00\xfa\xd7\x0dum\x8d\x9a\x83[q\x1f\xdb\x03N{\xbdr\xf7\x89\x1c\xdc\xa7\x8f\x13\x90\xfc\x10\x81\x0f\x8co\x17.\xf1\xf1\xf1\xe4\xe5\xe5\xf5W\x01m\xfe\xf4=\x85\xa7\xa9z\xf1\x09\xaa\xb3\xb7\xa2\xc86\x14\xb7B\xc0\xa0\xb1\xf5\xfe*\xb2\x8d\xea\xec\xadT\xbd\xf8\x04\x9e\xc2\xd3m\xca\x07~\xf29\x00 &&\xa6M\x8c(e\x16\x5c\x19K\xf1\x14\xe6\xfb\xca\x02z\xf6!\xe8\x0fO\xa1\x8a0\xe2\xcaJ'p\xc8x\xd4w\x0e\xa6\xfaoo\xe1>}\x1cq\xb9\x04\xc7\x9b\xcf\xa2\x8eK x\xdc<T\x11\xc66\xe1V\xe7s\x00\xb4~\x06\x88*\x19W\xd6\x16\xaa\x0fg\xfa\xca\xd47wE3z&\xean?\xcd\xac\x15\xb7\x02\x80*\xc2\x88&\xe5e<\x05\xf98w\xaf\xc6\xf3\xdfE(\xdf\x9e\xa4f\xc1\x18\x82\x06\x8c\x22x\xf8\x04\xa4\x90\xd6\xed\x1f\xeae@k\xc2uh7\xae\x0f\xd2\x11\x0e\x19\x00)D\x8f\xe6\xd1Y\x04\xdd;\xe4\x1aY\xe1Q\xea\xfd\xaf\xee\x96\x80\xee\xcf\x9b\xa8\xfe\xec\x00\xce\x9d\xab\xbc\x81\xcc\xddE\xf5'\xfb\x09\x1e1\x91\xe0A\xa3[\x9bn\xeb\x05\xc0\xfd\xddI\xec\x1b\x97 ~4\xfb\xca\x82\xef\x1f\x83f\xe4\xa4F\x9f\x9ep+\x0d\x96\x07\xdd;\x84\xc0\x84\xbe8\xdf\xdf\x8c+g\x17\xc2f\xc5\xb1m%\xce\x9c\xdd\xe8&/ 
\xe0\xdfz\xb7\x16\xed\x96\x07@\xb9l\xa6\xea\xdd7\xa9>y\xccW\x16x{ot)\x7fF\xd5\xa9\xe9Y\xa5\xe2i8\x00\xe0\xcd\x1c\xedc\xb3\x08\x1e4\x0a{\xda+\xd4|{\x12J/b}\xf5)\x82z'\x132\xf6YT\x91-\x9f\xb56;\x00\xc2n\xc3q`'\x8e\xbfm\xf4\x95\xa9\x22M\xe8\xa7-\x220\xfe\xc6\x9ePc\x19p5T\x9dL\x18\x16\xbcE\xcd\x99\x93\xc8\xeb\x16\xa3\x5c6\xe3\xfa\xe2(\xae/\x8e\xa2\xfd\xc3d\xb4C\x1eE\xd2\x19\x9a\xebF\xf3\x02\xe0<\xba\x0f\xf9\x9d\xe5\x08{m;\xd7\x19\xd0=2\x05\xed\xb0\xc7\xea\xc9\xd5\xfc#\xcf\xf7;\xb0{\xe25z\x9a\xca\x80\x9f#0\xbe7a\xab\xf6\xe2\xd8\xbf\x03\xfb_7 \xec6\xec\xbb\xd3\xa8\xda\xbf\x1d\xfd\x1f\xe7\xa0\xe9\xff`s\x5c\xf1/\x00\xd5\xff\xc8CN_\x8e\xfbB\x81\xaf,d\xf8c\xe8FM\xa9\xf7\x14\xaa\xb2v\xe0\xc8\xde\x81\xe7\x92\xb9^}\xcd}\xc3\xd1\x8dJA\x1d\xe5M]q\xe3\xfe\xfb\xa0\x1d\xf6\x18\x9a\xfe\xc3\xb1gn\xa0*k\x07\xc2&c]\xb3\x98\xaa\xac\x9d\xe8'\xce!\xa8\x81@7\x05\x95?\xc2\xe5\x8b\xa6Ss\xbe\x10!$\x02\xe3\x13\x09_\xb6\x0d\xfd\x849\xf5\x9c\xb7\xaeY\x8c-}\x05\xeeR\x0bBH\xf5.\xc7\x91\xfd\x94\xcd\x1d\x8b\xe3\xa3,\x00\x84\xb8f=\xe6\x86 
\xe9\x0c\xe8'\xcc!|\xd96\x02\xe3\x13\x11B\xa2\xe6|!\xe5\x8b\xa6\xfb\xad\xcb\xaf\x0c\xa8#\x1c:\xefu4w\xf5\xbb\xe6\xbe\xbcs\x03UG\xf6S\xb7\xce\xa2\x8e4\xa1\x1f3\x99\xaa\xac]\xd4\x5c(@\xa5\xd3\xa3\xc82\x95\xab_F\x1d\x19\xd3\xec\x00\xf8\xc8\xc7v#l\xf1:\x9c'>\xa6b\xe9\xf3\xcd\xd2\xe1W\x06\x08\xe1\xbd\x1ar^\xb1\xdb\xb0\xef\xdb\x85\x10\x10|g?:>\xbd\x10Ig\xa0b\xf5\xcb\xb8/\x99\x89x#\x83\x887\xdeE\x15iB\x08\xa8X\xb5\x18!\x9a\xc5\xf9\x1ah\xee\xea\xe7\xe3\xe6/\xfc\x0b\x80\x22!\x94\x86\x9f\x9a\xe3\xf0~<\xb2\x8cP$\x02n\xe9F\xf0]\xfdp[,\x04\xdcr\x1bB\x91\xa8:\x92\x8d:\xcaD\xc7\x99\x8b\x10\x8a\xe4m\x22\x8d\xe8j\x0e\x9a\xe2\xd6\x14\x9a\xd5\x04\x1a\x82\xe3\xc4'\xbe\xfb\xf6\xc3\xd98N\x1cC\x15e\x22\xe2\xd5\xb7q~\xfe1\xe5+_A\x0a\xd1\xe3<q\xac\xc5\xa9\xef/\xb7\xa6\xe0g\x13\x90\x1a5Tw/\xe4w\xc3q\x97Zp[,\x84\xcdZ\x88J\xa7G\x1deB\x08\x09\xeb\xf6\xcd\xa8#cjS\xb6q]\xcdAs\xf5\xb5Z\x06\xb8K\xcd\x08!\xe1*\xf6\x8e\x12\x1e\xd9N\xd5\xe7\xc7\x08(.\xe4\xca\x9b\xaf\xa2\xbd;\x19\xd7\xdfO\x12\x10\x1b\xe7\xd5\xf5_\x9f\x5c\xa3\xa3\xba\xb8\x10\xc7\xe7\xde\x19e\xf0oz\xa3\xf9M\xafV\xe1\xd6\x14Z-\x00\xea\xc8\x18j,\xa5T\x9f+B{O2!w\xf7\xa5l\xc5\x12\x00t\x03\x87\x121\xfbOTn\xdb\x84uo&\xa1OL\xf4\xe9\x92?\xcc& 
\xcaD\xe5\xf6\xcd\xb8\xfe~\xea*\x8d\xe9\xb5\xf5\x16\xb4\x98[S\xf03\x00\x8d\xdf\xd3\xdc\xdd\x17\xc7\xd7^\x07\xb4w\xf5E{O2\xaaw\xd3\x91$\x90\x0f\x1d@\x0a1\x10:v\x12\xd6\xbd\x99\x5cY\xbf\xdaW\xafl\xf9\x92\xfa\x84\xa2M\xdeE\x13\xbb\x8c|(\x9b\xc0\xd8\xaet\xf8\xfd\xf5\xdf\x02\x9b;\xa2\xb4Z\x1f\xa0\x1f4\xd4\xd7\xd6\xcb\xd6\xaf\xa6l\xdd*\xdc\xa5\x16Lk\xd2\x09K\x99\x85|\xe8\x00\xa5/-\xc0#\xdb\x11B\xa2\xc3\xe3\x93\x08\x8c\xed\xe6\xd3\xa9\x8e2\x111\xfbO\xdc\x94\x9eI`\xe7\x9f\xca\xcb\xdfMG\x91\xe5\x16qk\x0a\xad6\x0c\xaatz\xa2\x16.A\x0a1\xa0\xd8\xec\xc8\xb9\x07\x09\x88\xed\x06\x02:\xfc~4\xa1OL\xc2y:\x1f\xdd\xc0\xa1\xe8\x06\x0e\xa5\x22c\x0b\x8a\xf0\xea\x0b\x8c\xedF\xcc\x9at\xf4\x83\x86\xa0\xc82\x8e\xfc|\xc2Sf!\x14\x09\xc5f\xc7v\xe8@\x8b\xb85\x05\xbf\x02\xa0\x08\x09\xa5\x89(\x07u\x89\xc3\xf4\xfaJTQ&\x14!\xe1**\xe2\xfb\xf1c\xa8|/\x13k\xeeA\x14!\x11\xf9\xdc\x0bD>\xf7\x02\xc1=z\xe1**\x22\xb8G/\x9c\xb5r\xf6\xcf>\xa1bO&\xe8\x0ctxh\x14\xda{\x92Q\x84\x84\xfd\xb3\xe3-\xe6\xd6\x18Z\xad\x13\xacCP\x978n^\xbb\x89+\x19[\xa8x\xef\xafxlv.\xaf}\xab\xf6\xae\x845\xf7 \xb6\xdc\x83\xb8\xce\x15\xf1/k7\x11\xdc\xa5+\x8a,\xf3\xe3\xda5\x98S\x17\xa2\xd2\xeb\x09}\xe8\x11\x00B\xeeMB\xfe\xf48U\xf9\xd7_$m\xa7y\xc0\x8du6*\xbd\x9eN\xd3\x9f\xe6\x96\x8c\x9d\x04u\xee\x8a\x10\x10\x10mD\xd3#\x81\xd2\xd7_\xa3*?\x9f\xa8\xb9/\x10\xdc\xa5+\x005\xa5\x16$\xbd\x1e!\xc0c\x93\x09\x1f?\x11\x80\xe0.q>\x9b\xaesE\xad\xc2\xed\xe7h\xf5\x0c\xb8\x1a\x81F#\xd1\xff1\x9f\xef\xa7N\xa1\xda\x5c\x8a\xdbf' 
\xda\x84G\x96)Y\xb4\x10}R\x1f\xaaN\xe7\xa3\xc82\x81F#\x86\xc1C\xb0\xe6\x1c\xa4\xeat>\x9a.]){\xe7\x1d\x9fM\xcfu:\xc2_|&\xd8\x18<\xb2L@\xb4wt\xf0\xd8\xecDN\x7f\x0a\x95\xce\x80\x10\x12\xb6\xe3\x9f\xd2a\xf0\x03\xc4n\xdbA\xec\xb6\x1dt\x18|\xbfw2Ux\x8e\x0b))\xd8\xf3O\xffd\xf3:O\xb7}F\x01?\x8c\xd4X,\x5c\x5c\xb8\x88\x1ff?G@\xb4\xd1W\xd7\xbct\x19*\x9d\x81[\xd3\xd2\x10BB\x9f\xd4\x87@\xa3\x11\x8f,S\x99\x93\x8b\x10\x12\xa5o\xbd\x8d\xb6g\x02]\xb6o\xbf\xcaf\xd3v\xdbi*|}\x99\x1a\x8b\x85\xcb[\xb6Ry0\x87@\xa3\x91\x7f]\xb1\x9c\x90\x84\x9e\x94\xbc\xf6:\x95\x07sp\xdbdnzy\x06\xa2V\x9f\xb3\xb0\x88\x8a\x839T\x1e\xcc\x01\xc0\xd0'\x89\xe8\xa7g\xf8\x82Rg\xf3\xf2\x96w\x88\x99\xff<\x81\xc6\x867J\xdae\x22\x84\x90\xbcW\x03\xf0\xc82\x97\xd3\xb7R4f,U\xa7\xbe&f\xde\xf3t\xdd\xb9\x8d\x90\x84\x9e\x00\x18\x92\x92|\xf5\xed\xa7NS\xf2\x9f\xcb@H\x94\xaeY\x8b\x22\xdb1\xf4\xe9\x03B\xf29\x0fP\xbaf\xad\xaf\x8eb\xb3S4f,%\xaf-\xa3\xc6b\xf1\x8b[Shq'\xe8\x91e\xcav\xbf\xc7\x95\xcc=H\x12D?=\x83\xf0Q\x0f_#g\xe8\x9b\xe4\xab_q \x97 \x93\xb7Y\xdc\x96\xbd\x07\xb5^\xcf\x0f\x0b^D\xdf\xc7\xdb\x1cj,\xa5\x98W\xadE>\xfe) \x11>\xeaa\x8c3\xa7Sq \x97\xcb\xe9[\xb1~\xfc\x19\xe1\xa3\x1e\x22b\xf4\xc3\xa8\xf5\xfaF\xb9\xdd\x08Z\x14\x80\xf2\xec\x5c,\xab\xd7!!\xae!\xd4\xa0\xb1h\xafs\x1d\x1f\xb8\x1fM\x5c\x17\xe4\x93_\xfb\xe4\xab-\xa5\x04\x19\xa3\xb9\xb8\xe4/T\x1c\xc8\xad\xad!a\xe8{/\xc6\x99\xde\xb5\xbe\xd0!\x83\x09\x1d2\x98K\x9b\xb7r%s\x0fe\x99{\x89\x18\xf5\x10Q\x93\xc6\xb5\xef\xcbPyv.\x977o\xa5\xdar\x89\x88\xd1\x0f\x135il\x93\x8e\x03\x5c\xda\xb4\x95js)\x005f\x0b5f\x0b\x81\xd1Q\x98W\xad\xc5Yp\xcewi\xe2\xba\xa0\xd2\xe9\xf1\xc82A\xa6hnZ0\xf7\x1a]Q\x93\xc6\x131\xfaa\xcav\xef\xa1l\xf7\x1e*\xb2s\x9a\xdd\x07\xf8\xb7/\xa0x\xd7\xb1/\xbe\xf2:a\xc3\xee\xe7\xd6I\xe3\x082\xdd\xd8\xee\xad\xfd\xd4i_\xfd\xf2\xfd\xde\x0eO\xad\xd7\xe3,(B\xd7\xab'\xf6\x93\xf9\xc4\xaey\x03\xb5AG\xd1\x1f\xa7\x01`\x9c9\xad\xd1\xc0\xaa\xf5z\xa2&\x8d#t\xc8 
~\x98\x9f\xea\xd3\xed/\xfc\x1c\x06\x95\xab.\xffB.\x84@\x08\x85\xc8I\xe3\xf8\xf7\xcf>$r\xd28\x82\xe3:\x13\xbb\xe6\x0dB\x87\x0eF\x08\x05\xb5AG\xe5\xc7\x9f\x22\x84B\xa01\x8a\x0e\xc9M\x1f\xdb\xa96[\xb8\xb4i+\x8e\x82B\x1f/\x7f\xe1W\x00nym1A\xc6(\x10\x0a\xe5Y\xd9\x14\x8d\x9fL\xe9\xc6-xl\xd7\x7f]U\xebC@(T\xec?P+/|;#5f\x0b\x08\x05M\x5cW\x9c\x85\x85 \x14t\xbd\x1a?\xb0\xe5\xb1\xc9\x94n\xdcB\xd1\xf8\xc9\x94ge\x83P\x082Fq\xcbk\x8b\xfdq\x07\xf0\xb3\x09t\xe8\xd7\x07]\xef\x04~\xdc\x99I\xd9\xaeL<V+\x976l\xa6b_6\xa693\xe9\xd0\xafo\xa3u;\x8dy\x04\xeb\xd1cT\x97\x94\xf0\xfd\xf3\x7f\x02\xa0\xa6\xc4L\xe9\x86t_\x00\x8a\xa7?\x83\xfd\xa4wQ%\xd0\x18\xdd\xa0\x1e\xeb\xc7\x9f`^\xbe\x9a\xea\xda\xa1Pm\xd0\x131f\x14\x9d\x1e\x1d\x85\xda\xe0\xff\x19\x02\xff\xe6\x01\xb5\x06\xa3\xa7L\xa4k\xc6fB\x87>\x80P\x14\x5c%%\x5c\x98\xfb\x02\xc5\xd3f\xe2((l\xb0\x9e\xaew\x82O\xdeq\xf6,\x8e\xb3g\x11B`\xcf;\x89\xe3l\x01BQ\xd0\xf5\xee\x89P\x94\xda\xdf\xf5\x8f(;\x0a\x0a)\x9e6\x93\x0bs_\xc0UR\x82P\x14B\x87>@\xd7\x8c\xcdDO\x99\xd8,\xe7\xa1\x05\xbb\xc3A\xa7. 
l\xf8\x10.\xa5mB\xce;\x85\xfc\xd5I\x0a\x1f\x9f@\xd8\x83C\x89\x99\xf3\xcc5\xa4\x82L\xd1 \x04\xda8\xef[\xa0.\xb1\x17\xd1)O\x22\xe7\x9d\xa2x\xea\xd3\xe8z\xf7\xf2\x0d5\xdan\xde\xc5S\x8fM\xa6d\xf9J\xca\xf7e\xfb\xf4\xe8\x13{\x11\x95\xf2$\xfa\xc4\x1b_4m\x0c->\x1f\xa0O\xec\x85~\xfd\x1a\xca\xf7\xed\xa74m\x13\xd5%f\xca?\xc8\xc2z\xe4(\x9d\x1e\x1fC\xf4\xd4\xc9>Y\xc7\xd9\x02\x10\x0a\x81%\xe6\xab\xe6\xaf\xde\xfe\xc0y\xf6\xac\xaf_P\x1b\xf4\x94\xae\xdf\xc8\x8f\xdbw\xf9\xde\x04\x83bLD\xa7<I\xd8\x83\xc3ZJ\xdb\x87V;!\x12\xf6\xe00:\xf4O\xe6\xf2\xf6\x9d\x94\xae\xdb\x88\xdbj\xc5\xb2n\x03W>\xd8G\xcc\xdc9t\xbc\xaf\x1f\x1e\xab\x15\xa1(\xc8_~\x89\xc7&\xe3\xba\xf8?\xc8_\xe5\xe1\xb1\xd9\x10\x8a\xc2\xc5e+\x00\x08\x8a1\xf2\xed\xb0\x91T\x97\xfc4\xe5\x8d\x9e6\x99\xc8\xc7\x1fEmh\xfeY\x80\x86\xe0w\x1f\xd0\x14\xd4\x06\x03\xc6\xa9S\xb8}\xff^:\xf6O\x06\xa1P}\xb1\x84\x0b\xb3\xe7rn\xf24<V\xab\xb7\xc76\x99\xf0X\xad\xe8\x13{\xa3K\xec\xe5Mw\xa1\xd4\xabS}\xb1\xc4Wv\xfb\xfe\xbd\x18\xa7Niu\xe7\xa1\x95\x03P\x87\xa0\x18\x13\xb7\xaeXF\x97\x8d\xeb|\xce\xc9_~\x89\xe3\xbb\xef@(DO\x9b\x02B!l\xc4p\x8c\xd3R\x08\x1b1\x1c\x84B\xe5\x91#\xde& \x14\xb4\xdd\xe2\xe8\xb2q\x1d\xb7\xaeXFPL\xdb\x1d\xe0n\xf5SbWC\x7fG\x22\xddvo\xe7\xca\xfb\x1fpq\xe9_\xf0\xd8l\x00\x9c\x9f5\x1b\xa1( 
\x04\x973\xb6aY\xbb\xde\xfb?\xde,\xbai\xde\x5c\xc2G\x8ehKj>\xb4i\x00\xea\x10>r\x04\x1d\x7fw\x9f\xd7\xd9\xb7\xd7\xe2\xb1V\x02P4\xf1\xc9zr\xc6\x19\xd3\x89\x1c\xf7D\x9b\xa4zch\x93&\xd0\x10\xd4\x06\x03\xc6\x19\xd3\x88\xcf9\x80\xfe\x8eD_\xaa#\x14\xf4w$\x12\x9fs\x00\xe3\x8ci\xed\xea<\xb4S\x06\x5c\x8d\xa0\x9bb\xe8\xbae3\xf2\x17_b~\xebmLO\xcd@\x7f\xe7o\xdb\x9b\x86\x0f\x01\x00f\xb3\xb9\xdd\xbf\x14\xd1\xdf\xf9[\xe2\xeeLoW\x9bW\xa3\xee\xa3*\x15\x80\xc3\xe1\xf8\xc5\x88\xfc\xd2Pi\xb5\xda\x0a\xb3\xd9|}\xc9_\x19j}\xbe\xa0R\xab\xd5G\xf3\xf2\xf2\xae'\xff\xabC\xad\xcfGU\xd1\xd1\xd1+\xcdf\xb3\xafM\xfc\x7f@qq1f\xb3\x19\x95J\xb5R5e\xca\x94\xa3Z\xad\xf6hFF\x06\xb5\xdf\xd3\xfd\xaa\xe1t:\xc9\xc8\xc8@\x08\xb1w\xc9\x92%\xf9*\x80\xe4\xe4\xe4\x89\x1a\x8d\xa6\x22--\xedW\x1d\x04\xa7\xd3IZZ\x1a\x0e\x87#_\xa3\xd1L\x84\xab\xf6\x9b\xbe\xf9\xe6\x9b\x84\xac\xac\xac\x8f\x9cNg\xe8\xf0\xe1\xc3IL\xf4\xef\xcc\xed\xffu\xe4\xe5\xe5\x91\x95\x95U\xe7\xfc}u\x9f\xce\xd6[L\xff\xe8\xa3\x8fn-((H?\x7f\xfe|\xff\xb0\xb00\x12\x13\x13\x89\x8d\x8d\x05\xfe\xb9>\x9ev:\x9d\x94\x94\x94\x00p\xfe\xfcy\xf2\xf2\xf2(//G\x08\xf1\xa6F\xa3y\xa9\xceyhd\xc7\xf1\xd0\xa1C\xfd\xcf\x9c9\xf3LEEE\x7f\x87\xc3\x11\xdaN\xbc\xdb\x0a\x17\x84\x10{\xf1~9~\xe1\xe77\xff\x17\xd7q\x00\x14\xc6\xb0\x7f\x82\x00\x00\x00\x00IEND\xaeB`\x82\x00\x008k\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x007\xf8IDATx\xda\xec\xddOllY~\x17\xf0[\x93\x8e\xf2\x8f\xc4o\xa4\xe1\x9fF\xc4~,\x185\x04\xd9#\x88h\x10#WGH\x84M\x9eGb\x93\x95\xab\x17,\xb2\x18\x9e{\xc5\xec\xde\xb5\xc4bv\xe3\xc7\x08\x09\x89\xc5+\xaf\x88X\xa4\xedeHP\xdb\x1a\x04\x1d\xfe\xa8\xed0R\x18\x02y6\x22L$4\x9a\xe7D\x84\x00I\xcc=\xed\xe3i\xf7\xeb\xaa{\xcf\xad\xbaUu\xef\xad\xcfG\xba\xaa\xf7\x5c\xd7\xf5\xe7\xdcr\xd5\xb7\xce\xb9\xe7w\x06\xb7\xb7\xb7\x19Pn\xe3+o\xed\x15\x17[7\xdf\xf8\xe0Hk\x00\xd05\x9f\xd1\x04\x90\xe4 n\x00 
\xf0A\xdfl|\xe5\xad\xad\xe2b\xb7\xd86cO\x1f\x00\x08|\xd03\xf9\x83\x7f\xeb\xe5\x03\xa0s\x06\xce\xe1\x83\xe96\xbe\xf2\xd6\xa3\xe2\xe2*\xfc\xf3\xc1\x8f\x1f\xdf|\xe3\x83+\xad\x03@W\xe8\xe1\x83r{\xaf\x85\xbd \xd7,\x00\x08|\xd0\x1f\x93\xc2\xdd^\xec\xf9\x03\x00\x81\x0f\xba\xac\x08u\xc3\xe2bs\xd2U\xd9]\xcf\x1f\x00\x08|\xd0qe\x134r\xcd\x03@W\x98\xb4\x01\x13\xc4R,/+v{\xfb\xe6\x1b\x1f\x9ci-\x00\xdaN\x0f\x1fL6J\xd8G\x89\x16\x00\x04>\xe8\xb0\x940\xf7$\xf6\x04\x02\x80\xc0\x07]R\x84\xb8Q\xf6\xe9R,\xd3\x8c\xb4\x18\x00\x02\x1ft\xcf\xc1\x82\xf6\x05\x00\x81\x0fV-\x96b\xd9\xae\xf3+\xb1G\x10\x00\x04>\xe8\x88Y\xc2\x9b^>\x00ZMY\x16\x88\xe2\xea\x19\xdf\x9b\xf1\xd7\x95h\x01\xa0\xb5\xf4\xf0\xc1\xc7\xe6\xe9\xa9\x1bi>\x00\x04>h\xbfyB\xdb\xbe\xf5u\x01\x10\xf8\xa0\xc5\xe2\xc4\x8b\xcd9o\xc6\xb9|\x00\x08|\xd0b\xa3\x96\xdc\x06\x00\x08|\xd0\xb4\xb8Z\xc6n\x037\xb5\xa9D\x0b\x00\x02\x1f\xb4S\xde\xe0m\x09|\x00\xb4\x8e\xb2,\xac\xb58\xd1\xe2*K_J-\xc5\xe3\x9bo|p\xa5u\x01h\x0b=|\xac\xbbQ\xc3a/\xc85+\x00\x02\x1f\xb4\xc7\x22f\xd6\xee)\xd1\x02\x80\xc0\x07-P\x84\xb2\xbdl\xfeR,\x13o:s.\x1f\x00\x02\x1f\xb4\xc2\x22C\x99\x9a|\x00\x08|\xb0J\xb1\x14\xcb\x93\x05\xde\xc5f\xecA\x04\x00\x81\x0fVd\x19=p#\xcd\x0c@\x1b(\xcb\xc2\xdaYP)\x96i\x94h\x01`\xe5\xf4\xf0\xb1\x8e\xf6\x96\x14\xf6\x02\xe7\xf2\x01 \xf0\xc1\x0a,3\x84\x8d\x94h\x01@\xe0\x83%*\xc2\xd7\xb0\xb8\xd8^\xe6]fw=\x8a\x00 \xf0\xc1\x92\x8cVp\x9f\x86u\x01X)\x936X\x1b\xb1\x14\xcb\xcb\x15\xdd\xfd\xdb7\xdf\xf8\xe0\xccQ\x00`\x15\xf4\xf0\xb1NFkz\xdf\x00\x08| \xf0-\xc1~\xeca\x04\x00\x81\x0f\x16\xa1\x08[!\xecm\xae\xf8a\x8c\x1c\x09\x00\x04>\xe8w\xd8\x12\xf8\x00\x10\xf8`\x116\xbe\xf2\xd6Nq\xb1\xdb\x82\x87\xb2\x19{\x1a\x01@\xe0\x83\x86\xb5\xa9,\x8a\xc0\x07\xc0\xd2)\xcbB\xaf\xc5U.\xbe\x97\xb8\xfb\xcd\xfd\xaf\xd5\xbc\x9b\xf0{\xaf\xb2\xf4s\x04\xbfx\xf3\x8d\x0f.\x1c\x1d\x00\x96E\x0f\x1f}7J\xd8\xe7\xba\xd8\xde)\xb6\xadb\x9b%\x88]\x14\x01n+\xde\xc6y\xc2\xfe\x0a1\x03 
\xf0A\x83\xca\xc2\xd5ivW\x10y\xab\xd8\xc6\xc5\xf6j\x9e;\x8a\xb71,\xfe\xf9\xc5b;.\xd9u\xdf\xfa\xba\x00\x08|\xd0\x80\x22T\x855l_\x1ff\x0d\xc3\xaf\xcf\x8b\xedq\x11\xce\xf6\x16\xb1\xfaE\x18\xae-\xb6Q\xf1\xcf\xcf\x16\xdbav\xd7\x83\xf8\xba\x91#\x04\xc0\xb2\xbc\xa1\x09\xe8\xb1\x87\xbd{\x97\xc5v\x14z\xe1\x96u\xe7\xb1\xc70\x0f[\x9c\x9d\x1b\xb6\xdd\x07\x8f\xed\xc8!\x02`\x19\xf4\xf0\xd1KqU\x8b\x10\xae\xc2\xd0j\x18\xb6\xddYf\xd8\x9b\x10\xfe\xee\x87{\x1f\xc7\xc7\xf4(\xf6@\x02\xc0\xc2\xe9\xe1\xa3\xcf\xc2\xb0\xedU\x9b\x1eP|<\xa3x\x0e\x9f\xf3\xf8\x00\x10\xf8`\xce`\xd5\xe6\xc7\x17\x86{_9R\x00,\x83!]\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00@\xe0\x03\x80%\x19\x0c\x06\x8f\x8am\xa8%@\xe0\x03\xa0\x9fao\xa7\xb8\xb8\xd2\x12 \xf0\x01\xd0\xcf\xb0wP\x5c|Xl\x1bZ\x03\x16\xcbZ\xba\x00,;\xe8=*.\xc6\xc5\xf6\xe4\xfeg\xb7\xb7\xb7gZ\x06\x04>\x00\xfa\x11\xf6\xc2\x10\xeeI\xb1mj\x0dX\x1eC\xba\x00,+\xec\xdd\x0f\xe1\xbe\x1e\xf6\xce\xb5\x0e,\x96\x1e>\x00\x16\x1d\xf4>5\x84\x0b\x08|\x00\xf4'\xec\xa5\x0c\xe1\x9ei)X,C\xba\x00,*\xecM\x1b\xc2\x05\x96L\x0f\x1f\x00M\x07\xbd\xbaC\xb8gZ\x0d\x04>\x00\xba\x13\xf6\xcc\xc2\x85\x162\xa4\x0b@Sao\x94\xdd\xf5\xd6\xd5\x0a{j\xf0\xc1\xe2\xe9\xe1\x03`\xde\xa0\x17\x86p\x8f\x8am_k\x80\xc0\x07@\xff\xc2^\x18\xc2\x1d\x17\xdb\xf6\x8c7\xa1\x06\x1f,\x81!]\x00f\x0d{\xa3\xecn\x08w[k@\xbb\xe9\xe1\x03\xa0n\xd0kr\x08\xf7L\x8b\x82\xc0\x07@\xbb\xc2\xde\xbcC\xb8\xc0\x0a\x18\xd2\x05 5\xec\x8d\xb2\xe6\x87p\xcf\xb4,,\x9e\x1e>\x00\xaa\x82\x9eY\xb8 \xf0\x01\xd0\xe3\xb0\xb7\x95\xdd\x15R^\xc8\x10\xae\x1a|\xb0\x1c\x86t\x01\x98\x16\xf6\xf6\x8a\x8b\x8b\xcc\xf9z 
\xf0\x01\xd0\xcb\xb0\x17\x86p\xdf+\xb6\x8d\x05\xde\x8d\x1a|\xb0$\x86t\x01x\x18\xf4\xb6\xb2\x05\x0e\xe1\x02\xab\xa1\x87\x0f\x80\xfb\xb0\xb7\xec!\xdc3\xad\x0e\x02\x1f\x00\xcb\x0b{\xcb\x18\xc2\x05V\xc4\x90.\xc0z\x07\xbd\xadluC\xb8g\x8e\x00,\x87\x1e>\x80\xf5\x0d{f\xe1\x82\xc0\x07@\x8f\xc3\xde\xca\x87p\xd5\xe0\x83\xe51\xa4\x0b\xb0^A/\xac\x9a\x11\x82\x96^=X#z\xf8\x00\xd6'\xec\x0d\x8b\x8b\xab\x96\x84=5\xf8@\xe0\x03\xa0\xe1\xb0\x97\x17\x17\xefgf\xe1\xc2Z2\xa4\x0b\xd0\xef\xa0\x17\x86p\xc3,\xdc\xdd\x96=\xb43G\x07\x04>\x00\x9a\xb1\x15\xc3U\x98\x8d\xbb\x13\x7f\xb6\xabY@\xe0\x03\xa0'noo/b\xd8\xfb\x94xN_\xf0\xfa\xe52\x02\xe1\x99\xa3\x03\x02\x1f\x00\x8b\x0f\x83ge\xe1\xeb\xb5@8\xcc\xf4\x0c\x82\xc0\x07@\x7f\x03a\x11\xfe\xae\x16t\xdb\xc0\x12\x98\xa5\x0b@\xa98\xc3wSK\x80\xc0\x07@?\xc3^\x98\xe5{\xd0\xf0\xcd\xaa\xc1\x07\x02\x1f\x00-\x12\x96`+\xab\xddwXl7\x9a\x09\x04>\x00:(N\xda\xd8/\xd9\xe5\xf2\xf6\xf66/.G5o\xfaL\xeb\x82\xc0\x07@;\xe4\x15\xd7\x7f4\xd4[\x84\xbeP\xd8\xf9\xb9\xe6\x02\x81\x0f\x80\x0e\x19\x0c\x06\xa3\xac\xbc\x0c\xcb\xe9\xc3\x99\xb6\xc5\xbfC\xf8\xbbL\xbc\xf93-\x0c\xcb\xa5,\x0b\xc0]\xc0\xd9\xca\xeeV\xa5\x18\x16[\x98\xa8P\xba*E\x11p\x06=n\x8b\xf0\xfc\xf3\x8a\xdd&M\xe4\xd8\xcb\xee\x8a<[\xaf\x17\x04>\x80V\x84\x9a\x9d\xec\xe3\x82\xc2\xc36\x86\x94x\x0e\xdd\xfb\xf1\xbf\xe71L]\x15\xdbY\x5cAcQB\x98++\xc3rX\xdc\xff\xd5\x84\x10|\x15{\x06\xdf+\xbbq5\xf8@\xe0\x03Xt\xc8\x0b\x81d/\xebF]\xb9\xad\x07\xff\xde\xcd\x1e\xf46\x16\xcf\xe5>\x04\x9e4\x19\x00cOgY\x19\x96\xeb\xecn\xe6\xee\xb40wR\xdcF8\x9f\xef\xa9W\x1c\x08|\x00\xcb\x0ay\x8fb\xc0\x0b!f\xbbc\x0f\x7f\xab\xe2\xfa\xef\x87\xc0\xe2y\x86 v\x16\x02`\x9cD1\xab\xaa2,yq\xfb\xaf\xcan \x9c\xcf\x17\xc3\xf5\xa4\xe1p5\xf8`\x05L\xda\x00z\x1b\xf4\xe2\x0a\x11W\xc5\xf6\xa2\x83a/\x18\xd6\xd87\xf4X\x86\x12*\xef\x15\xcf\xfbU\xb1\x8d\x8bm\xaff\x9b\x85\xfb{R\xb2\xcby\x11\xe6\xc6\x897\x17\xee{R}\xbeW^\x9d 
\xf0\x014\x19\xf4\x9ee\xdd\x9e@\xb05\xe3\xefm<\x08\x7f\xe1\xbc\xba<\x0e\xd5V9\xaa\xb8>O}\x00\xb1\x17pR\xe0\xbc\xf0*\x85\xe53\xa4\x0b\xf4\xd1\xc1\x1cA\xef&\xfbxrD\xd8^\xad0\xa4l6t\x1b!\xf8>+B\xdfi\x08u\x93&M\xc4\xc9\x16e\xbd\xa0\xc7u'[\x84\xfd\x8b\xdb=\x8c\xf7\x7fO\x0f\x1f\x08|\x00\xf3\x09=KE\xc8\x08\xe7\xb0\xed'\xfeJ8\xa7\xec,n\x17U\xe7\xa7-\xd9\xdb\xd9\xc7%b\x86\xf1r\x9e\x1e\xcb0\x5c\xfb$\x9e\xef\x97\xdf\x0f\xcf\xc6\xf3\x1c\x8f*B\xf0\xc1\x8c\xc7#\x8fC\xc5\xf7\xe7\xf3\xe9\xe1\x03\x81\x0f\xa0\x11G%\x81/\x84\x9d\xfb\x99\xad'm~\x12\x0fz\xd4\xbe\xff8\xe3\xd0\xec\xf0\xc16K/`\xf8\x9d\x17q\xe8;O\x08\x92Gs\x06\xe10\xb4{\x15\xefC\x0f\x1f\x08|\x00\x8d\x04\xa5\x8b\x22\xcc\x84U\x1f\x1e\x0eQ\x1e\x17\xdb\xb8\xeb5\xe0b\xfd\xbbq\xdc\x1e\xd6\x13\x1ce\xf5'\xa6\x84\xe0\xf7\x0f\x8b\xed\xf3%\xfb\x5c\xc7\xf5r\xe7y\xcc\xaf\xe2\x04\x92\xf7\x17\x5c?\x10\x98\xc2\xa4\x0d\xa0\xafB/_\x18\x8a\x0c\xe7\x90}\xb6\x08\x1a\xa3>\x16\xfc\x0d\x01\xaa\xd8B\x0f\x5c\x08~\x8f\x8b\xed\xdd,}\x89\xb3\xa0\xaa\xc7\xed\xa0\xa1\xc7y\x16\x1f\x1b \xf0\x014\x16\x84Bo\xde\xa3\xd0;\xd5\xb2\xf3\xf2\x16\xf9\x9c\xaf^\x0b\x7f\xa1\x00\xf2u\xc9\xaf\x84`\xf8\x97J\xae?or\xd8;<6\xafL\x10\xf8\x00h6\xfc\x1d\x14\xdbV\xf1\xdf/\x17\xdb\xe9\x84\xdd>Wq3#-\x09\x02\x1f\x00\xdd\x08\x7fa\xf5\x8dp\x0e\xdd}\xaf_\x18\xea\x0e\xb3\x93\xcb\xce\xdd{>i\xbd\x5c@\xe0\x03\xa0\xdd\xc1\xef\xa3^\xbf\xec\xae\xa0\xf3N\xc9\xae!\x10\xe6Z\x0c\x04>\x00\xbak\xee\xf5r\x01\x81\x0f\x80\x96\x8ae\x5c\xca\x8aR_\x9a\x5c\x01\x02\x1f\x00\xddV\x15\xe6\x0e4\x11\x08|\x00tT\x5c/w\xb7d\x97\xd3>\xd6*\x04\x04>\x80u\x09{a\xbd\xdc\xbcb7\xbd{ \xf0\x01\xd0a!\xcc\x95\xad\xbb{\xa8\x0c\x0b\x08|\x00t\xd4`0\xd8\xca\xca{\xef\xc2j\x1c&j\x80\xc0\x07\xb0\x90 rTl\x17q\xb8\x91\xc5Q\x86\x05\x04>\x80\x95\x84\xbdaq\xf1\xb4\xd8\xb6\x8b\xed*\x96\x0ba1\xed\xfc\xa4d\x97\xb0^\xeex\x01\xf7;\x16\xe6A\xe0\x03x8\x84\x18z\x9f>\x8c\xb3HY\x5c;O\x92/\x22\xecew\xb5\xfeB\x98?\x13\xe6A\xe0\x03\xd6P\x11\x00\x0eb\x18x\xdd\x8b\x18\x16h\xa6\x9dGS\xda\xf9\xdeq\xd3eX\x1e\x84\xbd{B\x1f\x08|\xc0\x1a\x86\x90\xaa\xf2 
C\xad\xd4X;\x97\xf5\xee5\xbe^\xee\x84\xb0woC\xe8\x03\x81\x0fX/\x07Y\xf9\x04\x82\x91&ZJ;\x1f-\xa0\x0cKY\xa0\x13\xfa@\xe0\x03\xd6A\xecu*+\x0frl\xa5\x87F\xday\xab\xb8xV\xb2\xcbu\xd1\xce\xf9\x02\xeezXl\x97B\x1f\x08|\xc0z+\xebu\xba\xc9\xac\xf4\xd0\x94q\xc2qh\x5c,\xed\x22\xf4\x81\xc0\x07\xac\xb9Q\xc9uGj\xc1\xcd/\x96a)[/7\x94a9Y\xd4\xfd\xd7\x0c}J\xb6\x80\xc0\x07\xf4,\x88\x84\xb07mi\xaf\xd0\xbbg\xa5\x87f\x8c+\xae_x/\xaa\xd0\x07\x02\x1f\xb0\xbe\xf6J\xae\xd3\xbb\xd7L\xa8\xaeZ/\xf7y\xd1\xce\x17\xcbx,\xf1x\x8eb\x98\x9f&\x94l9q\xe4@\xe0\x03\xfa\x11D\xb6\xb2\xe9\xab=\xe8\xddk\xa6\x8d\xab\xca\xdd4^\x86%!\xf4\x85p9\xac\x08}\xbbj/\x82\xc0\x07\xf4CY\xef\xde\x89\xde\xbdF\xb4r\xbd\xdc\x18\xfa\xaa\x86\x91\xf7c\xef$ \xf0\x01=\x0d|z\xf7\xe6\x14g\xbc\xee\x97\xec\x12\xca\xb0\xac\xac\x9d\xe3Z\xbd\x87\x15\xbb}\xbdx\x1e{\x8e&\x08|@7\xc3H\x18j\x9c6k\xf4rY\xe7\x94\xf5\x5cU\x98\x1b\xad\xfa\x01\xc6\xba\x7f\xa7\x15\xbb\x8d\xe3\xf0? \xf0\x01\x1d3,\xfb\x80\xd7<s\x07\xea\xbd\xac\xbc\x0c\xcbi\x8b\x8aY\x87\xe0Y5s\xf7\xc4\xcc]\x10\xf8\x80\xee)=\x7fO\xf3\xcc\x15\xf6\xaa\xd6\xcb\x0dZsn\x5c\x8d\x99\xbb\x86\xf9A\xe0\x03:f8\xe5\xe7\x97\x0bX\xcbu\xddT\x95a9l[\x1b\xc7!\xfc\xbcb\xb7\xfdX\xb7\x11\x10\xf8\x80\xb6\x8b=P\xd3\x02\xc9\x99\x16\x9a\xabm\xb7\xb2\xf2\xde\xbb\xd6\x96\xbb\x89\x13H\xaa\xce\xe7;\xb2\xfc\x1a\x08|@7\x94}`\x0b|\xf3\xc9\xb3\xf22,\x07-/w3\xca\xca\x87v\xc3s\x1b;\xcc \xf0\x01\xed7\x14\xf8\x9a\x17\xd7\xcb-+\xc3r\x1eK\xa1\xb4\xd6\x83\xf3\xf9\xca8\xc7\x13\x04>\xa0\x03\xa6\xf5\xf0]*\xb6<\x97\xaa\xa1\xda\xbc\x0bO\xa2x\x0d\x84@7ih7\xcc\xe4\xfdb,\xe5\x02\x08|@\xcbM+\xafq\xa5if\x13'3l\x97\xecr\xdc\xa22,)\xc2y\x88\x0f\x87v\xc3D\x93\x1d\xf5\x19A\xe0\x03\xbacZ}8\x1f\xe6\xb3\x85\xbd\xaa2,K_/w^q\x16qxNz\xf5@\xe0\x03:\x1aN2\x81\xafQ\xa17\xacl\xa2\xc6Q\x17K\xdd\x84\x90\xa7W\x0f\x16\xeb\x0dM\x00,\xd0\xdb\xc5\xb6\x15\xb7a\xbc\x0ceZ\x9c\xbfW?@\x87\xb6{V\xb2\xcb\xb5\xde1@\xe0\x03\x96*N\xca8\xd3\x12\x8d\x19W\x5c\x7f\xa0\x89\x80i\x0c\xe9\x02\xb4\x5c,\xc3R\xb6^\xeey\x9c\xf1\x0a 
\xf0\x01t\xd4\xb8\xe2z\xbd{\x80\xc0\x07\xd0U\x83\xc1\xa0j\xbd\xdcc\x93\x1d\x00\x81\x0f\xa0\xbba/\xcct\xceKv\x09eX\xf4\xee\x01\x02\x1f@\x87\x85\xb0WV\x86%\xb7b\x09 \xf0\x01t\xd4`0\x08\xcb\xd2=-\xd9%\x94a9\xd2R\x80\xc0\x07\xd0]Uan\xa4\x89\x00\x81\x0f\xa0\xa3\x06\x83\xc1^V]\x86\xe5LK\x01\x02\x1f@w\xe9\xdd\x03\x04>\x80\xbe\x1a\x0c\x06yV^\x86\xe5\xb0\x8b\xeb\xe5\x02\x02\x1f\x00\xd9\xf7\xd7\xcb-+\xb3\x12\xca\xb0\x98\xa8\x01\x08|\x00\x1d\x96g\xe5eX\x0e\x94a\x01\x04>\x80\x8e\x8a\xeb\xe5\xee\x97\xecrY\x84\xbd\xb1\x96\x02\x04>\x80\xee\xca+\xae\xb7\xa2\x06 \xf0\x01t\xd5`0\x18e\xe5eX\x8e\x95a\x01\x04>\x80\xee\x86\xbd\xb0^n\xd9D\x8c0Q#\xd7R\x80\xc0\x07\xd0]a\xa8\xb6l\xa2\xc6\x912,\x80\xc0\x07\xd0Q\xb1\x0c\xcb\xb3\x92]\xae3eX\xaa\xdap'\xf6\x92\x02%\xde\xd0\x04\x00+S\x15\xe6\x94a\xf9d\xb8\x1b\x16\x17;q\x0ba\xf9\xfe\xbc\xc7\xb7\x8b\xedL\x0b\x81\xc0\x07\xd0\xc6\xf0\xf2\xa4d\x97\xb0^\xee\x89\x96\xfa\x84\xf7\xa7\xfc|G\xe0\x83r\x86t\x01Vc\x5cq\xbd2,\x13B\xf0\x94\x9f\x1b\xd2\x05\x81\x0f\xa0]\x06\x83A\x08se\xeb\xe5\x862,\x17Z\xeaS\xae\xa6\xfc|\xa8i@\xe0\x03hS\xd8\x0b\xbdQy\xc9.\xa1\x0c\x8b\xde\xbdz\x81\x0f\xa8\xe0\x1c>`\x19!g\x98\xdd\x9dd\x1f\xb6\xfb\x7f\x87\x1e\xae/\xaf\xe1yj!\xec\x95\x95a\xc9M\xd4\xa8\x1d\xf8v5\x0d\x08|\xc0\xea\x9dL\x099\xc3x\xdd\xba\x04\xdf0\xb9\xe0i\xc9.\xd7E\xd8S\x86\xa5~\xe0\x03*\x18\xd2\x05\x96\x15\xf8&\xd9[\xb3v\xa8\x0as#/\x15@\xe0\x03\xfa\x16\xf86c\xafW\xef\x15\xcf3\x84\xdb\xb2\xa1\xc7s\xeb\xe5V\xba*i\xdf\xa1\xe6\x01\x81\x0fX\xa1x\x9e\xde\xcd\x94\xabGk\xd2\x0cz\xf7\xe6\x7f\x1d]i\x05\x10\xf8\x80v\x1b\xafk\xd0\x19\x0c\x06yV^\x86\xe5\xb90\x03\x08|@\x1fL\xeb\xe1\xda(\x02QoC_,\xc3RVf%\xf4|\xe6^\x1e\x80\xc0\x07t^\xec\xc1\x9a\xb6RB\x9f\x03O\x08\xbaeeX\xac\x97\x0b\x08|@\xef\xc2\xcf$\x9b}\xec\xe5\x8b\x13\x09\xf6Kv\xb9,\xc2\xde\xd8\xcb\xa2\xd6\x17\x87\xc1\x94\xedL\xeb\x80\xc0\x07\xb4\xe3\xc3:L\xde\xb8\x9eru\xde\xc3\xa7\x5c\xf5\x9c\xac\xa8\x01\x08|@/M\x0bA\xbd\xea\xe5\x8b\xcf\xa5\xac\x0c\xcb\xa9^)@\xe0\x03z)\x0eaN\xed\xe5\x8b\x93\x1c\xba\x1e\xf6\xac\x97\x0b\x08|\xc0\xda\x9
b\x16\x866{\x12\x84\x0e\xb2\xf22,G\xca\xb0\x00\x02\x1f\xd0k\x15\xbd|\xcf\x06\x83\xc1VW\x9f[|\xec\xcfJv\x09\xcf\xdbz\xb9\x80\xc0\x07\xac\x85\xb2\x9e\xbcq\x87\x9fWU\x98\xcb\x95a\x01\x04>`-\xc4\x19\xbb\xd3\xea\xf2\xed\x0e\x06\x83\xce\x0d\xed\xc62,OJv9W\x86\x05\x10\xf8\x80u3*\xb9.\xef\xe0\xd0nU\xef\x9e\x89\x1a\x80\xc0\x07\xac\x978q\xe1p\xca\xd5au\x8aqW\x9eK\xec\x91\xdc.\xd9\xe5\xb8x\xbe\x17\x8e: \xf0\x01\xeb\x18\xfa\xf2\xe2\xe2r\xca\xd5\x9d\x18\xdaU\x86\x05\x10\xf8\x00\xaa\xed\xc5P4\xc9\xd7\x8b@\xb5\xd3\xf2\xc7\x1f\xc2^\xd9z\xb9G&j\x00\x02\x1f\xb0\xd6\xe2\xd0\xee\xa8d\x97q[\x1f{<\xcf\xf0i\xc9.\xd7\xb1\x17\x13@\xe0\x03\xd6>\xf4\x85Y\xbb\xcf\xa7\x5c\xdd\xe6s\xdf\xaa\xc2\xe8\xc8\xd1\x05\x04>\x80\x8fC_8\xcf\xed\xf8\xc1\x8f\xc20\xef;\xc5\xcf[\x19\x9a\x06\x83A\x18\x8a.[/\xf7\xdcz\xb9@\x1b\xbc\xa1\x09\x80\x96\x85\xbeQ\x9c\x04\xb1Ul\xa3\x96\xcfl\xad*\xc32rD\x01\x81\x0f`r\xe8\xdbk\xfbc,Bi\x9e\x95\xaf\x97\xfb\xdcz\xb9@[\x18\xd2\x05\xa8\x1f\xf6B\x0fdY\x99\x950\x14\x9dk)@\xe0\x03\xe8\xae0\x94[V\x86\xe5@\x19\x16@\xe0\x03\xe8\xa8\xb8^\xee~\xc9.\x97\xd6\xcb\x05\x04>\x80n\xcb+\xae\xb7\xa2\x06 \xf0\x01t\xd5`0\x18e\xe5eXN\x95a\x01\x04>\x80\xee\x86\xbd\xaa\xf5r\x03\xbd{\x80\xc0\x07\xd0a!\xcc\x95\x95a9T\x86\x05\x10\xf8\x00:*\xae\x97[\xd6{w\x9dU\x17a\x06\x10\xf8\x00Z\xac\xaa\x0cK\xae\x0c\x0b \xf0\x01tT,\xc3\xf2\xa4d\x97seX\x00\x81\x0f\xa0\xdb\xaa\x86jsM\x04\x08|\x00\x1d\x15\xcb\xb0l\x97\xecr\xac\x0c\x0b \xf0\x01\xb4/\xc4\x8d\x8b\xed\xa2\xd8v*\xf6\x0beX\xcaz\xf7\xc2z\xb9\xca\xb0\x00\x02\x1f@\xcb\xc2\xde(\xbb[\x16-\xf4\xda\x9d\x15\xff/\x0blyV>Q\xe3\xc8D\x0d@\xe0\x03hW\xd8\x0b=z/\x1e\xfc(\x84\xb9\xaf\x17??\x8beW\x1e\xee\x1b\xfe\xff\xb4\xe4\xe6\xae\x8b\xb0\x97kU\xa0+\xde\xd0\x04\xc0\x1a\x84\xbd0<{6\xe5\xea\xb0T\xda\xcbb\x9f\xc3\xec\xe3^\xbbq\xc5M\x1e\xcc\xf88\xf6\xe2m_\x15\xdb\xeb\xbd\x83\x17)?s\xce 
\xf0\x01L\x16B\xd2F\xc5>\xcfB\x90+B\xd9?\xc9\xca\xd7\xcb\x0deXNfy\x10\xe1\xf7b\x99\x97I\x8fg714N|L\xaf\xfd\xffU\x0c\x8bU?\xbb\xb2:\x08\x08|\x00}\x11\x02\xdaVB\xe8\x0b\xd7\xff\x83\x8a}F\xf3<\x90\x22`\xddO\x18\x09\x8fi\xbb\xa1\xe77),>\x991@\x86UC^\x0f\x81I\xbd\x8f\xe1g\xcek\x04\x81\x0f`%\xc2\xf9vavnv7\xeb\xf6\xc9\x1c7\xf5\xbc\x89\x1e\xb1p\x1b\xb1\xa7\xef$K\xec\xd9[\xa2\xcd\xec\xd3k\x06'?\xc6\x09\x01\xf2|\xc2ng1\x1c\x9exu\x82\xc0\x07\xd0d\xe8\x0bAm/\x06\xad\xf1\x84PS\xe5\xff\x15\xdb\x7fn\xf0\xf1\x84\x9e\xb0a\x0c\xa2\xfb=n\xfaIa\xb1\xaa\xe4\x0d\xd00\xb3t\x81u\x0b~g\xc5\xb6U\xfc3L\xd2\xb8\xa9\xf1\xab?Xl\xff8\xd6\xf0{\xd4\xe0\xe3\x19\x15\x17\xef\xae\xd1!\x08=~CC\xbf \xf0\x01,#\xf8\xe5\xd9\xddy}\xc75\x7f\xf5\xbe\x86\xdfN\x83\x8f%\xf4v\xbdS3\x80vQX\x99D\xd8\x03\x81\x0f`\xa9\xa1\xefU\xeca\xfb\x0f3\x84\xbe\x0f\x8b\xd0\x977\xd5\xdbW<\x8eqq1\xecq\xe8{\x1e\xdb\x1a\x10\xf8\x00\x96+\xae\xbe\xf1Wf\xfc\xf5P\xca%L\xc0\xc8\x1b\x0a}a\xe6k\xe89\xbc\xecY3\xbfS<7\xcb\xd0\x81\xc0\x07\xb0\x92\xb0\x17z\xe7\xe6\x0dk\xa1\x94Kca&N.\x19f\x93g\xb7vM\xe8\xad\xfcr\xec\xbd\x04\x04>\x80\x95\x08A\xadl\xb6\xeeivW\x97\xaeJ\xa3\xe5E\xe2Ps\x08}\xc7\x1dn\xdb\x10\xf6\x86J\xaf\x80\xc0\x07\xb02q\xbd\xdc\xb2\x9e\xb9\x10\xf4FqF\xef;\x15\xc1o!%F:<\x83\xf72\x86\xbd\x0b\xaf4\x10\xf8\x00V)\x84\xb4\xb2\x957\xf2\xfb\xd9\xa4aH2\x06\xbf\xb7\xb3O\xf7\xba].2\xd8tp\x06\xaf\xb0\x07\x02\x1f\xc0\xea\xc5\xe2\xcbe+n\x9cO:\xef,\xd6\xf0\x1b\x15\xff|\x9c}\x5c\xc7o\xe1\x05\x84;4\x83\xf74Sc\x0f\x04>\x80\x96\xa8\x0aiyE\x00\xbb\x0au\xfc\x8a\xed\xd1\xb2&$t`\x06o\xa8\xb1\xb7'\xec\x81\xc0\x07\xb0r\xb1\x0c\xcbvEp9k\xe3co\xf1\x0c\xdew\xd5\xd8\x83v\xb3\x96.\xb0Na\xafj\x0d\xd70dz\xd0\xc2\xc7\x1d\x1e\xf3}\xcfY\xe8\xe9\xcb\x8b\xed\xab\xc5\xf6\xb7[\xf0\xf0\xdeQv\x05\x04>\x806\x09a\xael\xa2\xc6QK\x87$\xc3P\xeen\xcb\x1eS\x08\xc7{m\xed\x0d\x05\x04>`\x0d\xc52,\xcfJv\xb9\x8e\xeb\xeb\xb6\xd1N\xcb\x1e\xcf}\x8d=3q\xa1#\x9c\xc3\x07\xac\x8bq\xc5\xf5m^\xfak\xa3E\x8f%L\x1a\xd9\x11\xf6\xa0[\xf4\xf0\x01\xbd\x17\xcb\xb0\x94\x0d\x89\x9e\xb7uE\x88\xf8\xd8\xdb\
x14\xf6\x94]\x81\x0e\xd2\xc3\x07\xac\x83q\xc5\xf5m\xee\xdd{\xd4\x92\xc7q,\xecAw\xe9\xe1\x03zm0\x18T\xad\x97\xfb\xbc\xe5\xc3\x93!`\x9d\xc6\xe0\x17\xb6\xedU\x84=eW@\xe0\x03hk\xd8\x0b\x01)/\xd9\xe5\xa6\xe2\xfa\x95\x8b\xb3`\xcf*\x9e\xe3\xf7\x16\xf8\x10\x94]\x81\x1e0\xa4\x0b\xf4Y\xf2z\xb9\x1d\xb6\xc8\x19\xbc\xc2\x1e\xf4\x84\x1e>\xa0\x97\x06\x83A\x08B\xfb%\xbb\x842,G=x\xaa\x8b\x08|\xca\xae@\xcf\xe8\xe1\x03\xfa\xaa*\xcc\x8dz\xf2<\x9b\x0e|\xc2\x1e\x08|\x00\xed7\x18\x0c\xf6\xb2\xf22,\xa7=Z!\xa2\xc9\xc0\x17\xca\xael\x09{ \xf0\x01\xb4=\xecU\xad\x97\x1b\x1c\xf4\xe8)75kW\x8d=\x10\xf8\x00:\xa3\xaa\x0c\xcba\x11j\xaez\x12n\x87\x0d\xddT(\xbb\xb2#\xec\x81\xc0\x07\xd0\x85\x00\xb4\x95\x95\xf7\xde\x85\xf3\xd3\x8ez\xf4\x94\x9b\x18\xce}\xae\xc6\x1e\xf4\x9fY\xba@\x9f\xe4Yy\x19\x96\x83\x9e\xf5bm\xcd\xf9\xfb\xca\xae\xc0\x9a\xd0\xc3\x07\xf4B\x1c\xde,+\xc3r\xde\xc3p3k\x0f\xdf\x8d\xb0\x07\xebE\x0f\x1f\xd0\x17UC\xb5y\x0f\x9f\xf3\xee\x8caO\xd9\x15X3z\xf8\x80\xce\x1b\x0c\x06\xa3\xac|\xb6\xeaq\x8f\xca\xb0\xdc?\xe7Yz\xf7\xae\x85=XOz\xf8\x80\xae\x07\x9f\xaa2,\xad_/wFu\x03\x9f\xb2+\xb0\xc6\xf4\xf0\x01]\x17f\xe5\x96M\xd48\xeaK\x19\x96\xd7l\xd5\xd8\xf7T\xd8\x83\xf5\xa6\x87\x0f\xe8\xacX\x86\xe5Y\xc9.a\xbd\xdc\xbc\xa7O\x7f\x98\xb8\xdf\xb1\xb2+\x80\x1e>\xa0\xcb\xc6\x15\xd7\x1f\xf4\xf8\xb9\xa7\x0c\xe9\x1e\x0a{@\xa0\x87\x0f\xe8\xa4X\x86\xa5l\x96j(\xc3r\xd2\xd3\xe7\xbe\x95\x95\x0fc\x07\xca\xae\x00\x02\x1f\xd0yUaf]{\xf7\xc2$\x95Q_\xc3. 
\xf0\x01kb0\x18T\xad\x97\xfb\xbc\xe7\xa5GvJ\xc2\x9e\xb2+\xc0\xa78\x87\x0f\xe8Z\xd8\x0beX\xf2\x92]\xfaZ\x86\xe5\xa1\xe1\x84\x9f]\x0a{\xc04z\xf8\x80\xae\x09a\xae\xec\xfc\xb5|\x0d\xca\x8flM\x09{\xca\xae\x00\x13\xe9\xe1\x03:#\xae.\xf1\xb4d\x97P\x86\xe5\xa8\xe7m\x10z8\x1f\x0eg\x1f\x0b{@\x15=|@\x97T\x85\xb9\xd1\x1a\xb4\xc1\xc3\xf3\xf7\xd4\xd8\x03\x92\xe8\xe1\x03:a0\x18\xece\xd5eX\xce\xd6\xa0)\x86\xf1\xf2]a\x0fH\xa5\x87\x0f\xe8\x0a\xbd{wB\x0f\x9f\x1a{\x80\xc0\x07\xf4\xcb`0\xc8\xb3\xf22,\x87=]/w\x92\x835z\xae@C\x0c\xe9\x02m\x0f{[Yy\x11\xe5P\x86\xe5h]\xdaC\xd8\x03\x04>\xa0\x8f\xf2\xac\xbc\x0c\xcb\x81\x19\xaa\x00\x02\x1f\xd0Qq\xbd\xdc\xfd\x92].\x9d\xcb\x06 \xf0\x01\xdd\x96W\x5c\x7f\xa0\x89\x00\x04>\xa0\xa3\x06\x83\xc1(+/\xc3r\xbc&eX\x00\x04>\xa0\x97a/\xac&Q6\x11c\x1d\xd6\xcb\x05\x10\xf8\x80^\x0bC\xb5e\x135\x8e\xccV\x05\x10\xf8\x80\x8e\x8aeX\x9e\x95\xecr\x9d\xadQ\x19\x16\x00\x81\x0f\xe8\xa3\xaa0\xa7\x0c\x0b\x80\xc0\x07tU,\xc3\xf2\xa4d\x97\xb0^\xee\x89\x96\x02\x10\xf8\x80\xee\x1aW\x5c\xaf\x0c\x0b\x80\xc0\x07t\xd5`0\x08a\xael\xbd\xdcP\x86\xe5BK\x01\x08|@7\xc3^(\xc3\x92\x97\xec\x12\xca\xb0\xe8\xdd\x03\x10\xf8\x80\x0e\x0ba\xaf\xac\x0cKn\xa2\x06\x80\xc0\x07t\xd4`0\xd8).\x9e\x96\xecr]\x84=eX\x00\x04>\xa0\xc3\xaa\xc2\xdcH\x13\x01\x08|@G\x0d\x06\x83\xbd\xac|\xbd\xdcs\xeb\xe5\x02\x08|@\xb7\xe9\xdd\x03\x10\xf8\x80\xbe\x1a\x0c\x06yV^\x86\xe5\xd0z\xb9\x00\x02\x1f\xd0\xdd\xb0\xb7\x95\x95\x97Y\x09eXL\xd4\x00\x10\xf8\x80\x0e\xcb\xb3\xf22,\xd6\xcb\x05\x10\xf8\x80\xae\x8a\xeb\xe5\xee\x97\xecrY\x84\xbd\xb1\x96\x02\x10\xf8\x80\xee\xca+\xae\xb7\xa2\x06\x80\xc0\x07t\xd5`0\x18e\xe5eXN\x95a\x01\x10\xf8\x80\xee\x86=\xeb\xe5\x02\x08|@\xcf\x850WV\x86\xe5H\x19\x16\x00\x81\x0f\xe8\xa8X\x86\xe5Y\xc9.\xd7\x992,\x00\x02\x1f\xd0iUa.W\x86\x05@\xe0\x03:*\x96ayR\xb2\xcb\xb92,\x00\x02\x1f\xd0mU\xbd{\xbd\x9e\xa8Q\x04\xde\xbdb\xbbM\xdc\x8eV\xf88\x0fj<\xce\x91\x975\x1d\xfd{\x1c\x15\xdb\xc5k\xaf\xe7\x93\xf8\xc5T\xe0\x03\x985D\x14\x17\xdb%\xbb\x1c\xdf\xde\xde^\xf4\xb9\x0d\x8a\xe7wR\x5c\x9c'\xee\xfe4\x9e\xef\xb8\xec\x
e3T5\x83\xfa!=\xb2t\xf5\xfd(\xbcn_LxO\x0a#\x10\xef\xc7\xf5\xbd\x05>\x80\x86C\xc4:\x95a\x19\xd5\xd8w\x15a*\x1c\xa7\x8d\x05<\x17h\xcb\xfbQx\x8d\xefW\xec\xf6\xac\xcf=}\x02\x1f\xb0\xaa\x10q\xb4.\x135b\xb9\x99\xc3\xc4\xddw\x97\xf9\xa1S\xdc\xd7Nq\xf14q\xf7C\xa5s\xe8\xa8\xd4/\x97\xb9\xc0\x07\x90\x1e\x22\xb6*B\xc4u\x11\x1c\xf25k\x96p~\xdeu\x8d}\x97\xf9\xb8R\x5c\xae\xe11\xa3\x1f\xefG\xc3,\xbd\x07{W\xe0\x03H7\xae\xb8~\xb4n\x0d\x12{3S{\x19\xb6\x9711\x22L(\xa9\xf1\x01g\x15\x14\x10\xf8\x00\x92C\xc4\xf9\xba\xae\x97[s\x02\xc7Q<\x0frQ\xc7)\xdcvj\xef\xdesk\x1c\x83\xc0\x07\xf0\x89\xa0Rq\xfdh\xcd\xdb'\xf5\xf9\x87!\xa8E\xf6\xaaU-uw/L\xae\xc9\xbd\xac\xe9\xb0\xab\x1a\xfb\xde\x08|\x00\x15\xe2L\xb8\xb2\x10\xf1|\xddO\xfa\xaf9\x81\xe3\xd9\x22\xca\xb4\xc4\xdbL\x0d\x93#\xab\xa0\xd0\x83\xbf\xb9\xe4\x9eu\x81\x0f\xa0<D<\xaa\x08\x11z\x8a>\xf9\xa1\x92:\x81#_\xd0\xfd\xa7\x9c\xc4~\x1e\x87\xa1\xa1\xebFYu\xef\xdd\xa5\xc0\x070\x7f\x888\xd0St'\xb6Cj\x90\xdbo\xb2LK\xc2Rw\x0f\x03\xfa\xc8\xd1\xa2'\x7fsW\xc5Ex\xedO\xeb\xe9;\x0e\xd7\xf7\xf9=\xea\x0d/\x03\xa0\xa1\x10QV\xd4\xf4\xd2\xea\x0c\x9f\xfa\x00\x1a\xc7\x99\xb8)\xb3d\xf3\xf8a\xd5\x84\xd4\xe3p\xa4\xe6\x1e=\xfb\x9b\x0b\xab\xfa\x0cc\xed\xc9\xb0m\x15\xdbY\xb1]\xac\xc3\x97Q\x81\x0fhB^q\xbd\x92\x1e\xd3\xdb\xe5\xc3\x84\xfdB1\xe6\xd1\xbc\xa19.u\x972QC\xcd=\xfa\x1e\xfc.\xd6\xedy\x1b\xd2\x05\xe6\x92\xd0Ku\xaa\xa4G\xe9\x07\xcf\xf3\xd4P=O\x99\x96\x9a\xeb\xe5\x0a\xe8 \xf0\x01$\x87\x88uZ/wVy\x96V\x0abs\xce\xb6L\x9d\xa8\xa1\xe6\x1e\x08|\x00\x9fP5D\xe8<\xb0\x0a5W\xe08\x98\xa5\x97/\x9e\xb3\xb4\x9f\xb0\xab\x99\xd4 
\xf0\x01|\x22Dl\x15\x17\xcfJv\x09eG\x8e\xb4TR\xe8\x1bgiu\xc26fl\xd3\xd4\xdfQs\x0f\x04>\x80Z!\x22\x17\x1ejI\xed\xe5\xdb\x8f=v\xa9\xc1|\x94\xa5\xcd\x04Vs\x0f\x04>\x80O\x84\x88aV^\xcb\xed\x5c\x19\x96zjN\xe08J<N\xa9\x135\xd4\xdc\x83\xd5\xbd\x9fn\x85\xf7\xd4&\xebmN\xa2,\x0b0\x8b\xca\xde=M4\x93<\x06\xaf\xaa\xc9\x15\xa1L\xcb^B\x8f\x5cj\x19\x16\xe7Z\xae\xee\xc3>\x84\xf2\xf0A\x7f_\x1b.\xfc\x7f\xb7\x22\x9c_<\xd8\xce\x1c\xbbN\x1d\xeb\xbdx\xbc\xb7&\x1d\xe7b\x9fi\xc7\xf9d\xde\x11\x13\x81\x0f\xa8\xfb\xa6\x15\x02\xc9v\xc9.\xc7fy\xce&\xbc\xa1\xc7Zy/\x12C\xf7I\xc5\x87K\xca0\xb1\x9a{\xcb\xff\x1b\xda\x8a\xc1~\xaf\xe2oi\x92\x8d\x18\x14v\x1f\xdc^8\xffs\xdcD\xafz\xecez?q\xf7//\xea4\x80\xf8\xfa\xbd\xca\xd2f\x96W>\x8e\xd4\xe7U\xdc\xce`\x01\xcfe\x18\x8f\xf7\xfe\x1c\xc7\xf9Eq;\xf7K\xbf\xcd\x14\xfe\x0c\xe9\x02u\xdf\x84\x8f*z\x1f\x94a\x99/\xf4\x85\x0f\xed\x94\x09\x1c\x9b1\x1c\x96\x05\xc2\x94\x0fK\xc7k\x89_\x96\x8a-\xf4\xd6\xbc\xcc\xee&<m7t\xd3\xbb1\x10\x5c\xcd;,\x18\xbf\xac\xa5\x9eZ0\x9e\xa76d\xd5mg\xe9e\x84NZz\xbc\xc3P\xedY\x0c\x9a\xfb\x0d\xdc\xe4v\xfc2\x18\x8es\xed\xba\x9c\x02\x1fPG^\xf1&|d\xa2Fc\xed\x9c\xb4\xdf\xa47\xfd\xd8\x83\x94\xf2\x01\xa3\xe6\xde\xf2\x82\xdeU\xfc\xb0\xde^\xe0]\x85\xe1\xfb\xf7\x8b\xfb\x9akv|\xf1\x9a\x08_\x02.\x13v\x0d\xef\x05'\x0bh\xaf\xd0\xf3\x99\xb2\xde\xf3e\xd6\xd2\xd3G\xe2H\xc8E\x966a\xaa\xae\x8d\xf8\x85a\xa7\xce/\x09|@\xf2\xb7\xd5\xe2\xe2i\xc9.\xd7\x86\x06\x9b\x11C\xd8q\xe2\x1b\xff\xc1\x8c\x81Q\xcd\xbd\xe5\xfc\xdd\x1c\xc5\xa0\xb7\xb9\xc4\xbb}\x1az\x96\xe6\xec}\x1b%\xee\xb7[\xd1\xd3\x5c\xb7\xbd\xc2c\x1e\xa7>\xc66~\xc1|p\xcc7\x16x77u\xbf\xac\x09|@\xaa\xaa7aC\x83\xcd:\xc8\xd2V\xe08\x88a\xfc\xfe\xc3f\x98\xa5\xf5\xee\xa9\xb9\xb7\x1c\xabZ\xb3u\xb7Fp\x9a\xf4\xa5#<\xeew\x13w\xcf\x1f\xbe\x06\x1bx\x9fI\x09J\xef\xc6\xc7\xd8\xb6\xb0\x97W|1nJ\xed\x9eU\x81\x0fHy\x13\x1bf\xe5C\x13j\xb85,\x86\xb1<a\xd7\x8d\xd7\xf6K\xf9\x9dS\xc7ki\xe6i\xe7\xcb9\xef\xfbI\x0c 
\xb3\xbe\x06COUjA\xf0\xf1\xbc\x0dUc(\xf7<>\xb6\xb6\xbdO\x86\xc7\xff\xac\xad\xaf+\x81\x0fH\xfd\xd6]f\xa4\x89\x16\x12\xfa\x8e\x12?\xf4\xf7\xefkye\xd5\xe7\x0c\x99X\xb3\xfc\xe0~\x5cq<N\xb3\xbb\xde\xb4\xb7\x8b\xed\xb3a\xa6h\xdcv\xee\xff]\xfc\xfcq\xb1}9\xde\xd6M\x8d\x87\xf0\xacN\xa1\xee)\x7f\xdb)\xf77\xd7\xd0n\x8d\xa1\xdcV\xd6\x8cL\x98\xd0\xf6\xa9\xd0z\x7f\xcc\x1f\x1c\xef\xfb\xe3\xfcv\xbcn\xda\xb1\xbe\x99\xe5\x0b\x9b\xb2,@\xd5\x1bY\xe8!(;\xff\xe8\xb9:`\x0b\x15>DS\xcad\x84\xe3\x94r\xceV\xeex-]\xf8p\xde\x7f-\xb4\x84\x9f\x1d\xa5\x0eK\xc6c\x16\xb6\x93\x07%wR{\x93B\x10\x19\xce\x18X\xaf\xe2\x04\x84\xf7R^[\xc5\xbe'3\xbe\xbeB\xd8K\x19\xca\x1d\xb5\xf4\xf5\x9bZ\xf32|\x81;\x98v\xfe\xdd\x83\xe3|\xf6\xe0=8\xf4\x1c\xee=x\x0d\x8dgy\x80z\xf8\x80\xaao\xad\x07\x15\xdf\xb6s-\xb585&p\x84\x0f\x83\xaa\xe1\xb0\xcb6\x0e\x85\xad\xc11<\x89\x7f+a;,\xb6\xad\xe2g\xa3Y\xcfA\x0b\xbd\x86q\x82\xd4\xdbYz\xef\xdb\xce\x9c\x8f\xff4a\xd7\x99\x86vk\x0c\xe5\x1e\xb7\xf8T\x84Qb\xd8\x1b\xd6\x9dl\x11\x9esx\xbddw\xbd\x7f\xa7\x02\x1f\xb0\x08U\xb5\xdc\xac\x97\xbb\xbc\xde\x83\x9b\x06ng\xa4)W\x1a\x08B\xd0k\xeco&\x06\x87Q\x8d\xd7\xd0\xbc\x8f\xbf\xf1\xa1\xdd\x1aC\xb9\xd7YKOE\x88\xa7RT\xf5\xee\xdd\xc4\xb0\xf7j\x8e\xe3}Ul{\xb3~Q\x10\xf8\x80iob\xa1G\xa0l\xb6\xa7\xde\xa2%\xa91\x81\xa3\xcc\xf36\xcej\x5c\xa3cx\xb2\x88/G\xb1\xc7+\xa5\x07x\xaf\x81\xd7`\xeam\xd4\x99\xb5;\xce\xd2\x86r\xf7Z\xfc\xe5r\x98\xf2<W\xfd\xf8\x05>`\x9a\xaa0\xe7\xc4\xff\xe5\x06\x86\xd4\x09\x1c\x93\x5cg\x86\xde\xfb,\xe5\xd8n,q\x15\x8e\xa4\xa1\xdd\xf8xR\x86r\x0f[\xfee%\xa5]W>\x14-\xf0\x01\x93\xde\x88GY\xf9l\xcfS+4\xac\xc4\xac!\xfb\xc0\xd0{\xaf\xbf\x0c\x5cei\xe7\xd8\x0d\x1b\xb8\xbb<\xf1\x8bG\xe9\xd0n\x8d\xa1\xdc\xf3\x9e\x14t_y`\x15\xf8\x80Io\xc4Uo\xb0z\xf7V\xf3\xc1\x1eB\xf6q\xcd_Sso=\xa4|\x01\xdbi\xe05\x18\xbe8\x8cR\xc3a\xc9j\x1f)\xb3Z[Y\x82eF;\xab~\x00\x02\x1fP\xf7\x8d\xf8PY\x8f\x95\xcak\xee\xefX\xad\x87\x94\x1e\xa4GM\xdcQ\x1c^=L\xd8u\xe2\xd0n<?8\xa5\xa4\xcc\xa8G\xef5[\x02\x1f\xd0\x1a\xf1D\xeb\xb2\xde\xbbp.\x98\x89\x1a\xab\x0f\xe4u<\x9d\xf7\xdc-:!e\xc8~\xb7\xa9
;\x8b\xc3\xac)\xabp<\x99\xf0\xfaKy\x0f9\xeeY\xcft\xbe\xea\x07 \xf0\x01\xaf\xbf\x11+\xc3\xd2\xde@\x1ezF\x9e\xcex\x5c\xe9\xb1\x15Mj\x18ei\xa5Z\xc6\x0f^\xc3\xa3\x84\xe0\xd9\xda\x12,S\xa4\xb4\xfdf\xf1\xdc\xc7\x02\x1f\xd0\x860\x11\xbe\x85\x97\xcd\x98\x0b'O\x8f\xb5\xd4\xca\x03\xf9,\xb6\xe7Y\xf6\x0a\xa6\x84\xcc\xab\xc4`\x16\xc2N^c\xf9\xb1\xbd\x8e}\xb1<K\xdc/,\x818.9\xafq\xa1,\xad\x06\xa4\x86\x89\x5c\x13\xad4\x90\x8f\xb2\xf9\x86\xe4\xc2\x07\xee\xb8-\x1f\xa4\xc5c9[\xc2\xdd\x5c\x14\xcf\xf7\xa0\xa3\xc7;\x84\x82\x9d\xb8\x85\x7f\x0f\xe3U[Y\xda\x12^\xcb\x0a}\xe3\xc4\x952\x0e\xe2c\xaf\xaa\xb9w\xd8\xb5z\x91a\xe8\xb9h\x83\xeb\xc4\xe3\x12j\x9b\x0eC\x00^\xf6\x17h\x81\x0f\xb8\x0f\x13\xdb%\xbb\x1c+\xc3\xb2\xf2\x0f\xffy\x87e7\xe2m\x8cZ\xf2\xb4v\x1d\xd9O\x1d\xe3\xbd\x18\xec\x86m\x0au\x09\xc2k\xea\xaa\x22\xccmd\xe5\x85\xdc\x83.\x97`\x09\x8f\xfbE\xe2\xbe\xe1\xd8\xbe\x88\xeb\x94\x87\xedd\x19_\xc4\x0c\xe9\x82\x0f\x9a\xaa0q\x93)\xc3\xd2\x86\x0f\x93\x8d\x06ng\xdf\x04\x8e\xf6}\xd9*\xb609\xe1{10\xecw,\xec\xd5]\x85\xa3\xec}f\xd4\xd5\xe3\x18{\xeb\xcek\xfe\xdaf<\xe6Wq\xa8w\xa1\xa5[\x04>\xa0*L\x1c\x99\xa8\xb1\xd2@\x902Q#|X\xbe\x9bx\x93c\xad\xba\xfa/Y\xf1\x9c\xb6W\xf1\x03\xffI\xd7\x9fS\x8dU8\xa69\xe8A\x09\x96\x10zgY\x0d\xe7\xbe\xf7\xf3\xc3\xe25q\x11\xbf\x044~\x9e\x9f\xc0\x07\xeb\xfd\xc1\xb3U\x11&\xae{R\xe5\xbe\xcbR\x86r\x8fj,\xbd\xb6\x19\x87\x92X\xcd\xdf\x5c\xe8-\x0f\xc1\xe6Y\xd6L\xafm\xdb\xbe<\xce\x12xN\xfb0!,~1\x1ef\xb3/\x81\x18lg\x1f\xf7\xfa\xe5M\x06?\x81\x0f\xd6[\xd5\x9b\xac\xa1\xdc\xd5\x86\x83QV}\xae\xdb\xcd\x83P\x98z\xbc\x0ej,nO3\xc7\xf2Q\x9c\xa8\xf2\xf5\x1e\x06\xbd\x87\x81gT\xf3\xd7\xae\xb3\xfe\xac\xa6\xf10\xf4\x9d\xcfyS\x1b\xf1K\xc1US_\xd0\x04>X\xdf\x0f\xa0aE\x988\xb7$\xd7j\x03B\x96\xde\xbb\xf7*~\xd8\x84@\x91\xb2\xa6\xeaF\xa66\xdf2\x8fe\x18\x96\xbf\xca\xe6\x9b\xa8r\x19CD\x186=|\xb0\xbd\xfd`\xbb\x5c\xf5s\x8d3l\xeb,\xffw\xd1\xb7SF\xc2\xf3)\xb6a\x96\xb6\x1aIR\xf0\x8bC\xbds\x9d\xe3g\x96.\xac\xafq\xc5\xf5z\xf7V\xab\xaa\x08vp3!\xb8\x85\xe3\x96rN\xd8G+ \xacj\xf6uq\xbf\x835\x0a{gY\xfd^\xbd\x10\xeeNb 
:K\xbc\xafW-y\xbe\xfb5~%\xbc\x0e\xf7\xfa\xf8\xe52\x9c\x0e\x13\x8b-\x1fe\xf3\x9f\xa7\x19\x86z\xcf\xc2)\x01\xb3\x0e\x7f\xeb\xe1\x83\xf5\xecq\xa8Z/\xf7y\xd7ja\xf5\xec\xf8\x0c\x13?4?\xb5\xf2I<\xf1=\xb5ga\xac\xb5\x17z\x1c\x1f\xd5\x0c{7\xf1\xd8=\x0e=D\xe1\xbc\xcc.\x95C\x8a\xcfw\x96\xe06\xee\xeb)\x06\xe1\xef\xb1\xd8\xc2d\x8e\xd0\x03{<\xe7\xcd\x85\xd7\xd1\x8bx\xaa\x87\xc0\x07$\xbd)\xe7\x15\x1f:\xb9\x96Z\xa9\x94\xe1\xd6\xeb8Qc\xda\xef_'\xdc\x86\x09\x1c\x8buR#\xec\x85\xa1\xda\xad\xd0+\xd4\xe1\xd9\xaaG\xd9l%e6\xfa\xfe\xe5#\x04\xf7b\x0bA\xedq\x0c\xf5\xd7s\xdc\xdc\x8bX\xecZ\xe0\x03*\xdf\x94\xad\x97\xdb\xde@\x1ez_\xb7\x13v=(\xf9py\x95\xa5\x0f\xc9?3\x81c!\xc71|\xb8\xa7\x9c\xb3\x17\xbe`\xbd\x1dV\x04\xe9\xf2\xdf]\x0c \xfbs\xdc\xc4\xee:,\xff\x17{\xfc\xc2{l\xf8\x9b\xfbbv\xd7\xebw3\xc3M\xd5\xee\x15\x15\xf8`\xbd>\x84\xaa\xce\xaf)\xeb5b\xf1\xc7\xa7\xaa\xf7\xf5^\xe5\x84\x9ax}\xeaL\xc1\xb1\xd6o\x5c\x9e\xb8\xdf\xb0\xeb\xab\xd8\xc4\xd7m\x13\xaf\xa1|\x9d\xbe|\x84\xd3fb\xaf_x\xce\x875\x83\xdfFVs$F\xe0\x83\xf5R\x15\xe6F\x9ah\xe5\xc7'e\x080\xf5\x8d>\xf5x\xee\xce2D\xc4\xd4\x00\x14\xda2eh\xf3\xb0'\xe7\xca\xa6\x0c]\xa7\xce\x1e_\xbb\xca\x00qVo\x1e\x83_\x9d\xf3\xfc\xf6\xeb\x04d\x81\x0f\xd6\xebC\xa8l\x88\xe9\xd4z\xb9+=>\xc3,mH,y]\xe3\x9a\x138\x8e\x16Q\xdd\x7fM\xa5\x84\xe7\x9b>\x145\x8f\xc3\xb0UC\xd7\xd7q\xe2BJ\x98\xd9^\xd7\xf3Jc\xf0\x0b_\xd2\xdei\xf8\xb5&\xf0\xc1\x1a\x85\x89\x94\x9an\xca\xb0\xacV\xcaP\xfa,\xeb\x1a'O\xe0\xf0\x1ah\xcc0a\x9fq\x0f\xdeW\xc2)\x22)\xe1l\xf4\xe0=&e\xd8\xf2\xd9\xa2\xd7\x95my\xf0\x1b\xd7\x08}\x02\x1f\xf0\xa90W6\xc4t\xd8\x83u,\xbb\xfc\xc1\x19>4S&j\xd4^\xd7x\x86\x09\x1c;\x8e\xc8\xdc_\xaeR\x86s\xcf\x16p\xf7\xbbK~\xba!\x98T\x0e\xe5\xde\xf7H\xc7\xd7b^\xe3\xb6\xd7V\x0c})\xc3\xe0\xc9\x7f\xaf\x02\x1f\xf4\xff\x03h\xab\xe2\x03\x7fR\xf1^\xdas|\xee\xcd\xbc\xaeq\x9c\xc0q\x9a\xb8\xbb\xd7\xc2|R?\x80\xaf\x1a~\x1d\xed,\xf9u{\x94\xf0%\xe5S=\xd25\xd6|\xde\x8e\xf7\xb1\xceR\xfe\xde\x93\x0bz\x0b|\xb0\x1eo\x1aeo\x0a\x07\xca\xb0\xacT\xeaD\x8dy\x87[S\x7f\x7fw\xd6\xc2\xae\xd4\x0a\xe1MO\xd6\xd8Zb\xd8\x1b
\x16\x17OS\xde{\xa6\x8c\x1c\xa4\xbe\xbe\x9e\xc6\xfb\xf2\x1ai\x80\xc0\x07=\x960\x11\xe0|\xd6ezh\xec\xf8\xa4,\xb94\xf7\xba\xc6&p\xb4\xee\xd87\x1d\xd0\xf6\x96\xf4\xb8SK\xb0\x5cN+\xf1\x14\x83\xcc\xf3\xc4\xbb\x1c\xaf\xf9k\xf1\xba\xa9\x1b\x12\xf8\xa0\xdf\xaa\x86DrM\xb4R\xa9a;o\xf0\xf5\x90r\xd2\xfc\x86\xd7\xc6\xc25\x16\xf8bx\xdc_\xe2k6\xe5\x1c\xc5Q\xc2k:u2\xd1:\xbf\x16\x1b\x0b\xbb\x02\x1f\xf4\xb7\x07!\xbc\xe1\x96\x9dcs\xac\x0c\xcbJ\x8fO\x9e\xf8\xc1\xd9\xd8q\xaa9\x81\xe3\xa9\x09\x1c\x0b\xd5d\x8f\xdcxI\xaf\xd9\xf0\x98Sz\xa4+\xeb\x0b\xc6\xd7\xe2\xa8\xc6kq\xb8\x86\xef\x11\x8f\xb2\x1a\xe7\xe8\x09|\xb0\x9ea\xa2\xaa\x0c\x8b\xf5rW{|\xb6j\x04\xafF\x8fS\x1c\xc2\xb7\x02\xc7\xe2\xa4\x9ew\xb5\xd7\xd0k)\xbc>vk\xbe7\xcc\xfa\x9aMy=\x5cg\x89\x13\x7f\xe2\x17\x99\xd4\xa1\xdd\x935\x1c\xdaMy\x8d\xa4\xfe-\x0b|\xd0S\x07\x15\xdf\x0c\x8f\x94aY\xa9\xd4\x89\x1a\xcf\x17t\x9cRC\xe4\xf6:\xaco\xdap\xa0~\x95%\x0eU\xce\xdb\xb6\xb1\x17\xffY\xcd_\x9b\xb5\xd7v\x9c\xf8\x9a\x1d\xd5\x9c\x04\x96'\xb6\xd7F[\xbf\x80\x84 \xdat\x0fd\x8de\x16\xcf\x04>XS\xf1\x9bx\xd9\x87\xc0u\x1f*\xfcw\xf8\xf8\x84\x0f\x86\x94a\xb1\x85\xf5\xc2\xc6\x9e\x95\xd4%\x9cr\x138j;\xa9\xd1\xb6;3\xbe\x8e\xc2k\xe3\xc5\x92^\xb3)\xabi\xdc\x7fA9\xab\xf9Z\xac3\xb4\xfb\xa4\xa5K\x00\x86\xc7\xf4~\xf1\xd8\xce\x9a\x08~\xf1\xef-\xb4c\xca)\x1f\xc9!X\xe0\x83\xfe\xa9z\x03\xd0c\xb3\xc2\x9e\x80\x1ao\xd0G\x0b.\x97\x93g\xe9\x138\xd4\xe6\xab'\xb5\xbdB\xdb\x9e\xd5\x091!P\x14\xdbEV\xbfgo\xd6\xd7l\x08\xa4__\xe4\x17\x94\x9aC\xbbm\x9c\xb5{\x7f\xfcv\x1f\x04\xbf\xd1,\x8f3\x06\xc6\xd0\x1e)\x85\xd8\xcf\xeb\x8c\x00\x08|\xd0\xaf@1\xac\xf8&>wy\x0f\xe6R\xb5\xe2\xc9\xbd\xe4\xf3\xa0f\x15?(R\xefc\x7f\x9d\xeb\xa1\xcd\xd8\xb6\xa9\xe7V\x85\xd0\xf7^YH\x08\xa1+\xf4\xb2\xc5\xa0\xf7~I\x18\xb8i\xf8\xfd\xa4\xce\x17\x94\xd1\x9c_P\xf2,}h\xb75\xefa\xb1\x8d^\xef\xb1\x0f\xef\xc1\xa1\xf7\xf5{\xf1\xb8\xe61\xa8?\x9a\x16\xaa\x13\x8f\xef\x5c_\xde\x05>\xe8\x97\xaa7g\xbd{\xab\xfb`\xd8\xca\xd2{e\xf2e\x14\xc3\x8eC\xfb\xa9u\xbe\xf4\xf2\xd53\xaa\xb9\xff\xc3\x90p[l\xaf\xe2\xe5m\xf1\xb3\x0f\xb3\xbb^\xb6\xb2
 p3\xc3}\xa6\x84\xb0\x94\xf0q\xda@\x9d\xc8:C\xbb\xbb-:\xb7t/\xe1\xb8>\x8bA\xee\xfe\xd8\x86\xed\xaa\xe6\xf1}\xdda\xdd\xc2\xcc\x02\x1f\xf4'PT\xf5\x1e=_@u\x7f\x9a\x0b\xe3\xf7.\x97\x5c\x0c;\xf5\x83s;\x9e7FZ\x80\xb9*.\xde\x9d\xe3&\xea\x94\xe3\x08ao\x98\x18\xba\x92\xce\x19\xac\xb1\x9aFcA\xb3\xe6\xd0n\xbe\x80\xe2\xd5\x8b\x08|\xd3l\xceq\x9f\xc7\xb3\x9c\x87-\xf0A?\xc2^\xd5\x8c.eXV{|\xf6\xb2\xf4\xd2\x19K\xed\xb9\x88!!u\xf8\xf1\xa0%\x1f\xb2]\x09}\xa1W\xf4x\xc1ws\x1f\xf6R\xbf\xcc=Jx\xbd\x86}R{\xecF\x0d\xf7F\xe7Y\xfa\xb9\xa5\xe3\x15\xff]O\x1a\xce]\xb4\x10\xf6f\x0a\xd8\x02\x1f\xf4CU\x99\x8f\xdcz\xb9+\xfdPH\x1d\x0e=_Q1\xec\xd4\x90i\x02G\xfd\xd0\x17>\x9c\x9f/\xe8\xe6CP\xdfz-\xecU\x85\xf7\x94\x89\x04\xe3,\xad\x87\xb1\xf1s\x82g\x18\xda]\xe5\x17\xd9e\x7f\xf9yw\xd6\xb0'\xf0A?\x02E\x18\xa2)[V\xe9z\xda\x9a\x96,-Lm\xd6\xd8w\x15\xa1\xa4\xce\xda\xa6OL\xe0\xa8\xdd\xbe\xe1\xb8~9knRE8\xef\xf2\x9d\xe2v\x873|\x91\xdb\xa9x?\x09\x81\x22\xb5l\xd0hA\xed\x15B\xe4i\xe2\xee\xcfV\xb5\x22L\xfc\xbby\x9c-\xbe\x177\xdc\xfe\xe3y\xdf\xc7\x05>\xe8\xbe\xaa7\x81\x91&ZY\x18\x0f=\x00\xa9\x135\x8eW|\x8ee^#\x90\x8c\x1d\xdd\x99BLx=\x1c\xce\x11\xfc.c\xd0\xdb*9\xcf\xb3\xea5\xb4U\xf1zM\x0d\x15\xf9\x82\x8b\xb7\x8f\xba\xf0z\x0cm\x10{\xdd>\x9b\xdd\x9d\xb3y\xde\xd0M\xdf\xc4/a!\xe8\x8d\x9ah\xebAq#\xfe\x12!\xda\xf8\xca[gY\x8de\x8a\xa2\xf3\x9bo|0\x5cQ\xa0\x08\xe7\x86\xbdW\xf6\xd8B/\x80#\x0b\xad\xfb2\x10\xfev\xc3\xdf\xe6N\xc9{\xceu\x0cp\xe1}\xe9\xc4\xea8\x9d9\xb6\x8f\xe2q\xbd?\xbe\x8f\x12>WBP|\x15\x8f\xf5\xd9\x22\xbe\xfc\xbd\xe1\xd0@\xa7\xe9\xdd\x83\x0e\x8a=~jb\xf6\xf3\xd8~?\xb8\xb5\xe9q\x19\xd2\x85\xee~\x8b\xcc\xb3\xf2s\xc3\x0e\xf5\x08\x00 \xf0Aw\xc3\xdeVV~\x82\x7f8\xff\xc3D\x0d\x00\x04>\xe8\xb0<+/\x9bp\xa0\x0c\x0b\x00\x02\x1ftT,\x89QV\x86e\xd9+5\x00 \xf0\x01\x0d\xcb+\xae\xb7^.\x00\x02\x1ftU,\x8aZ6\xbd\xffxE+5\x00 
\xf0\x01\x0d\x84\xbd\xaa%\xba\xac\x97\x0b\x80\xc0\x07\x1d\x17\x86j\xcb&j\x1c)\xc3\x02\x80\xc0\x07\x1d\x95\xb0DW\xa8\xc8\xaf\x0c\x0b\x00\x02\x1ftXU\x98S\x86\x05\x00\x81\x0f\xba*\x96ayR\xb2\xcby\x5c\xa6\x09\x00\x04>\xe8\xa8q\xc5\xf5\xca\xb0\x00 \xf0AW\x0d\x06\x83\x10\xe6\xca\xd6\xcb\x0deX.\xb4\x14\x00\x02\x1ft3\xec\x852,y\xc9.\xa1\x0c\x8b\xde=\x00\x04>\xe8\xb0\x10\xf6\xca\xca\xb0\xe4&j\x00 \xf0AG\x0d\x06\x83\x9d\xe2\xe2i\xc9.\xd7E\xd8S\x86\x05\x00\x81\x0f:\xac*\xcc\x8d4\x11\x00\x02\x1ft\xd4`0\xd8\xcb\xca\xd7\xcb=\xb7^.\x00\x02\x1ft\x9b\xde=\x00\x04>\xe8\xab\xc1`\x90g\xe5eX\x0e\xad\x97\x0b\x80\xc0\x07\xdd\x0d{[Yy\x99\x95P\x86\xc5D\x0d\x00\x04>\xe8\xb0<+/\xc3b\xbd\x5c\x00\x04>\xe8\xaa\xb8^\xee~\xc9.\x97E\xd8\x1bk)\x00\x04>\xe8\xae\xbc\xe2z+j\x00 \xf0AW\x0d\x06\x83QV^\x86\xe5X\x19\x16\x00\x04>\xe8n\xd8KY/7\xd7R\x00\x08|\xd0]a\xa8\xb6\xac\x0c\xcb\x912,\x00\x08|\xd0Q\xb1\x0c\xcb\xb3\x92]\xae3eX\x00\x10\xf8\xa0\xd3\xaa\xc2\x5c\xae\x0c\x0b\x00\x02\x1ftT,\xc3\xf2\xa4d\x97seX\x00\x10\xf8\xa0\xdb\xaaz\xf7\x94a\x01@\xe0\x83\xae\x1a\x0c\x06!\xccm\x97\xec\x12\xca\xb0\x5ch)\x00\x04>\xe8f\xd8K)\xc3\xa2w\x0f\x00\x81\x0f:,\x84\xbd\xb2\xf5r\x8fL\xd4\x00@\xe0\x83\x8e\x8aeX\x9e\x96\xecr]\x84\xbd\x5cK\x01 \xf0Aw\x8d+\xae\x1fi\x22\x00\x04>\xe8\xa8\xc1`\xb0\x97\x95\xaf\x97{n\xbd\x5c\x00\x04>\xe8\xb6\xaa2,#M\x04\x80\xc0\x07\x1d5\x18\x0c\xf2\xac|\xbd\xdc\xe7\xd6\xcb\x05@\xe0\x83\xee\x86\xbdP\x86\xa5\xac\xccJ(\xc3\x92k)\x00\x04>\xe8\xae0\x94[V\x86\xe5@\x19\x16\x00\x04>\xe8\xa8\xb8^\xee~\xc9.\x97\xd6\xcb\x05@\xe0\x83n\xcb+\xae\xb7\xa2\x06\x00\x02\x1ft\xd5`0\x18e\xe5eXN\x95a\x01@\xe0\x83\xee\x86=\xeb\xe5\x02 \xf0A\xcf\x850WV\x86\xe5H\x19\x16\x00\x04>\xe8\xa8\xb8^\xee\xb3\x92]\xae\xb3\xea\x22\xcc\x00 \xf0A\x8bU\x85\xb9\x5c\x19\x16\x00\x04>\xe8\xa8X\x86\xe5I\xc9.\xe7\xca\xb0\x00 
\xf0A\xb7U\xf5\xee\x99\xa8\x01\xc0J\xbc\xa1\x09`~\xb1\x0c\xcbv\xc9.\xc7\xb7\xb7\xb7\x17Z\x0a\xa0{\xbe\xf9\xf8\x0b\xe3\xf8\xcf\xfcK/\xbf}\xd5\xc5\xe7\xa0\x87\x0f\xe6\xf4'\xff\xf8\x07\xc2\x17\xa7\xb2\xde=eX\x00\xba\xed$\xbb[9\xe9e\x11\xfe\xce\x8am$\xf0\xc1\x9ay\xfc\x9d\x1f\xdb\xca\xca\xd7\xcb=2Q\x03\xa0\xbb\xbe\xf4\xf2\xdb!\xf0]\xc7\xff\x86\xa2\xfa/\x8a\xd0wUly\xb1=\x12\xf8\xa0\xe7\xfe\xdc\x1f\xbe\x91\xfd\xea{\xbf\xf2\xf9\x92]\xae\x8b\xb0\x97k)\x80\xce{}$'\xd4[\x0de\xb8\xbe\x17\x86|\x8bmG\xe0\x83\x9e\xfa\xdc\xcb\x1f\xac\xda\xc5P.@?\x8c\xb3\xbbSt&\x09\xc3\xbd\x1f\x16\xa1\xef\xa2\xad\xc3\xbd\x02\x1f\xcc\xe8\xa7\xfe\xe0G\xb3\xf3_>+\xdb%\x94a9\xd1R\x00\xdd\xf7\xa5\x97\xdf\x0e\xa7\xe6T\xbd\xa7\x87\xc9{a\xb8\xf7U\x1c\xee\xdd\x12\xf8\xa0\xe3~\xf7\xdf\x7f\xb7j\x97\x91V\x02\xe8\x95\xd4\x95\x92\xc2y\xdda\xb87L\xf28)\xb6\xa1\xc0\x07\x1d\xf4\xd7\xbe\xfb(\xfb\xd6\xc5\xaf\x97\xed\xf2\xdcz\xb9\x00\xfd\xf2\xa5\x97\xdf\x0e\xe5\xb5\xcek\xfeZ(\xc8\xff~\x9c\xe41Z\xd5$\x0f\x81\x0fj\xfa\xf1?\xfeL\xf6\xad\xb3\xd2\xb0\x17\xce\xf1\xc8\xb5\x14@/\x8dg\xfc\xbd0\xc9\xe3E\xb1]\xc5I\x1e[\x02\x1f\xb4\xd8\x9b\xbf\xf3\xe3\xd9w\xbe\xf3\x9d\xb2]\xac\x97\x0b\xd0S_z\xf9\xed\x10\xf8\xae\xe7\xb8\x890\xdc\xfb\xb0\xa6\xdf\x9e\xc0\x07-\xf3\x17\xfe\xef\x0fe\xbf\xfaK\xff\xa2l\x97\xcb\x22\xec\x1di)\x80^\x1b7t;\xa1\xa6\xdf{q\xb8\xf7`\x91\xc3\xbd\x02\x1f\xd4\xf0c\xd7\x95\x7f2\xca\xb0\x00\xf4_\xd3_\xec\xc3p\xef\xd7\xb3\x05\xd6\xf4\x13\xf8 
\xd1_\xfd\xbd\x9f\xa8*\xc3rz{{{\xa6\xa5\x00\xfa-\x96h9^\xd0\xcd\xdf\xd7\xf4kt\x09\xb77\x1c6\xa8\x16&j\xbc\xfa\xd6\xff,\xdd\xe7\x1f\xfd\xdc\xdf\xfc\xc56L\xbd\x07`).b8[\x940\xdc\xbb[|\xae\x84\xde\xc4\xb0\x8d\x8b\xa0y5\xeb\x8d\x0dnoo\x1d2\x886\xbe\xf2\xd6Y\xfc#\xfb\x84P\x86\xe5W\xfe\xd9/O\xfd\xbd|\xefg\xb2\x9f\xb9\xf8\xef\x1a\x10\x80E:\x8e\xc1\xef\xac\xee/\x1a\xd2\x85\x09~\xf4\xf7>\xf7\xfd\x7f\x87\xf5r\xcb\xca\xb0\xec\xbe\xf5\xd3\xd9\xdf\xf8\xad\xefh4\x00\x16-\xf4(\xbe\x7f\xbf\x84[\x9dI\x1e\x02\x1f\xbc\xe6\xc7~\xeb/g\x8f>\xfc\xa9\xec\x87\xbe\xfb\x93\x1f\xfd\xff\xcf\xfc\xf6\x8f\x94\x96a\xf9\xf2\x9f\xfa\xe1\xec\x87\x7f\xf7\x8f4\x1c\x00\xcb\xf2\xd1\x12n\xd9]M\xbf\xa3\x94\x9a~\x86t\xe1\x81\xcf\xff\xec/\xfc\xceO|g\xe3O\xdf\xff\xffs\x7f\xf1e\xf6\xaf~\xf1\x9fO\xdd\xff\xe7\x7f\xf6\xed\xec\x17\xfe\xd3ok8\x00V\xed4\xbb\x1b\xee\x9d\xb8\xde\xafI\x1b\x10\xbd\xb9\xfd\xd5\xf1Od\x1f\x87\xbd\xe03\xbf}S\xfa;?\x9b\xfd/\x0d\x07@\x1b\x84%\xdc\x9e|\xf3\xf1\x17BQ\xe8\xfbI\x1e\xdf_\x04@\x0f\x1f\xc4\xb0\x97M\x98m\xb5\xf1#\x7f\x94\xfd\x89\x1f\xfd\xcd\xec_\xbe\x7f\xfa\xa9\xdf\xf9\xfb?\xf7\xb7\xb2\xbf\xfb\xeb\xffM\xe3\x01\xd0F\xa1\xc7\x22\xf4\xf6\x1d\x855\x80\x05>\x84\xbd)a\xef\xa1?\xfb\xd9?\xc8\xfe\xcf\xef\xff\x9b\xec_\xff\xbb\x7f\xfb\xd1\xff77\xb7\xb2\x7f\xba\xf1#\xce\xdd\x03\xa0\x0b\xc1\xef@\xe0C\xd8\xabQG\xe9\xef\xfc\xe4\xb7\xb3_\xfa\x8f\x17\xd9;_\xfc\xf3\xca\xb0\x00\xd0f\x9f\x18\xda\x15\xf8\x10\xf6jz\xe7\x07\xbf\x95\xfd\xf5\xef\xfe\xa6\x06\x04\xa0\x8d&N\xde\x10\xf8\x10\xf6f\xf0\xf7\xfe\xf0\xd7\xb3\x9f\xfe\xfd\xff\xaa!\x01h\x830l\x1b>\xd7\x8e\xa6\xad\xc6!\xf0!\xec\x09}\x00t\xd3ev7l{\xf2pF\xee$\xca\xb2 \xec\xcd\xe8\xbf\xfc\xc0\xe7\xb2\x9f\xce\x04>\x00\x96\xae\xf6\x12k\x02\x1f\xc2\xde\x0c\xde\xbe\xfd\x1f\xd9\xcf\xff\xde\xafiT\x00\x96%\x0c\xdb\xdeO\xc2\xb8\xaa\xfb\xcb\x02\x1f\xc2^M\x9f\x1f\xfc\xef_\xfe\xf9\xdf\xfd\xb5\xafiU\x80\xb56,\xb6gK\xb8\x9f\xf3\x18\xf2\xc6\xf3\xdc\x88\xc0\x87\xb0W\xcf\xf1\xaf^<\x1fiU\x80\xf5\xf6\xcd\xc7_8X\xf0]\x84a\xdb\x8f\x8a&7qc&m 
\xec\xd5\xf8\xe3\xfb\x8d\xcb\xaf\x09{\x00\xc2\xdeVq\xf1r\x017=qY\xb4&\xe8\xe1C\xd8\x13\xf6\x00\xa8\xa7\xe9\xde\xbd0l{\xf4z\xed\xbc&\xe9\xe1C\xd8\x13\xf6\x00H\xf4\xcd\xc7_xT\x5c\x5c\x15\xdb\xc6\x9c7u\xbf\xd6m>\xcb$\x8c\xba\xf4\xf0!\xec\x09{\x00\xa4\xdb\x9b3\xec\x85a\xdb<K\xa8\x9d'\xf0\x81\xb0\x07\xc0j\xe43\xfe^X\xf2\xec\xa8N\xed<\x81\x0f\x84=\x00\x96\xec\x9b\x8f\xbf0,.6k\xfc\xca\x5c\xb5\xf3\x04>\x10\xf6\x00X\xbe\xd4\xcf\x86\x8f\x96<\x9b\xb7v^\x93L\xda@\xd8\x13\xf6\x00\xa8\x90X\x8a\xa5\xd1\xdayM\xd2\xc3\x87\xb0'\xec\x01Pm\xda\xe7C\x98\x841\x8eA\xefU[\x1f\xbc\xc0\x87\xb0'\xec\x01P\xed\xf5\xda{\x8d,y&\xf0\x81\xb0\x07@\x0b|\xf3\xf1\x17\xc2gD(\xc5\xb2\xd4\xday\x02\x1f\xc2\x9e\xb0\x07\xc0\xf2\x0c\x8b\xed\xddl\x01K\x9e-\x8bI\x1b\x08{\x00\xd0s\x9f\xd1\x04\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 
\xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{Z\x15\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\xd66\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xa2\xc0W\x84\xb3G\xc2\x1e\x00@\x8f\x03_\xe1\xa8\x08i[\xc2\x1e\x00@\x0f\x03_\xec\xdd\x0b\x01m\xd4\xc0m\x09{\x00\x00m\x0b|\x85\x83x9W\xb8\x12\xf6\x00\x00\xda\x1b\xf8\xee\x83\xd5f\x11\xda\xf6\x84=\x00\x80\x1e\x05\xbe\x18\xf06'\x84?a\x0f\x00\xa0\x0f\x81oB\xc0{Rg\xf2\x86\xb0\x07\x00\xd0\xe2\xc0\x17\x83\xdd\x93\x84\x10(\xec\x01\x00t1\xf0\x95\x04\xbb\xca\xe0%\xec\x01\x00t;\xf0\x95N\xde\x10\xf6\x00\x00:\x10\xf8\x8a\xd0\x16B\xd6f\xc9.\x07\xc2\x1e\x00@\x87\x03_V=l\xbb\xfb\xfa\xe4\x0da\x0f\x00\xa0#\x81/\x06\xb9\xdd\x84]\x0f\x84=\x00\x80\x0e\x06\xbel\xcap\xed\x04#a\x0f\x00`u\x06\xb7\xb7\xb73\xfdb\x11\xe0^\x15\x17\x1b\x89\xbb_\x16
\xdb\xb6\xb0\x07\x00\xb0|3\xf5\xf0\xc5\xc9\x1a\x1b5~E\xd8\x03\x00\xe8R\xe0\xcbfX:M\xd8\x03\x00X\x8d\xdaC\xbaq\xb2\xc6Ka\x0f\x00\xa0\x1bf\xe9\xe1\xcb\x85=\x00\x80\xee\xa8\xd5\xc3\xf7\xe6\xf6W\x1f\x15\x17WY\xbd\xf3\xf7\x84=\x00\x80\x15\xaa\xdb\xc3\xb7'\xec\x01\x00\xf4;\xf0\x1d,\xe9q\x09{\x00\x00\xcb\x0e|on\x7fu'k\xae\xbc\x8a\xb0\x07\x00\xd0\xb6\xc0\x97-\xa7wO\xd8\x03\x00XE\xe0\x8b\x935\xf6\x96\xf1\x80\x8a\xfb\x1a:,\x00\x00\xcdI\x9a\xa5\x1bW\xd6x\xb1\xc4\xc7u]lG\xc56\xfe\x8d\xcb\xaf\xbdr\x98\x00\x00\x16\x1f\xf8\xae\x8a\x8b\xcd\x15<\xbe\x9bb;\x09\xe1\xaf\x08~\x17\x0e\x17\x00\xc0\x02\x02_\x1cb}\xbf\x05\x8f\xf52\xbb\xeb\xf5;\xd1\xeb\x07\x00\x90.\xe5\x1c\xbeQK\x1ek\x98!\x1c\x86\x95\xaf\x8a\x10:\x8e\xb3\x86\x01\x00\xa8P\xda\xc3\x17'k|\xaf\xc5\x8f?\xf4\xfa\xe5\xbfq\xf9\xb5\x13\x87\x12\x00`\xb2\xaa\x1e\xbeQ\xcb\xc3\xde\x91\xb0\x07\x00P\xee\x8d\x8a\xeb\x0fZ\xf6xM\xe2\x00\x00h*\xf0\xc5\xc9\x1a\x9b-y\x9c&l\x00\x004\x1d\xf8\xb2v\x0c\xe7\x1egz\xf3\x00\x00\xe62q\xd2\xc6\x9b\xdb_\xdd*.^\xae\xe81)\xba\x0c\x00\xd0\xa0i=|\xa3\x15<\x96\xe3\x18\xf2\xce\x1c\x16\x00\x80\xfe\x04>\xbdy\x00\x00\xcb\x0e|on\x7fu/[\xfcd\x8d\xd3\x18\xf2\x94T\x01\x00Xv\xe0\xcb\x16\xd7\xbb\x17z\xf3\xc61\xe8]iz\x00\x80\xe5\xf8\xc4\xa4\x8d\x05M\xd68\xcf\x14H\x06\x00X\x99\xd7{\xf8F\x0d\xddn(\x90<\x8eA\xefJ3\x03\x00\xf4'\xf0\x85\xde\xbc0d;\xd6\xb4\x00\x00-\x0b|on\x7f5\x84\xbdY&k\xe8\xcd\x03\x00\xe8B\xe0\xcb\xea\xf7\xeeY\xee\x0c\x00\xa0\x03>\x9a\xb4Qc\xb2F\xe8\xcd\x0b\x93/,w\x06\x00\xd0\x11\xf7=|\x07\x15\xfb\xe9\xcd\x03\x00\xe8x\xe0\x1bM\xb9\xderg\x00\x00]\x0f|q\xb2\xc6\xc6\x83\x9fY\xee\x0c\x00\xa0O\x81/\xfb\xb8wOo\x1e\x00@O\x03_\x08x{z\xf3\x00\x00\xfa\xe9\xff\x0b0\x00\xb2\x10\xef\xec0\x8f}\x9d\x00\x00\x00\x00IEND\xaeB`\x82\x00\x006\xc9\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe 
ImageReadyq\xc9e<\x00\x006VIDATx\xda\xec\xddO\x8c$Y~\x17\xf0\x88\xf5X\xfe\x87]\xbd\xd2\xf2O+\xdc\xd9\x1cX\x0d\x18U\xad\xc0bA\xac\xaa\xc6B\xc2\x5c\xdc5\x12\x17\x9f*\xe7\xc0\xc1\x87\xa5kN\xec\xad\xa3%\x0e\xbem5#$$\x0e\x93u\xc2\xe2\xe0\xa9>\x1a\x1bu\x95\x16\xc1\x98?\xda*\xb3\x92Y\x0c\xee*\xc4\xb2\x96\xd0j\xbbla\x0c\xd8n\xe2M\xbd\xda\xa9\xe9\xc9|\x11\x91\x19\x99\x19\x11\xf9\xf9H\xa1\xec\xae\x8c\xca\x8c|\x99\x95\xf9\xcd\xf7\xe2\xfd^\xfe\xea\xd5\xab\x0cH\xdb\xfa\xca\x97\xf6\xcb\x8b\xd1\xf5{\x1f\x1ei\x0d\x00\xfa\xe63\x9a\x00j9\x8c\x1b\x00\xf4N\xae\x87\x0f\xd2\xb6\xbe\xf2\xa5Qy\xf1\x22\xfe\xf7\xed\xeb\xf7><\xd1*\x00\xf4\x89\x1e>\xa8V\xdc\xf9\xb7^>\x00zG\x0f\x1f$l}\xe5K\xf7\xca\x8b\xcb\xf0\xcf;?~p\xfd\xde\x87\x97Z\x07\x80\xbe\xd0\xc3\x07i\xfb\xaf\x85\xbd\xa0\xd0,\x00\x08|0\x1c\xd3\xc2\xdd~\xec\xf9\x03\x00\x81\x0f\xfa\xac\x0cu{\xe5\xc5\xfdiWe7=\x7f\x00 \xf0A\xcf\xa5&h\x14\x9a\x07\x80\xbe0i\x03\xa6x\xad\x14\xcb,o]\xbf\xf7\xe1\xa9\xd6\x02\xa0\xeb\xf4\xf0\xc1t\xe3\x1a\xfb(\xd1\x02\x80\xc0\x07=V'\xcc=\x8c=\x81\x00 
\xf0A\x9f\x94!n\x9c}\xba\x14\xcb,c-\x06\x80\xc0\x07\xfds\xb8\xa4}\x01@\xe0\x83u\x8b\xa5X\xb6\x9b\xfcJ\xec\x11\x04\x00\x81\x0fzb\x9e\xf0\xa6\x97\x0f\x80NS\x96\x05\xa2\xb8z\xc6\xf7\xe6\xfcu%Z\x00\xe8,=|\xf0\xb1Ez\xea\xc6\x9a\x0f\x00\x81\x0f\xbao\x91\xd0v`}]\x00\x04>\xe8\xb08\xf1\xe2\xfe\x827\xe3\x5c>\x00\x04>\xe8\xb0qGn\x03\x00\x04>h[\x5c-c\xb7\x85\x9b\xba\xafD\x0b\x00\x02\x1ftS\xd1\xe2m\x09|\x00t\x8e\xb2,l\xb48\xd1\xe22\xab\xbf\x94Z\x1d\x0f\xae\xdf\xfb\xf0R\xeb\x02\xd0\x15z\xf8\xd8t\xe3\x96\xc3^PhV\x00\x04>\xe8\x8ee\xcc\xac\xddW\xa2\x05\x00\x81\x0f:\xa0\x0ce\xfb\xd9\xe2\xa5X\xa6\xdet\xe6\x5c>\x00\x04>\xe8\x84e\x8625\xf9\x00\xe8\x0c\x936\xd8H\xb1\x14\xcb\x8b%\xdf\xcd\xdb\xd7\xef}x\xa2\xb5\x01X7=|l\xaaU\xf4\xc0\x8d53\x00]\xa0\x87\x8f\x8d\xb3\xa4R,\xb3(\xd1\x02\xc0\xda\xe9\xe1c\x13\xed\xaf(\xec\x05\xce\xe5\x03@\xe0\x835Xe\x08\x1b+\xd1\x02\x80\xc0\x07+T\x86\xaf\xbd\xf2b{\x95w\x99\xdd\xf4(\x02\x80\xc0\x07+2^\xc3}\x1a\xd6\x05`\xadL\xda`c\xac\xa8\x14\xcb,o]\xbf\xf7\xe1\xa9g\x01\x80u\xd0\xc3\xc7&\x19o\xe8}\x03 \xf0\x81\xc0\xb7\x02\x07\xb1\x87\x11\x00\x04>X\x862l\x85\xb0w\x7f\xcd\x871\xf6L\x00 \xf0\xc1\xb0\xc3\x96\xc0\x07\x80\xc0\x07\xcb\xb0\xf5\x95/\xed\x94\x17\xbb\x1d8\x94\xfb\xb1\xa7\x11\x00\x04>hY\x97\xca\xa2\x08|\x00\xac\x9c\xb2,\x0cZ\x5c\xe5\xe2{5w\xbf\xbe\xfd\xb5\x86w\x13~\xefeV\xff\x1c\xc1/^\xbf\xf7\xe1\xb9g\x07\x80U\xd1\xc3\xc7\xd0\x8dk\xecsUn\xef\x94\xdb\xa8\xdc\xe6\x09b\xe7e\x80\x1b\xc5\xdb8\xab\xb1\xbfB\xcc\x00\x08|\xd0\xa2T\xb8z\x96\xdd\x14D\x1e\x95\xdb\xa4\xdc^.rG\xf16\xf6\xca\x7f~\xb1\xdc\x8e\x13\xbb\x1eX_\x17\x00\x81\x0fZP\x86\xaa\xb0\x86\xed\xeb\xc3\xaca\xf8\xf5i\xb9=(\xc3\xd9\xfe2V\xbf\x08\xc3\xb5\xe56.\xff\xf9\xd9r{\x92\xdd\xf4 
\xben\xec\x19\x02`U\xde\xd0\x04\x0c\xd8\xdd\xde\xbd\x8br;\x0a\xbdp\xab\xba\xf3\xd8cX\x84-\xce\xce\x0d\xdb\xee\x9dc;\xf2\x14\x01\xb0\x0az\xf8\x18\xa4\xb8\xaaE\x08Wah5\x0c\xdb\xee\xac2\xecM\x09\x7f\xb7\xc3\xbd\x0f\xe21\xdd\x8b=\x90\x00\xb0tz\xf8\x18\xb20l{\xd9\xa5\x03\x8a\xc73\x8e\xe7\xf09\x8f\x0f\x00\x81\x0f\x16\x0cV]>\xbe0\xdc\xfb\xd23\x05\xc0*\x18\xd2\x05\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x04>\x00X\x91<\xcf\xef\x95\xdb\x9e\x96\x00\x81\x0f\x80a\x86\xbd\x9d\xf2\xe2RK\x80\xc0\x07\xc00\xc3\xdeay\xf1\x8dr\xdb\xd2\x1a\xb0\x5c\xd6\xd2\x05`\xd5A\xef^y1)\xb7\x87\xb7?{\xf5\xea\xd5\xa9\x96\x01\x81\x0f\x80a\x84\xbd0\x84{Rn\xf7\xb5\x06\xac\x8e!]\x00V\x15\xf6n\x87p_\x0f{gZ\x07\x96K\x0f\x1f\x00\xcb\x0ez\x9f\x1a\xc2\x05\x04>\x00\x86\x13\xf6\xea\x0c\xe1\x9ej)X.C\xba\x00,+\xec\xcd\x1a\xc2\x05VL\x0f\x1f\x00m\x07\xbd\xa6C\xb8\xa7Z\x0d\x04>\x00\xfa\x13\xf6\xcc\xc2\x85\x0e2\xa4\x0b@[ao\x9c\xdd\xf4\xd65\x0a{j\xf0\xc1\xf2\xe9\xe1\x03`\xd1\xa0\x17\x86p\x8f\xca\xed@k\x80\xc0\x07\xc0\xf0\xc2^\x18\xc2\x9d\x94\xdb\xf6\x9c7\xa1\x06\x1f\xac\x80!]\x00\xe6\x0d{\xe3\xecf\x08w[k@\xb7\xe9\xe1\x03\xa0i\xd0ks\x08\xf7T\x8b\x82\xc0\x07@\xb7\xc2\xde\xa2C\xb8\xc0\x1a\x18\xd2\x05\xa0n\xd8\x1bg\xed\x0f\xe1\x9ejYX>=|\x00T\x05=\xb3pA\xe0\x03`\xc0ao\x94\xdd\x14R^\xca\x10\xae\x1a|\xb0\x1a\x86t\x01\x98\x15\xf6\xf6\xcb\x8b\xf3\xcc\xf9z 
\xf0\x010\xc8\xb0\x17\x86p?(\xb7\xad%\xde\x8d\x1a|\xb0\x22\x86t\x01\xb8\x1b\xf4F\xd9\x12\x87p\x81\xf5\xd0\xc3\x07\xc0m\xd8[\xf5\x10\xee\xa9V\x07\x81\x0f\x80\xd5\x85\xbdU\x0c\xe1\x02kbH\x17`\xb3\x83\xde([\xdf\x10\xee\xa9g\x00VC\x0f\x1f\xc0\xe6\x86=\xb3pA\xe0\x03`\xc0ao\xedC\xb8j\xf0\xc1\xea\x18\xd2\x05\xd8\xac\xa0\x17V\xcd\x08AK\xaf\x1el\x10=|\x00\x9b\x13\xf6\xf6\xca\x8b\xcb\x8e\x84=5\xf8@\xe0\x03\xa0\xe5\xb0W\x94\x17\xcf3\xb3pa#\x19\xd2\x05\x18v\xd0\x0bC\xb8a\x16\xeen\xc7\x0e\xed\xd4\xb3\x03\x02\x1f\x00\xed\x18\xc5p\x15f\xe3\xee\xc4\x9f\xedj\x16\x10\xf8\x00\x18\x88W\xaf^\x9d\xc7\xb0\xf7)\xf1\x9c\xbe\xe0\xf5\xcbU\x04\xc2S\xcf\x0e\x08|\x00,?\x0c\x9e\xa6\xc2\xd7k\x81p/\xd33\x08\x02\x1f\x00\xc3\x0d\x84e\xf8\xbb\x5c\xd2m\x03+`\x96.\x00Iq\x86\xef}-\x01\x02\x1f\x00\xc3\x0c{a\x96\xefa\xcb7\xab\x06\x1f\x08|\x00tHX\x82-U\xbb\xefI\xb9]k&\x10\xf8\x00\xe8\xa18i\xe3 \xb1\xcb\xc5\xabW\xaf\x8a\xf2r\xdc\xf0\xa6O\xb5.\x08|\x00tCQq\xfdGC\xbde\xe8\x0b\x85\x9d\x9fj.\x10\xf8\x00\xe8\x91<\xcf\xc7Y\xba\x0c\xcb\xb3\xbb3m\xcb\x7f\x87\xf0wQ\xf3\xe6O\xb50\xac\x96\xb2,\x007\x01g\x94\xdd\xacJ\xb1Wna\xa2BrU\x8a2\xe0\xe4\x03n\x8b\xf0\xf8\x8b\x8a\xdd\xa6M\xe4\xd8\xcfn\x8a<[\xaf\x17\x04>\x80N\x84\x9a\x9d\xec\xe3\x82\xc2{]\x0c)\xf1\x1c\xba\xe7\xf1\xbfg1L]\x96\xdbi\x5cAcYB\x98K\x95ayR\xde\xff\xe5\x94\x10|\x19{\x06?H\xdd\xb8\x1a| 
\xf0\x01,;\xe4\x85@\xb2\x9f\xf5\xa3\xae\xdc\xe8\xce\xbfw\xb3;\xbd\x8d\xe5c\xb9\x0d\x81'm\x06\xc0\xd8\xd3\x99*\xc3r\x95\xdd\xcc\xdc\x9d\x15\xe6N\xca\xdb\x08\xe7\xf3=\xf2\x8a\x03\x81\x0f`U!\xef^\x0cx!\xc4l\xf7\xec\xf0G\x15\xd7\x7f?\x04\x96\x8f3\x04\xb1\xd3\x10\x00\xe3$\x8ayU\x95a)\xca\xdb\x7f\x99\xba\x81p>_\x0c\xd7\xd3\x86\xc3\xd5\xe0\x8350i\x03\x18l\xd0\x8b+D\x5c\x96\xdb\xfb=\x0c{\xc1^\x83}C\x8fe(\xa1\xf2A\xf9\xb8_\x96\xdb\xa4\xdc\xf6\x1b\xb6Y\xb8\xbf\x87\x89]\xce\xca07\xa9ys\xe1\xbe\xa7\xd5\xe7{\xe9\xd5\x09\x02\x1f@\x9bA\xefq\xd6\xef\x09\x04\xa39\x7fo\xebN\xf8\x0b\xe7\xd5\x15q\xa8\xb6\xcaQ\xc5\xf5E\xdd\x03\x88\xbd\x80\xd3\x02\xe7\xb9W)\xac\x9e!]`\x88\x0e\x17\x08z\xd7\xd9\xc7\x93#\xc2\xf6r\x8d!\xe5~K\xb7\x11\x82\xef\xe32\xf4=\x0b\xa1n\xda\xa4\x898\xd9\x22\xd5\x0bz\xdct\xb2E\xd8\xbf\xbc\xdd'\xf1\xfeo\xe9\xe1\x03\x81\x0f`1\xa1g\xa9\x0c\x19\xe1\x1c\xb6\x83\x9a\xbf\x12\xce);\x8d\xdby\xd5\xf9i+\xf6V\xf6q\x89\x98\xbdx\xb9H\x8fe\x18\xae}\x18\xcf\xf7+n\x87g\xe3y\x8eG\x15!\xf8p\xce\xe7\xa3\x88C\xc5\xb7\xe7\xf3\xe9\xe1\x03\x81\x0f\xa0\x15G\x89\xc0\x17\xc2\xce\xed\xcc\xd6\x93.?\x88;=j\xdf?\xce84\xbbwg\x9b\xa7\x170\xfc\xce\xfbq\xe8\xbb\xa8\x11$\x8f\x16\x0c\xc2ah\xf72\xde\x87\x1e>\x10\xf8\x00Z\x09J\xe7e\x98\x09\xab>\xdc\x1d\xa2<.\xb7I\xdfk\xc0\xc5\xfaw\x93\xb8\xdd\xad'8\xce\x9aOL\x09\xc1\xef\x1f\x96\xdb\xe7\x13\xfb\x5c\xc5\xf5r\x179\xe6\x97q\x02\xc9\xf3%\xd7\x0f\x04f0i\x03\x18\xaa\xd0\xcb\x17\x86\x22\xc39d\x9f-\x83\xc6x\x88\x05\x7fC\x80*\xb7\xd0\x03\x17\x82\xdf\x83r{7\xab\xbf\xc4YP\xd5\xe3v\xd8\xd2q\x9e\xc6c\x03\x04>\x80\xd6\x82P\xe8\xcd\xbb\x17z\xa7:v^\xde2\x1f\xf3\xe5k\xe1/\x14@\xbeJ\xfcJ\x08\x86\x7f)q\xfdY\x9b\xc3\xde\xe1\xd8\xbc2A\xe0\x03\xa0\xdd\xf0wXn\xa3\xf2\xbfo\x97\xdb\xb3)\xbb}\xae\xe2f\xc6Z\x12\x04>\x00\xfa\x11\xfe\xc2\xea\x1b\xe1\x1c\xba\xdb^\xbf0\xd4\x1df'\xa7\xce\xdd{:m\xbd\x5c@\xe0\x03\xa0\xdb\xc1\xef\xa3^\xbf\xec\xa6\xa0\xf3Nb\xd7\x10\x08\x0b-\x06\x02\x1f\x00\xfd\xb5\xf0z\xb9\x80\xc0\x07@G\xc52.\xa9\xa2\xd4\x17&W\x80\xc0\x07@\xbfU\x85\xb9CM\x04\x02\x1f\x00=\x15\xd
7\xcb\xddM\xec\xf2l\x88\xb5\x0a\x01\x81\x0f`S\xc2^X/\xb7\xa8\xd8M\xef\x1e\x08|\x00\xf4X\x08s\xa9uw\x9f(\xc3\x02\x02\x1f\x00=\x95\xe7\xf9(K\xf7\xde\x85\xd58L\xd4\x00\x81\x0f`)A\xe4\xa8\xdc\xce\xe3p#\xcb\xa3\x0c\x0b\x08|\x00k\x09{{\xe5\xc5\xa3r\xdb.\xb7\xcbX.\x84\xe5\xb4\xf3\xc3\xc4.a\xbd\xdc\xc9\x12\xeew\x22\xcc\x83\xc0\x07pw\x081\xf4>}#\xce\x22ey\xed<M\xb1\x8c\xb0\x97\xdd\xd4\xfa\x0ba\xfeT\x98\x07\x81\x0f\xd8@e\x008\x8ca\xe0u\xef\xc7\xb0@;\xed<\x9e\xd1\xce\xb7\x8e\xdb.\xc3r'\xec\xdd\x12\xfa@\xe0\x0360\x84T\x95\x07\xd9\xd3J\xad\xb5s\xaaw\xaf\xf5\xf5r\xa7\x84\xbd[[B\x1f\x08|\xc0f9\xcc\xd2\x13\x08\xc6\x9ah%\xed|\xb4\x842,\xa9@'\xf4\x81\xc0\x07l\x82\xd8\xeb\x94*\x0frl\xa5\x87V\xdayT^<N\xecrU\xb6s\xb1\x84\xbb\xde+\xb7\x0b\xa1\x0f\x04>`\xb3\xa5z\x9d\xae3+=\xb4eR\xe3yh],\xed\x22\xf4\x81\xc0\x07l\xb8q\xe2\xba#\xb5\xe0\x16\x17\xcb\xb0\xa4\xd6\xcb\x0deXN\x96u\xff\x0dC\x9f\x92- \xf0\x01\x03\x0b\x22!\xec\xcdZ\xda+\xf4\xeeY\xe9\xa1\x1d\x93\x8a\xeb\x97\xde\x8b*\xf4\x81\xc0\x07l\xae\xfd\xc4uz\xf7\xda\x09\xd5U\xeb\xe5>-\xdb\xf9|\x15\xc7\x12\x9f\xcfq\x0c\xf3\xb3\x84\x92-'\x9e9\x10\xf8\x80a\x04\x91Q6{\xb5\x07\xbd{\xed\xb4qU\xb9\x9b\xd6\xcb\xb0\xd4\x08}!\x5c\xeeU\x84\xbe]\xb5\x17A\xe0\x03\x86!\xd5\xbbw\xa2w\xaf\x15\x9d\x5c/7\x86\xbe\xaaa\xe4\x83\xd8;\x09\x08|\xc0@\x03\x9f\xde\xbd\x05\xc5\x19\xaf\x07\x89]B\x19\x96\xb5\xb5s\x5c\xab\xf7I\xc5n_+\x1f\xc7\xbeg\x13\x04>\xa0\x9fa$\x0c5\xce\x9a5z\xb1\xaas\xca\x06\xae*\xcc\x8d\xd7}\x80\xb1\xee\xdf\xb3\x8a\xdd&q\xf8\x1f\x10\xf8\x80\x9e\xd9K}\xc0k\x9e\x85\x03\xf5~\x96.\xc3\xf2\xacC\xc5\xacC\xf0\xac\x9a\xb9{b\xe6.\x08|@\xff$\xcf\xdf\xd3<\x0b\x85\xbd\xaa\xf5r\x83\xce\x9c\x1b\xd7`\xe6\xaea~\x10\xf8\x80\x9e\xd9\x9b\xf1\xf3\x8b%\xac\xe5\xbai\xaa\xca\xb0<\xe9Z\x1b\xc7!\xfc\xa2b\xb7\x83X\xb7\x11\x10\xf8\x80\xae\x8b=P\xb3\x02\xc9\xa9\x16Z\xa8mGY\xba\xf7\xae\xb3\xe5n\xe2\x04\x92\xaa\xf3\xf9\x8e,\xbf\x06\x02\x1f\xd0\x0f\xa9\x0fl\x81o1E\x96.\xc3r\xd8\xf1r7\xe3,=\xb4\x1b\x1e\xdb\xc4\xd3\x0c\x02\x1f\xd0}{\x02_\xfb\xe2z\xb9\xa92,g\xb1\x14Jg\xdd9\x9f/\xc59\x9e 
\xf0\x01=0\xab\x87\xefB\xb1\xe5\x85T\x0d\xd5\x16}x\x10\xe5k \x04\xbaiC\xbba&\xef\x17c)\x17@\xe0\x03:nVy\x8dKM3\x9f8\x99a;\xb1\xcbq\x87\xca\xb0\xd4\x11\xceC\xbc;\xb4\x1b&\x9a\xec\xa8\xcf\x08\x02\x1f\xd0\x1f\xb3\xea\xc3\xf90\x9f/\xecU\x95aY\xf9z\xb9\x8b\x8a\xb3\x88\xc3c\xd2\xab\x07\x02\x1f\xd0\xd3p\x92\x09|\xad\x0a\xbda\xa9\x89\x1aG},u\x13B\x9e^=X\xae74\x01\xb0Do\x95\xdb(n{\xf12\x94iq\xfe^\xf3\x00\x1d\xda\xeeqb\x97+\xbdc\x80\xc0\x07\xacT\x9c\x94q\xaa%Z3\xa9\xb8\xfeP\x13\x01\xb3\x18\xd2\x05\xe8\xb8X\x86%\xb5^\xeeY\x9c\xf1\x0a \xf0\x01\xf4\xd4\xa4\xe2z\xbd{\x80\xc0\x07\xd0Wy\x9eW\xad\x97{l\xb2\x03 \xf0\x01\xf47\xec\x85\x99\xceEb\x97P\x86E\xef\x1e \xf0\x01\xf4X\x08{\xa92,\x85\x15K\x00\x81\x0f\xa0\xa7\xf2<\x0f\xcb\xd2=J\xec\x12\xca\xb0\x1ci)@\xe0\x03\xe8\xaf\xaa07\xd6D\x80\xc0\x07\xd0Sy\x9e\xefg\xd5eXN\xb5\x14 \xf0\x01\xf4\x97\xde=@\xe0\x03\x18\xaa<\xcf\x8b,]\x86\xe5I\x1f\xd7\xcb\x05\x04>\x00\xb2\xef\xaf\x97\x9b*\xb3\x12\xca\xb0\x98\xa8\x01\x08|\x00=Vd\xe92,\x87\xca\xb0\x00\x02\x1f@O\xc5\xf5r\x0f\x12\xbb\x5c\x94ao\xa2\xa5\x00\x81\x0f\xa0\xbf\x8a\x8a\xeb\xad\xa8\x01\x08|\x00}\x95\xe7\xf98K\x97a9V\x86\x05\x10\xf8\x00\xfa\x1b\xf6\xc2z\xb9\xa9\x89\x18a\xa2F\xa1\xa5\x00\x81\x0f\xa0\xbf\xc2Pmj\xa2\xc6\x912,\x80\xc0\x07\xd0S\xb1\x0c\xcb\xe3\xc4.W\x992,Um\xb8\x13{I\x81\x8474\x01\xc0\xdaT\x859eX>\x19\xee\xf6\xca\x8b\x9d\xb8\x85\xb0|{\xde\xe3[\xe5v\xaa\x85@\xe0\x03\xe8bxy\x98\xd8%\xac\x97{\xa2\xa5>\xe1\xf9\x8c\x9f\xef\x08|\x90fH\x17`=&\x15\xd7+\xc32%\x04\xcf\xf8\xb9!]\x10\xf8\x00\xba%\xcf\xf3\x10\xe6R\xeb\xe5\x862,\xe7Z\xeaS.g\xfc|O\xd3\x80\xc0\x07\xd0\xa5\xb0\x17z\xa3\x8a\xc4.\xa1\x0c\x8b\xde\xbdf\x81\x0f\xa8\xe0\x1c>`\x15!g/\xbb9\xc9>l\xb7\xff\x0e=\x5coo\xe0yj!\xec\xa5\xca\xb0\x14&j4\x0e|\xbb\x9a\x06\x04>`\xfdNf\x84\x9c\xbdx\xdd\xa6\x04\xdf0\xb9\xe0Qb\x97\xab2\xec)\xc3\xd2<\xf0\x01\x15\x0c\xe9\x02\xab\x0a|\xd3\xecoX;T\x85\xb9\xb1\x97\x0a 
\xf0\x01C\x0b|\xf7c\xaf\xd7\xe0\x95\x8f3\x84\xdb\xd4\xd0\xe3\x99\xf5r+]&\xdawO\xf3\x80\xc0\x07\xacQ<O\xefz\xc6\xd5\xe3\x0di\x06\xbd{\x8b\xbf\x8e.\xb5\x02\x08|@\xb7M65\xe8\xe4y^d\xe92,O\x85\x19@\xe0\x03\x86`V\x0f\xd7V\x19\x88\x06\x1b\xfab\x19\x96T\x99\x95\xd0\xf3Yxy\x00\x02\x1f\xd0{\xb1\x07k\xd6J\x09C\x0e<!\xe8\xa6\xca\xb0X/\x17\x10\xf8\x80\xc1\x85\x9fi\xee\x0f\xb1\x97/N$8H\xecrQ\x86\xbd\x89\x97E\xa3/\x0e\xf9\x8c\xedT\xeb\x80\xc0\x07t\xe3\xc3:L\xde\xb8\x9aqu1\xc0\x87\x5c\xf5\x98\xac\xa8\x01\x08|\xc0 \xcd\x0aA\x83\xea\xe5\x8b\x8f%U\x86\xe5\x99^)@\xe0\x03\x06)\x0ea\xce\xec\xe5\x8b\x93\x1c\xfa\x1e\xf6\xac\x97\x0b\x08|\xc0\xc6\x9b\x15\x86\xee\x0f$\x08\x1df\xe92,G\xca\xb0\x00\x02\x1f0h\x15\xbd|\x8f\xf3<\x1f\xf5\xf5\xb1\xc5c\x7f\x9c\xd8%<n\xeb\xe5\x02\x02\x1f\xb0\x11R=y\x93\x1e?\xae\xaa0W(\xc3\x02\x08|\xc0F\x883vg\xd5\xe5\xdb\xcd\xf3\xbcwC\xbb\xb1\x0c\xcb\xc3\xc4.g\xca\xb0\x00\x02\x1f\xb0i\xc6\x89\xeb\x8a\x1e\x0e\xedV\xf5\xee\x99\xa8\x01\x08|\xc0f\x89\x13\x17\x9e\xcc\xb8:\xacN1\xe9\xcbc\x89=\x92\xdb\x89]\x8e\xcb\xc7{\xeeY\x07\x04>`\x13C_Q^\x5c\xcc\xb8\xba\x17C\xbb\xca\xb0\x00\x02\x1f@\xb5\xfd\x18\x8a\xa6\xf9Z\x19\xa8v:~\xfc!\xec\xa5\xd6\xcb=2Q\x03\x10\xf8\x80\x8d\x16\x87v\xc7\x89]&]=\xf6x\x9e\xe1\xa3\xc4.W\xb1\x17\x13@\xe0\x036>\xf4\x85Y\xbbOg\x5c\xdd\xe5s\xdf\xaa\xc2\xe8\xd8\xb3\x0b\x08|\x00\x1f\x87\xbep\x9e\xdb\xf1\x9d\x1f\x85a\xdew\xca\x9fw24\xe5y\x1e\x86\xa2S\xeb\xe5\x9eY/\x17\xe8\x8274\x01\xd0\xb1\xd07\x8e\x93 F\xe56\xee\xf8\xcc\xd6\xaa2,c\xcf( \xf0\x01L\x0f}\xfb]?\xc62\x94\x16Yz\xbd\xdc\xa7\xd6\xcb\x05\xba\xc2\x90.@\xf3\xb0\x17z SeV\xc2Pt\xa1\xa5\x00\x81\x0f\xa0\xbf\xc2Pn\xaa\x0c\xcb\xa12,\x80\xc0\x07\xd0Sq\xbd\xdc\x83\xc4.\x17\xd6\xcb\x05\x04>\x80~+*\xae\xb7\xa2\x06 
\xf0\x01\xf4U\x9e\xe7\xe3,]\x86\xe5\x992,\x80\xc0\x07\xd0\xdf\xb0W\xb5^n\xa0w\x0f\x10\xf8\x00z,\x84\xb9T\x19\x96'\xca\xb0\x00\x02\x1f@O\xc5\xf5rS\xbdwWYu\x11f\x00\x81\x0f\xa0\xc3\xaa\xca\xb0\x14\xca\xb0\x00\x02\x1f@O\xc52,\x0f\x13\xbb\x9c)\xc3\x02\x08|\x00\xfdV5T[h\x22@\xe0\x03\xe8\xa9X\x86e;\xb1\xcb\xb12,\x80\xc0\x07\xd0\xbd\x107)\xb7\xf3r\xdb\xa9\xd8/\x94aI\xf5\xee\x85\xf5r\x95a\x01\x04>\x80\x8e\x85\xbdqv\xb3,Z\xe8\xb5;-\xff\x9f\x0alE\x96\x9e\xa8qd\xa2\x06 \xf0\x01t+\xec\x85\x1e\xbd\xf7\xef\xfc(\x84\xb9\xaf\x95??\x8deW\xee\xee\x1b\xfe\xff(qsWe\xd8+\xb4*\xd0\x17oh\x02`\x03\xc2^\x18\x9e=\x9dquX*\xedE\xb9\xcf\x93\xec\xe3^\xbbI\xc5M\x1e\xcey\x1c\xfb\xf1\xb6/\xcb\xed\xf5\xde\xc1\xf3:?s\xce \xf0\x01L\x17B\xd2V\xc5>\x8fC\x90+C\xd9?\xc9\xd2\xeb\xe5\x862,'\xf3\x1cD\xf8\xbdX\xe6e\xda\xf1\xec\xd6\x0c\x8dS\x8f\xe9\xb5\xff\xbf\x8ca\xb1\xeag\x97V\x07\x01\x81\x0f`(B@\x1b\xd5\x08}\xe1\xfa\x7fP\xb1\xcfx\x91\x03)\x03\xd6\xed\x84\x91pL\xdb-=\xbeia\xf1\xe1\x9c\x012\xac\x1a\xf2z\x08\xac\xd5\xfb\x18~\xe6\xbcF\x10\xf8\x00\xd6\x22\x9co\x17f\xe7f7\xb3n\x1f.pSO\xdb\xe8\x11\x0b\xb7\x11{\xfaN\xb2\x9a={+t?\xfb\xf4\x9a\xc1\xb5\x8fqJ\x80<\x9b\xb2\xdbi\x0c\x87'^\x9d \xf0\x01\xb4\x19\xfaBP\xdb\x8fAk2%\xd4T\xf9\x7f\xe5\xf6\x9f[<\x9e\xd0\x13\xb6\x17\x83\xe8\xc1\x80\x9b~ZX\xac*y\x03\xb4\xcc,]`\xd3\x82\xdfi\xb9\x8d\xca\x7f\x86I\x1a\xd7\x0d~\xf5\x07\xcb\xed\x1f\xc7\x1a~\xf7Z<\x9eqy\xf1\xee\x06=\x05\xa1\xc7o\xcf\xd0/\x08|\x00\xab\x08~Evs^\xdfq\xc3_\xbd\xad\xe1\xb7\xd3\xe2\xb1\x84\xde\xaew\x1a\x06\xd0>\x0a+\x93\x08{ \xf0\x01\xac4\xf4\xbd\x8c=l\xffa\x8e\xd0\xf7\x8d2\xf4\x15m\xf5\xf6\x95\xc71)/\xf6\x06\x1c\xfa\x9e\xc6\xb6\x06\x04>\x80\xd5\x8a\xabo\xfc\x959\x7f=\x94r\x09\x130\x8a\x96B_\x98\xf9\x1az\x0e/\x06\xd6\xcc\xef\x94\x8f\xcd2t 
\xf0\x01\xac%\xec\x85\xde\xb9E\xc3Z(\xe5\xd2Z\x98\x89\x93K\xf6\xb2\xe9\xb3[\xfb&\xf4V\xbe\x1d{/\x01\x81\x0f`-BPK\xcd\xd6}\x96\xdd\xd4\xa5\xab\xd2jy\x918\xd4\x1cB\xdfq\x8f\xdb6\x84\xbd=\xa5W@\xe0\x03X\x9b\xb8^n\xaag.\x04\xbdq\x9c\xd1\xfbNE\xf0[J\x89\x91\x1e\xcf\xe0\xbd\x88a\xef\xdc+\x0d\x04>\x80u\x0a!-\xb5\xf2Fq;\x9b4\x0cI\xc6\xe0\xf7V\xf6\xe9^\xb7\x8be\x06\x9b\x1e\xce\xe0\x15\xf6@\xe0\x03X\xbfX|9\xb5\xe2\xc6\xd9\xb4\xf3\xceb\x0d\xbfq\xf9\xcf\x07\xd9\xc7u\xfc\x96^@\xb8G3x\x9fej\xec\x81\xc0\x07\xd0\x11U!\xad\xa8\x08`\x97\xa1\x8e_\xb9\xdd[\xd5\x84\x84\x1e\xcc\xe0\x0d5\xf6\xf6\x85=\x10\xf8\x00\xd6.\x96a\xd9\xae\x08.\xa7]<\xf6\x0e\xcf\xe0}W\x8d=\xe86k\xe9\x02\x9b\x14\xf6\xaa\xd6p\x0dC\xa6\x87\x1d<\xeep\xcc\xb7=g\xa1\xa7\xaf(\xb7\xaf\x96\xdb\xdf\xee\xc0\xe1\xbd\xa3\xec\x0a\x08|\x00]\x12\xc2\x5cj\xa2\xc6QG\x87$\xc3P\xeen\xc7\x8e)\x84\xe3\xfd\xae\xf6\x86\x02\x02\x1f\xb0\x81b\x19\x96\xc7\x89]\xae\xe2\xfa\xba]\xb4\xd3\xb1\xe3\xb9\xad\xb1g&.\xf4\x84s\xf8\x80M1\xa9\xb8\xbe\xcbK\x7fmu\xe8X\xc2\xa4\x91\x1da\x0f\xfaE\x0f\x1f0x\xb1\x0cKjH\xf4\xac\xab+B\xc4c\xefR\xd8Sv\x05zH\x0f\x1f\xb0\x09&\x15\xd7w\xb9w\xef^G\x8e\xe3X\xd8\x83\xfe\xd2\xc3\x07\x0cZ\x9e\xe7U\xeb\xe5>\xed\xf8\xf0d\x08X\xcfb\xf0\x0b\xdb\xf6:\xc2\x9e\xb2+ \xf0\x01t5\xec\x85\x80T$v\xb9\xae\xb8~\xed\xe2,\xd8\xd3\x8a\xc7\xf8\xbd%\x1e\x82\xb2+0\x00\x86t\x81!\xab\xbd^n\x8f-s\x06\xaf\xb0\x07\x03\xa1\x87\x0f\x18\xa4<\xcfC\x10:H\xec\x12\xca\xb0\x1c\x0d\xe0\xa1.#\xf0)\xbb\x02\x03\xa3\x87\x0f\x18\xaa\xaa07\x1e\xc8\xe3l;\xf0\x09{ \xf0\x01t_\x9e\xe7\xfbY\xba\x0c\xcb\xb3\x01\xad\x10\xd1f\xe0\x0beWF\xc2\x1e\x08|\x00]\x0f{U\xeb\xe5\x06\x87\x03z\xc8m\xcd\xdaUc\x0f\x04>\x80\xde\xa8*\xc3\xf2\xa4\x0c5\x97\x03\x09\xb7{-\xddT(\xbb\xb2#\xec\x81\xc0\x07\xd0\x87\x004\xca\xd2\xbdw\xe1\xfc\xb4\xa3\x01=\xe46\x86s\x9f\xaa\xb1\x07\xc3g\x96.0$E\x96.\xc3r8\xb0^\xac\xd1\x82\xbf\xaf\xec\x0al\x08=|\xc0 
\xc4\xe1\xcdT\x19\x96\xb3\x01\x86\x9by{\xf8\xae\x85=\xd8,z\xf8\x80\xa1\xa8\x1a\xaa-\x06\xf8\x98w\xe7\x0c{\xca\xae\xc0\x86\xd1\xc3\x07\xf4^\x9e\xe7\xe3,=[\xf5x@eXn\x1f\xf3<\xbd{W\xc2\x1el&=|@\xdf\x83OU\x19\x96\xce\xaf\x97;\xa7\xa6\x81O\xd9\x15\xd8`z\xf8\x80\xbe\x0b\xb3rS\x135\x8e\x86R\x86\xe55\xa3\x06\xfb>\x13\xf6`\xb3\xe9\xe1\x03z+\x96ay\x9c\xd8%\xac\x97[\x0c\xf4\xe1\xef\xd5\xdc\xefX\xd9\x15@\x0f\x1f\xd0g\x93\x8a\xeb\x0f\x07\xfc\xd8\xeb\x0c\xe9>\x11\xf6\x80@\x0f\x1f\xd0K\xb1\x0cKj\x96j(\xc3r2\xd0\xc7>\xca\xd2\xc3\xd8\x81\xb2+\x80\xc0\x07\xf4^U\x98\xd9\xd4\xde\xbd0Ie<\xd4\xb0\x0b\x08|\xc0\x86\xc8\xf3\xbcj\xbd\xdc\xa7\x03/=\xb2\x93\x08{\xca\xae\x00\x9f\xe2\x1c>\xa0oa/\x94a)\x12\xbb\x0c\xb5\x0c\xcb]{S~v!\xec\x01\xb3\xe8\xe1\x03\xfa&\x84\xb9\xd4\xf9k\xc5\x06\x94\x1f\x19\xcd\x08{\xca\xae\x00S\xe9\xe1\x03z#\xae.\xf1(\xb1K(\xc3r4\xf06\x08=\x9cw\x87\xb3\x8f\x85=\xa0\x8a\x1e>\xa0O\xaa\xc2\xdcx\x03\xda\xe0\xee\xf9{j\xec\x01\xb5\xe8\xe1\x03z!\xcf\xf3\xfd\xac\xba\x0c\xcb\xe9\x064\xc5^\xbc|W\xd8\x03\xea\xd2\xc3\x07\xf4\x85\xde\xbd\x1b\xa1\x87O\x8d=@\xe0\x03\x86%\xcf\xf3\x22K\x97ay2\xd0\xf5r\xa79\xdc\xa0\xc7\x0a\xb4\xc4\x90.\xd0\xf5\xb07\xca\xd2E\x94C\x19\x96\xa3Mi\x0fa\x0f\x10\xf8\x80!*\xb2t\x19\x96C3T\x01\x04>\xa0\xa7\xe2z\xb9\x07\x89].\x9c\xcb\x06 \xf0\x01\xfdVT\x5c\x7f\xa8\x89\x00\x04>\xa0\xa7\xf2<\x1fg\xe92,\xc7\x1bR\x86\x05@\xe0\x03\x06\x19\xf6\xc2j\x12\xa9\x89\x18\x9b\xb0^.\x80\xc0\x07\x0cZ\x18\xaaMM\xd482[\x15@\xe0\x03z*\x96ay\x9c\xd8\xe5*\xdb\xa02,\x00\x02\x1f0DUaN\x19\x16\x00\x81\x0f\xe8\xabX\x86\xe5ab\x97\xb0^\xee\x89\x96\x02\x10\xf8\x80\xfe\x9aT\x5c\xaf\x0c\x0b\x80\xc0\x07\xf4U\x9e\xe7!\xcc\xa5\xd6\xcb\x0deX\xce\xb5\x14\x80\xc0\x07\xf43\xec\x852,Eb\x97P\x86E\xef\x1e\x80\xc0\x07\xf4X\x08{\xa92,\x85\x89\x1a\x00\x02\x1f\xd0Sy\x9e\xef\x94\x17\x8f\x12\xbb\x5c\x95aO\x19\x16\x00\x81\x0f\xe8\xb1\xaa07\xd6D\x00\x02\x1f\xd0Sy\x9e\xefg\xe9\xf5r\xcf\xac\x97\x0b 
\xf0\x01\xfd\xa6w\x0f@\xe0\x03\x86*\xcf\xf3\x22K\x97ayb\xbd\x5c\x00\x81\x0f\xe8o\xd8\x1be\xe92+\xa1\x0c\x8b\x89\x1a\x00\x02\x1f\xd0cE\x96.\xc3b\xbd\x5c\x00\x81\x0f\xe8\xab\xb8^\xeeAb\x97\x8b2\xecM\xb4\x14\x80\xc0\x07\xf4WQq\xbd\x155\x00\x04>\xa0\xaf\xf2<\x1fg\xe92,\xcf\x94a\x01\x10\xf8\x80\xfe\x86=\xeb\xe5\x02\x08|\xc0\xc0\x850\x97*\xc3r\xa4\x0c\x0b\x80\xc0\x07\xf4T,\xc3\xf28\xb1\xcbU\xa6\x0c\x0b\x80\xc0\x07\xf4ZU\x98+\x94a\x01\x10\xf8\x80\x9e\x8aeX\x1e&v9S\x86\x05@\xe0\x03\xfa\xad\xaawo\x90\x135\xca\xa0\xbb_n\xafjn\xe3.\x04\xf3\x06\xc7kr\x0d}\xfd\xbb\x1c\x97\xdb\xf9k\xaf\xe7\x93\xf8\xc5T\xe0\x03\x98\xf3\xcd5\x04\x83\xed\xc4.\xc7\xaf^\xbd:\x1f\xe2c/\x1f\xd7IyqVs\xf7\xa2\x03\x87\x5c\xf7\x18Bal\xe7[\xd2\xc7\xf7\xa3Iy\xf1\xfe\x94\xf7\xa40\x02\xf1<\xae\xef-\xf0\x014|sU\x86\xa5~\x88\xba\xbf\xce^\xbe\xd8\xbb\xb1[sw\xbd{\xf4\xf1\xfd(\xfc-\x1eT\xec\xf6x\xc8=}\x02\x1f\xb0\xcc\xb0\x93Z/\xf7h\xe8\x135b\x11\xe9g-\x87\xc3e\x98\xd4\xdcOal\xfa\xea\xb0\x07\x7f\x87\x02\x1f\xd0\xbbo\xd3\xa3\xf2\xe2Qb\x97\xab28\x14\x1b\xd2\x1cu?h\xd6\xd2\xcb\x17\xef\xf3~\x8d]\x15\xc6\xa6\xaf\xefG{\x15_>\xef\xda\x1dj;\x08|\xc02L*\xae\x1foJC\xc4b\xd2Ok\xee\xbe\x8e\x10\x5c\xf7>\x15\xc6\x06\x81\x0f\xe0\xfb\xdf\xa6\xf7+\xbe%\x9fm\xe0\xb0`\x08U\xd75\xf6[i/_\x83\xde\xbdM\xea\x91\x05\x81\x0f\xa0\x86\xaa\x19\x9c\xe3Mk\x90x\xaeb\xdd\x99\xadE\x9c\xf0\xb2\xaa 
Z\x87\xa1\x5c\xfa\xec\xb2\xc1\xbe\xd7Cm\x04\x81\x0fhM\x9c\x09\x97\xea1z\xba\xc1\xc3\x82!\xf0]\xd5\xd8\xef\xfe*\x02V,\x99S\xa7w\xef,\x96\x98\x81\xbe~\xe1\x0a\xef9g\x0d\xfeN\x05>\x80D\x80\xb8W\x11T\xc27\xe7b\x83?t^6x\xfc\x87\xcb\xec\xe5\xabQ2\xe7\xae\xb1W7\x030\xce\xaa{\xef.\x04>\x80z\xdf\x8cS3\xe1\x0e7}\xbd\xdc\xb8\x84\xdcE\x8d]\xb7\xb2\xe5\xf6\xf2\x1df\xf5f->5Q\x83\x81\xfc\xed\x85\xd7\xf1^6\xbb\xa7\xef8\x5c?\xe4\xf7\xa87\xbc\x0c\x80E\xc5\xb2\x07\xa9\xa2\xa6\x17\xd6\xcb\xfdD\xd8z^g\xbf\xb2][\xafUX\xa3'\xf6\xd6F\xf7\xc82\xc8\xd0\x17V\xf5\x09K\x08\xee\x94\x97a\x1b\x95\xdbi\xb9\x9do\xc2\x97Q\x81\x0fhCU0p\xd2\xff\xc7\x1f:\xa7\xe5\x07N\xe8e\xa8\xaa\xf7u\xdb\xcb\xd7v\xe8\xaa\xdb\xbb\xb7\xf1=\xb2\x0c:\xf8\x9do\xda\xe36\xa4\x0b,$\x96\xf6H\x85\x17\xab3|\xda\xb8n8k\xf3\x5c\xbe\x06\xbd{gzdA\xe0\x03\xb8\x1b \x8a\xc4.Vg\x98\x22\x9eOt\x5cc\xd7\xb6\xcf\xe5\xab:\xcf\xf2V\xe1Y\x02\x81\x0f\xe0VUi\x0f\xab3\xa4CU\x9d\x9a_\xad\xf4\xf2\xc5\xe5\xee\x0ej\xecz\xacG\x16\x04>\x80\xbb\x01\xe2qb\x97Ps\xeeHKM\x17\x83p\x9d\xf6i\xab\x97\xaf\xa8\xb1\x8f\x1eY\x10\xf8\x00>\xa1*\xac\x14N\xfa\xaf\xd5\x86u{\xf9F\x0b\x86\xf3:\xbd{G\x9e3\x10\xf8\x00n\x03\xc4^y\xf10\xb1\x8b\x93\xfek\x88\xe1\xaaN\x8fZ\xe8\xe5+\x16\xb8\xab:\xbfk\xbd\x5cX\xcf\xfb\xe9(\xbc\xa7\xc6\xf7\xd5\xa5Q\x96\x05\x98Ge\xef\x9e&\xaa\x1d\xfa&5\x96\xa4\x0b\x0e\xc2~M\xcf\x89\xacQ#\xf1\xd6\xd8\xb3\xb1\x96\x0f\xfbp~fx\x8enk\xc3\x85\xff\xa7f\xbd\x87\x1e\xe1\xf3;\xdb\xa9\xf3d{\xf5\x5c\xef\xc7\xe7{4\xedy.\xf7\x99\xf5<\x9f,\xda\xfb.\xf0\x01M\xdf\xb4B0\xd8N\xec\xe2\xa4\xff\xe6B/\xdf\x075\xf6+\xe6\x08fu\xc2\xb7\xd29\xab\xfd\x1b\x1a\xc5\xe7q\xbf\xe2oi\x9a\xad\x18\x14v\xef\xdc^\xa8\xeb8i\xa3W=~Ax^s\xf7\xb7\x97\xb5\xcer\x0cG\x97Y\xbdY\xe5\x95\xc7Q\xf7q\x95\xb7\x93/\xe1\xb1\xec\xc5\xe7\xfb`\x81\xe7\xf9\xfd\xf2vn\x97~\x9b+\xfc\x19\xd2\x05\x9a\xbe\x09\x1fU\xf4>8\xe9\xbf\xa1\xf8aUgq\xf7\x83&\xe7\xf2\xc5\x0f\x9a\xdd\x1a\xbbz\xceV\xf4e\xa9\xdcBo\xcd\x8b\xecf\xc2\xd3vK7\xbd\x1b\x03\xc1\xe5\xa2\xc3\x821\xf8?\xad\xb9\xfbd\x89k>O\xb2\xfa\xcb\xff\x9dt\xf4\xf9\x0eC\
xb5\xa71h\x1e\xb4p\x93\xe1\xf5\xf2~\x08\xc2\xa1\xb7\xbfi\xdb\x0b|@\x13E\xc5\x9b\xb0\x93\xfe\x17k\xdb6\xf7\xab\xbb\xef\x13C\x82+\x09z\x97\xf1\xc3z{\x89w\x15N\x0bx\x1e\x96\xe4[0\xf4\x85/\x00u\xd7|>YB{\x85\x9e\xcf\x875v\xbd\xc8:z\xfaH\x1c\x099\xaf\xf9\x85\xab\xa9\xad\xf8\x85aG\xe0\x03\x96\xf2m\xb5\xbcx\x94\xd8\xc5I\xff\x8b}\xc8\x86\x9e\x80:\xc5\x98k\xf5\xf2\xd5\xec\xddS:g\xf9\x7f7G1\xe8\xdd_\xe1\xdd>\x0a=K\x0b\xf6\xbe\x8dk\xee\xb7[\xde\xcfa\x8b\xed\x15\x8eyR\xf7\x18\xbb\xf8\x05\xf3\xces\xbe\xb5\xc4\xbb\xb9nz\x1a\x86\xc0\x07\xd4U\xf5&lXpqE\x8b\xfb\xd5\xdaG\x8f\xec\xd2\xadk\xcd\xd6\xdd\x06\xc1i\xda\x17\x90p\xdc\xef\xd6}=.R6h\xca\xfbL\x9d\xa0\xf4n<\xc6\xae\x85\xbd\xa2\xe2\x8bq[\x1a\xf7\xac\x0a|@\x9d7\xb1\xbd,\xdd[t\xd6\xd5\xf3h\xfa$\x0e\xad>\xa9\xb1k\xb2\x97\xaff\xef\x9e\xd29\xab\xb1\xc8\xdf\xc5\xc5\x82\xf7\xfd0\x06\x90y_\x8f\xa1\xa7\xaa\xce\xb9\xa5[\x8b\x84\xcb;\xaf\xdb\xbaC\xb9g\xf1\xd8\xba\xf6>\x19\x8e\xffqW_W\x02\x1fP\xf7[w\xcaX\x13\xb5\xa6n1\xe6b\xce\xebn\xe9\x91]M\x88\x0f=\xa8\xa9\xa1\xfa\xf0\x5c?\xcbnz\xd3\xde*\xb7\xcf\x86\x99\xa2q\xdb\xb9\xfdw\xf9\xf3\x07\xe5\xf6v\xbc\xad\xeb\x06\x87\xf0\xb8\x0c\x22;\x0b<\x84q\xcd\xfb[hh\xb7\xc1P\xeeu\x17\xdfojLh\xfbTh\xbd}\xce\xef<\xdf\xb7\xcf\xf3[\xf1\xbaY\xcf\xf5\xf5<_\xb0\x05>\xa0\xea\x8d,\x84\x87\xd4\xf9GO\x9d\xf4\xdfz@\xa8\xf3\xc11\xb5\x97\xaff\xef\xdeq\x17\x87\xc3\x06\xecdJh\x09\x1f\xe6_,\x9f\x87{\xe5\xb6\x1fz\xac\xc29Y\xb3\x86\xd8\xc3\xdfX\xf8\x90/\xb7\x10v\xc2\xf3\xfe\xa4\xc1\xfd\x1f-\xf0z\xbcl\x10\xb0\x16\x19\xda\x0da\xaf\xceP\xee\xb8\xa3\xef7U\xeb\x8a\xdf\xba\x88!o\xef\xf69\x9f\xf2<\x9f\xc6\xeb\xc2c\xbdw'\xe8\xd7\xfd\x02.\xf0\x01s}k=\xac\xf8\xb6]h\xa9\xd6C_h\xd3\xab:\x1f~\xd3>t+~G\xe9\x9c\xd5?\x9f'\xb1\xdd\xafcP\x1b\xc5\x0f\xf3\xf39o\xefe|\x8d\xbc\x95\xd5\xef}\xdbY\xf0\xf8\x9f\xd5\xd8u\xae\xa1\xdd\x06C\xb9\xc7\x1d>u\xa4N(\x0eao\xaf\xe9d\x8b;A\xffA|\x1e\x04>\xa0uG\x15\xdf\xba\x9d\xf4\xbf<u\x82\xf4\xe1\xdd\x99\x985{\xf7<g\xeb\x0b\x04!\xe8\xb5\xd6\xfe18\x8ck\xee~\xd8\xc2\xf1\xb7>\xb4\xdb`(\xf7\xaa\xab_T\xe2\xdf]U\xef\xdeu\
x0c{/\x17x\xbe/co\xf0\x5c_\x14\x04>`\xd6\x9bX\xe8\x11H\x15\x0b\xbd\xe8\xe2\x89\xd3C\x11'TT\x9d0\xbf\xf5\xda\x87`UH\xf4\x9c\xad\xef\xf9<YF\xd0\x8e=^u\xca\xf9\xec/x?/\x1b\xdcF\x93\xa1\xddIVo(w\xbf\xc3_T\xf6\xea<\xceu\x1f\xbf\xc0\x07\xccR\x15\x0c\x0c\x0b._Qc\x9f\xc3\x18\xd0\xc3\x07\xec\xae\xe7\xcc\xebd\xd6\x97\x83\x15\xae\xc2Qkh7\x1eO\x9d\xa1\xdc'\x1d?\xe7\xb4N\xbb\xae}(Z\xe0\x03\xa6\xbd\x11\x8f+\xc2\x83\xb5WW \xb6qe/_|\xbe\xaa>\xf4=g\xc3}\x9d\x5cf\xf5\xce\xb1\xdbk\xe1\xee\xc2\xeb\xacN\xb9\x98\xe4\xd0n\x83\xa1\xdc\xb3\x81\x14t_{`\x15\xf8\x80io\xc4Uo\xb0z\x8aVg\x5c\xf3C85\xfcn\xa2\xc6\xf0\xd5\x09\xf3;\x8b\xdeI\x1c\x96\x1c\xd7\xdc=\xb5\xdek\x9dY\xad\x9d,\xc12\xa7\x9du\x1f\x80\xc0\x074}#\xb6\xf6\xea\x0a\xc5\xb6\xae:G\xab\xea\x83\xf3\xc8s6xuz\x90\xee\xb5\xf4\x9a\x0c\xf7U\xa7,\xcc\xd4\xa1\xddx~p\x9d\x02\xc5\xe3\x01\xbdnG\x02\x1f\xd0\x19\xf1<\xb0TO\x90\xb5W\xd7\x17\xc2\xaf\xe7\xfc]\xcf\xd9f\xa83!`\xb7\xad;\x8b\xc3\xacuV\xe1x8\xe5\xdc\xc1:\xaf\xc7\xe3\x81\xad\xdeS\x08|@\x97(\xc3\xd2A\x0d\x8a1O\x0d\x8b\x9e\xb3\x8dx\x8d\xac\xe3\x1c\xb1q\xcd/\x22\x93;_*\xc75\x82ggK\xb0\xccP\xa7\xed\xef\x97\x8f}\x22\xf0\x01kWc\xc6\x9c\xb5W\xd7\x1f\xc6\x9b\xf6\xf2Y\xe3\x98e\x86\xcc\xcb\x9a\xc1,\x84\x9d\xa2\xc1\xf2c\xfb=\xfb\x92rZs\xbf\xb0:\xce$q^\xe3R\xbd\xe1%\x0b\xdc\x09\x14)\x85&Z\xeb\x87\xeb\xcb8\xeb\xf1\xfd\x06\xbf6\xee\xf0\x17\x8c\xd3\x15\xdc\xcdy\xd9n\xbd\x9c\xac\x12C\xc1N\xdc\xc2\xbf\xf7\xe2U\xa3\xac\xde\x12^\xabz]Nj\xae\x94q\x18\x8f\xbd\xaa\xe6\xde\x93\xbe-\xfb\x17\xbeT\x95mpU\xf3y\x09\x93\xab\xf6B\x00^\xf5\x17h\x81\x0f\xb8\x1df\xd9N\xecr\xac\xa4Gg>\x5c\x8b\x9a\x1f,]_\xe3x\xd73\xfa\xa9\x80\xb7\x1f\x83\xdd^\x97B]\xcd/\x16\x97\x15an+K\xcf$\x0f\xfa\x5c\x82\xa5h\xf0e,<\xb7\xef\xc7\xbf\xe5\xb0\x9d\xac\xa2G\xd3\x90.\xf8\xa0\xa9\x1afQ\xd2\xa3[\xea>\x17\x85\xa6\xea\xc7\x97\xadr\x0b\xc3\xee\xdf\x8b\x81\xe1\xa0ga\xaf\xe9*\x1c\xa9\xf7\x99q\x9f\xbf\x8ce\xf5&\xb1|*\xf8\x85\xb0\x1c\x87z\x97Z\xbaE\xe0\x03\x8a\x8ao\xe6GN\xfa\xef\x94Z\xcf\x85\xe7\xac\xdb_\xb2\xe29m/\xe3\x07\xfe\xc3\xbe?\xa6\x06\xabp\xcc\xf
c\x223\x80\x12,!\xf4^\xcc\xf1{\xb7\xbd\x9f\xdf(_\x13\xe7\xf1K@\xeb\xe7\xf9\x09|\xb0\xd9\x1f<\xa3\xf2\xe2Qb\x97\xab\x81T\xb9\x87\xae\xfc\xcd\x85\x1e\xda\x10l\x1eg\xf5\xd6\x90\xed\xdb\x97\xc7y\x02\xcf\xb3!L\x08\x8b_\xb2\xf6\xe6l\x83[\xdb\xd9\xc7\xbd~E\x9b\xc1O\xe0\x83\xcdV\xf5&k(\x17\xda\x09z\xf7\xe2D\x95\xaf\x0d0\xe8\xdd\x0d<\xe3\x86\xbfv\x95\x0dg5\x8d\xbb\xa1\xefl\xc1\x9b\xda\x8a_\x0a.\xe3\xb9~\x0b3i\x036\xf7\x03(\xbc)\xa5N\x9cW\xd2\x03\xda\xf9[\x0b\xe7f\x9d.\x18\xf4B\xafQ\x08\x13\xe7\xd9'\x87\xf5O\xef\xfc;\x9c\x8b\xbb\xbd\xe6\xc0\x13\x86$\xc3\xca0\x075\x7f\xe5|h\xa7\x1f\xdc\x86\xbe\x18\xd4\x1e/xs\x1f\x05\xbf8\x13z\xbc\xc8\x0cf\x81\x0f6\xd7\xa4\xe2z\xbd{,\xf3C1\x17\xf6\x92B\x0f\xd1I\x0cD\xa75\xef\xebeG\x1e\xefA\x83_\x09+q\xec\x0f\xf1\xcbe8\x1d&\x16[\x0eA|\xd1\xf34C\x90?\x0d\xa7\x04\xcc;\xfc-\xf0\xc1f\xf68T\xad\x97\xfb\xb4o\xb5\xb0\xa0\x83\x7fg\xf7\x1a\x86\xbd\xeb\x18\x0e&}\x9c\xc0\x10\x1f\xef<\xc1\xed\xa3\x19\xaaC\x5c\xef9>\xa6\xfd8\xa22n\x18\x86_\x17^G\xa1\x9cK6O\xe8s\x0e\x1fl\xe6\x87PQ\xf1\xa1Sh)X\xd8I\x83\xb0\x17f\xb8\x8eB\xafP\x8f\x83\xcfQ6_I\x99\xd0F\x93!\xbf\x10B/m\xb9\x85\xc0\xf7\xa0\xdc\x9ed7\xe7.\xce\xeb\xfd8\xc4+\xf0\x01\x95o\xca\xd6\xcb\x85\xe5~\xb1\x0a\x1f\xeeu\x8aK\x87/Xo\x85\x15A\xfa\xfcw\x17\x03\xc8\x22\xbdW\xbbq\xe4a\xd0B\x98\x8f\xa1~T\xfe\xf7\x8b\xe5v\x9c5_21\x98\xc4*\x0b\x02\x1f0\xf5M\xb9\xea\xfc\x9aP\x86\xe5HK\xc1\xc2\x8a\x9a\xfb\xed\xf5}\x15\x9b8j0i\xa3\xcd\x9a\x86\x98\x9e\x87\xbf\xf3\xd8\xeb\x17\x1e\xf3\x93\x86\xc1o+k8\x12#\xf0\xc1f\xa9\x0ascM\x04\x0b\x07\xa0\xd0\xdbUgh\xf3\xc9@\xce\x95\xad3t\xfd\xacf\x88\xd9\xb8\xca\x00\xa1g7\xd6;\x0d\xc1\xef\xb8\xc1\xaf\x1e4\x09\xc8\x02\x1fl\xd6\x87Pj\x88\xe9\x99\xf5r\xa1\x15u\xce\xaf\xba\x1eBQ\xf38\x0c[5t\x1dF\x0e\xf6k\x86\x99\xed\xb6\xea\xce\xf54\xf8\x85/\xdd\xef\xb4\xfcZ\x13\xf8`\x83\xc2^\xd5z\xb9\x812,\xd0\x8e\xbd\x1a\xfbL\x06\xf0\xbe\x12N\x11\xa9\x13\xce\xc6w\xdec\xea\x0c[>^\xf6\xba\xb2\x1d\x0f~\x93\x06\xa1O\xe0\x03>\x15\xe6RCLO\x86X\x12\x01\xd6\xf4\xe5\xaa\xcep\xee\xe9\x12\xee~w\xc5\x0f7\x04\x93\xca\xa1\
xdc\xdb\x91\x838)\xa5hp\xdb\x1b+\x86\xbe:\xc3\xe0\xb5\x83\xb1\xc0\x07\xc3\xff\x00\x1ae\xe9\xde\xbb\xdb\xda_\xc0\xe2\xea~\x00_\xb6\xfcw\xbe\xd2\x1e\xb1\xf2\xfe\xea\xac\xeaq\xfd\xfa{O\x9c\x14Vg\xad\xd9\xedx\x1f\x9b\xacN8\xae]\xd0[\xe0\x83\xcdx\xd3H\xbd)\x1c*\xc3\x02\xab\xb5\x84\xc9\x1a\xa3\x15\x86\xbd\xbd\xf2\xe2Q\x9d\xf7\x9e\x19#\x07\xe3\x9aw\xf5(\xde\x97\xd7H\x0b\x04>\x18\xb0\xf8f\x99*\xc3r6\xef2=\xc0B\x7f\x9bm\x07\xb4\xfd\x15\x1dw\xdd\x12,\x17\xb3J<\xc5 \xf3\xb4\xe6]N\xe2}n\xaa\xab\xb6nH\xe0\x83a\xab\x1a\x12)4\x11\xacEk\x81/\x86\xc7\x83\x15\x1dw\x08{u\xceQ\x1c\xd7x\xef\xa9\x13f\xeeo\xf8\xfbTkaW\xe0\x83\xe1\xf6 \x847\xdc\xd496\xc7\xca\xb0\xc0\xda\xb4\xd9#7Y\xd1{J8\xe6\x875v\xad\xac/\x18O#\x19\xd7\xbc\xeb\x8d\x1c\xda\x8d=\x9b[m\xdd\x9e\xc0\x07\xc3}\xa3H\xf5\xeeY/\x17\x96\xa3\xeeyW\xfb-\xfd\xad\x87\xbf\xe3\xdd\x86\xef\x0d\xf3\xdc\xcf\xa8f\xb0\xbc\xcajN\x02\x8b_8\xeb\x0e\xed\x9el\xe0\xd0n\x9d\xd7\xc8\x99\xc0\x07\x9b\xed\xb0\xe2\x9b\xe1\x912,\xd0\xbe\xd8sUk\xa8r\xd1\xb5cc/\xfe\xe3\x86\xbf6\xefl\xdeIV\xaf\xb7i\xdcp\x12XQ\xb3\xbd\xb6\xb2\x8e\x96j\x09A\xb4\xed\x1e\xc8\x18n\xeb|)?\x15\xf8`C\xc5o\xe2\xa9\x0f\x81\xab!T\xf8\x87\x0e\xab\xbb<X1o9\x95\xd8\xb3\xf7\xfe\x8a\xdeS\xea\xac\xa6\x11<mz\x9aH\xc3\xa1\xdd\x87qX\xb9k\xc21=/\x8f\xed\xb4\x8d\xe0\x17\xc3^h\xc7:\xe7J\xd6\x0e\xc1\x02\x1f\x0cO\xd5\x1b\x80\x155`\xb9\xea\xd6\x8f\x0b\xbdV\xa7MBL\x08\x14\xe5v\x9e5\xef\xd9\x9b7|\x84@\xfa\xb5\x1a\xbb\xce}\x9aH\xc3\xa1\xdd.\xce\xda\xbd}\xfev\xef\x04\xbf\xf1<\xc7\x19\x03ch\x8f\xed\x1a\xbb\x9f5\x19\xa9\x11\xf8`@\xe2\x9b\xc5n\xc5\x1b\xc4\x89\x96\x82\xe5\x89\x1f\xc2u\xcf\xad\x0a\xa1\xef\x83TH\x08\xa1+\xf4\xb2\xc5\xa0\xf7<\x11\x06\xae\xdb|\x1c\x0dJ\xb0\x04\xe3\x05\xeby\x86\xb0Xwh\xb73\xefa\xb1\x8d^\x9f\xc8\x12\xde\x83C\xef\xeb\xf7\xe2\xf3Z\xc4\xa0~oV\xa8\xae\xf9\xfc.\xf4\xe5\xfd\x0d\x7f\x9a0(Uo\xcez\xf7`5\xc6\xe5\xf6\xa2\xc1\xfe\xbb\xb7A\xa1\xfc\xe0\xbf\x0doMfh^\xc7\xfb\xfc\xa0\xc5\xc7P\xd4\x0c\x1f\xcf\x16\xfd\x22\x19\xc2b<'\xf1y\x9d\xb6\x0a\x01iV\x9d\xbf\x15\xdb\xaf\xf9\xbc>\x8e\xe1\xee\xf6\
xe7!\xdc\xde_\xe0~\x9f4-\xcc\xac\x87\x0f\x06\x22\x9eg\x93z\x03y\xba\x84\xea\xfe\xc0\xf4\x00sY^\xbc\xbb\xc0M4\x0d{{5CW\xads\x06\x1b\xac\xa6q\x1b4\xdbh\xb3\xd3\xac\xfe\xd0n\xb1\x84\xe2\xd5\xcb\x08|\xb3,\x12\xf6\x8e\xe79\x0f[\xe0\x83a\x84\xbd\xaa\x19]\xca\xb0\xc0\xeaC_\xe8\x81:^\xf2\xdd\xdc\x86\xbd\xba_\xe6*\xcf+\x8b\xef'u{\xec\xc6-/\xcdXd\xf5\x86\xa6\xd7>kw\xc6p\xee\xb2\x85\xb07W\xc0\x16\xf8`\x18\x8e*z\x04\x0a\xeb\xe5\xc2ZB_\xf8p~\xba\xa4\x9b\x0f\xe7\x09\x8e^\x0b{U\xe7\x0e\xd6\x99H0\xc9\xea\xf50\xb6~Np\xc3Y\xbb\xbbq\xb6\xf2\xba\x8cV|\x7f\xef\xce\x1b\xf6\x04>\x18\x808\x8b.\xb5\xac\xd2UG\xceu\x81M\x0d}\xe1t\x8b\xb7\xb3\xf6&U\x84\xf3\xbf\xde)owo\x8e/r;\x15\xef'!P\xd4\xe9\xb5jm(wJ{\x85\x10\xf9\xac\xe6\xee\x8f\xe7-m\xd3\xc2q\x86\xa0\xfd [~/n\xb8\xfd\x07\x8b\xbe\x8f\x0b|\xd0\x7fUo\x02cM\x04k\x0f}!\xc4\x8c\xca\xed\xc9\x02\xc1\xef\x22\x06\xbd\xd0\xab7\x99\xb1O\xd5\xd0\xee(\x11\xf6FY\xfd\x922\xc5\x92\x8b\xb7\x8f\x1b\xb4\xd3d\x8d\xcf\xebe\xecu\xfblvs\xce\xe6YK7\x1d\x1e\xfb\xd3\x18\xf4\xc6m\xb4u^\xde\x88\xbfD\x88\xb6\xbe\xf2\xa5\xd3\xac\xc12E\xd1\xd9\xf5{\x1f\xee\xad\xe3xc\xfd\xae\xd4\xac\xbc0\xe4\xb2\xe7\x99\x85n\x89\x7f\xbb\xe1os'\xf1\x9es\x15\x03\x5cx_:\xb1:No\x9e\xdb{\xf1y\xbd}~\xef\xd5\xf8\x5c\x09A\xf1e|\xaeO\x971\xc1NY\x16\xe87\xbd{\xd0C\xb1\xc7OM\xcca>\xb7\xdf\x0fn]:.C\xba\xd0\xdfo\x91E\x96\x9e\xda\xffD\x8f\x00\x00\x02\x1f\xf47\xec\x8d\xb2t\x11\xe5p\xfe\x87\x89\x1a\x00\x08|\xd0cE\x96.\x9bp\xa8\x0c\x0b\x00\x02\x1f\xf4T\xac\x80\x9f*\xc3r\x91\x98\xc1\x07\x80\xc0\x07\xf4@Qq\xbd\xf5r\x01\x10\xf8\xa0\xafbQ\xd4\xd4\xf4\xfe\xe3\xb8\x1e%\x00\x08|\xd0\xc3\xb0\x17j9\xa5&bX/\x17\x00\x81\x0fz.\x0c\xd5\xa6&j\x1c)\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<N\xec\x12*\xf2+\xc3\x02\x80\xc0\x07=V\x15\xe6\x94a\x01@\xe0\x83\xbe\x8aeX\x1e&v9\x8b\xcb4\x01\x80\xc0\x07=5\xa9\xb8^\x19\x16\x00\x04>\xe8\xab<\xcfC\x98K\xad\x97\x1b\xca\xb0\x9ck)\x00\x04>\xe8g\xd8\x0beX\x8a\xc4.\xa1\x0c\x8b\xde=\x00\x04>\xe8\xb1\x10\xf6ReX\x0a\x135\x00\x10\xf8\xa0\xa7\xf2<\xdf)/\x1e%v\xb9*\xc3\x9e2,\x00\x08|\xd0cUan\xac\x89\x00
\x10\xf8\xa0\xa7\xf2<\xdf\xcf\xd2\xeb\xe5\x9eY/\x17\x00\x81\x0f\xfaM\xef\x1e\x00\x02\x1f\x0cU\x9e\xe7E\x96.\xc3\xf2\xc4z\xb9\x00\x08|\xd0\xdf\xb07\xca\xd2eVB\x19\x16\x135\x00\x10\xf8\xa0\xc7\x8a,]\x86\xc5z\xb9\x00\x08|\xd0Wq\xbd\xdc\x83\xc4.\x17e\xd8\x9bh)\x00\x04>\xe8\xaf\xa2\xe2z+j\x00 \xf0A_\xe5y>\xce\xd2eX\x8e\x95a\x01@\xe0\x83\xfe\x86\xbd:\xeb\xe5\x16Z\x0a\x00\x81\x0f\xfa+\x0c\xd5\xa6\xca\xb0\x1c)\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<N\xecr\x95)\xc3\x02\x80\xc0\x07\xbdV\x15\xe6\x0aeX\x00\x10\xf8\xa0\xa7b\x19\x96\x87\x89]\xce\x94a\x01@\xe0\x83~\xab\xea\xddS\x86\x05\x00\x81\x0f\xfa*\xcf\xf3\x10\xe6\xb6\x13\xbb\x842,\xe7Z\x0a\x00\x81\x0f\xfa\x19\xf6\xea\x94a\xd1\xbb\x07\x80\xc0\x07=\x16\xc2^j\xbd\xdc#\x135\x00\x10\xf8\xa0\xa7b\x19\x96G\x89]\xae\xca\xb0Wh)\x00\x04>\xe8\xafI\xc5\xf5cM\x04\x80\xc0\x07=\x95\xe7\xf9~\x96^/\xf7\xccz\xb9\x00\x08|\xd0oUeX\xc6\x9a\x08\x00\x81\x0fz*\xcf\xf3\x22K\xaf\x97\xfb\xd4z\xb9\x00\x08|\xd0\xdf\xb0\x17\xca\xb0\xa4\xca\xac\x842,\x85\x96\x02@\xe0\x83\xfe\x0aC\xb9\xa92,\x87\xca\xb0\x00 \xf0AO\xc5\xf5r\x0f\x12\xbb\x5cX/\x17\x00\x81\x0f\xfa\xad\xa8\xb8\xde\x8a\x1a\x00\x08|\xd0Wy\x9e\x8f\xb3t\x19\x96g\xca\xb0\x00 
\xf0A\x7f\xc3\x9e\xf5r\x01\x10\xf8`\xe0B\x98K\x95a9R\x86\x05\x00\x81\x0fz*\xae\x97\xfb8\xb1\xcbUV]\x84\x19\x00\x04>\xe8\xb0\xaa0W(\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<L\xecr\xa6\x0c\x0b\x00\x02\x1f\xf4[U\xef\x9e\x89\x1a\x00\xac\xc5\x1b\x9a\x00\x16\x17\xcb\xb0l'v9~\xf5\xea\xd5\xb9\x96\x02\xe8\x9f\xaf?\xf8\xc2$\xfe\xb3\xf8\xf2\x8bo]\xf6\xf11\xe8\xe1\x83\x05\xfd\xc9?\xfe\x81\xf0\xc5)\xd5\xbb\xa7\x0c\x0b@\xbf\x9dd7+'\xbd(\xc3\xdfi\xb9\x8d\x05>\xd80\x0f\xbe\xf3c\xa3,\xbd^\xee\x91\x89\x1a\x00\xfd\xf5\xe5\x17\xdf\x0a\x81\xef*\xfe7\x14\xd5\x7f\xbf\x0c}\x97\xe5V\x94\xdb=\x81\x0f\x06\xee\xcf\xfd\xe1\x1b\xd9\xaf}\xf0\xab\x9fO\xecrU\x86\xbdBK\x01\xf4\xde\xeb#9\xa1\xdej(\xc3\xf5\xbd0\xe4[n;\x02\x1f\x0c\xd4\xe7^\xfc`\xd5.\x86r\x01\x86a\x92\xdd\x9c\xa23M\x18\xee\xfdF\x19\xfa\xce\xbb:\xdc+\xf0\xc1\x9c~\xea\x0f~4;\xfb\x95\xd3\xd4.\xa1\x0c\xcb\x89\x96\x02\xe8\xbf/\xbf\xf8V85\xa7\xea==L\xde\x0b\xc3\xbd/\xe3p\xefH\xe0\x83\x9e\xfb\xdd\x7f\xff\xdd\xaa]\xc6Z\x09`P\xea\xae\x94\x14\xce\xeb\x0e\xc3\xbda\x92\xc7I\xb9\xed\x09|\xd0C\x7f\xed\xbb\xf7\xb2o\x9e\xffFj\x97\xa7\xd6\xcb\x05\x18\x96/\xbf\xf8V(\xafu\xd6\xf0\xd7BA\xfe\xe7q\x92\xc7x]\x93<\x04>h\xe8\xc7\xff\xf83\xd97O\x93a/\x9c\xe3Qh)\x80A\x9a\xcc\xf9{a\x92\xc7\xfb\xe5v\x19'y\x8c\x04>\xe8\xb07\x7f\xe7\xc7\xb3\xef|\xe7;\xa9]\xac\x97\x0b0P_~\xf1\xad\x10\xf8\xae\x16\xb8\x890\xdc{\xb7\xa6\xdf\xbe\xc0\x07\x1d\xf3\x17\xfe\xef\x0fe\xbf\xf6\xcb\xff\x22\xb5\xcbE\x19\xf6\x8e\xb4\x14\xc0\xa0MZ\xba\x9dP\xd3\xef\x838\xdc{\xb8\xcc\xe1^\x81\x0f\x1a\xf8\xb1\xab\xca?\x19eX\x00\x86\xaf\xed/\xf6a\xb8\xf7k\xd9\x12k\xfa\x09|P\xd3_\xfd\xbd\x9f\xa8*\xc3\xf2\xec\xd5\xabW\xa7Z\x0a`\xd8b\x89\x96\xe3%\xdd\xfcmM\xbfV\x97p{\xc3\xd3\x06\xd5\xc2D\x8d\x97\xdf\xfc\x9f\xc9}\xfe\xd1\xcf\xfd\xcd_\xea\xc2\xd4{\x00V\xe2<\x86\xb3e\x09\xc3\xbd\xbb\xe5\xe7J\xe8M\x0c\xdb\xa4\x0c\x9a\x97\xf3\xdeX\xfe\xea\xd5+O\x19D[_\xf9\xd2i\xfc#\xfb\x84P\x86\xe5W\xff\xd9\xaf\xcc\xfc\xbdb\xffg\xb2\x9f9\xff\xef\x1a\x10\x80e:\x8e\xc1\xef\xb4\xe9/\x1a\xd2\x85)~\xf4\xf7>\xf7\xfd\x7f\x87\xf5rSeXv\xbf\xf4\x
d3\xd9\xdf\xf8\xed\xefh4\x00\x96-\xf4(>\xbf]\xc2\xad\xc9$\x0f\x81\x0f^\xf3c\xbf\xfd\x97\xb3{\xdf\xf8\xa9\xec\x87\xbe\xfb\x93\x1f\xfd\xff\xcf|\xfbG\x92eX\xde\xfeS?\x9c\xfd\xf0\xef\xfe\x91\x86\x03`U>Z\xc2-\xbb\xa9\xe9wT\xa7\xa6\x9f!]\xb8\xe3\xf3?\xfb\x0b\xbf\xf3\x13\xdf\xd9\xfa\xd3\xb7\xff\xff\xdc_|\x91\xfd\xab_\xfa\xe73\xf7\xff\xf9\x9f}+\xfb\x85\xff\xf4m\x0d\x07\xc0\xba=\xcbn\x86{\xa7\xae\xf7k\xd2\x06Don\x7fu\xf2\x13\xd9\xc7a/\xf8\xcc\xb7\xaf\x93\xbf\xf3\xb3\xd9\xff\xd2p\x00tAX\xc2\xed\xe1\xd7\x1f|!\x14\x85\xbe\x9d\xe4\xf1\xfdE\x00\xf4\xf0A\x0c{\xd9\x94\xd9V[?\xf2G\xd9\x9f\xf8\xd1\xdf\xca\xfe\xe5\xf3g\x9f\xfa\x9d\xbf\xffs\x7f+\xfb\xbb\xbf\xf1\xdf4\x1e\x00]\x14z,Bo\xdfQX\x03X\xe0C\xd8\x9b\x11\xf6\xee\xfa\xb3\x9f\xfd\x83\xec\xff\xfc\xfe\xbf\xc9\xfe\xf5\xbf\xfb\xb7\x1f\xfd\xff\xfe\xfdQ\xf6O\xb7~\xc4\xb9{\x00\xf4!\xf8\x1d\x0a|\x08{\x0d\xea(\xfd\x9d\x9f\xfcV\xf6\xcb\xff\xf1<{\xe7\x8b\x7f^\x19\x16\x00\xba\xec\x13C\xbb\x02\x1f\xc2^C\xef\xfc\xe07\xb3\xbf\xfe\xdd\xdf\xd2\x80\x00t\xd1\xd4\xc9\x1b\x02\x1f\xc2\xde\x1c\xfe\xde\x1f\xfeF\xf6\xd3\xbf\xff_5$\x00]\x10\x86m\xc3\xe7\xda\xd1\xac\xd58\x04>\x84=\xa1\x0f\x80~\xba\xc8n\x86mO\xee\xce\xc8\x9dFY\x16\x84\xbd9\xfd\x97\x1f\xf8\x5c\xf6\xd3\x99\xc0\x07\xc0\xca5^bM\x0f\x1f\xc2\xde\x1c\xdez\xf5?\xb2\x9f\xff\xbd_\xd7\xa8\x00\xacJ\x18\xb6\xbd\x9d\x84q\xd9\xf4\x97\xf5\xf0!\xec5\xf4\xf9\xfc\x7f\xff\xca\xcf\xff\xee\xaf\xff\xa2V\x05\xd8h{\xe5\xf6x\x05\xf7s\x16C\xded\x91\x1b\xd1\xc3\x87\xb0\xd7\xcc\xf1o^\xfc\xe2X\xab\x02l\xb6\xaf?\xf8B\x98\x05\xfbp\x89w\x11\x86m?*\x9a\xdc\xc6\x8d\x09|\x08{\xc2\x1e\x00\xcd\xc2\xde\xa8\xbcx\xb1\x84\x9b\x9e\xba,Z\x1b\x0c\xe9\x22\xec\x09{\x004s\xd8\xf2\xed\x85a\xdb\xa3\xd7k\xe7\xb5I\x0f\x1f\xc2\x9e\xb0\x07@M_\x7f\xf0\x85{\xe5\xc5e\xb9m-xS\xb7k\xdd\x16\xf3L\xc2hJ\x0f\x1f\xc2\x9e\xb0\x07@}\xfb\x0b\x86\xbd0l[d5j\xe7\x09| 
\xec\x01\xb0\x1e\xc5\x9c\xbf\x17\x96<;jR;O\xe0\x03a\x0f\x80\x15\xfb\xfa\x83/\xec\x95\x17\xf7\x1b\xfc\xcaB\xb5\xf3\x04>\x10\xf6\x00X\xbd\xba\x9f\x0d\x1f-y\xb6h\xed\xbc6\x99\xb4\x81\xb0'\xec\x01P\xa1f)\x96Vk\xe7\xb5I\x0f\x1f\xc2\x9e\xb0\x07@\xb5Y\x9f\x0fa\x12\xc6$\x06\xbd\x97]=x\x81\x0faO\xd8\x03\xa0\xda\xeb\xb5\xf7ZY\xf2L\xe0\x03a\x0f\x80\x0e\xf8\xfa\x83/\x84\xcf\x88P\x8ae\xa5\xb5\xf3\x04>\x84=a\x0f\x80\xd5\xd9+\xb7w\xb3%,y\xb6*&m \xec\x01\xc0\xc0\x09|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00 \xf0\x09|\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 
\xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\xc0\xe6\x06>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x16\xf3\x99\x96\xc3\xd9=a\x0f\x00`\xc0\x81\xaftT\x86\xb4\x91\xb0\x07\x000\xc0\xc0\x17{\xf7B@\x1b\xb7p[\xc2\x1e\x00@\xd7\x02_\xe90^.\x14\xae\x84=\x00\x80\xee\x06\xbe\xdb`u\xbf\x0cm\xfb\xc2\x1e\x00\xc0\x80\x02_\x0cx\xf7\xa7\x84?a\x0f\x00`\x08\x81oJ\xc0{\xd8d\xf2\x86\xb0\x07\x00\xd0\xe1\xc0\x17\x83\xdd\xc3\x1a!P\xd8\x03\x00\xe8c\xe0K\x04\xbb\xca\xe0%\xec\x01\x00\xf4;\xf0%'o\x08{\x00\x00=\x08|eh\x0b!\xeb~b\x97Ca\x0f\x00\xa0\xc7\x81/\xab\x1e\xb6\xdd}}\xf2\x86\xb0\x07\x00\xd0\x93\xc0\x17\x83\xdcn\x8d]\x0f\x85=\x00\x80\x1e\x06\xbel\xc6p\xed\x14ca\x0f\x00`}\xf2W\xaf^\xcd\xf5\x8be\x80{Y^l\xd5\xdc\xfd\xa2\xdc\xb6\x85=\x00\x80\xd5\x9b\xab\x87/N\xd6\xd8j\xf0+\xc2\x1e\x00@\x9f\x02_6\xc7\xd2i\xc2\x1e\x00\xc0z4\x1e\xd2\x8d\x935^\x08{\x00\x00\xfd0O\x0f_!\xec\x01\x00\xf4G\xa3\x1e\xbe7\xb7\xbfz\xaf\xbc\xb8\xcc\x9a\x9d\xbf'\xec\x01\x00\xacQ\xd3\x1e\xbe}a\x0f\x00`\xd8\x
81\xefpE\xc7%\xec\x01\x00\xb4\xa4\xf6\x90\xee\x9b\xdb_\xdd)/\xbe!\xec\x01\x00\xf4K\x93\x1e\xbeU\xf4\xee\x09{\x00\x00\xeb\x08|q\xb2\xc6\xfe*\x0e\xa8\xbc\xaf=O\x0b\x00@{j\x0d\xe9\xc6\x955\xde_\xe1q]\x95\xdbQ\xb9M~\xf3\xe2\x17_z\x9a\x00\x00\x96\x1f\xf8.\xcb\x8b\xfbk8\xbe\xebr;\x09\xe1\xaf\x0c~\xe7\x9e.\x00\x80%\x04\xbe8\xc4\xfa\xbc\x03\xc7z\x91\xdd\xf4\xfa\x9d\xe8\xf5\x03\x00h7\xf0M\xca\x8b\x83\x0e\x1d\xb3^?\x00\x80\xb6\x02_\x9c\xac\xf1\xbd\x0e\x1f\x7f\xe8\xf5+\xca\xe0w\xe2\xa9\x04\x00\x98\xaej\x96\xee\xb8\xe3a\xefH\xd8\x03\x00H{\xa3\xe2\xfa\xc3\x8e\x1d\xaf\xe1\x5c\x00\x80\xb6\x02_\x9c\xacq\xbf#\xc7i\xc2\x06\x00@\xdb\x81/\xeb\xc6p\xeeq\xa67\x0f\x00`!S'm\xbc\xb9\xfd\xd5Qy\xf1bM\xc7\xa4\xe82\x00@\x8bf\xf5\xf0\x8d\xd7p,\xc71\xe4\x9dzZ\x00\x00\x86\x13\xf8\xf4\xe6\x01\x00\xac:\xf0\xbd\xb9\xfd\xd5\xfdl\xf9\x935\x9e\xc5\x90\xa7\xa4\x0a\x00\xc0\xaa\x03_\xb6\xbc\xde\xbd\xd0\x9b7\x89A\xefR\xd3\x03\x00\xac\xc6'&m,i\xb2\xc6Y\xa6@2\x00\xc0\xda\xbc\xde\xc37n\xe9vC\x81\xe4I\x0cz\x97\x9a\x19\x00`8\x81/\xf4\xe6\x85!\xdb\x89\xa6\x05\x00\xe8X\xe0{s\xfb\xab!\xec\xcd3YCo\x1e\x00@\x1f\x02_\xd6\xbcw\xcfrg\x00\x00=\xf0\xd1\xa4\x8d\x06\x935Bo^\x98|a\xb93\x00\x80\x9e\xb8\xed\xe1;\xac\xd8Oo\x1e\x00@\xcf\x03\xdfx\xc6\xf5\x96;\x03\x00\xe8{\xe0\x8b\x935\xb6\xee\xfc\xccrg\x00\x00C\x0a|\xd9\xc7\xbd{z\xf3\x00\x00\x06\x1a\xf8B\xc0\xdb\xd7\x9b\x07\x000L\xff_\x80\x01\x00e|\xfb\xc4\xd4o\x058\x00\x00\x00\x00IEND\xaeB`\x82"
qt_resource_name = b"\x00\x11\x0bF\x95g\x00p\x00a\x00r\x00a\x00m\x00e\x00t\x00r\x00i\x00c\x00f\x00i\x00t\x00t\x00i\x00n\x00g\x00\x06\x07\x03}\xc3\x00i\x00m\x00a\x00g\x00e\x00s\x00\x1c\x053\xe8'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00z\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g\x00\x10\x0a1\xdeg\x00m\x00o\x00d\x00e\x00l\x00-\x00v\x00i\x00e\x00w\x00e\x00r\x00.\x00p\x00n\x00g\x00\x1c\x053\xf0'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00x\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g\x00\x1c\x053\xf4'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00y\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g"
qt_resource_struct = b"\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00(\x00\x02\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00:\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x9e\x00\x00\x00\x00\x00\x01\x00\x00F6\x00\x00\x00\xdc\x00\x00\x00\x00\x00\x01\x00\x00~\xa5\x00\x00\x00x\x00\x00\x00\x00\x00\x01\x00\x006|"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Mon Oct 15 12:53:43 2018
# by: The Resource Compiler for PySide (Qt v4.8.7)
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore
qt_resource_data = b"\x00\x006x\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x006\x05IDATx\xda\xec\xddOl-Y~\x17\xf0\xaaIG\xf9G\xe27\xd2\xf0O#\xe2\xfbX0j\x08\xb2G\x10\xd1 F\xbe\x1d!\x116\xb1Gb\x93\xd5\xbb\xbd`\x91\xc5\xf0\xdc+f\xf7\xca\x12\x8b\xd9\xb5\x1f-$$\x16\xefzE\xc4\x22m/C\x82\xfaZ\x83\xa0\xc3\x1f\x8d\x1dF\x0aC \xcfF\x84\x89\x84F\xf3\x9c\x88\x10 \x89\xa9\xd3>\x9e\xf6\xbc\xb6OU\xdd\xbfUu?\x1f\xa9t\xdf\xf3-\xdf?\xa7\xae\xef\xfd\xdes\xea\xfcN~}}\x9d\x01i\x1b_yk\xaf\xbc\x18\x5c\xbd\xff\xd1\xa1\xd6\x00\xa0k>\xa3\x09\xa0\x96\xfd\xb8\x01\x80\xc0\x07}\xb3\xf1\x95\xb7\x06\xe5\xc5N\xb9m\xc6\x9e>\x00\x10\xf8\xa0g\x8a;\xff\xd6\xcb\x07@\xe7\xe4\xce\xe1\x83\x87m|\xe5\xadG\xe5\xc5E\xf8\xe7\x9d\x1f?\xbez\xff\xa3\x0b\xad\x03@W\xe8\xe1\x83\xb4\xbd\xd7\xc2^Ph\x16\x00\x04>\xe8\x8f\xfb\xc2\xdd^\xec\xf9\x03\x00\x81\x0f\xba\xac\x0cu\xc3\xf2b\xf3\xbe\xab\xb2\x9b\x9e?\x00\x10\xf8\xa0\xe3R\x134\x0a\xcd\x03@W\x98\xb4\x01\xf7\x88\xa5X^V\xec\xf6\xf6\xd5\xfb\x1fM\xb4\x16\x00m\xa7\x87\x0f\xee7\xaa\xb1\x8f\x12-\x00\x08|\xd0au\xc2\xdcn\xec\x09\x04\x00\x81\x0f\xba\xa4\x0cq\xa3\xec\xd3\xa5X\x1e2\xd2b\x00\x08|\xd0=\xfb\x0b\xda\x17\x00\x04>X\xb5X\x8ae\xab\xc9\xaf\xc4\x1eA\x00\x10\xf8\xa0#\xa6\x09oz\xf9\x00h5eY 
\x8a\xabg|w\xca_W\xa2\x05\x80\xd6\xd2\xc3\x07\x9f\x98\xa5\xa7n\xa4\xf9\x00\x10\xf8\xa0\xfdf\x09mO\xac\xaf\x0b\x80\xc0\x07-\x16'^l\xcex3\xce\xe5\x03@\xe0\x83\x16\x1b\xb5\xe46\x00@\xe0\x83y\x8b\xabe\xec\xcc\xe1\xa66\x95h\x01@\xe0\x83v*\xe6x[\x02\x1f\x00\xad\xa3,\x0bk-N\xb4\xb8\xc8\xea/\xa5V\xc7\xe3\xab\xf7?\xba\xd0\xba\x00\xb4\x85\x1e>\xd6\xddh\xcea/(4+\x00\x02\x1f\xb4\xc7\x22f\xd6\xee)\xd1\x02\x80\xc0\x07-P\x86\xb2\xbdl\xf6R,\xf7\xdet\xe6\x5c>\x00\x04>h\x85E\x8625\xf9\x00\x10\xf8`\x95b)\x96\xdd\x05\xde\xc5f\xecA\x04\x00\x81\x0fVd\x19=p#\xcd\x0c@\x1b(\xcb\xc2\xdaYP)\x96\x87(\xd1\x02\xc0\xca\xe9\xe1c\x1d\xed-)\xec\x05\xce\xe5\x03@\xe0\x83\x15Xf\x08\x1b)\xd1\x02\x80\xc0\x07KT\x86\xafay\xb1\xb5\xcc\xbb\xccnz\x14\x01@\xe0\x83%\x19\xad\xe0>\x0d\xeb\x02\xb0R&m\xb06b)\x96\x97+\xba\xfb\xb7\xaf\xde\xffh\xe2(\x00\xb0\x0az\xf8X'\xa35\xbdo\x00\x04>\x10\xf8\x96\xe0I\xeca\x04\x00\x81\x0f\x16\xa1\x0c[!\xecm\xae\xf8a\x8c\x1c\x09\x00\x04>\xe8w\xd8\x12\xf8\x00\x10\xf8`\x116\xbe\xf2\xd6vy\xb1\xd3\x82\x87\xb2\x19{\x1a\x01@\xe0\x839kSY\x14\x81\x0f\x80\xa5S\x96\x85^\x8b\xab\x5c|\xb7\xe6\xeeW\xb7\xbf\xd6\xf0n\xc2\xef\xbd\xca\xea\x9f#\xf8\xc5\xab\xf7?:st\x00X\x16=|\xf4\xdd\xa8\xc6>\x97\xe5\xf6N\xb9\x0d\xcam\x9a vV\x06\xb8A\xbc\x8d\xd3\x1a\xfb+\xc4\x0c\x80\xc0\x07s\x94\x0aW'\xd9MA\xe4A\xb9\x8d\xcb\xed\xd5,w\x14ocX\xfe\xf3\x8b\xe5v\x94\xd8\xf5\x89\xf5u\x01\x10\xf8`\x0e\xcaP\x15\xd6\xb0}}\x985\x0c\xbf>/\xb7\xc7e8\xdb[\xc4\xea\x17a\xb8\xb6\xdcF\xe5??[n\x07\xd9M\x0f\xe2\xebF\x8e\x10\x00\xcb\xf2\x86&\xa0\xc7\xee\xf6\xee\x9d\x97\xdba\xe8\x85[\xd6\x9d\xc7\x1e\xc3\x22lqvn\xd8v\xee<\xb6C\x87\x08\x80e\xd0\xc3G/\xc5U-B\xb8\x0aC\xaba\xd8v{\x99a\xef\x9e\xf0w;\xdc\xfb8>\xa6G\xb1\x07\x12\x00\x16N\x0f\x1f}\x16\x86m/\xda\xf4\x80\xe2\xe3\x19\xc5s\xf8\x9c\xc7\x07\x80\xc0\x073\x06\xab6?\xbe0\xdc\xfb\xca\x91\x02`\x19\x0c\xe9\x02\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x02\x1f\x00,I\x9e\xe7\x8f\xcam\xa8%@\xe0\x03\xa0\x9fao\xbb\xbc\xb8\xd0\x12 
\xf0\x01\xd0\xcf\xb0\xb7_^|\xa3\xdc6\xb4\x06,\x96\xb5t\x01Xv\xd0{T^\x8c\xcbm\xf7\xf6g\xd7\xd7\xd7\x13-\x03\x02\x1f\x00\xfd\x08{a\x08\xf7\xb8\xdc6\xb5\x06,\x8f!]\x00\x96\x15\xf6n\x87p_\x0f{\xa7Z\x07\x16K\x0f\x1f\x00\x8b\x0ez\x9f\x1a\xc2\x05\x04>\x00\xfa\x13\xf6\xea\x0c\xe1N\xb4\x14,\x96!]\x00\x16\x15\xf6\x1e\x1a\xc2\x05\x96L\x0f\x1f\x00\xf3\x0ezM\x87p'Z\x0d\x04>\x00\xba\x13\xf6\xcc\xc2\x85\x162\xa4\x0b\xc0\xbc\xc2\xde(\xbb\xe9\xadk\x14\xf6\xd4\xe0\x83\xc5\xd3\xc3\x07\xc0\xacA/\x0c\xe1\x1e\x96\xdb\x13\xad\x01\x02\x1f\x00\xfd\x0b{a\x08w\x5cn[S\xde\x84\x1a|\xb0\x04\x86t\x01\x986\xec\x8d\xb2\x9b!\xdc-\xad\x01\xed\xa6\x87\x0f\x80\xa6Ao\x9eC\xb8\x13-\x0a\x02\x1f\x00\xed\x0a{\xb3\x0e\xe1\x02+`H\x17\x80\xbaao\x94\xcd\x7f\x08w\xa2ea\xf1\xf4\xf0\x01P\x15\xf4\xcc\xc2\x05\x81\x0f\x80\x1e\x87\xbdAvSHy!C\xb8j\xf0\xc1r\x18\xd2\x05\xe0\xa1\xb0\xb7W^\x9ce\xce\xd7\x03\x81\x0f\x80^\x86\xbd0\x84\xfbA\xb9m,\xf0n\xd4\xe0\x83%1\xa4\x0b\xc0\xdd\xa07\xc8\x168\x84\x0b\xac\x86\x1e>\x00n\xc3\xde\xb2\x87p'Z\x1d\x04>\x00\x96\x17\xf6\x961\x84\x0b\xac\x88!]\x80\xf5\x0ez\x83luC\xb8\x13G\x00\x96C\x0f\x1f\xc0\xfa\x86=\xb3pA\xe0\x03\xa0\xc7ao\xe5C\xb8j\xf0\xc1\xf2\x18\xd2\x05X\xaf\xa0\x17V\xcd\x08AK\xaf\x1e\xac\x11=|\x00\xeb\x13\xf6\x86\xe5\xc5EK\xc2\x9e\x1a| \xf0\x010\xe7\xb0W\x94\x17\x1fff\xe1\xc2Z2\xa4\x0b\xd0\xef\xa0\x17\x86p\xc3,\xdc\x9d\x96=\xb4\x89\xa3\x03\x02\x1f\x00\xf31\x88\xe1*\xcc\xc6\xdd\x8e?\xdb\xd1, \xf0\x01\xd0\x13\xd7\xd7\xd7g1\xec}J<\xa7/x\xfdr\x19\x81p\xe2\xe8\x80\xc0\x07\xc0\xe2\xc3\xe0$\x15\xbe^\x0b\x84\xc3L\xcf \x08|\x00\xf47\x10\x96\xe1\xefbA\xb7\x0d,\x81Y\xba\x00$\xc5\x19\xbe\x9bZ\x02\x04>\x00\xfa\x19\xf6\xc2,\xdf\xfd9\xdf\xac\x1a| \xf0\x01\xd0\x22a\x09\xb6T\xed\xbe\x83r\xbb\xd2L 
\xf0\x01\xd0Aq\xd2\xc6\x93\xc4.\xe7\xd7\xd7\xd7Ey9jx\xd3\x13\xad\x0b\x02\x1f\x00\xedPT\x5c\xff\xf1Po\x19\xfaBa\xe7\xe7\x9a\x0b\x04>\x00:$\xcf\xf3Q\x96.\xc3rrw\xa6m\xf9\xef\x10\xfe\xcek\xde\xfcD\x0b\xc3r)\xcb\x02p\x13p\x06\xd9\xcd\xaa\x14\xc3r\x0b\x13\x15\x92\xabR\x94\x01'\xefq[\x84\xe7_T\xecv\xdfD\x8e\xbd\xec\xa6\xc8\xb3\xf5zA\xe0\x03hE\xa8\xd9\xce>)(<lcH\x89\xe7\xd0}\x18\xff{\x1a\xc3\xd4E\xb9M\xe2\x0a\x1a\x8b\x12\xc2\x5c\xaa\x0c\xcbAy\xff\x17\xf7\x84\xe0\x8b\xd83\xf8A\xea\xc6\xd5\xe0\x03\x81\x0f`\xd1!/\x04\x92\xbd\xac\x1bu\xe5\x06w\xfe\xbd\x93\xdd\xe9m,\x9f\xcbm\x08<\x9eg\x00\x8c=\x9d\xa92,\x97\xd9\xcd\xcc\xdd\x87\xc2\xdcqy\x1b\xe1|\xbe\xa7^q \xf0\x01,+\xe4=\x8a\x01/\x84\x98\xad\x8e=\xfcA\xc5\xf5\xdf\x0b\x81\xe5\xf3\x0cAl\x12\x02`\x9cD1\xad\xaa2,Ey\xfb\xafR7\x10\xce\xe7\x8b\xe1\xfa\xbe\xe1p5\xf8`\x05L\xda\x00z\x1b\xf4\xe2\x0a\x11\x17\xe5\xf6\xa2\x83a/\x186\xd87\xf4X\x86\x12*\x1f\x94\xcf\xfbU\xb9\x8d\xcbm\xafa\x9b\x85\xfb\xdbM\xecrZ\x86\xb9q\xcd\x9b\x0b\xf7}_}\xbeW^\x9d \xf0\x01\xcc3\xe8=\xcb\xba=\x81`0\xe5\xefm\xdc\x09\x7f\xe1\xbc\xba\x22\x0e\xd5V9\xac\xb8\xbe\xa8\xfb\x00b/\xe0}\x81\xf3\xcc\xab\x14\x96\xcf\x90.\xd0G\xfb3\x04\xbd\xab\xec\x93\xc9\x11a{\xb5\xc2\x90\xb29\xa7\xdb\x08\xc1\xf7Y\x19\xfaNB\xa8\xbbo\xd2D\x9cl\x91\xea\x05=j:\xd9\x22\xec_\xde\xeeA\xbc\xff[z\xf8@\xe0\x03\x98M\xe8Y*CF8\x87\xedI\xcd_\x09\xe7\x94M\xe2vVu~\xda\x92\xbd\x9d}R\x22f\x18/g\xe9\xb1\x0c\xc3\xb5\xbb\xf1|\xbf\xe2vx6\x9e\xe7xX\x11\x82\xf7\xa7<\x1eE\x1c*\xbe=\x9fO\x0f\x1f\x08|\x00sq\x98\x08|!\xec\xdc\xcel=n\xf3\x93\xb8\xd3\xa3\xf6\xbd\xc7\x19\x87f\x87w\xb6iz\x01\xc3\xef\xbc\x88C\xdfE\x8d y8c\x10\x0eC\xbb\x17\xf1>\xf4\xf0\x81\xc0\x070\x97\xa0tV\x86\x99\xb0\xea\xc3\xdd!\xca\xa3r\x1bw\xbd\x06\x5c\xac\x7f7\x8e\xdb\xddz\x82\xa3\xac\xf9\xc4\x94\x10\xfc\xfea\xb9}>\xb1\xcfe\x5c/w\x96\xc7\xfc*N 
\xf9p\xc1\xf5\x03\x81\x07\x98\xb4\x01\xf4U\xe8\xe5\x0bC\x91\xe1\x1c\xb2\xcf\x96Ac\xd4\xc7\x82\xbf!@\x95[\xe8\x81\x0b\xc1\xefq\xb9\xbd\x9b\xd5_\xe2,\xa8\xeaq\xdb\x9f\xd3\xe3\x9c\xc4\xc7\x06\x08|\x00s\x0bB\xa17\xefQ\xe8\x9dj\xd9yy\x8b|\xce\x17\xaf\x85\xbfP\x00\xf92\xf1+!\x18\xfe\xa5\xc4\xf5\xa7\xf3\x1c\xf6\x0e\x8f\xcd+\x13\x04>\x00\xe6\x1b\xfe\xf6\xcbmP\xfe\xf7\xcb\xe5vr\xcfn\x9f\xab\xb8\x99\x91\x96\x04\x81\x0f\x80n\x84\xbf\xb0\xfaF8\x87\xee\xb6\xd7/\x0cu\x87\xd9\xc9\xa9s\xf7\x9e\xdf\xb7^. \xf0\x01\xd0\xee\xe0\xf7q\xaf_vS\xd0y;\xb1k\x08\x84\x85\x16\x03\x81\x0f\x80\xee\x9ay\xbd\x5c@\xe0\x03\xa0\xa5b\x19\x97TQ\xeas\x93+@\xe0\x03\xa0\xdb\xaa\xc2\xdc\xbe&\x02\x81\x0f\x80\x8e\x8a\xeb\xe5\xee$v9\xe9c\xadB@\xe0\x03X\x97\xb0\x17\xd6\xcb-*v\xd3\xbb\x07\x02\x1f\x00\x1d\x16\xc2\x5cj\xdd\xdd\x03eX@\xe0\x03\xa0\xa3\xf2<\x1fd\xe9\xde\xbb\xb0\x1a\x87\x89\x1a \xf0\x01,$\x88\x1c\x96\xdbY\x1cndq\x94a\x01\x81\x0f`%aoX^<-\xb7\xadr\xbb\x88\xe5BXL;\xef&v\x09\xeb\xe5\x8e\x17p\xbfca\x1e\x04>\x80\xbbC\x88\xa1\xf7\xe9\x1bq\x16)\x8bk\xe7\xfb\x14\x8b\x08{\xd9M\xad\xbf\x10\xe6'\xc2<\x08|\xc0\x1a*\x03\xc0~\x0c\x03\xaf{\x11\xc3\x02\xf3i\xe7\xd1\x03\xed|\xebh\xdeeX\xee\x84\xbd[B\x1f\x08|\xc0\x1a\x86\x90\xaa\xf2 C\xad4\xb7vN\xf5\xee\xcd}\xbd\xdc{\xc2\xde\xad\x0d\xa1\x0f\x04>`\xbd\xecg\xe9\x09\x04#M\xb4\x94v>\x5c@\x19\x96T\xa0\x13\xfa@\xe0\x03\xd6A\xecuJ\x95\x079\xb2\xd2\xc3\x5c\xdayP^<K\xecrY\xb6s\xb1\x80\xbb\x1e\x96\xdb\xb9\xd0\x07\x02\x1f\xb0\xdeR\xbdNW\x99\x95\x1e\xe6e\x5c\xe38\xcc],\xed\x22\xf4\x81\xc0\x07\xac\xb9Q\xe2\xbaC\xb5\xe0f\x17\xcb\xb0\xa4\xd6\xcb\x0deX\x8e\x17u\xff\x0dC\x9f\x92- 
\xf0\x01=\x0b\x22!\xec=\xb4\xb4W\xe8\xdd\xb3\xd2\xc3|\x8c+\xae_x/\xaa\xd0\x07\x02\x1f\xb0\xbe\xf6\x12\xd7\xe9\xdd\x9bO\xa8\xaeZ/\xf7y\xd9\xceg\xcbx,\xf1x\x8eb\x98\x7fH(\xd9r\xec\xc8\x81\xc0\x07\xf4#\x88\x0c\xb2\x87W{\xd0\xbb7\x9f6\xae*w3\xf72,5B_\x08\x97\xc3\x8a\xd0\xb7\xa3\xf6\x22\x08|@?\xa4z\xf7\x8e\xf5\xee\xcdE+\xd7\xcb\x8d\xa1\xafj\x18\xf9I\xec\x9d\x04\x04>\xa0\xa7\x81O\xef\xde\x8c\xe2\x8c\xd7'\x89]B\x19\x96\x95\xb5s\x5c\xab\xf7\xa0b\xb7\xf7\xca\xe7\xb1\xe7h\x82\xc0\x07t3\x8c\x84\xa1\xc6\x87f\x8d\x9e/\xeb\x9c\xb2\x9e\xab\x0as\xa3U?\xc0X\xf7\xef\xa4b\xb7q\x1c\xfe\x07\x04>\xa0c\x86\xa9\x0fx\xcd3s\xa0\xde\xcb\xd2eXNZT\xcc:\x04\xcf\xaa\x99\xbb\xc7f\xee\x82\xc0\x07tO\xf2\xfc=\xcd3S\xd8\xabZ/7h\xcd\xb9q\x0df\xee\x1a\xe6\x07\x81\x0f\xe8\x98\xe1\x03??_\xc0Z\xae\xeb\xa6\xaa\x0c\xcbA\xdb\xda8\x0e\xe1\x17\x15\xbb=\x89u\x1b\x01\x81\x0fh\xbb\xd8\x03\xf5P \x99h\xa1\x99\xdav\x90\xa5{\xefZ[\xee&N \xa9:\x9f\xef\xd0\xf2k \xf0\x01\xdd\x90\xfa\xc0\x16\xf8fSd\xe92,\xfb-/w3\xca\xd2C\xbb\xe1\xb9\x8d\x1df\x10\xf8\x80\xf6\x1b\x0a|\xf3\x17\xd7\xcbM\x95a9\x8d\xa5PZ\xeb\xce\xf9|)\xce\xf1\x04\x81\x0f\xe8\x80\x87z\xf8\xce\x15[\x9eI\xd5Pm\xd1\x85'Q\xbe\x06B\xa0\xbboh7\xcc\xe4\xfdb,\xe5\x02\x08|@\xcb=T^\xe3B\xd3L'Nf\xd8J\xecr\xd4\xa22,u\x84\xf3\x10\xef\x0e\xed\x86\x89&\xdb\xea3\x82\xc0\x07t\xc7C\xf5\xe1|\x98O\x17\xf6\xaa\xca\xb0,}\xbd\xdcY\xc5Y\xc4\xe19\xe9\xd5\x03\x81\x0f\xe8h8\xc9\x04\xbe\xb9\x0a\xbda\xa9\x89\x1a\x87],u\x13B\x9e^=X\xac74\x01\xb0@o\x97\xdb n\xc3x\x19\xca\xb48\x7f\xafy\x80\x0em\xf7,\xb1\xcb\xa5\xde1@\xe0\x03\x96*N\xca\x98h\x89\xb9\x19W\x5c\xbf\xaf\x89\x80\x87\x18\xd2\x05h\xb9X\x86%\xb5^\xeei\x9c\xf1\x0a \xf0\x01t\xd4\xb8\xe2z\xbd{\x80\xc0\x07\xd0Uy\x9eW\xad\x97{d\xb2\x03 \xf0\x01t7\xec\x85\x99\xceEb\x97P\x86E\xef\x1e \xf0\x01tX\x08{\xa92,\x85\x15K\x00\x81\x0f\xa0\xa3\xf2<\x0f\xcb\xd2=M\xec\x12\xca\xb0\x1cj)@\xe0\x03\xe8\xae\xaa07\xd2D\x80\xc0\x07\xd0Qy\x9e\xefe\xd5eX&Z\x0a\x10\xf8\x00\xbaK\xef\x1e 
\xf0\x01\xf4U\x9e\xe7E\x96.\xc3r\xd0\xc5\xf5r\x01\x81\x0f\x80\xec{\xeb\xe5\xa6\xca\xac\x842,&j\x00\x02\x1f@\x87\x15Y\xba\x0c\xcb\xbe2,\x80\xc0\x07\xd0Qq\xbd\xdc'\x89]\xce\xcb\xb07\xd6R\x80\xc0\x07\xd0]E\xc5\xf5V\xd4\x00\x04>\x80\xae\xca\xf3|\x94\xa5\xcb\xb0\x1c)\xc3\x02\x08|\x00\xdd\x0d{a\xbd\xdc\xd4D\x8c0Q\xa3\xd0R\x80\xc0\x07\xd0]a\xa865Q\xe3P\x19\x16@\xe0\x03\xe8\xa8X\x86\xe5Yb\x97\xcbL\x19\x96\xaa6\xdc\x8e\xbd\xa4@\xc2\x1b\x9a\x00`e\xaa\xc2\x9c2,\xdf\x1f\xee\x86\xe5\xc5v\xdcBX\xbe=\xef\xf1\xedr\x9bh!\x10\xf8\x00\xda\x18^v\x13\xbb\x84\xf5r\x8f\xb5\xd4\xf7\xf9\xf0\x81\x9fo\x0b|\x90fH\x17`5\xc6\x15\xd7+\xc3rO\x08~\xe0\xe7\x86tA\xe0\x03h\x97<\xcfC\x98K\xad\x97\x1b\xca\xb0\x9ci\xa9O\xb9x\xe0\xe7CM\x03\x02\x1f@\x9b\xc2^\xe8\x8d*\x12\xbb\x842,z\xf7\x9a\x05>\xa0\x82s\xf8\x80e\x84\x9cavs\x92}\xd8n\xff\x1dz\xb8\xbe\xbc\x86\xe7\xa9\x85\xb0\x97*\xc3R\x98\xa8\xd18\xf0\xedh\x1a\x10\xf8\x80\xd5;~ \xe4\x0c\xe3u\xeb\x12|\xc3\xe4\x82\xa7\x89].\xcb\xb0\xa7\x0cK\xf3\xc0\x07T0\xa4\x0b,+\xf0\xddgo\xcd\xda\xa1*\xcc\x8d\xbcT\x00\x81\x0f\xe8[\xe0\xdb\x8c\xbd^\xbdW>\xcf\x10nSC\x8f\xa7\xd6\xcb\xadt\x91h\xdf\xa1\xe6\x01\x81\x0fX\xa1x\x9e\xde\xd5\x03W\x8f\xd6\xa4\x19\xf4\xee\xcd\xfe:\xba\xd0\x0a 
\xf0\x01\xed6^\xd7\xa0\x93\xe7y\x91\xa5\xcb\xb0<\x17f\x00\x81\x0f\xe8\x83\x87z\xb86\xca@\xd4\xdb\xd0\x17\xcb\xb0\xa4\xca\xac\x84\x9e\xcf\xc2\xcb\x03\x10\xf8\x80\xce\x8b=X\x0f\xad\x94\xd0\xe7\xc0\x13\x82n\xaa\x0c\x8b\xf5r\x01\x81\x0f\xe8]\xf8\xb9\xcff\x1f{\xf9\xe2D\x82'\x89]\xce\xcb\xb07\xf6\xb2h\xf4\xc5!\x7f`\x9bh\x1d\x10\xf8\x80v|X\x87\xc9\x1b\x97\x0f\x5c]\xf4\xf0)W='+j\x00\x02\x1f\xd0K\x0f\x85\xa0^\xf5\xf2\xc5\xe7\x92*\xc3r\xa2W\x0a\x10\xf8\x80^\x8aC\x98\x0f\xf6\xf2\xc5I\x0e]\x0f{\xd6\xcb\x05\x04>`\xed=\x14\x866{\x12\x84\xf6\xb3t\x19\x96CeX\x00\x81\x0f\xe8\xb5\x8a^\xbegy\x9e\x0f\xba\xfa\xdc\xe2c\x7f\x96\xd8%<o\xeb\xe5\x02\x02\x1f\xb0\x16R=y\xe3\x0e?\xaf\xaa0W(\xc3\x02\x08|\xc0Z\x883v\x1f\xaa\xcb\xb7\x93\xe7y\xe7\x86vc\x19\x96\xdd\xc4.\xa7\xca\xb0\x00\x02\x1f\xb0nF\x89\xeb\x8a\x0e\x0e\xedV\xf5\xee\x99\xa8\x01\x08|\xc0z\x89\x13\x17\x0e\x1e\xb8:\xacN1\xee\xcas\x89=\x92[\x89]\x8e\xca\xe7{\xe6\xa8\x03\x02\x1f\xb0\x8e\xa1\xaf(/\xce\x1f\xb8\xba\x13C\xbb\xca\xb0\x00\x02\x1f@\xb5\xbd\x18\x8a\xee\xf3^\x19\xa8\xb6[\xfe\xf8C\xd8K\xad\x97{h\xa2\x06 \xf0\x01k-\x0e\xed\x8e\x12\xbb\x8c\xdb\xfa\xd8\xe3y\x86O\x13\xbb\x5c\xc6^L\x00\x81\x0fX\xfb\xd0\x17f\xed>\x7f\xe0\xea6\x9f\xfbV\x15FG\x8e. 
\xf0\x01|\x12\xfa\xc2ynGw~\x14\x86y\xdf)\x7f\xde\xca\xd0\x94\xe7y\x18\x8aN\xad\x97{j\xbd\x5c\xa0\x0d\xde\xd0\x04@\xcbB\xdf(N\x82\x18\x94\xdb\xa8\xe53[\xab\xca\xb0\x8c\x1cQ@\xe0\x03\xb8?\xf4\xed\xb5\xfd1\x96\xa1\xb4\xc8\xd2\xeb\xe5>\xb7^.\xd0\x16\x86t\x01\x9a\x87\xbd\xd0\x03\x99*\xb3\x12\x86\xa2\x0b-\x05\x08|\x00\xdd\x15\x86rSeX\xf6\x95a\x01\x04>\x80\x8e\x8a\xeb\xe5>I\xecrn\xbd\x5c@\xe0\x03\xe8\xb6\xa2\xe2z+j\x00\x02\x1f@W\xe5y>\xca\xd2eXN\x94a\x01\x04>\x80\xee\x86\xbd\xaa\xf5r\x03\xbd{\x80\xc0\x07\xd0a!\xcc\xa5\xca\xb0\x1c(\xc3\x02\x08|\x00\x1d\x15\xd7\xcbM\xf5\xde]f\xd5E\x98\x01\x04>\x80\x16\xab*\xc3R(\xc3\x02\x08|\x00\x1d\x15\xcb\xb0\xec&v9U\x86\x05\x10\xf8\x00\xba\xadj\xa8\xb6\xd0D\x80\xc0\x07\xd0Q\xb1\x0c\xcbVb\x97#eX\x00\x81\x0f\xa0}!n\x5cng\xe5\xb6]\xb1_(\xc3\x92\xea\xdd\x0b\xeb\xe5*\xc3\x02\x08|\x00-\x0b{\xa3\xecfY\xb4\xd0k7)\xff\x9f\x0alE\x96\x9e\xa8qh\xa2\x06 \xf0\x01\xb4+\xec\x85\x1e\xbd\x17w~\x14\xc2\xdc{\xe5\xcf'\xb1\xec\xca\xdd}\xc3\xff\x9f&n\xee\xb2\x0c{\x85V\x05\xba\xe2\x0dM\x00\xacA\xd8\x0b\xc3\xb3\x93\x07\xae\x0eK\xa5\xbd,\xf79\xc8>\xe9\xb5\x1bW\xdc\xe4\xfe\x94\x8fc/\xde\xf6E\xb9\xbd\xde;xV\xe7g\xce\x19\x04\x04>\x80\xfb\x85\x90\xb4Q\xb1\xcf\xb3\x10\xe4\xcaP\xf6O\xb2\xf4z\xb9\xa1\x0c\xcb\xf14\x0f\x22\xfc^,\xf3r\xdf\xe3\xd9\xa9\x19\x1a\xef}L\xaf\xfd\xffU\x0c\x8bU?\xbb\xb0:\x08\x08|\x00}\x11\x02\xda\xa0F\xe8\x0b\xd7\xff\x83\x8a}F\xb3<\x902`\xddN\x18\x09\x8fikN\xcf\xef\xbe\xb0\xb8;e\x80\x0c\xab\x86\xbc\x1e\x02k\xf5>\x86\x9f9\xaf\x11\x04>\x80\x95\x08\xe7\xdb\x85\xd9\xb9\xd9\xcd\xac\xdb\xdd\x19n\xea\xf9<z\xc4\xc2m\xc4\x9e\xbe\xe3\xacf\xcf\xde\x12mf\x9f^3\xb8\xf6c\xbc'@\x9e\xde\xb3\xdb$\x86\xc3c\xafN\x10\xf8\x00\xe6\x19\xfaBP\xdb\x8bAk|O\xa8\xa9\xf2\xff\xca\xed?\xcf\xf1\xf1\x84\x9e\xb0a\x0c\xa2Oz\xdc\xf4\xf7\x85\xc5\xaa\x927\xc0\x9c\x99\xa5\x0b\xac[\xf0\x9b\x94\xdb\xa0\xfcg\x98\xa4q\xd5\xe0W\x7f\xb0\xdc\xfeq\xac\xe1\xf7h\x8e\x8fgT^\xbc\xbbF\x87 
\xf4\xf8\x0d\x0d\xfd\x82\xc0\x07\xb0\x8c\xe0Wd7\xe7\xf5\x1d5\xfc\xd5\xdb\x1a~\xdbs|,\xa1\xb7\xeb\x9d\x86\x01\xb4\x8b\xc2\xca$\xc2\x1e\x08|\x00K\x0d}\xafb\x0f\xdb\x7f\x98\x22\xf4}\xa3\x0c}\xc5\xbcz\xfb\xca\xc71./\x86=\x0e}\xcfc[\x03\x02\x1f\xc0r\xc5\xd57\xfe\xca\x94\xbf\x1eJ\xb9\x84\x09\x18\xc5\x9cB_\x98\xf9\x1az\x0e\xcf{\xd6\xcc\xef\x94\xcf\xcd2t \xf0\x01\xac$\xec\x85\xde\xb9Y\xc3Z(\xe52\xb70\x13'\x97\x0c\xb3\xfbg\xb7vM\xe8\xad\xfcr\xec\xbd\x04\x04>\x80\x95\x08A-5[\xf7$\xbb\xa9KWe\xae\xe5E\xe2Ps\x08}G\x1dn\xdb\x10\xf6\x86J\xaf\x80\xc0\x07\xb02q\xbd\xdcT\xcf\x5c\x08z\xa38\xa3\xf7\x9d\x8a\xe0\xb7\x90\x12#\x1d\x9e\xc1{\x1e\xc3\xde\x99W\x1a\x08|\x00\xab\x14BZj\xe5\x8d\xe2v6i\x18\x92\x8c\xc1\xef\xed\xec\xd3\xbdn\xe7\x8b\x0c6\x1d\x9c\xc1+\xec\x81\xc0\x07\xb0z\xb1\xf8rj\xc5\x8d\xd3\xfb\xce;\x8b5\xfcF\xe5?\x1fg\x9f\xd4\xf1[x\x01\xe1\x0e\xcd\xe0=\xc9\xd4\xd8\x03\x81\x0f\xa0%\xaaBZQ\x11\xc0.B\x1d\xbfr{\xb4\xac\x09\x09\x1d\x98\xc1\x1bj\xec\xed\x09{ \xf0\x01\xac\x5c,\xc3\xb2U\x11\x5c&m|\xec-\x9e\xc1\xfb\xae\x1a{\xd0n\xd6\xd2\x05\xd6)\xecU\xad\xe1\x1a\x86L\xf7[\xf8\xb8\xc3c\xbe\xed9\x0b=}E\xb9}\xb5\xdc\xfev\x0b\x1e\xde;\xca\xae\x80\xc0\x07\xd0&!\xcc\xa5&j\x1c\xb6tH2\x0c\xe5\xee\xb4\xec1\x85p\xbc\xd7\xd6\xdeP@\xe0\x03\xd6P,\xc3\xf2,\xb1\xcbe\x5c_\xb7\x8d\xb6[\xf6xnk\xec\x99\x89\x0b\x1d\xe1\x1c>`]\x8c+\xaeo\xf3\xd2_\x1b-z,a\xd2\xc8\xb6\xb0\x07\xdd\xa2\x87\x0f\xe8\xbdX\x86%5$z\xda\xd6\x15!\xe2coS\xd8Sv\x05:H\x0f\x1f\xb0\x0e\xc6\x15\xd7\xb7\xb9w\xefQK\x1e\xc7\x91\xb0\x07\xdd\xa5\x87\x0f\xe8\xb5<\xcf\xab\xd6\xcb}\xde\xf2\xe1\xc9\x10\xb0Nb\xf0\x0b\xdb\xd6*\xc2\x9e\xb2+ \xf0\x01\xb45\xec\x85\x80T$v\xb9\xaa\xb8~\xe5\xe2,\xd8I\xc5s\xfc\xee\x02\x1f\x82\xb2+\xd0\x03\x86t\x81>\xab\xbd^n\x87-r\x06\xaf\xb0\x07=\xa1\x87\x0f\xe8\xa5<\xcfC\x10z\x92\xd8%\x94a9\xec\xc1S]D\xe0Sv\x05zF\x0f\x1f\xd0WUan\xd4\x93\xe79\xef\xc0'\xec\x81\xc0\x07\xd0~y\x9e\xefe\xe92,'=Z!b\x9e\x81/\x94]\x19\x08{ \xf0\x01\xb4=\xecU\xad\x97\x1b\xec\xf7\xe8)\xcfk\xd6\xae\x1a{ 
\xf0\x01tFU\x19\x96\x832\xd4\x5c\xf4$\xdc\x0e\xe7tS\xa1\xec\xca\xb6\xb0\x07\x02\x1f@\x17\x02\xd0 K\xf7\xde\x85\xf3\xd3\x0e{\xf4\x94\xe71\x9c\xfb\x5c\x8d=\xe8?\xb3t\x81>)\xb2t\x19\x96\xfd\x9e\xf5b\x0df\xfc}eW`M\xe8\xe1\x03z!\x0eo\xa6\xca\xb0\x9c\xf60\xdcL\xdb\xc3w%\xec\xc1z\xd1\xc3\x07\xf4E\xd5Pm\xd1\xc3\xe7\xbc3e\xd8Sv\x05\xd6\x8c\x1e>\xa0\xf3\xf2<\x1fe\xe9\xd9\xaaG=*\xc3r\xfb\x9c\xa7\xe9\xdd\xbb\x14\xf6`=\xe9\xe1\x03\xba\x1e|\xaa\xca\xb0\xb4~\xbd\xdc)5\x0d|\xca\xae\xc0\x1a\xd3\xc3\x07t]\x98\x95\x9b\x9a\xa8q\xd8\x972,\xaf\x194\xd8\xf7D\xd8\x83\xf5\xa6\x87\x0f\xe8\xacX\x86\xe5Yb\x97\xb0^n\xd1\xd3\xa7?\xac\xb9\xdf\x91\xb2+\x80\x1e>\xa0\xcb\xc6\x15\xd7\xef\xf7\xf8\xb9\xd7\x19\xd2=\x10\xf6\x80@\x0f\x1f\xd0I\xb1\x0cKj\x96j(\xc3r\xdc\xd3\xe7>\xc8\xd2\xc3\xd8\x81\xb2+\x80\xc0\x07t^U\x98Y\xd7\xde\xbd0Ie\xd4\xd7\xb0\x0b\x08|\xc0\x9a\xc8\xf3\xbcj\xbd\xdc\xe7=/=\xb2\x9d\x08{\xca\xae\x00\x9f\xe2\x1c>\xa0ka/\x94a)\x12\xbb\xf4\xb5\x0c\xcb]\xc3{~v.\xec\x01\x0f\xd1\xc3\x07tM\x08s\xa9\xf3\xd7\x8a5(?2x \xec)\xbb\x02\xdcK\x0f\x1f\xd0\x19qu\x89\xa7\x89]B\x19\x96\xc3\x9e\xb7A\xe8\xe1\xbc;\x9c}$\xec\x01U\xf4\xf0\x01]R\x15\xe6Fk\xd0\x06w\xcf\xdfSc\x0f\xa8E\x0f\x1f\xd0\x09y\x9e\xefe\xd5eX&k\xd0\x14\xc3x\xf9\xae\xb0\x07\xd4\xa5\x87\x0f\xe8\x0a\xbd{7B\x0f\x9f\x1a{\x80\xc0\x07\xf4K\x9e\xe7E\x96.\xc3r\xd0\xd3\xf5r\xef\xb3\xbfF\xcf\x15\x98\x13C\xba@\xdb\xc3\xde K\x17Q\x0eeX\x0e\xd7\xa5=\x84=@\xe0\x03\xfa\xa8\xc8\xd2eX\xf6\xcdP\x05\x10\xf8\x80\x8e\x8a\xeb\xe5>I\xecr\xee\x5c6\x00\x81\x0f\xe8\xb6\xa2\xe2\xfa}M\x04 
\xf0\x01\x1d\x95\xe7\xf9(K\x97a9Z\x932,\x00\x02\x1f\xd0\xcb\xb0\x17V\x93HM\xc4X\x87\xf5r\x01\x04>\xa0\xd7\xc2Pmj\xa2\xc6\xa1\xd9\xaa\x00\x02\x1f\xd0Q\xb1\x0c\xcb\xb3\xc4.\x97\xd9\x1a\x95a\x01\x10\xf8\x80>\xaa\x0as\xca\xb0\x00\x08|@W\xc52,\xbb\x89]\xc2z\xb9\xc7Z\x0a@\xe0\x03\xbak\x5cq\xbd2,\x00\x02\x1f\xd0Uy\x9e\x870\x97Z/7\x94a9\xd3R\x00\x02\x1f\xd0\xcd\xb0\x17\xca\xb0\x14\x89]B\x19\x16\xbd{\x00\x02\x1f\xd0a!\xec\xa5\xca\xb0\x14&j\x00\x08|@G\xe5y\xbe]^<M\xecrY\x86=eX\x00\x04>\xa0\xc3\xaa\xc2\xdcH\x13\x01\x08|@G\xe5y\xbe\x97\xa5\xd7\xcb=\xb5^.\x80\xc0\x07t\x9b\xde=\x00\x81\x0f\xe8\xab<\xcf\x8b,]\x86\xe5\xc0z\xb9\x00\x02\x1f\xd0\xdd\xb07\xc8\xd2eVB\x19\x16\x135\x00\x04>\xa0\xc3\x8a,]\x86\xc5z\xb9\x00\x02\x1f\xd0Uq\xbd\xdc'\x89]\xce\xcb\xb07\xd6R\x00\x02\x1f\xd0]E\xc5\xf5V\xd4\x00\x10\xf8\x80\xae\xca\xf3|\x94\xa5\xcb\xb0\x9c(\xc3\x02 \xf0\x01\xdd\x0d{\xd6\xcb\x05\x10\xf8\x80\x9e\x0ba.U\x86\xe5P\x19\x16\x00\x81\x0f\xe8\xa8X\x86\xe5Yb\x97\xcbL\x19\x16\x00\x81\x0f\xe8\xb4\xaa0W(\xc3\x02 \xf0\x01\x1d\x15\xcb\xb0\xec&v9U\x86\x05@\xe0\x03\xba\xad\xaawom&j\x94\xe1wRn\xd7\x1d\xdd&^\xca\xf4\xe0opTng\xaf\xbd\xb6\x8f\xe3\x17S\x81\x0f`\xca7\xd7\x10\xe6\xb6\x12\xbb\x1c]__\x9fi)`\x09\xefG\xe3\xf2\xe2\xc5=\xefIa\x04\xe2\xc3\xb8\xbe\xb7\xc0\x07\xd0\xf0\xcdU\x19\x16\xa0-\xefG\xe1\xbd\xe8I\xc5n\xcf\xfa\xdc\xd3'\xf0\x01\x8b\x12\xde`S\xeb\xe5\x1e\x9a\xa8\x01,I\xdd/\x97\x85\xc0\x07P\xff\xdb\xf4\xa0\xbcx\x9a\xd8\xe5\xb2\x0c{\x85\x96\x02\x96\xf0~4\xac\xf8\xf2y\xd7\x8e\xc0\x07P\xdf\xb8\xe2\xfa\x91&\x02\x10\xf8\x80\xee~\x9b\xde\xab\xf8\x96|j\xbd\xdcN*4\x01t\xd7\x1b\x9a\x00\x98\xb3\xaa2,\xa35n\x9b6\xcdH\x0e\x93j\xb6j\xee\xfb\x5cH\xa7\xc3.\x1a\xec{%\xf0\x01T\x883\xe16+\x82\xc3\xc5\xba\xb6O\xf9\xdc\xf7[t\xac\x8ek\x06\xbe\xf36=n\x98\xe2\xef\xee\xa2|\xbd\x9ff\xf5\xce\xcf\xeb\xed\x12\x8f\x86t\x81y\x05\x88\xd0c\xb4_\xf1\xcd\xb9\xd0R\xad8V\xe18\xed\xd6\xdc}\xa4\xc5\xe8\x81QV\xdd{w.\xf0\x01\xd4\xfbf\x9c\x9a\x09\xb7\xaf\x0cK+\xc2\xde\xa0A\xf0>P\x18\x9b>\x88#\x0b\xc3r;}`\x97\xa3p}\x9f\xdf\xa3\x0c\xe9\x02
\xf3\x08\x11\xe1\x8d4U\xd4\xf4\xdcz\xb9\xad1\xce\xea\x95\xa88W:\x87\x9e\x85\xbe\xf0\xe5eX\xbe_m\x97\x97a\x0b_~&\xe5v\xb6\x0e_F\x05>`\x1e\xaa\x82\x81s\xc0\xda\x11\xcc\xc3q\xa8[gl\xa4\xc5\xe8q\xf0[\xbb\x9ekC\xba\xc0\xac!bT\x11\x22N\xcc\xf0l\xc5q\xda\xce\xea\x0f\xe5\xbek(\x17\x04>\x80\xdb\x10a\xbd\xdc\xee\x18g\xf5\x86rC\x9d\xc4C\xcd\x05\x02\x1f\xc0\xad\x10\xe6ReX\x0e\xd7\xb9\x0cK\x8b\x82y\x08\xe5uJ\xb0\x84\x80>\xd2b \xf0\x01\xdc\x86\x88Ay\xf1,\xb1\xcbe\xd6\xe3\x12\x07\x1d:N\xdb\x15\xc7\xe9\xaeB@\x07\x81\x0f\xe0\xae\xaa0W(\xc3\xd2\x0a\xe3\x9a\xfb\x19\xca\x85\x1e3K\x17h,\x96a\xd9\xad\x08\x0fc-\xb5\xf2\xe3\x14\x02\x5c\xdd\xa1\xdc=-\x06+\xf9;\x1dd7%b\xb2ENp\x13\xf8\x80iT\xf6\xeei\xa2V\x84\xf2\xa75w\x1f\xe9\x8d]\xd9q\x0a\x13\x9f\xc2\xb1\xba\xad\x0d\x17\xfe\xbfS\x11\xce\xcf\xeel\x13\xc3\xf0\x9d:\xd6{\xf1x\x0f\xee;\xce\xe5>\x0f\x1d\xe7\xe3Y\xffF\x05>\xa0\xe9\x9b\xd6(K\xf7\x1a\x1d)\xc3\xd2\x8a\x0f\x96q\xcd\xddC\xd9\x9cc\xad\xb6\xd4\xe3\x13>\xecG\xf1\xc3\x7f\xab\xe1\xafo\xc4\xa0\xb0s\xe7\xf6\xc2\xea\x11\xe3y\xf4\xaa\xc7/\x0a\x1f\xd6\xdc\xfd\xcb\x8bz\xed\xc4\xd7\xf0EVofy\xe5\xe3\xa8\xfb\xbc\xca\xdb\xc9\x17\xf4\xe5+\x1c\xef'3\x1c\xe7\x17\xe5\xed\xdc.\xfd6U\xf8s\x0e\x1f\xd0\xf4M\xf8\xb0\xa2\xf7A\x19\x96\xd5+\xb2\xf4\xec\xe9\xbb\xc7k\xa4\xb9\x96\xf7e\xa9\xdcBo\xcd\xcb\xecf\x22\xcd\xd6\x9cnz'\x06\x82\x8b\x18.\xa6\x16\xbf\xac=\xaf\xb9\xfb8\xbe',\xc2\xb8f\xd8{\xde\xd6/,!\xd8\x97\xdb$\x06\xcd's\xb8\xc9\xf0zy\x11\x82p\x98y\xdf\xb4\xed\x05>\xa0i\x90H\xbd\x09\x1f\x1a\x1a\x5c\xf9\x87L\xe852\x94\xdb\xbe\xa0w\x11?\xac\xb7\x16xW!\xe4\x7f\x18\xcf\xdd\x9c%\xf4\x85/m\xe75v\x0d\xef\x05\xc7\x0bz\x0d\xef\xd6\xd8\xf5<k\xe9\xe9#q$\xe4,\xab\xbf\xb2M\x13\x1b\xf1\x0b\xc3\xb6\xc0\x07,\xe4\xdbjE\x90\xb8\xb4\xf6\xea\xca\x8fQ\x93\xa1\xdc\xe7\x86r\x97rL\x0ec\xd0\xdb\x5c\xe2\xdd>\x0d=K3\xf6\xbe\x8dj\xee\xb7\x13\x97\xec[\xc5k\xb8\x95_X\xee\x1c\xf3\x8d\x05\xde\xcdU\xd3Sg\x04>\xa0\xae\xaa7aC\xb9\xed8Fu>dB\x8dD\xe1|9V\xb5D\xddN\x83\xe0\xf4)qi\xbdwk\xee^\xc4/\x84\xcb|\x0d\xb7r\xf9\xbfX\xe4\xfc\xe9\x12\xee\xaa\xf1\
x975\x81\x0f\xa8\xf3&6\xcc\xd2C\x13\xa7z\x8bV~\x8c\xea\x0e\x83\x05\x86r\x97g\x96\xbf\x8b\xf3\x19\xef{7\x06\x90iC_\xe8\xa9:\xad\xb1\xeb\xc6,\xe1r\x8a\xd7p+kF\xc6\xc7\xff\xac\xad\xaf+\x81\x0f\xa8\xfb\xad;\x19 4\xd1J?h\x9a\x0e\xe5N\xb4\xdar\xc4`}\x94\xd8%L\x9c9\xc9nz\xd3\xde.\xb7\xcf\x86\x99\xa2q\xdb\xbe\xfdw\xf9\xf3\xc7\xe5\xf6\xe5x[W\x0d\x1e\xc2\xb3\xb8\xda\xca\xb4F5\xefo\xa6\xa1\xdd\x06\xaf\xe1VN4\xaa1\xa1\xedS\xa1\xf5\xf6\x98\xdf9\xde\xb7\xc7\xf9\xedx\xddC\xc7\xfaj\x9a/\xd8\xca\xb2\x00Uod\xa1\x87`\xb3\x22@\x5ch\xa9\x95\x0ao\xfe\x86r\xdb}|\x9e\xbc\x16Z\xc2\xcf\x0e\xeb\x0eK\xc6\xbf\xb1\xb0\x1d\xc7p\x11\xc2U\xdd\xde\xa4\x10D\x86S\x06\xd6\x8b8\x01\xe1\x83\x1a\xbb\x87\xa1\xdd\xe3)\xdf\x0f\xc65_\xc3\xa3\x96\xbe\xdfT\xad+~+\xf4\xda\xee?\xf4\xa5\xeb\xceq\x9e\xdcy\x0f\x0e=\x87{w^C\xe3i\x1e\xa0\x1e>\xa0\xea[\xeb~\xc5\xb7m\x01b\xb5\xc7(\x1c\x9f\xba3\x01\xf7\x0c\xe5._\xec\x8d\xb9\x8a\xdbA\xb9\x0d\xca\x9f\x8d\xa6=\x07-\x1c\xc38A\xea\xed\xac~\xef\xdb\xf6\x8c\x8f\xff\xa4\xc6\xaeS\x0d\xed6\x18\xca=j\xf1\xa9#\xa3\x9aao\xd8\xb4\x87=<\xe7\xf0z\xc9nz\xffN\x04>`\x11\x0e+\xbeu[/w\xb5ao\xd0 p\x1f\xb4\xf1$\xf752\x8aAon\x7f318\x8cj\xee\xbe?\x87\xc7?\xf7\xa1\xdd\x06C\xb9\x97YK'\x86\xc5s\x9c\xabz\xf7\xaeb\xd8{5\xc3\xf1\xbe(\xb7\xbdi\xff\x8e\x05>\xe0\xa17\xb1\xd0#\x90*\x16z\xde\xc6\x13\xa7\xd7\xcc8\xab7\x0cv\xaed\xcej\xc5^\x9aW\x8b\xb8\xdd,}\x8e\xe0\xad\xbd\x19\xef\xe7U\x83\xdbh2k\xb7\xeek\xb8\xcd\xbd\xd3\xc3:\xcfs\xd5\x8f_\xe0\x03\x1eR\x15\xe6\x94aYm 
o2\x94;\xd2b\xbdV'\xcco,q\x15\x8eZC\xbb\xf1\xf1\xd4\x19\xcam{\xeft\x9dv]\xf9P\xb4\xc0\x07\xdc\xf7F<\xaa\x08\x13'fz\xae\xf4\xf8\x84\xde\xd7\xf7j\xeen(\xb7\xe7\xe2\x89\xfeu\xce\xb1\x1b\xce\xe1\xeeB\xb8\xacS.&9\xb4\xdb`(\xf7\xb4'\xbd\xd3+\xff\x1b\x14\xf8\x80\xfb\xde\x88\xab\xde`\xf5\xee\xad\xd6\xb8\xe6~\xa7\x86r\xd7F\x9d/`\xdb\xb3\xdeI\x1c\x96\x1c\xd5\x0d\x87\x89\xd5>\xea\xccj\xed\xd3Z\xcf\xdb\xab~\x00\x02\x1f\xd0\xf4\x8d\xf8@\x19\x96\x95\x06\xf2\x10\xe0\xea\xac\xc7\xda\xa7\x0fK\xaa\xd5\xe9Az4\x8f;\x8a=\xc6\x075v\xbdwh7\xf6P\xd7))3\xea\xd1{\xcd@\xe0\x03\xda\x14&\x06Y\xba\xf7.\xcc\x943Qcu\xc7\xa7\xee\x07eP\x08\xe6k\xa5\xce\x84\x80\x9dy\xddY\xec9\xae\xb3\x0a\xc7\xee=\xe7\x0e\xd6y\x0f9\xea\xd9\xea=\x85\xc0\x07\xb4\x892,\xed\x0d{MV\xd385\x83z\xbd\xac\xe8<\xcdQV\xafT\xcb\xf8\xce\xebxT#x\xb6\xb6\x04\xcb\x03\xea\xb4\xfdf\xf9\xdc\xc7\xab|\x90\x02\x1fp\xfbF\x1c\xbe\x85\xefV\x84\x88\xb1\x96Zi\x0f\x81\xa1\x5c\xda\x142/j\x06\xb3\x10v\x8a\x06\xcb\x8fu\xad@\xf8\xa4\xe6~OB\xe8K\x9c\xd7\xb8P\x96V\x03nU\xbd\x11\x17\x9ah\xa5a\xfci\xcd\xdd;q\xdeS\xf9\x9c&K\xb8\x9b\xb3\xb2-\xf6;z\xccC(\xd8\x8e[\xf8\xf70^5\xc8\xea-\xe1\xb5\xac\xd07\xae\xb9R\xc6~|\xecU5\xf7:7\xab<\x0c=\x97mpY\xf3\xb8\x84\xda\xa6\xc3\x10\x80\x97\xfd\x05Z\xe0\x03n\x87YR\xbdGG\xca\xb0\xac\xf4\x83\xbf\xee\x07\xc3I\x87\xce{\xdaqt?u\x9c\xf7b\xb0\x1b\xb6)\xd4\xd5\xf9\x92\x91\xdd\xac\xff\x9a\x0as\x1bY\xba\x90{\xd0\xe5Y\xe5\xe1q\xbf\xa8\xb9o8\xb6/\xe2\x04\xac\xb0\x1d/\xa3G\xd3\x90.\xf8\xa0\xa9\x1af\xb9\xca\x94aY\xf5\x07I\x9d\x0f\x7fC\xb9\x1d\xfd\xb2Un!\xa4\x7f7\x06\x86'\x1d\x0b{MW\xe1\xe8\xe5\xeb7\xf6\xd6\x9d6\xfc\xb5\xcdx\xcc/\xe2P\xefBK\xb7\x08|@Q\xf1\xcd\xfc\xd0D\x8d\x95\x85\x81\xf0!\xdad(\xd7q\xea\xc8\x97\xacxN\xdb\xab\xf8\x81\xbf\xdb\xf5\xe7\xd4`\x15\x8e\x87\xec\xf7`Vy\xf8{=\x9f\xe2\xf7n{?\xbfQ\xbe&\xce\xe2\x97\x80\xb9\x9f\xe7'\xf0\xc1z\x7f\xf0\x0c*\x02\xc5\xa5\xc2\xbd\xab\x0b\x05Y\xfd\xa1\xdc\xbe\x95\xb0\xe8\xf3q\x0d\xbd\xe5!\xd8<\xcb\xea\xad!\xdb\xb5/\x8f\xd3\x04\x9e\x93>L\x08\x8b_\xb8\x86S\xb6\xc1\xad\xad\xec\x93^\xbfb\x9e\x
c1O\xe0\x83\xf5V\xf5&k(w\xb5\xc7\xa6N \xb8t\x9c\xba\x11\xe0\xe3D\x95\xf7z\x18\xf4\xee\x06\x9eQ\xc3_\xbb\xcczt*\xc2\x9d\xd0w:\xe3Mm\xc4/\x05\x17\xf1\x5c\xbf\x99\x99\xb4\x01\xeb\xfb\x01\x14\xde\x94R'\xce\x9f\xea5Z\xd9\xb1\xa93\xeb\xf1\x96\xa1\xdc\xf6\x1f\xcfpn\xd6d\xc6\xa0\x17z\x8d\xc2q>\xcb\xbe\xbf\xc8\xf2\xe4\xce\xbf\xc3\xb9\xb8[\xab|\xaea\x86m\xf9|\x8f\xb2\xea\x09\x1a\xb7\xce\xfa\xf6\xfa\xbd\x0d}1\xa8=\x9b\xf1\xe6>\x0e~\xf1=a4\xcb\x0cf\x81\x0f\xd6\xd7\xb8\xe2z\xbdF\xab\x09\x07M\x86r\x9fwu\xf6t\xf9\xb8sa/)\xf4\x10\x1d\xc7@4\xa9y_\xafZ\xf2|\x9f4\xf8\x95\xb0\x12\xc7^\x1f\xbf\x5c\x86\xd3ab\xb1\xe5\xc3l\xf6\xf34C\x90\x9f\x84S\x02\xa6\x1d\xfe\x16\xf8`=CE\xd5z\xb9\xcf\xbbV\x0b\xabG\x8e\xb3\xfaC\xb9\x85\xe6j}xo\x12\xf6\xaeb8\x18wq\x02C|\xbe\xd3\x04\xb7\x8fg\xa8\xf6q)\xc0\xf8\x9c\xf6\xe2\x88\xca\xa8a\x18~]x\x1d\x85r.\xd94\xa1\xcf9|\xb0\x9e\x1fBE\xc5\x87\x8e \xb1\xba ^\xb7>\x9d\xa1\xdc\xfe\x84\xf7\x8f\xbfd\x95\xdb \xf4\x0au8\xf8\x1cf\xd3\x95\x94\x09m4\xee\xf3\x0b!\xf4\xd2\x96[\x08|\x8f\xcb\xed ~a\x9b\xd6\x8b8\xc4+\xf0\x01\x95o\xca\xd6\xcbm_\xd8\x1b4\x08\xda\x07\x0aa\xb7\xfex\x8ej\x86\xf7\xf0\x05\xeb\xed\xb0\x22H\x97\xff\xeeb\x00\x99\xa5\xf7j'~\xe1\xe9\xb5\x10\xe6c\xa8\x0f\x7f\xef_,\xb7\xa3\xac\xdez\xc4\xaf\x1b\xc7\xf7\x0c\x81\x0f\xb8\xf7M\xb9\xea\xfc\x9aP\x86\xe5PK\xad\xc48\xab\xd7\x1bt\xaeTN'\xd4=F\xc3\xae\x87\xf7\x86\xe7\x9d&\xdb\xaci\x88\xe9x\xf8;\x8b\xbd~\xe19\x1f4\x0c~\x1bY\xc3\x91\x18\x81\x0f\xd6KU\x98\x1bi\xa2\x95|`\x867\xee\xdaC\xb9Z\xac\xf5\xc73\xf4v\xd5\x19\xda<\xe8\xc9\xb9\xb2u\x86\xaeOj\x86\x98\xb5\xab\x0c\x10zv\xe3\x97\xb8\x10\xfc\x8e\x1a\xfc\xea\x93&\x01Y\xe0\x83\xf5\xfa\x10J\x85\x8a\x13\xc3\x84+9.\xa1\xd7\xb5n\xe9\x86\x03\x93i:\xa1\xce\xf9UW}\xe8\xa9\xady\xdei\x189\xd8\xab\x19f\xb6\xe6Uw\xae\xa3\xc1/|\xa1{g\xce\xaf5\x81\x0f\xd6(TT\xad\x97\x1b(\xc3\xb2\x1a\xe3\x9a\xfb\x19\xca\xed\x8e\xe1\x1c\x8f{\xdb\xbf\xac\xd4yM\x8e\xee\xbc\xc7\xd4\x19\xb6|\xb6\xe8ue[\x1e\xfc\xc6\x0dB\x9f\xc0\x07|*\xcc\xa5\x86\x98\x0e\xfaX\x12\xa1\x03\x1f\x98\xe1\xc3\xb2N\xa1\xdc\xabl\xf6\xc5
\xe9Y\xde\x97\xab:\xc3\xb9\x93\x05\xdc\xfd\xce\x92\x9fn\x08&\x95C\xb9\xb7#\x07qRJ\xd1\xe0\xb6\xd7V\x0c}u\x86\xc1k\x07c\x81\x0f\xfa\xff\x014\xc8\xd2\xbdw\xb7\xb5\xbfX\xeeqi2\x94[\x08\xe4\x9dQ\xf7\x03\xf8b\x01\xaf\xa7e\xbe~\xeb\xac\xeaq\xf5\xfa{O\x9c\x14Vg\xad\xd9\xadx\x1f\xeb\xacN8\xae]\xd0[\xe0\x83\xf5x\xd3H\xbd)\xec+\xc3\xb2\xf4\xb0\xd7\xa4@\xed\xa9\x99\xd3\xfd\xb3\x80s1\x07K|\xfd\x0e\xcb\x8b\xa73|Q\x19\xd5\xbc\xab\xa7\xf1\xbe\xbcF\xe6@\xe0\x83~\x07\x8b\xf0f\xf9\xa4\x22L\x8c\xb5\xd4JBx\x9da\xbf\xab\xcc\xac\xdc\xbe\xfem\xce;\xa0\xed-\xe9q\xd7-\xc1r\xfe\xd0\x17\x95\x18d\x9e\xd7\xbc\xcbq\xbc\xcfuu9\xaf\x1b\x12\xf8\xa0\xdf\xaaz\x86\x0aM\xb4\x92\x10\xfe\xb4\xe6\xee\xfb\x86r{k0\xc7\xd7\xd4 \x9b\xad\xe8q\x13\xe3\x9a_V\xaa\xbe\xa8\x145\xc3\xcc\xe6\x9a\xbfO\xcd-\xec\x0a|\xd0\xdf`\x11\xdepS\xe7\xd8\x1c)\xc3\xb2\xf4c\xd2\xa4@\xed\x89\xde\xd7^\x9bg\x8f\xdcxI\xaf\xdf\xf0\x98wk\xecZY>(\x9eF2\xaay\xd7k9\xb4\x1b\xdf/6\xe6u{\x02\x1f\xf4\xf7\x8d\x22\xd5\xbbg\xbd\xdc\xd5\xa8\xbb\xd6\xa8\xa1\xdc\xee\xaa{\xde\xd5\xde\x9c\xfe\xd6\xc3\xdf\xf1N\xc3\xf7\x86i\xeegP3X^f5'\x81\xc5/\x9cu\x87v\x8f\xd7ph\xb7\xcek\xe4T\xe0\x83\xf5\xb6_\xf1\xcd\xf0\xd0P\xe1\xd2Cx\x93\xb5FG&\xd2tS<n\xb5\x86*g];6\xf6\xe2?k\xf8k\xd3\xce\xe6\x1dg\xf5z\x9b\x9a\xbev\x8b\x9a\xed\xb5\x91\xb5\xb4TK\x08\xa2\xf3\xee\x81\x8c\xe1\xb6\xce\x97\xf2\x89\xc0\x07\xeb\x1b,\x06\x15\x1f\x02\x97\x0a\xf8.\xff\x03!k6\x94{\xac\xd5:\xad\xee\xf1+\xa6-\xa7\x12{\xf6^,\xe9\xf5[g5\x8d\xe0y\xd3\xd3D\x1a\x0e\xed\xee\xc6/Nm\x13\x1e\xd3\x87\xe5c\x9b\xcc#\xf8\xc5\xf7\x8b\xd0\x8euF\x03j\x87\xe07\xfc]B\xefT\xbd\x01XQc5\xc7\xa4\xee\xb98\x83\xf0\xc1\xd1\xd2\xe7\xb1oi\xb7Z\xc2\x90f\x9d\x899\xe15\x11B\xc2\xa8n\xc8\x8f\x81\xa2N\x0d\xbcy\x85\xbd\x10H\xdf\xab\xb1\xeb\xd4\xa7\x89\x84\x90X\xde\xcf\xf3\x9am\x16f\xed\x0eZ\xd6\x03~\x1bBwb\xf0;\x8d\x7f\xf3\xc7M\x1fg\xc3\xe3{\xdad\xa4F\xe0\x83\x1e\x89o\x16;\x15o\x10z\x8f\x96{L\xea\x9e\xe8~k\xab\xc5O\xe7\x91#Z+\xc0\x5c\xc4\x0f\xfd:\xbdb!\xf4}\x90\x0a\x091t\x85\xbf\xedQ\xc5\xeb\xe3*\x9b\xe3I\xfe\x0d{\xa6g=\x0d\xa1\x88
\xc1i\xb3F{\x1dg\xf5\x96\xaf[\xc6\xdf\xf7\xa3{\xfe\xbew\xe2\xf6\x22\x1e\xd7I\xdc\xce\xeek\xa3\x06\xc7w\xa6/\xef\x02\x1f\xf4K\xd5\x9b\xb3\xde\xbd\xf6\x1d\x13\xfa)|x\xbfl\xb0\xff\xdd\x900Mx\xbb\x9d\xe8\xf3\xc1\x1c\x9fCQ3\x80\xcc|\x1aB\x08B\xf1\x9c\xc4\x0f\xeb\xb4U\x18fnIA\xf2\xbd\x9a\xc7\xf5Y\x0cw\xb7?\xbf\xcc\xea\x0d\xd9>\xe4\xa0io\xbbs\xf8\xa0'\xe2y6\xa97\x90\xe7\x86\xe3VbC\x13\xac\x9f8\xd4\xf6\xee\x92^7!\xec\x0dk\x86\xaeZ\xe7\x0c6\xa8\x179\xb7\x19\xe5\x0dg\xed\x16\x0b(^\xbd\x88\xc0\xf7\x90Y\xc2\xde\xd14\xe7a\x0b|\xd0\x8f\xb0W5\xa3K\x19\x16X~\xe8\x0b=PG\x0b\xbe\x9b\xdb\xb0W\xf7\xcb\x5c\xe5\xb0|\xc3\xa5\xff\xe6=\xa3\xbc\x88\xcf\xa9N \x1e\xb7\xe0}ww\xc9w\x1b\xc2\xdeT\x01[\xe0\x83~8\xac\xe8\x11(\x94\xf9\x80\x95\x84\xbe\xf0\xe1\xfc|A7\x1f\xce\x0f\x1b\xbc\x16\xf6\xaa\xea\xb2\xd59\x0fs\x9c\xd5\xeba\x9c\xfb9\xc1\x0dg\xed\xee\xc4\xd9\xca\xab2X\xf2\xfd\xbd;m\xd8\x13\xf8\xa0\x07\xe2\x09\xbf\xa9\xfan\x97-9\xd7\x05\xd65\xf4\x85\xd3-\xbe\x9c\xd5\xeb\xb9\xaa#\x9c\xff\xf5Ny\xbb\xc3)\xbe\xc8mW\xbc\x9f\x84@Q\xa7\xd7ja\xc5\xc1c\x88<\xa9\xb9\xfb\xb3iK\xdb\xcc\xe1q\x86\xa0\xfd8[|/n\xb8\xfd\xc7\xb3\xbe\x8f\x0b|\xd0}Uo\x02#M\x04+\x0f}!\xc4\x0c\xca\xed`\x86\xe0w\x1e\x83\xde \xb1\xec^\xd5\xd0\xee 
\x11\xf6\x06Y\xcdU2\xb2\x9bQ\x83\x8b\x056\xd9\xa8A;\x8dWx\x5c/b\xaf\xdbg\xb3\x9bs6O\xe7t\xd3\xe1\xb9?\x8fAo4\x8f\xb6\xce\xcb\x1b\xf1\x97\x08\xd1\xc6W\xde\x9ad\x0d\x96)\x8aN\xaf\xde\xffh\xb8\x8a\xc7\x1bK~\xa4f\xe5\x85!\x97\xa1#\x0b\xed\x12\xffv\xc3\xdf\xe6v\xe2=\xe72\x06\xb8\xf0\xbetlu\x9c\xce\x1c\xdbG\xf1\xb8\xde\x1e\xdfG5>WBP|\x15\x8f\xf5d\x11\x13\xec\x94e\x81n\xd3\xbb\x07\x1d\x14{\xfc\xd4\xc4\xec\xe7\xb1\xfd^pk\xd3\xe32\xa4\x0b\xdd\xfd\x16Yd\xe9\xa9\xfd\x07z\x04\x00\x10\xf8\xa0\xbbao\x90\xa5\x8b(\x87\xf3?L\xd4\x00@\xe0\x83\x0e+\xb2t\xd9\x84}eX\x00\x10\xf8\xa0\xa3b\x05\xfcT\x19\x96\xf3\xc4\x0c>\x00\x04>\xa0\x03\x8a\x8a\xeb\xad\x97\x0b\x80\xc0\x07]\x15\x8b\xa2\xa6\xa6\xf7\x1f\xc5\xf5(\x01@\xe0\x83\x0e\x86\xbdP\xcb)5\x11\xc3z\xb9\x00\x08|\xd0qa\xa865Q\xe3P\x19\x16\x00\x04>\xe8\xa8X\x86\xe5Yb\x97P\x91_\x19\x16\x00\x04>\xe8\xb0\xaa0\xa7\x0c\x0b\x00\x02\x1ftU,\xc3\xb2\x9b\xd8\xe54.\xd3\x04\x00\x02\x1ft\xd4\xb8\xe2zeX\x00\x10\xf8\xa0\xab\xf2<\x0fa.\xb5^n(\xc3r\xa6\xa5\x00\x10\xf8\xa0\x9ba/\x94a)\x12\xbb\x842,z\xf7\x00\x10\xf8\xa0\xc3B\xd8K\x95a)L\xd4\x00@\xe0\x83\x8e\xca\xf3|\xbb\xbcx\x9a\xd8\xe5\xb2\x0c{\xca\xb0\x00 \xf0A\x87U\x85\xb9\x91&\x02@\xe0\x83\x8e\xca\xf3|/K\xaf\x97{j\xbd\x5c\x00\x04>\xe86\xbd{\x00\x08|\xd0Wy\x9e\x17Y\xba\x0c\xcb\x81\xf5r\x01\x10\xf8\xa0\xbbao\x90\xa5\xcb\xac\x842,&j\x00 \xf0A\x87\x15Y\xba\x0c\x8b\xf5r\x01\x10\xf8\xa0\xab\xe2z\xb9O\x12\xbb\x9c\x97ao\xac\xa5\x00\x10\xf8\xa0\xbb\x8a\x8a\xeb\xad\xa8\x01\x80\xc0\x07]\x95\xe7\xf9(K\x97a9R\x86\x05\x00\x81\x0f\xba\x1b\xf6\xea\xac\x97[h)\x00\x04>\xe8\xae0T\x9b*\xc3r\xa8\x0c\x0b\x00\x02\x1ftT,\xc3\xf2,\xb1\xcbe\xa6\x0c\x0b\x00\x02\x1ftZU\x98+\x94a\x01@\xe0\x83\x8e\x8aeXv\x13\xbb\x9c*\xc3\x02\x80\xc0\x07\xddV\xd5\xbb\xa7\x0c\x0b\x00\x02\x1ftU\x9e\xe7!\xccm%v\x09eX\xce\xb4\x14\x00\x02\x1ft3\xec\xd5)\xc3\xa2w\x0f\x00\x81\x0f:,\x84\xbd\xd4z\xb9\x87&j\x00 
\xf0AG\xc52,O\x13\xbb\x5c\x96a\xaf\xd0R\x00\x08|\xd0]\xe3\x8a\xebG\x9a\x08\x00\x81\x0f:*\xcf\xf3\xbd,\xbd^\xee\xa9\xf5r\x01\x10\xf8\xa0\xdb\xaa\xca\xb0\x8c4\x11\x00\x02\x1ftT\x9e\xe7E\x96^/\xf7\xb9\xf5r\x01\x10\xf8\xa0\xbba/\x94aI\x95Y\x09eX\x0a-\x05\x80\xc0\x07\xdd\x15\x86rSeX\xf6\x95a\x01@\xe0\x83\x8e\x8a\xeb\xe5>I\xecrn\xbd\x5c\x00\x04>\xe8\xb6\xa2\xe2z+j\x00 \xf0AW\xe5y>\xca\xd2eXN\x94a\x01@\xe0\x83\xee\x86=\xeb\xe5\x02 \xf0A\xcf\x850\x97*\xc3r\xa8\x0c\x0b\x00\x02\x1ftT\x5c/\xf7Yb\x97\xcb\xac\xba\x083\x00\x08|\xd0bUa\xaeP\x86\x05\x00\x81\x0f:*\x96a\xd9M\xecr\xaa\x0c\x0b\x00\x02\x1ft[U\xef\x9e\x89\x1a\x00\xac\xc4\x1b\x9a\x00f\x17\xcb\xb0l%v9\xba\xbe\xbe>\xd3R\x00\xdd\xf3\xf5\xc7_\x18\xc7\x7f\x16_z\xf9\xad\x8b.>\x07=|0\xa3?\xf9\xc7?\x10\xbe8\xa5z\xf7\x94a\x01\xe8\xb6\xe3\xecf\xe5\xa4\x97e\xf8\x9b\x94\xdbH\xe0\x835\xf3\xf8\xdb?6\xc8\xd2\xeb\xe5\x1e\x9a\xa8\x01\xd0]_z\xf9\xad\x10\xf8.\xe3\x7fCQ\xfd\x17e\xe8\xbb(\xb7\xa2\xdc\x1e\x09|\xd0s\x7f\xee\x0f\xdf\xc8~\xf5\x83_\xf9|b\x97\xcb2\xec\x15Z\x0a\xa0\xf3^\x1f\xc9\x09\xf5VC\x19\xae\xef\x86!\xdfr\xdb\x16\xf8\xa0\xa7>\xf7\xf2\x07\xabv1\x94\x0b\xd0\x0f\xe3\xec\xe6\x14\x9d\xfb\x84\xe1\xdeo\x94\xa1\xef\xac\xad\xc3\xbd\x02\x1fL\xe9\xa7\xfe\xe0G\xb3\xd3_\x9e\xa4v\x09eX\x8e\xb5\x14@\xf7}\xe9\xe5\xb7\xc2\xa99U\xef\xe9a\xf2^\x18\xee}\x15\x87{\x07\x02\x1ft\xdc\xef\xfe\xfb\xefT\xed2\xd2J\x00\xbdRw\xa5\xa4p^w\x18\xee\x0d\x93<\x8e\xcbm(\xf0A\x07\xfd\xb5\xef<\xca\xbey\xf6\xeb\xa9]\x9e[/\x17\xa0_\xbe\xf4\xf2[\xa1\xbc\xd6i\xc3_\x0b\x05\xf9?\x8c\x93<F\xab\x9a\xe4!\xf0AC?\xfe\xc7\x9f\xc9\xbe9I\x86\xbdp\x8eG\xa1\xa5\x00zi<\xe5\xef\x85I\x1e/\xca\xed\x22N\xf2\x18\x08|\xd0bo\xfe\xce\x8fg\xdf\xfe\xf6\xb7S\xbbX/\x17\xa0\xa7\xbe\xf4\xf2[!\xf0]\xcep\x13a\xb8\xf7nM\xbf=\x81\x0fZ\xe6/\xfc\xdf\x1f\xca~\xf5\x97\xfeEj\x97\xf32\xec\x1dj)\x80^\x1b\xcf\xe9vBM\xbf\x0f\xe2p\xef\xfe\x22\x87{\x05>h\xe0\xc7.+\xffd\x94a\x01\xe8\xbfy\x7f\xb1\x0f\xc3\xbd\xefe\x0b\xac\xe9'\xf0AM\x7f\xf5\xf7~\xa2\xaa\x0c\xcb\xc9\xf5\xf5\xf5DK\x01\xf4[,\xd1r\xb4\xa0\x9b\xbf\xad\xe
97\xd7%\xdc\xdep\xd8\xa0Z\x98\xa8\xf1\xea\x9b\xff3\xb9\xcf?\xfa\xb9\xbf\xf9\x8bm\x98z\x0f\xc0R\x9c\xc5p\xb6(a\xb8w\xa7\xfc\x5c\x09\xbd\x89a\x1b\x97A\xf3b\xda\x1b\xcb\xaf\xaf\xaf\x1d2\x886\xbe\xf2\xd6$\xfe\x91}\x9fP\x86\xe5W\xfe\xd9/?\xf8{\xc5\xde\xcfd?s\xf6\xdf5 \x00\x8bt\x14\x83\xdf\xa4\xe9/\x1a\xd2\x85{\xfc\xe8\xef}\xee{\xff\x0e\xeb\xe5\xa6\xca\xb0\xec\xbc\xf5\xd3\xd9\xdf\xf8\xadok4\x00\x16-\xf4(~x\xbb\x84[\x93I\x1e\x02\x1f\xbc\xe6\xc7~\xeb/g\x8f\xbe\xf1S\xd9\x0f}\xe7'?\xfe\xff\x9f\xf9\xed\x1fI\x96a\xf9\xf2\x9f\xfa\xe1\xec\x87\x7f\xf7\x8f4\x1c\x00\xcb\xf2\xf1\x12n\xd9MM\xbf\xc3:5\xfd\x0c\xe9\xc2\x1d\x9f\xff\xd9_\xf8\x9d\x9f\xf8\xf6\xc6\x9f\xbe\xfd\xff\xe7\xfe\xe2\xcb\xec_\xfd\xe2?\x7fp\xff\x9f\xff\xd9\xb7\xb3_\xf8O\xbf\xad\xe1\x00X\xb5\x93\xecf\xb8\xf7\xde\xf5~M\xda\x80\xe8\xcd\xad\xaf\x8e\x7f\x22\xfb$\xec\x05\x9f\xf9\xed\xab\xe4\xef\xfcl\xf6\xbf4\x1c\x00m\x10\x96p\xdb\xfd\xfa\xe3/\x84\xa2\xd0\xb7\x93<\xbe\xb7\x08\x80\x1e>\x88a/\xbbg\xb6\xd5\xc6\x8f\xfcQ\xf6'~\xf47\xb3\x7f\xf9\xe1\xc9\xa7~\xe7\xef\xff\xdc\xdf\xca\xfe\xee\xaf\xff7\x8d\x07@\x1b\x85\x1e\x8b\xd0\xdbw\x18\xd6\x00\x16\xf8\x10\xf6\x1e\x08{w\xfd\xd9\xcf\xfeA\xf6\x7f~\xff\xdfd\xff\xfa\xdf\xfd\xdb\x8f\xff\xbf\xb99\xc8\xfe\xe9\xc6\x8f8w\x0f\x80.\x04\xbf}\x81\x0fa\xafA\x1d\xa5\xbf\xf3\x93\xdf\xca~\xe9?\x9ee\xef|\xf1\xcf+\xc3\x02@\x9b}\xdf\xd0\xae\xc0\x87\xb0\xd7\xd0;?\xf8\xcd\xec\xaf\x7f\xe775 
\x00mt\xef\xe4\x0d\x81\x0fao\x0a\x7f\xef\x0f\x7f=\xfb\xe9\xdf\xff\xaf\x1a\x12\x806\x08\xc3\xb6\xe1s\xed\xf0\xa1\xd58\x04>\x84=\xa1\x0f\x80n:\xcfn\x86m\x8f\xef\xce\xc8\xbd\x8f\xb2,\x08{S\xfa/?\xf0\xb9\xec\xa73\x81\x0f\x80\xa5k\xbc\xc4\x9a\xc0\x87\xb07\x85\xb7\xaf\xffG\xf6\xf3\xbf\xf7k\x1a\x15\x80e\x09\xc3\xb6\xb7\x930.\x9a\xfe\xb2\xc0\x87\xb0\xd7\xd0\xe7\xf3\xff\xfd\xcb?\xff\xbb\xbf\xf65\xad\x0a\xb0\xd6\x86\xe5\xf6l\x09\xf7s\x1aC\xdex\x96\x1b\x11\xf8\x10\xf6\x9a9\xfa\xd5\xb3\xe7#\xad\x0a\xb0\xde\xbe\xfe\xf8\x0b\xfb\x0b\xbe\x8b0l\xfbq\xd1\xe4y\xdc\x98I\x1b\x08{\x0d\xfe\xf8~\xe3\xfck\xc2\x1e\x80\xb07(/^.\xe0\xa6\xef]\x16m\x1e\xf4\xf0!\xec\x09{\x0043\xef\xde\xbd0l{\xf8z\xed\xbcy\xd2\xc3\x87\xb0'\xec\x01P\xd3\xd7\x1f\x7f\xe1QyqQn\x1b3\xde\xd4\xedZ\xb7\xc54\x930\x9a\xd2\xc3\x87\xb0'\xec\x01P\xdf\xde\x8ca/\x0c\xdb\x16Y\x8d\xday\x02\x1f\x08{\x00\xacF1\xe5\xef\x85%\xcf\x0e\x9b\xd4\xce\x13\xf8@\xd8\x03`\xc9\xbe\xfe\xf8\x0b\xc3\xf2b\xb3\xc1\xaf\xccT;O\xe0\x03a\x0f\x80\xe5\xab\xfb\xd9\xf0\xf1\x92g\xb3\xd6\xce\x9b'\x936\x10\xf6\x84=\x00*\xd4,\xc52\xd7\xday\xf3\xa4\x87\x0faO\xd8\x03\xa0\xdaC\x9f\x0fa\x12\xc68\x06\xbdWm}\xf0\x02\x1f\xc2\x9e\xb0\x07@\xb5\xd7k\xef\xcde\xc93\x81\x0f\x84=\x00Z\xe0\xeb\x8f\xbf\x10>#B)\x96\xa5\xd6\xce\x13\xf8\x10\xf6\x84=\x00\x96gXn\xeff\x0bX\xf2lYL\xda@\xd8\x03\x80\x9e\xfb\x8c&@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x04>\x10\xf6\x00@\xe0\x03a\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00@\xe0\x03a\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x0
0\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81O\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\xd3\xaa\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\x08|\xc2\x9e\xb0\x07\x00\x08|\xc2\x9e\xb0\x07\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 
\xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\xb0\xb6\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x16\x05\xbe2\x9c=\x12\xf6\x00\x00z\x1c\xf8J\x87eH\x1b\x08{\x00\x00=\x0c|\xb1w/\x04\xb4\xd1\x1cnK\xd8\x03\x00h[\xe0+\xed\xc7\xcb\x99\xc2\x95\xb0\x07\x00\xd0\xde\xc0w\x1b\xac6\xcb\xd0\xb6'\xec\x01\x00\xf4(\xf0\xc5\x80\xb7yO\xf8\x13\xf6\x00\x00\xfa\x10\xf8\xee\x09x\xbbM&o\x08{\x00\x00-\x0e|1\xd8\xed\xd6\x08\x81\xc2\x1e\x00@\x17\x03_\x22\xd8U\x06/a\x0f\x00\xa0\xdb\x81/9yC\xd8\x03\x00\xe8@\xe0+C[\x08Y\x9b\x89]\xf6\x85=\x00\x80\x0e\x07\xbe\xacz\xd8v\xe7\xf5\xc9\x1b\xc2\x1e\x00@G\x02_\x0cr;5v\xdd\x17\xf6\x00\x00:\x18\xf8\xb2\x07\x86k\xef1\x12\xf6\x00\x00V'\xbf\xbe\xbe\x9e\xea\x17\xcb\x00\xf7\xaa\xbc\xd8\xa8\xb9\xfby\xb9m\x09{\x00\x00\xcb7U\x0f_\x9c\xac\xb1\xd1\xe0W\x84=\x00\x80.\x05\xbel\x8a\xa5\xd3\x84=\x00\x80\xd5h<\xa4\x1b'k\xbc\x14\xf6\x00\x00\xbaa\x9a\x1e\xbeB\xd8\x03\x00\xe8\x8eF=|on}\xf5Qyq\x915;\x7fO\xd8\x03\x00X\xa1\xa6=|{\xc2\x1e\x00@\xbf\x03\xdf\xfe\x92\x1e\x97\xb0\x07\x00\xb0\xec\xc0\xf7\xe6\xd6W\xb7\xb3\xf9\x95W\x11\xf6\x00\x00\xda\x16\xf8\xb2\xe5\xf4\xee\x09{\x00\x00\xab\x08|q\xb2\xc6\xde2\x1ePy_C\x87\x05\x00`~j\xcd\xd2\x8d+k\xbcX\xe2\xe3\xba,\xb7\xc3r\x1b\xff\xc6\xf9\xd7^9L\x00\x00\x8b\x0f|\x17\xe5\xc5\xe6\x0a\x1e\xdfU\xb9\x1d\x87\xf0W\x06\xbf3\x87\x0b\x00`\x01\x81/\x0e\xb1~\xd8\x82\xc7z\x9e\xdd\xf4\xfa\x1d\xeb\xf5\x03\x00\xa8\xaf\xce9|\xa3\x96<\xd60C8\x0c+_\x94!t\x1cg\x0d\x03\x00P!\xd9\xc3\x17'k|\xb7\xc5\x8f?\xf4\xfa\x15\xbfq\xfe\xb5c\x87\x12\x00\xe0~U=|\xa3\x96\x87\xbdCa\x0f\x00 
\xed\x8d\x8a\xeb\xf7[\xf6xM\xe2\x00\x00\x98W\xe0\x8b\x9356[\xf28M\xd8\x00\x00\x98w\xe0\xcb\xda1\x9c{\x94\xe9\xcd\x03\x00\x98\xc9\xbd\x936\xde\xdc\xfa\xea\xa0\xbcx\xb9\xa2\xc7\xa4\xe82\x00\xc0\x1c=\xd4\xc37Z\xc1c9\x8a!o\xe2\xb0\x00\x00\xf4'\xf0\xe9\xcd\x03\x00Xv\xe0{s\xeb\xab{\xd9\xe2'k\x9c\xc4\x90\xa7\xa4\x0a\x00\xc0\xb2\x03_\xb6\xb8\xde\xbd\xd0\x9b7\x8eA\xefB\xd3\x03\x00,\xc7\xf7M\xdaX\xd0d\x8d\xd3L\x81d\x00\x80\x95y\xbd\x87o4\xa7\xdb\x0d\x05\x92\xc71\xe8]hf\x00\x80\xfe\x04\xbe\xd0\x9b\x17\x86l\xc7\x9a\x16\x00\xa0e\x81\xef\xcd\xad\xaf\x86\xb07\xcdd\x0d\xbdy\x00\x00]\x08|Y\xf3\xde=\xcb\x9d\x01\x00t\xc0\xc7\x936\x1aL\xd6\x08\xbdya\xf2\x85\xe5\xce\x00\x00:\xe2\xb6\x87o\xbfb?\xbdy\x00\x00\x1d\x0f|\xa3\x07\xae\xb7\xdc\x19\x00@\xd7\x03_\x9c\xac\xb1q\xe7g\x96;\x03\x00\xe8S\xe0\xcb>\xe9\xdd\xd3\x9b\x07\x00\xd0\xd3\xc0\x17\x02\xde\x9e\xde<\x00\x80~\xfa\xff\x02\x0c\x00\x1eE:\x8bH\x8b\x05S\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x0f\xb6\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00@\x00\x00\x00@\x08\x06\x00\x00\x00\xaaiq\xde\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x12t\x00\x00\x12t\x01\xdef\x1fx\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x00\x0f3IDATx\x9c\xe5\x9by|TU\x96\xc7\xbf\xaf*KU\xaa\x0a\xb2\x90\xa4*\x8e\xa3\x01\x82c\x98\x86@l\xb7@\xc0fQ\x16\xa1\xb5\x05\x5c\x80\x06\x84\xb0((\xc8\x08\xd2\x0dQTF\xa4\x05Y\x14\x08K\x90\xc8\x9anA\x09\x81\x04ADtD\x0d\x04\xdbFIB@gHU\xc0\x90\xa4\xeaU\xaa*\xa9zw\xfe\xa8\xa4$\x92\x04*\x9b\xd3\xce\xef\xf3y\x9fT\xee;\xf7\x9c\xdf9\xef\xdc\xe5\xdd{\x9fD\x03HMM\x0du:\x9d\xcf\x02#%IJhH\xe6\x9f\x05B\x88|\xe0}`\xcb\xd2\xa5K/\xfc\xfc\xbe\xf4\xf3\x82y\xf3\xe6\xbd\x08<\x13\x1e\x1e\x1e\x1a\x1f\x1fO||<\x00\x9d;wn[\xa6\xad\x8c\xe2\xe2b\x00\xce\x9c9\xc3\x993g\xb8r\xe5J\x05\xb0r\xe9\xd2\xa5/^-\xe7\x0b@jjj\xa8\xcb\xe5Z\x01L\x188p 
\x03\x06\x0chO\xbem\x8e\xc3\x87\x0f\xf3\xe1\x87\x1f\x02l\x09\x0e\x0e\x9e\xfd\xd2K/U\x00\x04\xd4\x09\xb8\x5c\xae\x15\x1a\x8dfBJJ\x0a&\x93\xe9\x97\xe2\xd9f\x180`\x00\xf1\xf1\xf1\xa4\xa5\xa5Mp:\x9d\x00\x13\x01\xd4\x00\xf3\xe7\xcf\x7f\x16\x98?c\xc6\x8c_\xa5\xf3u0\x18\x0c\xdcv\xdbm\x9c8q\x22\xa1O\x9f>\x95\xc7\x8f\x1f\xff\x5c\x9d\x9a\x9a\x1a\xeav\xbb\xf7\x0c\x1a4H\xd3\xa3G\x8f_\x9ac\x9b\xc3`0 I\x12\xe7\xce\x9d\xbb{\xe0\xc0\x81\xebU.\x97k\x82V\xab\x0dMJJjW\x22\xa2\xa4\x18w\xe6rDIq\xbb\xda\x05HJJB\xab\xd5\x86\xba\x5c\xae\x09\x01\xc0\xc8\xee\xdd\xbb\xa3\xd1h\xda\xc5\xb8p\xc8\xb8?X\x8f\xe7\xabC\x00\xb8\xbf\xc8E}\xc7 \x02FLE\xd2\xea\xdb\x85\x83F\xa3\xa1{\xf7\xee\xe4\xe5\xe5\x8d\x0c\x00\xfa\xd7\x0dum\x8d\x9a\x83[q\x1f\xdb\x03N{\xbdr\xf7\x89\x1c\xdc\xa7\x8f\x13\x90\xfc\x10\x81\x0f\x8co\x17.\xf1\xf1\xf1\xe4\xe5\xe5\xf5W\x01m\xfe\xf4=\x85\xa7\xa9z\xf1\x09\xaa\xb3\xb7\xa2\xc86\x14\xb7B\xc0\xa0\xb1\xf5\xfe*\xb2\x8d\xea\xec\xadT\xbd\xf8\x04\x9e\xc2\xd3m\xca\x07~\xf29\x00 &&\xa6M\x8c(e\x16\x5c\x19K\xf1\x14\xe6\xfb\xca\x02z\xf6!\xe8\x0fO\xa1\x8a0\xe2\xcaJ'p\xc8x\xd4w\x0e\xa6\xfaoo\xe1>}\x1cq\xb9\x04\xc7\x9b\xcf\xa2\x8eK x\xdc<T\x11\xc66\xe1V\xe7s\x00\xb4~\x06\x88*\x19W\xd6\x16\xaa\x0fg\xfa\xca\xd47wE3z&\xean?\xcd\xac\x15\xb7\x02\x80*\xc2\x88&\xe5e<\x05\xf98w\xaf\xc6\xf3\xdfE(\xdf\x9e\xa4f\xc1\x18\x82\x06\x8c\x22x\xf8\x04\xa4\x90\xd6\xed\x1f\xeae@k\xc2uh7\xae\x0f\xd2\x11\x0e\x19\x00)D\x8f\xe6\xd1Y\x04\xdd;\xe4\x1aY\xe1Q\xea\xfd\xaf\xee\x96\x80\xee\xcf\x9b\xa8\xfe\xec\x00\xce\x9d\xab\xbc\x81\xcc\xddE\xf5'\xfb\x09\x1e1\x91\xe0A\xa3[\x9bn\xeb\x05\xc0\xfd\xddI\xec\x1b\x97 ~4\xfb\xca\x82\xef\x1f\x83f\xe4\xa4F\x9f\x9ep+\x0d\x96\x07\xdd;\x84\xc0\x84\xbe8\xdf\xdf\x8c+g\x17\xc2f\xc5\xb1m%\xce\x9c\xdd\xe8&/ 
\xe0\xdfz\xb7\x16\xed\x96\x07@\xb9l\xa6\xea\xdd7\xa9>y\xccW\x16x{ot)\x7fF\xd5\xa9\xe9Y\xa5\xe2i8\x00\xe0\xcd\x1c\xedc\xb3\x08\x1e4\x0a{\xda+\xd4|{\x12J/b}\xf5)\x82z'\x132\xf6YT\x91-\x9f\xb56;\x00\xc2n\xc3q`'\x8e\xbfm\xf4\x95\xa9\x22M\xe8\xa7-\x220\xfe\xc6\x9ePc\x19p5T\x9dL\x18\x16\xbcE\xcd\x99\x93\xc8\xeb\x16\xa3\x5c6\xe3\xfa\xe2(\xae/\x8e\xa2\xfd\xc3d\xb4C\x1eE\xd2\x19\x9a\xebF\xf3\x02\xe0<\xba\x0f\xf9\x9d\xe5\x08{m;\xd7\x19\xd0=2\x05\xed\xb0\xc7\xea\xc9\xd5\xfc#\xcf\xf7;\xb0{\xe25z\x9a\xca\x80\x9f#0\xbe7a\xab\xf6\xe2\xd8\xbf\x03\xfb_7 \xec6\xec\xbb\xd3\xa8\xda\xbf\x1d\xfd\x1f\xe7\xa0\xe9\xff`s\x5c\xf1/\x00\xd5\xff\xc8CN_\x8e\xfbB\x81\xaf,d\xf8c\xe8FM\xa9\xf7\x14\xaa\xb2v\xe0\xc8\xde\x81\xe7\x92\xb9^}\xcd}\xc3\xd1\x8dJA\x1d\xe5M]q\xe3\xfe\xfb\xa0\x1d\xf6\x18\x9a\xfe\xc3\xb1gn\xa0*k\x07\xc2&c]\xb3\x98\xaa\xac\x9d\xe8'\xce!\xa8\x81@7\x05\x95?\xc2\xe5\x8b\xa6Ss\xbe\x10!$\x02\xe3\x13\x09_\xb6\x0d\xfd\x849\xf5\x9c\xb7\xaeY\x8c-}\x05\xeeR\x0bBH\xf5.\xc7\x91\xfd\x94\xcd\x1d\x8b\xe3\xa3,\x00\x84\xb8f=\xe6\x86 
\xe9\x0c\xe8'\xcc!|\xd96\x02\xe3\x13\x11B\xa2\xe6|!\xe5\x8b\xa6\xfb\xad\xcb\xaf\x0c\xa8#\x1c:\xefu4w\xf5\xbb\xe6\xbe\xbcs\x03UG\xf6S\xb7\xce\xa2\x8e4\xa1\x1f3\x99\xaa\xac]\xd4\x5c(@\xa5\xd3\xa3\xc82\x95\xab_F\x1d\x19\xd3\xec\x00\xf8\xc8\xc7v#l\xf1:\x9c'>\xa6b\xe9\xf3\xcd\xd2\xe1W\x06\x08\xe1\xbd\x1ar^\xb1\xdb\xb0\xef\xdb\x85\x10\x10|g?:>\xbd\x10Ig\xa0b\xf5\xcb\xb8/\x99\x89x#\x83\x887\xdeE\x15iB\x08\xa8X\xb5\x18!\x9a\xc5\xf9\x1ah\xee\xea\xe7\xe3\xe6/\xfc\x0b\x80\x22!\x94\x86\x9f\x9a\xe3\xf0~<\xb2\x8cP$\x02n\xe9F\xf0]\xfdp[,\x04\xdcr\x1bB\x91\xa8:\x92\x8d:\xcaD\xc7\x99\x8b\x10\x8a\xe4m\x22\x8d\xe8j\x0e\x9a\xe2\xd6\x14\x9a\xd5\x04\x1a\x82\xe3\xc4'\xbe\xfb\xf6\xc3\xd98N\x1cC\x15e\x22\xe2\xd5\xb7q~\xfe1\xe5+_A\x0a\xd1\xe3<q\xac\xc5\xa9\xef/\xb7\xa6\xe0g\x13\x90\x1a5Tw/\xe4w\xc3q\x97Zp[,\x84\xcdZ\x88J\xa7G\x1deB\x08\x09\xeb\xf6\xcd\xa8#cjS\xb6q]\xcdAs\xf5\xb5Z\x06\xb8K\xcd\x08!\xe1*\xf6\x8e\x12\x1e\xd9N\xd5\xe7\xc7\x08(.\xe4\xca\x9b\xaf\xa2\xbd;\x19\xd7\xdfO\x12\x10\x1b\xe7\xd5\xf5_\x9f\x5c\xa3\xa3\xba\xb8\x10\xc7\xe7\xde\x19e\xf0oz\xa3\xf9M\xafV\xe1\xd6\x14Z-\x00\xea\xc8\x18j,\xa5T\x9f+B{O2!w\xf7\xa5l\xc5\x12\x00t\x03\x87\x121\xfbOTn\xdb\x84uo&\xa1OL\xf4\xe9\x92?\xcc& 
\xcaD\xe5\xf6\xcd\xb8\xfe~\xea*\x8d\xe9\xb5\xf5\x16\xb4\x98[S\xf03\x00\x8d\xdf\xd3\xdc\xdd\x17\xc7\xd7^\x07\xb4w\xf5E{O2\xaaw\xd3\x91$\x90\x0f\x1d@\x0a1\x10:v\x12\xd6\xbd\x99\x5cY\xbf\xdaW\xafl\xf9\x92\xfa\x84\xa2M\xdeE\x13\xbb\x8c|(\x9b\xc0\xd8\xaet\xf8\xfd\xf5\xdf\x02\x9b;\xa2\xb4Z\x1f\xa0\x1f4\xd4\xd7\xd6\xcb\xd6\xaf\xa6l\xdd*\xdc\xa5\x16Lk\xd2\x09K\x99\x85|\xe8\x00\xa5/-\xc0#\xdb\x11B\xa2\xc3\xe3\x93\x08\x8c\xed\xe6\xd3\xa9\x8e2\x111\xfbO\xdc\x94\x9eI`\xe7\x9f\xca\xcb\xdfMG\x91\xe5\x16qk\x0a\xad6\x0c\xaatz\xa2\x16.A\x0a1\xa0\xd8\xec\xc8\xb9\x07\x09\x88\xed\x06\x02:\xfc~4\xa1OL\xc2y:\x1f\xdd\xc0\xa1\xe8\x06\x0e\xa5\x22c\x0b\x8a\xf0\xea\x0b\x8c\xedF\xcc\x9at\xf4\x83\x86\xa0\xc82\x8e\xfc|\xc2Sf!\x14\x09\xc5f\xc7v\xe8@\x8b\xb85\x05\xbf\x02\xa0\x08\x09\xa5\x89(\x07u\x89\xc3\xf4\xfaJTQ&\x14!\xe1**\xe2\xfb\xf1c\xa8|/\x13k\xeeA\x14!\x11\xf9\xdc\x0bD>\xf7\x02\xc1=z\xe1**\x22\xb8G/\x9c\xb5r\xf6\xcf>\xa1bO&\xe8\x0ctxh\x14\xda{\x92Q\x84\x84\xfd\xb3\xe3-\xe6\xd6\x18Z\xad\x13\xacCP\x978n^\xbb\x89+\x19[\xa8x\xef\xafxlv.\xaf}\xab\xf6\xae\x845\xf7 \xb6\xdc\x83\xb8\xce\x15\xf1/k7\x11\xdc\xa5+\x8a,\xf3\xe3\xda5\x98S\x17\xa2\xd2\xeb\x09}\xe8\x11\x00B\xeeMB\xfe\xf48U\xf9\xd7_$m\xa7y\xc0\x8du6*\xbd\x9eN\xd3\x9f\xe6\x96\x8c\x9d\x04u\xee\x8a\x10\x10\x10mD\xd3#\x81\xd2\xd7_\xa3*?\x9f\xa8\xb9/\x10\xdc\xa5+\x005\xa5\x16$\xbd\x1e!\xc0c\x93\x09\x1f?\x11\x80\xe0.q>\x9b\xaesE\xad\xc2\xed\xe7h\xf5\x0c\xb8\x1a\x81F#\xd1\xff1\x9f\xef\xa7N\xa1\xda\x5c\x8a\xdbf' 
\xda\x84G\x96)Y\xb4\x10}R\x1f\xaaN\xe7\xa3\xc82\x81F#\x86\xc1C\xb0\xe6\x1c\xa4\xeat>\x9a.]){\xe7\x1d\x9fM\xcfu:\xc2_|&\xd8\x18<\xb2L@\xb4wt\xf0\xd8\xecDN\x7f\x0a\x95\xce\x80\x10\x12\xb6\xe3\x9f\xd2a\xf0\x03\xc4n\xdbA\xec\xb6\x1dt\x18|\xbfw2Ux\x8e\x0b))\xd8\xf3O\xffd\xf3:O\xb7}F\x01?\x8c\xd4X,\x5c\x5c\xb8\x88\x1ff?G@\xb4\xd1W\xd7\xbct\x19*\x9d\x81[\xd3\xd2\x10BB\x9f\xd4\x87@\xa3\x11\x8f,S\x99\x93\x8b\x10\x12\xa5o\xbd\x8d\xb6g\x02]\xb6o\xbf\xcaf\xd3v\xdbi*|}\x99\x1a\x8b\x85\xcb[\xb6Ry0\x87@\xa3\x91\x7f]\xb1\x9c\x90\x84\x9e\x94\xbc\xf6:\x95\x07sp\xdbdnzy\x06\xa2V\x9f\xb3\xb0\x88\x8a\x839T\x1e\xcc\x01\xc0\xd0'\x89\xe8\xa7g\xf8\x82Rg\xf3\xf2\x96w\x88\x99\xff<\x81\xc6\x867J\xdae\x22\x84\x90\xbcW\x03\xf0\xc82\x97\xd3\xb7R4f,U\xa7\xbe&f\xde\xf3t\xdd\xb9\x8d\x90\x84\x9e\x00\x18\x92\x92|\xf5\xed\xa7NS\xf2\x9f\xcb@H\x94\xaeY\x8b\x22\xdb1\xf4\xe9\x03B\xf29\x0fP\xbaf\xad\xaf\x8eb\xb3S4f,%\xaf-\xa3\xc6b\xf1\x8b[Shq'\xe8\x91e\xcav\xbf\xc7\x95\xcc=H\x12D?=\x83\xf0Q\x0f_#g\xe8\x9b\xe4\xab_q \x97 \x93\xb7Y\xdc\x96\xbd\x07\xb5^\xcf\x0f\x0b^D\xdf\xc7\xdb\x1cj,\xa5\x98W\xadE>\xfe) \x11>\xeaa\x8c3\xa7Sq \x97\xcb\xe9[\xb1~\xfc\x19\xe1\xa3\x1e\x22b\xf4\xc3\xa8\xf5\xfaF\xb9\xdd\x08Z\x14\x80\xf2\xec\x5c,\xab\xd7!!\xae!\xd4\xa0\xb1h\xafs\x1d\x1f\xb8\x1fM\x5c\x17\xe4\x93_\xfb\xe4\xab-\xa5\x04\x19\xa3\xb9\xb8\xe4/T\x1c\xc8\xad\xad!a\xe8{/\xc6\x99\xde\xb5\xbe\xd0!\x83\x09\x1d2\x98K\x9b\xb7r%s\x0fe\x99{\x89\x18\xf5\x10Q\x93\xc6\xb5\xef\xcbPyv.\x977o\xa5\xdar\x89\x88\xd1\x0f\x135il\x93\x8e\x03\x5c\xda\xb4\x95js)\x005f\x0b5f\x0b\x81\xd1Q\x98W\xad\xc5Yp\xcewi\xe2\xba\xa0\xd2\xe9\xf1\xc82A\xa6hnZ0\xf7\x1a]Q\x93\xc6\x131\xfaa\xcav\xef\xa1l\xf7\x1e*\xb2s\x9a\xdd\x07\xf8\xb7/\xa0x\xd7\xb1/\xbe\xf2:a\xc3\xee\xe7\xd6I\xe3\x082\xdd\xd8\xee\xad\xfd\xd4i_\xfd\xf2\xfd\xde\x0eO\xad\xd7\xe3,(B\xd7\xab'\xf6\x93\xf9\xc4\xaey\x03\xb5AG\xd1\x1f\xa7\x01`\x9c9\xad\xd1\xc0\xaa\xf5z\xa2&\x8d#t\xc8 
~\x98\x9f\xea\xd3\xed/\xfc\x1c\x06\x95\xab.\xffB.\x84@\x08\x85\xc8I\xe3\xf8\xf7\xcf>$r\xd28\x82\xe3:\x13\xbb\xe6\x0dB\x87\x0eF\x08\x05\xb5AG\xe5\xc7\x9f\x22\x84B\xa01\x8a\x0e\xc9M\x1f\xdb\xa96[\xb8\xb4i+\x8e\x82B\x1f/\x7f\xe1W\x00nym1A\xc6(\x10\x0a\xe5Y\xd9\x14\x8d\x9fL\xe9\xc6-xl\xd7\x7f]U\xebC@(T\xec?P+/|;#5f\x0b\x08\x05M\x5cW\x9c\x85\x85 \x14t\xbd\x1a?\xb0\xe5\xb1\xc9\x94n\xdcB\xd1\xf8\xc9\x94ge\x83P\x082Fq\xcbk\x8b\xfdq\x07\xf0\xb3\x09t\xe8\xd7\x07]\xef\x04~\xdc\x99I\xd9\xaeL<V+\x976l\xa6b_6\xa693\xe9\xd0\xafo\xa3u;\x8dy\x04\xeb\xd1cT\x97\x94\xf0\xfd\xf3\x7f\x02\xa0\xa6\xc4L\xe9\x86t_\x00\x8a\xa7?\x83\xfd\xa4wQ%\xd0\x18\xdd\xa0\x1e\xeb\xc7\x9f`^\xbe\x9a\xea\xda\xa1Pm\xd0\x131f\x14\x9d\x1e\x1d\x85\xda\xe0\xff\x19\x02\xff\xe6\x01\xb5\x06\xa3\xa7L\xa4k\xc6fB\x87>\x80P\x14\x5c%%\x5c\x98\xfb\x02\xc5\xd3f\xe2((l\xb0\x9e\xaew\x82O\xdeq\xf6,\x8e\xb3g\x11B`\xcf;\x89\xe3l\x01BQ\xd0\xf5\xee\x89P\x94\xda\xdf\xf5\x8f(;\x0a\x0a)\x9e6\x93\x0bs_\xc0UR\x82P\x14B\x87>@\xd7\x8c\xcdDO\x99\xd8,\xe7\xa1\x05\xbb\xc3A\xa7. 
l\xf8\x10.\xa5mB\xce;\x85\xfc\xd5I\x0a\x1f\x9f@\xd8\x83C\x89\x99\xf3\xcc5\xa4\x82L\xd1 \x04\xda8\xef[\xa0.\xb1\x17\xd1)O\x22\xe7\x9d\xa2x\xea\xd3\xe8z\xf7\xf2\x0d5\xdan\xde\xc5S\x8fM\xa6d\xf9J\xca\xf7e\xfb\xf4\xe8\x13{\x11\x95\xf2$\xfa\xc4\x1b_4m\x0c->\x1f\xa0O\xec\x85~\xfd\x1a\xca\xf7\xed\xa74m\x13\xd5%f\xca?\xc8\xc2z\xe4(\x9d\x1e\x1fC\xf4\xd4\xc9>Y\xc7\xd9\x02\x10\x0a\x81%\xe6\xab\xe6\xaf\xde\xfe\xc0y\xf6\xac\xaf_P\x1b\xf4\x94\xae\xdf\xc8\x8f\xdbw\xf9\xde\x04\x83bLD\xa7<I\xd8\x83\xc3ZJ\xdb\x87V;!\x12\xf6\xe00:\xf4O\xe6\xf2\xf6\x9d\x94\xae\xdb\x88\xdbj\xc5\xb2n\x03W>\xd8G\xcc\xdc9t\xbc\xaf\x1f\x1e\xab\x15\xa1(\xc8_~\x89\xc7&\xe3\xba\xf8?\xc8_\xe5\xe1\xb1\xd9\x10\x8a\xc2\xc5e+\x00\x08\x8a1\xf2\xed\xb0\x91T\x97\xfc4\xe5\x8d\x9e6\x99\xc8\xc7\x1fEmh\xfeY\x80\x86\xe0w\x1f\xd0\x14\xd4\x06\x03\xc6\xa9S\xb8}\xff^:\xf6O\x06\xa1P}\xb1\x84\x0b\xb3\xe7rn\xf24<V\xab\xb7\xc76\x99\xf0X\xad\xe8\x13{\xa3K\xec\xe5Mw\xa1\xd4\xabS}\xb1\xc4Wv\xfb\xfe\xbd\x18\xa7Niu\xe7\xa1\x95\x03P\x87\xa0\x18\x13\xb7\xaeXF\x97\x8d\xeb|\xce\xc9_~\x89\xe3\xbb\xef@(DO\x9b\x02B!l\xc4p\x8c\xd3R\x08\x1b1\x1c\x84B\xe5\x91#\xde& \x14\xb4\xdd\xe2\xe8\xb2q\x1d\xb7\xaeXFPL\xdb\x1d\xe0n\xf5SbWC\x7fG\x22\xddvo\xe7\xca\xfb\x1fpq\xe9_\xf0\xd8l\x00\x9c\x9f5\x1b\xa1( 
\x04\x973\xb6aY\xbb\xde\xfb?\xde,\xbai\xde\x5c\xc2G\x8ehKj>\xb4i\x00\xea\x10>r\x04\x1d\x7fw\x9f\xd7\xd9\xb7\xd7\xe2\xb1V\x02P4\xf1\xc9zr\xc6\x19\xd3\x89\x1c\xf7D\x9b\xa4zch\x93&\xd0\x10\xd4\x06\x03\xc6\x19\xd3\x88\xcf9\x80\xfe\x8eD_\xaa#\x14\xf4w$\x12\x9fs\x00\xe3\x8ci\xed\xea<\xb4S\x06\x5c\x8d\xa0\x9bb\xe8\xbae3\xf2\x17_b~\xebmLO\xcd@\x7f\xe7o\xdb\x9b\x86\x0f\x01\x00f\xb3\xb9\xdd\xbf\x14\xd1\xdf\xf9[\xe2\xeeLoW\x9bW\xa3\xee\xa3*\x15\x80\xc3\xe1\xf8\xc5\x88\xfc\xd2Pi\xb5\xda\x0a\xb3\xd9|}\xc9_\x19j}\xbe\xa0R\xab\xd5G\xf3\xf2\xf2\xae'\xff\xabC\xad\xcfGU\xd1\xd1\xd1+\xcdf\xb3\xafM\xfc\x7f@qq1f\xb3\x19\x95J\xb5R5e\xca\x94\xa3Z\xad\xf6hFF\x06\xb5\xdf\xd3\xfd\xaa\xe1t:\xc9\xc8\xc8@\x08\xb1w\xc9\x92%\xf9*\x80\xe4\xe4\xe4\x89\x1a\x8d\xa6\x22--\xedW\x1d\x04\xa7\xd3IZZ\x1a\x0e\x87#_\xa3\xd1L\x84\xab\xf6\x9b\xbe\xf9\xe6\x9b\x84\xac\xac\xac\x8f\x9cNg\xe8\xf0\xe1\xc3IL\xf4\xef\xcc\xed\xffu\xe4\xe5\xe5\x91\x95\x95U\xe7\xfc}u\x9f\xce\xd6[L\xff\xe8\xa3\x8fn-((H?\x7f\xfe|\xff\xb0\xb00\x12\x13\x13\x89\x8d\x8d\x05\xfe\xb9>\x9ev:\x9d\x94\x94\x94\x00p\xfe\xfcy\xf2\xf2\xf2(//G\x08\xf1\xa6F\xa3y\xa9\xceyhd\xc7\xf1\xd0\xa1C\xfd\xcf\x9c9\xf3LEEE\x7f\x87\xc3\x11\xdaN\xbc\xdb\x0a\x17\x84\x10{\xf1~9~\xe1\xe77\xff\x17\xd7q\x00\x14\xc6\xb0\x7f\x82\x00\x00\x00\x00IEND\xaeB`\x82\x00\x008k\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x007\xf8IDATx\xda\xec\xddOllY~\x17\xf0[\x93\x8e\xf2\x8f\xc4o\xa4\xe1\x9fF\xc4~,\x185\x04\xd9#\x88h\x10#WGH\x84M\x9eGb\x93\x95\xab\x17,\xb2\x18\x9e{\xc5\xec\xde\xb5\xc4bv\xe3\xc7\x08\x09\x89\xc5+\xaf\x88X\xa4\xedeHP\xdb\x1a\x04\x1d\xfe\xa8\xed0R\x18\x02y6\x22L$4\x9a\xe7D\x84\x00I\xcc=\xed\xe3i\xf7\xeb\xaa{\xcf\xad\xbaUu\xef\xad\xcfG\xba\xaa\xf7\x5c\xd7\xf5\xe7\xdcr\xd5\xb7\xce\xb9\xe7w\x06\xb7\xb7\xb7\x19Pn\xe3+o\xed\x15\x17[7\xdf\xf8\xe0Hk\x00\xd05\x9f\xd1\x04\x90\xe4 n\x00 
\xf0A\xdfl|\xe5\xad\xad\xe2b\xb7\xd86cO\x1f\x00\x08|\xd03\xf9\x83\x7f\xeb\xe5\x03\xa0s\x06\xce\xe1\x83\xe96\xbe\xf2\xd6\xa3\xe2\xe2*\xfc\xf3\xc1\x8f\x1f\xdf|\xe3\x83+\xad\x03@W\xe8\xe1\x83r{\xaf\x85\xbd \xd7,\x00\x08|\xd0\x1f\x93\xc2\xdd^\xec\xf9\x03\x00\x81\x0f\xba\xac\x08u\xc3\xe2bs\xd2U\xd9]\xcf\x1f\x00\x08|\xd0qe\x134r\xcd\x03@W\x98\xb4\x01\x13\xc4R,/+v{\xfb\xe6\x1b\x1f\x9ci-\x00\xdaN\x0f\x1fL6J\xd8G\x89\x16\x00\x04>\xe8\xb0\x940\xf7$\xf6\x04\x02\x80\xc0\x07]R\x84\xb8Q\xf6\xe9R,\xd3\x8c\xb4\x18\x00\x02\x1ft\xcf\xc1\x82\xf6\x05\x00\x81\x0fV-\x96b\xd9\xae\xf3+\xb1G\x10\x00\x04>\xe8\x88Y\xc2\x9b^>\x00ZMY\x16\x88\xe2\xea\x19\xdf\x9b\xf1\xd7\x95h\x01\xa0\xb5\xf4\xf0\xc1\xc7\xe6\xe9\xa9\x1bi>\x00\x04>h\xbfyB\xdb\xbe\xf5u\x01\x10\xf8\xa0\xc5\xe2\xc4\x8b\xcd9o\xc6\xb9|\x00\x08|\xd0b\xa3\x96\xdc\x06\x00\x08|\xd0\xb4\xb8Z\xc6n\x037\xb5\xa9D\x0b\x00\x02\x1f\xb4S\xde\xe0m\x09|\x00\xb4\x8e\xb2,\xac\xb58\xd1\xe2*K_J-\xc5\xe3\x9bo|p\xa5u\x01h\x0b=|\xac\xbbQ\xc3a/\xc85+\x00\x02\x1f\xb4\xc7\x22f\xd6\xee)\xd1\x02\x80\xc0\x07-P\x84\xb2\xbdl\xfeR,\x13o:s.\x1f\x00\x02\x1f\xb4\xc2\x22C\x99\x9a|\x00\x08|\xb0J\xb1\x14\xcb\x93\x05\xde\xc5f\xecA\x04\x00\x81\x0fVd\x19=p#\xcd\x0c@\x1b(\xcb\xc2\xdaYP)\x96i\x94h\x01`\xe5\xf4\xf0\xb1\x8e\xf6\x96\x14\xf6\x02\xe7\xf2\x01 \xf0\xc1\x0a,3\x84\x8d\x94h\x01@\xe0\x83%*\xc2\xd7\xb0\xb8\xd8^\xe6]fw=\x8a\x00 \xf0\xc1\x92\x8cVp\x9f\x86u\x01X)\x936X\x1b\xb1\x14\xcb\xcb\x15\xdd\xfd\xdb7\xdf\xf8\xe0\xccQ\x00`\x15\xf4\xf0\xb1NFkz\xdf\x00\x08| \xf0-\xc1~\xeca\x04\x00\x81\x0f\x16\xa1\x08[!\xecm\xae\xf8a\x8c\x1c\x09\x00\x04>\xe8w\xd8\x12\xf8\x00\x10\xf8`\x116\xbe\xf2\xd6Nq\xb1\xdb\x82\x87\xb2\x19{\x1a\x01@\xe0\x83\x86\xb5\xa9,\x8a\xc0\x07\xc0\xd2)\xcbB\xaf\xc5U.\xbe\x97\xb8\xfb\xcd\xfd\xaf\xd5\xbc\x9b\xf0{\xaf\xb2\xf4s\x04\xbfx\xf3\x8d\x0f.\x1c\x1d\x00\x96E\x0f\x1f}7J\xd8\xe7\xba\xd8\xde)\xb6\xadb\x9b%\x88]\x14\x01n+\xde\xc6y\xc2\xfe\x0a1\x03 
\xf0A\x83\xca\xc2\xd5ivW\x10y\xab\xd8\xc6\xc5\xf6j\x9e;\x8a\xb71,\xfe\xf9\xc5b;.\xd9u\xdf\xfa\xba\x00\x08|\xd0\x80\x22T\x855l_\x1ff\x0d\xc3\xaf\xcf\x8b\xedq\x11\xce\xf6\x16\xb1\xfaE\x18\xae-\xb6Q\xf1\xcf\xcf\x16\xdbav\xd7\x83\xf8\xba\x91#\x04\xc0\xb2\xbc\xa1\x09\xe8\xb1\x87\xbd{\x97\xc5v\x14z\xe1\x96u\xe7\xb1\xc70\x0f[\x9c\x9d\x1b\xb6\xdd\x07\x8f\xed\xc8!\x02`\x19\xf4\xf0\xd1KqU\x8b\x10\xae\xc2\xd0j\x18\xb6\xddYf\xd8\x9b\x10\xfe\xee\x87{\x1f\xc7\xc7\xf4(\xf6@\x02\xc0\xc2\xe9\xe1\xa3\xcf\xc2\xb0\xedU\x9b\x1eP|<\xa3x\x0e\x9f\xf3\xf8\x00\x10\xf8`\xce`\xd5\xe6\xc7\x17\x86{_9R\x00,\x83!]\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00@\xe0\x03\x80%\x19\x0c\x06\x8f\x8am\xa8%@\xe0\x03\xa0\x9fao\xa7\xb8\xb8\xd2\x12 \xf0\x01\xd0\xcf\xb0wP\x5c|Xl\x1bZ\x03\x16\xcbZ\xba\x00,;\xe8=*.\xc6\xc5\xf6\xe4\xfeg\xb7\xb7\xb7gZ\x06\x04>\x00\xfa\x11\xf6\xc2\x10\xeeI\xb1mj\x0dX\x1eC\xba\x00,+\xec\xdd\x0f\xe1\xbe\x1e\xf6\xce\xb5\x0e,\x96\x1e>\x00\x16\x1d\xf4>5\x84\x0b\x08|\x00\xf4'\xec\xa5\x0c\xe1\x9ei)X,C\xba\x00,*\xecM\x1b\xc2\x05\x96L\x0f\x1f\x00M\x07\xbd\xbaC\xb8gZ\x0d\x04>\x00\xba\x13\xf6\xcc\xc2\x85\x162\xa4\x0b@Sao\x94\xdd\xf5\xd6\xd5\x0a{j\xf0\xc1\xe2\xe9\xe1\x03`\xde\xa0\x17\x86p\x8f\x8am_k\x80\xc0\x07@\xff\xc2^\x18\xc2\x1d\x17\xdb\xf6\x8c7\xa1\x06\x1f,\x81!]\x00f\x0d{\xa3\xecn\x08w[k@\xbb\xe9\xe1\x03\xa0n\xd0kr\x08\xf7L\x8b\x82\xc0\x07@\xbb\xc2\xde\xbcC\xb8\xc0\x0a\x18\xd2\x05 5\xec\x8d\xb2\xe6\x87p\xcf\xb4,,\x9e\x1e>\x00\xaa\x82\x9eY\xb8 \xf0\x01\xd0\xe3\xb0\xb7\x95\xdd\x15R^\xc8\x10\xae\x1a|\xb0\x1c\x86t\x01\x98\x16\xf6\xf6\x8a\x8b\x8b\xcc\xf9z 
\xf0\x01\xd0\xcb\xb0\x17\x86p\xdf+\xb6\x8d\x05\xde\x8d\x1a|\xb0$\x86t\x01x\x18\xf4\xb6\xb2\x05\x0e\xe1\x02\xab\xa1\x87\x0f\x80\xfb\xb0\xb7\xec!\xdc3\xad\x0e\x02\x1f\x00\xcb\x0b{\xcb\x18\xc2\x05V\xc4\x90.\xc0z\x07\xbd\xadluC\xb8g\x8e\x00,\x87\x1e>\x80\xf5\x0d{f\xe1\x82\xc0\x07@\x8f\xc3\xde\xca\x87p\xd5\xe0\x83\xe51\xa4\x0b\xb0^A/\xac\x9a\x11\x82\x96^=X#z\xf8\x00\xd6'\xec\x0d\x8b\x8b\xab\x96\x84=5\xf8@\xe0\x03\xa0\xe1\xb0\x97\x17\x17\xefgf\xe1\xc2Z2\xa4\x0b\xd0\xef\xa0\x17\x86p\xc3,\xdc\xdd\x96=\xb43G\x07\x04>\x00\x9a\xb1\x15\xc3U\x98\x8d\xbb\x13\x7f\xb6\xabY@\xe0\x03\xa0'noo/b\xd8\xfb\x94xN_\xf0\xfa\xe52\x02\xe1\x99\xa3\x03\x02\x1f\x00\x8b\x0f\x83ge\xe1\xeb\xb5@8\xcc\xf4\x0c\x82\xc0\x07@\x7f\x03a\x11\xfe\xae\x16t\xdb\xc0\x12\x98\xa5\x0b@\xa98\xc3wSK\x80\xc0\x07@?\xc3^\x98\xe5{\xd0\xf0\xcd\xaa\xc1\x07\x02\x1f\x00-\x12\x96`+\xab\xddwXl7\x9a\x09\x04>\x00:(N\xda\xd8/\xd9\xe5\xf2\xf6\xf66/.G5o\xfaL\xeb\x82\xc0\x07@;\xe4\x15\xd7\x7f4\xd4[\x84\xbeP\xd8\xf9\xb9\xe6\x02\x81\x0f\x80\x0e\x19\x0c\x06\xa3\xac\xbc\x0c\xcb\xe9\xc3\x99\xb6\xc5\xbfC\xf8\xbbL\xbc\xf93-\x0c\xcb\xa5,\x0b\xc0]\xc0\xd9\xca\xeeV\xa5\x18\x16[\x98\xa8P\xba*E\x11p\x06=n\x8b\xf0\xfc\xf3\x8a\xdd&M\xe4\xd8\xcb\xee\x8a<[\xaf\x17\x04>\x80V\x84\x9a\x9d\xec\xe3\x82\xc2\xc36\x86\x94x\x0e\xdd\xfb\xf1\xbf\xe71L]\x15\xdbY\x5cAcQB\x98++\xc3rX\xdc\xff\xd5\x84\x10|\x15{\x06\xdf+\xbbq5\xf8@\xe0\x03Xt\xc8\x0b\x81d/\xebF]\xb9\xad\x07\xff\xde\xcd\x1e\xf46\x16\xcf\xe5>\x04\x9e4\x19\x00cOgY\x19\x96\xeb\xecn\xe6\xee\xb40wR\xdcF8\x9f\xef\xa9W\x1c\x08|\x00\xcb\x0ay\x8fb\xc0\x0b!f\xbbc\x0f\x7f\xab\xe2\xfa\xef\x87\xc0\xe2y\x86 v\x16\x02`\x9cD1\xab\xaa2,yq\xfb\xaf\xcan \x9c\xcf\x17\xc3\xf5\xa4\xe1p5\xf8`\x05L\xda\x00z\x1b\xf4\xe2\x0a\x11W\xc5\xf6\xa2\x83a/\x18\xd6\xd87\xf4X\x86\x12*\xef\x15\xcf\xfbU\xb1\x8d\x8bm\xaff\x9b\x85\xfb{R\xb2\xcby\x11\xe6\xc6\x897\x17\xee{R}\xbeW^\x9d 
\xf0\x014\x19\xf4\x9ee\xdd\x9e@\xb05\xe3\xefm<\x08\x7f\xe1\xbc\xba<\x0e\xd5V9\xaa\xb8>O}\x00\xb1\x17pR\xe0\xbc\xf0*\x85\xe53\xa4\x0b\xf4\xd1\xc1\x1cA\xef&\xfbxrD\xd8^\xad0\xa4l6t\x1b!\xf8>+B\xdfi\x08u\x93&M\xc4\xc9\x16e\xbd\xa0\xc7u'[\x84\xfd\x8b\xdb=\x8c\xf7\x7fO\x0f\x1f\x08|\x00\xf3\x09=KE\xc8\x08\xe7\xb0\xed'\xfeJ8\xa7\xec,n\x17U\xe7\xa7-\xd9\xdb\xd9\xc7%b\x86\xf1r\x9e\x1e\xcb0\x5c\xfb$\x9e\xef\x97\xdf\x0f\xcf\xc6\xf3\x1c\x8f*B\xf0\xc1\x8c\xc7#\x8fC\xc5\xf7\xe7\xf3\xe9\xe1\x03\x81\x0f\xa0\x11G%\x81/\x84\x9d\xfb\x99\xad'm~\x12\x0fz\xd4\xbe\xff8\xe3\xd0\xec\xf0\xc16K/`\xf8\x9d\x17q\xe8;O\x08\x92Gs\x06\xe10\xb4{\x15\xefC\x0f\x1f\x08|\x00\x8d\x04\xa5\x8b\x22\xcc\x84U\x1f\x1e\x0eQ\x1e\x17\xdb\xb8\xeb5\xe0b\xfd\xbbq\xdc\x1e\xd6\x13\x1ce\xf5'\xa6\x84\xe0\xf7\x0f\x8b\xed\xf3%\xfb\x5c\xc7\xf5r\xe7y\xcc\xaf\xe2\x04\x92\xf7\x17\x5c?\x10\x98\xc2\xa4\x0d\xa0\xafB/_\x18\x8a\x0c\xe7\x90}\xb6\x08\x1a\xa3>\x16\xfc\x0d\x01\xaa\xd8B\x0f\x5c\x08~\x8f\x8b\xed\xdd,}\x89\xb3\xa0\xaa\xc7\xed\xa0\xa1\xc7y\x16\x1f\x1b \xf0\x014\x16\x84Bo\xde\xa3\xd0;\xd5\xb2\xf3\xf2\x16\xf9\x9c\xaf^\x0b\x7f\xa1\x00\xf2u\xc9\xaf\x84`\xf8\x97J\xae?or\xd8;<6\xafL\x10\xf8\x00h6\xfc\x1d\x14\xdbV\xf1\xdf/\x17\xdb\xe9\x84\xdd>Wq3#-\x09\x02\x1f\x00\xdd\x08\x7fa\xf5\x8dp\x0e\xdd}\xaf_\x18\xea\x0e\xb3\x93\xcb\xce\xdd{>i\xbd\x5c@\xe0\x03\xa0\xdd\xc1\xef\xa3^\xbf\xec\xae\xa0\xf3N\xc9\xae!\x10\xe6Z\x0c\x04>\x00\xbak\xee\xf5r\x01\x81\x0f\x80\x96\x8ae\x5c\xca\x8aR_\x9a\x5c\x01\x02\x1f\x00\xddV\x15\xe6\x0e4\x11\x08|\x00tT\x5c/w\xb7d\x97\xd3>\xd6*\x04\x04>\x80u\x09{a\xbd\xdc\xbcb7\xbd{ \xf0\x01\xd0a!\xcc\x95\xad\xbb{\xa8\x0c\x0b\x08|\x00t\xd4`0\xd8\xca\xca{\xef\xc2j\x1c&j\x80\xc0\x07\xb0\x90 rTl\x17q\xb8\x91\xc5Q\x86\x05\x04>\x80\x95\x84\xbdaq\xf1\xb4\xd8\xb6\x8b\xed*\x96\x0ba1\xed\xfc\xa4d\x97\xb0^\xeex\x01\xf7;\x16\xe6A\xe0\x03x8\x84\x18z\x9f>\x8c\xb3HY\x5c;O\x92/\x22\xecew\xb5\xfeB\x98?\x13\xe6A\xe0\x03\xd6P\x11\x00\x0eb\x18x\xdd\x8b\x18\x16h\xa6\x9dGS\xda\xf9\xdeq\xd3eX\x1e\x84\xbd{B\x1f\x08|\xc0\x1a\x86\x90\xaa\xf2 
C\xad\xd4X;\x97\xf5\xee5\xbe^\xee\x84\xb0woC\xe8\x03\x81\x0fX/\x07Y\xf9\x04\x82\x91&ZJ;\x1f-\xa0\x0cKY\xa0\x13\xfa@\xe0\x03\xd6A\xecu*+\x0frl\xa5\x87F\xday\xab\xb8xV\xb2\xcbu\xd1\xce\xf9\x02\xeezXl\x97B\x1f\x08|\xc0z+\xebu\xba\xc9\xac\xf4\xd0\x94q\xc2qh\x5c,\xed\x22\xf4\x81\xc0\x07\xac\xb9Q\xc9uGj\xc1\xcd/\x96a)[/7\x94a9Y\xd4\xfd\xd7\x0c}J\xb6\x80\xc0\x07\xf4,\x88\x84\xb07mi\xaf\xd0\xbbg\xa5\x87f\x8c+\xae_x/\xaa\xd0\x07\x02\x1f\xb0\xbe\xf6J\xae\xd3\xbb\xd7L\xa8\xaeZ/\xf7y\xd1\xce\x17\xcbx,\xf1x\x8eb\x98\x9f&\x94l9q\xe4@\xe0\x03\xfa\x11D\xb6\xb2\xe9\xab=\xe8\xddk\xa6\x8d\xab\xca\xdd4^\x86%!\xf4\x85p9\xac\x08}\xbbj/\x82\xc0\x07\xf4CY\xef\xde\x89\xde\xbdF\xb4r\xbd\xdc\x18\xfa\xaa\x86\x91\xf7c\xef$ \xf0\x01=\x0d|z\xf7\xe6\x14g\xbc\xee\x97\xec\x12\xca\xb0\xac\xac\x9d\xe3Z\xbd\x87\x15\xbb}\xbdx\x1e{\x8e&\x08|@7\xc3H\x18j\x9c6k\xf4rY\xe7\x94\xf5\x5cU\x98\x1b\xad\xfa\x01\xc6\xba\x7f\xa7\x15\xbb\x8d\xe3\xf0? \xf0\x01\x1d3,\xfb\x80\xd7<s\x07\xea\xbd\xac\xbc\x0c\xcbi\x8b\x8aY\x87\xe0Y5s\xf7\xc4\xcc]\x10\xf8\x80\xee)=\x7fO\xf3\xcc\x15\xf6\xaa\xd6\xcb\x0dZsn\x5c\x8d\x99\xbb\x86\xf9A\xe0\x03:f8\xe5\xe7\x97\x0bX\xcbu\xddT\x95a9l[\x1b\xc7!\xfc\xbcb\xb7\xfdX\xb7\x11\x10\xf8\x80\xb6\x8b=P\xd3\x02\xc9\x99\x16\x9a\xabm\xb7\xb2\xf2\xde\xbb\xd6\x96\xbb\x89\x13H\xaa\xce\xe7;\xb2\xfc\x1a\x08|@7\x94}`\x0b|\xf3\xc9\xb3\xf22,\x07-/w3\xca\xca\x87v\xc3s\x1b;\xcc \xf0\x01\xed7\x14\xf8\x9a\x17\xd7\xcb-+\xc3r\x1eK\xa1\xb4\xd6\x83\xf3\xf9\xca8\xc7\x13\x04>\xa0\x03\xa6\xf5\xf0]*\xb6<\x97\xaa\xa1\xda\xbc\x0bO\xa2x\x0d\x84@7ih7\xcc\xe4\xfdb,\xe5\x02\x08|@\xcbM+\xafq\xa5if\x13'3l\x97\xecr\xdc\xa22,)\xc2y\x88\x0f\x87v\xc3D\x93\x1d\xf5\x19A\xe0\x03\xbacZ}8\x1f\xe6\xb3\x85\xbd\xaa2,K_/w^q\x16qxNz\xf5@\xe0\x03:\x1aN2\x81\xafQ\xa17\xacl\xa2\xc6Q\x17K\xdd\x84\x90\xa7W\x0f\x16\xeb\x0dM\x00,\xd0\xdb\xc5\xb6\x15\xb7a\xbc\x0ceZ\x9c\xbfW?@\x87\xb6{V\xb2\xcb\xb5\xde1@\xe0\x03\x96*N\xca8\xd3\x12\x8d\x19W\x5c\x7f\xa0\x89\x80i\x0c\xe9\x02\xb4\x5c,\xc3R\xb6^\xeey\x9c\xf1\x0a 
\xf0\x01t\xd4\xb8\xe2z\xbd{\x80\xc0\x07\xd0U\x83\xc1\xa0j\xbd\xdcc\x93\x1d\x00\x81\x0f\xa0\xbba/\xcct\xceKv\x09eX\xf4\xee\x01\x02\x1f@\x87\x85\xb0WV\x86%\xb7b\x09 \xf0\x01t\xd4`0\x08\xcb\xd2=-\xd9%\x94a9\xd2R\x80\xc0\x07\xd0]Uan\xa4\x89\x00\x81\x0f\xa0\xa3\x06\x83\xc1^V]\x86\xe5LK\x01\x02\x1f@w\xe9\xdd\x03\x04>\x80\xbe\x1a\x0c\x06yV^\x86\xe5\xb0\x8b\xeb\xe5\x02\x02\x1f\x00\xd9\xf7\xd7\xcb-+\xb3\x12\xca\xb0\x98\xa8\x01\x08|\x00\x1d\x96g\xe5eX\x0e\x94a\x01\x04>\x80\x8e\x8a\xeb\xe5\xee\x97\xecrY\x84\xbd\xb1\x96\x02\x04>\x80\xee\xca+\xae\xb7\xa2\x06 \xf0\x01t\xd5`0\x18e\xe5eX\x8e\x95a\x01\x04>\x80\xee\x86\xbd\xb0^n\xd9D\x8c0Q#\xd7R\x80\xc0\x07\xd0]a\xa8\xb6l\xa2\xc6\x912,\x80\xc0\x07\xd0Q\xb1\x0c\xcb\xb3\x92]\xae3eX\xaa\xdap'\xf6\x92\x02%\xde\xd0\x04\x00+S\x15\xe6\x94a\xf9d\xb8\x1b\x16\x17;q\x0ba\xf9\xfe\xbc\xc7\xb7\x8b\xedL\x0b\x81\xc0\x07\xd0\xc6\xf0\xf2\xa4d\x97\xb0^\xee\x89\x96\xfa\x84\xf7\xa7\xfc|G\xe0\x83r\x86t\x01Vc\x5cq\xbd2,\x13B\xf0\x94\x9f\x1b\xd2\x05\x81\x0f\xa0]\x06\x83A\x08se\xeb\xe5\x862,\x17Z\xeaS\xae\xa6\xfc|\xa8i@\xe0\x03hS\xd8\x0b\xbdQy\xc9.\xa1\x0c\x8b\xde\xbdz\x81\x0f\xa8\xe0\x1c>`\x19!g\x98\xdd\x9dd\x1f\xb6\xfb\x7f\x87\x1e\xae/\xaf\xe1yj!\xec\x95\x95a\xc9M\xd4\xa8\x1d\xf8v5\x0d\x08|\xc0\xea\x9dL\x099\xc3x\xdd\xba\x04\xdf0\xb9\xe0i\xc9.\xd7E\xd8S\x86\xa5~\xe0\x03*\x18\xd2\x05\x96\x15\xf8&\xd9[\xb3v\xa8\x0as#/\x15@\xe0\x03\xfa\x16\xf86c\xafW\xef\x15\xcf3\x84\xdb\xb2\xa1\xc7s\xeb\xe5V\xba*i\xdf\xa1\xe6\x01\x81\x0fX\xa1x\x9e\xde\xcd\x94\xabGk\xd2\x0cz\xf7\xe6\x7f\x1d]i\x05\x10\xf8\x80v\x1b\xafk\xd0\x19\x0c\x06yV^\x86\xe5\xb90\x03\x08|@\x1fL\xeb\xe1\xda(\x02QoC_,\xc3RVf%\xf4|\xe6^\x1e\x80\xc0\x07t^\xec\xc1\x9a\xb6RB\x9f\x03O\x08\xbaeeX\xac\x97\x0b\x08|@\xef\xc2\xcf$\x9b}\xec\xe5\x8b\x13\x09\xf6Kv\xb9,\xc2\xde\xd8\xcb\xa2\xd6\x17\x87\xc1\x94\xedL\xeb\x80\xc0\x07\xb4\xe3\xc3:L\xde\xb8\x9eru\xde\xc3\xa7\x5c\xf5\x9c\xac\xa8\x01\x08|@/M\x0bA\xbd\xea\xe5\x8b\xcf\xa5\xac\x0c\xcb\xa9^)@\xe0\x03z)\x0eaN\xed\xe5\x8b\x93\x1c\xba\x1e\xf6\xac\x97\x0b\x08|\xc0\xda\x9
b\x16\x866{\x12\x84\x0e\xb2\xf22,G\xca\xb0\x00\x02\x1f\xd0k\x15\xbd|\xcf\x06\x83\xc1VW\x9f[|\xec\xcfJv\x09\xcf\xdbz\xb9\x80\xc0\x07\xac\x85\xb2\x9e\xbcq\x87\x9fWU\x98\xcb\x95a\x01\x04>`-\xc4\x19\xbb\xd3\xea\xf2\xed\x0e\x06\x83\xce\x0d\xed\xc62,OJv9W\x86\x05\x10\xf8\x80u3*\xb9.\xef\xe0\xd0nU\xef\x9e\x89\x1a\x80\xc0\x07\xac\x978q\xe1p\xca\xd5au\x8aqW\x9eK\xec\x91\xdc.\xd9\xe5\xb8x\xbe\x17\x8e: \xf0\x01\xeb\x18\xfa\xf2\xe2\xe2r\xca\xd5\x9d\x18\xdaU\x86\x05\x10\xf8\x00\xaa\xed\xc5P4\xc9\xd7\x8b@\xb5\xd3\xf2\xc7\x1f\xc2^\xd9z\xb9G&j\x00\x02\x1f\xb0\xd6\xe2\xd0\xee\xa8d\x97q[\x1f{<\xcf\xf0i\xc9.\xd7\xb1\x17\x13@\xe0\x03\xd6>\xf4\x85Y\xbb\xcf\xa7\x5c\xdd\xe6s\xdf\xaa\xc2\xe8\xc8\xd1\x05\x04>\x80\x8fC_8\xcf\xed\xf8\xc1\x8f\xc20\xef;\xc5\xcf[\x19\x9a\x06\x83A\x18\x8a.[/\xf7\xdcz\xb9@\x1b\xbc\xa1\x09\x80\x96\x85\xbeQ\x9c\x04\xb1Ul\xa3\x96\xcfl\xad*\xc32rD\x01\x81\x0f`r\xe8\xdbk\xfbc,Bi\x9e\x95\xaf\x97\xfb\xdcz\xb9@[\x18\xd2\x05\xa8\x1f\xf6B\x0fdY\x99\x950\x14\x9dk)@\xe0\x03\xe8\xae0\x94[V\x86\xe5@\x19\x16@\xe0\x03\xe8\xa8\xb8^\xee~\xc9.\x97\xd6\xcb\x05\x04>\x80n\xcb+\xae\xb7\xa2\x06 \xf0\x01t\xd5`0\x18e\xe5eXN\x95a\x01\x04>\x80\xee\x86\xbd\xaa\xf5r\x03\xbd{\x80\xc0\x07\xd0a!\xcc\x95\x95a9T\x86\x05\x10\xf8\x00:*\xae\x97[\xd6{w\x9dU\x17a\x06\x10\xf8\x00Z\xac\xaa\x0cK\xae\x0c\x0b \xf0\x01tT,\xc3\xf2\xa4d\x97seX\x00\x81\x0f\xa0\xdb\xaa\x86jsM\x04\x08|\x00\x1d\x15\xcb\xb0l\x97\xecr\xac\x0c\x0b \xf0\x01\xb4/\xc4\x8d\x8b\xed\xa2\xd8v*\xf6\x0beX\xcaz\xf7\xc2z\xb9\xca\xb0\x00\x02\x1f@\xcb\xc2\xde(\xbb[\x16-\xf4\xda\x9d\x15\xff/\x0blyV>Q\xe3\xc8D\x0d@\xe0\x03hW\xd8\x0b=z/\x1e\xfc(\x84\xb9\xaf\x17??\x8beW\x1e\xee\x1b\xfe\xff\xb4\xe4\xe6\xae\x8b\xb0\x97kU\xa0+\xde\xd0\x04\xc0\x1a\x84\xbd0<{6\xe5\xea\xb0T\xda\xcbb\x9f\xc3\xec\xe3^\xbbq\xc5M\x1e\xcc\xf88\xf6\xe2m_\x15\xdb\xeb\xbd\x83\x17)?s\xce 
\xf0\x01L\x16B\xd2F\xc5>\xcfB\x90+B\xd9?\xc9\xca\xd7\xcb\x0deXNfy\x10\xe1\xf7b\x99\x97I\x8fg714N|L\xaf\xfd\xffU\x0c\x8bU?\xbb\xb2:\x08\x08|\x00}\x11\x02\xdaVB\xe8\x0b\xd7\xff\x83\x8a}F\xf3<\x90\x22`\xddO\x18\x09\x8fi\xbb\xa1\xe77),>\x991@\x86UC^\x0f\x81I\xbd\x8f\xe1g\xcek\x04\x81\x0f`%\xc2\xf9vavnv7\xeb\xf6\xc9\x1c7\xf5\xbc\x89\x1e\xb1p\x1b\xb1\xa7\xef$K\xec\xd9[\xa2\xcd\xec\xd3k\x06'?\xc6\x09\x01\xf2|\xc2ng1\x1c\x9exu\x82\xc0\x07\xd0d\xe8\x0bAm/\x06\xad\xf1\x84PS\xe5\xff\x15\xdb\x7fn\xf0\xf1\x84\x9e\xb0a\x0c\xa2\xfb=n\xfaIa\xb1\xaa\xe4\x0d\xd00\xb3t\x81u\x0b~g\xc5\xb6U\xfc3L\xd2\xb8\xa9\xf1\xab?Xl\xff8\xd6\xf0{\xd4\xe0\xe3\x19\x15\x17\xef\xae\xd1!\x08=~CC\xbf \xf0\x01,#\xf8\xe5\xd9\xddy}\xc75\x7f\xf5\xbe\x86\xdfN\x83\x8f%\xf4v\xbdS3\x80vQX\x99D\xd8\x03\x81\x0f`\xa9\xa1\xefU\xeca\xfb\x0f3\x84\xbe\x0f\x8b\xd0\x977\xd5\xdbW<\x8eqq1\xecq\xe8{\x1e\xdb\x1a\x10\xf8\x00\x96+\xae\xbe\xf1Wf\xfc\xf5P\xca%L\xc0\xc8\x1b\x0a}a\xe6k\xe89\xbc\xecY3\xbfS<7\xcb\xd0\x81\xc0\x07\xb0\x92\xb0\x17z\xe7\xe6\x0dk\xa1\x94Kca&N.\x19f\x93g\xb7vM\xe8\xad\xfcr\xec\xbd\x04\x04>\x80\x95\x08A\xadl\xb6\xeeivW\x97\xaeJ\xa3\xe5E\xe2Ps\x08}\xc7\x1dn\xdb\x10\xf6\x86J\xaf\x80\xc0\x07\xb02q\xbd\xdc\xb2\x9e\xb9\x10\xf4FqF\xef;\x15\xc1o!%F:<\x83\xf72\x86\xbd\x0b\xaf4\x10\xf8\x00V)\x84\xb4\xb2\x957\xf2\xfb\xd9\xa4aH2\x06\xbf\xb7\xb3O\xf7\xba].2\xd8tp\x06\xaf\xb0\x07\x02\x1f\xc0\xea\xc5\xe2\xcbe+n\x9cO:\xef,\xd6\xf0\x1b\x15\xff|\x9c}\x5c\xc7o\xe1\x05\x84;4\x83\xf74Sc\x0f\x04>\x80\x96\xa8\x0aiyE\x00\xbb\x0au\xfc\x8a\xed\xd1\xb2&$t`\x06o\xa8\xb1\xb7'\xec\x81\xc0\x07\xb0r\xb1\x0c\xcbvEp9k\xe3co\xf1\x0c\xdew\xd5\xd8\x83v\xb3\x96.\xb0Na\xafj\x0d\xd70dz\xd0\xc2\xc7\x1d\x1e\xf3}\xcfY\xe8\xe9\xcb\x8b\xed\xab\xc5\xf6\xb7[\xf0\xf0\xdeQv\x05\x04>\x806\x09a\xael\xa2\xc6QK\x87$\xc3P\xeen\xcb\x1eS\x08\xc7{m\xed\x0d\x05\x04>`\x0d\xc52,\xcfJv\xb9\x8e\xeb\xeb\xb6\xd1N\xcb\x1e\xcf}\x8d=3q\xa1#\x9c\xc3\x07\xac\x8bq\xc5\xf5m^\xfak\xa3E\x8f%L\x1a\xd9\x11\xf6\xa0[\xf4\xf0\x01\xbd\x17\xcb\xb0\x94\x0d\x89\x9e\xb7uE\x88\xf8\xd8\xdb\
x14\xf6\x94]\x81\x0e\xd2\xc3\x07\xac\x83q\xc5\xf5m\xee\xdd{\xd4\x92\xc7q,\xecAw\xe9\xe1\x03zm0\x18T\xad\x97\xfb\xbc\xe5\xc3\x93!`\x9d\xc6\xe0\x17\xb6\xedU\x84=eW@\xe0\x03hk\xd8\x0b\x01)/\xd9\xe5\xa6\xe2\xfa\x95\x8b\xb3`\xcf*\x9e\xe3\xf7\x16\xf8\x10\x94]\x81\x1e0\xa4\x0b\xf4Y\xf2z\xb9\x1d\xb6\xc8\x19\xbc\xc2\x1e\xf4\x84\x1e>\xa0\x97\x06\x83A\x08B\xfb%\xbb\x842,G=x\xaa\x8b\x08|\xca\xae@\xcf\xe8\xe1\x03\xfa\xaa*\xcc\x8dz\xf2<\x9b\x0e|\xc2\x1e\x08|\x00\xed7\x18\x0c\xf6\xb2\xf22,\xa7=Z!\xa2\xc9\xc0\x17\xca\xael\x09{ \xf0\x01\xb4=\xecU\xad\x97\x1b\x1c\xf4\xe8)75kW\x8d=\x10\xf8\x00:\xa3\xaa\x0c\xcba\x11j\xaez\x12n\x87\x0d\xddT(\xbb\xb2#\xec\x81\xc0\x07\xd0\x85\x00\xb4\x95\x95\xf7\xde\x85\xf3\xd3\x8ez\xf4\x94\x9b\x18\xce}\xae\xc6\x1e\xf4\x9fY\xba@\x9f\xe4Yy\x19\x96\x83\x9e\xf5bm\xcd\xf9\xfb\xca\xae\xc0\x9a\xd0\xc3\x07\xf4B\x1c\xde,+\xc3r\xde\xc3p3k\x0f\xdf\x8d\xb0\x07\xebE\x0f\x1f\xd0\x17UC\xb5y\x0f\x9f\xf3\xee\x8caO\xd9\x15X3z\xf8\x80\xce\x1b\x0c\x06\xa3\xac|\xb6\xeaq\x8f\xca\xb0\xdc?\xe7Yz\xf7\xae\x85=XOz\xf8\x80\xae\x07\x9f\xaa2,\xad_/wFu\x03\x9f\xb2+\xb0\xc6\xf4\xf0\x01]\x17f\xe5\x96M\xd48\xeaK\x19\x96\xd7l\xd5\xd8\xf7T\xd8\x83\xf5\xa6\x87\x0f\xe8\xacX\x86\xe5Y\xc9.a\xbd\xdc\xbc\xa7O\x7f\x98\xb8\xdf\xb1\xb2+\x80\x1e>\xa0\xcb\xc6\x15\xd7\x1f\xf4\xf8\xb9\xa7\x0c\xe9\x1e\x0a{@\xa0\x87\x0f\xe8\xa4X\x86\xa5l\x96j(\xc3r\xd2\xd3\xe7\xbe\x95\x95\x0fc\x07\xca\xae\x00\x02\x1f\xd0yUaf]{\xf7\xc2$\x95Q_\xc3. 
\xf0\x01kb0\x18T\xad\x97\xfb\xbc\xe7\xa5GvJ\xc2\x9e\xb2+\xc0\xa78\x87\x0f\xe8Z\xd8\x0beX\xf2\x92]\xfaZ\x86\xe5\xa1\xe1\x84\x9f]\x0a{\xc04z\xf8\x80\xae\x09a\xae\xec\xfc\xb5|\x0d\xca\x8flM\x09{\xca\xae\x00\x13\xe9\xe1\x03:#\xae.\xf1\xb4d\x97P\x86\xe5\xa8\xe7m\x10z8\x1f\x0eg\x1f\x0b{@\x15=|@\x97T\x85\xb9\xd1\x1a\xb4\xc1\xc3\xf3\xf7\xd4\xd8\x03\x92\xe8\xe1\x03:a0\x18\xece\xd5eX\xce\xd6\xa0)\x86\xf1\xf2]a\x0fH\xa5\x87\x0f\xe8\x0a\xbd{wB\x0f\x9f\x1a{\x80\xc0\x07\xf4\xcb`0\xc8\xb3\xf22,\x87=]/w\x92\x835z\xae@C\x0c\xe9\x02m\x0f{[Yy\x11\xe5P\x86\xe5h]\xdaC\xd8\x03\x04>\xa0\x8f\xf2\xac\xbc\x0c\xcb\x81\x19\xaa\x00\x02\x1f\xd0Qq\xbd\xdc\xfd\x92].\x9d\xcb\x06 \xf0\x01\xdd\x96W\x5c\x7f\xa0\x89\x00\x04>\xa0\xa3\x06\x83\xc1(+/\xc3r\xbc&eX\x00\x04>\xa0\x97a/\xac&Q6\x11c\x1d\xd6\xcb\x05\x10\xf8\x80^\x0bC\xb5e\x135\x8e\xccV\x05\x10\xf8\x80\x8e\x8aeX\x9e\x95\xecr\x9d\xadQ\x19\x16\x00\x81\x0f\xe8\xa3\xaa0\xa7\x0c\x0b\x80\xc0\x07tU,\xc3\xf2\xa4d\x97\xb0^\xee\x89\x96\x02\x10\xf8\x80\xee\x1aW\x5c\xaf\x0c\x0b\x80\xc0\x07t\xd5`0\x08a\xael\xbd\xdcP\x86\xe5BK\x01\x08|@7\xc3^(\xc3\x92\x97\xec\x12\xca\xb0\xe8\xdd\x03\x10\xf8\x80\x0e\x0ba\xaf\xac\x0cKn\xa2\x06\x80\xc0\x07t\xd4`0\xd8).\x9e\x96\xecr]\x84=eX\x00\x04>\xa0\xc3\xaa\xc2\xdcH\x13\x01\x08|@G\x0d\x06\x83\xbd\xac|\xbd\xdcs\xeb\xe5\x02\x08|@\xb7\xe9\xdd\x03\x10\xf8\x80\xbe\x1a\x0c\x06yV^\x86\xe5\xd0z\xb9\x00\x02\x1f\xd0\xdd\xb0\xb7\x95\x95\x97Y\x09eXL\xd4\x00\x10\xf8\x80\x0e\xcb\xb3\xf22,\xd6\xcb\x05\x10\xf8\x80\xae\x8a\xeb\xe5\xee\x97\xecrY\x84\xbd\xb1\x96\x02\x10\xf8\x80\xee\xca+\xae\xb7\xa2\x06\x80\xc0\x07t\xd5`0\x18e\xe5eXN\x95a\x01\x10\xf8\x80\xee\x86=\xeb\xe5\x02\x08|@\xcf\x850WV\x86\xe5H\x19\x16\x00\x81\x0f\xe8\xa8X\x86\xe5Y\xc9.\xd7\x992,\x00\x02\x1f\xd0iUa.W\x86\x05@\xe0\x03:*\x96ayR\xb2\xcb\xb92,\x00\x02\x1f\xd0mU\xbd{\xbd\x9e\xa8Q\x04\xde\xbdb\xbbM\xdc\x8eV\xf88\x0fj<\xce\x91\x975\x1d\xfd{\x1c\x15\xdb\xc5k\xaf\xe7\x93\xf8\xc5T\xe0\x03\x985D\x14\x17\xdb%\xbb\x1c\xdf\xde\xde^\xf4\xb9\x0d\x8a\xe7wR\x5c\x9c'\xee\xfe4\x9e\xef\xb8\xec\x
e3T5\x83\xfa!=\xb2t\xf5\xfd(\xbcn_LxO\x0a#\x10\xef\xc7\xf5\xbd\x05>\x80\x86C\xc4:\x95a\x19\xd5\xd8w\x15a*\x1c\xa7\x8d\x05<\x17h\xcb\xfbQx\x8d\xefW\xec\xf6\xac\xcf=}\x02\x1f\xb0\xaa\x10q\xb4.\x135b\xb9\x99\xc3\xc4\xddw\x97\xf9\xa1S\xdc\xd7Nq\xf14q\xf7C\xa5s\xe8\xa8\xd4/\x97\xb9\xc0\x07\x90\x1e\x22\xb6*B\xc4u\x11\x1c\xf25k\x96p~\xdeu\x8d}\x97\xf9\xb8R\x5c\xae\xe11\xa3\x1f\xefG\xc3,\xbd\x07{W\xe0\x03H7\xae\xb8~\xb4n\x0d\x12{3S{\x19\xb6\x9711\x22L(\xa9\xf1\x01g\x15\x14\x10\xf8\x00\x92C\xc4\xf9\xba\xae\x97[s\x02\xc7Q<\x0frQ\xc7)\xdcvj\xef\xdesk\x1c\x83\xc0\x07\xf0\x89\xa0Rq\xfdh\xcd\xdb'\xf5\xf9\x87!\xa8E\xf6\xaaU-uw/L\xae\xc9\xbd\xac\xe9\xb0\xab\x1a\xfb\xde\x08|\x00\x15\xe2L\xb8\xb2\x10\xf1|\xddO\xfa\xaf9\x81\xe3\xd9\x22\xca\xb4\xc4\xdbL\x0d\x93#\xab\xa0\xd0\x83\xbf\xb9\xe4\x9eu\x81\x0f\xa0<D<\xaa\x08\x11z\x8a>\xf9\xa1\x92:\x81#_\xd0\xfd\xa7\x9c\xc4~\x1e\x87\xa1\xa1\xebFYu\xef\xdd\xa5\xc0\x070\x7f\x888\xd0St'\xb6Cj\x90\xdbo\xb2LK\xc2Rw\x0f\x03\xfa\xc8\xd1\xa2'\x7fsW\xc5Ex\xedO\xeb\xe9;\x0e\xd7\xf7\xf9=\xea\x0d/\x03\xa0\xa1\x10QV\xd4\xf4\xd2\xea\x0c\x9f\xfa\x00\x1a\xc7\x99\xb8)\xb3d\xf3\xf8a\xd5\x84\xd4\xe3p\xa4\xe6\x1e=\xfb\x9b\x0b\xab\xfa\x0cc\xed\xc9\xb0m\x15\xdbY\xb1]\xac\xc3\x97Q\x81\x0fhB^q\xbd\x92\x1e\xd3\xdb\xe5\xc3\x84\xfdB1\xe6\xd1\xbc\xa19.u\x972QC\xcd=\xfa\x1e\xfc.\xd6\xedy\x1b\xd2\x05\xe6\x92\xd0Ku\xaa\xa4G\xe9\x07\xcf\xf3\xd4P=O\x99\x96\x9a\xeb\xe5\x0a\xe8 \xf0\x01$\x87\x88uZ/wVy\x96V\x0abs\xce\xb6L\x9d\xa8\xa1\xe6\x1e\x08|\x00\x9fP5D\xe8<\xb0\x0a5W\xe08\x98\xa5\x97/\x9e\xb3\xb4\x9f\xb0\xab\x99\xd4 
\xf0\x01|\x22Dl\x15\x17\xcfJv\x09eG\x8e\xb4TR\xe8\x1bgiu\xc26fl\xd3\xd4\xdfQs\x0f\x04>\x80Z!\x22\x17\x1ejI\xed\xe5\xdb\x8f=v\xa9\xc1|\x94\xa5\xcd\x04Vs\x0f\x04>\x80O\x84\x88aV^\xcb\xed\x5c\x19\x96zjN\xe08J<N\xa9\x135\xd4\xdc\x83\xd5\xbd\x9fn\x85\xf7\xd4&\xebmN\xa2,\x0b0\x8b\xca\xde=M4\x93<\x06\xaf\xaa\xc9\x15\xa1L\xcb^B\x8f\x5cj\x19\x16\xe7Z\xae\xee\xc3>\x84\xf2\xf0A\x7f_\x1b.\xfc\x7f\xb7\x22\x9c_<\xd8\xce\x1c\xbbN\x1d\xeb\xbdx\xbc\xb7&\x1d\xe7b\x9fi\xc7\xf9d\xde\x11\x13\x81\x0f\xa8\xfb\xa6\x15\x02\xc9v\xc9.\xc7fy\xce&\xbc\xa1\xc7Zy/\x12C\xf7I\xc5\x87K\xca0\xb1\x9a{\xcb\xff\x1b\xda\x8a\xc1~\xaf\xe2oi\x92\x8d\x18\x14v\x1f\xdc^8\xffs\xdcD\xafz\xecez?q\xf7//\xea4\x80\xf8\xfa\xbd\xca\xd2f\x96W>\x8e\xd4\xe7U\xdc\xce`\x01\xcfe\x18\x8f\xf7\xfe\x1c\xc7\xf9Eq;\xf7K\xbf\xcd\x14\xfe\x0c\xe9\x02u\xdf\x84\x8f*z\x1f\x94a\x99/\xf4\x85\x0f\xed\x94\x09\x1c\x9b1\x1c\x96\x05\xc2\x94\x0fK\xc7k\x89_\x96\x8a-\xf4\xd6\xbc\xcc\xee&<m7t\xd3\xbb1\x10\x5c\xcd;,\x18\xbf\xac\xa5\x9eZ0\x9e\xa76d\xd5mg\xe9e\x84NZz\xbc\xc3P\xedY\x0c\x9a\xfb\x0d\xdc\xe4v\xfc2\x18\x8es\xed\xba\x9c\x02\x1fPG^\xf1&|d\xa2Fc\xed\x9c\xb4\xdf\xa47\xfd\xd8\x83\x94\xf2\x01\xa3\xe6\xde\xf2\x82\xdeU\xfc\xb0\xde^\xe0]\x85\xe1\xfb\xf7\x8b\xfb\x9akv|\xf1\x9a\x08_\x02.\x13v\x0d\xef\x05'\x0bh\xaf\xd0\xf3\x99\xb2\xde\xf3e\xd6\xd2\xd3G\xe2H\xc8E\x966a\xaa\xae\x8d\xf8\x85a\xa7\xce/\x09|@\xf2\xb7\xd5\xe2\xe2i\xc9.\xd7\x86\x06\x9b\x11C\xd8q\xe2\x1b\xff\xc1\x8c\x81Q\xcd\xbd\xe5\xfc\xdd\x1c\xc5\xa0\xb7\xb9\xc4\xbb}\x1az\x96\xe6\xec}\x1b%\xee\xb7[\xd1\xd3\x5c\xb7\xbd\xc2c\x1e\xa7>\xc66~\xc1|p\xcc7\x16x77u\xbf\xac\x09|@\xaa\xaa7aC\x83\xcd:\xc8\xd2V\xe08\x88a\xfc\xfe\xc3f\x98\xa5\xf5\xee\xa9\xb9\xb7\x1c\xabZ\xb3u\xb7Fp\x9a\xf4\xa5#<\xeew\x13w\xcf\x1f\xbe\x06\x1bx\x9fI\x09J\xef\xc6\xc7\xd8\xb6\xb0\x97W|1nJ\xed\x9eU\x81\x0fHy\x13\x1bf\xe5C\x13j\xb85,\x86\xb1<a\xd7\x8d\xd7\xf6K\xf9\x9dS\xc7ki\xe6i\xe7\xcb9\xef\xfbI\x0c 
\xb3\xbe\x06COUjA\xf0\xf1\xbc\x0dUc(\xf7<>\xb6\xb6\xbdO\x86\xc7\xff\xac\xad\xaf+\x81\x0fH\xfd\xd6]f\xa4\x89\x16\x12\xfa\x8e\x12?\xf4\xf7\xefkye\xd5\xe7\x0c\x99X\xb3\xfc\xe0~\x5cq<N\xb3\xbb\xde\xb4\xb7\x8b\xed\xb3a\xa6h\xdcv\xee\xff]\xfc\xfcq\xb1}9\xde\xd6M\x8d\x87\xf0\xacN\xa1\xee)\x7f\xdb)\xf77\xd7\xd0n\x8d\xa1\xdcV\xd6\x8cL\x98\xd0\xf6\xa9\xd0z\x7f\xcc\x1f\x1c\xef\xfb\xe3\xfcv\xbcn\xda\xb1\xbe\x99\xe5\x0b\x9b\xb2,@\xd5\x1bY\xe8!(;\xff\xe8\xb9:`\x0b\x15>DS\xcad\x84\xe3\x94r\xceV\xeex-]\xf8p\xde\x7f-\xb4\x84\x9f\x1d\xa5\x0eK\xc6c\x16\xb6\x93\x07%wR{\x93B\x10\x19\xce\x18X\xaf\xe2\x04\x84\xf7R^[\xc5\xbe'3\xbe\xbeB\xd8K\x19\xca\x1d\xb5\xf4\xf5\x9bZ\xf32|\x81;\x98v\xfe\xdd\x83\xe3|\xf6\xe0=8\xf4\x1c\xee=x\x0d\x8dgy\x80z\xf8\x80\xaao\xad\x07\x15\xdf\xb6s-\xb585&p\x84\x0f\x83\xaa\xe1\xb0\xcb6\x0e\x85\xad\xc11<\x89\x7f+a;,\xb6\xad\xe2g\xa3Y\xcfA\x0b\xbd\x86q\x82\xd4\xdbYz\xef\xdb\xce\x9c\x8f\xff4a\xd7\x99\x86vk\x0c\xe5\x1e\xb7\xf8T\x84Qb\xd8\x1b\xd6\x9dl\x11\x9esx\xbddw\xbd\x7f\xa7\x02\x1f\xb0\x08U\xb5\xdc\xac\x97\xbb\xbc\xde\x83\x9b\x06ng\xa4)W\x1a\x08B\xd0k\xeco&\x06\x87Q\x8d\xd7\xd0\xbc\x8f\xbf\xf1\xa1\xdd\x1aC\xb9\xd7YKOE\x88\xa7RT\xf5\xee\xdd\xc4\xb0\xf7j\x8e\xe3}Ul{\xb3~Q\x10\xf8\x80iob\xa1G\xa0l\xb6\xa7\xde\xa2%\xa91\x81\xa3\xcc\xf36\xcej\x5c\xa3cx\xb2\x88/G\xb1\xc7+\xa5\x07x\xaf\x81\xd7`\xeam\xd4\x99\xb5;\xce\xd2\x86r\xf7Z\xfc\xe5r\x98\xf2<W\xfd\xf8\x05>`\x9a\xaa0\xe7\xc4\xff\xe5\x06\x86\xd4\x09\x1c\x93\x5cg\x86\xde\xfb,\xe5\xd8n,q\x15\x8e\xa4\xa1\xdd\xf8xR\x86r\x0f[\xfee%\xa5]W>\x14-\xf0\x01\x93\xde\x88GY\xf9l\xcfS+4\xac\xc4\xac!\xfb\xc0\xd0{\xaf\xbf\x0c\x5cei\xe7\xd8\x0d\x1b\xb8\xbb<\xf1\x8bG\xe9\xd0n\x8d\xa1\xdc\xf3\x9e\x14t_y`\x15\xf8\x80Io\xc4Uo\xb0z\xf7V\xf3\xc1\x1eB\xf6q\xcd_Sso=\xa4|\x01\xdbi\xe05\x18\xbe8\x8cR\xc3a\xc9j\x1f)\xb3Z[Y\x82eF;\xab~\x00\x02\x1fP\xf7\x8d\xf8PY\x8f\x95\xcak\xee\xefX\xad\x87\x94\x1e\xa4GM\xdcQ\x1c^=L\xd8u\xe2\xd0n<?8\xa5\xa4\xcc\xa8G\xef5[\x02\x1f\xd0\x1a\xf1D\xeb\xb2\xde\xbbp.\x98\x89\x1a\xab\x0f\xe4u<\x9d\xf7\xdc-:!e\xc8~\xb7\xa9
;\x8b\xc3\xac)\xabp<\x99\xf0\xfaKy\x0f9\xeeY\xcft\xbe\xea\x07 \xf0\x01\xaf\xbf\x11+\xc3\xd2\xde@\x1ezF\x9e\xcex\x5c\xe9\xb1\x15Mj\x18ei\xa5Z\xc6\x0f^\xc3\xa3\x84\xe0\xd9\xda\x12,S\xa4\xb4\xfdf\xf1\xdc\xc7\x02\x1f\xd0\x860\x11\xbe\x85\x97\xcd\x98\x0b'O\x8f\xb5\xd4\xca\x03\xf9,\xb6\xe7Y\xf6\x0a\xa6\x84\xcc\xab\xc4`\x16\xc2N^c\xf9\xb1\xbd\x8e}\xb1<K\xdc/,\x818.9\xafq\xa1,\xad\x06\xa4\x86\x89\x5c\x13\xad4\x90\x8f\xb2\xf9\x86\xe4\xc2\x07\xee\xb8-\x1f\xa4\xc5c9[\xc2\xdd\x5c\x14\xcf\xf7\xa0\xa3\xc7;\x84\x82\x9d\xb8\x85\x7f\x0f\xe3U[Y\xda\x12^\xcb\x0a}\xe3\xc4\x952\x0e\xe2c\xaf\xaa\xb9w\xd8\xb5z\x91a\xe8\xb9h\x83\xeb\xc4\xe3\x12j\x9b\x0eC\x00^\xf6\x17h\x81\x0f\xb8\x0f\x13\xdb%\xbb\x1c+\xc3\xb2\xf2\x0f\xffy\x87e7\xe2m\x8cZ\xf2\xb4v\x1d\xd9O\x1d\xe3\xbd\x18\xec\x86m\x0au\x09\xc2k\xea\xaa\x22\xccmd\xe5\x85\xdc\x83.\x97`\x09\x8f\xfbE\xe2\xbe\xe1\xd8\xbe\x88\xeb\x94\x87\xedd\x19_\xc4\x0c\xe9\x82\x0f\x9a\xaa0q\x93)\xc3\xd2\x86\x0f\x93\x8d\x06ng\xdf\x04\x8e\xf6}\xd9*\xb609\xe1{10\xecw,\xec\xd5]\x85\xa3\xec}f\xd4\xd5\xe3\x18{\xeb\xcek\xfe\xdaf<\xe6Wq\xa8w\xa1\xa5[\x04>\xa0*L\x1c\x99\xa8\xb1\xd2@\x902Q#|X\xbe\x9bx\x93c\xad\xba\xfa/Y\xf1\x9c\xb6W\xf1\x03\xffI\xd7\x9fS\x8dU8\xa69\xe8A\x09\x96\x10zgY\x0d\xe7\xbe\xf7\xf3\xc3\xe25q\x11\xbf\x044~\x9e\x9f\xc0\x07\xeb\xfd\xc1\xb3U\x11&\xae{R\xe5\xbe\xcbR\x86r\x8fj,\xbd\xb6\x19\x87\x92X\xcd\xdf\x5c\xe8-\x0f\xc1\xe6Y\xd6L\xafm\xdb\xbe<\xce\x12xN\xfb0!,~1\x1ef\xb3/\x81\x18lg\x1f\xf7\xfa\xe5M\x06?\x81\x0f\xd6[\xd5\x9b\xac\xa1\xdc\xd5\x86\x83QV}\xae\xdb\xcd\x83P\x98z\xbc\x0ej,nO3\xc7\xf2Q\x9c\xa8\xf2\xf5\x1e\x06\xbd\x87\x81gT\xf3\xd7\xae\xb3\xfe\xac\xa6\xf10\xf4\x9d\xcfyS\x1b\xf1K\xc1US_\xd0\x04>X\xdf\x0f\xa0aE\x988\xb7$\xd7j\x03B\x96\xde\xbb\xf7*~\xd8\x84@\x91\xb2\xa6\xeaF\xa66\xdf2\x8fe\x18\x96\xbf\xca\xe6\x9b\xa8r\x19CD\x186=|\xb0\xbd\xfd`\xbb\x5c\xf5s\x8d3l\xeb,\xffw\xd1\xb7SF\xc2\xf3)\xb6a\x96\xb6\x1aIR\xf0\x8bC\xbds\x9d\xe3g\x96.\xac\xafq\xc5\xf5z\xf7V\xab\xaa\x08vp3!\xb8\x85\xe3\x96rN\xd8G+ \xacj\xf6uq\xbf\x835\x0a{gY\xfd^\xbd\x10\xeeNb 
:K\xbc\xafW-y\xbe\xfb5~%\xbc\x0e\xf7\xfa\xf8\xe52\x9c\x0e\x13\x8b-\x1fe\xf3\x9f\xa7\x19\x86z\xcf\xc2)\x01\xb3\x0e\x7f\xeb\xe1\x83\xf5\xecq\xa8Z/\xf7y\xd7ja\xf5\xec\xf8\x0c\x13?4?\xb5\xf2I<\xf1=\xb5ga\xac\xb5\x17z\x1c\x1f\xd5\x0c{7\xf1\xd8=\x0e=D\xe1\xbc\xcc.\x95C\x8a\xcfw\x96\xe06\xee\xeb)\x06\xe1\xef\xb1\xd8\xc2d\x8e\xd0\x03{<\xe7\xcd\x85\xd7\xd1\x8bx\xaa\x87\xc0\x07$\xbd)\xe7\x15\x1f:\xb9\x96Z\xa9\x94\xe1\xd6\xeb8Qc\xda\xef_'\xdc\x86\x09\x1c\x8buR#\xec\x85\xa1\xda\xad\xd0+\xd4\xe1\xd9\xaaG\xd9l%e6\xfa\xfe\xe5#\x04\xf7b\x0bA\xedq\x0c\xf5\xd7s\xdc\xdc\x8bX\xecZ\xe0\x03*\xdf\x94\xad\x97\xdb\xde@\x1ez_\xb7\x13v=(\xf9py\x95\xa5\x0f\xc9?3\x81c!\xc71|\xb8\xa7\x9c\xb3\x17\xbe`\xbd\x1dV\x04\xe9\xf2\xdf]\x0c \xfbs\xdc\xc4\xee:,\xff\x17{\xfc\xc2{l\xf8\x9b\xfbbv\xd7\xebw3\xc3M\xd5\xee\x15\x15\xf8`\xbd>\x84\xaa\xce\xaf)\xeb5b\xf1\xc7\xa7\xaa\xf7\xf5^\xe5\x84\x9ax}\xeaL\xc1\xb1\xd6o\x5c\x9e\xb8\xdf\xb0\xeb\xab\xd8\xc4\xd7m\x13\xaf\xa1|\x9d\xbe|\x84\xd3fb\xaf_x\xce\x875\x83\xdfFVs$F\xe0\x83\xf5R\x15\xe6F\x9ah\xe5\xc7'e\x080\xf5\x8d>\xf5x\xee\xce2D\xc4\xd4\x00\x14\xda2eh\xf3\xb0'\xe7\xca\xa6\x0c]\xa7\xce\x1e_\xbb\xca\x00qVo\x1e\x83_\x9d\xf3\xfc\xf6\xeb\x04d\x81\x0f\xd6\xebC\xa8l\x88\xe9\xd4z\xb9+=>\xc3,mH,y]\xe3\x9a\x138\x8e\x16Q\xdd\x7fM\xa5\x84\xe7\x9b>\x145\x8f\xc3\xb0UC\xd7\xd7q\xe2BJ\x98\xd9^\xd7\xf3Jc\xf0\x0b_\xd2\xdei\xf8\xb5&\xf0\xc1\x1a\x85\x89\x94\x9an\xca\xb0\xacV\xcaP\xfa,\xeb\x1a'O\xe0\xf0\x1ah\xcc0a\x9fq\x0f\xdeW\xc2)\x22)\xe1l\xf4\xe0=&e\xd8\xf2\xd9\xa2\xd7\x95my\xf0\x1b\xd7\x08}\x02\x1f\xf0\xa90W6\xc4t\xd8\x83u,\xbb\xfc\xc1\x19>4S&j\xd4^\xd7x\x86\x09\x1c;\x8e\xc8\xdc_\xaeR\x86s\xcf\x16p\xf7\xbbK~\xba!\x98T\x0e\xe5\xde\xf7H\xc7\xd7b^\xe3\xb6\xd7V\x0c})\xc3\xe0\xc9\x7f\xaf\x02\x1f\xf4\xff\x03h\xab\xe2\x03\x7fR\xf1^\xdas|\xee\xcd\xbc\xaeq\x9c\xc0q\x9a\xb8\xbb\xd7\xc2|R?\x80\xaf\x1a~\x1d\xed,\xf9u{\x94\xf0%\xe5S=\xd25\xd6|\xde\x8e\xf7\xb1\xceR\xfe\xde\x93\x0bz\x0b|\xb0\x1eo\x1aeo\x0a\x07\xca\xb0\xacT\xeaD\x8dy\x87[S\x7f\x7fw\xd6\xc2\xae\xd4\x0a\xe1MO\xd6\xd8Zb\xd8\x1b
\x16\x17OS\xde{\xa6\x8c\x1c\xa4\xbe\xbe\x9e\xc6\xfb\xf2\x1ai\x80\xc0\x07=\x960\x11\xe0|\xd6ezh\xec\xf8\xa4,\xb94\xf7\xba\xc6&p\xb4\xee\xd87\x1d\xd0\xf6\x96\xf4\xb8SK\xb0\x5cN+\xf1\x14\x83\xcc\xf3\xc4\xbb\x1c\xaf\xf9k\xf1\xba\xa9\x1b\x12\xf8\xa0\xdf\xaa\x86DrM\xb4R\xa9a;o\xf0\xf5\x90r\xd2\xfc\x86\xd7\xc6\xc25\x16\xf8bx\xdc_\xe2k6\xe5\x1c\xc5Q\xc2k:u2\xd1:\xbf\x16\x1b\x0b\xbb\x02\x1f\xf4\xb7\x07!\xbc\xe1\x96\x9dcs\xac\x0c\xcbJ\x8fO\x9e\xf8\xc1\xd9\xd8q\xaa9\x81\xe3\xa9\x09\x1c\x0b\xd5d\x8f\xdcxI\xaf\xd9\xf0\x98Sz\xa4+\xeb\x0b\xc6\xd7\xe2\xa8\xc6kq\xb8\x86\xef\x11\x8f\xb2\x1a\xe7\xe8\x09|\xb0\x9ea\xa2\xaa\x0c\x8b\xf5rW{|\xb6j\x04\xafF\x8fS\x1c\xc2\xb7\x02\xc7\xe2\xa4\x9ew\xb5\xd7\xd0k)\xbc>vk\xbe7\xcc\xfa\x9aMy=\x5cg\x89\x13\x7f\xe2\x17\x99\xd4\xa1\xdd\x935\x1c\xdaMy\x8d\xa4\xfe-\x0b|\xd0S\x07\x15\xdf\x0c\x8f\x94aY\xa9\xd4\x89\x1a\xcf\x17t\x9cRC\xe4\xf6:\xaco\xdap\xa0~\x95%\x0eU\xce\xdb\xb6\xb1\x17\xffY\xcd_\x9b\xb5\xd7v\x9c\xf8\x9a\x1d\xd5\x9c\x04\x96'\xb6\xd7F[\xbf\x80\x84 \xdat\x0fd\x8de\x16\xcf\x04>XS\xf1\x9bx\xd9\x87\xc0u\x1f*\xfcw\xf8\xf8\x84\x0f\x86\x94a\xb1\x85\xf5\xc2\xc6\x9e\x95\xd4%\x9cr\x138j;\xa9\xd1\xb6;3\xbe\x8e\xc2k\xe3\xc5\x92^\xb3)\xabi\xdc\x7fA9\xab\xf9Z\xac3\xb4\xfb\xa4\xa5K\x00\x86\xc7\xf4~\xf1\xd8\xce\x9a\x08~\xf1\xef-\xb4c\xca)\x1f\xc9!X\xe0\x83\xfe\xa9z\x03\xd0c\xb3\xc2\x9e\x80\x1ao\xd0G\x0b.\x97\x93g\xe9\x138\xd4\xe6\xab'\xb5\xbdB\xdb\x9e\xd5\x091!P\x14\xdbEV\xbfgo\xd6\xd7l\x08\xa4__\xe4\x17\x94\x9aC\xbbm\x9c\xb5{\x7f\xfcv\x1f\x04\xbf\xd1,\x8f3\x06\xc6\xd0\x1e)\x85\xd8\xcf\xeb\x8c\x00\x08|\xd0\xaf@1\xac\xf8&>wy\x0f\xe6R\xb5\xe2\xc9\xbd\xe4\xf3\xa0f\x15?(R\xefc\x7f\x9d\xeb\xa1\xcd\xd8\xb6\xa9\xe7V\x85\xd0\xf7^YH\x08\xa1+\xf4\xb2\xc5\xa0\xf7~I\x18\xb8i\xf8\xfd\xa4\xce\x17\x94\xd1\x9c_P\xf2,}h\xb75\xefa\xb1\x8d^\xef\xb1\x0f\xef\xc1\xa1\xf7\xf5{\xf1\xb8\xe61\xa8?\x9a\x16\xaa\x13\x8f\xef\x5c_\xde\x05>\xe8\x97\xaa7g\xbd{\xab\xfb`\xd8\xca\xd2{e\xf2e\x14\xc3\x8eC\xfb\xa9u\xbe\xf4\xf2\xd53\xaa\xb9\xff\xc3\x90p[l\xaf\xe2\xe5m\xf1\xb3\x0f\xb3\xbb^\xb6\xb2
 p3\xc3}\xa6\x84\xb0\x94\xf0q\xda@\x9d\xc8:C\xbb\xbb-:\xb7t/\xe1\xb8>\x8bA\xee\xfe\xd8\x86\xed\xaa\xe6\xf1}\xdda\xdd\xc2\xcc\x02\x1f\xf4'PT\xf5\x1e=_@u\x7f\x9a\x0b\xe3\xf7.\x97\x5c\x0c;\xf5\x83s;\x9e7FZ\x80\xb9*.\xde\x9d\xe3&\xea\x94\xe3\x08ao\x98\x18\xba\x92\xce\x19\xac\xb1\x9aFcA\xb3\xe6\xd0n\xbe\x80\xe2\xd5\x8b\x08|\xd3l\xceq\x9f\xc7\xb3\x9c\x87-\xf0A?\xc2^\xd5\x8c.eXV{|\xf6\xb2\xf4\xd2\x19K\xed\xb9\x88!!u\xf8\xf1\xa0%\x1f\xb2]\x09}\xa1W\xf4x\xc1ws\x1f\xf6R\xbf\xcc=Jx\xbd\x86}R{\xecF\x0d\xf7F\xe7Y\xfa\xb9\xa5\xe3\x15\xff]O\x1a\xce]\xb4\x10\xf6f\x0a\xd8\x02\x1f\xf4CU\x99\x8f\xdcz\xb9+\xfdPH\x1d\x0e=_Q1\xec\xd4\x90i\x02G\xfd\xd0\x17>\x9c\x9f/\xe8\xe6CP\xdfz-\xecU\x85\xf7\x94\x89\x04\xe3,\xad\x87\xb1\xf1s\x82g\x18\xda]\xe5\x17\xd9e\x7f\xf9yw\xd6\xb0'\xf0A?\x02E\x18\xa2)[V\xe9z\xda\x9a\x96,-Lm\xd6\xd8w\x15\xa1\xa4\xce\xda\xa6OL\xe0\xa8\xdd\xbe\xe1\xb8~9knRE8\xef\xf2\x9d\xe2v\x873|\x91\xdb\xa9x?\x09\x81\x22\xb5l\xd0hA\xed\x15B\xe4i\xe2\xee\xcfV\xb5\x22L\xfc\xbby\x9c-\xbe\x177\xdc\xfe\xe3y\xdf\xc7\x05>\xe8\xbe\xaa7\x81\x91&ZY\x18\x0f=\x00\xa9\x135\x8eW|\x8ee^#\x90\x8c\x1d\xdd\x99BLx=\x1c\xce\x11\xfc.c\xd0\xdb*9\xcf\xb3\xea5\xb4U\xf1zM\x0d\x15\xf9\x82\x8b\xb7\x8f\xba\xf0z\x0cm\x10{\xdd>\x9b\xdd\x9d\xb3y\xde\xd0M\xdf\xc4/a!\xe8\x8d\x9ah\xebAq#\xfe\x12!\xda\xf8\xca[gY\x8de\x8a\xa2\xf3\x9bo|0\x5cQ\xa0\x08\xe7\x86\xbdW\xf6\xd8B/\x80#\x0b\xad\xfb2\x10\xfev\xc3\xdf\xe6N\xc9{\xceu\x0cp\xe1}\xe9\xc4\xea8\x9d9\xb6\x8f\xe2q\xbd?\xbe\x8f\x12>WBP|\x15\x8f\xf5\xd9\x22\xbe\xfc\xbd\xe1\xd0@\xa7\xe9\xdd\x83\x0e\x8a=~jb\xf6\xf3\xd8~?\xb8\xb5\xe9q\x19\xd2\x85\xee~\x8b\xcc\xb3\xf2s\xc3\x0e\xf5\x08\x00 \xf0Aw\xc3\xdeVV~\x82\x7f8\xff\xc3D\x0d\x00\x04>\xe8\xb0<+/\x9bp\xa0\x0c\x0b\x00\x02\x1ftT,\x89QV\x86e\xd9+5\x00 \xf0\x01\x0d\xcb+\xae\xb7^.\x00\x02\x1ftU,\x8aZ6\xbd\xffxE+5\x00 
\xf0\x01\x0d\x84\xbd\xaa%\xba\xac\x97\x0b\x80\xc0\x07\x1d\x17\x86j\xcb&j\x1c)\xc3\x02\x80\xc0\x07\x1d\x95\xb0DW\xa8\xc8\xaf\x0c\x0b\x00\x02\x1ftXU\x98S\x86\x05\x00\x81\x0f\xba*\x96ayR\xb2\xcby\x5c\xa6\x09\x00\x04>\xe8\xa8q\xc5\xf5\xca\xb0\x00 \xf0AW\x0d\x06\x83\x10\xe6\xca\xd6\xcb\x0deX.\xb4\x14\x00\x02\x1ft3\xec\x852,y\xc9.\xa1\x0c\x8b\xde=\x00\x04>\xe8\xb0\x10\xf6\xca\xca\xb0\xe4&j\x00 \xf0AG\x0d\x06\x83\x9d\xe2\xe2i\xc9.\xd7E\xd8S\x86\x05\x00\x81\x0f:\xac*\xcc\x8d4\x11\x00\x02\x1ft\xd4`0\xd8\xcb\xca\xd7\xcb=\xb7^.\x00\x02\x1ft\x9b\xde=\x00\x04>\xe8\xab\xc1`\x90g\xe5eX\x0e\xad\x97\x0b\x80\xc0\x07\xdd\x0d{[Yy\x99\x95P\x86\xc5D\x0d\x00\x04>\xe8\xb0<+/\xc3b\xbd\x5c\x00\x04>\xe8\xaa\xb8^\xee~\xc9.\x97E\xd8\x1bk)\x00\x04>\xe8\xae\xbc\xe2z+j\x00 \xf0AW\x0d\x06\x83QV^\x86\xe5X\x19\x16\x00\x04>\xe8n\xd8KY/7\xd7R\x00\x08|\xd0]a\xa8\xb6\xac\x0c\xcb\x912,\x00\x08|\xd0Q\xb1\x0c\xcb\xb3\x92]\xae3eX\x00\x10\xf8\xa0\xd3\xaa\xc2\x5c\xae\x0c\x0b\x00\x02\x1ftT,\xc3\xf2\xa4d\x97seX\x00\x10\xf8\xa0\xdb\xaaz\xf7\x94a\x01@\xe0\x83\xae\x1a\x0c\x06!\xccm\x97\xec\x12\xca\xb0\x5ch)\x00\x04>\xe8f\xd8K)\xc3\xa2w\x0f\x00\x81\x0f:,\x84\xbd\xb2\xf5r\x8fL\xd4\x00@\xe0\x83\x8e\x8aeX\x9e\x96\xecr]\x84\xbd\x5cK\x01 \xf0Aw\x8d+\xae\x1fi\x22\x00\x04>\xe8\xa8\xc1`\xb0\x97\x95\xaf\x97{n\xbd\x5c\x00\x04>\xe8\xb6\xaa2,#M\x04\x80\xc0\x07\x1d5\x18\x0c\xf2\xac|\xbd\xdc\xe7\xd6\xcb\x05@\xe0\x83\xee\x86\xbdP\x86\xa5\xac\xccJ(\xc3\x92k)\x00\x04>\xe8\xae0\x94[V\x86\xe5@\x19\x16\x00\x04>\xe8\xa8\xb8^\xee~\xc9.\x97\xd6\xcb\x05@\xe0\x83n\xcb+\xae\xb7\xa2\x06\x00\x02\x1ft\xd5`0\x18e\xe5eXN\x95a\x01@\xe0\x83\xee\x86=\xeb\xe5\x02 \xf0A\xcf\x850WV\x86\xe5H\x19\x16\x00\x04>\xe8\xa8\xb8^\xee\xb3\x92]\xae\xb3\xea\x22\xcc\x00 \xf0A\x8bU\x85\xb9\x5c\x19\x16\x00\x04>\xe8\xa8X\x86\xe5I\xc9.\xe7\xca\xb0\x00 
\xf0A\xb7U\xf5\xee\x99\xa8\x01\xc0J\xbc\xa1\x09`~\xb1\x0c\xcbv\xc9.\xc7\xb7\xb7\xb7\x17Z\x0a\xa0{\xbe\xf9\xf8\x0b\xe3\xf8\xcf\xfcK/\xbf}\xd5\xc5\xe7\xa0\x87\x0f\xe6\xf4'\xff\xf8\x07\xc2\x17\xa7\xb2\xde=eX\x00\xba\xed$\xbb[9\xe9e\x11\xfe\xce\x8am$\xf0\xc1\x9ay\xfc\x9d\x1f\xdb\xca\xca\xd7\xcb=2Q\x03\xa0\xbb\xbe\xf4\xf2\xdb!\xf0]\xc7\xff\x86\xa2\xfa/\x8a\xd0wUly\xb1=\x12\xf8\xa0\xe7\xfe\xdc\x1f\xbe\x91\xfd\xea{\xbf\xf2\xf9\x92]\xae\x8b\xb0\x97k)\x80\xce{}$'\xd4[\x0de\xb8\xbe\x17\x86|\x8bmG\xe0\x83\x9e\xfa\xdc\xcb\x1f\xac\xda\xc5P.@?\x8c\xb3\xbbSt&\x09\xc3\xbd\x1f\x16\xa1\xef\xa2\xad\xc3\xbd\x02\x1f\xcc\xe8\xa7\xfe\xe0G\xb3\xf3_>+\xdb%\x94a9\xd1R\x00\xdd\xf7\xa5\x97\xdf\x0e\xa7\xe6T\xbd\xa7\x87\xc9{a\xb8\xf7U\x1c\xee\xdd\x12\xf8\xa0\xe3~\xf7\xdf\x7f\xb7j\x97\x91V\x02\xe8\x95\xd4\x95\x92\xc2y\xdda\xb87L\xf28)\xb6\xa1\xc0\x07\x1d\xf4\xd7\xbe\xfb(\xfb\xd6\xc5\xaf\x97\xed\xf2\xdcz\xb9\x00\xfd\xf2\xa5\x97\xdf\x0e\xe5\xb5\xcek\xfeZ(\xc8\xff~\x9c\xe41Z\xd5$\x0f\x81\x0fj\xfa\xf1?\xfeL\xf6\xad\xb3\xd2\xb0\x17\xce\xf1\xc8\xb5\x14@/\x8dg\xfc\xbd0\xc9\xe3E\xb1]\xc5I\x1e[\x02\x1f\xb4\xd8\x9b\xbf\xf3\xe3\xd9w\xbe\xf3\x9d\xb2]\xac\x97\x0b\xd0S_z\xf9\xed\x10\xf8\xae\xe7\xb8\x890\xdc\xfb\xb0\xa6\xdf\x9e\xc0\x07-\xf3\x17\xfe\xef\x0fe\xbf\xfaK\xff\xa2l\x97\xcb\x22\xec\x1di)\x80^\x1b7t;\xa1\xa6\xdf{q\xb8\xf7`\x91\xc3\xbd\x02\x1f\xd4\xf0c\xd7\x95\x7f2\xca\xb0\x00\xf4_\xd3_\xec\xc3p\xef\xd7\xb3\x05\xd6\xf4\x13\xf8 
\xd1_\xfd\xbd\x9f\xa8*\xc3rz{{{\xa6\xa5\x00\xfa-\x96h9^\xd0\xcd\xdf\xd7\xf4kt\x09\xb77\x1c6\xa8\x16&j\xbc\xfa\xd6\xff,\xdd\xe7\x1f\xfd\xdc\xdf\xfc\xc56L\xbd\x07`).b8[\x940\xdc\xbb[|\xae\x84\xde\xc4\xb0\x8d\x8b\xa0y5\xeb\x8d\x0dnoo\x1d2\x886\xbe\xf2\xd6Y\xfc#\xfb\x84P\x86\xe5W\xfe\xd9/O\xfd\xbd|\xefg\xb2\x9f\xb9\xf8\xef\x1a\x10\x80E:\x8e\xc1\xef\xac\xee/\x1a\xd2\x85\x09~\xf4\xf7>\xf7\xfd\x7f\x87\xf5r\xcb\xca\xb0\xec\xbe\xf5\xd3\xd9\xdf\xf8\xad\xefh4\x00\x16-\xf4(\xbe\x7f\xbf\x84[\x9dI\x1e\x02\x1f\xbc\xe6\xc7~\xeb/g\x8f>\xfc\xa9\xec\x87\xbe\xfb\x93\x1f\xfd\xff\xcf\xfc\xf6\x8f\x94\x96a\xf9\xf2\x9f\xfa\xe1\xec\x87\x7f\xf7\x8f4\x1c\x00\xcb\xf2\xd1\x12n\xd9]M\xbf\xa3\x94\x9a~\x86t\xe1\x81\xcf\xff\xec/\xfc\xceO|g\xe3O\xdf\xff\xffs\x7f\xf1e\xf6\xaf~\xf1\x9fO\xdd\xff\xe7\x7f\xf6\xed\xec\x17\xfe\xd3ok8\x00V\xed4\xbb\x1b\xee\x9d\xb8\xde\xafI\x1b\x10\xbd\xb9\xfd\xd5\xf1Od\x1f\x87\xbd\xe03\xbf}S\xfa;?\x9b\xfd/\x0d\x07@\x1b\x84%\xdc\x9e|\xf3\xf1\x17BQ\xe8\xfbI\x1e\xdf_\x04@\x0f\x1f\xc4\xb0\x97M\x98m\xb5\xf1#\x7f\x94\xfd\x89\x1f\xfd\xcd\xec_\xbe\x7f\xfa\xa9\xdf\xf9\xfb?\xf7\xb7\xb2\xbf\xfb\xeb\xffM\xe3\x01\xd0F\xa1\xc7\x22\xf4\xf6\x1d\x855\x80\x05>\x84\xbd)a\xef\xa1?\xfb\xd9?\xc8\xfe\xcf\xef\xff\x9b\xec_\xff\xbb\x7f\xfb\xd1\xff77\xb7\xb2\x7f\xba\xf1#\xce\xdd\x03\xa0\x0b\xc1\xef@\xe0C\xd8\xabQG\xe9\xef\xfc\xe4\xb7\xb3_\xfa\x8f\x17\xd9;_\xfc\xf3\xca\xb0\x00\xd0f\x9f\x18\xda\x15\xf8\x10\xf6jz\xe7\x07\xbf\x95\xfd\xf5\xef\xfe\xa6\x06\x04\xa0\x8d&N\xde\x10\xf8\x10\xf6f\xf0\xf7\xfe\xf0\xd7\xb3\x9f\xfe\xfd\xff\xaa!\x01h\x830l\x1b>\xd7\x8e\xa6\xad\xc6!\xf0!\xec\x09}\x00t\xd3ev7l{\xf2pF\xee$\xca\xb2 \xec\xcd\xe8\xbf\xfc\xc0\xe7\xb2\x9f\xce\x04>\x00\x96\xae\xf6\x12k\x02\x1f\xc2\xde\x0c\xde\xbe\xfd\x1f\xd9\xcf\xff\xde\xafiT\x00\x96%\x0c\xdb\xdeO\xc2\xb8\xaa\xfb\xcb\x02\x1f\xc2^M\x9f\x1f\xfc\xef_\xfe\xf9\xdf\xfd\xb5\xafiU\x80\xb56,\xb6gK\xb8\x9f\xf3\x18\xf2\xc6\xf3\xdc\x88\xc0\x87\xb0W\xcf\xf1\xaf^<\x1fiU\x80\xf5\xf6\xcd\xc7_8X\xf0]\x84a\xdb\x8f\x8a&7qc&m 
\xec\xd5\xf8\xe3\xfb\x8d\xcb\xaf\x09{\x00\xc2\xdeVq\xf1r\x017=qY\xb4&\xe8\xe1C\xd8\x13\xf6\x00\xa8\xa7\xe9\xde\xbd0l{\xf4z\xed\xbc&\xe9\xe1C\xd8\x13\xf6\x00H\xf4\xcd\xc7_xT\x5c\x5c\x15\xdb\xc6\x9c7u\xbf\xd6m>\xcb$\x8c\xba\xf4\xf0!\xec\x09{\x00\xa4\xdb\x9b3\xec\x85a\xdb<K\xa8\x9d'\xf0\x81\xb0\x07\xc0j\xe43\xfe^X\xf2\xec\xa8N\xed<\x81\x0f\x84=\x00\x96\xec\x9b\x8f\xbf0,.6k\xfc\xca\x5c\xb5\xf3\x04>\x10\xf6\x00X\xbe\xd4\xcf\x86\x8f\x96<\x9b\xb7v^\x93L\xda@\xd8\x13\xf6\x00\xa8\x90X\x8a\xa5\xd1\xdayM\xd2\xc3\x87\xb0'\xec\x01Pm\xda\xe7C\x98\x841\x8eA\xefU[\x1f\xbc\xc0\x87\xb0'\xec\x01P\xed\xf5\xda{\x8d,y&\xf0\x81\xb0\x07@\x0b|\xf3\xf1\x17\xc2gD(\xc5\xb2\xd4\xday\x02\x1f\xc2\x9e\xb0\x07\xc0\xf2\x0c\x8b\xed\xddl\x01K\x9e-\x8bI\x1b\x08{\x00\xd0s\x9f\xd1\x04\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 
\xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{Z\x15\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\xd66\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xa2\xc0W\x84\xb3G\xc2\x1e\x00@\x8f\x03_\xe1\xa8\x08i[\xc2\x1e\x00@\x0f\x03_\xec\xdd\x0b\x01m\xd4\xc0m\x09{\x00\x00m\x0b|\x85\x83x9W\xb8\x12\xf6\x00\x00\xda\x1b\xf8\xee\x83\xd5f\x11\xda\xf6\x84=\x00\x80\x1e\x05\xbe\x18\xf06'\x84?a\x0f\x00\xa0\x0f\x81oB\xc0{Rg\xf2\x86\xb0\x07\x00\xd0\xe2\xc0\x17\x83\xdd\x93\x84\x10(\xec\x01\x00t1\xf0\x95\x04\xbb\xca\xe0%\xec\x01\x00t;\xf0\x95N\xde\x10\xf6\x00\x00:\x10\xf8\x8a\xd0\x16B\xd6f\xc9.\x07\xc2\x1e\x00@\x87\x03_V=l\xbb\xfb\xfa\xe4\x0da\x0f\x00\xa0#\x81/\x06\xb9\xdd\x84]\x0f\x84=\x00\x80\x0e\x06\xbel\xcap\xed\x04#a\x0f\x00`u\x06\xb7\xb7\xb73\xfdb\x11\xe0^\x15\x17\x1b\x89\xbb_\x16
\xdb\xb6\xb0\x07\x00\xb0|3\xf5\xf0\xc5\xc9\x1a\x1b5~E\xd8\x03\x00\xe8R\xe0\xcbfX:M\xd8\x03\x00X\x8d\xdaC\xbaq\xb2\xc6Ka\x0f\x00\xa0\x1bf\xe9\xe1\xcb\x85=\x00\x80\xee\xa8\xd5\xc3\xf7\xe6\xf6W\x1f\x15\x17WY\xbd\xf3\xf7\x84=\x00\x80\x15\xaa\xdb\xc3\xb7'\xec\x01\x00\xf4;\xf0\x1d,\xe9q\x09{\x00\x00\xcb\x0e|on\x7fu'k\xae\xbc\x8a\xb0\x07\x00\xd0\xb6\xc0\x97-\xa7wO\xd8\x03\x00XE\xe0\x8b\x935\xf6\x96\xf1\x80\x8a\xfb\x1a:,\x00\x00\xcdI\x9a\xa5\x1bW\xd6x\xb1\xc4\xc7u]lG\xc56\xfe\x8d\xcb\xaf\xbdr\x98\x00\x00\x16\x1f\xf8\xae\x8a\x8b\xcd\x15<\xbe\x9bb;\x09\xe1\xaf\x08~\x17\x0e\x17\x00\xc0\x02\x02_\x1cb}\xbf\x05\x8f\xf52\xbb\xeb\xf5;\xd1\xeb\x07\x00\x90.\xe5\x1c\xbeQK\x1ek\x98!\x1c\x86\x95\xaf\x8a\x10:\x8e\xb3\x86\x01\x00\xa8P\xda\xc3\x17'k|\xaf\xc5\x8f?\xf4\xfa\xe5\xbfq\xf9\xb5\x13\x87\x12\x00`\xb2\xaa\x1e\xbeQ\xcb\xc3\xde\x91\xb0\x07\x00P\xee\x8d\x8a\xeb\x0fZ\xf6xM\xe2\x00\x00h*\xf0\xc5\xc9\x1a\x9b-y\x9c&l\x00\x004\x1d\xf8\xb2v\x0c\xe7\x1egz\xf3\x00\x00\xe62q\xd2\xc6\x9b\xdb_\xdd*.^\xae\xe81)\xba\x0c\x00\xd0\xa0i=|\xa3\x15<\x96\xe3\x18\xf2\xce\x1c\x16\x00\x80\xfe\x04>\xbdy\x00\x00\xcb\x0e|on\x7fu/[\xfcd\x8d\xd3\x18\xf2\x94T\x01\x00Xv\xe0\xcb\x16\xd7\xbb\x17z\xf3\xc61\xe8]iz\x00\x80\xe5\xf8\xc4\xa4\x8d\x05M\xd68\xcf\x14H\x06\x00X\x99\xd7{\xf8F\x0d\xddn(\x90<\x8eA\xefJ3\x03\x00\xf4'\xf0\x85\xde\xbc0d;\xd6\xb4\x00\x00-\x0b|on\x7f5\x84\xbdY&k\xe8\xcd\x03\x00\xe8B\xe0\xcb\xea\xf7\xeeY\xee\x0c\x00\xa0\x03>\x9a\xb4Qc\xb2F\xe8\xcd\x0b\x93/,w\x06\x00\xd0\x11\xf7=|\x07\x15\xfb\xe9\xcd\x03\x00\xe8x\xe0\x1bM\xb9\xderg\x00\x00]\x0f|q\xb2\xc6\xc6\x83\x9fY\xee\x0c\x00\xa0O\x81/\xfb\xb8wOo\x1e\x00@O\x03_\x08x{z\xf3\x00\x00\xfa\xe9\xff\x0b0\x00\xb2\x10\xef\xec0\x8f}\x9d\x00\x00\x00\x00IEND\xaeB`\x82\x00\x006\xc9\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe 
ImageReadyq\xc9e<\x00\x006VIDATx\xda\xec\xddO\x8c$Y~\x17\xf0\x88\xf5X\xfe\x87]\xbd\xd2\xf2O+\xdc\xd9\x1cX\x0d\x18U\xad\xc0bA\xac\xaa\xc6B\xc2\x5c\xdc5\x12\x17\x9f*\xe7\xc0\xc1\x87\xa5kN\xec\xad\xa3%\x0e\xbem5#$$\x0e\x93u\xc2\xe2\xe0\xa9>\x1a\x1bu\x95\x16\xc1\x98?\xda*\xb3\x92Y\x0c\xee*\xc4\xb2\x96\xd0j\xbbla\x0c\xd8n\xe2M\xbd\xda\xa9\xe9\xc9|\x11\x91\x19\x99\x19\x11\xf9\xf9H\xa1\xec\xae\x8c\xca\x8c|\x99\x95\xf9\xcd\xf7\xe2\xfd^\xfe\xea\xd5\xab\x0cH\xdb\xfa\xca\x97\xf6\xcb\x8b\xd1\xf5{\x1f\x1ei\x0d\x00\xfa\xe63\x9a\x00j9\x8c\x1b\x00\xf4N\xae\x87\x0f\xd2\xb6\xbe\xf2\xa5Qy\xf1\x22\xfe\xf7\xed\xeb\xf7><\xd1*\x00\xf4\x89\x1e>\xa8V\xdc\xf9\xb7^>\x00zG\x0f\x1f$l}\xe5K\xf7\xca\x8b\xcb\xf0\xcf;?~p\xfd\xde\x87\x97Z\x07\x80\xbe\xd0\xc3\x07i\xfb\xaf\x85\xbd\xa0\xd0,\x00\x08|0\x1c\xd3\xc2\xdd~\xec\xf9\x03\x00\x81\x0f\xfa\xac\x0cu{\xe5\xc5\xfdiWe7=\x7f\x00 \xf0A\xcf\xa5&h\x14\x9a\x07\x80\xbe0i\x03\xa6x\xad\x14\xcb,o]\xbf\xf7\xe1\xa9\xd6\x02\xa0\xeb\xf4\xf0\xc1t\xe3\x1a\xfb(\xd1\x02\x80\xc0\x07=V'\xcc=\x8c=\x81\x00 
\xf0A\x9f\x94!n\x9c}\xba\x14\xcb,c-\x06\x80\xc0\x07\xfds\xb8\xa4}\x01@\xe0\x83u\x8b\xa5X\xb6\x9b\xfcJ\xec\x11\x04\x00\x81\x0fzb\x9e\xf0\xa6\x97\x0f\x80NS\x96\x05\xa2\xb8z\xc6\xf7\xe6\xfcu%Z\x00\xe8,=|\xf0\xb1Ez\xea\xc6\x9a\x0f\x00\x81\x0f\xbao\x91\xd0v`}]\x00\x04>\xe8\xb08\xf1\xe2\xfe\x827\xe3\x5c>\x00\x04>\xe8\xb0qGn\x03\x00\x04>h[\x5c-c\xb7\x85\x9b\xba\xafD\x0b\x00\x02\x1ftS\xd1\xe2m\x09|\x00t\x8e\xb2,l\xb48\xd1\xe22\xab\xbf\x94Z\x1d\x0f\xae\xdf\xfb\xf0R\xeb\x02\xd0\x15z\xf8\xd8t\xe3\x96\xc3^PhV\x00\x04>\xe8\x8ee\xcc\xac\xddW\xa2\x05\x00\x81\x0f:\xa0\x0ce\xfb\xd9\xe2\xa5X\xa6\xdet\xe6\x5c>\x00\x04>\xe8\x84e\x8625\xf9\x00\xe8\x0c\x936\xd8H\xb1\x14\xcb\x8b%\xdf\xcd\xdb\xd7\xef}x\xa2\xb5\x01X7=|l\xaaU\xf4\xc0\x8d53\x00]\xa0\x87\x8f\x8d\xb3\xa4R,\xb3(\xd1\x02\xc0\xda\xe9\xe1c\x13\xed\xaf(\xec\x05\xce\xe5\x03@\xe0\x835Xe\x08\x1b+\xd1\x02\x80\xc0\x07+T\x86\xaf\xbd\xf2b{\x95w\x99\xdd\xf4(\x02\x80\xc0\x07+2^\xc3}\x1a\xd6\x05`\xadL\xda`c\xac\xa8\x14\xcb,o]\xbf\xf7\xe1\xa9g\x01\x80u\xd0\xc3\xc7&\x19o\xe8}\x03 \xf0\x81\xc0\xb7\x02\x07\xb1\x87\x11\x00\x04>X\x862l\x85\xb0w\x7f\xcd\x871\xf6L\x00 \xf0\xc1\xb0\xc3\x96\xc0\x07\x80\xc0\x07\xcb\xb0\xf5\x95/\xed\x94\x17\xbb\x1d8\x94\xfb\xb1\xa7\x11\x00\x04>hY\x97\xca\xa2\x08|\x00\xac\x9c\xb2,\x0cZ\x5c\xe5\xe2{5w\xbf\xbe\xfd\xb5\x86w\x13~\xefeV\xff\x1c\xc1/^\xbf\xf7\xe1\xb9g\x07\x80U\xd1\xc3\xc7\xd0\x8dk\xecsUn\xef\x94\xdb\xa8\xdc\xe6\x09b\xe7e\x80\x1b\xc5\xdb8\xab\xb1\xbfB\xcc\x00\x08|\xd0\xa2T\xb8z\x96\xdd\x14D\x1e\x95\xdb\xa4\xdc^.rG\xf16\xf6\xca\x7f~\xb1\xdc\x8e\x13\xbb\x1eX_\x17\x00\x81\x0fZP\x86\xaa\xb0\x86\xed\xeb\xc3\xaca\xf8\xf5i\xb9=(\xc3\xd9\xfe2V\xbf\x08\xc3\xb5\xe56.\xff\xf9\xd9r{\x92\xdd\xf4 
\xben\xec\x19\x02`U\xde\xd0\x04\x0c\xd8\xdd\xde\xbd\x8br;\x0a\xbdp\xab\xba\xf3\xd8cX\x84-\xce\xce\x0d\xdb\xee\x9dc;\xf2\x14\x01\xb0\x0az\xf8\x18\xa4\xb8\xaaE\x08Wah5\x0c\xdb\xee\xac2\xecM\x09\x7f\xb7\xc3\xbd\x0f\xe21\xdd\x8b=\x90\x00\xb0tz\xf8\x18\xb20l{\xd9\xa5\x03\x8a\xc73\x8e\xe7\xf09\x8f\x0f\x00\x81\x0f\x16\x0cV]>\xbe0\xdc\xfb\xd23\x05\xc0*\x18\xd2\x05\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x04>\x00X\x91<\xcf\xef\x95\xdb\x9e\x96\x00\x81\x0f\x80a\x86\xbd\x9d\xf2\xe2RK\x80\xc0\x07\xc00\xc3\xdeay\xf1\x8dr\xdb\xd2\x1a\xb0\x5c\xd6\xd2\x05`\xd5A\xef^y1)\xb7\x87\xb7?{\xf5\xea\xd5\xa9\x96\x01\x81\x0f\x80a\x84\xbd0\x84{Rn\xf7\xb5\x06\xac\x8e!]\x00V\x15\xf6n\x87p_\x0f{gZ\x07\x96K\x0f\x1f\x00\xcb\x0ez\x9f\x1a\xc2\x05\x04>\x00\x86\x13\xf6\xea\x0c\xe1\x9ej)X.C\xba\x00,+\xec\xcd\x1a\xc2\x05VL\x0f\x1f\x00m\x07\xbd\xa6C\xb8\xa7Z\x0d\x04>\x00\xfa\x13\xf6\xcc\xc2\x85\x0e2\xa4\x0b@[ao\x9c\xdd\xf4\xd65\x0a{j\xf0\xc1\xf2\xe9\xe1\x03`\xd1\xa0\x17\x86p\x8f\xca\xed@k\x80\xc0\x07\xc0\xf0\xc2^\x18\xc2\x9d\x94\xdb\xf6\x9c7\xa1\x06\x1f\xac\x80!]\x00\xe6\x0d{\xe3\xecf\x08w[k@\xb7\xe9\xe1\x03\xa0i\xd0ks\x08\xf7T\x8b\x82\xc0\x07@\xb7\xc2\xde\xa2C\xb8\xc0\x1a\x18\xd2\x05\xa0n\xd8\x1bg\xed\x0f\xe1\x9ejYX>=|\x00T\x05=\xb3pA\xe0\x03`\xc0ao\x94\xdd\x14R^\xca\x10\xae\x1a|\xb0\x1a\x86t\x01\x98\x15\xf6\xf6\xcb\x8b\xf3\xcc\xf9z 
\xf0\x010\xc8\xb0\x17\x86p?(\xb7\xad%\xde\x8d\x1a|\xb0\x22\x86t\x01\xb8\x1b\xf4F\xd9\x12\x87p\x81\xf5\xd0\xc3\x07\xc0m\xd8[\xf5\x10\xee\xa9V\x07\x81\x0f\x80\xd5\x85\xbdU\x0c\xe1\x02kbH\x17`\xb3\x83\xde([\xdf\x10\xee\xa9g\x00VC\x0f\x1f\xc0\xe6\x86=\xb3pA\xe0\x03`\xc0ao\xedC\xb8j\xf0\xc1\xea\x18\xd2\x05\xd8\xac\xa0\x17V\xcd\x08AK\xaf\x1el\x10=|\x00\x9b\x13\xf6\xf6\xca\x8b\xcb\x8e\x84=5\xf8@\xe0\x03\xa0\xe5\xb0W\x94\x17\xcf3\xb3pa#\x19\xd2\x05\x18v\xd0\x0bC\xb8a\x16\xeen\xc7\x0e\xed\xd4\xb3\x03\x02\x1f\x00\xed\x18\xc5p\x15f\xe3\xee\xc4\x9f\xedj\x16\x10\xf8\x00\x18\x88W\xaf^\x9d\xc7\xb0\xf7)\xf1\x9c\xbe\xe0\xf5\xcbU\x04\xc2S\xcf\x0e\x08|\x00,?\x0c\x9e\xa6\xc2\xd7k\x81p/\xd33\x08\x02\x1f\x00\xc3\x0d\x84e\xf8\xbb\x5c\xd2m\x03+`\x96.\x00Iq\x86\xef}-\x01\x02\x1f\x00\xc3\x0c{a\x96\xefa\xcb7\xab\x06\x1f\x08|\x00tHX\x82-U\xbb\xefI\xb9]k&\x10\xf8\x00\xe8\xa18i\xe3 \xb1\xcb\xc5\xabW\xaf\x8a\xf2r\xdc\xf0\xa6O\xb5.\x08|\x00tCQq\xfdGC\xbde\xe8\x0b\x85\x9d\x9fj.\x10\xf8\x00\xe8\x91<\xcf\xc7Y\xba\x0c\xcb\xb3\xbb3m\xcb\x7f\x87\xf0wQ\xf3\xe6O\xb50\xac\x96\xb2,\x007\x01g\x94\xdd\xacJ\xb1Wna\xa2BrU\x8a2\xe0\xe4\x03n\x8b\xf0\xf8\x8b\x8a\xdd\xa6M\xe4\xd8\xcfn\x8a<[\xaf\x17\x04>\x80N\x84\x9a\x9d\xec\xe3\x82\xc2{]\x0c)\xf1\x1c\xba\xe7\xf1\xbfg1L]\x96\xdbi\x5cAcYB\x98K\x95ayR\xde\xff\xe5\x94\x10|\x19{\x06?H\xdd\xb8\x1a| 
\xf0\x01,;\xe4\x85@\xb2\x9f\xf5\xa3\xae\xdc\xe8\xce\xbfw\xb3;\xbd\x8d\xe5c\xb9\x0d\x81'm\x06\xc0\xd8\xd3\x99*\xc3r\x95\xdd\xcc\xdc\x9d\x15\xe6N\xca\xdb\x08\xe7\xf3=\xf2\x8a\x03\x81\x0f`U!\xef^\x0cx!\xc4l\xf7\xec\xf0G\x15\xd7\x7f?\x04\x96\x8f3\x04\xb1\xd3\x10\x00\xe3$\x8ayU\x95a)\xca\xdb\x7f\x99\xba\x81p>_\x0c\xd7\xd3\x86\xc3\xd5\xe0\x8350i\x03\x18l\xd0\x8b+D\x5c\x96\xdb\xfb=\x0c{\xc1^\x83}C\x8fe(\xa1\xf2A\xf9\xb8_\x96\xdb\xa4\xdc\xf6\x1b\xb6Y\xb8\xbf\x87\x89]\xce\xca07\xa9ys\xe1\xbe\xa7\xd5\xe7{\xe9\xd5\x09\x02\x1f@\x9bA\xefq\xd6\xef\x09\x04\xa39\x7fo\xebN\xf8\x0b\xe7\xd5\x15q\xa8\xb6\xcaQ\xc5\xf5E\xdd\x03\x88\xbd\x80\xd3\x02\xe7\xb9W)\xac\x9e!]`\x88\x0e\x17\x08z\xd7\xd9\xc7\x93#\xc2\xf6r\x8d!\xe5~K\xb7\x11\x82\xef\xe32\xf4=\x0b\xa1n\xda\xa4\x898\xd9\x22\xd5\x0bz\xdct\xb2E\xd8\xbf\xbc\xdd'\xf1\xfeo\xe9\xe1\x03\x81\x0f`1\xa1g\xa9\x0c\x19\xe1\x1c\xb6\x83\x9a\xbf\x12\xce);\x8d\xdby\xd5\xf9i+\xf6V\xf6q\x89\x98\xbdx\xb9H\x8fe\x18\xae}\x18\xcf\xf7+n\x87g\xe3y\x8eG\x15!\xf8p\xce\xe7\xa3\x88C\xc5\xb7\xe7\xf3\xe9\xe1\x03\x81\x0f\xa0\x15G\x89\xc0\x17\xc2\xce\xed\xcc\xd6\x93.?\x88;=j\xdf?\xce84\xbbwg\x9b\xa7\x170\xfc\xce\xfbq\xe8\xbb\xa8\x11$\x8f\x16\x0c\xc2ah\xf72\xde\x87\x1e>\x10\xf8\x00Z\x09J\xe7e\x98\x09\xab>\xdc\x1d\xa2<.\xb7I\xdfk\xc0\xc5\xfaw\x93\xb8\xdd\xad'8\xce\x9aOL\x09\xc1\xef\x1f\x96\xdb\xe7\x13\xfb\x5c\xc5\xf5r\x179\xe6\x97q\x02\xc9\xf3%\xd7\x0f\x04f0i\x03\x18\xaa\xd0\xcb\x17\x86\x22\xc39d\x9f-\x83\xc6x\x88\x05\x7fC\x80*\xb7\xd0\x03\x17\x82\xdf\x83r{7\xab\xbf\xc4YP\xd5\xe3v\xd8\xd2q\x9e\xc6c\x03\x04>\x80\xd6\x82P\xe8\xcd\xbb\x17z\xa7:v^\xde2\x1f\xf3\xe5k\xe1/\x14@\xbeJ\xfcJ\x08\x86\x7f)q\xfdY\x9b\xc3\xde\xe1\xd8\xbc2A\xe0\x03\xa0\xdd\xf0wXn\xa3\xf2\xbfo\x97\xdb\xb3)\xbb}\xae\xe2f\xc6Z\x12\x04>\x00\xfa\x11\xfe\xc2\xea\x1b\xe1\x1c\xba\xdb^\xbf0\xd4\x1df'\xa7\xce\xdd{:m\xbd\x5c@\xe0\x03\xa0\xdb\xc1\xef\xa3^\xbf\xec\xa6\xa0\xf3Nb\xd7\x10\x08\x0b-\x06\x02\x1f\x00\xfd\xb5\xf0z\xb9\x80\xc0\x07@G\xc52.\xa9\xa2\xd4\x17&W\x80\xc0\x07@\xbfU\x85\xb9CM\x04\x02\x1f\x00=\x15\xd
7\xcb\xddM\xec\xf2l\x88\xb5\x0a\x01\x81\x0f`S\xc2^X/\xb7\xa8\xd8M\xef\x1e\x08|\x00\xf4X\x08s\xa9uw\x9f(\xc3\x02\x02\x1f\x00=\x95\xe7\xf9(K\xf7\xde\x85\xd58L\xd4\x00\x81\x0f`)A\xe4\xa8\xdc\xce\xe3p#\xcb\xa3\x0c\x0b\x08|\x00k\x09{{\xe5\xc5\xa3r\xdb.\xb7\xcbX.\x84\xe5\xb4\xf3\xc3\xc4.a\xbd\xdc\xc9\x12\xeew\x22\xcc\x83\xc0\x07pw\x081\xf4>}#\xce\x22ey\xed<M\xb1\x8c\xb0\x97\xdd\xd4\xfa\x0ba\xfeT\x98\x07\x81\x0f\xd8@e\x008\x8ca\xe0u\xef\xc7\xb0@;\xed<\x9e\xd1\xce\xb7\x8e\xdb.\xc3r'\xec\xdd\x12\xfa@\xe0\x0360\x84T\x95\x07\xd9\xd3J\xad\xb5s\xaaw\xaf\xf5\xf5r\xa7\x84\xbd[[B\x1f\x08|\xc0f9\xcc\xd2\x13\x08\xc6\x9ah%\xed|\xb4\x842,\xa9@'\xf4\x81\xc0\x07l\x82\xd8\xeb\x94*\x0frl\xa5\x87V\xdayT^<N\xecrU\xb6s\xb1\x84\xbb\xde+\xb7\x0b\xa1\x0f\x04>`\xb3\xa5z\x9d\xae3+=\xb4eR\xe3yh],\xed\x22\xf4\x81\xc0\x07l\xb8q\xe2\xba#\xb5\xe0\x16\x17\xcb\xb0\xa4\xd6\xcb\x0deXN\x96u\xff\x0dC\x9f\x92- \xf0\x01\x03\x0b\x22!\xec\xcdZ\xda+\xf4\xeeY\xe9\xa1\x1d\x93\x8a\xeb\x97\xde\x8b*\xf4\x81\xc0\x07l\xae\xfd\xc4uz\xf7\xda\x09\xd5U\xeb\xe5>-\xdb\xf9|\x15\xc7\x12\x9f\xcfq\x0c\xf3\xb3\x84\x92-'\x9e9\x10\xf8\x80a\x04\x91Q6{\xb5\x07\xbd{\xed\xb4qU\xb9\x9b\xd6\xcb\xb0\xd4\x08}!\x5c\xeeU\x84\xbe]\xb5\x17A\xe0\x03\x86!\xd5\xbbw\xa2w\xaf\x15\x9d\x5c/7\x86\xbe\xaaa\xe4\x83\xd8;\x09\x08|\xc0@\x03\x9f\xde\xbd\x05\xc5\x19\xaf\x07\x89]B\x19\x96\xb5\xb5s\x5c\xab\xf7I\xc5n_+\x1f\xc7\xbeg\x13\x04>\xa0\x9fa$\x0c5\xce\x9a5z\xb1\xaas\xca\x06\xae*\xcc\x8d\xd7}\x80\xb1\xee\xdf\xb3\x8a\xdd&q\xf8\x1f\x10\xf8\x80\x9e\xd9K}\xc0k\x9e\x85\x03\xf5~\x96.\xc3\xf2\xacC\xc5\xacC\xf0\xac\x9a\xb9{b\xe6.\x08|@\xff$\xcf\xdf\xd3<\x0b\x85\xbd\xaa\xf5r\x83\xce\x9c\x1b\xd7`\xe6\xaea~\x10\xf8\x80\x9e\xd9\x9b\xf1\xf3\x8b%\xac\xe5\xbai\xaa\xca\xb0<\xe9Z\x1b\xc7!\xfc\xa2b\xb7\x83X\xb7\x11\x10\xf8\x80\xae\x8b=P\xb3\x02\xc9\xa9\x16Z\xa8mGY\xba\xf7\xae\xb3\xe5n\xe2\x04\x92\xaa\xf3\xf9\x8e,\xbf\x06\x02\x1f\xd0\x0f\xa9\x0fl\x81o1E\x96.\xc3r\xd8\xf1r7\xe3,=\xb4\x1b\x1e\xdb\xc4\xd3\x0c\x02\x1f\xd0}{\x02_\xfb\xe2z\xb9\xa92,g\xb1\x14Jg\xdd9\x9f/\xc59\x9e 
\xf0\x01=0\xab\x87\xefB\xb1\xe5\x85T\x0d\xd5\x16}x\x10\xe5k \x04\xbaiC\xbba&\xef\x17c)\x17@\xe0\x03:nVy\x8dKM3\x9f8\x99a;\xb1\xcbq\x87\xca\xb0\xd4\x11\xceC\xbc;\xb4\x1b&\x9a\xec\xa8\xcf\x08\x02\x1f\xd0\x1f\xb3\xea\xc3\xf90\x9f/\xecU\x95aY\xf9z\xb9\x8b\x8a\xb3\x88\xc3c\xd2\xab\x07\x02\x1f\xd0\xd3p\x92\x09|\xad\x0a\xbda\xa9\x89\x1aG},u\x13B\x9e^=X\xae74\x01\xb0Do\x95\xdb(n{\xf12\x94iq\xfe^\xf3\x00\x1d\xda\xeeqb\x97+\xbdc\x80\xc0\x07\xacT\x9c\x94q\xaa%Z3\xa9\xb8\xfeP\x13\x01\xb3\x18\xd2\x05\xe8\xb8X\x86%\xb5^\xeeY\x9c\xf1\x0a \xf0\x01\xf4\xd4\xa4\xe2z\xbd{\x80\xc0\x07\xd0Wy\x9eW\xad\x97{l\xb2\x03 \xf0\x01\xf47\xec\x85\x99\xceEb\x97P\x86E\xef\x1e \xf0\x01\xf4X\x08{\xa92,\x85\x15K\x00\x81\x0f\xa0\xa7\xf2<\x0f\xcb\xd2=J\xec\x12\xca\xb0\x1ci)@\xe0\x03\xe8\xaf\xaa07\xd6D\x80\xc0\x07\xd0Sy\x9e\xefg\xd5eXN\xb5\x14 \xf0\x01\xf4\x97\xde=@\xe0\x03\x18\xaa<\xcf\x8b,]\x86\xe5I\x1f\xd7\xcb\x05\x04>\x00\xb2\xef\xaf\x97\x9b*\xb3\x12\xca\xb0\x98\xa8\x01\x08|\x00=Vd\xe92,\x87\xca\xb0\x00\x02\x1f@O\xc5\xf5r\x0f\x12\xbb\x5c\x94ao\xa2\xa5\x00\x81\x0f\xa0\xbf\x8a\x8a\xeb\xad\xa8\x01\x08|\x00}\x95\xe7\xf98K\x97a9V\x86\x05\x10\xf8\x00\xfa\x1b\xf6\xc2z\xb9\xa9\x89\x18a\xa2F\xa1\xa5\x00\x81\x0f\xa0\xbf\xc2Pmj\xa2\xc6\x912,\x80\xc0\x07\xd0S\xb1\x0c\xcb\xe3\xc4.W\x992,Um\xb8\x13{I\x81\x8474\x01\xc0\xdaT\x859eX>\x19\xee\xf6\xca\x8b\x9d\xb8\x85\xb0|{\xde\xe3[\xe5v\xaa\x85@\xe0\x03\xe8bxy\x98\xd8%\xac\x97{\xa2\xa5>\xe1\xf9\x8c\x9f\xef\x08|\x90fH\x17`=&\x15\xd7+\xc32%\x04\xcf\xf8\xb9!]\x10\xf8\x00\xba%\xcf\xf3\x10\xe6R\xeb\xe5\x862,\xe7Z\xeaS.g\xfc|O\xd3\x80\xc0\x07\xd0\xa5\xb0\x17z\xa3\x8a\xc4.\xa1\x0c\x8b\xde\xbdf\x81\x0f\xa8\xe0\x1c>`\x15!g/\xbb9\xc9>l\xb7\xff\x0e=\x5coo\xe0yj!\xec\xa5\xca\xb0\x14&j4\x0e|\xbb\x9a\x06\x04>`\xfdNf\x84\x9c\xbdx\xdd\xa6\x04\xdf0\xb9\xe0Qb\x97\xab2\xec)\xc3\xd2<\xf0\x01\x15\x0c\xe9\x02\xab\x0a|\xd3\xecoX;T\x85\xb9\xb1\x97\x0a 
\xf0\x01C\x0b|\xf7c\xaf\xd7\xe0\x95\x8f3\x84\xdb\xd4\xd0\xe3\x99\xf5r+]&\xdawO\xf3\x80\xc0\x07\xacQ<O\xefz\xc6\xd5\xe3\x0di\x06\xbd{\x8b\xbf\x8e.\xb5\x02\x08|@\xb7M65\xe8\xe4y^d\xe92,O\x85\x19@\xe0\x03\x86`V\x0f\xd7V\x19\x88\x06\x1b\xfab\x19\x96T\x99\x95\xd0\xf3Yxy\x00\x02\x1f\xd0{\xb1\x07k\xd6J\x09C\x0e<!\xe8\xa6\xca\xb0X/\x17\x10\xf8\x80\xc1\x85\x9fi\xee\x0f\xb1\x97/N$8H\xecrQ\x86\xbd\x89\x97E\xa3/\x0e\xf9\x8c\xedT\xeb\x80\xc0\x07t\xe3\xc3:L\xde\xb8\x9aqu1\xc0\x87\x5c\xf5\x98\xac\xa8\x01\x08|\xc0 \xcd\x0aA\x83\xea\xe5\x8b\x8f%U\x86\xe5\x99^)@\xe0\x03\x06)\x0ea\xce\xec\xe5\x8b\x93\x1c\xfa\x1e\xf6\xac\x97\x0b\x08|\xc0\xc6\x9b\x15\x86\xee\x0f$\x08\x1df\xe92,G\xca\xb0\x00\x02\x1f0h\x15\xbd|\x8f\xf3<\x1f\xf5\xf5\xb1\xc5c\x7f\x9c\xd8%<n\xeb\xe5\x02\x02\x1f\xb0\x11R=y\x93\x1e?\xae\xaa0W(\xc3\x02\x08|\xc0F\x883vg\xd5\xe5\xdb\xcd\xf3\xbcwC\xbb\xb1\x0c\xcb\xc3\xc4.g\xca\xb0\x00\x02\x1f\xb0i\xc6\x89\xeb\x8a\x1e\x0e\xedV\xf5\xee\x99\xa8\x01\x08|\xc0f\x89\x13\x17\x9e\xcc\xb8:\xacN1\xe9\xcbc\x89=\x92\xdb\x89]\x8e\xcb\xc7{\xeeY\x07\x04>`\x13C_Q^\x5c\xcc\xb8\xba\x17C\xbb\xca\xb0\x00\x02\x1f@\xb5\xfd\x18\x8a\xa6\xf9Z\x19\xa8v:~\xfc!\xec\xa5\xd6\xcb=2Q\x03\x10\xf8\x80\x8d\x16\x87v\xc7\x89]&]=\xf6x\x9e\xe1\xa3\xc4.W\xb1\x17\x13@\xe0\x036>\xf4\x85Y\xbbOg\x5c\xdd\xe5s\xdf\xaa\xc2\xe8\xd8\xb3\x0b\x08|\x00\x1f\x87\xbep\x9e\xdb\xf1\x9d\x1f\x85a\xdew\xca\x9fw24\xe5y\x1e\x86\xa2S\xeb\xe5\x9eY/\x17\xe8\x8274\x01\xd0\xb1\xd07\x8e\x93 F\xe56\xee\xf8\xcc\xd6\xaa2,c\xcf( \xf0\x01L\x0f}\xfb]?\xc62\x94\x16Yz\xbd\xdc\xa7\xd6\xcb\x05\xba\xc2\x90.@\xf3\xb0\x17z SeV\xc2Pt\xa1\xa5\x00\x81\x0f\xa0\xbf\xc2Pn\xaa\x0c\xcb\xa12,\x80\xc0\x07\xd0Sq\xbd\xdc\x83\xc4.\x17\xd6\xcb\x05\x04>\x80~+*\xae\xb7\xa2\x06 
\xf0\x01\xf4U\x9e\xe7\xe3,]\x86\xe5\x992,\x80\xc0\x07\xd0\xdf\xb0W\xb5^n\xa0w\x0f\x10\xf8\x00z,\x84\xb9T\x19\x96'\xca\xb0\x00\x02\x1f@O\xc5\xf5rS\xbdwWYu\x11f\x00\x81\x0f\xa0\xc3\xaa\xca\xb0\x14\xca\xb0\x00\x02\x1f@O\xc52,\x0f\x13\xbb\x9c)\xc3\x02\x08|\x00\xfdV5T[h\x22@\xe0\x03\xe8\xa9X\x86e;\xb1\xcb\xb12,\x80\xc0\x07\xd0\xbd\x107)\xb7\xf3r\xdb\xa9\xd8/\x94aI\xf5\xee\x85\xf5r\x95a\x01\x04>\x80\x8e\x85\xbdqv\xb3,Z\xe8\xb5;-\xff\x9f\x0alE\x96\x9e\xa8qd\xa2\x06 \xf0\x01t+\xec\x85\x1e\xbd\xf7\xef\xfc(\x84\xb9\xaf\x95??\x8deW\xee\xee\x1b\xfe\xff(qsWe\xd8+\xb4*\xd0\x17oh\x02`\x03\xc2^\x18\x9e=\x9dquX*\xedE\xb9\xcf\x93\xec\xe3^\xbbI\xc5M\x1e\xcey\x1c\xfb\xf1\xb6/\xcb\xed\xf5\xde\xc1\xf3:?s\xce \xf0\x01L\x17B\xd2V\xc5>\x8fC\x90+C\xd9?\xc9\xd2\xeb\xe5\x862,'\xf3\x1cD\xf8\xbdX\xe6e\xda\xf1\xec\xd6\x0c\x8dS\x8f\xe9\xb5\xff\xbf\x8ca\xb1\xeag\x97V\x07\x01\x81\x0f`(B@\x1b\xd5\x08}\xe1\xfa\x7fP\xb1\xcfx\x91\x03)\x03\xd6\xed\x84\x91pL\xdb-=\xbeia\xf1\xe1\x9c\x012\xac\x1a\xf2z\x08\xac\xd5\xfb\x18~\xe6\xbcF\x10\xf8\x00\xd6\x22\x9co\x17f\xe7f7\xb3n\x1f.pSO\xdb\xe8\x11\x0b\xb7\x11{\xfaN\xb2\x9a={+t?\xfb\xf4\x9a\xc1\xb5\x8fqJ\x80<\x9b\xb2\xdbi\x0c\x87'^\x9d \xf0\x01\xb4\x19\xfaBP\xdb\x8fAk2%\xd4T\xf9\x7f\xe5\xf6\x9f[<\x9e\xd0\x13\xb6\x17\x83\xe8\xc1\x80\x9b~ZX\xac*y\x03\xb4\xcc,]`\xd3\x82\xdfi\xb9\x8d\xca\x7f\x86I\x1a\xd7\x0d~\xf5\x07\xcb\xed\x1f\xc7\x1a~\xf7Z<\x9eqy\xf1\xee\x06=\x05\xa1\xc7o\xcf\xd0/\x08|\x00\xab\x08~Evs^\xdfq\xc3_\xbd\xad\xe1\xb7\xd3\xe2\xb1\x84\xde\xaew\x1a\x06\xd0>\x0a+\x93\x08{ \xf0\x01\xac4\xf4\xbd\x8c=l\xffa\x8e\xd0\xf7\x8d2\xf4\x15m\xf5\xf6\x95\xc71)/\xf6\x06\x1c\xfa\x9e\xc6\xb6\x06\x04>\x80\xd5\x8a\xabo\xfc\x959\x7f=\x94r\x09\x130\x8a\x96B_\x98\xf9\x1az\x0e/\x06\xd6\xcc\xef\x94\x8f\xcd2t 
\xf0\x01\xac%\xec\x85\xde\xb9E\xc3Z(\xe5\xd2Z\x98\x89\x93K\xf6\xb2\xe9\xb3[\xfb&\xf4V\xbe\x1d{/\x01\x81\x0f`-BPK\xcd\xd6}\x96\xdd\xd4\xa5\xab\xd2jy\x918\xd4\x1cB\xdfq\x8f\xdb6\x84\xbd=\xa5W@\xe0\x03X\x9b\xb8^n\xaag.\x04\xbdq\x9c\xd1\xfbNE\xf0[J\x89\x91\x1e\xcf\xe0\xbd\x88a\xef\xdc+\x0d\x04>\x80u\x0a!-\xb5\xf2Fq;\x9b4\x0cI\xc6\xe0\xf7V\xf6\xe9^\xb7\x8be\x06\x9b\x1e\xce\xe0\x15\xf6@\xe0\x03X\xbfX|9\xb5\xe2\xc6\xd9\xb4\xf3\xceb\x0d\xbfq\xf9\xcf\x07\xd9\xc7u\xfc\x96^@\xb8G3x\x9fej\xec\x81\xc0\x07\xd0\x11U!\xad\xa8\x08`\x97\xa1\x8e_\xb9\xdd[\xd5\x84\x84\x1e\xcc\xe0\x0d5\xf6\xf6\x85=\x10\xf8\x00\xd6.\x96a\xd9\xae\x08.\xa7]<\xf6\x0e\xcf\xe0}W\x8d=\xe86k\xe9\x02\x9b\x14\xf6\xaa\xd6p\x0dC\xa6\x87\x1d<\xeep\xcc\xb7=g\xa1\xa7\xaf(\xb7\xaf\x96\xdb\xdf\xee\xc0\xe1\xbd\xa3\xec\x0a\x08|\x00]\x12\xc2\x5cj\xa2\xc6QG\x87$\xc3P\xeen\xc7\x8e)\x84\xe3\xfd\xae\xf6\x86\x02\x02\x1f\xb0\x81b\x19\x96\xc7\x89]\xae\xe2\xfa\xba]\xb4\xd3\xb1\xe3\xb9\xad\xb1g&.\xf4\x84s\xf8\x80M1\xa9\xb8\xbe\xcbK\x7fmu\xe8X\xc2\xa4\x91\x1da\x0f\xfaE\x0f\x1f0x\xb1\x0cKjH\xf4\xac\xab+B\xc4c\xefR\xd8Sv\x05zH\x0f\x1f\xb0\x09&\x15\xd7w\xb9w\xef^G\x8e\xe3X\xd8\x83\xfe\xd2\xc3\x07\x0cZ\x9e\xe7U\xeb\xe5>\xed\xf8\xf0d\x08X\xcfb\xf0\x0b\xdb\xf6:\xc2\x9e\xb2+ \xf0\x01t5\xec\x85\x80T$v\xb9\xae\xb8~\xed\xe2,\xd8\xd3\x8a\xc7\xf8\xbd%\x1e\x82\xb2+0\x00\x86t\x81!\xab\xbd^n\x8f-s\x06\xaf\xb0\x07\x03\xa1\x87\x0f\x18\xa4<\xcfC\x10:H\xec\x12\xca\xb0\x1c\x0d\xe0\xa1.#\xf0)\xbb\x02\x03\xa3\x87\x0f\x18\xaa\xaa07\x1e\xc8\xe3l;\xf0\x09{ \xf0\x01t_\x9e\xe7\xfbY\xba\x0c\xcb\xb3\x01\xad\x10\xd1f\xe0\x0beWF\xc2\x1e\x08|\x00]\x0f{U\xeb\xe5\x06\x87\x03z\xc8m\xcd\xdaUc\x0f\x04>\x80\xde\xa8*\xc3\xf2\xa4\x0c5\x97\x03\x09\xb7{-\xddT(\xbb\xb2#\xec\x81\xc0\x07\xd0\x87\x004\xca\xd2\xbdw\xe1\xfc\xb4\xa3\x01=\xe46\x86s\x9f\xaa\xb1\x07\xc3g\x96.0$E\x96.\xc3r8\xb0^\xac\xd1\x82\xbf\xaf\xec\x0al\x08=|\xc0 
\xc4\xe1\xcdT\x19\x96\xb3\x01\x86\x9by{\xf8\xae\x85=\xd8,z\xf8\x80\xa1\xa8\x1a\xaa-\x06\xf8\x98w\xe7\x0c{\xca\xae\xc0\x86\xd1\xc3\x07\xf4^\x9e\xe7\xe3,=[\xf5x@eXn\x1f\xf3<\xbd{W\xc2\x1el&=|@\xdf\x83OU\x19\x96\xce\xaf\x97;\xa7\xa6\x81O\xd9\x15\xd8`z\xf8\x80\xbe\x0b\xb3rS\x135\x8e\x86R\x86\xe55\xa3\x06\xfb>\x13\xf6`\xb3\xe9\xe1\x03z+\x96ay\x9c\xd8%\xac\x97[\x0c\xf4\xe1\xef\xd5\xdc\xefX\xd9\x15@\x0f\x1f\xd0g\x93\x8a\xeb\x0f\x07\xfc\xd8\xeb\x0c\xe9>\x11\xf6\x80@\x0f\x1f\xd0K\xb1\x0cKj\x96j(\xc3r2\xd0\xc7>\xca\xd2\xc3\xd8\x81\xb2+\x80\xc0\x07\xf4^U\x98\xd9\xd4\xde\xbd0Ie<\xd4\xb0\x0b\x08|\xc0\x86\xc8\xf3\xbcj\xbd\xdc\xa7\x03/=\xb2\x93\x08{\xca\xae\x00\x9f\xe2\x1c>\xa0oa/\x94a)\x12\xbb\x0c\xb5\x0c\xcb]{S~v!\xec\x01\xb3\xe8\xe1\x03\xfa&\x84\xb9\xd4\xf9k\xc5\x06\x94\x1f\x19\xcd\x08{\xca\xae\x00S\xe9\xe1\x03z#\xae.\xf1(\xb1K(\xc3r4\xf06\x08=\x9cw\x87\xb3\x8f\x85=\xa0\x8a\x1e>\xa0O\xaa\xc2\xdcx\x03\xda\xe0\xee\xf9{j\xec\x01\xb5\xe8\xe1\x03z!\xcf\xf3\xfd\xac\xba\x0c\xcb\xe9\x064\xc5^\xbc|W\xd8\x03\xea\xd2\xc3\x07\xf4\x85\xde\xbd\x1b\xa1\x87O\x8d=@\xe0\x03\x86%\xcf\xf3\x22K\x97ay2\xd0\xf5r\xa79\xdc\xa0\xc7\x0a\xb4\xc4\x90.\xd0\xf5\xb07\xca\xd2E\x94C\x19\x96\xa3Mi\x0fa\x0f\x10\xf8\x80!*\xb2t\x19\x96C3T\x01\x04>\xa0\xa7\xe2z\xb9\x07\x89].\x9c\xcb\x06 \xf0\x01\xfdVT\x5c\x7f\xa8\x89\x00\x04>\xa0\xa7\xf2<\x1fg\xe92,\xc7\x1bR\x86\x05@\xe0\x03\x06\x19\xf6\xc2j\x12\xa9\x89\x18\x9b\xb0^.\x80\xc0\x07\x0cZ\x18\xaaMM\xd482[\x15@\xe0\x03z*\x96ay\x9c\xd8\xe5*\xdb\xa02,\x00\x02\x1f0DUaN\x19\x16\x00\x81\x0f\xe8\xabX\x86\xe5ab\x97\xb0^\xee\x89\x96\x02\x10\xf8\x80\xfe\x9aT\x5c\xaf\x0c\x0b\x80\xc0\x07\xf4U\x9e\xe7!\xcc\xa5\xd6\xcb\x0deX\xce\xb5\x14\x80\xc0\x07\xf43\xec\x852,Eb\x97P\x86E\xef\x1e\x80\xc0\x07\xf4X\x08{\xa92,\x85\x89\x1a\x00\x02\x1f\xd0Sy\x9e\xef\x94\x17\x8f\x12\xbb\x5c\x95aO\x19\x16\x00\x81\x0f\xe8\xb1\xaa07\xd6D\x00\x02\x1f\xd0Sy\x9e\xefg\xe9\xf5r\xcf\xac\x97\x0b 
\xf0\x01\xfd\xa6w\x0f@\xe0\x03\x86*\xcf\xf3\x22K\x97ayb\xbd\x5c\x00\x81\x0f\xe8o\xd8\x1be\xe92+\xa1\x0c\x8b\x89\x1a\x00\x02\x1f\xd0cE\x96.\xc3b\xbd\x5c\x00\x81\x0f\xe8\xab\xb8^\xeeAb\x97\x8b2\xecM\xb4\x14\x80\xc0\x07\xf4WQq\xbd\x155\x00\x04>\xa0\xaf\xf2<\x1fg\xe92,\xcf\x94a\x01\x10\xf8\x80\xfe\x86=\xeb\xe5\x02\x08|\xc0\xc0\x850\x97*\xc3r\xa4\x0c\x0b\x80\xc0\x07\xf4T,\xc3\xf28\xb1\xcbU\xa6\x0c\x0b\x80\xc0\x07\xf4ZU\x98+\x94a\x01\x10\xf8\x80\x9e\x8aeX\x1e&v9S\x86\x05@\xe0\x03\xfa\xad\xaawo\x90\x135\xca\xa0\xbb_n\xafjn\xe3.\x04\xf3\x06\xc7kr\x0d}\xfd\xbb\x1c\x97\xdb\xf9k\xaf\xe7\x93\xf8\xc5T\xe0\x03\x98\xf3\xcd5\x04\x83\xed\xc4.\xc7\xaf^\xbd:\x1f\xe2c/\x1f\xd7IyqVs\xf7\xa2\x03\x87\x5c\xf7\x18Bal\xe7[\xd2\xc7\xf7\xa3Iy\xf1\xfe\x94\xf7\xa40\x02\xf1<\xae\xef-\xf0\x014|sU\x86\xa5~\x88\xba\xbf\xce^\xbe\xd8\xbb\xb1[sw\xbd{\xf4\xf1\xfd(\xfc-\x1eT\xec\xf6x\xc8=}\x02\x1f\xb0\xcc\xb0\x93Z/\xf7h\xe8\x135b\x11\xe9g-\x87\xc3e\x98\xd4\xdcOal\xfa\xea\xb0\x07\x7f\x87\x02\x1f\xd0\xbbo\xd3\xa3\xf2\xe2Qb\x97\xab28\x14\x1b\xd2\x1cu?h\xd6\xd2\xcb\x17\xef\xf3~\x8d]\x15\xc6\xa6\xaf\xefG{\x15_>\xef\xda\x1dj;\x08|\xc02L*\xae\x1foJC\xc4b\xd2Ok\xee\xbe\x8e\x10\x5c\xf7>\x15\xc6\x06\x81\x0f\xe0\xfb\xdf\xa6\xf7+\xbe%\x9fm\xe0\xb0`\x08U\xd75\xf6[i/_\x83\xde\xbdM\xea\x91\x05\x81\x0f\xa0\x86\xaa\x19\x9c\xe3Mk\x90x\xaeb\xdd\x99\xadE\x9c\xf0\xb2\xaa 
Z\x87\xa1\x5c\xfa\xec\xb2\xc1\xbe\xd7Cm\x04\x81\x0fhM\x9c\x09\x97\xea1z\xba\xc1\xc3\x82!\xf0]\xd5\xd8\xef\xfe*\x02V,\x99S\xa7w\xef,\x96\x98\x81\xbe~\xe1\x0a\xef9g\x0d\xfeN\x05>\x80D\x80\xb8W\x11T\xc27\xe7b\x83?t^6x\xfc\x87\xcb\xec\xe5\xabQ2\xe7\xae\xb1W7\x030\xce\xaa{\xef.\x04>\x80z\xdf\x8cS3\xe1\x0e7}\xbd\xdc\xb8\x84\xdcE\x8d]\xb7\xb2\xe5\xf6\xf2\x1df\xf5f->5Q\x83\x81\xfc\xed\x85\xd7\xf1^6\xbb\xa7\xef8\x5c?\xe4\xf7\xa87\xbc\x0c\x80E\xc5\xb2\x07\xa9\xa2\xa6\x17\xd6\xcb\xfdD\xd8z^g\xbf\xb2][\xafUX\xa3'\xf6\xd6F\xf7\xc82\xc8\xd0\x17V\xf5\x09K\x08\xee\x94\x97a\x1b\x95\xdbi\xb9\x9do\xc2\x97Q\x81\x0fhCU0p\xd2\xff\xc7\x1f:\xa7\xe5\x07N\xe8e\xa8\xaa\xf7u\xdb\xcb\xd7v\xe8\xaa\xdb\xbb\xb7\xf1=\xb2\x0c:\xf8\x9do\xda\xe36\xa4\x0b,$\x96\xf6H\x85\x17\xab3|\xda\xb8n8k\xf3\x5c\xbe\x06\xbd{gzdA\xe0\x03\xb8\x1b \x8a\xc4.Vg\x98\x22\x9eOt\x5cc\xd7\xb6\xcf\xe5\xab:\xcf\xf2V\xe1Y\x02\x81\x0f\xe0VUi\x0f\xab3\xa4CU\x9d\x9a_\xad\xf4\xf2\xc5\xe5\xee\x0ej\xecz\xacG\x16\x04>\x80\xbb\x01\xe2qb\x97Ps\xeeHKM\x17\x83p\x9d\xf6i\xab\x97\xaf\xa8\xb1\x8f\x1eY\x10\xf8\x00>\xa1*\xac\x14N\xfa\xaf\xd5\x86u{\xf9F\x0b\x86\xf3:\xbd{G\x9e3\x10\xf8\x00n\x03\xc4^y\xf10\xb1\x8b\x93\xfek\x88\xe1\xaaN\x8fZ\xe8\xe5+\x16\xb8\xab:\xbfk\xbd\x5cX\xcf\xfb\xe9(\xbc\xa7\xc6\xf7\xd5\xa5Q\x96\x05\x98Ge\xef\x9e&\xaa\x1d\xfa&5\x96\xa4\x0b\x0e\xc2~M\xcf\x89\xacQ#\xf1\xd6\xd8\xb3\xb1\x96\x0f\xfbp~fx\x8enk\xc3\x85\xff\xa7f\xbd\x87\x1e\xe1\xf3;\xdb\xa9\xf3d{\xf5\x5c\xef\xc7\xe7{4\xedy.\xf7\x99\xf5<\x9f,\xda\xfb.\xf0\x01M\xdf\xb4B0\xd8N\xec\xe2\xa4\xff\xe6B/\xdf\x075\xf6+\xe6\x08fu\xc2\xb7\xd29\xab\xfd\x1b\x1a\xc5\xe7q\xbf\xe2oi\x9a\xad\x18\x14v\xef\xdc^\xa8\xeb8i\xa3W=~Ax^s\xf7\xb7\x97\xb5\xcer\x0cG\x97Y\xbdY\xe5\x95\xc7Q\xf7q\x95\xb7\x93/\xe1\xb1\xec\xc5\xe7\xfb`\x81\xe7\xf9\xfd\xf2vn\x97~\x9b+\xfc\x19\xd2\x05\x9a\xbe\x09\x1fU\xf4>8\xe9\xbf\xa1\xf8aUgq\xf7\x83&\xe7\xf2\xc5\x0f\x9a\xdd\x1a\xbbz\xceV\xf4e\xa9\xdcBo\xcd\x8b\xecf\xc2\xd3vK7\xbd\x1b\x03\xc1\xe5\xa2\xc3\x821\xf8?\xad\xb9\xfbd\x89k>O\xb2\xfa\xcb\xff\x9dt\xf4\xf9\x0eC\
xb5\xa71h\x1e\xb4p\x93\xe1\xf5\xf2~\x08\xc2\xa1\xb7\xbfi\xdb\x0b|@\x13E\xc5\x9b\xb0\x93\xfe\x17k\xdb6\xf7\xab\xbb\xef\x13C\x82+\x09z\x97\xf1\xc3z{\x89w\x15N\x0bx\x1e\x96\xe4[0\xf4\x85/\x00u\xd7|>YB{\x85\x9e\xcf\x875v\xbd\xc8:z\xfaH\x1c\x099\xaf\xf9\x85\xab\xa9\xad\xf8\x85aG\xe0\x03\x96\xf2m\xb5\xbcx\x94\xd8\xc5I\xff\x8b}\xc8\x86\x9e\x80:\xc5\x98k\xf5\xf2\xd5\xec\xddS:g\xf9\x7f7G1\xe8\xdd_\xe1\xdd>\x0a=K\x0b\xf6\xbe\x8dk\xee\xb7[\xde\xcfa\x8b\xed\x15\x8eyR\xf7\x18\xbb\xf8\x05\xf3\xces\xbe\xb5\xc4\xbb\xb9nz\x1a\x86\xc0\x07\xd4U\xf5&lXpqE\x8b\xfb\xd5\xdaG\x8f\xec\xd2\xadk\xcd\xd6\xdd\x06\xc1i\xda\x17\x90p\xdc\xef\xd6}=.R6h\xca\xfbL\x9d\xa0\xf4n<\xc6\xae\x85\xbd\xa2\xe2\x8bq[\x1a\xf7\xac\x0a|@\x9d7\xb1\xbd,\xdd[t\xd6\xd5\xf3h\xfa$\x0e\xad>\xa9\xb1k\xb2\x97\xaff\xef\x9e\xd29\xab\xb1\xc8\xdf\xc5\xc5\x82\xf7\xfd0\x06\x90y_\x8f\xa1\xa7\xaa\xce\xb9\xa5[\x8b\x84\xcb;\xaf\xdb\xbaC\xb9g\xf1\xd8\xba\xf6>\x19\x8e\xffqW_W\x02\x1fP\xf7[w\xcaX\x13\xb5\xa6n1\xe6b\xce\xebn\xe9\x91]M\x88\x0f=\xa8\xa9\xa1\xfa\xf0\x5c?\xcbnz\xd3\xde*\xb7\xcf\x86\x99\xa2q\xdb\xb9\xfdw\xf9\xf3\x07\xe5\xf6v\xbc\xad\xeb\x06\x87\xf0\xb8\x0c\x22;\x0b<\x84q\xcd\xfb[hh\xb7\xc1P\xeeu\x17\xdfojLh\xfbTh\xbd}\xce\xef<\xdf\xb7\xcf\xf3[\xf1\xbaY\xcf\xf5\xf5<_\xb0\x05>\xa0\xea\x8d,\x84\x87\xd4\xf9GO\x9d\xf4\xdfz@\xa8\xf3\xc11\xb5\x97\xaff\xef\xdeq\x17\x87\xc3\x06\xecdJh\x09\x1f\xe6_,\x9f\x87{\xe5\xb6\x1fz\xac\xc29Y\xb3\x86\xd8\xc3\xdfX\xf8\x90/\xb7\x10v\xc2\xf3\xfe\xa4\xc1\xfd\x1f-\xf0z\xbcl\x10\xb0\x16\x19\xda\x0da\xaf\xceP\xee\xb8\xa3\xef7U\xeb\x8a\xdf\xba\x88!o\xef\xf69\x9f\xf2<\x9f\xc6\xeb\xc2c\xbdw'\xe8\xd7\xfd\x02.\xf0\x01s}k=\xac\xf8\xb6]h\xa9\xd6C_h\xd3\xab:\x1f~\xd3>t+~G\xe9\x9c\xd5?\x9f'\xb1\xdd\xafcP\x1b\xc5\x0f\xf3\xf39o\xefe|\x8d\xbc\x95\xd5\xef}\xdbY\xf0\xf8\x9f\xd5\xd8u\xae\xa1\xdd\x06C\xb9\xc7\x1d>u\xa4N(\x0eao\xaf\xe9d\x8b;A\xffA|\x1e\x04>\xa0uG\x15\xdf\xba\x9d\xf4\xbf<u\x82\xf4\xe1\xdd\x99\x985{\xf7<g\xeb\x0b\x04!\xe8\xb5\xd6\xfe18\x8ck\xee~\xd8\xc2\xf1\xb7>\xb4\xdb`(\xf7\xaa\xab_T\xe2\xdf]U\xef\xdeu\
x0c{/\x17x\xbe/co\xf0\x5c_\x14\x04>`\xd6\x9bX\xe8\x11H\x15\x0b\xbd\xe8\xe2\x89\xd3C\x11'TT\x9d0\xbf\xf5\xda\x87`UH\xf4\x9c\xad\xef\xf9<YF\xd0\x8e=^u\xca\xf9\xec/x?/\x1b\xdcF\x93\xa1\xddIVo(w\xbf\xc3_T\xf6\xea<\xceu\x1f\xbf\xc0\x07\xccR\x15\x0c\x0c\x0b._Qc\x9f\xc3\x18\xd0\xc3\x07\xec\xae\xe7\xcc\xebd\xd6\x97\x83\x15\xae\xc2Qkh7\x1eO\x9d\xa1\xdc'\x1d?\xe7\xb4N\xbb\xae}(Z\xe0\x03\xa6\xbd\x11\x8f+\xc2\x83\xb5WW \xb6qe/_|\xbe\xaa>\xf4=g\xc3}\x9d\x5cf\xf5\xce\xb1\xdbk\xe1\xee\xc2\xeb\xacN\xb9\x98\xe4\xd0n\x83\xa1\xdc\xb3\x81\x14t_{`\x15\xf8\x80io\xc4Uo\xb0z\x8aVg\x5c\xf3C85\xfcn\xa2\xc6\xf0\xd5\x09\xf3;\x8b\xdeI\x1c\x96\x1c\xd7\xdc=\xb5\xdek\x9dY\xad\x9d,\xc12\xa7\x9du\x1f\x80\xc0\x074}#\xb6\xf6\xea\x0a\xc5\xb6\xae:G\xab\xea\x83\xf3\xc8s6xuz\x90\xee\xb5\xf4\x9a\x0c\xf7U\xa7,\xcc\xd4\xa1\xddx~p\x9d\x02\xc5\xe3\x01\xbdnG\x02\x1f\xd0\x19\xf1<\xb0TO\x90\xb5W\xd7\x17\xc2\xaf\xe7\xfc]\xcf\xd9f\xa83!`\xb7\xad;\x8b\xc3\xacuV\xe1x8\xe5\xdc\xc1:\xaf\xc7\xe3\x81\xad\xdeS\x08|@\x97(\xc3\xd2A\x0d\x8a1O\x0d\x8b\x9e\xb3\x8dx\x8d\xac\xe3\x1c\xb1q\xcd/\x22\x93;_*\xc75\x82ggK\xb0\xccP\xa7\xed\xef\x97\x8f}\x22\xf0\x01kWc\xc6\x9c\xb5W\xd7\x1f\xc6\x9b\xf6\xf2Y\xe3\x98e\x86\xcc\xcb\x9a\xc1,\x84\x9d\xa2\xc1\xf2c\xfb=\xfb\x92rZs\xbf\xb0:\xce$q^\xe3R\xbd\xe1%\x0b\xdc\x09\x14)\x85&Z\xeb\x87\xeb\xcb8\xeb\xf1\xfd\x06\xbf6\xee\xf0\x17\x8c\xd3\x15\xdc\xcdy\xd9n\xbd\x9c\xac\x12C\xc1N\xdc\xc2\xbf\xf7\xe2U\xa3\xac\xde\x12^\xabz]Nj\xae\x94q\x18\x8f\xbd\xaa\xe6\xde\x93\xbe-\xfb\x17\xbeT\x95mpU\xf3y\x09\x93\xab\xf6B\x00^\xf5\x17h\x81\x0f\xb8\x1df\xd9N\xecr\xac\xa4Gg>\x5c\x8b\x9a\x1f,]_\xe3x\xd73\xfa\xa9\x80\xb7\x1f\x83\xdd^\x97B]\xcd/\x16\x97\x15an+K\xcf$\x0f\xfa\x5c\x82\xa5h\xf0e,<\xb7\xef\xc7\xbf\xe5\xb0\x9d\xac\xa2G\xd3\x90.\xf8\xa0\xa9\x1afQ\xd2\xa3[\xea>\x17\x85\xa6\xea\xc7\x97\xadr\x0b\xc3\xee\xdf\x8b\x81\xe1\xa0ga\xaf\xe9*\x1c\xa9\xf7\x99q\x9f\xbf\x8ce\xf5&\xb1|*\xf8\x85\xb0\x1c\x87z\x97Z\xbaE\xe0\x03\x8a\x8ao\xe6GN\xfa\xef\x94Z\xcf\x85\xe7\xac\xdb_\xb2\xe29m/\xe3\x07\xfe\xc3\xbe?\xa6\x06\xabp\xcc\xf
c\x223\x80\x12,!\xf4^\xcc\xf1{\xb7\xbd\x9f\xdf(_\x13\xe7\xf1K@\xeb\xe7\xf9\x09|\xb0\xd9\x1f<\xa3\xf2\xe2Qb\x97\xab\x81T\xb9\x87\xae\xfc\xcd\x85\x1e\xda\x10l\x1eg\xf5\xd6\x90\xed\xdb\x97\xc7y\x02\xcf\xb3!L\x08\x8b_\xb2\xf6\xe6l\x83[\xdb\xd9\xc7\xbd~E\x9b\xc1O\xe0\x83\xcdV\xf5&k(\x17\xda\x09z\xf7\xe2D\x95\xaf\x0d0\xe8\xdd\x0d<\xe3\x86\xbfv\x95\x0dg5\x8d\xbb\xa1\xefl\xc1\x9b\xda\x8a_\x0a.\xe3\xb9~\x0b3i\x036\xf7\x03(\xbc)\xa5N\x9cW\xd2\x03\xda\xf9[\x0b\xe7f\x9d.\x18\xf4B\xafQ\x08\x13\xe7\xd9'\x87\xf5O\xef\xfc;\x9c\x8b\xbb\xbd\xe6\xc0\x13\x86$\xc3\xca0\x075\x7f\xe5|h\xa7\x1f\xdc\x86\xbe\x18\xd4\x1e/xs\x1f\x05\xbf8\x13z\xbc\xc8\x0cf\x81\x0f6\xd7\xa4\xe2z\xbd{,\xf3C1\x17\xf6\x92B\x0f\xd1I\x0cD\xa75\xef\xebeG\x1e\xefA\x83_\x09+q\xec\x0f\xf1\xcbe8\x1d&\x16[\x0eA|\xd1\xf34C\x90?\x0d\xa7\x04\xcc;\xfc-\xf0\xc1f\xf68T\xad\x97\xfb\xb4o\xb5\xb0\xa0\x83\x7fg\xf7\x1a\x86\xbd\xeb\x18\x0e&}\x9c\xc0\x10\x1f\xef<\xc1\xed\xa3\x19\xaaC\x5c\xef9>\xa6\xfd8\xa22n\x18\x86_\x17^G\xa1\x9cK6O\xe8s\x0e\x1fl\xe6\x87PQ\xf1\xa1Sh)X\xd8I\x83\xb0\x17f\xb8\x8eB\xafP\x8f\x83\xcfQ6_I\x99\xd0F\x93!\xbf\x10B/m\xb9\x85\xc0\xf7\xa0\xdc\x9ed7\xe7.\xce\xeb\xfd8\xc4+\xf0\x01\x95o\xca\xd6\xcb\x85\xe5~\xb1\x0a\x1f\xeeu\x8aK\x87/Xo\x85\x15A\xfa\xfcw\x17\x03\xc8\x22\xbdW\xbbq\xe4a\xd0B\x98\x8f\xa1~T\xfe\xf7\x8b\xe5v\x9c5_21\x98\xc4*\x0b\x02\x1f0\xf5M\xb9\xea\xfc\x9aP\x86\xe5HK\xc1\xc2\x8a\x9a\xfb\xed\xf5}\x15\x9b8j0i\xa3\xcd\x9a\x86\x98\x9e\x87\xbf\xf3\xd8\xeb\x17\x1e\xf3\x93\x86\xc1o+k8\x12#\xf0\xc1f\xa9\x0ascM\x04\x0b\x07\xa0\xd0\xdbUgh\xf3\xc9@\xce\x95\xad3t\xfd\xacf\x88\xd9\xb8\xca\x00\xa1g7\xd6;\x0d\xc1\xef\xb8\xc1\xaf\x1e4\x09\xc8\x02\x1fl\xd6\x87Pj\x88\xe9\x99\xf5r\xa1\x15u\xce\xaf\xba\x1eBQ\xf38\x0c[5t\x1dF\x0e\xf6k\x86\x99\xed\xb6\xea\xce\xf54\xf8\x85/\xdd\xef\xb4\xfcZ\x13\xf8`\x83\xc2^\xd5z\xb9\x812,\xd0\x8e\xbd\x1a\xfbL\x06\xf0\xbe\x12N\x11\xa9\x13\xce\xc6w\xdec\xea\x0c[>^\xf6\xba\xb2\x1d\x0f~\x93\x06\xa1O\xe0\x03>\x15\xe6RCLO\x86X\x12\x01\xd6\xf4\xe5\xaa\xcep\xee\xe9\x12\xee~w\xc5\x0f7\x04\x93\xca\xa1\
xdc\xdb\x91\x838)\xa5hp\xdb\x1b+\x86\xbe:\xc3\xe0\xb5\x83\xb1\xc0\x07\xc3\xff\x00\x1ae\xe9\xde\xbb\xdb\xda_\xc0\xe2\xea~\x00_\xb6\xfcw\xbe\xd2\x1e\xb1\xf2\xfe\xea\xac\xeaq\xfd\xfa{O\x9c\x14Vg\xad\xd9\xedx\x1f\x9b\xacN8\xae]\xd0[\xe0\x83\xcdx\xd3H\xbd)\x1c*\xc3\x02\xab\xb5\x84\xc9\x1a\xa3\x15\x86\xbd\xbd\xf2\xe2Q\x9d\xf7\x9e\x19#\x07\xe3\x9aw\xf5(\xde\x97\xd7H\x0b\x04>\x18\xb0\xf8f\x99*\xc3r6\xef2=\xc0B\x7f\x9bm\x07\xb4\xfd\x15\x1dw\xdd\x12,\x17\xb3J<\xc5 \xf3\xb4\xe6]N\xe2}n\xaa\xab\xb6nH\xe0\x83a\xab\x1a\x12)4\x11\xacEk\x81/\x86\xc7\x83\x15\x1dw\x08{u\xceQ\x1c\xd7x\xef\xa9\x13f\xeeo\xf8\xfbTkaW\xe0\x83\xe1\xf6 \x847\xdc\xd496\xc7\xca\xb0\xc0\xda\xb4\xd9#7Y\xd1{J8\xe6\x875v\xad\xac/\x18O#\x19\xd7\xbc\xeb\x8d\x1c\xda\x8d=\x9b[m\xdd\x9e\xc0\x07\xc3}\xa3H\xf5\xeeY/\x17\x96\xa3\xeeyW\xfb-\xfd\xad\x87\xbf\xe3\xdd\x86\xef\x0d\xf3\xdc\xcf\xa8f\xb0\xbc\xcajN\x02\x8b_8\xeb\x0e\xed\x9el\xe0\xd0n\x9d\xd7\xc8\x99\xc0\x07\x9b\xed\xb0\xe2\x9b\xe1\x912,\xd0\xbe\xd8sUk\xa8r\xd1\xb5cc/\xfe\xe3\x86\xbf6\xefl\xdeIV\xaf\xb7i\xdcp\x12XQ\xb3\xbd\xb6\xb2\x8e\x96j\x09A\xb4\xed\x1e\xc8\x18n\xeb|)?\x15\xf8`C\xc5o\xe2\xa9\x0f\x81\xab!T\xf8\x87\x0e\xab\xbb<X1o9\x95\xd8\xb3\xf7\xfe\x8a\xdeS\xea\xac\xa6\x11<mz\x9aH\xc3\xa1\xdd\x87qX\xb9k\xc21=/\x8f\xed\xb4\x8d\xe0\x17\xc3^h\xc7:\xe7J\xd6\x0e\xc1\x02\x1f\x0cO\xd5\x1b\x80\x155`\xb9\xea\xd6\x8f\x0b\xbdV\xa7MBL\x08\x14\xe5v\x9e5\xef\xd9\x9b7|\x84@\xfa\xb5\x1a\xbb\xce}\x9aH\xc3\xa1\xdd.\xce\xda\xbd}\xfev\xef\x04\xbf\xf1<\xc7\x19\x03ch\x8f\xed\x1a\xbb\x9f5\x19\xa9\x11\xf8`@\xe2\x9b\xc5n\xc5\x1b\xc4\x89\x96\x82\xe5\x89\x1f\xc2u\xcf\xad\x0a\xa1\xef\x83TH\x08\xa1+\xf4\xb2\xc5\xa0\xf7<\x11\x06\xae\xdb|\x1c\x0dJ\xb0\x04\xe3\x05\xeby\x86\xb0Xwh\xb73\xefa\xb1\x8d^\x9f\xc8\x12\xde\x83C\xef\xeb\xf7\xe2\xf3Z\xc4\xa0~oV\xa8\xae\xf9\xfc.\xf4\xe5\xfd\x0d\x7f\x9a0(Uo\xcez\xf7`5\xc6\xe5\xf6\xa2\xc1\xfe\xbb\xb7A\xa1\xfc\xe0\xbf\x0doMfh^\xc7\xfb\xfc\xa0\xc5\xc7P\xd4\x0c\x1f\xcf\x16\xfd\x22\x19\xc2b<'\xf1y\x9d\xb6\x0a\x01iV\x9d\xbf\x15\xdb\xaf\xf9\xbc>\x8e\xe1\xee\xf6\
xe7!\xdc\xde_\xe0~\x9f4-\xcc\xac\x87\x0f\x06\x22\x9eg\x93z\x03y\xba\x84\xea\xfe\xc0\xf4\x00sY^\xbc\xbb\xc0M4\x0d{{5CW\xads\x06\x1b\xac\xa6q\x1b4\xdbh\xb3\xd3\xac\xfe\xd0n\xb1\x84\xe2\xd5\xcb\x08|\xb3,\x12\xf6\x8e\xe79\x0f[\xe0\x83a\x84\xbd\xaa\x19]\xca\xb0\xc0\xeaC_\xe8\x81:^\xf2\xdd\xdc\x86\xbd\xba_\xe6*\xcf+\x8b\xef'u{\xec\xc6-/\xcdXd\xf5\x86\xa6\xd7>kw\xc6p\xee\xb2\x85\xb07W\xc0\x16\xf8`\x18\x8e*z\x04\x0a\xeb\xe5\xc2ZB_\xf8p~\xba\xa4\x9b\x0f\xe7\x09\x8e^\x0b{U\xe7\x0e\xd6\x99H0\xc9\xea\xf50\xb6~Np\xc3Y\xbb\xbbq\xb6\xf2\xba\x8cV|\x7f\xef\xce\x1b\xf6\x04>\x18\x808\x8b.\xb5\xac\xd2UG\xceu\x81M\x0d}\xe1t\x8b\xb7\xb3\xf6&U\x84\xf3\xbf\xde)owo\x8e/r;\x15\xef'!P\xd4\xe9\xb5jm(wJ{\x85\x10\xf9\xac\xe6\xee\x8f\xe7-m\xd3\xc2q\x86\xa0\xfd [~/n\xb8\xfd\x07\x8b\xbe\x8f\x0b|\xd0\x7fUo\x02cM\x04k\x0f}!\xc4\x8c\xca\xed\xc9\x02\xc1\xef\x22\x06\xbd\xd0\xab7\x99\xb1O\xd5\xd0\xee(\x11\xf6FY\xfd\x922\xc5\x92\x8b\xb7\x8f\x1b\xb4\xd3d\x8d\xcf\xebe\xecu\xfblvs\xce\xe6YK7\x1d\x1e\xfb\xd3\x18\xf4\xc6m\xb4u^\xde\x88\xbfD\x88\xb6\xbe\xf2\xa5\xd3\xac\xc12E\xd1\xd9\xf5{\x1f\xee\xad\xe3xc\xfd\xae\xd4\xac\xbc0\xe4\xb2\xe7\x99\x85n\x89\x7f\xbb\xe1os'\xf1\x9es\x15\x03\x5cx_:\xb1:No\x9e\xdb{\xf1y\xbd}~\xef\xd5\xf8\x5c\x09A\xf1e|\xaeO\x971\xc1NY\x16\xe87\xbd{\xd0C\xb1\xc7OM\xcca>\xb7\xdf\x0fn]:.C\xba\xd0\xdfo\x91E\x96\x9e\xda\xffD\x8f\x00\x00\x02\x1f\xf47\xec\x8d\xb2t\x11\xe5p\xfe\x87\x89\x1a\x00\x08|\xd0cE\x96.\x9bp\xa8\x0c\x0b\x00\x02\x1f\xf4T\xac\x80\x9f*\xc3r\x91\x98\xc1\x07\x80\xc0\x07\xf4@Qq\xbd\xf5r\x01\x10\xf8\xa0\xafbQ\xd4\xd4\xf4\xfe\xe3\xb8\x1e%\x00\x08|\xd0\xc3\xb0\x17j9\xa5&bX/\x17\x00\x81\x0fz.\x0c\xd5\xa6&j\x1c)\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<N\xec\x12*\xf2+\xc3\x02\x80\xc0\x07=V\x15\xe6\x94a\x01@\xe0\x83\xbe\x8aeX\x1e&v9\x8b\xcb4\x01\x80\xc0\x07=5\xa9\xb8^\x19\x16\x00\x04>\xe8\xab<\xcfC\x98K\xad\x97\x1b\xca\xb0\x9ck)\x00\x04>\xe8g\xd8\x0beX\x8a\xc4.\xa1\x0c\x8b\xde=\x00\x04>\xe8\xb1\x10\xf6ReX\x0a\x135\x00\x10\xf8\xa0\xa7\xf2<\xdf)/\x1e%v\xb9*\xc3\x9e2,\x00\x08|\xd0cUan\xac\x89\x00
\x10\xf8\xa0\xa7\xf2<\xdf\xcf\xd2\xeb\xe5\x9eY/\x17\x00\x81\x0f\xfaM\xef\x1e\x00\x02\x1f\x0cU\x9e\xe7E\x96.\xc3\xf2\xc4z\xb9\x00\x08|\xd0\xdf\xb07\xca\xd2eVB\x19\x16\x135\x00\x10\xf8\xa0\xc7\x8a,]\x86\xc5z\xb9\x00\x08|\xd0Wq\xbd\xdc\x83\xc4.\x17e\xd8\x9bh)\x00\x04>\xe8\xaf\xa2\xe2z+j\x00 \xf0A_\xe5y>\xce\xd2eX\x8e\x95a\x01@\xe0\x83\xfe\x86\xbd:\xeb\xe5\x16Z\x0a\x00\x81\x0f\xfa+\x0c\xd5\xa6\xca\xb0\x1c)\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<N\xecr\x95)\xc3\x02\x80\xc0\x07\xbdV\x15\xe6\x0aeX\x00\x10\xf8\xa0\xa7b\x19\x96\x87\x89]\xce\x94a\x01@\xe0\x83~\xab\xea\xddS\x86\x05\x00\x81\x0f\xfa*\xcf\xf3\x10\xe6\xb6\x13\xbb\x842,\xe7Z\x0a\x00\x81\x0f\xfa\x19\xf6\xea\x94a\xd1\xbb\x07\x80\xc0\x07=\x16\xc2^j\xbd\xdc#\x135\x00\x10\xf8\xa0\xa7b\x19\x96G\x89]\xae\xca\xb0Wh)\x00\x04>\xe8\xafI\xc5\xf5cM\x04\x80\xc0\x07=\x95\xe7\xf9~\x96^/\xf7\xccz\xb9\x00\x08|\xd0oUeX\xc6\x9a\x08\x00\x81\x0fz*\xcf\xf3\x22K\xaf\x97\xfb\xd4z\xb9\x00\x08|\xd0\xdf\xb0\x17\xca\xb0\xa4\xca\xac\x842,\x85\x96\x02@\xe0\x83\xfe\x0aC\xb9\xa92,\x87\xca\xb0\x00 \xf0AO\xc5\xf5r\x0f\x12\xbb\x5cX/\x17\x00\x81\x0f\xfa\xad\xa8\xb8\xde\x8a\x1a\x00\x08|\xd0Wy\x9e\x8f\xb3t\x19\x96g\xca\xb0\x00 
\xf0A\x7f\xc3\x9e\xf5r\x01\x10\xf8`\xe0B\x98K\x95a9R\x86\x05\x00\x81\x0fz*\xae\x97\xfb8\xb1\xcbUV]\x84\x19\x00\x04>\xe8\xb0\xaa0W(\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<L\xecr\xa6\x0c\x0b\x00\x02\x1f\xf4[U\xef\x9e\x89\x1a\x00\xac\xc5\x1b\x9a\x00\x16\x17\xcb\xb0l'v9~\xf5\xea\xd5\xb9\x96\x02\xe8\x9f\xaf?\xf8\xc2$\xfe\xb3\xf8\xf2\x8bo]\xf6\xf11\xe8\xe1\x83\x05\xfd\xc9?\xfe\x81\xf0\xc5)\xd5\xbb\xa7\x0c\x0b@\xbf\x9dd7+'\xbd(\xc3\xdfi\xb9\x8d\x05>\xd80\x0f\xbe\xf3c\xa3,\xbd^\xee\x91\x89\x1a\x00\xfd\xf5\xe5\x17\xdf\x0a\x81\xef*\xfe7\x14\xd5\x7f\xbf\x0c}\x97\xe5V\x94\xdb=\x81\x0f\x06\xee\xcf\xfd\xe1\x1b\xd9\xaf}\xf0\xab\x9fO\xecrU\x86\xbdBK\x01\xf4\xde\xeb#9\xa1\xdej(\xc3\xf5\xbd0\xe4[n;\x02\x1f\x0c\xd4\xe7^\xfc`\xd5.\x86r\x01\x86a\x92\xdd\x9c\xa23M\x18\xee\xfdF\x19\xfa\xce\xbb:\xdc+\xf0\xc1\x9c~\xea\x0f~4;\xfb\x95\xd3\xd4.\xa1\x0c\xcb\x89\x96\x02\xe8\xbf/\xbf\xf8V85\xa7\xea==L\xde\x0b\xc3\xbd/\xe3p\xefH\xe0\x83\x9e\xfb\xdd\x7f\xff\xdd\xaa]\xc6Z\x09`P\xea\xae\x94\x14\xce\xeb\x0e\xc3\xbda\x92\xc7I\xb9\xed\x09|\xd0C\x7f\xed\xbb\xf7\xb2o\x9e\xffFj\x97\xa7\xd6\xcb\x05\x18\x96/\xbf\xf8V(\xafu\xd6\xf0\xd7BA\xfe\xe7q\x92\xc7x]\x93<\x04>h\xe8\xc7\xff\xf83\xd97O\x93a/\x9c\xe3Qh)\x80A\x9a\xcc\xf9{a\x92\xc7\xfb\xe5v\x19'y\x8c\x04>\xe8\xb07\x7f\xe7\xc7\xb3\xef|\xe7;\xa9]\xac\x97\x0b0P_~\xf1\xad\x10\xf8\xae\x16\xb8\x890\xdc{\xb7\xa6\xdf\xbe\xc0\x07\x1d\xf3\x17\xfe\xef\x0fe\xbf\xf6\xcb\xff\x22\xb5\xcbE\x19\xf6\x8e\xb4\x14\xc0\xa0MZ\xba\x9dP\xd3\xef\x838\xdc{\xb8\xcc\xe1^\x81\x0f\x1a\xf8\xb1\xab\xca?\x19eX\x00\x86\xaf\xed/\xf6a\xb8\xf7k\xd9\x12k\xfa\x09|P\xd3_\xfd\xbd\x9f\xa8*\xc3\xf2\xec\xd5\xabW\xa7Z\x0a`\xd8b\x89\x96\xe3%\xdd\xfcmM\xbfV\x97p{\xc3\xd3\x06\xd5\xc2D\x8d\x97\xdf\xfc\x9f\xc9}\xfe\xd1\xcf\xfd\xcd_\xea\xc2\xd4{\x00V\xe2<\x86\xb3e\x09\xc3\xbd\xbb\xe5\xe7J\xe8M\x0c\xdb\xa4\x0c\x9a\x97\xf3\xdeX\xfe\xea\xd5+O\x19D[_\xf9\xd2i\xfc#\xfb\x84P\x86\xe5W\xff\xd9\xaf\xcc\xfc\xbdb\xffg\xb2\x9f9\xff\xef\x1a\x10\x80e:\x8e\xc1\xef\xb4\xe9/\x1a\xd2\x85)~\xf4\xf7>\xf7\xfd\x7f\x87\xf5rSeXv\xbf\xf4\x
d3\xd9\xdf\xf8\xed\xefh4\x00\x96-\xf4(>\xbf]\xc2\xad\xc9$\x0f\x81\x0f^\xf3c\xbf\xfd\x97\xb3{\xdf\xf8\xa9\xec\x87\xbe\xfb\x93\x1f\xfd\xff\xcf|\xfbG\x92eX\xde\xfeS?\x9c\xfd\xf0\xef\xfe\x91\x86\x03`U>Z\xc2-\xbb\xa9\xe9wT\xa7\xa6\x9f!]\xb8\xe3\xf3?\xfb\x0b\xbf\xf3\x13\xdf\xd9\xfa\xd3\xb7\xff\xff\xdc_|\x91\xfd\xab_\xfa\xe73\xf7\xff\xf9\x9f}+\xfb\x85\xff\xf4m\x0d\x07\xc0\xba=\xcbn\x86{\xa7\xae\xf7k\xd2\x06Don\x7fu\xf2\x13\xd9\xc7a/\xf8\xcc\xb7\xaf\x93\xbf\xf3\xb3\xd9\xff\xd2p\x00tAX\xc2\xed\xe1\xd7\x1f|!\x14\x85\xbe\x9d\xe4\xf1\xfdE\x00\xf4\xf0A\x0c{\xd9\x94\xd9V[?\xf2G\xd9\x9f\xf8\xd1\xdf\xca\xfe\xe5\xf3g\x9f\xfa\x9d\xbf\xffs\x7f+\xfb\xbb\xbf\xf1\xdf4\x1e\x00]\x14z,Bo\xdfQX\x03X\xe0C\xd8\x9b\x11\xf6\xee\xfa\xb3\x9f\xfd\x83\xec\xff\xfc\xfe\xbf\xc9\xfe\xf5\xbf\xfb\xb7\x1f\xfd\xff\xfe\xfdQ\xf6O\xb7~\xc4\xb9{\x00\xf4!\xf8\x1d\x0a|\x08{\x0d\xea(\xfd\x9d\x9f\xfcV\xf6\xcb\xff\xf1<{\xe7\x8b\x7f^\x19\x16\x00\xba\xec\x13C\xbb\x02\x1f\xc2^C\xef\xfc\xe07\xb3\xbf\xfe\xdd\xdf\xd2\x80\x00t\xd1\xd4\xc9\x1b\x02\x1f\xc2\xde\x1c\xfe\xde\x1f\xfeF\xf6\xd3\xbf\xff_5$\x00]\x10\x86m\xc3\xe7\xda\xd1\xac\xd58\x04>\x84=\xa1\x0f\x80~\xba\xc8n\x86mO\xee\xce\xc8\x9dFY\x16\x84\xbd9\xfd\x97\x1f\xf8\x5c\xf6\xd3\x99\xc0\x07\xc0\xca5^bM\x0f\x1f\xc2\xde\x1c\xdez\xf5?\xb2\x9f\xff\xbd_\xd7\xa8\x00\xacJ\x18\xb6\xbd\x9d\x84q\xd9\xf4\x97\xf5\xf0!\xec5\xf4\xf9\xfc\x7f\xff\xca\xcf\xff\xee\xaf\xff\xa2V\x05\xd8h{\xe5\xf6x\x05\xf7s\x16C\xded\x91\x1b\xd1\xc3\x87\xb0\xd7\xcc\xf1o^\xfc\xe2X\xab\x02l\xb6\xaf?\xf8B\x98\x05\xfbp\x89w\x11\x86m?*\x9a\xdc\xc6\x8d\x09|\x08{\xc2\x1e\x00\xcd\xc2\xde\xa8\xbcx\xb1\x84\x9b\x9e\xba,Z\x1b\x0c\xe9\x22\xec\x09{\x004s\xd8\xf2\xed\x85a\xdb\xa3\xd7k\xe7\xb5I\x0f\x1f\xc2\x9e\xb0\x07@M_\x7f\xf0\x85{\xe5\xc5e\xb9m-xS\xb7k\xdd\x16\xf3L\xc2hJ\x0f\x1f\xc2\x9e\xb0\x07@}\xfb\x0b\x86\xbd0l[d5j\xe7\x09| 
\xec\x01\xb0\x1e\xc5\x9c\xbf\x17\x96<;jR;O\xe0\x03a\x0f\x80\x15\xfb\xfa\x83/\xec\x95\x17\xf7\x1b\xfc\xcaB\xb5\xf3\x04>\x10\xf6\x00X\xbd\xba\x9f\x0d\x1f-y\xb6h\xed\xbc6\x99\xb4\x81\xb0'\xec\x01P\xa1f)\x96Vk\xe7\xb5I\x0f\x1f\xc2\x9e\xb0\x07@\xb5Y\x9f\x0fa\x12\xc6$\x06\xbd\x97]=x\x81\x0faO\xd8\x03\xa0\xda\xeb\xb5\xf7ZY\xf2L\xe0\x03a\x0f\x80\x0e\xf8\xfa\x83/\x84\xcf\x88P\x8ae\xa5\xb5\xf3\x04>\x84=a\x0f\x80\xd5\xd9+\xb7w\xb3%,y\xb6*&m \xec\x01\xc0\xc0\x09|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00 \xf0\x09|\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 
\xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\xc0\xe6\x06>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x16\xf3\x99\x96\xc3\xd9=a\x0f\x00`\xc0\x81\xaftT\x86\xb4\x91\xb0\x07\x000\xc0\xc0\x17{\xf7B@\x1b\xb7p[\xc2\x1e\x00@\xd7\x02_\xe90^.\x14\xae\x84=\x00\x80\xee\x06\xbe\xdb`u\xbf\x0cm\xfb\xc2\x1e\x00\xc0\x80\x02_\x0cx\xf7\xa7\x84?a\x0f\x00`\x08\x81oJ\xc0{\xd8d\xf2\x86\xb0\x07\x00\xd0\xe1\xc0\x17\x83\xdd\xc3\x1a!P\xd8\x03\x00\xe8c\xe0K\x04\xbb\xca\xe0%\xec\x01\x00\xf4;\xf0%'o\x08{\x00\x00=\x08|eh\x0b!\xeb~b\x97Ca\x0f\x00\xa0\xc7\x81/\xab\x1e\xb6\xdd}}\xf2\x86\xb0\x07\x00\xd0\x93\xc0\x17\x83\xdcn\x8d]\x0f\x85=\x00\x80\x1e\x06\xbel\xc6p\xed\x14ca\x0f\x00`}\xf2W\xaf^\xcd\xf5\x8be\x80{Y^l\xd5\xdc\xfd\xa2\xdc\xb6\x85=\x00\x80\xd5\x9b\xab\x87/N\xd6\xd8j\xf0+\xc2\x1e\x00@\x9f\x02_6\xc7\xd2i\xc2\x1e\x00\xc0z4\x1e\xd2\x8d\x935^\x08{\x00\x00\xfd0O\x0f_!\xec\x01\x00\xf4G\xa3\x1e\xbe7\xb7\xbfz\xaf\xbc\xb8\xcc\x9a\x9d\xbf'\xec\x01\x00\xacQ\xd3\x1e\xbe}a\x0f\x00`\xd8\x
81\xefpE\xc7%\xec\x01\x00\xb4\xa4\xf6\x90\xee\x9b\xdb_\xdd)/\xbe!\xec\x01\x00\xf4K\x93\x1e\xbeU\xf4\xee\x09{\x00\x00\xeb\x08|q\xb2\xc6\xfe*\x0e\xa8\xbc\xaf=O\x0b\x00@{j\x0d\xe9\xc6\x955\xde_\xe1q]\x95\xdbQ\xb9M~\xf3\xe2\x17_z\x9a\x00\x00\x96\x1f\xf8.\xcb\x8b\xfbk8\xbe\xebr;\x09\xe1\xaf\x0c~\xe7\x9e.\x00\x80%\x04\xbe8\xc4\xfa\xbc\x03\xc7z\x91\xdd\xf4\xfa\x9d\xe8\xf5\x03\x00h7\xf0M\xca\x8b\x83\x0e\x1d\xb3^?\x00\x80\xb6\x02_\x9c\xac\xf1\xbd\x0e\x1f\x7f\xe8\xf5+\xca\xe0w\xe2\xa9\x04\x00\x98\xaej\x96\xee\xb8\xe3a\xefH\xd8\x03\x00H{\xa3\xe2\xfa\xc3\x8e\x1d\xaf\xe1\x5c\x00\x80\xb6\x02_\x9c\xacq\xbf#\xc7i\xc2\x06\x00@\xdb\x81/\xeb\xc6p\xeeq\xa67\x0f\x00`!S'm\xbc\xb9\xfd\xd5Qy\xf1bM\xc7\xa4\xe82\x00@\x8bf\xf5\xf0\x8d\xd7p,\xc71\xe4\x9dzZ\x00\x00\x86\x13\xf8\xf4\xe6\x01\x00\xac:\xf0\xbd\xb9\xfd\xd5\xfdl\xf9\x935\x9e\xc5\x90\xa7\xa4\x0a\x00\xc0\xaa\x03_\xb6\xbc\xde\xbd\xd0\x9b7\x89A\xefR\xd3\x03\x00\xac\xc6'&m,i\xb2\xc6Y\xa6@2\x00\xc0\xda\xbc\xde\xc37n\xe9vC\x81\xe4I\x0cz\x97\x9a\x19\x00`8\x81/\xf4\xe6\x85!\xdb\x89\xa6\x05\x00\xe8X\xe0{s\xfb\xab!\xec\xcd3YCo\x1e\x00@\x1f\x02_\xd6\xbcw\xcfrg\x00\x00=\xf0\xd1\xa4\x8d\x06\x935Bo^\x98|a\xb93\x00\x80\x9e\xb8\xed\xe1;\xac\xd8Oo\x1e\x00@\xcf\x03\xdfx\xc6\xf5\x96;\x03\x00\xe8{\xe0\x8b\x935\xb6\xee\xfc\xccrg\x00\x00C\x0a|\xd9\xc7\xbd{z\xf3\x00\x00\x06\x1a\xf8B\xc0\xdb\xd7\x9b\x07\x000L\xff_\x80\x01\x00e|\xfb\xc4\xd4o\x058\x00\x00\x00\x00IEND\xaeB`\x82"
qt_resource_name = b"\x00\x11\x0bF\x95g\x00p\x00a\x00r\x00a\x00m\x00e\x00t\x00r\x00i\x00c\x00f\x00i\x00t\x00t\x00i\x00n\x00g\x00\x06\x07\x03}\xc3\x00i\x00m\x00a\x00g\x00e\x00s\x00\x1c\x053\xe8'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00z\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g\x00\x10\x0a1\xdeg\x00m\x00o\x00d\x00e\x00l\x00-\x00v\x00i\x00e\x00w\x00e\x00r\x00.\x00p\x00n\x00g\x00\x1c\x053\xf0'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00x\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g\x00\x1c\x053\xf4'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00y\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g"
qt_resource_struct = b"\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00(\x00\x02\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00:\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x9e\x00\x00\x00\x00\x00\x01\x00\x00F6\x00\x00\x00\xdc\x00\x00\x00\x00\x00\x01\x00\x00~\xa5\x00\x00\x00x\x00\x00\x00\x00\x00\x01\x00\x006|"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from abc import ABC, abstractmethod
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import pendulum
import requests
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.auth import TokenAuthenticator
from pendulum import DateTime, Period
from slack_sdk import WebClient
class SlackStream(HttpStream, ABC):
    """Base class for all Slack REST API streams.

    Centralizes the concerns shared by every endpoint: cursor-based
    pagination, the page-size limit parameter, unwrapping the records out of
    the response envelope, and 429 rate-limit backoff.
    """

    url_base = "https://slack.com/api/"
    primary_key = "id"
    page_size = 100

    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        """Return Slack's pagination cursor, or None when this is the last page."""
        cursor = response.json().get("response_metadata", {}).get("next_cursor")
        return {"cursor": cursor} if cursor else None

    def request_params(
        self,
        stream_state: Mapping[str, Any],
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> MutableMapping[str, Any]:
        """Build query params: always the page size, plus the cursor when paging."""
        params: MutableMapping[str, Any] = {"limit": self.page_size}
        if next_page_token:
            params.update(next_page_token)
        return params

    def parse_response(
        self,
        response: requests.Response,
        stream_state: Mapping[str, Any] = None,
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> Iterable[MutableMapping]:
        """Yield every record found under this stream's data field."""
        body = response.json()
        for record in body.get(self.data_field, []):
            yield record

    def backoff_time(self, response: requests.Response) -> Optional[float]:
        """Honor Slack's Retry-After header on 429 responses.

        For anything other than a 429 (e.g. 5XX) return None so the default
        retry behavior applies. See https://api.slack.com/docs/rate-limits#web
        """
        if response.status_code != 429:
            return None
        return int(response.headers.get("Retry-After", 0))

    @property
    @abstractmethod
    def data_field(self) -> str:
        """The name of the field in the response which contains the data"""
class Channels(SlackStream):
    """All public channels in the workspace (conversations.list)."""

    data_field = "channels"

    def path(self, **kwargs) -> str:
        return "conversations.list"

    def request_params(self, **kwargs) -> MutableMapping[str, Any]:
        """Restrict the listing to public channels only."""
        return {**super().request_params(**kwargs), "types": "public_channel"}
class ChannelMembers(SlackStream):
    """Channel membership as {member_id, channel_id} join rows (conversations.members)."""

    data_field = "members"

    def path(self, **kwargs) -> str:
        return "conversations.members"

    def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
        """Add the channel id from the current slice to the base params."""
        params = super().request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs)
        params["channel"] = stream_slice["channel_id"]
        return params

    def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
        """Slack returns bare member-id strings; pair each one with its channel id."""
        channel_id = stream_slice["channel_id"]
        for member_id in super().parse_response(response, **kwargs):
            yield {"member_id": member_id, "channel_id": channel_id}

    def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
        """Emit one slice per public channel."""
        parent = Channels(authenticator=self.authenticator)
        for channel in parent.read_records(sync_mode=SyncMode.full_refresh):
            yield {"channel_id": channel["id"]}
class Users(SlackStream):
    """All users in the workspace (users.list)."""

    data_field = "members"

    def path(self, **kwargs) -> str:
        return "users.list"
# Incremental Streams
def chunk_date_range(start_date: DateTime, interval=pendulum.duration(days=1)) -> Iterable[Period]:
    """
    Yield a pendulum.period for each `interval`-sized window from `start_date` until now.

    The last window may extend past the current time.
    """
    cutoff = pendulum.now()
    window_start = start_date
    while window_start <= cutoff:
        yield pendulum.period(window_start, window_start + interval)
        window_start = window_start + interval
class IncrementalMessageStream(SlackStream, ABC):
    """Base for message-shaped streams, cursored on the float value of Slack's "ts"."""

    data_field = "messages"
    cursor_field = "float_ts"
    primary_key = ["channel_id", "ts"]

    def __init__(self, default_start_date: DateTime, **kwargs):
        # Earliest timestamp to sync from when no saved state exists.
        self._start_ts = default_start_date.timestamp()
        super().__init__(**kwargs)

    def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
        """Merge the slice (channel / time window) into the request params."""
        params = super().request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs)
        params.update(stream_slice)
        return params

    def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
        """Stamp each record with its channel id and a numeric cursor value."""
        channel_key, ts_key = self.primary_key
        for record in super().parse_response(response, **kwargs):
            record[channel_key] = stream_slice.get("channel", "")
            record[self.cursor_field] = float(record[ts_key])
            yield record

    def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
        """Advance the cursor (in place) to the newest timestamp seen so far."""
        current_stream_state = current_stream_state or {}
        seen = current_stream_state.get(self.cursor_field, self._start_ts)
        current_stream_state[self.cursor_field] = max(latest_record[self.cursor_field], seen)
        return current_stream_state
class ChannelMessages(IncrementalMessageStream):
    """Messages in every public channel (conversations.history), sliced into 24h windows."""

    def path(self, **kwargs) -> str:
        return "conversations.history"

    def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
        """Emit one slice per 24h window from the saved cursor (or default start) to now."""
        stream_state = stream_state or {}
        start_date = pendulum.from_timestamp(stream_state.get(self.cursor_field, self._start_ts))
        for period in chunk_date_range(start_date):
            yield {"oldest": period.start.timestamp(), "latest": period.end.timestamp()}

    def read_records(self, stream_slice: Optional[Mapping[str, Any]] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
        """Read one time window, either for an explicit channel or for all channels.

        The Threads stream passes a slice that already contains "channel"; otherwise
        every public channel is read for the window.
        """
        # Fix: guard against a None slice — previously `"channel" in stream_slice`
        # raised TypeError when read_records was invoked without a slice.
        stream_slice = stream_slice or {}
        if "channel" in stream_slice:
            # Channel is provided when reading threads
            yield from super().read_records(stream_slice=stream_slice, **kwargs)
        else:
            # if channel is not provided, then get channels and read accordingly
            channels = Channels(authenticator=self.authenticator)
            for channel_record in channels.read_records(sync_mode=SyncMode.full_refresh):
                stream_slice["channel"] = channel_record["id"]
                yield from super().read_records(stream_slice=stream_slice, **kwargs)
class Threads(IncrementalMessageStream):
    """Thread replies (conversations.replies) for recently-active parent messages."""

    def __init__(self, lookback_window: Mapping[str, int], **kwargs):
        # How far back to re-read parent messages so late thread replies are picked up.
        self.messages_lookback_window = lookback_window
        super().__init__(**kwargs)

    def path(self, **kwargs) -> str:
        return "conversations.replies"

    def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
        """
        The logic for incrementally syncing threads is not very obvious, so buckle up.
        To get all messages in a thread, one must specify the channel and timestamp of the parent (first) message of that thread, basically its ID.
        One complication is that threads can be updated at any time in the future. Therefore, if we wanted to comprehensively sync data i.e: get every
        single response in a thread, we'd have to read every message in the slack instance every time we ran a sync, because otherwise there is no
        way to guarantee that a thread deep in the past didn't receive a new message.
        A pragmatic workaround is to say we want threads to be at least N days fresh i.e: look back N days into the past, get every message since,
        and read all of the thread responses. This is essentially the approach we're taking here via slicing: create slices from N days into the
        past and read all messages in threads since then. We could optionally filter out records we have already read, but that's omitted to keep
        the logic simple to reason about.
        Good luck.
        """
        stream_state = stream_state or {}
        channels_stream = Channels(authenticator=self.authenticator)
        if self.cursor_field in stream_state:
            # Since new messages can be posted to threads continuously after the parent message has been posted, we get messages from the latest date
            # found in the state minus the lookback window to pick up any new messages in threads.
            # If there is state always use lookback
            messages_start_date = pendulum.from_timestamp(stream_state[self.cursor_field]) - self.messages_lookback_window
        else:
            # If there is no state i.e: this is the first sync then there is no use for lookback, just get messages from the default start date
            messages_start_date = pendulum.from_timestamp(self._start_ts)
        messages_stream = ChannelMessages(authenticator=self.authenticator, default_start_date=messages_start_date)
        for message_chunk in messages_stream.stream_slices(stream_state={self.cursor_field: messages_start_date.timestamp()}):
            self.logger.info(f"Syncing replies {message_chunk}")
            for channel in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
                message_chunk["channel"] = channel["id"]
                for message in messages_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=message_chunk):
                    # Fix: primary_key is a LIST (["channel_id", "ts"]), so the original
                    # `message[self.primary_key]` raised TypeError (unhashable type: 'list').
                    # The slice needs the parent message's cursor value, which
                    # parse_response stores under self.cursor_field.
                    yield {"channel": channel["id"], self.cursor_field: message[self.cursor_field]}
class JoinChannelsStream(HttpStream):
    """
    This class is a special stream which joins channels because the Slack API only returns messages from channels this bot is in.
    Its responses should only be logged for debugging reasons, not read as records.
    """

    url_base = "https://slack.com/api/"
    http_method = "POST"
    primary_key = "id"

    def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
        # Fix: the original reused double quotes inside a double-quoted f-string,
        # which is a SyntaxError on every Python version before 3.12 (PEP 701).
        return [{"message": f"Successfully joined channel: {stream_slice['channel_name']}"}]

    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        return None  # No pagination

    def path(self, **kwargs) -> str:
        return "conversations.join"

    def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
        """Emit one slice per public channel to join."""
        channels_stream = Channels(authenticator=self.authenticator)
        for channel in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
            yield {"channel": channel["id"], "channel_name": channel["name"]}

    def request_body_json(self, stream_slice: Mapping = None, **kwargs) -> Optional[Mapping]:
        return {"channel": stream_slice["channel"]}
class SourceSlack(AbstractSource):
    """Airbyte source for Slack."""

    def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Optional[Any]]:
        """Verify the API token works by listing a single user."""
        slack_client = WebClient(token=config["api_token"])
        users = slack_client.users_list(limit=1).get("members", [])
        if users:
            return True, None
        return False, "There are no users in the given Slack instance"

    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        """Instantiate all streams; optionally join every public channel first."""
        authenticator = TokenAuthenticator(config["api_token"])
        default_start_date = pendulum.parse(config["start_date"])
        threads_lookback_window = pendulum.Duration(days=config["lookback_window"])
        streams = [
            Channels(authenticator=authenticator),
            ChannelMembers(authenticator=authenticator),
            ChannelMessages(authenticator=authenticator, default_start_date=default_start_date),
            Threads(authenticator=authenticator, default_start_date=default_start_date, lookback_window=threads_lookback_window),
            Users(authenticator=authenticator),
        ]
        # To sync data from channels, the bot backed by this token needs to join all those channels. This operation is idempotent.
        # Fix: use .get() so a config missing the optional flag doesn't raise KeyError.
        if config.get("join_channels"):
            logger = AirbyteLogger()
            logger.info("joining Slack channels")
            join_channels_stream = JoinChannelsStream(authenticator=authenticator)
            for stream_slice in join_channels_stream.stream_slices():
                for message in join_channels_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=stream_slice):
                    logger.info(message["message"])
        return streams
| #
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from abc import ABC, abstractmethod
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import pendulum
import requests
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.auth import TokenAuthenticator
from pendulum import DateTime, Period
from slack_sdk import WebClient
class SlackStream(HttpStream, ABC):
    """Base class for all Slack REST API streams.

    Centralizes the concerns shared by every endpoint: cursor-based
    pagination, the page-size limit parameter, unwrapping the records out of
    the response envelope, and 429 rate-limit backoff.
    """

    url_base = "https://slack.com/api/"
    primary_key = "id"
    page_size = 100

    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        """Return Slack's pagination cursor, or None when this is the last page."""
        cursor = response.json().get("response_metadata", {}).get("next_cursor")
        return {"cursor": cursor} if cursor else None

    def request_params(
        self,
        stream_state: Mapping[str, Any],
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> MutableMapping[str, Any]:
        """Build query params: always the page size, plus the cursor when paging."""
        params: MutableMapping[str, Any] = {"limit": self.page_size}
        if next_page_token:
            params.update(next_page_token)
        return params

    def parse_response(
        self,
        response: requests.Response,
        stream_state: Mapping[str, Any] = None,
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> Iterable[MutableMapping]:
        """Yield every record found under this stream's data field."""
        body = response.json()
        for record in body.get(self.data_field, []):
            yield record

    def backoff_time(self, response: requests.Response) -> Optional[float]:
        """Honor Slack's Retry-After header on 429 responses.

        For anything other than a 429 (e.g. 5XX) return None so the default
        retry behavior applies. See https://api.slack.com/docs/rate-limits#web
        """
        if response.status_code != 429:
            return None
        return int(response.headers.get("Retry-After", 0))

    @property
    @abstractmethod
    def data_field(self) -> str:
        """The name of the field in the response which contains the data"""
class Channels(SlackStream):
    """All public channels in the workspace (conversations.list)."""

    data_field = "channels"

    def path(self, **kwargs) -> str:
        return "conversations.list"

    def request_params(self, **kwargs) -> MutableMapping[str, Any]:
        """Restrict the listing to public channels only."""
        return {**super().request_params(**kwargs), "types": "public_channel"}
class ChannelMembers(SlackStream):
    """Channel membership as {member_id, channel_id} join rows (conversations.members)."""

    data_field = "members"

    def path(self, **kwargs) -> str:
        return "conversations.members"

    def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
        """Add the channel id from the current slice to the base params."""
        params = super().request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs)
        params["channel"] = stream_slice["channel_id"]
        return params

    def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
        """Slack returns bare member-id strings; pair each one with its channel id."""
        channel_id = stream_slice["channel_id"]
        for member_id in super().parse_response(response, **kwargs):
            yield {"member_id": member_id, "channel_id": channel_id}

    def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
        """Emit one slice per public channel."""
        parent = Channels(authenticator=self.authenticator)
        for channel in parent.read_records(sync_mode=SyncMode.full_refresh):
            yield {"channel_id": channel["id"]}
class Users(SlackStream):
    """All users in the workspace (users.list)."""

    data_field = "members"

    def path(self, **kwargs) -> str:
        return "users.list"
# Incremental Streams
def chunk_date_range(start_date: DateTime, interval=pendulum.duration(days=1)) -> Iterable[Period]:
    """
    Yield a pendulum.period for each `interval`-sized window from `start_date` until now.

    The last window may extend past the current time.
    """
    cutoff = pendulum.now()
    window_start = start_date
    while window_start <= cutoff:
        yield pendulum.period(window_start, window_start + interval)
        window_start = window_start + interval
class IncrementalMessageStream(SlackStream, ABC):
    """Base for message-shaped streams, cursored on the float value of Slack's "ts"."""

    data_field = "messages"
    cursor_field = "float_ts"
    primary_key = ["channel_id", "ts"]

    def __init__(self, default_start_date: DateTime, **kwargs):
        # Earliest timestamp to sync from when no saved state exists.
        self._start_ts = default_start_date.timestamp()
        super().__init__(**kwargs)

    def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
        """Merge the slice (channel / time window) into the request params."""
        params = super().request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs)
        params.update(stream_slice)
        return params

    def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
        """Stamp each record with its channel id and a numeric cursor value."""
        channel_key, ts_key = self.primary_key
        for record in super().parse_response(response, **kwargs):
            record[channel_key] = stream_slice.get("channel", "")
            record[self.cursor_field] = float(record[ts_key])
            yield record

    def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
        """Advance the cursor (in place) to the newest timestamp seen so far."""
        current_stream_state = current_stream_state or {}
        seen = current_stream_state.get(self.cursor_field, self._start_ts)
        current_stream_state[self.cursor_field] = max(latest_record[self.cursor_field], seen)
        return current_stream_state
class ChannelMessages(IncrementalMessageStream):
    """Messages in every public channel (conversations.history), sliced into 24h windows."""

    def path(self, **kwargs) -> str:
        return "conversations.history"

    def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
        """Emit one slice per 24h window from the saved cursor (or default start) to now."""
        stream_state = stream_state or {}
        start_date = pendulum.from_timestamp(stream_state.get(self.cursor_field, self._start_ts))
        for period in chunk_date_range(start_date):
            yield {"oldest": period.start.timestamp(), "latest": period.end.timestamp()}

    def read_records(self, stream_slice: Optional[Mapping[str, Any]] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
        """Read one time window, either for an explicit channel or for all channels.

        The Threads stream passes a slice that already contains "channel"; otherwise
        every public channel is read for the window.
        """
        # Fix: guard against a None slice — previously `"channel" in stream_slice`
        # raised TypeError when read_records was invoked without a slice.
        stream_slice = stream_slice or {}
        if "channel" in stream_slice:
            # Channel is provided when reading threads
            yield from super().read_records(stream_slice=stream_slice, **kwargs)
        else:
            # if channel is not provided, then get channels and read accordingly
            channels = Channels(authenticator=self.authenticator)
            for channel_record in channels.read_records(sync_mode=SyncMode.full_refresh):
                stream_slice["channel"] = channel_record["id"]
                yield from super().read_records(stream_slice=stream_slice, **kwargs)
class Threads(IncrementalMessageStream):
    """Thread replies (conversations.replies) for recently-active parent messages."""

    def __init__(self, lookback_window: Mapping[str, int], **kwargs):
        # How far back to re-read parent messages so late thread replies are picked up.
        self.messages_lookback_window = lookback_window
        super().__init__(**kwargs)

    def path(self, **kwargs) -> str:
        return "conversations.replies"

    def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
        """
        The logic for incrementally syncing threads is not very obvious, so buckle up.
        To get all messages in a thread, one must specify the channel and timestamp of the parent (first) message of that thread, basically its ID.
        One complication is that threads can be updated at any time in the future. Therefore, if we wanted to comprehensively sync data i.e: get every
        single response in a thread, we'd have to read every message in the slack instance every time we ran a sync, because otherwise there is no
        way to guarantee that a thread deep in the past didn't receive a new message.
        A pragmatic workaround is to say we want threads to be at least N days fresh i.e: look back N days into the past, get every message since,
        and read all of the thread responses. This is essentially the approach we're taking here via slicing: create slices from N days into the
        past and read all messages in threads since then. We could optionally filter out records we have already read, but that's omitted to keep
        the logic simple to reason about.
        Good luck.
        """
        stream_state = stream_state or {}
        channels_stream = Channels(authenticator=self.authenticator)
        if self.cursor_field in stream_state:
            # Since new messages can be posted to threads continuously after the parent message has been posted, we get messages from the latest date
            # found in the state minus the lookback window to pick up any new messages in threads.
            # If there is state always use lookback
            messages_start_date = pendulum.from_timestamp(stream_state[self.cursor_field]) - self.messages_lookback_window
        else:
            # If there is no state i.e: this is the first sync then there is no use for lookback, just get messages from the default start date
            messages_start_date = pendulum.from_timestamp(self._start_ts)
        messages_stream = ChannelMessages(authenticator=self.authenticator, default_start_date=messages_start_date)
        for message_chunk in messages_stream.stream_slices(stream_state={self.cursor_field: messages_start_date.timestamp()}):
            self.logger.info(f"Syncing replies {message_chunk}")
            for channel in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
                message_chunk["channel"] = channel["id"]
                for message in messages_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=message_chunk):
                    # Fix: primary_key is a LIST (["channel_id", "ts"]), so the original
                    # `message[self.primary_key]` raised TypeError (unhashable type: 'list').
                    # The slice needs the parent message's cursor value, which
                    # parse_response stores under self.cursor_field.
                    yield {"channel": channel["id"], self.cursor_field: message[self.cursor_field]}
class JoinChannelsStream(HttpStream):
    """
    This class is a special stream which joins channels because the Slack API only returns messages from channels this bot is in.
    Its responses should only be logged for debugging reasons, not read as records.
    """

    url_base = "https://slack.com/api/"
    http_method = "POST"
    primary_key = "id"

    def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]:
        """Emit a single loggable confirmation message per joined channel."""
        joined = stream_slice['channel_name']
        return [{"message": f"Successfully joined channel: {joined}"}]

    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        return None  # No pagination

    def path(self, **kwargs) -> str:
        return "conversations.join"

    def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
        """Emit one slice per public channel to join."""
        channels_stream = Channels(authenticator=self.authenticator)
        for channel in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
            yield {"channel": channel["id"], "channel_name": channel["name"]}

    def request_body_json(self, stream_slice: Mapping = None, **kwargs) -> Optional[Mapping]:
        return {"channel": stream_slice["channel"]}
class SourceSlack(AbstractSource):
    """Airbyte source implementation for Slack."""

    def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Optional[Any]]:
        """Verify the api_token by listing a single user.

        Returns:
            (True, None) on success, otherwise (False, <error detail>).
        """
        # bug fix: an invalid token or network failure previously raised out of this
        # method; the check_connection contract expects (False, error) instead.
        try:
            slack_client = WebClient(token=config["api_token"])
            users = slack_client.users_list(limit=1).get("members", [])
        except Exception as e:
            return False, repr(e)
        if users:
            return True, None
        return False, "There are no users in the given Slack instance"

    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        """Build the configured stream list, optionally joining channels first."""
        authenticator = TokenAuthenticator(config["api_token"])
        default_start_date = pendulum.parse(config["start_date"])
        threads_lookback_window = pendulum.Duration(days=config["lookback_window"])
        streams = [
            Channels(authenticator=authenticator),
            ChannelMembers(authenticator=authenticator),
            ChannelMessages(authenticator=authenticator, default_start_date=default_start_date),
            Threads(
                authenticator=authenticator,
                default_start_date=default_start_date,
                lookback_window=threads_lookback_window,
            ),
            Users(authenticator=authenticator),
        ]

        # To sync data from channels, the bot backed by this token needs to join all those
        # channels. This operation is idempotent.
        # robustness: tolerate a config without the optional "join_channels" key
        if config.get("join_channels"):
            logger = AirbyteLogger()
            logger.info("joining Slack channels")
            join_channels_stream = JoinChannelsStream(authenticator=authenticator)
            for stream_slice in join_channels_stream.stream_slices():
                for message in join_channels_stream.read_records(
                    sync_mode=SyncMode.full_refresh, stream_slice=stream_slice
                ):
                    logger.info(message["message"])
        return streams
|
#!/usr/bin/env python
"""TcEx Framework Validate Module."""
# standard library
import ast
import importlib
import json
import os
import sys
import traceback
from collections import deque
from pathlib import Path
from typing import Dict, Union
# third-party
import colorama as c
# from jsonschema import SchemaError, ValidationError, validate
from pydantic import ValidationError
from stdlib_list import stdlib_list
# first-party
from tcex.app_config.install_json import InstallJson
from tcex.app_config.job_json import JobJson
from tcex.app_config.layout_json import LayoutJson
from tcex.app_config.tcex_json import TcexJson
from tcex.bin.bin_abc import BinABC
try:
# standard library
import sqlite3
except ModuleNotFoundError:
# this module is only required for certain CLI commands
pass
class Validate(BinABC):
    """Validate syntax, imports, and schemas.

    * Python and JSON file syntax
    * Python import modules
    * install.json schema
    * layout.json schema
    """

    def __init__(self, ignore_validation: bool) -> None:
        """Initialize Class properties."""
        super().__init__()
        self.ignore_validation = ignore_validation

        # class properties
        self._app_packages = []
        self._install_json_schema = None
        self._layout_json_schema = None
        self.config = {}
        self.ij = InstallJson()
        self.invalid_json_files = []
        self.lj = LayoutJson()
        self.tj = TcexJson()

        # initialize validation data
        self.validation_data = self._validation_data

    @property
    def _validation_data(self) -> Dict[str, list]:
        """Return a fresh structure for validation data."""
        return {
            'errors': [],
            'fileSyntax': [],
            'layouts': [],
            'moduleImports': [],
            'schema': [],
            'feeds': [],
        }

    def _check_node_import(self, node: Union[ast.Import, ast.ImportFrom], filename: str) -> None:
        """Record import status for a single AST import node.

        Non-import nodes are ignored so this method can safely be called for
        every node yielded by ast.walk().
        """
        if isinstance(node, ast.Import):
            modules = [n.name.split('.')[0] for n in node.names]
        elif isinstance(node, ast.ImportFrom):
            modules = [node.module.split('.')[0]]
        else:
            return

        for m in modules:
            if self.check_import_stdlib(m):
                # stdlib (or App template) modules are always importable
                continue
            m_status = self.check_imported(m)
            if not m_status:
                # bug fix: message previously read 'for (unknown) ... imported).' with an
                # unbalanced paren; include the filename and balance the parentheses.
                self.validation_data['errors'].append(
                    f'Module validation failed for {filename} '
                    f'(module "{m}" could not be imported).'
                )
            self.validation_data['moduleImports'].append(
                {'filename': filename, 'module': m, 'status': m_status}
            )

    def check_imports(self) -> None:
        """Check the projects top level directory for missing imports.

        This method will check only files ending in **.py** and does not handle
        imports validation for sub-directories.
        """
        for filename in sorted(os.listdir(self.app_path)):
            if not filename.endswith('.py'):
                continue
            fq_path = os.path.join(self.app_path, filename)

            with open(fq_path, 'rb') as f:
                # TODO: [low] is there a better way?
                code_lines = deque([(f.read(), 1)])

                while code_lines:
                    code, _ = code_lines.popleft()  # pylint: disable=unused-variable
                    try:
                        parsed_code = ast.parse(code)
                        for node in ast.walk(parsed_code):
                            self._check_node_import(node, filename)
                    except SyntaxError:
                        # syntax failures are reported separately by check_syntax()
                        pass

    @staticmethod
    def check_import_stdlib(module: str) -> bool:
        """Check if module is in Python stdlib.

        Args:
            module: The name of the module to check.

        Returns:
            bool: Returns True if the module is in the stdlib or template.
        """
        if (
            module in stdlib_list('3.6')
            or module in stdlib_list('3.7')
            or module in stdlib_list('3.8')
            or module
            in ['app', 'args', 'base_app_input', 'job_app', 'playbook_app', 'run', 'service_app']
        ):
            return True
        return False

    @staticmethod
    def check_imported(module: str) -> bool:
        """Check whether the provide module can be imported (package installed).

        Args:
            module: The name of the module to check availability.

        Returns:
            bool: True if the module can be imported, False otherwise.
        """
        try:
            # force a fresh lookup rather than trusting a cached module
            del sys.modules[module]
        except (AttributeError, KeyError):
            pass

        # https://docs.python.org/3/library/importlib.html#checking-if-a-module-can-be-imported
        find_spec = importlib.util.find_spec(module)
        found = find_spec is not None
        if found is True:
            # if dist-packages|site-packages in module_path the import doesn't count
            try:
                if 'dist-packages' in find_spec.origin:
                    found = False
            except TypeError:
                pass

            try:
                if 'site-packages' in find_spec.origin:
                    found = False
            except TypeError:
                pass
        return found

    def check_install_json(self) -> None:
        """Check all install.json files for valid schema."""
        if 'install.json' in self.invalid_json_files:
            return

        status = True
        try:
            self.ij.model
        except ValidationError as ex:
            self.invalid_json_files.append(self.ij.fqfn.name)
            status = False
            for error in json.loads(ex.json()):
                location = [str(location) for location in error.get('loc')]
                self.validation_data['errors'].append(
                    '''Schema validation failed for install.json. '''
                    f'''{error.get('msg')}: {' -> '.join(location)}'''
                )
        except ValueError:
            # any JSON decode error will be caught during syntax validation
            return

        self.validation_data['schema'].append({'filename': self.ij.fqfn.name, 'status': status})

    def check_job_json(self) -> None:
        """Validate feed files for feed job apps."""
        if 'install.json' in self.invalid_json_files:
            # can't proceed if install.json can't be read
            return

        # use developer defined app version (deprecated) or package_version from InstallJson model
        app_version = self.tj.model.package.app_version or self.ij.model.package_version
        program_name = (f'''{self.tj.model.package.app_name}_{app_version}''').replace('_', ' ')

        for feed in self.ij.model.feeds:
            if feed.job_file in self.invalid_json_files:
                # no need to check if schema if json is invalid
                continue

            # bug fix: reset the status per feed; previously one failing feed
            # marked every subsequent feed entry as failed.
            status = True
            jj = JobJson(filename=feed.job_file)

            # validate the job file exists
            if not jj.fqfn.is_file():
                self.validation_data['errors'].append(
                    f'''Schema validation failed for {feed.job_file}. '''
                    f'''The job.json file could not be found.'''
                )
                continue

            try:
                # validate the schema
                jj.model
            except ValidationError as ex:
                status = False
                for error in json.loads(ex.json()):
                    location = [str(location) for location in error.get('loc')]
                    self.validation_data['errors'].append(
                        f'''Schema validation failed for {feed.job_file}. '''
                        f'''{error.get('msg')}: {' -> '.join(location)}'''
                    )

            # validate program name
            if status is True and jj.model.program_name != program_name:
                status = False
                self.validation_data['errors'].append(
                    f'''Schema validation failed for {feed.job_file}. '''
                    f'''The job.json programName {jj.model.program_name} != {program_name}.'''
                )

            # validate program version
            if status is True and jj.model.program_version != self.ij.model.program_version:
                status = False
                self.validation_data['errors'].append(
                    f'''Schema validation failed for {feed.job_file}. The job.json program'''
                    f'''Version {jj.model.program_version} != {self.ij.model.program_version}.'''
                )

            self.validation_data['schema'].append({'filename': feed.job_file, 'status': status})

    def check_layout_json(self) -> None:
        """Check all layout.json files for valid schema."""
        if not self.lj.has_layout or 'layout.json' in self.invalid_json_files:
            return

        status = True
        try:
            self.lj.model
        except ValidationError as ex:
            # bug fix: record the layout.json filename (was self.ij.fqfn.name)
            self.invalid_json_files.append(self.lj.fqfn.name)
            status = False
            for error in json.loads(ex.json()):
                location = [str(location) for location in error.get('loc')]
                self.validation_data['errors'].append(
                    f'''Schema validation failed for layout.json. '''
                    f'''{error.get('msg')}: {' -> '.join(location)}'''
                )
        except ValueError:
            # any JSON decode error will be caught during syntax validation
            return

        self.validation_data['schema'].append({'filename': self.lj.fqfn.name, 'status': status})

        if status is True:
            self.check_layout_params()

    def check_layout_params(self) -> None:
        """Check that the layout.json is consistent with install.json.

        The layout.json files references the params.name from the install.json file. The method
        will validate that no reference appear for inputs in install.json that don't exist.
        """
        # do not track hidden or serviceConfig inputs as they should not be in layouts.json
        ij_input_names = list(self.ij.model.filter_params(service_config=False, hidden=False))
        ij_output_names = [o.name for o in self.ij.model.playbook.output_variables]

        # Check for duplicate inputs (dead `status = False` stores removed: they were
        # unconditionally overwritten before first use; errors are still recorded)
        for name in self.ij.validate.validate_duplicate_input():
            self.validation_data['errors'].append(
                f'Duplicate input name found in install.json ({name})'
            )

        # Check for duplicate sequence numbers
        for sequence in self.ij.validate.validate_duplicate_sequence():
            self.validation_data['errors'].append(
                f'Duplicate sequence number found in install.json ({sequence})'
            )

        # Check for duplicate outputs variables
        for output in self.ij.validate.validate_duplicate_output():
            self.validation_data['errors'].append(
                f'Duplicate output variable name found in install.json ({output})'
            )

        if 'sqlite3' in sys.modules:
            # create temporary inputs tables
            self.permutations.db_create_table(self.permutations._input_table, ij_input_names)

        # inputs
        status = True
        for i in self.lj.model.inputs:
            for p in i.parameters:
                if p.name not in ij_input_names:
                    # bug fix: p is a model, not a dict -- p.get('name') raised
                    # AttributeError in this error path; use p.name.
                    self.validation_data['errors'].append(
                        'Layouts input.parameters[].name validations failed '
                        f'''("{p.name}" is defined in layout.json, '''
                        'but hidden or not found in install.json).'
                    )
                    status = False
                else:
                    # any item in list afterwards is a problem
                    ij_input_names.remove(p.name)
                if 'sqlite3' in sys.modules and p.display:
                    display_query = (
                        f'''SELECT * FROM {self.permutations._input_table}'''  # nosec
                        f''' WHERE {p.display}'''
                    )
                    try:
                        self.permutations.db_conn.execute(display_query.replace('"', ''))
                    except sqlite3.Error:
                        self.validation_data['errors'].append(
                            '''Layouts input.parameters[].display validations failed '''
                            f'''("{p.display}" query is an invalid statement).'''
                        )
                        status = False

        # bug fix: check leftover names BEFORE recording the inputs status so the
        # failure is reflected in the status flag (previously set after the append).
        if ij_input_names:
            input_names = ','.join(ij_input_names)
            self.validation_data['errors'].append(
                f'Layouts input.parameters[].name validations failed ("{input_names}" '
                'values from install.json were not included in layout.json.'
            )
            status = False

        # update validation data for module
        self.validation_data['layouts'].append({'params': 'inputs', 'status': status})

        # outputs
        status = True
        for o in self.lj.model.outputs:
            if o.name not in ij_output_names:
                # update validation data errors
                self.validation_data['errors'].append(
                    f'''Layouts output validations failed ({o.name} is defined '''
                    '''in layout.json, but not found in install.json).'''
                )
                status = False
            if 'sqlite3' in sys.modules and o.display:
                display_query = (
                    f'''SELECT * FROM {self.permutations._input_table} '''  # nosec
                    f'''WHERE {o.display}'''
                )
                try:
                    self.permutations.db_conn.execute(display_query.replace('"', ''))
                except sqlite3.Error:
                    self.validation_data['errors'].append(
                        f"""Layouts outputs.display validations failed ("{o.display}" """
                        f"""query is an invalid statement)."""
                    )
                    status = False

        # update validation data for module
        self.validation_data['layouts'].append({'params': 'outputs', 'status': status})

    def check_syntax(self, app_path=None) -> None:
        """Run syntax on each ".py" and ".json" file.

        Args:
            app_path (str, optional): The path of Python files.
        """
        fqpn = Path(app_path or os.getcwd())

        for fqfn in sorted(fqpn.iterdir()):
            error = None
            status = True
            if fqfn.name.endswith('.py'):
                try:
                    with fqfn.open(mode='rb') as fh:
                        ast.parse(fh.read(), filename=fqfn.name)
                except SyntaxError:
                    status = False
                    # cleanup output
                    e = []
                    for line in traceback.format_exc().split('\n')[-5:-2]:
                        e.append(line.strip())
                    error = ' '.join(e)
            elif fqfn.name.endswith('.json'):
                try:
                    with fqfn.open() as fh:
                        json.load(fh)
                except ValueError as e:
                    # update tracker for common files
                    self.invalid_json_files.append(fqfn.name)
                    status = False
                    error = e
            else:
                # skip unsupported file types
                continue

            if error:
                # update validation data errors
                self.validation_data['errors'].append(
                    f'Syntax validation failed for {fqfn.name} ({error}).'
                )

            # store status for this file
            self.validation_data['fileSyntax'].append({'filename': fqfn.name, 'status': status})

    def interactive(self) -> None:
        """[App Builder] Run in interactive mode."""
        while True:
            line = sys.stdin.readline().strip()
            if line == 'quit':
                sys.exit()
            elif line == 'validate':
                self.check_syntax()
                self.check_imports()
                self.check_install_json()
                self.check_layout_json()
                self.check_job_json()
                self.print_json()

            # reset validation_data
            self.validation_data = self._validation_data

    def print_json(self) -> None:
        """[App Builder] Print JSON output."""
        print(json.dumps({'validation_data': self.validation_data}))

    # TODO: [low] switch to typer echo?
    def _print_file_syntax_results(self) -> None:
        """Print file syntax results."""
        if self.validation_data.get('fileSyntax'):
            print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated File Syntax:')
            print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
            for f in self.validation_data.get('fileSyntax'):
                status_color = self.status_color(f.get('status'))
                status_value = self.status_value(f.get('status'))
                # bug fix: inner quotes must differ from the f-string delimiter
                # (f"...{f.get("filename")}..." is a SyntaxError before Python 3.12)
                print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")

    def _print_imports_results(self) -> None:
        """Print import results."""
        if self.validation_data.get('moduleImports'):
            print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Imports:')
            print(f'''{c.Style.BRIGHT}{'File:'!s:<30}{'Module:'!s:<30}{'Status:'!s:<25}''')
            for f in self.validation_data.get('moduleImports'):
                status_color = self.status_color(f.get('status'))
                status_value = self.status_value(f.get('status'))
                print(
                    f'''{f.get('filename')!s:<30}{c.Fore.WHITE}'''
                    f'''{f.get('module')!s:<30}{status_color}{status_value!s:<25}'''
                )

    def _print_schema_results(self) -> None:
        """Print schema results."""
        if self.validation_data.get('schema'):
            print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Schema:')
            print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
            for f in self.validation_data.get('schema'):
                status_color = self.status_color(f.get('status'))
                status_value = self.status_value(f.get('status'))
                print(f'''{f.get('filename')!s:<60}{status_color}{status_value!s:<25}''')

    def _print_layouts_results(self) -> None:
        """Print layout results."""
        if self.validation_data.get('layouts'):
            print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Layouts:')
            print(f'''{c.Style.BRIGHT}{'Params:'!s:<60}{'Status:'!s:<25}''')
            for f in self.validation_data.get('layouts'):
                status_color = self.status_color(f.get('status'))
                status_value = self.status_value(f.get('status'))
                # bug fix: same f-string quote-nesting SyntaxError as above
                print(f"{f.get('params')!s:<60}{status_color}{status_value!s:<25}")

    def _print_feed_results(self) -> None:
        """Print feed results."""
        if self.validation_data.get('feeds'):
            print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Feed Jobs:')
            print(f'''{c.Style.BRIGHT}{'Feeds:'!s:<60}{'Status:'!s:<25}''')
            for f in self.validation_data.get('feeds'):
                status_color = self.status_color(f.get('status'))
                status_value = self.status_value(f.get('status'))
                # bug fix: same f-string quote-nesting SyntaxError as above
                print(f"{f.get('name')!s:<60}{status_color}{status_value!s:<25}")

    def _print_errors(self) -> None:
        """Print errors results."""
        if self.validation_data.get('errors'):
            print('\n')  # separate errors from normal output
        for error in self.validation_data.get('errors'):
            # print all errors
            print(f'* {c.Fore.RED}{error}')

            # ignore exit code
            if not self.ignore_validation:
                self.exit_code = 1

    def print_results(self) -> None:
        """Print results."""
        # Validating Syntax
        self._print_file_syntax_results()

        # Validating Imports
        self._print_imports_results()

        # Validating Schema
        self._print_schema_results()

        # Validating Layouts
        self._print_layouts_results()

        # Validating Feed Job Definition Files
        self._print_feed_results()

        self._print_errors()

    @staticmethod
    def status_color(status) -> str:
        """Return the appropriate status color."""
        return c.Fore.GREEN if status else c.Fore.RED

    @staticmethod
    def status_value(status) -> str:
        """Return the appropriate status color."""
        return 'passed' if status else 'failed'
| #!/usr/bin/env python
"""TcEx Framework Validate Module."""
# standard library
import ast
import importlib
import json
import os
import sys
import traceback
from collections import deque
from pathlib import Path
from typing import Dict, Union
# third-party
import colorama as c
# from jsonschema import SchemaError, ValidationError, validate
from pydantic import ValidationError
from stdlib_list import stdlib_list
# first-party
from tcex.app_config.install_json import InstallJson
from tcex.app_config.job_json import JobJson
from tcex.app_config.layout_json import LayoutJson
from tcex.app_config.tcex_json import TcexJson
from tcex.bin.bin_abc import BinABC
try:
# standard library
import sqlite3
except ModuleNotFoundError:
# this module is only required for certain CLI commands
pass
class Validate(BinABC):
    """Validate syntax, imports, and schemas.

    * Python and JSON file syntax
    * Python import modules
    * install.json schema
    * layout.json schema
    """

    def __init__(self, ignore_validation: bool) -> None:
        """Initialize Class properties."""
        super().__init__()
        self.ignore_validation = ignore_validation

        # class properties
        self._app_packages = []
        self._install_json_schema = None
        self._layout_json_schema = None
        self.config = {}
        self.ij = InstallJson()
        self.invalid_json_files = []
        self.lj = LayoutJson()
        self.tj = TcexJson()

        # initialize validation data
        self.validation_data = self._validation_data

    @property
    def _validation_data(self) -> Dict[str, list]:
        """Return a fresh structure for validation data."""
        return {
            'errors': [],
            'fileSyntax': [],
            'layouts': [],
            'moduleImports': [],
            'schema': [],
            'feeds': [],
        }

    def _check_node_import(self, node: Union[ast.Import, ast.ImportFrom], filename: str) -> None:
        """Record import status for a single AST import node.

        Non-import nodes are ignored so this method can safely be called for
        every node yielded by ast.walk().
        """
        if isinstance(node, ast.Import):
            modules = [n.name.split('.')[0] for n in node.names]
        elif isinstance(node, ast.ImportFrom):
            modules = [node.module.split('.')[0]]
        else:
            return

        for m in modules:
            if self.check_import_stdlib(m):
                # stdlib (or App template) modules are always importable
                continue
            m_status = self.check_imported(m)
            if not m_status:
                # bug fix: message previously read 'for (unknown) ... imported).' with an
                # unbalanced paren; include the filename and balance the parentheses.
                self.validation_data['errors'].append(
                    f'Module validation failed for {filename} '
                    f'(module "{m}" could not be imported).'
                )
            self.validation_data['moduleImports'].append(
                {'filename': filename, 'module': m, 'status': m_status}
            )

    def check_imports(self) -> None:
        """Check the projects top level directory for missing imports.

        This method will check only files ending in **.py** and does not handle
        imports validation for sub-directories.
        """
        for filename in sorted(os.listdir(self.app_path)):
            if not filename.endswith('.py'):
                continue
            fq_path = os.path.join(self.app_path, filename)

            with open(fq_path, 'rb') as f:
                # TODO: [low] is there a better way?
                code_lines = deque([(f.read(), 1)])

                while code_lines:
                    code, _ = code_lines.popleft()  # pylint: disable=unused-variable
                    try:
                        parsed_code = ast.parse(code)
                        for node in ast.walk(parsed_code):
                            self._check_node_import(node, filename)
                    except SyntaxError:
                        # syntax failures are reported separately by check_syntax()
                        pass

    @staticmethod
    def check_import_stdlib(module: str) -> bool:
        """Check if module is in Python stdlib.

        Args:
            module: The name of the module to check.

        Returns:
            bool: Returns True if the module is in the stdlib or template.
        """
        if (
            module in stdlib_list('3.6')
            or module in stdlib_list('3.7')
            or module in stdlib_list('3.8')
            or module
            in ['app', 'args', 'base_app_input', 'job_app', 'playbook_app', 'run', 'service_app']
        ):
            return True
        return False

    @staticmethod
    def check_imported(module: str) -> bool:
        """Check whether the provide module can be imported (package installed).

        Args:
            module: The name of the module to check availability.

        Returns:
            bool: True if the module can be imported, False otherwise.
        """
        try:
            # force a fresh lookup rather than trusting a cached module
            del sys.modules[module]
        except (AttributeError, KeyError):
            pass

        # https://docs.python.org/3/library/importlib.html#checking-if-a-module-can-be-imported
        find_spec = importlib.util.find_spec(module)
        found = find_spec is not None
        if found is True:
            # if dist-packages|site-packages in module_path the import doesn't count
            try:
                if 'dist-packages' in find_spec.origin:
                    found = False
            except TypeError:
                pass

            try:
                if 'site-packages' in find_spec.origin:
                    found = False
            except TypeError:
                pass
        return found

    def check_install_json(self) -> None:
        """Check all install.json files for valid schema."""
        if 'install.json' in self.invalid_json_files:
            return

        status = True
        try:
            self.ij.model
        except ValidationError as ex:
            self.invalid_json_files.append(self.ij.fqfn.name)
            status = False
            for error in json.loads(ex.json()):
                location = [str(location) for location in error.get('loc')]
                self.validation_data['errors'].append(
                    '''Schema validation failed for install.json. '''
                    f'''{error.get('msg')}: {' -> '.join(location)}'''
                )
        except ValueError:
            # any JSON decode error will be caught during syntax validation
            return

        self.validation_data['schema'].append({'filename': self.ij.fqfn.name, 'status': status})

    def check_job_json(self) -> None:
        """Validate feed files for feed job apps."""
        if 'install.json' in self.invalid_json_files:
            # can't proceed if install.json can't be read
            return

        # use developer defined app version (deprecated) or package_version from InstallJson model
        app_version = self.tj.model.package.app_version or self.ij.model.package_version
        program_name = (f'''{self.tj.model.package.app_name}_{app_version}''').replace('_', ' ')

        for feed in self.ij.model.feeds:
            if feed.job_file in self.invalid_json_files:
                # no need to check if schema if json is invalid
                continue

            # bug fix: reset the status per feed; previously one failing feed
            # marked every subsequent feed entry as failed.
            status = True
            jj = JobJson(filename=feed.job_file)

            # validate the job file exists
            if not jj.fqfn.is_file():
                self.validation_data['errors'].append(
                    f'''Schema validation failed for {feed.job_file}. '''
                    f'''The job.json file could not be found.'''
                )
                continue

            try:
                # validate the schema
                jj.model
            except ValidationError as ex:
                status = False
                for error in json.loads(ex.json()):
                    location = [str(location) for location in error.get('loc')]
                    self.validation_data['errors'].append(
                        f'''Schema validation failed for {feed.job_file}. '''
                        f'''{error.get('msg')}: {' -> '.join(location)}'''
                    )

            # validate program name
            if status is True and jj.model.program_name != program_name:
                status = False
                self.validation_data['errors'].append(
                    f'''Schema validation failed for {feed.job_file}. '''
                    f'''The job.json programName {jj.model.program_name} != {program_name}.'''
                )

            # validate program version
            if status is True and jj.model.program_version != self.ij.model.program_version:
                status = False
                self.validation_data['errors'].append(
                    f'''Schema validation failed for {feed.job_file}. The job.json program'''
                    f'''Version {jj.model.program_version} != {self.ij.model.program_version}.'''
                )

            self.validation_data['schema'].append({'filename': feed.job_file, 'status': status})

    def check_layout_json(self) -> None:
        """Check all layout.json files for valid schema."""
        if not self.lj.has_layout or 'layout.json' in self.invalid_json_files:
            return

        status = True
        try:
            self.lj.model
        except ValidationError as ex:
            # bug fix: record the layout.json filename (was self.ij.fqfn.name)
            self.invalid_json_files.append(self.lj.fqfn.name)
            status = False
            for error in json.loads(ex.json()):
                location = [str(location) for location in error.get('loc')]
                self.validation_data['errors'].append(
                    f'''Schema validation failed for layout.json. '''
                    f'''{error.get('msg')}: {' -> '.join(location)}'''
                )
        except ValueError:
            # any JSON decode error will be caught during syntax validation
            return

        self.validation_data['schema'].append({'filename': self.lj.fqfn.name, 'status': status})

        if status is True:
            self.check_layout_params()

    def check_layout_params(self) -> None:
        """Check that the layout.json is consistent with install.json.

        The layout.json files references the params.name from the install.json file. The method
        will validate that no reference appear for inputs in install.json that don't exist.
        """
        # do not track hidden or serviceConfig inputs as they should not be in layouts.json
        ij_input_names = list(self.ij.model.filter_params(service_config=False, hidden=False))
        ij_output_names = [o.name for o in self.ij.model.playbook.output_variables]

        # Check for duplicate inputs (dead `status = False` stores removed: they were
        # unconditionally overwritten before first use; errors are still recorded)
        for name in self.ij.validate.validate_duplicate_input():
            self.validation_data['errors'].append(
                f'Duplicate input name found in install.json ({name})'
            )

        # Check for duplicate sequence numbers
        for sequence in self.ij.validate.validate_duplicate_sequence():
            self.validation_data['errors'].append(
                f'Duplicate sequence number found in install.json ({sequence})'
            )

        # Check for duplicate outputs variables
        for output in self.ij.validate.validate_duplicate_output():
            self.validation_data['errors'].append(
                f'Duplicate output variable name found in install.json ({output})'
            )

        if 'sqlite3' in sys.modules:
            # create temporary inputs tables
            self.permutations.db_create_table(self.permutations._input_table, ij_input_names)

        # inputs
        status = True
        for i in self.lj.model.inputs:
            for p in i.parameters:
                if p.name not in ij_input_names:
                    # bug fix: p is a model, not a dict -- p.get('name') raised
                    # AttributeError in this error path; use p.name.
                    self.validation_data['errors'].append(
                        'Layouts input.parameters[].name validations failed '
                        f'''("{p.name}" is defined in layout.json, '''
                        'but hidden or not found in install.json).'
                    )
                    status = False
                else:
                    # any item in list afterwards is a problem
                    ij_input_names.remove(p.name)
                if 'sqlite3' in sys.modules and p.display:
                    display_query = (
                        f'''SELECT * FROM {self.permutations._input_table}'''  # nosec
                        f''' WHERE {p.display}'''
                    )
                    try:
                        self.permutations.db_conn.execute(display_query.replace('"', ''))
                    except sqlite3.Error:
                        self.validation_data['errors'].append(
                            '''Layouts input.parameters[].display validations failed '''
                            f'''("{p.display}" query is an invalid statement).'''
                        )
                        status = False

        # bug fix: check leftover names BEFORE recording the inputs status so the
        # failure is reflected in the status flag (previously set after the append).
        if ij_input_names:
            input_names = ','.join(ij_input_names)
            self.validation_data['errors'].append(
                f'Layouts input.parameters[].name validations failed ("{input_names}" '
                'values from install.json were not included in layout.json.'
            )
            status = False

        # update validation data for module
        self.validation_data['layouts'].append({'params': 'inputs', 'status': status})

        # outputs
        status = True
        for o in self.lj.model.outputs:
            if o.name not in ij_output_names:
                # update validation data errors
                self.validation_data['errors'].append(
                    f'''Layouts output validations failed ({o.name} is defined '''
                    '''in layout.json, but not found in install.json).'''
                )
                status = False
            if 'sqlite3' in sys.modules and o.display:
                display_query = (
                    f'''SELECT * FROM {self.permutations._input_table} '''  # nosec
                    f'''WHERE {o.display}'''
                )
                try:
                    self.permutations.db_conn.execute(display_query.replace('"', ''))
                except sqlite3.Error:
                    self.validation_data['errors'].append(
                        f"""Layouts outputs.display validations failed ("{o.display}" """
                        f"""query is an invalid statement)."""
                    )
                    status = False

        # update validation data for module
        self.validation_data['layouts'].append({'params': 'outputs', 'status': status})

    def check_syntax(self, app_path=None) -> None:
        """Run syntax on each ".py" and ".json" file.

        Args:
            app_path (str, optional): The path of Python files.
        """
        fqpn = Path(app_path or os.getcwd())

        for fqfn in sorted(fqpn.iterdir()):
            error = None
            status = True
            if fqfn.name.endswith('.py'):
                try:
                    with fqfn.open(mode='rb') as fh:
                        ast.parse(fh.read(), filename=fqfn.name)
                except SyntaxError:
                    status = False
                    # cleanup output
                    e = []
                    for line in traceback.format_exc().split('\n')[-5:-2]:
                        e.append(line.strip())
                    error = ' '.join(e)
            elif fqfn.name.endswith('.json'):
                try:
                    with fqfn.open() as fh:
                        json.load(fh)
                except ValueError as e:
                    # update tracker for common files
                    self.invalid_json_files.append(fqfn.name)
                    status = False
                    error = e
            else:
                # skip unsupported file types
                continue

            if error:
                # update validation data errors
                self.validation_data['errors'].append(
                    f'Syntax validation failed for {fqfn.name} ({error}).'
                )

            # store status for this file
            self.validation_data['fileSyntax'].append({'filename': fqfn.name, 'status': status})

    def interactive(self) -> None:
        """[App Builder] Run in interactive mode."""
        while True:
            line = sys.stdin.readline().strip()
            if line == 'quit':
                sys.exit()
            elif line == 'validate':
                self.check_syntax()
                self.check_imports()
                self.check_install_json()
                self.check_layout_json()
                self.check_job_json()
                self.print_json()

            # reset validation_data
            self.validation_data = self._validation_data

    def print_json(self) -> None:
        """[App Builder] Print JSON output."""
        print(json.dumps({'validation_data': self.validation_data}))

    # TODO: [low] switch to typer echo?
    def _print_file_syntax_results(self) -> None:
        """Print file syntax results."""
        if self.validation_data.get('fileSyntax'):
            print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated File Syntax:')
            print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
            for f in self.validation_data.get('fileSyntax'):
                status_color = self.status_color(f.get('status'))
                status_value = self.status_value(f.get('status'))
                print(f"{f.get('filename')!s:<60}{status_color}{status_value!s:<25}")

    def _print_imports_results(self) -> None:
        """Print import results."""
        if self.validation_data.get('moduleImports'):
            print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Imports:')
            print(f'''{c.Style.BRIGHT}{'File:'!s:<30}{'Module:'!s:<30}{'Status:'!s:<25}''')
            for f in self.validation_data.get('moduleImports'):
                status_color = self.status_color(f.get('status'))
                status_value = self.status_value(f.get('status'))
                print(
                    f'''{f.get('filename')!s:<30}{c.Fore.WHITE}'''
                    f'''{f.get('module')!s:<30}{status_color}{status_value!s:<25}'''
                )

    def _print_schema_results(self) -> None:
        """Print schema results."""
        if self.validation_data.get('schema'):
            print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Schema:')
            print(f'''{c.Style.BRIGHT}{'File:'!s:<60}{'Status:'!s:<25}''')
            for f in self.validation_data.get('schema'):
                status_color = self.status_color(f.get('status'))
                status_value = self.status_value(f.get('status'))
                print(f'''{f.get('filename')!s:<60}{status_color}{status_value!s:<25}''')

    def _print_layouts_results(self) -> None:
        """Print layout results."""
        if self.validation_data.get('layouts'):
            print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Layouts:')
            print(f'''{c.Style.BRIGHT}{'Params:'!s:<60}{'Status:'!s:<25}''')
            for f in self.validation_data.get('layouts'):
                status_color = self.status_color(f.get('status'))
                status_value = self.status_value(f.get('status'))
                print(f"{f.get('params')!s:<60}{status_color}{status_value!s:<25}")

    def _print_feed_results(self) -> None:
        """Print feed results."""
        if self.validation_data.get('feeds'):
            print(f'\n{c.Style.BRIGHT}{c.Fore.BLUE}Validated Feed Jobs:')
            print(f'''{c.Style.BRIGHT}{'Feeds:'!s:<60}{'Status:'!s:<25}''')
            for f in self.validation_data.get('feeds'):
                status_color = self.status_color(f.get('status'))
                status_value = self.status_value(f.get('status'))
                print(f"{f.get('name')!s:<60}{status_color}{status_value!s:<25}")

    def _print_errors(self) -> None:
        """Print errors results."""
        if self.validation_data.get('errors'):
            print('\n')  # separate errors from normal output
        for error in self.validation_data.get('errors'):
            # print all errors
            print(f'* {c.Fore.RED}{error}')

            # ignore exit code
            if not self.ignore_validation:
                self.exit_code = 1

    def print_results(self) -> None:
        """Print results."""
        # Validating Syntax
        self._print_file_syntax_results()

        # Validating Imports
        self._print_imports_results()

        # Validating Schema
        self._print_schema_results()

        # Validating Layouts
        self._print_layouts_results()

        # Validating Feed Job Definition Files
        self._print_feed_results()

        self._print_errors()

    @staticmethod
    def status_color(status) -> str:
        """Return the appropriate status color."""
        return c.Fore.GREEN if status else c.Fore.RED

    @staticmethod
    def status_value(status) -> str:
        """Return the appropriate status color."""
        return 'passed' if status else 'failed'
|
import frappe, re
from renovation_service_provider_manager import invoke_mediator
@frappe.whitelist(allow_guest=True)
def get_service_provider_client_id(provider):
    """Return the OAuth client id for *provider*, using the frappe cache.

    On a cache miss the id is fetched from the mediator and cached for
    five hours.
    """
    slug = re.sub('[^0-9a-zA-Z]+', '_', provider.lower())
    cache_key = f"client_id_{slug}"
    cached = frappe.cache().get_value(cache_key)
    if cached:
        return cached
    client_id = get_client_id_from_mediator(provider)
    frappe.cache().set_value(cache_key, client_id, expires_in_sec=18000)  # 5hr
    return client_id
def get_client_id_from_mediator(provider):
    """Fetch the client id for *provider* from the renovation mediator service."""
    try:
        r = invoke_mediator("/api/method/renovation_mediator.api.get_service_provider_client_id", {"provider": provider})
        r.raise_for_status()
        # mediator wraps its payload in a "message" envelope
        r = r.json()
        return r["message"]
    # NOTE(review): bare except also traps SystemExit/KeyboardInterrupt, and
    # `r` is unbound if invoke_mediator itself raises — confirm and narrow.
    except:
frappe.throw(r.text) | import frappe, re
from renovation_service_provider_manager import invoke_mediator
@frappe.whitelist(allow_guest=True)  # guest-accessible endpoint
def get_service_provider_client_id(provider):
    """Return the (cached) client id for *provider*.

    The cache key is the provider name lower-cased with runs of
    non-alphanumerics collapsed to underscores; entries live five hours.
    """
    k = f"client_id_{re.sub('[^0-9a-zA-Z]+', '_', provider.lower())}"
    client_id = frappe.cache().get_value(k)
    if client_id:
        return client_id
    # cache miss: ask the mediator and remember the answer
    client_id = get_client_id_from_mediator(provider)
    frappe.cache().set_value(k, client_id, expires_in_sec=18000) # 5hr
    return client_id
def get_client_id_from_mediator(provider):
    """Fetch the client id for *provider* from the renovation mediator.

    On any failure, raises a frappe error carrying the mediator's response
    body when one was received, or a generic message otherwise.
    """
    # Fix: the original used a bare `except:` (which also traps SystemExit /
    # KeyboardInterrupt) and referenced `r.text` even when invoke_mediator
    # itself raised, producing an UnboundLocalError instead of a clean error.
    r = None
    try:
        r = invoke_mediator(
            "/api/method/renovation_mediator.api.get_service_provider_client_id",
            {"provider": provider},
        )
        r.raise_for_status()
        # mediator wraps its payload in a "message" envelope
        return r.json()["message"]
    except Exception:
        frappe.throw(r.text if r is not None else "Unable to reach mediator")
#!/usr/bin/env python
"""
Object-oriented implementation of backup reporting code.
Defines a class called 'Backup' that records all backups of a device
"""
import os, sys, argparse
import glob
from configparser import ConfigParser
from atlassian import Confluence
class Backup:
    """Record of all backups found for one device beneath a backup root."""

    def __init__(self, device, backup_root):
        self.device = device
        self.root = backup_root
        # backups live one directory level below the root: <root>/<dir>/<device>
        pattern = "{}/*/{}".format(backup_root, device)
        matches = glob.glob(pattern, recursive=True)
        # keep only the "<dir>/<device>" portion relative to the root
        prefix_len = len(backup_root) + 1
        self.backups = [match[prefix_len:] for match in matches]

    def name(self):
        """Return the device name."""
        return self.device

    def latest(self):
        """Return the directory of the last matched backup, or 'NotFound'."""
        if not self.backups:
            return "NotFound"
        return self.backups[-1].split('/')[0]
def main():
    """Read the device list, report each device's latest backup, and
    publish the summary table to a Confluence page."""
    parser = ConfigParser()
    parser.read('config-demo.ini')
    # config supplies the device list path, backup root and Confluence credentials
    device_list_file = parser['backups']['device_list']
    apikey = parser['confluence']['apikey']
    username = parser['confluence']['username']
    url = parser['confluence']['url']
    page_ID = parser['confluence']['page_ID']
    confluence = Confluence(url=url, username=username, password=apikey)
    # Read in all the devices from the nominated file
    with open(device_list_file) as file:
        lines = file.readlines()
        devices = [line.rstrip() for line in lines]
    # build the Confluence wiki-markup table, one row per device
    wiki_table = "||Device||Date||"
    for device in devices:
        device_bkp = Backup(device, parser['backups']['path'])
        latest_bkp_date = device_bkp.latest()
        print(f"Latest backup for {device_bkp.name()} is {latest_bkp_date}")
        wiki_table += "\n" + f"|{device}|{latest_bkp_date}|"
    print("Wiki text for table is:")
    print(wiki_table)
    # replace the page body wholesale with the freshly built table
    result = confluence.update_page(
        page_id=page_ID,
        title='Config Retrievals',
        representation="wiki",
        body=wiki_table)
    #pprint(result)
    # NOTE(review): double quotes nested inside these double-quoted f-strings
    # require Python >= 3.12 (PEP 701) — confirm the deployment interpreter
    print(f"Title of page set to '{result["title"]}'")
    print(f"Confluence revision for page is now {result["version"]["confRev"]}")
| #!/usr/bin/env python
"""
Object-oriented implementation of backup reporting code.
Defines a class called 'Backup' that records all backups of a device
"""
import os, sys, argparse
import glob
from configparser import ConfigParser
from atlassian import Confluence
class Backup:
    """Records all backups of a device found beneath a backup root."""

    def __init__(self, device, backup_root):
        self.device = device
        self.root = backup_root
        # one dated directory level sits between the root and the device file
        config_pattern = "{}/*/{}".format(self.root, device)
        configs = glob.glob(config_pattern, recursive=True)
        # Remove the full pathname, we only want the directory and the filename
        bkps = [dir[len(backup_root)+1:] for dir in configs]
        self.backups = bkps

    def name(self):
        """Return the device name."""
        return self.device

    def latest(self):
        """Return the date directory of the last listed backup, or 'NotFound'."""
        if len(self.backups) >= 1:
            # assumes glob returned entries in chronological order — TODO confirm
            return self.backups[-1].split('/')[0]
        else:
            return "NotFound"
def main():
    """Read the device list, report each device's latest backup, and
    publish the summary table to a Confluence page."""
    parser = ConfigParser()
    parser.read('config-demo.ini')
    # config supplies the device list path, backup root and Confluence credentials
    device_list_file = parser['backups']['device_list']
    apikey = parser['confluence']['apikey']
    username = parser['confluence']['username']
    url = parser['confluence']['url']
    page_ID = parser['confluence']['page_ID']
    confluence = Confluence(url=url, username=username, password=apikey)
    # Read in all the devices from the nominated file
    with open(device_list_file) as file:
        lines = file.readlines()
        devices = [line.rstrip() for line in lines]
    # build the Confluence wiki-markup table, one row per device
    wiki_table = "||Device||Date||"
    for device in devices:
        device_bkp = Backup(device, parser['backups']['path'])
        latest_bkp_date = device_bkp.latest()
        print(f"Latest backup for {device_bkp.name()} is {latest_bkp_date}")
        wiki_table += "\n" + f"|{device}|{latest_bkp_date}|"
    print("Wiki text for table is:")
    print(wiki_table)
    # replace the page body wholesale with the freshly built table
    result = confluence.update_page(
        page_id=page_ID,
        title='Config Retrievals',
        representation="wiki",
        body=wiki_table)
    #pprint(result)
    print(f"Title of page set to '{result['title']}'")
    print(f"Confluence revision for page is now {result['version']['confRev']}")
|
import os
import re
import shutil
import sys
import urllib.error
import urllib.parse
import urllib.request
from zipfile import ZipFile
import helpers.config as config
from helpers.logger import Logger
class Updater:
    """Checks GitHub for a newer project version and installs it in place.

    NOTE(review): the nested double quotes inside double-quoted f-strings
    used throughout require Python >= 3.12 (PEP 701) — confirm interpreter.
    """

    # singleton storage
    __instance = None

    @staticmethod
    def Get():
        """Return the Updater instance.

        NOTE(review): Updater.__instance is never assigned, so a fresh
        object is created on every call — the singleton is effectively inert.
        """
        if Updater.__instance is None:
            return Updater()
        return Updater.__instance

    def __init__(self):
        if Updater.__instance is not None:
            return
        else:
            self.log = Logger("pyLaunch.Frontend.Updater", "frontend.log")
            # folders inside the project root that get wiped on install
            self.DeleteFolders = ["src"]
            # staging folder (under config.PATH_ROOT) for the downloaded zip
            self.UpdateFolder = "updatefiles"

    def Automatic(self) -> bool:
        """Run the full check/prompt/download/install flow; True on success."""
        if not self.CheckConnection():
            return False
        UpdateAvailable = self.CheckVersions()
        # NOTE(review): CheckVersions may return a (truthy) [message, bool]
        # list on failure, which this truth test treats as "update available".
        if UpdateAvailable:
            # NOTE(review): self.Versions holds int lists after CheckVersions;
            # ".".join over ints raises TypeError — confirm intended types.
            print(f"An update is available! [v{".".join(self.Versions[1])}]")
            if not 'n' in input(f"Would you like to update from [{".".join(self.Versions[0])}]? (Y/n) > "):
                if self.DownloadUpdate():
                    return self.InstallUpdate()
        return False

    def CheckConnection(self) -> str:
        """Probe internet connectivity.

        Returns True when reachable, otherwise a message string; the skip
        setting also yields a (truthy) string.  NOTE(review): mixed
        str/bool returns despite the -> str annotation.
        """
        if config.CONFIGURATION['Update']['SkipCheck']:
            return "Skipping update check"
        try:
            urllib.request.urlopen('http://google.com')
            return True
        except Exception as e:
            return "Unable to connect to the internet"  # no route out — treat as offline

    def DownloadUpdate(self) -> bool:
        """Download the configured branch zipball into the staging folder and extract it."""
        response = None
        try:
            response = urllib.request.urlopen(f"https://api.github.com/repos/{config.CONFIGURATION["Update"]["Organization"]}/{config.CONFIGURATION["Update"]["Repository"]}/zipball/{config.CONFIGURATION["Update"]["Branch"]}")
        except urllib.error.HTTPError as e:
            print(f"Unable to download update from GitHub: {e}")
            input("Press enter to continue...")
            return False
        if not os.path.exists(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}"):
            os.mkdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
        with open(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}{os.sep}gh_download.zip", "wb") as f:
            f.write(response.read())
        # Zip is downloaded, now extract
        os.chdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
        zipFileContent = dict()
        zipFileContentSize = 0
        with ZipFile(f"gh_download.zip", 'r') as zipFile:
            # map member name -> size so extraction progress can be reported
            for name in zipFile.namelist():
                zipFileContent[name] = zipFile.getinfo(name).file_size
            zipFileContentSize = sum(zipFileContent.values())
            extractedContentSize = 0
            # extract with a console progress bar; members that already exist
            # on disk are skipped and excluded from the size total
            for zippedFileName, zippedFileSize in zipFileContent.items():
                UnzippedFilePath = os.path.abspath(f"{zippedFileName}")
                os.makedirs(os.path.dirname(UnzippedFilePath), exist_ok=True)
                if os.path.isfile(UnzippedFilePath):
                    zipFileContentSize -= zippedFileSize
                else:
                    zipFile.extract(zippedFileName, path="", pwd=None)
                    extractedContentSize += zippedFileSize
                try:
                    done = int(50*extractedContentSize/zipFileContentSize)
                    percentage = (extractedContentSize / zipFileContentSize) * 100
                except ZeroDivisionError:
                    # everything was already present on disk
                    done = 50
                    percentage = 100
                sys.stdout.write('\r[{}{}] {:.2f}%'.format('█' * done, '.' * (50-done), percentage))
                sys.stdout.flush()
        sys.stdout.write('\n')
        os.chdir(config.PATH_ROOT)
        return True

    def InstallUpdate(self) -> bool:
        """Replace the project root's contents with the staged update files."""
        print("Installing new version")
        for file in os.listdir(config.CONFIGURATION['Launch']['ProjectRoot']):
            if os.path.isdir(f"{config.CONFIGURATION["Launch"]["ProjectRoot"]}{os.sep}{file}"):
                # only directories listed in DeleteFolders are removed
                if file in self.DeleteFolders:
                    shutil.rmtree(f"{config.CONFIGURATION["Launch"]["ProjectRoot"]}{os.sep}{file}")
            else: # Files
                os.remove(f"{config.CONFIGURATION["Launch"]["ProjectRoot"]}{os.sep}{file}")
        # Old version is deleted
        for file in os.listdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}"):
            os.rename(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}{os.sep}{file}", f"{config.CONFIGURATION["Launch"]["ProjectRoot"]}{os.sep}{file}")
        shutil.rmtree(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
        return True

    def CheckVersions(self):
        """Compare the installed version with the one published on GitHub.

        Successful return: bool (True when GitHub is newer).
        Unsuccessful: list[message: str, continue: bool] from _GetVersions.
        """
        self.Versions = self._GetVersions()
        if type(self.Versions[1]) == bool:
            # _GetVersions failed; propagate its [message, continue] pair
            return self.Versions
        self.Versions[0] = self._GetVersionAsInt(self.Versions[0])
        self.Versions[1] = self._GetVersionAsInt(self.Versions[1])
        self.Difference = []
        for installed, checked in zip(self.Versions[0], self.Versions[1]):
            self.Difference.append(checked - installed)
        # most-significant differing section decides the outcome
        for section in self.Difference:
            if section < 0: # When working on project and updating locally
                return False
            elif section > 0:
                return True
        return False

    def _GetVersions(self) -> list:
        """Return [InstalledVersion: str, CheckedVersion: str].

        Unsuccessful return: [message: str, continue: bool].
        """
        if not os.path.exists(f"{config.CONFIGURATION["Launch"]["ProjectRoot"]}{os.sep}{config.CONFIGURATION["Update"]["VersionPath"]}"):
            # This means either the configuration is incorrect, or pyLaunch isn't where it should be
            # continue is False, because the project cannot be launched
            return [f"Unable to locate installed version at {config.CONFIGURATION["Update"]["VersionPath"]}", False]
        InstalledVersion = None # Local Version
        CheckedVersion = None # Version on GitHub
        with open(f"{config.CONFIGURATION["Launch"]["ProjectRoot"]}{os.sep}{config.CONFIGURATION["Update"]["VersionPath"]}", "r") as f:
            lines = f.readlines()
            InstalledVersion = self._GetVersionFromStr(lines)
        try:
            response = urllib.request.urlopen(f"https://raw.githubusercontent.com/{config.CONFIGURATION["Update"]["Organization"]}/{config.CONFIGURATION["Update"]["Repository"]}/{config.CONFIGURATION["Update"]["Branch"]}{config.CONFIGURATION["Update"]["VersionPath"]}")
            content = response.read().decode("UTF-8").split("\n")
            CheckedVersion = self._GetVersionFromStr(content)
        except urllib.error.HTTPError as e:
            # The Project URL is invalid (cannot find Org/Repo/Branch/VersionPath) or,
            # raw.githubusercontent is down, continue is True, the project can still be launched
            return ["Project URL does not exist or githubusercontent is down", True] # URL doesn't exist or something went wrong
        if CheckedVersion is None:
            # Some other error, just to be safe.
            return ["Unable to get current version from GitHub", True]
        return [InstalledVersion, CheckedVersion]

    def _GetVersionFromStr(self, lines: str) -> str:
        """Scan *lines* for the configured 'Find' marker; return the #.#.# version or None."""
        ver = None
        for line in lines:
            line = line.strip()
            if config.CONFIGURATION['Update']['Find'] in line:
                ver = line[len(config.CONFIGURATION['Update']['Find']):].strip('"')
                match = re.match(r"\d+\.\d+\.\d+", ver) # > #.#.#
                if match:
                    return ver[match.start():match.end()]
        return None

    def _GetVersionAsInt(self, version: str) -> list:
        """Convert a dotted version string (e.g. '1.2.3b') to a list of ints."""
        version = version.split(".")
        intVer = []
        for section in version:
            if section.isalnum():
                # strip letters (e.g. '3b' -> '3') before int conversion
                newSection = ""
                for char in section:
                    if char.isnumeric():
                        newSection += char
                section = newSection
            intVer.append(int(section))
        return intVer
| import os
import re
import shutil
import sys
import urllib.error
import urllib.parse
import urllib.request
from zipfile import ZipFile
import helpers.config as config
from helpers.logger import Logger
class Updater:
    """Checks GitHub for a newer project version and installs it in place."""

    # singleton storage
    __instance = None

    @staticmethod
    def Get():
        """Return the Updater instance.

        NOTE(review): Updater.__instance is never assigned, so a fresh
        object is created on every call — the singleton is effectively inert.
        """
        if Updater.__instance is None:
            return Updater()
        return Updater.__instance

    def __init__(self):
        if Updater.__instance is not None:
            return
        else:
            self.log = Logger("pyLaunch.Frontend.Updater", "frontend.log")
            # folders inside the project root that get wiped on install
            self.DeleteFolders = ["src"]
            # staging folder (under config.PATH_ROOT) for the downloaded zip
            self.UpdateFolder = "updatefiles"

    def Automatic(self) -> bool:
        """Run the full check/prompt/download/install flow; True on success."""
        if not self.CheckConnection():
            return False
        UpdateAvailable = self.CheckVersions()
        # NOTE(review): CheckVersions may return a (truthy) [message, bool]
        # list on failure, which this truth test treats as "update available".
        if UpdateAvailable:
            # NOTE(review): self.Versions holds int lists after CheckVersions;
            # '.'.join over ints raises TypeError — confirm intended types.
            print(f"An update is available! [v{'.'.join(self.Versions[1])}]")
            if not 'n' in input(f"Would you like to update from [{'.'.join(self.Versions[0])}]? (Y/n) > "):
                if self.DownloadUpdate():
                    return self.InstallUpdate()
        return False

    def CheckConnection(self) -> str:
        """Probe internet connectivity.

        Returns True when reachable, otherwise a message string; the skip
        setting also yields a (truthy) string.  NOTE(review): mixed
        str/bool returns despite the -> str annotation.
        """
        if config.CONFIGURATION['Update']['SkipCheck']:
            return "Skipping update check"
        try:
            urllib.request.urlopen('http://google.com')
            return True
        except Exception as e:
            return "Unable to connect to the internet"  # no route out — treat as offline

    def DownloadUpdate(self) -> bool:
        """Download the configured branch zipball into the staging folder and extract it."""
        response = None
        try:
            response = urllib.request.urlopen(f"https://api.github.com/repos/{config.CONFIGURATION['Update']['Organization']}/{config.CONFIGURATION['Update']['Repository']}/zipball/{config.CONFIGURATION['Update']['Branch']}")
        except urllib.error.HTTPError as e:
            print(f"Unable to download update from GitHub: {e}")
            input("Press enter to continue...")
            return False
        if not os.path.exists(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}"):
            os.mkdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
        with open(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}{os.sep}gh_download.zip", "wb") as f:
            f.write(response.read())
        # Zip is downloaded, now extract
        os.chdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
        zipFileContent = dict()
        zipFileContentSize = 0
        with ZipFile(f"gh_download.zip", 'r') as zipFile:
            # map member name -> size so extraction progress can be reported
            for name in zipFile.namelist():
                zipFileContent[name] = zipFile.getinfo(name).file_size
            zipFileContentSize = sum(zipFileContent.values())
            extractedContentSize = 0
            # extract with a console progress bar; members that already exist
            # on disk are skipped and excluded from the size total
            for zippedFileName, zippedFileSize in zipFileContent.items():
                UnzippedFilePath = os.path.abspath(f"{zippedFileName}")
                os.makedirs(os.path.dirname(UnzippedFilePath), exist_ok=True)
                if os.path.isfile(UnzippedFilePath):
                    zipFileContentSize -= zippedFileSize
                else:
                    zipFile.extract(zippedFileName, path="", pwd=None)
                    extractedContentSize += zippedFileSize
                try:
                    done = int(50*extractedContentSize/zipFileContentSize)
                    percentage = (extractedContentSize / zipFileContentSize) * 100
                except ZeroDivisionError:
                    # everything was already present on disk
                    done = 50
                    percentage = 100
                sys.stdout.write('\r[{}{}] {:.2f}%'.format('█' * done, '.' * (50-done), percentage))
                sys.stdout.flush()
        sys.stdout.write('\n')
        os.chdir(config.PATH_ROOT)
        return True

    def InstallUpdate(self) -> bool:
        """Replace the project root's contents with the staged update files."""
        print("Installing new version")
        for file in os.listdir(config.CONFIGURATION['Launch']['ProjectRoot']):
            if os.path.isdir(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}"):
                # only directories listed in DeleteFolders are removed
                if file in self.DeleteFolders:
                    shutil.rmtree(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}")
            else: # Files
                os.remove(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}")
        # Old version is deleted
        for file in os.listdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}"):
            os.rename(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}{os.sep}{file}", f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}")
        shutil.rmtree(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
        return True

    def CheckVersions(self):
        """Compare the installed version with the one published on GitHub.

        Successful return: bool (True when GitHub is newer).
        Unsuccessful: list[message: str, continue: bool] from _GetVersions.
        """
        self.Versions = self._GetVersions()
        if type(self.Versions[1]) == bool:
            # _GetVersions failed; propagate its [message, continue] pair
            return self.Versions
        self.Versions[0] = self._GetVersionAsInt(self.Versions[0])
        self.Versions[1] = self._GetVersionAsInt(self.Versions[1])
        self.Difference = []
        for installed, checked in zip(self.Versions[0], self.Versions[1]):
            self.Difference.append(checked - installed)
        # most-significant differing section decides the outcome
        for section in self.Difference:
            if section < 0: # When working on project and updating locally
                return False
            elif section > 0:
                return True
        return False

    def _GetVersions(self) -> list:
        """Return [InstalledVersion: str, CheckedVersion: str].

        Unsuccessful return: [message: str, continue: bool].
        """
        if not os.path.exists(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{config.CONFIGURATION['Update']['VersionPath']}"):
            # This means either the configuration is incorrect, or pyLaunch isn't where it should be
            # continue is False, because the project cannot be launched
            return [f"Unable to locate installed version at {config.CONFIGURATION['Update']['VersionPath']}", False]
        InstalledVersion = None # Local Version
        CheckedVersion = None # Version on GitHub
        with open(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{config.CONFIGURATION['Update']['VersionPath']}", "r") as f:
            lines = f.readlines()
            InstalledVersion = self._GetVersionFromStr(lines)
        try:
            response = urllib.request.urlopen(f"https://raw.githubusercontent.com/{config.CONFIGURATION['Update']['Organization']}/{config.CONFIGURATION['Update']['Repository']}/{config.CONFIGURATION['Update']['Branch']}{config.CONFIGURATION['Update']['VersionPath']}")
            content = response.read().decode("UTF-8").split("\n")
            CheckedVersion = self._GetVersionFromStr(content)
        except urllib.error.HTTPError as e:
            # The Project URL is invalid (cannot find Org/Repo/Branch/VersionPath) or,
            # raw.githubusercontent is down, continue is True, the project can still be launched
            return ["Project URL does not exist or githubusercontent is down", True] # URL doesn't exist or something went wrong
        if CheckedVersion is None:
            # Some other error, just to be safe.
            return ["Unable to get current version from GitHub", True]
        return [InstalledVersion, CheckedVersion]

    def _GetVersionFromStr(self, lines: str) -> str:
        """Scan *lines* for the configured 'Find' marker; return the #.#.# version or None."""
        ver = None
        for line in lines:
            line = line.strip()
            if config.CONFIGURATION['Update']['Find'] in line:
                ver = line[len(config.CONFIGURATION['Update']['Find']):].strip('"')
                match = re.match(r"\d+\.\d+\.\d+", ver) # > #.#.#
                if match:
                    return ver[match.start():match.end()]
        return None

    def _GetVersionAsInt(self, version: str) -> list:
        """Convert a dotted version string (e.g. '1.2.3b') to a list of ints."""
        version = version.split(".")
        intVer = []
        for section in version:
            if section.isalnum():
                # strip letters (e.g. '3b' -> '3') before int conversion
                newSection = ""
                for char in section:
                    if char.isnumeric():
                        newSection += char
                section = newSection
            intVer.append(int(section))
        return intVer
|
import copy
import functools
import warnings
from types import MethodType
from typing import Dict, List, Optional, Type, Union
import dill
import pandas as pd
from feast.base_feature_view import BaseFeatureView
from feast.data_source import RequestSource
from feast.errors import RegistryInferenceFailure, SpecifiedFeaturesNotPresentError
from feast.feature import Feature
from feast.feature_view import FeatureView
from feast.feature_view_projection import FeatureViewProjection
from feast.field import Field, from_value_type
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
OnDemandFeatureView as OnDemandFeatureViewProto,
)
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
OnDemandFeatureViewMeta,
OnDemandFeatureViewSpec,
OnDemandSource,
)
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
UserDefinedFunction as UserDefinedFunctionProto,
)
from feast.type_map import (
feast_value_type_to_pandas_type,
python_type_to_feast_value_type,
)
from feast.usage import log_exceptions
from feast.value_type import ValueType
warnings.simplefilter("once", DeprecationWarning)
class OnDemandFeatureView(BaseFeatureView):
"""
[Experimental] An OnDemandFeatureView defines a logical group of features that are
generated by applying a transformation on a set of input sources, such as feature
views and request data sources.
Attributes:
name: The unique name of the on demand feature view.
features: The list of features in the output of the on demand feature view.
source_feature_view_projections: A map from input source names to actual input
sources with type FeatureViewProjection.
source_request_sources: A map from input source names to the actual input
sources with type RequestSource.
udf: The user defined transformation function, which must take pandas dataframes
as inputs.
description: A human-readable description.
tags: A dictionary of key-value pairs to store arbitrary metadata.
owner: The owner of the on demand feature view, typically the email of the primary
maintainer.
"""
# TODO(adchia): remove inputs from proto and declaration
name: str
features: List[Field]
source_feature_view_projections: Dict[str, FeatureViewProjection]
source_request_sources: Dict[str, RequestSource]
udf: MethodType
description: str
tags: Dict[str, str]
owner: str
@log_exceptions
def __init__(
self,
*args,
name: Optional[str] = None,
features: Optional[List[Feature]] = None,
sources: Optional[
Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]]
] = None,
udf: Optional[MethodType] = None,
inputs: Optional[
Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]]
] = None,
schema: Optional[List[Field]] = None,
description: str = "",
tags: Optional[Dict[str, str]] = None,
owner: str = "",
):
"""
Creates an OnDemandFeatureView object.
Args:
name: The unique name of the on demand feature view.
features (deprecated): The list of features in the output of the on demand
feature view, after the transformation has been applied.
sources (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
udf (optional): The user defined transformation function, which must take pandas
dataframes as inputs.
inputs (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
schema (optional): The list of features in the output of the on demand feature
view, after the transformation has been applied.
description (optional): A human-readable description.
tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
owner (optional): The owner of the on demand feature view, typically the email
of the primary maintainer.
"""
positional_attributes = ["name", "features", "inputs", "udf"]
_name = name
_schema = schema or []
if len(_schema) == 0 and features is not None:
_schema = [Field.from_feature(feature) for feature in features]
if features is not None:
warnings.warn(
(
"The `features` parameter is being deprecated in favor of the `schema` parameter. "
"Please switch from using `features` to `schema`. This will also requiring switching "
"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
"support the `features` parameter."
),
DeprecationWarning,
)
_sources = sources or inputs
if inputs and sources:
raise ValueError("At most one of `sources` or `inputs` can be specified.")
elif inputs:
warnings.warn(
(
"The `inputs` parameter is being deprecated. Please use `sources` instead. "
"Feast 0.21 and onwards will not support the `inputs` parameter."
),
DeprecationWarning,
)
_udf = udf
if args:
warnings.warn(
(
"On demand feature view parameters should be specified as keyword arguments "
"instead of positional arguments. Feast 0.23 and onwards will not support "
"positional arguments in on demand feature view definitions."
),
DeprecationWarning,
)
if len(args) > len(positional_attributes):
raise ValueError(
f"Only {", ".join(positional_attributes)} are allowed as positional args "
f"when defining feature views, for backwards compatibility."
)
if len(args) >= 1:
_name = args[0]
if len(args) >= 2:
_schema = args[1]
# Convert Features to Fields.
if len(_schema) > 0 and isinstance(_schema[0], Feature):
_schema = [Field.from_feature(feature) for feature in _schema]
warnings.warn(
(
"The `features` parameter is being deprecated in favor of the `schema` parameter. "
"Please switch from using `features` to `schema`. This will also requiring switching "
"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
"support the `features` parameter."
),
DeprecationWarning,
)
if len(args) >= 3:
_sources = args[2]
warnings.warn(
(
"The `inputs` parameter is being deprecated. Please use `sources` instead. "
"Feast 0.21 and onwards will not support the `inputs` parameter."
),
DeprecationWarning,
)
if len(args) >= 4:
_udf = args[3]
if not _name:
raise ValueError(
"The name of the on demand feature view must be specified."
)
if not _sources:
raise ValueError("The `sources` parameter must be specified.")
super().__init__(
name=_name,
features=_schema,
description=description,
tags=tags,
owner=owner,
)
assert _sources is not None
self.source_feature_view_projections: Dict[str, FeatureViewProjection] = {}
self.source_request_sources: Dict[str, RequestSource] = {}
for source_name, odfv_source in _sources.items():
if isinstance(odfv_source, RequestSource):
self.source_request_sources[source_name] = odfv_source
elif isinstance(odfv_source, FeatureViewProjection):
self.source_feature_view_projections[source_name] = odfv_source
else:
self.source_feature_view_projections[
source_name
] = odfv_source.projection
if _udf is None:
raise ValueError("The `udf` parameter must be specified.")
assert _udf
self.udf = _udf
@property
def proto_class(self) -> Type[OnDemandFeatureViewProto]:
return OnDemandFeatureViewProto
def __copy__(self):
fv = OnDemandFeatureView(
name=self.name,
schema=self.features,
sources=dict(
**self.source_feature_view_projections, **self.source_request_sources,
),
udf=self.udf,
description=self.description,
tags=self.tags,
owner=self.owner,
)
fv.projection = copy.copy(self.projection)
return fv
def __eq__(self, other):
if not super().__eq__(other):
return False
if (
not self.source_feature_view_projections
== other.source_feature_view_projections
or not self.source_request_sources == other.source_request_sources
or not self.udf.__code__.co_code == other.udf.__code__.co_code
):
return False
return True
def __hash__(self):
return super().__hash__()
def to_proto(self) -> OnDemandFeatureViewProto:
"""
Converts an on demand feature view object to its protobuf representation.
Returns:
A OnDemandFeatureViewProto protobuf.
"""
meta = OnDemandFeatureViewMeta()
if self.created_timestamp:
meta.created_timestamp.FromDatetime(self.created_timestamp)
if self.last_updated_timestamp:
meta.last_updated_timestamp.FromDatetime(self.last_updated_timestamp)
sources = {}
for source_name, fv_projection in self.source_feature_view_projections.items():
sources[source_name] = OnDemandSource(
feature_view_projection=fv_projection.to_proto()
)
for (source_name, request_sources,) in self.source_request_sources.items():
sources[source_name] = OnDemandSource(
request_data_source=request_sources.to_proto()
)
spec = OnDemandFeatureViewSpec(
name=self.name,
features=[feature.to_proto() for feature in self.features],
sources=sources,
user_defined_function=UserDefinedFunctionProto(
name=self.udf.__name__, body=dill.dumps(self.udf, recurse=True),
),
description=self.description,
tags=self.tags,
owner=self.owner,
)
return OnDemandFeatureViewProto(spec=spec, meta=meta)
@classmethod
def from_proto(cls, on_demand_feature_view_proto: OnDemandFeatureViewProto):
"""
Creates an on demand feature view from a protobuf representation.
Args:
on_demand_feature_view_proto: A protobuf representation of an on-demand feature view.
Returns:
A OnDemandFeatureView object based on the on-demand feature view protobuf.
"""
sources = {}
for (
source_name,
on_demand_source,
) in on_demand_feature_view_proto.spec.sources.items():
if on_demand_source.WhichOneof("source") == "feature_view":
sources[source_name] = FeatureView.from_proto(
on_demand_source.feature_view
).projection
elif on_demand_source.WhichOneof("source") == "feature_view_projection":
sources[source_name] = FeatureViewProjection.from_proto(
on_demand_source.feature_view_projection
)
else:
sources[source_name] = RequestSource.from_proto(
on_demand_source.request_data_source
)
on_demand_feature_view_obj = cls(
name=on_demand_feature_view_proto.spec.name,
schema=[
Field(
name=feature.name,
dtype=from_value_type(ValueType(feature.value_type)),
)
for feature in on_demand_feature_view_proto.spec.features
],
sources=sources,
udf=dill.loads(
on_demand_feature_view_proto.spec.user_defined_function.body
),
description=on_demand_feature_view_proto.spec.description,
tags=dict(on_demand_feature_view_proto.spec.tags),
owner=on_demand_feature_view_proto.spec.owner,
)
# FeatureViewProjections are not saved in the OnDemandFeatureView proto.
# Create the default projection.
on_demand_feature_view_obj.projection = FeatureViewProjection.from_definition(
on_demand_feature_view_obj
)
if on_demand_feature_view_proto.meta.HasField("created_timestamp"):
on_demand_feature_view_obj.created_timestamp = (
on_demand_feature_view_proto.meta.created_timestamp.ToDatetime()
)
if on_demand_feature_view_proto.meta.HasField("last_updated_timestamp"):
on_demand_feature_view_obj.last_updated_timestamp = (
on_demand_feature_view_proto.meta.last_updated_timestamp.ToDatetime()
)
return on_demand_feature_view_obj
def get_request_data_schema(self) -> Dict[str, ValueType]:
schema: Dict[str, ValueType] = {}
for request_source in self.source_request_sources.values():
if isinstance(request_source.schema, List):
new_schema = {}
for field in request_source.schema:
new_schema[field.name] = field.dtype.to_value_type()
schema.update(new_schema)
elif isinstance(request_source.schema, Dict):
schema.update(request_source.schema)
else:
raise Exception(
f"Request source schema is not correct type: ${str(type(request_source.schema))}"
)
return schema
def get_transformed_features_df(
    self, df_with_features: pd.DataFrame, full_feature_names: bool = False,
) -> pd.DataFrame:
    """Apply this view's udf to *df_with_features* and return the transformed frame.

    NOTE(review): mutates *df_with_features* in place — alias columns are
    added so the udf can address each source feature under both its short
    name and its fully qualified ``<projection>__<feature>`` name, then those
    aliases are dropped again before returning.

    Args:
        df_with_features: Input dataframe holding the source feature columns.
        full_feature_names: If True, rename output columns to the qualified
            ``<projection>__<feature>`` form; otherwise to the short name.

    Returns:
        The dataframe produced by the udf, with columns renamed per
        *full_feature_names*.
    """
    # Apply on demand transformations
    columns_to_cleanup = []
    for source_fv_projection in self.source_feature_view_projections.values():
        for feature in source_fv_projection.features:
            full_feature_ref = f"{source_fv_projection.name}__{feature.name}"
            if full_feature_ref in df_with_features.keys():
                # Make sure the partial feature name is always present
                df_with_features[feature.name] = df_with_features[full_feature_ref]
                columns_to_cleanup.append(feature.name)
            elif feature.name in df_with_features.keys():
                # Make sure the full feature name is always present
                df_with_features[full_feature_ref] = df_with_features[feature.name]
                columns_to_cleanup.append(full_feature_ref)

    # Compute transformed values and apply to each result row
    df_with_transformed_features = self.udf.__call__(df_with_features)

    # Work out whether the correct columns names are used.
    rename_columns: Dict[str, str] = {}
    for feature in self.features:
        short_name = feature.name
        long_name = f"{self.projection.name_to_use()}__{feature.name}"
        if (
            short_name in df_with_transformed_features.columns
            and full_feature_names
        ):
            rename_columns[short_name] = long_name
        elif not full_feature_names:
            # Long name must be in dataframe.
            rename_columns[long_name] = short_name

    # Cleanup extra columns used for transformation
    df_with_features.drop(columns=columns_to_cleanup, inplace=True)
    return df_with_transformed_features.rename(columns=rename_columns)
def infer_features(self):
    """
    Infers the set of features associated to this feature view from the input source.

    Builds an empty, dtype-only dataframe with one column per source feature
    (under both short and qualified names) plus request-source fields, runs
    the udf over it, and reads the inferred features off the output columns.

    Raises:
        RegistryInferenceFailure: The set of features could not be inferred.
    """
    df = pd.DataFrame()
    # Source features are reachable under both their qualified and short names.
    for feature_view_projection in self.source_feature_view_projections.values():
        for feature in feature_view_projection.features:
            dtype = feast_value_type_to_pandas_type(feature.dtype.to_value_type())
            df[f"{feature_view_projection.name}__{feature.name}"] = pd.Series(
                dtype=dtype
            )
            df[f"{feature.name}"] = pd.Series(dtype=dtype)
    # Request-source fields are only available under their plain names.
    for request_data in self.source_request_sources.values():
        for field in request_data.schema:
            dtype = feast_value_type_to_pandas_type(field.dtype.to_value_type())
            df[f"{field.name}"] = pd.Series(dtype=dtype)
    output_df: pd.DataFrame = self.udf.__call__(df)
    inferred_features = []
    for f, dt in zip(output_df.columns, output_df.dtypes):
        inferred_features.append(
            Field(
                name=f,
                dtype=from_value_type(
                    python_type_to_feast_value_type(f, type_name=str(dt))
                ),
            )
        )
    if self.features:
        # Explicitly declared features must all appear in the udf output.
        missing_features = []
        for specified_features in self.features:
            if specified_features not in inferred_features:
                missing_features.append(specified_features)
        if missing_features:
            raise SpecifiedFeaturesNotPresentError(
                [f.name for f in missing_features], self.name
            )
    else:
        self.features = inferred_features

    if not self.features:
        raise RegistryInferenceFailure(
            "OnDemandFeatureView",
            f"Could not infer Features for the feature view '{self.name}'.",
        )
@staticmethod
def get_requested_odfvs(feature_refs, project, registry):
    """Return the registered on demand feature views with at least one
    feature referenced in *feature_refs* (as ``"<view>:<feature>"`` strings)."""
    candidates = registry.list_on_demand_feature_views(project, allow_cache=True)
    return [
        view
        for view in candidates
        if any(f"{view.name}:{feat.name}" in feature_refs for feat in view.features)
    ]
# TODO(felixwang9817): Force this decorator to accept kwargs and switch from
# `features` to `schema`.
def on_demand_feature_view(
    *args,
    features: Optional[List[Feature]] = None,
    sources: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None,
    inputs: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None,
    schema: Optional[List[Field]] = None,
    description: str = "",
    tags: Optional[Dict[str, str]] = None,
    owner: str = "",
):
    """
    Creates an OnDemandFeatureView object with the given user function as udf.

    Args:
        features (deprecated): The list of features in the output of the on demand
            feature view, after the transformation has been applied.
        sources (optional): A map from input source names to the actual input sources,
            which may be feature views, feature view projections, or request data sources.
            These sources serve as inputs to the udf, which will refer to them by name.
        inputs (optional): A map from input source names to the actual input sources,
            which may be feature views, feature view projections, or request data sources.
            These sources serve as inputs to the udf, which will refer to them by name.
        schema (optional): The list of features in the output of the on demand feature
            view, after the transformation has been applied.
        description (optional): A human-readable description.
        tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
        owner (optional): The owner of the on demand feature view, typically the email
            of the primary maintainer.

    Raises:
        ValueError: If both `sources` and `inputs` are given, if too many
            positional arguments are given, or if no sources are given at all.
    """
    positional_attributes = ["features", "inputs"]
    _schema = schema or []
    if len(_schema) == 0 and features is not None:
        # Legacy path: translate deprecated Feature objects into Fields.
        _schema = [Field.from_feature(feature) for feature in features]
    if features is not None:
        warnings.warn(
            (
                "The `features` parameter is being deprecated in favor of the `schema` parameter. "
                "Please switch from using `features` to `schema`. This will also requiring switching "
                "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
                "support the `features` parameter."
            ),
            DeprecationWarning,
        )

    _sources = sources or inputs
    if inputs and sources:
        raise ValueError("At most one of `sources` or `inputs` can be specified.")
    elif inputs:
        warnings.warn(
            (
                "The `inputs` parameter is being deprecated. Please use `sources` instead. "
                "Feast 0.21 and onwards will not support the `inputs` parameter."
            ),
            DeprecationWarning,
        )

    if args:
        warnings.warn(
            (
                "On demand feature view parameters should be specified as keyword arguments "
                "instead of positional arguments. Feast 0.23 and onwards will not support "
                "positional arguments in on demand feature view definitions."
            ),
            DeprecationWarning,
        )
        if len(args) > len(positional_attributes):
            raise ValueError(
                # BUG FIX: the original nested double quotes inside a
                # double-quoted f-string (f"... {", ".join(...)} ..."), which
                # is a SyntaxError on Python < 3.12 (quote reuse only became
                # legal with PEP 701).
                f"Only {', '.join(positional_attributes)} are allowed as positional args "
                f"when defining feature views, for backwards compatibility."
            )
        if len(args) >= 1:
            _schema = args[0]
            # Convert Features to Fields.
            if len(_schema) > 0 and isinstance(_schema[0], Feature):
                _schema = [Field.from_feature(feature) for feature in _schema]
                warnings.warn(
                    (
                        "The `features` parameter is being deprecated in favor of the `schema` parameter. "
                        "Please switch from using `features` to `schema`. This will also requiring switching "
                        "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
                        "support the `features` parameter."
                    ),
                    DeprecationWarning,
                )
        if len(args) >= 2:
            _sources = args[1]
            warnings.warn(
                (
                    "The `inputs` parameter is being deprecated. Please use `sources` instead. "
                    "Feast 0.21 and onwards will not support the `inputs` parameter."
                ),
                DeprecationWarning,
            )

    if not _sources:
        raise ValueError("The `sources` parameter must be specified.")

    def decorator(user_function):
        # The decorated function becomes the udf; its name becomes the view name.
        on_demand_feature_view_obj = OnDemandFeatureView(
            name=user_function.__name__,
            sources=_sources,
            schema=_schema,
            udf=user_function,
            description=description,
            tags=tags,
            owner=owner,
        )
        functools.update_wrapper(
            wrapper=on_demand_feature_view_obj, wrapped=user_function
        )
        return on_demand_feature_view_obj

    return decorator
| import copy
import functools
import warnings
from types import MethodType
from typing import Dict, List, Optional, Type, Union
import dill
import pandas as pd
from feast.base_feature_view import BaseFeatureView
from feast.data_source import RequestSource
from feast.errors import RegistryInferenceFailure, SpecifiedFeaturesNotPresentError
from feast.feature import Feature
from feast.feature_view import FeatureView
from feast.feature_view_projection import FeatureViewProjection
from feast.field import Field, from_value_type
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
OnDemandFeatureView as OnDemandFeatureViewProto,
)
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
OnDemandFeatureViewMeta,
OnDemandFeatureViewSpec,
OnDemandSource,
)
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
UserDefinedFunction as UserDefinedFunctionProto,
)
from feast.type_map import (
feast_value_type_to_pandas_type,
python_type_to_feast_value_type,
)
from feast.usage import log_exceptions
from feast.value_type import ValueType
warnings.simplefilter("once", DeprecationWarning)
class OnDemandFeatureView(BaseFeatureView):
"""
[Experimental] An OnDemandFeatureView defines a logical group of features that are
generated by applying a transformation on a set of input sources, such as feature
views and request data sources.
Attributes:
name: The unique name of the on demand feature view.
features: The list of features in the output of the on demand feature view.
source_feature_view_projections: A map from input source names to actual input
sources with type FeatureViewProjection.
source_request_sources: A map from input source names to the actual input
sources with type RequestSource.
udf: The user defined transformation function, which must take pandas dataframes
as inputs.
description: A human-readable description.
tags: A dictionary of key-value pairs to store arbitrary metadata.
owner: The owner of the on demand feature view, typically the email of the primary
maintainer.
"""
# TODO(adchia): remove inputs from proto and declaration
name: str
features: List[Field]
source_feature_view_projections: Dict[str, FeatureViewProjection]
source_request_sources: Dict[str, RequestSource]
udf: MethodType
description: str
tags: Dict[str, str]
owner: str
@log_exceptions
def __init__(
    self,
    *args,
    name: Optional[str] = None,
    features: Optional[List[Feature]] = None,
    sources: Optional[
        Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]]
    ] = None,
    udf: Optional[MethodType] = None,
    inputs: Optional[
        Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]]
    ] = None,
    schema: Optional[List[Field]] = None,
    description: str = "",
    tags: Optional[Dict[str, str]] = None,
    owner: str = "",
):
    """
    Creates an OnDemandFeatureView object.

    Args:
        name: The unique name of the on demand feature view.
        features (deprecated): The list of features in the output of the on demand
            feature view, after the transformation has been applied.
        sources (optional): A map from input source names to the actual input sources,
            which may be feature views, feature view projections, or request data sources.
            These sources serve as inputs to the udf, which will refer to them by name.
        udf (optional): The user defined transformation function, which must take pandas
            dataframes as inputs.
        inputs (optional): A map from input source names to the actual input sources,
            which may be feature views, feature view projections, or request data sources.
            These sources serve as inputs to the udf, which will refer to them by name.
        schema (optional): The list of features in the output of the on demand feature
            view, after the transformation has been applied.
        description (optional): A human-readable description.
        tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
        owner (optional): The owner of the on demand feature view, typically the email
            of the primary maintainer.

    Raises:
        ValueError: On conflicting or missing required parameters.
    """
    # Names accepted positionally, in this order, for backwards compatibility.
    positional_attributes = ["name", "features", "inputs", "udf"]
    _name = name
    _schema = schema or []
    if len(_schema) == 0 and features is not None:
        # Legacy path: translate deprecated Feature objects into Fields.
        _schema = [Field.from_feature(feature) for feature in features]
    if features is not None:
        warnings.warn(
            (
                "The `features` parameter is being deprecated in favor of the `schema` parameter. "
                "Please switch from using `features` to `schema`. This will also requiring switching "
                "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
                "support the `features` parameter."
            ),
            DeprecationWarning,
        )
    _sources = sources or inputs
    if inputs and sources:
        raise ValueError("At most one of `sources` or `inputs` can be specified.")
    elif inputs:
        warnings.warn(
            (
                "The `inputs` parameter is being deprecated. Please use `sources` instead. "
                "Feast 0.21 and onwards will not support the `inputs` parameter."
            ),
            DeprecationWarning,
        )
    _udf = udf
    if args:
        warnings.warn(
            (
                "On demand feature view parameters should be specified as keyword arguments "
                "instead of positional arguments. Feast 0.23 and onwards will not support "
                "positional arguments in on demand feature view definitions."
            ),
            DeprecationWarning,
        )
        if len(args) > len(positional_attributes):
            raise ValueError(
                f"Only {', '.join(positional_attributes)} are allowed as positional args "
                f"when defining feature views, for backwards compatibility."
            )
        if len(args) >= 1:
            _name = args[0]
        if len(args) >= 2:
            _schema = args[1]
            # Convert Features to Fields.
            if len(_schema) > 0 and isinstance(_schema[0], Feature):
                _schema = [Field.from_feature(feature) for feature in _schema]
                warnings.warn(
                    (
                        "The `features` parameter is being deprecated in favor of the `schema` parameter. "
                        "Please switch from using `features` to `schema`. This will also requiring switching "
                        "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
                        "support the `features` parameter."
                    ),
                    DeprecationWarning,
                )
        if len(args) >= 3:
            _sources = args[2]
            warnings.warn(
                (
                    "The `inputs` parameter is being deprecated. Please use `sources` instead. "
                    "Feast 0.21 and onwards will not support the `inputs` parameter."
                ),
                DeprecationWarning,
            )
        if len(args) >= 4:
            _udf = args[3]
    if not _name:
        raise ValueError(
            "The name of the on demand feature view must be specified."
        )
    if not _sources:
        raise ValueError("The `sources` parameter must be specified.")
    super().__init__(
        name=_name,
        features=_schema,
        description=description,
        tags=tags,
        owner=owner,
    )
    assert _sources is not None
    # Split sources by kind: request sources vs. (projections of) feature views.
    self.source_feature_view_projections: Dict[str, FeatureViewProjection] = {}
    self.source_request_sources: Dict[str, RequestSource] = {}
    for source_name, odfv_source in _sources.items():
        if isinstance(odfv_source, RequestSource):
            self.source_request_sources[source_name] = odfv_source
        elif isinstance(odfv_source, FeatureViewProjection):
            self.source_feature_view_projections[source_name] = odfv_source
        else:
            # Full feature views are reduced to their default projection.
            self.source_feature_view_projections[
                source_name
            ] = odfv_source.projection
    if _udf is None:
        raise ValueError("The `udf` parameter must be specified.")
    assert _udf
    self.udf = _udf
@property
def proto_class(self) -> Type[OnDemandFeatureViewProto]:
    """The protobuf message class used to serialize this feature view."""
    return OnDemandFeatureViewProto
def __copy__(self):
    """Return a shallow copy, re-merging both source maps and copying the projection."""
    fv = OnDemandFeatureView(
        name=self.name,
        schema=self.features,
        # Re-merge both source kinds into a single sources dict; the
        # constructor will split them apart again.
        sources=dict(
            **self.source_feature_view_projections, **self.source_request_sources,
        ),
        udf=self.udf,
        description=self.description,
        tags=self.tags,
        owner=self.owner,
    )
    fv.projection = copy.copy(self.projection)
    return fv
def __eq__(self, other):
    """Equal when the base feature view fields, both source maps, and the
    udf's compiled bytecode all match."""
    if not super().__eq__(other):
        return False
    return (
        self.source_feature_view_projections == other.source_feature_view_projections
        and self.source_request_sources == other.source_request_sources
        and self.udf.__code__.co_code == other.udf.__code__.co_code
    )
def __hash__(self):
    """Hash by the base feature view's identity fields (kept alongside __eq__)."""
    return super().__hash__()
def to_proto(self) -> OnDemandFeatureViewProto:
    """
    Converts an on demand feature view object to its protobuf representation.

    Returns:
        A OnDemandFeatureViewProto protobuf.
    """
    meta = OnDemandFeatureViewMeta()
    if self.created_timestamp:
        meta.created_timestamp.FromDatetime(self.created_timestamp)
    if self.last_updated_timestamp:
        meta.last_updated_timestamp.FromDatetime(self.last_updated_timestamp)
    # Each source becomes an OnDemandSource oneof: either a feature view
    # projection or a request data source.
    sources = {}
    for source_name, fv_projection in self.source_feature_view_projections.items():
        sources[source_name] = OnDemandSource(
            feature_view_projection=fv_projection.to_proto()
        )
    for (source_name, request_sources,) in self.source_request_sources.items():
        sources[source_name] = OnDemandSource(
            request_data_source=request_sources.to_proto()
        )
    spec = OnDemandFeatureViewSpec(
        name=self.name,
        features=[feature.to_proto() for feature in self.features],
        sources=sources,
        # The udf is serialized with dill so from_proto() can reconstruct it.
        user_defined_function=UserDefinedFunctionProto(
            name=self.udf.__name__, body=dill.dumps(self.udf, recurse=True),
        ),
        description=self.description,
        tags=self.tags,
        owner=self.owner,
    )
    return OnDemandFeatureViewProto(spec=spec, meta=meta)
@classmethod
def from_proto(cls, on_demand_feature_view_proto: OnDemandFeatureViewProto):
    """
    Creates an on demand feature view from a protobuf representation.

    Args:
        on_demand_feature_view_proto: A protobuf representation of an on-demand feature view.

    Returns:
        A OnDemandFeatureView object based on the on-demand feature view protobuf.
    """
    sources = {}
    for (
        source_name,
        on_demand_source,
    ) in on_demand_feature_view_proto.spec.sources.items():
        # Resolve the oneof: a full feature view (reduced to its projection),
        # a bare projection, or a request data source.
        if on_demand_source.WhichOneof("source") == "feature_view":
            sources[source_name] = FeatureView.from_proto(
                on_demand_source.feature_view
            ).projection
        elif on_demand_source.WhichOneof("source") == "feature_view_projection":
            sources[source_name] = FeatureViewProjection.from_proto(
                on_demand_source.feature_view_projection
            )
        else:
            sources[source_name] = RequestSource.from_proto(
                on_demand_source.request_data_source
            )
    on_demand_feature_view_obj = cls(
        name=on_demand_feature_view_proto.spec.name,
        schema=[
            Field(
                name=feature.name,
                dtype=from_value_type(ValueType(feature.value_type)),
            )
            for feature in on_demand_feature_view_proto.spec.features
        ],
        sources=sources,
        # The udf was serialized with dill in to_proto().
        udf=dill.loads(
            on_demand_feature_view_proto.spec.user_defined_function.body
        ),
        description=on_demand_feature_view_proto.spec.description,
        tags=dict(on_demand_feature_view_proto.spec.tags),
        owner=on_demand_feature_view_proto.spec.owner,
    )

    # FeatureViewProjections are not saved in the OnDemandFeatureView proto.
    # Create the default projection.
    on_demand_feature_view_obj.projection = FeatureViewProjection.from_definition(
        on_demand_feature_view_obj
    )

    if on_demand_feature_view_proto.meta.HasField("created_timestamp"):
        on_demand_feature_view_obj.created_timestamp = (
            on_demand_feature_view_proto.meta.created_timestamp.ToDatetime()
        )
    if on_demand_feature_view_proto.meta.HasField("last_updated_timestamp"):
        on_demand_feature_view_obj.last_updated_timestamp = (
            on_demand_feature_view_proto.meta.last_updated_timestamp.ToDatetime()
        )

    return on_demand_feature_view_obj
def get_request_data_schema(self) -> Dict[str, ValueType]:
    """Return the merged name -> ValueType schema of all request data sources."""
    schema: Dict[str, ValueType] = {}
    for request_source in self.source_request_sources.values():
        if isinstance(request_source.schema, List):
            # Field-object form: derive each value type from the field dtype.
            new_schema = {}
            for field in request_source.schema:
                new_schema[field.name] = field.dtype.to_value_type()
            schema.update(new_schema)
        elif isinstance(request_source.schema, Dict):
            # Already a name -> ValueType mapping.
            schema.update(request_source.schema)
        else:
            raise Exception(
                f"Request source schema is not correct type: ${str(type(request_source.schema))}"
            )
    return schema
def get_transformed_features_df(
    self, df_with_features: pd.DataFrame, full_feature_names: bool = False,
) -> pd.DataFrame:
    """Run this view's udf over *df_with_features* and return its output.

    The input frame is temporarily augmented so every source feature is
    reachable under both its short name and its fully qualified
    ``<projection>__<feature>`` name; those temporary aliases are dropped
    from the input frame before returning. Output columns are renamed to
    match the naming scheme selected by *full_feature_names*.
    """
    alias_columns = []
    for projection in self.source_feature_view_projections.values():
        for feat in projection.features:
            qualified = f"{projection.name}__{feat.name}"
            if qualified in df_with_features.keys():
                # Qualified column present: mirror it under the short name.
                df_with_features[feat.name] = df_with_features[qualified]
                alias_columns.append(feat.name)
            elif feat.name in df_with_features.keys():
                # Short column present: mirror it under the qualified name.
                df_with_features[qualified] = df_with_features[feat.name]
                alias_columns.append(qualified)

    transformed = self.udf.__call__(df_with_features)

    # Map output columns onto the naming scheme the caller asked for.
    renames: Dict[str, str] = {}
    for feat in self.features:
        short = feat.name
        qualified = f"{self.projection.name_to_use()}__{feat.name}"
        if full_feature_names and short in transformed.columns:
            renames[short] = qualified
        elif not full_feature_names:
            # The qualified name must be present in the udf output.
            renames[qualified] = short

    # Remove the temporary alias columns from the caller's frame.
    df_with_features.drop(columns=alias_columns, inplace=True)
    return transformed.rename(columns=renames)
def infer_features(self):
    """
    Infers the set of features associated to this feature view from the input source.

    Builds an empty, dtype-only dataframe with one column per source feature
    (under both short and qualified names) plus request-source fields, runs
    the udf over it, and reads the inferred features off the output columns.

    Raises:
        RegistryInferenceFailure: The set of features could not be inferred.
    """
    df = pd.DataFrame()
    # Source features are reachable under both their qualified and short names.
    for feature_view_projection in self.source_feature_view_projections.values():
        for feature in feature_view_projection.features:
            dtype = feast_value_type_to_pandas_type(feature.dtype.to_value_type())
            df[f"{feature_view_projection.name}__{feature.name}"] = pd.Series(
                dtype=dtype
            )
            df[f"{feature.name}"] = pd.Series(dtype=dtype)
    # Request-source fields are only available under their plain names.
    for request_data in self.source_request_sources.values():
        for field in request_data.schema:
            dtype = feast_value_type_to_pandas_type(field.dtype.to_value_type())
            df[f"{field.name}"] = pd.Series(dtype=dtype)
    output_df: pd.DataFrame = self.udf.__call__(df)
    inferred_features = []
    for f, dt in zip(output_df.columns, output_df.dtypes):
        inferred_features.append(
            Field(
                name=f,
                dtype=from_value_type(
                    python_type_to_feast_value_type(f, type_name=str(dt))
                ),
            )
        )
    if self.features:
        # Explicitly declared features must all appear in the udf output.
        missing_features = []
        for specified_features in self.features:
            if specified_features not in inferred_features:
                missing_features.append(specified_features)
        if missing_features:
            raise SpecifiedFeaturesNotPresentError(
                [f.name for f in missing_features], self.name
            )
    else:
        self.features = inferred_features

    if not self.features:
        raise RegistryInferenceFailure(
            "OnDemandFeatureView",
            f"Could not infer Features for the feature view '{self.name}'.",
        )
@staticmethod
def get_requested_odfvs(feature_refs, project, registry):
    """Return the registered on demand feature views with at least one
    feature referenced in *feature_refs*.

    Args:
        feature_refs: Collection of ``"<view_name>:<feature_name>"`` strings.
        project: Feast project to query.
        registry: Registry used to list the on demand feature views.
    """
    all_on_demand_feature_views = registry.list_on_demand_feature_views(
        project, allow_cache=True
    )
    requested_on_demand_feature_views: List[OnDemandFeatureView] = []
    for odfv in all_on_demand_feature_views:
        for feature in odfv.features:
            if f"{odfv.name}:{feature.name}" in feature_refs:
                requested_on_demand_feature_views.append(odfv)
                # One matching feature is enough; avoid duplicate entries.
                break
    return requested_on_demand_feature_views
# TODO(felixwang9817): Force this decorator to accept kwargs and switch from
# `features` to `schema`.
def on_demand_feature_view(
    *args,
    features: Optional[List[Feature]] = None,
    sources: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None,
    inputs: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None,
    schema: Optional[List[Field]] = None,
    description: str = "",
    tags: Optional[Dict[str, str]] = None,
    owner: str = "",
):
    """
    Creates an OnDemandFeatureView object with the given user function as udf.

    Args:
        features (deprecated): The list of features in the output of the on demand
            feature view, after the transformation has been applied.
        sources (optional): A map from input source names to the actual input sources,
            which may be feature views, feature view projections, or request data sources.
            These sources serve as inputs to the udf, which will refer to them by name.
        inputs (optional): A map from input source names to the actual input sources,
            which may be feature views, feature view projections, or request data sources.
            These sources serve as inputs to the udf, which will refer to them by name.
        schema (optional): The list of features in the output of the on demand feature
            view, after the transformation has been applied.
        description (optional): A human-readable description.
        tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
        owner (optional): The owner of the on demand feature view, typically the email
            of the primary maintainer.

    Raises:
        ValueError: On conflicting or missing required parameters.
    """
    # Names accepted positionally, in this order, for backwards compatibility.
    positional_attributes = ["features", "inputs"]
    _schema = schema or []
    if len(_schema) == 0 and features is not None:
        # Legacy path: translate deprecated Feature objects into Fields.
        _schema = [Field.from_feature(feature) for feature in features]
    if features is not None:
        warnings.warn(
            (
                "The `features` parameter is being deprecated in favor of the `schema` parameter. "
                "Please switch from using `features` to `schema`. This will also requiring switching "
                "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
                "support the `features` parameter."
            ),
            DeprecationWarning,
        )
    _sources = sources or inputs
    if inputs and sources:
        raise ValueError("At most one of `sources` or `inputs` can be specified.")
    elif inputs:
        warnings.warn(
            (
                "The `inputs` parameter is being deprecated. Please use `sources` instead. "
                "Feast 0.21 and onwards will not support the `inputs` parameter."
            ),
            DeprecationWarning,
        )
    if args:
        warnings.warn(
            (
                "On demand feature view parameters should be specified as keyword arguments "
                "instead of positional arguments. Feast 0.23 and onwards will not support "
                "positional arguments in on demand feature view definitions."
            ),
            DeprecationWarning,
        )
        if len(args) > len(positional_attributes):
            raise ValueError(
                f"Only {', '.join(positional_attributes)} are allowed as positional args "
                f"when defining feature views, for backwards compatibility."
            )
        if len(args) >= 1:
            _schema = args[0]
            # Convert Features to Fields.
            if len(_schema) > 0 and isinstance(_schema[0], Feature):
                _schema = [Field.from_feature(feature) for feature in _schema]
                warnings.warn(
                    (
                        "The `features` parameter is being deprecated in favor of the `schema` parameter. "
                        "Please switch from using `features` to `schema`. This will also requiring switching "
                        "feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
                        "support the `features` parameter."
                    ),
                    DeprecationWarning,
                )
        if len(args) >= 2:
            _sources = args[1]
            warnings.warn(
                (
                    "The `inputs` parameter is being deprecated. Please use `sources` instead. "
                    "Feast 0.21 and onwards will not support the `inputs` parameter."
                ),
                DeprecationWarning,
            )
    if not _sources:
        raise ValueError("The `sources` parameter must be specified.")

    def decorator(user_function):
        # The decorated function becomes the udf; its name becomes the view name.
        on_demand_feature_view_obj = OnDemandFeatureView(
            name=user_function.__name__,
            sources=_sources,
            schema=_schema,
            udf=user_function,
            description=description,
            tags=tags,
            owner=owner,
        )
        functools.update_wrapper(
            wrapper=on_demand_feature_view_obj, wrapped=user_function
        )
        return on_demand_feature_view_obj

    return decorator
|
# -*- coding: utf-8 -*-
"""Access to FAIRsharing via its API.
.. seealso:: https://beta.fairsharing.org/API_doc
"""
from typing import Any, Iterable, Mapping, MutableMapping, Optional
import pystow
import requests
import yaml
from tqdm import tqdm
__all__ = [
"ensure_fairsharing",
"load_fairsharing",
"FairsharingClient",
]
PATH = pystow.join("bio", "fairsharing", name="fairsharing.yaml")
def load_fairsharing(force_download: bool = False, use_tqdm: bool = True, **kwargs):
    """Download (if needed) and parse the FAIRsharing registry.

    :param force_download: Re-download even if a cached copy exists.
    :param use_tqdm: Show a progress bar while downloading.
    :param kwargs: Keyword arguments forwarded to :class:`FairsharingClient`.
    :return: The registry parsed from the cached YAML file.
    """
    registry_path = ensure_fairsharing(
        force_download=force_download, use_tqdm=use_tqdm, **kwargs
    )
    with registry_path.open() as yaml_file:
        return yaml.safe_load(yaml_file)
def ensure_fairsharing(force_download: bool = False, use_tqdm: bool = True, **kwargs):
    """Get the FAIRsharing registry.

    :param force_download: If true, re-download even when a cached file exists.
    :param use_tqdm: If true, show a progress bar while downloading.
    :param kwargs: Keyword arguments forwarded to :class:`FairsharingClient`.
    :return: The path to the cached YAML dump of the registry.
    """
    if PATH.exists() and not force_download:
        return PATH
    client = FairsharingClient(**kwargs)
    # As of 2021-12-13, there are a bit less than 4k records that take about 3 minutes to download
    rv = {
        row["prefix"]: row
        for row in tqdm(
            client.iter_records(),
            unit_scale=True,
            unit="record",
            desc="Downloading FAIRsharing",
            disable=not use_tqdm,
        )
    }
    with PATH.open("w") as file:
        # sort_keys gives a stable, diff-friendly dump; allow_unicode keeps
        # non-ASCII names readable.
        yaml.safe_dump(rv, file, allow_unicode=True, sort_keys=True)
    return PATH
# These fields are the same in each record
REDUNDANT_FIELDS = {
    # Global licence text repeated verbatim on every record; dropped to keep
    # the cached registry small.
    "fairsharing-licence",
}
class FairsharingClient:
    """A client for programmatic access to the FAIRsharing private API."""

    def __init__(
        self,
        login: Optional[str] = None,
        password: Optional[str] = None,
        base_url: Optional[str] = None,
    ):
        """Instantiate the client and get an appropriate JWT token.

        :param login: FAIRsharing username
        :param password: Corresponding FAIRsharing password
        :param base_url: The base URL
        """
        self.base_url = base_url or "https://api.fairsharing.org"
        self.signin_url = f"{self.base_url}/users/sign_in"
        self.records_url = f"{self.base_url}/fairsharing_records"
        # Fall back to the pystow configuration when credentials aren't given;
        # raise_on_missing makes missing credentials an immediate error.
        self.username = pystow.get_config(
            "fairsharing", "login", passthrough=login, raise_on_missing=True
        )
        self.password = pystow.get_config(
            "fairsharing", "password", passthrough=password, raise_on_missing=True
        )
        self.jwt = self.get_jwt()
        self.session = requests.Session()
        self.session.headers.update(
            {
                "Accept": "application/json",
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.jwt}",
            }
        )

    def get_jwt(self) -> str:
        """Sign in with the configured credentials and return the JWT string."""
        payload = {
            "user": {
                "login": self.username,
                "password": self.password,
            },
        }
        res = requests.post(self.signin_url, json=payload).json()
        return res["jwt"]

    def iter_records(self) -> Iterable[Mapping[str, Any]]:
        """Iterate over all FAIRsharing records, following pagination links."""
        yield from self._iter_records_helper(self.records_url)

    def _preprocess_record(
        self, record: MutableMapping[str, Any]
    ) -> Optional[MutableMapping[str, Any]]:
        """Flatten and clean a raw API record; return None if it is unusable."""
        if "type" in record:
            del record["type"]
        record = {"id": record["id"], **record["attributes"]}

        doi = record.get("doi")
        if doi is None:
            # Records without a DOI can't be resolved
            url = record["url"]
            if not url.startswith("https://fairsharing.org/fairsharing_records/"):
                # BUG FIX: the original nested double quotes inside a
                # double-quoted f-string, which is a SyntaxError before
                # Python 3.12 (PEP 701).
                tqdm.write(f"{record['id']} has no DOI: {record['url']}")
            return None
        elif doi.startswith("10.25504/"):
            # Strip the FAIRsharing DOI namespace to obtain the bare prefix.
            record["prefix"] = record.pop("doi")[len("10.25504/") :]
        else:
            # BUG FIX: same f-string quote nesting issue as above.
            tqdm.write(f"DOI has unexpected prefix: {record['doi']}")

        record["description"] = _removeprefix(
            record.get("description"), "This FAIRsharing record describes: "
        )
        record["name"] = _removeprefix(record.get("name"), "FAIRsharing record for: ")
        for key in REDUNDANT_FIELDS:
            if key in record:
                del record[key]
        return record

    def _iter_records_helper(self, url: str) -> Iterable[Mapping[str, Any]]:
        """Yield preprocessed records from *url*, then recurse into the next page."""
        res = self.session.get(url).json()
        for record in res["data"]:
            yv = self._preprocess_record(record)
            if yv:
                yield yv
        next_url = res["links"].get("next")
        if next_url:
            yield from self._iter_records_helper(next_url)
def _removeprefix(s: Optional[str], prefix) -> Optional[str]:
if s is None:
return None
if s.startswith(prefix):
return s[len(prefix) :]
return s
if __name__ == "__main__":
    # Running this module directly forces a refresh of the cached registry.
    ensure_fairsharing(force_download=True)
| # -*- coding: utf-8 -*-
"""Access to FAIRsharing via its API.
.. seealso:: https://beta.fairsharing.org/API_doc
"""
from typing import Any, Iterable, Mapping, MutableMapping, Optional
import pystow
import requests
import yaml
from tqdm import tqdm
__all__ = [
"ensure_fairsharing",
"load_fairsharing",
"FairsharingClient",
]
PATH = pystow.join("bio", "fairsharing", name="fairsharing.yaml")
def load_fairsharing(force_download: bool = False, use_tqdm: bool = True, **kwargs):
    """Get the FAIRsharing registry.

    :param force_download: If true, re-download even when a cached copy exists.
    :param use_tqdm: If true, show a progress bar while downloading.
    :param kwargs: Keyword arguments forwarded to :class:`FairsharingClient`.
    :return: The registry parsed from the cached YAML file.
    """
    path = ensure_fairsharing(force_download=force_download, use_tqdm=use_tqdm, **kwargs)
    with path.open() as file:
        return yaml.safe_load(file)
def ensure_fairsharing(force_download: bool = False, use_tqdm: bool = True, **kwargs):
    """Get the FAIRsharing registry.

    :param force_download: If true, re-download even when a cached file exists.
    :param use_tqdm: If true, show a progress bar while downloading.
    :param kwargs: Keyword arguments forwarded to :class:`FairsharingClient`.
    :return: The path to the cached YAML dump of the registry.
    """
    if PATH.exists() and not force_download:
        return PATH
    client = FairsharingClient(**kwargs)
    # As of 2021-12-13, there are a bit less than 4k records that take about 3 minutes to download
    rv = {
        row["prefix"]: row
        for row in tqdm(
            client.iter_records(),
            unit_scale=True,
            unit="record",
            desc="Downloading FAIRsharing",
            disable=not use_tqdm,
        )
    }
    with PATH.open("w") as file:
        # sort_keys gives a stable, diff-friendly dump; allow_unicode keeps
        # non-ASCII names readable.
        yaml.safe_dump(rv, file, allow_unicode=True, sort_keys=True)
    return PATH
# These fields are the same in each record
REDUNDANT_FIELDS = {
"fairsharing-licence",
}
class FairsharingClient:
    """A client for programmatic access to the FAIRsharing private API."""

    def __init__(
        self,
        login: Optional[str] = None,
        password: Optional[str] = None,
        base_url: Optional[str] = None,
    ):
        """Instantiate the client and get an appropriate JWT token.

        :param login: FAIRsharing username
        :param password: Corresponding FAIRsharing password
        :param base_url: The base URL
        """
        self.base_url = base_url or "https://api.fairsharing.org"
        self.signin_url = f"{self.base_url}/users/sign_in"
        self.records_url = f"{self.base_url}/fairsharing_records"
        # Credentials fall back to pystow configuration when not passed in;
        # raise_on_missing makes a missing login/password fail fast.
        self.username = pystow.get_config(
            "fairsharing", "login", passthrough=login, raise_on_missing=True
        )
        self.password = pystow.get_config(
            "fairsharing", "password", passthrough=password, raise_on_missing=True
        )
        # Sign in once, then reuse the bearer token on a pooled session.
        self.jwt = self.get_jwt()
        self.session = requests.Session()
        self.session.headers.update(
            {
                "Accept": "application/json",
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.jwt}",
            }
        )

    def get_jwt(self) -> str:
        """Get the JWT."""
        payload = {
            "user": {
                "login": self.username,
                "password": self.password,
            },
        }
        # NOTE(review): no HTTP status handling — a failed sign-in surfaces as
        # a KeyError on "jwt" rather than a clear authentication error.
        res = requests.post(self.signin_url, json=payload).json()
        return res["jwt"]

    def iter_records(self) -> Iterable[Mapping[str, Any]]:
        """Iterate over all FAIRsharing records."""
        yield from self._iter_records_helper(self.records_url)

    def _preprocess_record(
        self, record: MutableMapping[str, Any]
    ) -> Optional[MutableMapping[str, Any]]:
        """Flatten a raw API record; return None for records that can't be resolved."""
        if "type" in record:
            del record["type"]
        # Hoist the nested "attributes" mapping up next to the record id.
        record = {"id": record["id"], **record["attributes"]}
        doi = record.get("doi")
        if doi is None:
            # Records without a DOI can't be resolved
            url = record["url"]
            if not url.startswith("https://fairsharing.org/fairsharing_records/"):
                tqdm.write(f"{record['id']} has no DOI: {record['url']}")
            return None
        elif doi.startswith("10.25504/"):
            # The FAIRsharing prefix is the DOI suffix under the 10.25504 namespace.
            record["prefix"] = record.pop("doi")[len("10.25504/") :]
        else:
            # NOTE(review): the record is returned without a "prefix" key on this
            # branch, which ensure_fairsharing indexes by — confirm intended.
            tqdm.write(f"DOI has unexpected prefix: {record['doi']}")
        record["description"] = _removeprefix(
            record.get("description"), "This FAIRsharing record describes: "
        )
        record["name"] = _removeprefix(record.get("name"), "FAIRsharing record for: ")
        for key in REDUNDANT_FIELDS:
            if key in record:
                del record[key]
        return record

    def _iter_records_helper(self, url: str) -> Iterable[Mapping[str, Any]]:
        """Yield records from *url*, recursing through pagination "next" links."""
        res = self.session.get(url).json()
        for record in res["data"]:
            yv = self._preprocess_record(record)
            if yv:
                yield yv
        next_url = res["links"].get("next")
        if next_url:
            yield from self._iter_records_helper(next_url)
def _removeprefix(s: Optional[str], prefix) -> Optional[str]:
    """Strip *prefix* from *s* if present; None passes through unchanged."""
    if s is not None and s.startswith(prefix):
        return s[len(prefix):]
    return s
# Manual entry point: force a fresh download of the registry snapshot.
if __name__ == "__main__":
    ensure_fairsharing(force_download=True)
|
"""common logic for all queries"""
import json
from functools import partial, singledispatch
from operator import itemgetter
import snug
from gentools import (compose, map_yield, map_send, oneyield, reusable,
map_return)
from .load import registry
API_URL = 'https://slack.com/api/'
class ApiError(Exception):
    """Raised when the Slack API returns a non-200 status or an error payload."""
def _parse_content(response):
    """Parse the response body as JSON and return the decoded payload.

    :param response: HTTP response with ``status_code`` and ``content``
    :raises ApiError: on non-200 status, or when the payload has ``ok: false``
    """
    if response.status_code != 200:
        raise ApiError(f'unknown error: {response.content.decode()}')
    result = json.loads(response.content)
    if not result['ok']:
        # Fix: reusing the f-string's own quote character inside {...} is a
        # SyntaxError before Python 3.12, so the keys use the other quote kind.
        raise ApiError(f"{result['error']}: {result.get('detail')}")
    return result
# Pipeline applied to every query: prefix relative request paths with the API
# root on the way out, then JSON-decode and error-check each response sent in.
basic_interaction = compose(map_yield(snug.prefix_adder(API_URL)),
                            map_send(_parse_content))
"""basic request/response parsing"""
@singledispatch
def _dump_queryparam_value(val):
    """Render a query-parameter value as a string (type-dispatched)."""
    return str(val)


@_dump_queryparam_value.register(bool)
def _dump_bool_value(val):
    # Slack expects JSON-style lowercase booleans, not Python's 'True'/'False'.
    if val:
        return 'true'
    return 'false'
def _dump_params(params):
    """Stringify params for the query string, dropping None-valued entries."""
    dumped = {}
    for key, value in params.items():
        if value is not None:
            dumped[key] = _dump_queryparam_value(value)
    return dumped
def paginated_retrieval(methodname, itemtype):
    """decorator factory for retrieval queries from query params

    ``itemtype`` is unused here; presumably kept for signature symmetry with
    :func:`json_post` — TODO confirm before removing.
    """
    return compose(
        reusable,
        basic_interaction,
        map_yield(partial(_params_as_get, methodname)),
    )
def _params_as_get(methodname: str, params: dict) -> snug.Request:
    # GET request with None-valued params stripped and all values stringified.
    return snug.GET(methodname, params=_dump_params(params))
def json_post(methodname, rtype, key):
    """decorator factory for json POST queries

    The response field *key* is extracted and loaded as *rtype* through the
    converter registry before being returned to the caller.
    """
    return compose(
        reusable,
        map_return(registry(rtype), itemgetter(key)),
        basic_interaction,
        map_yield(partial(_json_as_post, methodname)),
        oneyield,
    )
def _json_as_post(methodname: str, body: dict) -> snug.Request:
    """Build a JSON POST request, omitting body fields that are None."""
    payload = {key: value for key, value in body.items() if value is not None}
    return snug.POST(methodname,
                     json.dumps(payload),
                     headers={'Content-Type': 'application/json'})
| """common logic for all queries"""
import json
from functools import partial, singledispatch
from operator import itemgetter
import snug
from gentools import (compose, map_yield, map_send, oneyield, reusable,
map_return)
from .load import registry
API_URL = 'https://slack.com/api/'
class ApiError(Exception):
    """Error reported by the Slack API (HTTP failure or ``ok: false`` payload)."""
def _parse_content(response):
    """Decode an API response as JSON; raise ApiError on HTTP or API failure."""
    if response.status_code != 200:
        raise ApiError(f'unknown error: {response.content.decode()}')
    payload = json.loads(response.content)
    if payload['ok']:
        return payload
    raise ApiError(f"{payload['error']}: {payload.get('detail')}")
# Shared pipeline: prepend the API root to outgoing requests, then JSON-decode
# and error-check every response before handing it back to the generator.
basic_interaction = compose(map_yield(snug.prefix_adder(API_URL)),
                            map_send(_parse_content))
"""basic request/response parsing"""
@singledispatch
def _dump_queryparam_value(val):
    """Stringify a query-parameter value; booleans are special-cased below."""
    return str(val)


@_dump_queryparam_value.register(bool)
def _dump_bool_value(val):
    # 'True'/'False' lowered to the JSON-style 'true'/'false' the API expects.
    return str(val).lower()
def _dump_params(params):
    """Drop None values and stringify the rest for use as query parameters."""
    present = ((k, v) for k, v in params.items() if v is not None)
    return {k: _dump_queryparam_value(v) for k, v in present}
def paginated_retrieval(methodname, itemtype):
    """decorator factory for retrieval queries from query params

    ``itemtype`` is unused in this implementation — presumably reserved for
    API symmetry; verify before removing.
    """
    return compose(
        reusable,
        basic_interaction,
        map_yield(partial(_params_as_get, methodname)),
    )
def _params_as_get(methodname: str, params: dict) -> snug.Request:
    # Serialize params (None entries removed) into a GET request.
    return snug.GET(methodname, params=_dump_params(params))
def json_post(methodname, rtype, key):
    """decorator factory for json POST queries

    Extracts the response field *key* and loads it as *rtype* via the
    converter registry before returning to the caller.
    """
    return compose(
        reusable,
        map_return(registry(rtype), itemgetter(key)),
        basic_interaction,
        map_yield(partial(_json_as_post, methodname)),
        oneyield,
    )
def _json_as_post(methodname: str, body: dict) -> snug.Request:
    """POST *body* as JSON with None-valued fields removed."""
    cleaned = {name: val for name, val in body.items() if val is not None}
    return snug.POST(methodname, json.dumps(cleaned),
                     headers={'Content-Type': 'application/json'})
|
"""Provide useful functions for using PTLFlow."""
# =============================================================================
# Copyright 2021 Henrique Morimitsu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
__version__ = '0.2.5'
import logging
from argparse import Namespace
from pathlib import Path
from typing import List, Optional
import requests
import torch
from torch import hub
from ptlflow.models.base_model.base_model import BaseModel
from ptlflow.models.dicl.dicl import DICL
from ptlflow.models.fastflownet.fastflownet import FastFlowNet
from ptlflow.models.flownet.flownet2 import FlowNet2
from ptlflow.models.flownet.flownetc import FlowNetC
from ptlflow.models.flownet.flownetcs import FlowNetCS
from ptlflow.models.flownet.flownetcss import FlowNetCSS
from ptlflow.models.flownet.flownets import FlowNetS
from ptlflow.models.flownet.flownetsd import FlowNetSD
from ptlflow.models.gma.gma import GMA
from ptlflow.models.hd3.hd3 import HD3, HD3Context
from ptlflow.models.irr.pwcnet import IRRPWCNet
from ptlflow.models.irr.pwcnet_irr import IRRPWCNetIRR
from ptlflow.models.irr.irr_pwc import IRRPWC
from ptlflow.models.lcv.lcv_raft import LCV_RAFT, LCV_RAFTSmall
from ptlflow.models.liteflownet.liteflownet import LiteFlowNet
from ptlflow.models.liteflownet.liteflownet3 import (
LiteFlowNet3, LiteFlowNet3PseudoReg, LiteFlowNet3S, LiteFlowNet3SPseudoReg)
from ptlflow.models.liteflownet.liteflownet2 import LiteFlowNet2, LiteFlowNet2PseudoReg
from ptlflow.models.maskflownet.maskflownet import MaskFlownet, MaskFlownet_S
from ptlflow.models.pwcnet.pwcnet import PWCNet, PWCDCNet
from ptlflow.models.raft.raft import RAFT, RAFTSmall
from ptlflow.models.scopeflow.irr_pwc_v2 import ScopeFlow
from ptlflow.models.starflow.starflow import StarFlow
from ptlflow.models.vcn.vcn import VCN, VCNSmall
from ptlflow.utils.utils import config_logging
# SCV models depend on optional extras; degrade gracefully when missing so the
# rest of the model zoo stays importable (their registry entries become None).
try:
    from ptlflow.models.scv.scv import SCVEighth, SCVQuarter
except ImportError as e:
    print(e)
    SCVEighth = None
    SCVQuarter = None
config_logging()
# Registry mapping CLI model names to model classes. The 'scv4'/'scv8' entries
# may be None when the optional SCV dependencies failed to import (see the
# try/except guard above).
models_dict = {
    'dicl': DICL,
    'fastflownet': FastFlowNet,
    'flownet2': FlowNet2,
    'flownetc': FlowNetC,
    'flownetcs': FlowNetCS,
    'flownetcss': FlowNetCSS,
    'flownets': FlowNetS,
    'flownetsd': FlowNetSD,
    'gma': GMA,
    'hd3': HD3,
    'hd3_ctxt': HD3Context,
    'irr_pwc': IRRPWC,
    'irr_pwcnet': IRRPWCNet,
    'irr_pwcnet_irr': IRRPWCNetIRR,
    'lcv_raft': LCV_RAFT,
    'lcv_raft_small': LCV_RAFTSmall,
    'liteflownet': LiteFlowNet,
    'liteflownet2': LiteFlowNet2,
    'liteflownet2_pseudoreg': LiteFlowNet2PseudoReg,
    'liteflownet3': LiteFlowNet3,
    'liteflownet3_pseudoreg': LiteFlowNet3PseudoReg,
    'liteflownet3s': LiteFlowNet3S,
    'liteflownet3s_pseudoreg': LiteFlowNet3SPseudoReg,
    'maskflownet': MaskFlownet,
    'maskflownet_s': MaskFlownet_S,
    'pwcnet': PWCNet,
    'pwcdcnet': PWCDCNet,
    'raft': RAFT,
    'raft_small': RAFTSmall,
    'scopeflow': ScopeFlow,
    'scv4': SCVQuarter,
    'scv8': SCVEighth,
    'starflow': StarFlow,
    'vcn': VCN,
    'vcn_small': VCNSmall,
}
def download_scripts(
    destination_dir: Path = Path('ptlflow_scripts')
) -> None:
    """Download the main scripts and configs to start working with PTLFlow."""
    github_url = 'https://raw.githubusercontent.com/hmorimitsu/ptlflow/main/'
    script_names = [
        'datasets.yml',
        'infer.py',
        'test.py',
        'train.py',
        'validate.py'
    ]
    destination_dir.mkdir(parents=True, exist_ok=True)
    for script_name in script_names:
        url = github_url + script_name
        response = requests.get(url)
        if response.status_code != 200:
            # Missing files are logged but do not abort the remaining downloads.
            logging.warning('Script %s was not found.', url)
            continue
        with open(destination_dir / script_name, 'wb') as handle:
            handle.write(response.content)
    logging.info('Downloaded scripts to %s.', str(destination_dir))
def get_model(
    model_name: str,
    pretrained_ckpt: Optional[str] = None,
    args: Optional[Namespace] = None
) -> BaseModel:
    """Return an instance of a chosen model.

    The instance can be configured by the arguments, and load some existing pretrained weights.

    Note that this is different from get_model_reference(), which returns a reference to the model class. The instance,
    returned by this function, is a class already instantiated. Therefore, the return of this function is equivalent to
    "return get_model_reference()()", which looks confusing. This can be rewritten as
    "model_ref = get_model_reference(); return model_ref()".

    Parameters
    ----------
    model_name : str
        Name of the model to get an instance of.
    pretrained_ckpt : Optional[str], optional
        Name of the pretrained weight to load or a path to a local checkpoint file.
    args : Optional[Namespace], optional
        Some arguments that will be provided to the model.

    Returns
    -------
    BaseModel
        The instance of the chosen model.

    Raises
    ------
    ValueError
        If the given checkpoint name is not a valid choice.
    ValueError
        If a checkpoint name is given, but the model does not have any pretrained weights available.

    See Also
    --------
    get_model_reference : To get a reference to the class of a model.
    """
    model_ref = get_model_reference(model_name)
    if args is None:
        parser = model_ref.add_model_specific_args()
        args = parser.parse_args([])
    model = model_ref(args)
    # getattr guards against Namespaces built without a pretrained_ckpt entry.
    if pretrained_ckpt is None and args is not None and getattr(args, 'pretrained_ckpt', None) is not None:
        pretrained_ckpt = args.pretrained_ckpt
    if pretrained_ckpt is not None:
        if Path(pretrained_ckpt).exists():
            # A local checkpoint file takes precedence over named checkpoints.
            ckpt_path = pretrained_ckpt
        elif hasattr(model_ref, 'pretrained_checkpoints'):
            ckpt_path = model_ref.pretrained_checkpoints.get(pretrained_ckpt)
            if ckpt_path is None:
                # Fix: the join separator must use the other quote kind — reusing
                # the f-string's own quote is a SyntaxError before Python 3.12.
                raise ValueError(
                    f'Invalid checkpoint name {pretrained_ckpt}. '
                    f'Choose one from {{{",".join(model.pretrained_checkpoints.keys())}}}')
        else:
            raise ValueError(f'Cannot find checkpoint {pretrained_ckpt} for model {model_name}')
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        if Path(ckpt_path).exists():
            ckpt = torch.load(ckpt_path, map_location=torch.device(device))
        else:
            # Remote URL: download into the torch hub cache with hash checking.
            model_dir = Path(hub.get_dir()) / 'ptlflow' / 'checkpoints'
            ckpt = hub.load_state_dict_from_url(
                ckpt_path, model_dir=model_dir, map_location=torch.device(device), check_hash=True)
        state_dict = ckpt['state_dict']
        model.load_state_dict(state_dict)
    return model
def get_model_reference(
    model_name: str
) -> BaseModel:
    """Return a reference to the class of a chosen model.

    Note that this is different from get_model(), which returns an instance of a model. The reference, returned by this
    function, is a class before instantiation. Therefore, the return of this function can be used to instantiate a model as
    "model_ref = get_model_reference(); model_instance = model_ref()".

    Parameters
    ----------
    model_name : str
        Name of the model to get a reference of.

    Returns
    -------
    BaseModel
        A reference to the chosen model.

    Raises
    ------
    ValueError
        If the given name is not a valid choice.

    See Also
    --------
    get_model : To get an instance of a model.
    """
    try:
        return models_dict[model_name]
    except KeyError:
        # Fix: the ", " separator must use the other quote kind — reusing the
        # f-string's own quote is a SyntaxError before Python 3.12. The KeyError
        # context is suppressed since the ValueError carries the full message.
        raise ValueError(
            f'Unknown model name: {model_name}. '
            f'Choose from [{", ".join(models_dict.keys())}]') from None
def get_trainable_model_names() -> List[str]:
    """Return a list of model names that are able to be trained.

    A model counts as trainable when instantiating it yields a non-None
    ``loss_fn``; note that this instantiates every registered model.

    Returns
    =======
    List[str]
        The list of the model names that can be trained.
    """
    trainable = [name for name in models_dict if get_model(name).loss_fn is not None]
    return trainable
| """Provide useful functions for using PTLFlow."""
# =============================================================================
# Copyright 2021 Henrique Morimitsu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
__version__ = '0.2.5'
import logging
from argparse import Namespace
from pathlib import Path
from typing import List, Optional
import requests
import torch
from torch import hub
from ptlflow.models.base_model.base_model import BaseModel
from ptlflow.models.dicl.dicl import DICL
from ptlflow.models.fastflownet.fastflownet import FastFlowNet
from ptlflow.models.flownet.flownet2 import FlowNet2
from ptlflow.models.flownet.flownetc import FlowNetC
from ptlflow.models.flownet.flownetcs import FlowNetCS
from ptlflow.models.flownet.flownetcss import FlowNetCSS
from ptlflow.models.flownet.flownets import FlowNetS
from ptlflow.models.flownet.flownetsd import FlowNetSD
from ptlflow.models.gma.gma import GMA
from ptlflow.models.hd3.hd3 import HD3, HD3Context
from ptlflow.models.irr.pwcnet import IRRPWCNet
from ptlflow.models.irr.pwcnet_irr import IRRPWCNetIRR
from ptlflow.models.irr.irr_pwc import IRRPWC
from ptlflow.models.lcv.lcv_raft import LCV_RAFT, LCV_RAFTSmall
from ptlflow.models.liteflownet.liteflownet import LiteFlowNet
from ptlflow.models.liteflownet.liteflownet3 import (
LiteFlowNet3, LiteFlowNet3PseudoReg, LiteFlowNet3S, LiteFlowNet3SPseudoReg)
from ptlflow.models.liteflownet.liteflownet2 import LiteFlowNet2, LiteFlowNet2PseudoReg
from ptlflow.models.maskflownet.maskflownet import MaskFlownet, MaskFlownet_S
from ptlflow.models.pwcnet.pwcnet import PWCNet, PWCDCNet
from ptlflow.models.raft.raft import RAFT, RAFTSmall
from ptlflow.models.scopeflow.irr_pwc_v2 import ScopeFlow
from ptlflow.models.starflow.starflow import StarFlow
from ptlflow.models.vcn.vcn import VCN, VCNSmall
from ptlflow.utils.utils import config_logging
try:
from ptlflow.models.scv.scv import SCVEighth, SCVQuarter
except ImportError as e:
print(e)
SCVEighth = None
SCVQuarter = None
config_logging()
models_dict = {
'dicl': DICL,
'fastflownet': FastFlowNet,
'flownet2': FlowNet2,
'flownetc': FlowNetC,
'flownetcs': FlowNetCS,
'flownetcss': FlowNetCSS,
'flownets': FlowNetS,
'flownetsd': FlowNetSD,
'gma': GMA,
'hd3': HD3,
'hd3_ctxt': HD3Context,
'irr_pwc': IRRPWC,
'irr_pwcnet': IRRPWCNet,
'irr_pwcnet_irr': IRRPWCNetIRR,
'lcv_raft': LCV_RAFT,
'lcv_raft_small': LCV_RAFTSmall,
'liteflownet': LiteFlowNet,
'liteflownet2': LiteFlowNet2,
'liteflownet2_pseudoreg': LiteFlowNet2PseudoReg,
'liteflownet3': LiteFlowNet3,
'liteflownet3_pseudoreg': LiteFlowNet3PseudoReg,
'liteflownet3s': LiteFlowNet3S,
'liteflownet3s_pseudoreg': LiteFlowNet3SPseudoReg,
'maskflownet': MaskFlownet,
'maskflownet_s': MaskFlownet_S,
'pwcnet': PWCNet,
'pwcdcnet': PWCDCNet,
'raft': RAFT,
'raft_small': RAFTSmall,
'scopeflow': ScopeFlow,
'scv4': SCVQuarter,
'scv8': SCVEighth,
'starflow': StarFlow,
'vcn': VCN,
'vcn_small': VCNSmall,
}
def download_scripts(
    destination_dir: Path = Path('ptlflow_scripts')
) -> None:
    """Download the main scripts and configs to start working with PTLFlow."""
    github_url = 'https://raw.githubusercontent.com/hmorimitsu/ptlflow/main/'
    script_names = [
        'datasets.yml',
        'infer.py',
        'test.py',
        'train.py',
        'validate.py'
    ]
    destination_dir.mkdir(parents=True, exist_ok=True)
    for sname in script_names:
        script_url = github_url + sname
        data = requests.get(script_url)
        if data.status_code == 200:
            with open(destination_dir / sname, 'wb') as f:
                f.write(data.content)
        else:
            # Missing scripts are logged but do not abort remaining downloads.
            logging.warning('Script %s was not found.', script_url)
    logging.info('Downloaded scripts to %s.', str(destination_dir))
def get_model(
    model_name: str,
    pretrained_ckpt: Optional[str] = None,
    args: Optional[Namespace] = None
) -> BaseModel:
    """Return an instance of a chosen model.

    The instance can be configured by the arguments, and load some existing pretrained weights.

    Note that this is different from get_model_reference(), which returns a reference to the model class. The instance,
    returned by this function, is a class already instantiated. Therefore, the return of this function is equivalent to
    "return get_model_reference()()", which looks confusing. This can be rewritten as
    "model_ref = get_model_reference(); return model_ref()".

    Parameters
    ----------
    model_name : str
        Name of the model to get an instance of.
    pretrained_ckpt : Optional[str], optional
        Name of the pretrained weight to load or a path to a local checkpoint file.
    args : Optional[Namespace], optional
        Some arguments that will be provided to the model.

    Returns
    -------
    BaseModel
        The instance of the chosen model.

    Raises
    ------
    ValueError
        If the given checkpoint name is not a valid choice.
    ValueError
        If a checkpoint name is given, but the model does not have any pretrained weights available.

    See Also
    --------
    get_model_reference : To get a reference to the class of a model.
    """
    model_ref = get_model_reference(model_name)
    if args is None:
        # Build default arguments from the model's own CLI parser.
        parser = model_ref.add_model_specific_args()
        args = parser.parse_args([])
    model = model_ref(args)
    # NOTE(review): assumes args always carries a pretrained_ckpt attribute —
    # an externally-built Namespace without it would raise AttributeError.
    if pretrained_ckpt is None and args is not None and args.pretrained_ckpt is not None:
        pretrained_ckpt = args.pretrained_ckpt
    if pretrained_ckpt is not None:
        if Path(pretrained_ckpt).exists():
            # A local checkpoint file takes precedence over named checkpoints.
            ckpt_path = pretrained_ckpt
        elif hasattr(model_ref, 'pretrained_checkpoints'):
            ckpt_path = model_ref.pretrained_checkpoints.get(pretrained_ckpt)
            if ckpt_path is None:
                raise ValueError(
                    f'Invalid checkpoint name {pretrained_ckpt}. '
                    f'Choose one from {{{",".join(model.pretrained_checkpoints.keys())}}}')
        else:
            raise ValueError(f'Cannot find checkpoint {pretrained_ckpt} for model {model_name}')
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        if Path(ckpt_path).exists():
            ckpt = torch.load(ckpt_path, map_location=torch.device(device))
        else:
            # Remote URL: download into the torch hub cache with hash checking.
            model_dir = Path(hub.get_dir()) / 'ptlflow' / 'checkpoints'
            ckpt = hub.load_state_dict_from_url(
                ckpt_path, model_dir=model_dir, map_location=torch.device(device), check_hash=True)
        state_dict = ckpt['state_dict']
        model.load_state_dict(state_dict)
    return model
def get_model_reference(
    model_name: str
) -> BaseModel:
    """Return a reference to the class of a chosen model.

    Note that this is different from get_model(), which returns an instance of a model. The reference, returned by this
    function, is a class before instantiation. Therefore, the return of this function can be used to instantiate a model as
    "model_ref = get_model_reference(); model_instance = model_ref()".

    Parameters
    ----------
    model_name : str
        Name of the model to get a reference of.

    Returns
    -------
    BaseModel
        A reference to the chosen model.

    Raises
    ------
    ValueError
        If the given name is not a valid choice.

    See Also
    --------
    get_model : To get an instance of a model.
    """
    try:
        return models_dict[model_name]
    except KeyError:
        # Translate the lookup failure into a user-facing error listing choices.
        raise ValueError(f'Unknown model name: {model_name}. Choose from [{", ".join(models_dict.keys())}]')
def get_trainable_model_names() -> List[str]:
    """Return a list of model names that are able to be trained.

    This function return the names of the model that have a loss function defined.
    Note that it instantiates every registered model to inspect ``loss_fn``.

    Returns
    =======
    List[str]
        The list of the model names that can be trained.
    """
    return [mname for mname in models_dict.keys() if get_model(mname).loss_fn is not None]
|
#!/usr/bin/env python
# coding: utf-8
import logging.config
import os
# Database configuration
DB_CONFIG = {
    'username': 'root',
    # Credentials are read from the environment, never hard-coded.
    'password': os.environ.get('MYSQL_TRADING_PASS'),
    'host': '127.0.0.1',
    'dbname': 'trading_db',
}
# Logging configuration
LOGGING = {
    'version': 1,
    'formatters': {  # Message formatting
        'main': {
            'format': '[%(asctime)s] %(levelname)s %(module)s %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
    },
    'handlers': {  # Message handlers
        'file_handler': {
            'class': 'logging.FileHandler',
            'filename': '/tmp/trading.log',
            'formatter': 'main',
        },
        'streamlogger': {
            'class': 'logging.StreamHandler',
            'formatter': 'main',
        },
    },
    'loggers': {  # Loggers
        'prod_logger': {
            'handlers': ['file_handler', 'streamlogger'],
            'level': 'INFO',
        },
        'devel_logger': {
            'handlers': ['file_handler', 'streamlogger'],
            'level': 'DEBUG',
        },
    },
}
# Applied at import time so loggers are configured before any app code runs.
logging.config.dictConfig(LOGGING)
# Base configuration
class Config(object):
    """Shared Flask/SQLAlchemy/mail/Celery settings; environments subclass this."""
    DEBUG = False
    CSRF_ENABLED = True
    # Fix: nesting double quotes inside a double-quoted f-string is a
    # SyntaxError before Python 3.12 — the dict keys must use single quotes.
    SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{DB_CONFIG['username']}:{DB_CONFIG['password']}" \
                              f"@{DB_CONFIG['host']}/{DB_CONFIG['dbname']}?charset=utf8"
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    LOGGER_NAME = 'devel_logger'
    MAIL_SERVER = 'smtp.yandex.com'
    MAIL_PORT = 465
    MAIL_USE_SSL = True
    MAIL_USE_TSL = False  # presumably meant TLS — kept as-is for compatibility; confirm
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    MAIL_DEFAULT_SENDER = os.environ.get('MAIL_USERNAME')
    CELERY_BROKER_URL = 'redis://0.0.0.0:6379/'
    CELERY_RESULT_BACKEND = 'redis://0.0.0.0:6379/'
    CELERY_DEFAULT_QUEUE = 'request_handler_queue'
# Production configuration
class ProductionConfig(Config):
    """Production settings: debugging off, INFO-level logger."""
    DEBUG = False
    LOGGER_NAME = 'prod_logger'
# Development configuration
class DevelopmentConfig(Config):
    """Development settings: debugging on, DEBUG-level logger."""
    DEVELOPMENT = True
    DEBUG = True
    LOGGER_NAME = 'devel_logger'
# Testing configuration
class TestConfig(Config):
    """Test-suite settings: CSRF disabled and a separate test database."""
    DEBUG = True
    TESTING = True
    WTF_CSRF_ENABLED = False
    LOGGER_NAME = 'devel_logger'
    test_db_name = "test_trading_db"
    # Fix: single quotes inside the double-quoted f-string keep this valid
    # on Python < 3.12 (same-quote nesting is a SyntaxError there).
    SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{DB_CONFIG['username']}:{DB_CONFIG['password']}" \
                              f"@{DB_CONFIG['host']}/{test_db_name}?charset=utf8"
# Current configuration
# --------------------------------------------------
_currentConfig = DevelopmentConfig


def getConfig():
    """Return the currently active configuration class."""
    return _currentConfig


def setConfig(config):
    """Globally switch the active configuration class."""
    global _currentConfig
    _currentConfig = config
# --------------------------------------------------
# Size of the data buffer chunks loaded into the database
chunkSize = 30000
| #!/usr/bin/env python
# coding: utf-8
import logging.config
import os
# Database configuration
DB_CONFIG = {
    'username': 'root',
    # Credentials are read from the environment, never hard-coded.
    'password': os.environ.get('MYSQL_TRADING_PASS'),
    'host': '127.0.0.1',
    'dbname': 'trading_db',
}
# Logging configuration
LOGGING = {
    'version': 1,
    'formatters': {  # Message formatting
        'main': {
            'format': '[%(asctime)s] %(levelname)s %(module)s %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
    },
    'handlers': {  # Message handlers
        'file_handler': {
            'class': 'logging.FileHandler',
            'filename': '/tmp/trading.log',
            'formatter': 'main',
        },
        'streamlogger': {
            'class': 'logging.StreamHandler',
            'formatter': 'main',
        },
    },
    'loggers': {  # Loggers
        'prod_logger': {
            'handlers': ['file_handler', 'streamlogger'],
            'level': 'INFO',
        },
        'devel_logger': {
            'handlers': ['file_handler', 'streamlogger'],
            'level': 'DEBUG',
        },
    },
}
# Applied at import time so loggers are configured before any app code runs.
logging.config.dictConfig(LOGGING)
# Base configuration
class Config(object):
    """Shared Flask/SQLAlchemy/mail/Celery settings; environments subclass this."""
    DEBUG = False
    CSRF_ENABLED = True
    SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{DB_CONFIG['username']}:{DB_CONFIG['password']}" \
                              f"@{DB_CONFIG['host']}/{DB_CONFIG['dbname']}?charset=utf8"
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    LOGGER_NAME = 'devel_logger'
    MAIL_SERVER = 'smtp.yandex.com'
    MAIL_PORT = 465
    MAIL_USE_SSL = True
    MAIL_USE_TSL = False  # presumably meant TLS — confirm before relying on it
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    MAIL_DEFAULT_SENDER = os.environ.get('MAIL_USERNAME')
    CELERY_BROKER_URL = 'redis://0.0.0.0:6379/'
    CELERY_RESULT_BACKEND = 'redis://0.0.0.0:6379/'
    CELERY_DEFAULT_QUEUE = 'request_handler_queue'
# Production configuration
class ProductionConfig(Config):
    """Production settings: debugging off, INFO-level logger."""
    DEBUG = False
    LOGGER_NAME = 'prod_logger'
# Development configuration
class DevelopmentConfig(Config):
    """Development settings: debugging on, DEBUG-level logger."""
    DEVELOPMENT = True
    DEBUG = True
    LOGGER_NAME = 'devel_logger'
# Testing configuration
class TestConfig(Config):
    """Test-suite settings: CSRF disabled and a separate test database."""
    DEBUG = True
    TESTING = True
    WTF_CSRF_ENABLED = False
    LOGGER_NAME = 'devel_logger'
    test_db_name = "test_trading_db"
    SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{DB_CONFIG['username']}:{DB_CONFIG['password']}" \
                              f"@{DB_CONFIG['host']}/{test_db_name}?charset=utf8"
# Current configuration
# --------------------------------------------------
_currentConfig = DevelopmentConfig


def getConfig():
    """Return the currently active configuration class."""
    return _currentConfig


def setConfig(config):
    """Globally switch the active configuration class."""
    global _currentConfig
    _currentConfig = config
# --------------------------------------------------
# Size of the data buffer chunks loaded into the database
chunkSize = 30000
|
# https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
    """Randomize the initial state by issuing a random number of no-ops on reset."""

    def __init__(self, env, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        # When set, overrides the sampled no-op count (useful for determinism).
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'

    def reset(self, **kwargs):
        """ Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                # Episode ended during the no-ops; restart from a fresh reset.
                obs = self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
    """Press FIRE after reset for games that stay frozen until firing."""

    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def reset(self, **kwargs):
        self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        # NOTE(review): if that last step ended the episode, obs predates the
        # reset — same behavior as upstream baselines; confirm acceptable.
        return obs

    def step(self, ac):
        return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
    """Treat the loss of a life as episode end, resetting only on real game over."""

    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0
        # Tracks whether the last done came from actual game over (vs life loss).
        self.was_real_done = True

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info

    def reset(self, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    """Repeat each action `skip` times and max-pool the last two raw frames."""

    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame"""
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
        self._skip = skip

    def step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            # Only the last two frames matter for the max-pool below.
            if i == self._skip - 2: self._obs_buffer[0] = obs
            if i == self._skip - 1: self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Note that the observation on the done=True frame
        # doesn't matter
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, total_reward, done, info

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
    """Clip every reward to its sign, as done for DQN training."""

    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)

    def reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
    """Resize (and optionally grayscale) observation frames."""

    def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
        """
        Warp frames to 84x84 as done in the Nature paper and later work.
        If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
        observation should be warped.
        """
        super().__init__(env)
        self._width = width
        self._height = height
        self._grayscale = grayscale
        self._key = dict_space_key
        if self._grayscale:
            num_colors = 1
        else:
            num_colors = 3
        new_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(self._height, self._width, num_colors),
            dtype=np.uint8,
        )
        # Replace either the whole observation space or only the keyed entry.
        if self._key is None:
            original_space = self.observation_space
            self.observation_space = new_space
        else:
            original_space = self.observation_space.spaces[self._key]
            self.observation_space.spaces[self._key] = new_space
        assert original_space.dtype == np.uint8 and len(original_space.shape) == 3

    def observation(self, obs):
        if self._key is None:
            frame = obs
        else:
            frame = obs[self._key]
        if self._grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(
            frame, (self._width, self._height), interpolation=cv2.INTER_AREA
        )
        if self._grayscale:
            # Restore the dropped channel axis so shape matches the space.
            frame = np.expand_dims(frame, -1)
        if self._key is None:
            obs = frame
        else:
            # Copy so the caller's dict observation is not mutated in place.
            obs = obs.copy()
            obs[self._key] = frame
        return obs
class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack k last frames.
        Returns lazy array, which is much more memory efficient.
        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        # Rolling window of the k most recent observations.
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        # Stacking happens along the first axis of the observation.
        self.observation_space = spaces.Box(low=0, high=255, shape=((shp[0] * k,)+shp[1:]), dtype=env.observation_space.dtype)

    def reset(self):
        ob = self.env.reset()
        # Seed the window by repeating the first observation k times.
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()

    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
    """Rescale uint8 pixel observations to float32 values in [0, 1]."""
    def __init__(self, env):
        super().__init__(env)
        self.observation_space = gym.spaces.Box(
            low=0, high=1, shape=env.observation_space.shape, dtype=np.float32
        )
    def observation(self, observation):
        # careful! This undoes the memory optimization, use
        # with smaller replay buffers only.
        as_float = np.array(observation).astype(np.float32)
        return as_float / 255.0
class LazyFrames(object):
    """Memory-saving view over a list of stacked frames.

    Frames shared between successive observations are stored once; the
    concatenated array is materialized lazily on first access and cached.
    Convert to a numpy array only right before feeding the model.
    """
    def __init__(self, frames):
        self._frames = frames
        self._out = None
    def _force(self):
        # Materialize (and cache) the concatenated array on first use.
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=0)
            self._frames = None
        return self._out
    def __array__(self, dtype=None):
        stacked = self._force()
        return stacked if dtype is None else stacked.astype(dtype)
    def __len__(self):
        return len(self._force())
    def __getitem__(self, i):
        return self._force()[i]
    def count(self):
        stacked = self._force()
        return stacked.shape[stacked.ndim - 1]
    def frame(self, i):
        return self._force()[..., i]
def wrap_atari(env, max_episode_steps=None):
    """Apply the standard Atari pre-wrappers (random no-ops on reset + 4-frame skipping)."""
    # Only raw (un-skipped) ALE envs are supported here.
    assert 'NoFrameskip' in env.spec.id
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    # Custom episode limits are not supported by this wrapper chain.
    assert max_episode_steps is None
    return env
class ImageToPyTorch(gym.ObservationWrapper):
    """
    Image shape to channels x height x width (PyTorch's channel-first layout)
    """
    def __init__(self, env):
        super(ImageToPyTorch, self).__init__(env)
        old_shape = self.observation_space.shape
        self.observation_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(old_shape[-1], old_shape[0], old_shape[1]),
            dtype=np.uint8,
        )
    def observation(self, observation):
        # (H, W, C) -> (C, H, W)
        return np.transpose(observation, axes=(2, 0, 1))
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
    """Configure environment for DeepMind-style Atari.
    """
    if episode_life:
        env = EpisodicLifeEnv(env)
    # Some games require pressing FIRE to start; FireResetEnv does so on reset.
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env)
    if scale:
        env = ScaledFloatFrame(env)
    if clip_rewards:
        env = ClipRewardEnv(env)
    # Reorder to channel-first for PyTorch before (optionally) stacking frames.
    env = ImageToPyTorch(env)
    if frame_stack:
        env = FrameStack(env, 4)
    return env
# Reference: https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import collections
import numpy as np
import gym
from gym.wrappers import TimeLimit, Monitor
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Double DQN Agent')
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(".py"),
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="BreakoutNoFrameskip-v4",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=1e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=2,
help='seed of the experiment')
parser.add_argument('--total-timesteps', type=int, default=10000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, `torch.backends.cudnn.deterministic=False`')
parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, cuda will not be enabled by default')
parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='weather to capture videos of the agent performances (check out `videos` folder)')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
parser.add_argument('--wandb-entity', type=str, default=None,
help="the entity (team) of wandb's project")
# Algorithm specific arguments
parser.add_argument('--buffer-size', type=int, default=1000000,
help='the replay memory buffer size')
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--target-network-frequency', type=int, default=1000,
help="the timesteps it takes to update the target network")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--batch-size', type=int, default=32,
help="the batch size of sample from the reply memory")
parser.add_argument('--start-e', type=float, default=1.,
help="the starting epsilon for exploration")
parser.add_argument('--end-e', type=float, default=0.02,
help="the ending epsilon for exploration")
parser.add_argument('--exploration-fraction', type=float, default=0.10,
help="the fraction of `total-timesteps` it takes from start-e to go end-e")
parser.add_argument('--learning-starts', type=int, default=80000,
help="timestep to start learning")
parser.add_argument('--train-frequency', type=int, default=4,
help="the frequency of training")
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
    class QValueVisualizationWrapper(gym.Wrapper):
        """Render a live bar chart of the agent's Q-values alongside the game frame."""
        def __init__(self, env):
            super().__init__(env)
            self.env.reset()
            # Remember the raw frame size so the chart can be drawn to match it.
            self.image_shape = self.env.render(mode="rgb_array").shape
            self.q_values = [[0.,0.,0.,0.]]
            # self.metadata['video.frames_per_second'] = 60
        def set_q_values(self, q_values):
            # Called from the training loop with the latest network output.
            self.q_values = q_values
        def render(self, mode="human"):
            if mode=="rgb_array":
                env_rgb_array = super().render(mode)
                fig, ax = plt.subplots(figsize=(self.image_shape[1]/100,self.image_shape[0]/100), constrained_layout=True, dpi=100)
                df = pd.DataFrame(np.array(self.q_values).T)
                sns.barplot(x=df.index, y=0, data=df, ax=ax)
                ax.set(xlabel='actions', ylabel='q-values')
                fig.canvas.draw()
                X = np.array(fig.canvas.renderer.buffer_rgba())
                Image.fromarray(X)
                # Image.fromarray(X)
                rgb_image = np.array(Image.fromarray(X).convert('RGB'))
                plt.close(fig)
                q_value_rgb_array = rgb_image
                # The chart is appended to the right of the game frame.
                return np.append(env_rgb_array, q_value_rgb_array, axis=1)
            else:
                # NOTE(review): this branch discards super().render's result and
                # returns None - confirm that is intended for "human" mode.
                super().render(mode)
    # TRY NOT TO MODIFY: setup the environment
    experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
    writer = SummaryWriter(f"runs/{experiment_name}")
    writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
        '\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
    if args.prod_mode:
        import wandb
        wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
        # In prod mode wandb syncs tensorboard logs from /tmp instead of runs/.
        writer = SummaryWriter(f"/tmp/{experiment_name}")
    # TRY NOT TO MODIFY: seeding
    device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
    env = gym.make(args.gym_id)
    env = wrap_atari(env)
    env = gym.wrappers.RecordEpisodeStatistics(env) # records episode reward in `info['episode']['r']`
    if args.capture_video:
        env = QValueVisualizationWrapper(env)
        env = Monitor(env, f'videos/{experiment_name}')
    env = wrap_deepmind(
        env,
        clip_rewards=True,
        frame_stack=True,
        scale=False,
    )
    # Seed every RNG involved so runs are reproducible.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = args.torch_deterministic
    env.seed(args.seed)
    env.action_space.seed(args.seed)
    env.observation_space.seed(args.seed)
    # respect the default timelimit
    assert isinstance(env.action_space, Discrete), "only discrete action space is supported"
# modified from https://github.com/seungeunrho/minimalRL/blob/master/dqn.py#
class ReplayBuffer():
def __init__(self, buffer_limit):
self.buffer = collections.deque(maxlen=buffer_limit)
def put(self, transition):
self.buffer.append(transition)
def sample(self, n):
mini_batch = random.sample(self.buffer, n)
s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []
for transition in mini_batch:
s, a, r, s_prime, done_mask = transition
s_lst.append(s)
a_lst.append(a)
r_lst.append(r)
s_prime_lst.append(s_prime)
done_mask_lst.append(done_mask)
return np.array(s_lst), np.array(a_lst), \
np.array(r_lst), np.array(s_prime_lst), \
np.array(done_mask_lst)
# ALGO LOGIC: initialize agent here:
# tricks taken from https://github.com/cpnota/autonomous-learning-library/blob/6d1111afce0d1582de463326f7d078a86e850551/all/presets/atari/models/__init__.py#L16
# apparently matters
    class Linear0(nn.Linear):
        """Linear layer whose weight and bias are initialized to exactly zero."""
        def reset_parameters(self):
            nn.init.constant_(self.weight, 0.0)
            if self.bias is not None:
                nn.init.constant_(self.bias, 0.0)
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
    class QNetwork(nn.Module):
        """Nature-DQN style convolutional network mapping stacked frames to per-action Q-values."""
        def __init__(self, frames=4):
            super(QNetwork, self).__init__()
            self.network = nn.Sequential(
                # Normalize uint8 pixel values to [0, 1] inside the network.
                Scale(1/255),
                nn.Conv2d(frames, 32, 8, stride=4),
                nn.ReLU(),
                nn.Conv2d(32, 64, 4, stride=2),
                nn.ReLU(),
                nn.Conv2d(64, 64, 3, stride=1),
                nn.ReLU(),
                nn.Flatten(),
                # 3136 = 64 * 7 * 7, the conv output size for the 84x84 WarpFrame input.
                nn.Linear(3136, 512),
                nn.ReLU(),
                # Zero-initialized output head (Linear0).
                Linear0(512, env.action_space.n)
            )
        def forward(self, x):
            # Accepts numpy input; converts to a float tensor on the configured device.
            x = torch.Tensor(x).to(device)
            return self.network(x)
def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
slope = (end_e - start_e) / duration
return max(slope * t + start_e, end_e)
rb = ReplayBuffer(args.buffer_size)
q_network = QNetwork().to(device)
target_network = QNetwork().to(device)
target_network.load_state_dict(q_network.state_dict())
optimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)
loss_fn = nn.MSELoss()
print(device.__repr__())
print(q_network)
# TRY NOT TO MODIFY: start the game
obs = env.reset()
episode_reward = 0
for global_step in range(args.total_timesteps):
# ALGO LOGIC: put action logic here
epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction*args.total_timesteps, global_step)
obs = np.array(obs)
logits = q_network.forward(obs.reshape((1,)+obs.shape))
if args.capture_video:
env.set_q_values(logits.tolist())
if random.random() < epsilon:
action = env.action_space.sample()
else:
action = torch.argmax(logits, dim=1).tolist()[0]
# TRY NOT TO MODIFY: execute the game and log data.
next_obs, reward, done, info = env.step(action)
episode_reward += reward
# TRY NOT TO MODIFY: record rewards for plotting purposes
if 'episode' in info.keys():
print(f"global_step={global_step}, episode_reward={info["episode"]["r"]}")
writer.add_scalar("charts/episode_reward", info['episode']['r'], global_step)
writer.add_scalar("charts/epsilon", epsilon, global_step)
# ALGO LOGIC: training.
rb.put((obs, action, reward, next_obs, done))
if global_step > args.learning_starts and global_step % args.train_frequency == 0:
s_obs, s_actions, s_rewards, s_next_obses, s_dones = rb.sample(args.batch_size)
with torch.no_grad():
# target_max = torch.max(target_network.forward(s_next_obses), dim=1)[0]
current_value = q_network.forward(s_next_obses)
target_value = target_network.forward(s_next_obses)
target_max = target_value.gather(1, torch.max(current_value, 1)[1].unsqueeze(1)).squeeze(1)
td_target = torch.Tensor(s_rewards).to(device) + args.gamma * target_max * (1 - torch.Tensor(s_dones).to(device))
old_val = q_network.forward(s_obs).gather(1, torch.LongTensor(s_actions).view(-1,1).to(device)).squeeze()
loss = loss_fn(td_target, old_val)
writer.add_scalar("losses/td_loss", loss, global_step)
# optimize the midel
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(list(q_network.parameters()), args.max_grad_norm)
optimizer.step()
# update the target network
if global_step % args.target_network_frequency == 0:
target_network.load_state_dict(q_network.state_dict())
# TRY NOT TO MODIFY: CRUCIAL step easy to overlook
obs = next_obs
if done:
# important to note that because `EpisodicLifeEnv` wrapper is applied,
# the real episode reward is actually the sum of episode reward of 5 lives
# which we record through `info['episode']['r']` provided by gym.wrappers.RecordEpisodeStatistics
obs, episode_reward = env.reset(), 0
env.close()
writer.close()
| # https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
    def __init__(self, env, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        # When set, overrides the random draw with a fixed number of no-ops.
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
    def reset(self, **kwargs):
        """ Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            # If the episode ends during the no-ops, start over.
            if done:
                obs = self.env.reset(**kwargs)
        return obs
    def step(self, ac):
        return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
    def reset(self, **kwargs):
        """Press FIRE (action 1) then action 2 after reset; retry if either ends the episode."""
        self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        return obs
    def step(self, ac):
        return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        # Lives remaining after the previous step.
        self.lives = 0
        # Whether the last `done` came from a real game over (vs a lost life).
        self.was_real_done = True
    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info
    def reset(self, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame"""
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
        self._skip = skip
    def step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            # Keep only the last two raw frames for the max-pool below.
            if i == self._skip - 2: self._obs_buffer[0] = obs
            if i == self._skip - 1: self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Note that the observation on the done=True frame
        # doesn't matter
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, total_reward, done, info
    def reset(self, **kwargs):
        """Forward reset to the wrapped env unchanged (skipping applies only to step)."""
        return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
    """Reward wrapper that replaces every reward with its sign."""
    def __init__(self, env):
        super().__init__(env)
    def reward(self, reward):
        """Clip reward to {+1, 0, -1} via the sign function."""
        return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
        """
        Warp frames to 84x84 as done in the Nature paper and later work.
        If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
        observation should be warped.
        """
        super().__init__(env)
        self._width = width
        self._height = height
        self._grayscale = grayscale
        self._key = dict_space_key
        # Grayscale output carries one channel, RGB output three.
        if self._grayscale:
            num_colors = 1
        else:
            num_colors = 3
        new_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(self._height, self._width, num_colors),
            dtype=np.uint8,
        )
        # Replace either the whole observation space or only the selected
        # sub-space of a Dict observation.
        if self._key is None:
            original_space = self.observation_space
            self.observation_space = new_space
        else:
            original_space = self.observation_space.spaces[self._key]
            self.observation_space.spaces[self._key] = new_space
        # Warping only makes sense for uint8 image observations (H, W, C).
        assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
    def observation(self, obs):
        """Resize (and optionally grayscale) the incoming frame in place of the original."""
        if self._key is None:
            frame = obs
        else:
            frame = obs[self._key]
        if self._grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(
            frame, (self._width, self._height), interpolation=cv2.INTER_AREA
        )
        if self._grayscale:
            # cvtColor drops the channel axis; restore it so the shape matches the space.
            frame = np.expand_dims(frame, -1)
        if self._key is None:
            obs = frame
        else:
            obs = obs.copy()
            obs[self._key] = frame
        return obs
class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack k last frames.
        Returns lazy array, which is much more memory efficient.
        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        # Frames are stacked along the first axis, so dim 0 grows k-fold.
        self.observation_space = spaces.Box(low=0, high=255, shape=((shp[0] * k,)+shp[1:]), dtype=env.observation_space.dtype)
    def reset(self):
        """Reset the env and fill the stack with k copies of the first frame."""
        ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()
    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info
    def _get_ob(self):
        # LazyFrames delays concatenation so duplicate frames share memory.
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
    """Rescale uint8 pixel observations to float32 values in [0, 1]."""
    def __init__(self, env):
        super().__init__(env)
        self.observation_space = gym.spaces.Box(
            low=0, high=1, shape=env.observation_space.shape, dtype=np.float32
        )
    def observation(self, observation):
        # careful! This undoes the memory optimization, use
        # with smaller replay buffers only.
        as_float = np.array(observation).astype(np.float32)
        return as_float / 255.0
class LazyFrames(object):
    """Memory-saving view over a list of stacked frames.

    Frames shared between successive observations are stored once; the
    concatenated array is materialized lazily on first access and cached.
    Convert to a numpy array only right before feeding the model.
    """
    def __init__(self, frames):
        self._frames = frames
        self._out = None
    def _force(self):
        # Materialize (and cache) the concatenated array on first use.
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=0)
            self._frames = None
        return self._out
    def __array__(self, dtype=None):
        stacked = self._force()
        return stacked if dtype is None else stacked.astype(dtype)
    def __len__(self):
        return len(self._force())
    def __getitem__(self, i):
        return self._force()[i]
    def count(self):
        stacked = self._force()
        return stacked.shape[stacked.ndim - 1]
    def frame(self, i):
        return self._force()[..., i]
def wrap_atari(env, max_episode_steps=None):
    """Apply the standard Atari pre-wrappers (random no-ops on reset + 4-frame skipping)."""
    # Only raw (un-skipped) ALE envs are supported here.
    assert 'NoFrameskip' in env.spec.id
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    # Custom episode limits are not supported by this wrapper chain.
    assert max_episode_steps is None
    return env
class ImageToPyTorch(gym.ObservationWrapper):
    """
    Image shape to channels x height x width (PyTorch's channel-first layout)
    """
    def __init__(self, env):
        super(ImageToPyTorch, self).__init__(env)
        old_shape = self.observation_space.shape
        self.observation_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(old_shape[-1], old_shape[0], old_shape[1]),
            dtype=np.uint8,
        )
    def observation(self, observation):
        # (H, W, C) -> (C, H, W)
        return np.transpose(observation, axes=(2, 0, 1))
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
    """Configure environment for DeepMind-style Atari.
    """
    if episode_life:
        env = EpisodicLifeEnv(env)
    # Some games require pressing FIRE to start; FireResetEnv does so on reset.
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env)
    if scale:
        env = ScaledFloatFrame(env)
    if clip_rewards:
        env = ClipRewardEnv(env)
    # Reorder to channel-first for PyTorch before (optionally) stacking frames.
    env = ImageToPyTorch(env)
    if frame_stack:
        env = FrameStack(env, 4)
    return env
# Reference: https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import collections
import numpy as np
import gym
from gym.wrappers import TimeLimit, Monitor
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Double DQN Agent')
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(".py"),
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="BreakoutNoFrameskip-v4",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=1e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=2,
help='seed of the experiment')
parser.add_argument('--total-timesteps', type=int, default=10000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, `torch.backends.cudnn.deterministic=False`')
parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, cuda will not be enabled by default')
parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='weather to capture videos of the agent performances (check out `videos` folder)')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
parser.add_argument('--wandb-entity', type=str, default=None,
help="the entity (team) of wandb's project")
# Algorithm specific arguments
parser.add_argument('--buffer-size', type=int, default=1000000,
help='the replay memory buffer size')
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--target-network-frequency', type=int, default=1000,
help="the timesteps it takes to update the target network")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--batch-size', type=int, default=32,
help="the batch size of sample from the reply memory")
parser.add_argument('--start-e', type=float, default=1.,
help="the starting epsilon for exploration")
parser.add_argument('--end-e', type=float, default=0.02,
help="the ending epsilon for exploration")
parser.add_argument('--exploration-fraction', type=float, default=0.10,
help="the fraction of `total-timesteps` it takes from start-e to go end-e")
parser.add_argument('--learning-starts', type=int, default=80000,
help="timestep to start learning")
parser.add_argument('--train-frequency', type=int, default=4,
help="the frequency of training")
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
    class QValueVisualizationWrapper(gym.Wrapper):
        """Render a live bar chart of the agent's Q-values alongside the game frame."""
        def __init__(self, env):
            super().__init__(env)
            self.env.reset()
            # Remember the raw frame size so the chart can be drawn to match it.
            self.image_shape = self.env.render(mode="rgb_array").shape
            self.q_values = [[0.,0.,0.,0.]]
            # self.metadata['video.frames_per_second'] = 60
        def set_q_values(self, q_values):
            # Called from the training loop with the latest network output.
            self.q_values = q_values
        def render(self, mode="human"):
            if mode=="rgb_array":
                env_rgb_array = super().render(mode)
                fig, ax = plt.subplots(figsize=(self.image_shape[1]/100,self.image_shape[0]/100), constrained_layout=True, dpi=100)
                df = pd.DataFrame(np.array(self.q_values).T)
                sns.barplot(x=df.index, y=0, data=df, ax=ax)
                ax.set(xlabel='actions', ylabel='q-values')
                fig.canvas.draw()
                X = np.array(fig.canvas.renderer.buffer_rgba())
                Image.fromarray(X)
                # Image.fromarray(X)
                rgb_image = np.array(Image.fromarray(X).convert('RGB'))
                plt.close(fig)
                q_value_rgb_array = rgb_image
                # The chart is appended to the right of the game frame.
                return np.append(env_rgb_array, q_value_rgb_array, axis=1)
            else:
                # NOTE(review): this branch discards super().render's result and
                # returns None - confirm that is intended for "human" mode.
                super().render(mode)
    # TRY NOT TO MODIFY: setup the environment
    experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
    writer = SummaryWriter(f"runs/{experiment_name}")
    writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
        '\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
    if args.prod_mode:
        import wandb
        wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
        # In prod mode wandb syncs tensorboard logs from /tmp instead of runs/.
        writer = SummaryWriter(f"/tmp/{experiment_name}")
    # TRY NOT TO MODIFY: seeding
    device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
    env = gym.make(args.gym_id)
    env = wrap_atari(env)
    env = gym.wrappers.RecordEpisodeStatistics(env) # records episode reward in `info['episode']['r']`
    if args.capture_video:
        env = QValueVisualizationWrapper(env)
        env = Monitor(env, f'videos/{experiment_name}')
    env = wrap_deepmind(
        env,
        clip_rewards=True,
        frame_stack=True,
        scale=False,
    )
    # Seed every RNG involved so runs are reproducible.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = args.torch_deterministic
    env.seed(args.seed)
    env.action_space.seed(args.seed)
    env.observation_space.seed(args.seed)
    # respect the default timelimit
    assert isinstance(env.action_space, Discrete), "only discrete action space is supported"
# modified from https://github.com/seungeunrho/minimalRL/blob/master/dqn.py#
class ReplayBuffer():
def __init__(self, buffer_limit):
self.buffer = collections.deque(maxlen=buffer_limit)
def put(self, transition):
self.buffer.append(transition)
def sample(self, n):
mini_batch = random.sample(self.buffer, n)
s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []
for transition in mini_batch:
s, a, r, s_prime, done_mask = transition
s_lst.append(s)
a_lst.append(a)
r_lst.append(r)
s_prime_lst.append(s_prime)
done_mask_lst.append(done_mask)
return np.array(s_lst), np.array(a_lst), \
np.array(r_lst), np.array(s_prime_lst), \
np.array(done_mask_lst)
# ALGO LOGIC: initialize agent here:
# tricks taken from https://github.com/cpnota/autonomous-learning-library/blob/6d1111afce0d1582de463326f7d078a86e850551/all/presets/atari/models/__init__.py#L16
# apparently matters
    class Linear0(nn.Linear):
        """Linear layer whose weight and bias are initialized to exactly zero."""
        def reset_parameters(self):
            nn.init.constant_(self.weight, 0.0)
            if self.bias is not None:
                nn.init.constant_(self.bias, 0.0)
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
    class QNetwork(nn.Module):
        """Nature-DQN style convolutional network mapping stacked frames to per-action Q-values."""
        def __init__(self, frames=4):
            super(QNetwork, self).__init__()
            self.network = nn.Sequential(
                # Normalize uint8 pixel values to [0, 1] inside the network.
                Scale(1/255),
                nn.Conv2d(frames, 32, 8, stride=4),
                nn.ReLU(),
                nn.Conv2d(32, 64, 4, stride=2),
                nn.ReLU(),
                nn.Conv2d(64, 64, 3, stride=1),
                nn.ReLU(),
                nn.Flatten(),
                # 3136 = 64 * 7 * 7, the conv output size for the 84x84 WarpFrame input.
                nn.Linear(3136, 512),
                nn.ReLU(),
                # Zero-initialized output head (Linear0).
                Linear0(512, env.action_space.n)
            )
        def forward(self, x):
            # Accepts numpy input; converts to a float tensor on the configured device.
            x = torch.Tensor(x).to(device)
            return self.network(x)
def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
slope = (end_e - start_e) / duration
return max(slope * t + start_e, end_e)
    rb = ReplayBuffer(args.buffer_size)
    q_network = QNetwork().to(device)
    target_network = QNetwork().to(device)
    # Target starts as an exact copy of the online network.
    target_network.load_state_dict(q_network.state_dict())
    optimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)
    loss_fn = nn.MSELoss()
    print(device.__repr__())
    print(q_network)
    # TRY NOT TO MODIFY: start the game
    obs = env.reset()
    episode_reward = 0
    for global_step in range(args.total_timesteps):
        # ALGO LOGIC: put action logic here
        epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction*args.total_timesteps, global_step)
        obs = np.array(obs)
        logits = q_network.forward(obs.reshape((1,)+obs.shape))
        if args.capture_video:
            env.set_q_values(logits.tolist())
        # Epsilon-greedy action selection.
        if random.random() < epsilon:
            action = env.action_space.sample()
        else:
            action = torch.argmax(logits, dim=1).tolist()[0]
        # TRY NOT TO MODIFY: execute the game and log data.
        next_obs, reward, done, info = env.step(action)
        episode_reward += reward
        # TRY NOT TO MODIFY: record rewards for plotting purposes
        if 'episode' in info.keys():
            print(f"global_step={global_step}, episode_reward={info['episode']['r']}")
            writer.add_scalar("charts/episode_reward", info['episode']['r'], global_step)
            writer.add_scalar("charts/epsilon", epsilon, global_step)
        # ALGO LOGIC: training.
        rb.put((obs, action, reward, next_obs, done))
        if global_step > args.learning_starts and global_step % args.train_frequency == 0:
            s_obs, s_actions, s_rewards, s_next_obses, s_dones = rb.sample(args.batch_size)
            with torch.no_grad():
                # target_max = torch.max(target_network.forward(s_next_obses), dim=1)[0]
                # Double DQN: the online network picks the argmax action,
                # the target network evaluates it.
                current_value = q_network.forward(s_next_obses)
                target_value = target_network.forward(s_next_obses)
                target_max = target_value.gather(1, torch.max(current_value, 1)[1].unsqueeze(1)).squeeze(1)
                td_target = torch.Tensor(s_rewards).to(device) + args.gamma * target_max * (1 - torch.Tensor(s_dones).to(device))
            old_val = q_network.forward(s_obs).gather(1, torch.LongTensor(s_actions).view(-1,1).to(device)).squeeze()
            loss = loss_fn(td_target, old_val)
            writer.add_scalar("losses/td_loss", loss, global_step)
            # optimize the model
            optimizer.zero_grad()
            loss.backward()
            nn.utils.clip_grad_norm_(list(q_network.parameters()), args.max_grad_norm)
            optimizer.step()
            # update the target network
            if global_step % args.target_network_frequency == 0:
                target_network.load_state_dict(q_network.state_dict())
        # TRY NOT TO MODIFY: CRUCIAL step easy to overlook
        obs = next_obs
        if done:
            # important to note that because `EpisodicLifeEnv` wrapper is applied,
            # the real episode reward is actually the sum of episode reward of 5 lives
            # which we record through `info['episode']['r']` provided by gym.wrappers.RecordEpisodeStatistics
            obs, episode_reward = env.reset(), 0
    env.close()
    writer.close()
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
""" Userbot module for having some fun with people. """
import os
import urllib
import requests
from re import sub
from cowpy import cow
from asyncio import sleep
from collections import deque
from random import choice, getrandbits, randint
from userbot import bot, CMD_HELP
from userbot.events import register
from userbot.modules.admin import get_user_from_event
# ================= CONSTANT =================
# Random "me too" replies (Indonesian) used by the .metoo handler.
METOOSTR = [
    "Aku Juga Terimakasih",
    "Haha Iya, Aku Juga",
    "Sama Haha",
    "Aku Juga Gabut",
    "Sama Sini",
    "Haha Iya",
    "Aku Juga",
]
# Unicode combining marks for the .zal (zalgo) handler, in three buckets
# (picked via ZALG_LIST[randint(0, 2)]).  Every entry except the very first
# carries a leading space; .zal strips it before appending the mark.
ZALG_LIST = [[
    "̖",
    " ̗",
    " ̘",
    " ̙",
    " ̜",
    " ̝",
    " ̞",
    " ̟",
    " ̠",
    " ̤",
    " ̥",
    " ̦",
    " ̩",
    " ̪",
    " ̫",
    " ̬",
    " ̭",
    " ̮",
    " ̯",
    " ̰",
    " ̱",
    " ̲",
    " ̳",
    " ̹",
    " ̺",
    " ̻",
    " ̼",
    " ͅ",
    " ͇",
    " ͈",
    " ͉",
    " ͍",
    " ͎",
    " ͓",
    " ͔",
    " ͕",
    " ͖",
    " ͙",
    " ͚",
    " ",
],
    [
    " ̍",
    " ̎",
    " ̄",
    " ̅",
    " ̿",
    " ̑",
    " ̆",
    " ̐",
    " ͒",
    " ͗",
    " ͑",
    " ̇",
    " ̈",
    " ̊",
    " ͂",
    " ̓",
    " ̈́",
    " ͊",
    " ͋",
    " ͌",
    " ̃",
    " ̂",
    " ̌",
    " ͐",
    " ́",
    " ̋",
    " ̏",
    " ̽",
    " ̉",
    " ͣ",
    " ͤ",
    " ͥ",
    " ͦ",
    " ͧ",
    " ͨ",
    " ͩ",
    " ͪ",
    " ͫ",
    " ͬ",
    " ͭ",
    " ͮ",
    " ͯ",
    " ̾",
    " ͛",
    " ͆",
    " ̚",
],
    [
    " ̕",
    " ̛",
    " ̀",
    " ́",
    " ͘",
    " ̡",
    " ̢",
    " ̧",
    " ̨",
    " ̴",
    " ̵",
    " ̶",
    " ͜",
    " ͝",
    " ͞",
    " ͟",
    " ͠",
    " ͢",
    " ̸",
    " ̷",
    " ͡",
]]
# Emoji pool used by .cp (copypasta) as random separators/decorations.
# Duplicates are kept deliberately — they weight the random choice.
EMOJIS = [
    "😂",
    "😂",
    "👌",
    "✌",
    "💞",
    "👍",
    "👌",
    "💯",
    "🎶",
    "👀",
    "😂",
    "👓",
    "👏",
    "👐",
    "🍕",
    "💥",
    "🍴",
    "💦",
    "💦",
    "🍑",
    "🍆",
    "😩",
    "😏",
    "👉👌",
    "👀",
    "👅",
    "😩",
    "🚰",
]
# Random insults (Indonesian) used by the .insult handler.
# Fix: the original was missing a comma between two entries
# ("...(Meskipun aku ragu)" / "Shock saya, ..."), so Python's implicit string
# concatenation silently fused them into one nonsensical insult.
INSULT_STRINGS = [
    "Jangan minum dan mengetik.",
    "Saya pikir Anda harus pulang atau lebih baik ke rumah sakit jiwa.",
    "Perintah tidak ditemukan. Sama seperti otak Anda.",
    "Apakah kamu sadar bahwa kamu membodohi dirimu sendiri? Ternyata tidak.",
    "Anda bisa mengetik lebih baik dari itu.",
    "Bot aturan 544 bagian 9 mencegah saya membalas orang bodoh seperti Anda.",
    "Maaf, kami tidak menjual otak.",
    "Percayalah kamu tidak normal.",
    "Saya yakin otak Anda terasa seperti baru, mengingat Anda tidak pernah menggunakannya.",
    "Jika saya ingin bunuh diri, saya akan meningkatkan ego Anda dan melompat ke IQ Anda.",
    "Zombie memakan otak ... kamu aman.",
    "Anda tidak berevolusi dari kera, mereka berevolusi dari Anda.",
    "Kembalilah dan bicara padaku ketika IQ mu melebihi umurmu.",
    "Saya tidak mengatakan Anda bodoh, saya hanya mengatakan bahwa Anda tidak beruntung dalam hal berpikir.",
    "Kamu berbicara bahasa apa? Karena terdengar seperti omong kosong.",
    "Kebodohan bukanlah kejahatan jadi kamu bebas pergi.",
    "Anda adalah bukti bahwa evolusi BISA mundur.",
    "Aku akan bertanya berapa umurmu tapi aku tahu kamu tidak bisa menghitung setinggi itu.",
    "Sebagai orang luar, apa pendapat Anda tentang umat manusia?",
    "Otak bukanlah segalanya. Dalam kasusmu mereka bukan apa-apa.",
    "Biasanya orang hidup dan belajar. Kamu hidup saja.",
    "Aku tidak tahu apa yang membuatmu begitu bodoh, tapi itu benar-benar berhasil.",
    "Teruslah berbicara, suatu hari nanti kamu akan mengatakan sesuatu yang cerdas! (Meskipun aku ragu)",
    "Shock saya, katakan sesuatu yang cerdas.",
    "IQ Anda lebih rendah dari ukuran sepatu Anda.",
    "Aduh! Neurotransmiter Anda tidak lagi bekerja.",
    "Apakah kamu gila kamu bodoh.",
    "Setiap orang berhak untuk menjadi bodoh tetapi Anda menyalahgunakan hak istimewa tersebut.",
    "Maaf aku menyakiti perasaanmu saat menyebutmu bodoh. Kupikir kamu sudah tahu itu.",
    "Anda harus mencoba mencicipi sianida.",
    "Enzim Anda dimaksudkan untuk mencerna racun tikus.",
    "Kamu harus mencoba tidur selamanya.",
    "Ambil pistol dan tembak dirimu sendiri.",
    "Anda bisa membuat rekor dunia dengan melompat dari pesawat tanpa parasut.",
    "Berhenti berbicara BS dan melompat di depan kereta peluru yang sedang berjalan.",
    "Cobalah mandi dengan Hydrochloric Acid daripada air.",
    "Coba ini: jika Anda menahan napas di bawah air selama satu jam, Anda dapat menahannya selamanya.",
    "Go Green! Berhenti menghirup Oksigen.",
    "Tuhan sedang mencarimu. Kamu harus pergi untuk bertemu dengannya.",
    "berikan 100% mu. Sekarang, pergi donor darah.",
    "Cobalah melompat dari gedung seratus lantai tetapi Anda hanya dapat melakukannya sekali.",
    "Anda harus menyumbangkan otak Anda melihat bahwa Anda tidak pernah menggunakannya.",
    "Relawan untuk target dalam jarak tembak.",
    "Tembak kepala itu menyenangkan. Dapatkan dirimu sendiri.",
    "Anda harus mencoba berenang dengan hiu putih besar.",
    "Anda harus mengecat diri Anda dengan warna merah dan berlari dalam bull marathon.",
    "Anda bisa tetap di bawah air selama sisa hidup Anda tanpa harus kembali lagi.",
    "Bagaimana kalau kamu berhenti bernapas selama 1 hari? Itu akan bagus.",
    "Cobalah memprovokasi harimau saat kalian berdua berada di dalam sangkar.",
    "Sudahkah Anda mencoba menembak diri Anda sendiri setinggi 100m menggunakan kanon.",
    "Anda harus mencoba menahan TNT di mulut Anda dan menyalakannya.",
    "Cobalah bermain menangkap dan melempar dengan RDX itu menyenangkan.",
    "Saya dengar phogine beracun tapi saya rasa Anda tidak keberatan menghirupnya untuk bersenang-senang.",
    "Luncurkan diri Anda ke luar angkasa sambil melupakan oksigen di Bumi.",
    "Kamu harus mencoba bermain ular tangga, dengan ular sungguhan dan tanpa tangga.",
    "Menari telanjang di beberapa kabel HT.",
    "Gunung Berapi Aktif adalah kolam renang terbaik untuk Anda.",
    "Anda harus mencoba mandi air panas di gunung berapi.",
    "Cobalah untuk menghabiskan satu hari di peti mati dan itu akan menjadi milikmu selamanya.",
    "Pukul Uranium dengan neutron yang bergerak lambat di hadapanmu. Ini akan menjadi pengalaman yang berharga.",
    "Anda bisa menjadi orang pertama yang menginjak matahari. Selamat mencoba.",
]
# Kaomoji appended by the .owo handler.
UWUS = [
    "(・`ω´・)",
    ";;w;;",
    "owo",
    "UwU",
    ">w<",
    "^w^",
    r"\(^o\) (/o^)/",
    "( ^ _ ^)∠☆",
    "(ô_ô)",
    "~:o",
    ";-;",
    "(*^*)",
    "(>_",
    "(♥_♥)",
    "*(^O^)*",
    "((+_+))",
]
# NOTE(review): IWIS is byte-identical to SHGS below and is not referenced by
# any handler visible in this file — possibly dead data; confirm before removal.
IWIS = [
    "┐(´д`)┌",
    "┐(´~`)┌",
    "┐(´ー`)┌",
    "┐( ̄ヘ ̄)┌",
    "╮(╯∀╰)╭",
    "╮(╯_╰)╭",
    "┐(´д`)┌",
    "┐(´∀`)┌",
    "ʅ(́◡◝)ʃ",
    "┐(゚~゚)┌",
    "┐('д')┌",
    "┐(‘~`;)┌",
    "ヘ(´-`;)ヘ",
    "┐( -“-)┌",
    "ʅ(´◔౪◔)ʃ",
    "ヽ(゜~゜o)ノ",
    "ヽ(~~~ )ノ",
    "┐(~ー~;)┌",
    "┐(-。ー;)┌",
    r"¯\_(ツ)_/¯",
    r"¯\_(⊙_ʖ⊙)_/¯",
    r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
    "乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
]
# Kaomoji reactions for the .react handler.
FACEREACTS = [
    "ʘ‿ʘ",
    "ヾ(-_- )ゞ",
    "(っ˘ڡ˘ς)",
    "(´ж`ς)",
    "( ಠ ʖ̯ ಠ)",
    "(° ͜ʖ͡°)╭∩╮",
    "(ᵟຶ︵ ᵟຶ)",
    "(งツ)ว",
    "ʚ(•`",
    "(っ▀¯▀)つ",
    "(◠﹏◠)",
    "( ͡ಠ ʖ̯ ͡ಠ)",
    "( ఠ ͟ʖ ఠ)",
    "(∩`-´)⊃━☆゚.*・。゚",
    "(⊃。•́‿•̀。)⊃",
    "(._.)",
    "{•̃_•̃}",
    "(ᵔᴥᵔ)",
    "♨_♨",
    "⥀.⥀",
    "ح˚௰˚づ ",
    "(҂◡_◡)",
    "ƪ(ړײ)ƪ",
    "(っ•́。•́)♪♬",
    "◖ᵔᴥᵔ◗ ♪ ♫ ",
    "(☞゚ヮ゚)☞",
    "[¬º-°]¬",
    "(Ծ‸ Ծ)",
    "(•̀ᴗ•́)و ̑̑",
    "ヾ(´〇`)ノ♪♪♪",
    "(ง'̀-'́)ง",
    "ლ(•́•́ლ)",
    "ʕ •́؈•̀ ₎",
    "♪♪ ヽ(ˇ∀ˇ )ゞ",
    "щ(゚Д゚щ)",
    "( ˇ෴ˇ )",
    "눈_눈",
    "(๑•́ ₃ •̀๑) ",
    "( ˘ ³˘)♥ ",
    "ԅ(≖‿≖ԅ)",
    "♥‿♥",
    "◔_◔",
    "⁽⁽ଘ( ˊᵕˋ )ଓ⁾⁾",
    "乁( ◔ ౪◔)「 ┑( ̄Д  ̄)┍",
    "( ఠൠఠ )ノ",
    "٩(๏_๏)۶",
    "┌(ㆆ㉨ㆆ)ʃ",
    "ఠ_ఠ",
    "(づ。◕‿‿◕。)づ",
    "(ノಠ ∩ಠ)ノ彡( \\o°o)\\",
    "“ヽ(´▽`)ノ”",
    "༼ ༎ຶ ෴ ༎ຶ༽",
    "。゚( ゚இ‸இ゚)゚。",
    "(づ ̄ ³ ̄)づ",
    "(⊙.☉)7",
    "ᕕ( ᐛ )ᕗ",
    "t(-_-t)",
    "(ಥ⌣ಥ)",
    "ヽ༼ ಠ益ಠ ༽ノ",
    "༼∵༽ ༼⍨༽ ༼⍢༽ ༼⍤༽",
    "ミ●﹏☉ミ",
    "(⊙_◎)",
    "¿ⓧ_ⓧﮌ",
    "ಠ_ಠ",
    "(´・_・`)",
    "ᕦ(ò_óˇ)ᕤ",
    "⊙﹏⊙",
    "(╯°□°)╯︵ ┻━┻",
    r"¯\_(⊙︿⊙)_/¯",
    "٩◔̯◔۶",
    "°‿‿°",
    "ᕙ(⇀‸↼‶)ᕗ",
    "⊂(◉‿◉)つ",
    "V•ᴥ•V",
    "q(❂‿❂)p",
    "ಥ_ಥ",
    "ฅ^•ﻌ•^ฅ",
    "ಥ﹏ಥ",
    "( ^_^)o自自o(^_^ )",
    "ಠ‿ಠ",
    "ヽ(´▽`)/",
    "ᵒᴥᵒ#",
    "( ͡° ͜ʖ ͡°)",
    "┬─┬ ノ( ゜-゜ノ)",
    "ヽ(´ー`)ノ",
    "☜(⌒▽⌒)☞",
    "ε=ε=ε=┌(;*´Д`)ノ",
    "(╬ ಠ益ಠ)",
    "┬─┬⃰͡ (ᵔᵕᵔ͜ )",
    "┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻",
    r"¯\_(ツ)_/¯",
    "ʕᵔᴥᵔʔ",
    "(`・ω・´)",
    "ʕ•ᴥ•ʔ",
    "ლ(`ー´ლ)",
    "ʕʘ̅͜ʘ̅ʔ",
    "( ゚Д゚)",
    r"¯\(°_o)/¯",
    "(。◕‿◕。)",
]
# Random "running away" messages (Indonesian) for the .run handler.
RUNS_STR = [
    "Berlari ke Thanos..",
    "Berlari jauh, jauh dari bumi..",
    "Berlari lebih cepat dari Bolt karena aku pengguna bot !!",
    "Berlari ke Mia Khalifa..",
    "Grup ini terlalu berbahaya untuk ditangani, aku harus lari.",
    "`Berlari Dari Orang Yang Bau Sawi 😬`",
    "Aku sangat lelah untuk berlari dan mengejarmu 💔",
    "Aku pergi dulu",
    "Saya hanya berjalan pergi, karena saya terlalu gemuk untuk lari.",
    "Saya Cape!",
    "Larii Disini Bau Sawii 😭",
    "Saya lari karena saya sangat gabut.",
    "Lari... \nkarena diet bukanlah pilihan.",
    "Berlari Cepat Dari Orang Gila",
    "Jika kamu ingin menangkapku, kamu harus cepat... \nJika kamu ingin tinggal bersamaku, kamu harus menjadi orang yang baik... \nTapi jika kamu ingin melewati aku... \nKamu pasti bercanda. ",
    "Siapapun dapat berlari seratus meter, itu hitungan empat puluh dua ribu dua ratus berikutnya.",
    "Mengapa semua orang ini mengikuti saya?",
    "Apakah anak-anak masih mengejarku?",
    "Berlari Sekencang Super Dede.. Apakah Sopan Begitu?",
]
# Random "chase" messages (Indonesian) for the .chase handler.
# Fix: one entry was split across two adjacent string literals without a
# separating space, so implicit concatenation produced "mungkinjadilah";
# the two pieces are merged into one correctly spaced sentence.
CHASE_STR = [
    "Menurutmu kemana kamu akan pergi?",
    "Hah? Apa? Apakah mereka lolos?",
    "ZZzzZZzz... Hah? Apa? Oh, hanya mereka lagi, lupakan.",
    "Kembali kesini!",
    "Tidak terlalu cepat...",
    "Awas ke dinding!",
    "Jangan tinggalkan aku sendiri dengan mereka !!",
    "Kamu lari, kamu mati.",
    "Bercanda, aku ada dimana-mana",
    "Kamu akan menyesali itu ...",
    "Kamu juga bisa mencoba /kickme, kudengar itu menyenangkan.",
    "Ganggu orang lain, tidak ada yang peduli.",
    "Kamu bisa lari, tapi kamu tidak bisa bersembunyi.",
    "Apakah hanya itu yang kamu punya?",
    "Saya di belakang Anda...",
    "Anda punya teman!",
    "Kita bisa melakukan ini dengan cara mudah, atau cara sulit.",
    "Anda tidak mengerti, bukan?",
    "Ya, sebaiknya kau lari!",
    "Tolong, ingatkan saya apakah saya peduli?",
    "Aku akan lari lebih cepat jika jadi kamu.",
    "Itu pasti droid yang kami cari.",
    "Semoga peluang selalu menguntungkan Anda.",
    "Kata-kata terakhir yang terkenal.",
    "Dan mereka menghilang selamanya, tidak pernah terlihat lagi.",
    "Oh, lihat aku! Saya sangat keren, saya bisa lari dari bot orang ini",
    "Ya ya, cukup ketuk /kickme.",
    "Ini, ambil cincin ini dan pergilah ke Mordor saat kamu melakukannya.",
    "Legenda mengatakan, mereka masih berjalan...",
    "Tidak seperti Harry Potter, orang tuamu tidak bisa melindungimu dariku.",
    "Ketakutan menyebabkan kemarahan. Kemarahan mengarah pada kebencian. Kebencian menyebabkan penderitaan. Jika Anda terus berlari dalam ketakutan, Anda mungkin jadilah Vader berikutnya.",
    "Beberapa kalkulasi nanti, saya telah memutuskan minat saya pada kejahatan Anda tepat 0.",
    "Legenda mengatakan, mereka masih berjalan.",
    "Teruskan, kami tidak yakin kami menginginkanmu di sini.",
    "Kamu seorang penyihir- Oh. Tunggu. Kamu bukan Harry, terus bergerak.",
    "JANGAN BERLARI DI SINI!",
    "Hasta la vista, sayang.",
    "Siapa yang membiarkan anjing keluar?",
    "Ini lucu, karena tidak ada yang peduli.",
    "Ah, sayang sekali, Aku suka yang itu.",
    "Terus terang, sayangku, aku tidak peduli.",
    "Milkshake saya membawa semua anak laki-laki ke halaman... Jadi lari lebih cepat!",
    "Anda tidak bisa MENANGANI kebenaran!",
    "Dahulu kala, di galaksi yang sangat jauh... Seseorang akan peduli tentang itu, Tapi sekarang tidak lagi.",
    "Hei, lihat mereka! Mereka lari dari palu yang tak terelakkan... Manis.",
    "Han menembak lebih dulu, Aku juga.",
    "Apa yang kamu kejar, kelinci putih?",
    "Seperti yang dikatakan The Doctor... LARI!",
]
# Random greetings for the .hi handler.
HELLOSTR = [
    "Hai!",
    "'Ello, bro!",
    "Apa itu crackin?",
    "Apa kabarmu?",
    "Halo, apa kabar, apa kabar!",
    "Halo, siapa di sana, saya sedang berbicara.",
    "Kamu tahu siapa ini.",
    "Yo!",
    "Wassup.",
    "Salam dan salam!",
    "Halo, sinar matahari!",
    "Hei, apa kabar, hai!",
    "Apa yang menendang, ayam kecil?",
    "Ciluk ba!",
    "Halo-bagus!",
    "Halo, mahasiswa baru!",
    "Saya datang dengan damai!",
    "Ahoy, sobat!",
    "Hiya!",
]
# Shrug kaomoji for the .shg handler (same data as IWIS above).
SHGS = [
    "┐(´д`)┌",
    "┐(´~`)┌",
    "┐(´ー`)┌",
    "┐( ̄ヘ ̄)┌",
    "╮(╯∀╰)╭",
    "╮(╯_╰)╭",
    "┐(´д`)┌",
    "┐(´∀`)┌",
    "ʅ(́◡◝)ʃ",
    "┐(゚~゚)┌",
    "┐('д')┌",
    "┐(‘~`;)┌",
    "ヘ(´-`;)ヘ",
    "┐( -“-)┌",
    "ʅ(´◔౪◔)ʃ",
    "ヽ(゜~゜o)ノ",
    "ヽ(~~~ )ノ",
    "┐(~ー~;)┌",
    "┐(-。ー;)┌",
    r"¯\_(ツ)_/¯",
    r"¯\_(⊙_ʖ⊙)_/¯",
    r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
    "乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
]
# Crying kaomoji for the .cry handler.
CRI = [
    "أ‿أ",
    "╥﹏╥",
    "(;﹏;)",
    "(ToT)",
    "(┳Д┳)",
    "(ಥ﹏ಥ)",
    "(;へ:)",
    "(T_T)",
    "(πーπ)",
    "(T▽T)",
    "(⋟﹏⋞)",
    "(iДi)",
    "(´Д⊂ヽ",
    "(;Д;)",
    "(>﹏<)",
    "(TдT)",
    "(つ﹏⊂)",
    "༼☯﹏☯༽",
    "(ノ﹏ヽ)",
    "(ノAヽ)",
    "(╥_╥)",
    "(T⌓T)",
    "(༎ຶ⌑༎ຶ)",
    "(☍﹏⁰)。",
    "(ಥ_ʖಥ)",
    "(つд⊂)",
    "(≖͞_≖̥)",
    "(இ﹏இ`。)",
    "༼ಢ_ಢ༽",
    "༼ ༎ຶ ෴ ༎ຶ༽",
]
# Sentence templates for .slap (default / "en" phrase set).  slap() fills
# the placeholders {victim}, {item}, {hits}, {throws} and {where} via
# str.format, so only those names may appear here.
# Fixes: "{Victim}" and "{korban}" raised KeyError at format time (slap()
# never supplies those keys); a missing comma also fused two templates via
# implicit string concatenation.
SLAP_TEMPLATES_EN = [
    "{hits} {victim} dengan {item}.",
    "{hits} {victim} di wajah dengan {item}.",
    "{hits} {victim} sekitar sedikit dengan {item}.",
    "{throws} {item} ke {victim}.",
    "mengambil {item} dan {throws} ke wajah {victim}.",
    "Menusuk {victim} dengan tombak cinta.",
    "{throws} beberapa {item} ke {victim}.",
    "mengambil {item} dan {throws} ke wajah {victim}.",
    "meluncurkan {item} ke arah umum {victim}.",
    "duduk di wajah {victim} sambil membanting {item}.",
    "mulai menampar {victim} dengan konyol dengan {item}.",
    "pin {victim} ke bawah dan berulang kali {hits} mereka dengan {item}.",
    "mengambil {item} dan {hits} {victim} dengannya.",
    "mulai menampar {victim} dengan konyol dengan {item}.",
    "menahan {victim} dan berulang kali {hits} mereka dengan {item}.",
    "memukul {victim} dengan {item}.",
    "mengambil {item} dan {hits} {victim} dengannya.",
    "mengikat {victim} ke kursi dan {throws} {item} padanya.",
    "{hits} {victim} {where} dengan {item}.",
    "mengikat {victim} ke tiang dan mencambuk mereka {where} dengan {item}.",
    "memberikan dorongan ramah untuk membantu {victim} belajar berenang di lahar.",
    "mengirim {victim} ke /laut /lahar.",
    "mengirim {victim} ke lubang memori.",
    "memenggal {victim}.",
    "melemparkan {victim} dari sebuah gedung.",
    "mengganti semua musik {victim} dengan lagu iri bilang bos.",
    "spam email {victim}.",
    "membuat {victim} depresi.",
    "menampar {victim} tanpa apa-apa.",
    "pukul {victim} dengan pesawat garuda.",
    "memukul kepala {victim}.",
    "taruh {victim} di tong sampah.",
    "Menendang {victim} dan melemparnya ke sungai.",
    "letakkan {victim} di rumah hantu.",
    "menampar {victim} dengan tongkat besi!",
]
# Objects substituted into the {item} placeholder of the "en" slap templates.
ITEMS_EN = [
    "Tabung Gas",
    "Televisi 42 In",
    "Raket",
    "Raket Nyamuk",
    "Kaca",
    "Buku",
    "Ringgis",
    "Telur",
    "Jarum",
    "Monitor Tabung",
    "Obeng",
    "Almunium",
    "Emas",
    "Printer",
    "Speaker",
    "Gas Lpg",
    "Tangki Bensin",
    "Tandon Air",
    "Bola Boling",
    "Laptop",
    "Hardisk Rusak",
    "Wajan Panas",
    "Virus Corona",
    "Meja Kantor",
    "Meja Arsip",
    "Lemari",
    "Ember Besi",
    "Besi Beton",
    "Timah Panas",
    "Harimau",
    "Batu Krikil",
    "Makanan Basi",
    "Pesawat AirBus",
    "Roket Nasa",
    "Satelit Nasa",
    "Matahari",
    "Meteor",
    "Berkas Kantor",
    "Beton panas",
    "Cermin",
    "Batu Giok",
    "Botol",
    "Nezuko",
    "Kaset Pita",
    "Tiang Jemuran",
    "Pisau Lipat",
    "Bongkahan Es ",
    "Asteroid",
]
# Verbs for the {throws} placeholder ("en" set).
THROW_EN = [
    "melempar",
    "melemparkan",
]
# Verbs for the {hits} placeholder ("en" set).
HIT_EN = [
    "memukul",
    "menendang",
    "menampar",
    "memukul",
    "melempar",
]
# Body locations for the {where} placeholder ("en" set).
WHERE_EN = ["di pipi", "di kepala", "di pantat", "di badan"]
# Sentence templates for .slap id ("id" phrase set).  Only the placeholders
# supplied by slap() are allowed: {victim}, {item}, {hits}, {throws}, {where}.
# Fix: one template used "{anime}", a key slap() never passes, so picking it
# raised KeyError at format time; it now uses {item} like its siblings.
SLAP_TEMPLATES_ID = [
    "{hits} {victim} dengan {item}.",
    "{throws} sebuah {item} kepada {victim}.",
    "mengambil {item} dan {hits} {victim} .",
    "Mengambil Sebuah {item} dan {hits} {victim} Dengan itu.",
    "Menjatuhkan {victim} Ke Lava.",
    "Mengirimkan {victim} ke Kawah.",
    "Membuang {victim} Ke Laut.",
    "Mengeluarkan {victim} Dari Bumi.",
    "Melempar {victim} Ke luar angkasa.",
    "Menaruh {victim} di Pluto.",
    "Melemparkan sebuah {item} ke {victim}.",
    "Melemparkan {item} kepada {victim}.",
    "Menampar {victim} menggunakan {item}.",
    "Membuang {victim} Ke udara.",
    "Menghapus {victim} Dari Daftar Teman.",
    "Melemparkan {item} {where} {victim}.",
    "Meletakan {item} {where} {victim}.",
    "Menyerang {victim} menggunakan {item}.",
    "Mengehack Seluruh akun {victim}",
]
# Objects for the {item} placeholder ("id" slap set).
# NOTE(review): identical content to ITEMS_EN above.
ITEMS_ID = [
    "Tabung Gas",
    "Televisi 42 In",
    "Raket",
    "Raket Nyamuk",
    "Kaca",
    "Buku",
    "Ringgis",
    "Telur",
    "Jarum",
    "Monitor Tabung",
    "Obeng",
    "Almunium",
    "Emas",
    "Printer",
    "Speaker",
    "Gas Lpg",
    "Tangki Bensin",
    "Tandon Air",
    "Bola Boling",
    "Laptop",
    "Hardisk Rusak",
    "Wajan Panas",
    "Virus Corona",
    "Meja Kantor",
    "Meja Arsip",
    "Lemari",
    "Ember Besi",
    "Besi Beton",
    "Timah Panas",
    "Harimau",
    "Batu Krikil",
    "Makanan Basi",
    "Pesawat AirBus",
    "Roket Nasa",
    "Satelit Nasa",
    "Matahari",
    "Meteor",
    "Berkas Kantor",
    "Beton panas",
    "Cermin",
    "Batu Giok",
    "Botol",
    "Nezuko",
    "Kaset Pita",
    "Tiang Jemuran",
    "Pisau Lipat",
    "Bongkahan Es ",
    "Asteroid",
]
# Verbs for the {throws} placeholder ("id" set).
THROW_ID = [
    "Melempar",
    "Melemparkan",
]
# Verbs for the {hits} placeholder ("id" set).
HIT_ID = [
    "Memukul",
    "melemparkan",
    "Memukuli",
]
# Body locations for the {where} placeholder ("id" set).
WHERE_ID = ["di pipi", "di kepala", "di bokong", "di badan"]
# "jutsu" slap phrase set: here the {hits}/{throws} slots are filled with
# attack *names* (see HIT_Jutsu/THROW_Jutsu) rather than verbs.
SLAP_TEMPLATES_Jutsu = [
    "Menyerang {victim} Menggunakan {hits}.",
    "Menyerang {victim} Menggunakan {item}.",
    "Melemparkan {throws} kepada {victim} .",
    "Melemparkan {throws} {where} {victim}."
]
# Attack names for the {item} placeholder ("jutsu" set).
ITEMS_Jutsu = [
    "KAA MEE HAA MEE HAA",
    "Chibaku Tensei",
]
# Attack names for the {throws} placeholder ("jutsu" set).
THROW_Jutsu = [
    "Futon Rasen Shuriken",
    "Shuriken",
]
# Attack names for the {hits} placeholder ("jutsu" set).
HIT_Jutsu = [
    "Rasengan",
    "Chidori",
]
GAMBAR_TITIT = """
😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋😋
😋😋😋😋😋😋
😋😋😋 😋😋😋
😋😋 😋😋
"""
GAMBAR_OK = """
░▐▀▀▀▀▀▀▀▀▌▐▀▌▄▄▄▀▀▓▀
░▐▌▓▀▀▀▀▓▌▌▐▐▌▀▌▄▄▀░░
░▐▐▌▐▀▀▌▐▐▌▐▌▐▓▄▀░░░░
░▐▌▌▐▄▄▌▐▌▌▐▐▌▓▀▄░░░░
░▐▐▓▄▄▄▄▓▐▌▐▌▌▄▌▀▀▄░░
░▐▄▄▄▄▄▄▄▄▌▐▄▌▀▀▀▄▄▓▄
"""
GAMBAR_TENGKORAK = """
░░░░░░░░░░░░░▄▐░░░░
░░░░░░░▄▄▄░░▄██▄░░░
░░░░░░▐▀█▀▌░░░░▀█▄░
░░░░░░▐█▄█▌░░░░░░▀█▄
░░░░░░░▀▄▀░░░▄▄▄▄▄▀▀
░░░░░▄▄▄██▀▀▀▀░░░░░
░░░░█▀▄▄▄█░▀▀░░░░░░
░░░░▌░▄▄▄▐▌▀▀▀░░░░░
░▄░▐░░░▄▄░█░▀▀░░░░░
░▀█▌░░░▄░▀█▀░▀░░░░░
░░░░░░░░▄▄▐▌▄▄░░░░░
░░░░░░░░▀███▀█▄░░░░
░░░░░░░▐▌▀▄▀▄▀▐░░░░
░░░░░░░▐▀░░░░░░▐▌░░
░░░░░░░█░░░░░░░░█░░
░░░░░░▐▌░░░░░░░░░█░
"""
GAMBAR_KONTL = """
⣠⡶⠚⠛⠲⢄⡀
⣼⠁ ⠀⠀⠀ ⠳⢤⣄
⢿⠀⢧⡀⠀⠀⠀⠀⠀⢈⡇
⠈⠳⣼⡙⠒⠶⠶⠖⠚⠉⠳⣄
⠀⠀⠈⣇⠀⠀⠀⠀⠀⠀⠀⠈⠳⣄
⠀⠀⠀⠘⣆ ⠀⠀⠀⠀ ⠀⠈⠓⢦⣀
⠀⠀⠀⠀⠈⢳⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⠲⢤
⠀⠀⠀⠀⠀⠀⠙⢦⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⢧
⠀⠀⠀⠀⠀⠀⠀⡴⠋⠓⠦⣤⡀⠀⠀⠀⠀⠀⠀⠀⠈⣇
⠀⠀⠀⠀⠀⠀⣸⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⡄
⠀⠀⠀⠀⠀⠀⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⡇
⠀⠀⠀⠀⠀⠀⢹⡄⠀⠀⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⠃
⠀⠀⠀⠀⠀⠀⠀⠙⢦⣀⣳⡀⠀⠀⠀⠀⠀⠀⠀⠀⣰⠏
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⠛⢦⣀⣀⣀⣀⣠⡴⠚⠁⠉⠉⠉
"""
# Body locations for the {where} placeholder ("jutsu" set).
# NOTE(review): the last entry embeds a comma ("Di Badan ,Di Pantat") and may
# have been meant as two separate entries — confirm before splitting.
WHERE_Jutsu = ["Di Pipi", "Di Kepala", "Di Bokong", "Di Badan ,Di Pantat"]
# Plain lowercase alphabet and its CJK look-alike counterpart, index-aligned
# so that normiefont[i] maps to weebyfont[i] (used by the .weeb handler).
# Rewritten from two 27-line single-character list literals to the
# equivalent, easier-to-audit list("...") form.
normiefont = list("abcdefghijklmnopqrstuvwxyz")
weebyfont = list("卂乃匚刀乇下厶卄工丁长乚从𠘨口尸㔿尺丂丅凵リ山乂丫乙")
# ===========================================
@register(outgoing=True, pattern=r"^\.(\w+)say (.*)")
async def univsaye(cowmsg):
    """Cowsay wrapper: `.{cowacter}say <text>` renders <text> via cowpy.

    `.cowsay` maps to cowpy's "default" cowacter; unknown cowacters are
    silently ignored.
    """
    arg = cowmsg.pattern_match.group(1).lower()
    text = cowmsg.pattern_match.group(2)
    if arg == "cow":
        arg = "default"
    if arg not in cow.COWACTERS:
        return
    cheese = cow.get_cow(arg)
    cheese = cheese()
    # Escape backticks so the art survives Telegram's monospace markup.
    # Computed outside the f-string: reusing the same quote type inside an
    # f-string is only legal on Python >= 3.12 (PEP 701), so the original
    # line was a SyntaxError on older interpreters.
    rendered = cheese.milk(text).replace("`", "´")
    await cowmsg.edit(f"`{rendered}`")
@register(outgoing=True, pattern=r"^\.coinflip (.*)")
async def coin(event):
    """Flip a coin and, if the user supplied a guess, grade it.

    Fix: the original lowercased the guess and then compared it against the
    capitalized literals "Kepala"/"Ekor", so a guess could never match and
    the win/lose messages were unreachable.
    """
    flip = choice(["Kepala", "Ekor"])
    guess = event.pattern_match.group(1)
    if guess:
        guess = guess.lower()
    result = f"Koin Itu Mendarat Di: **{flip}**."
    if guess == flip.lower():
        await event.edit(result + "\nKamu Benar.")
    elif guess in ("kepala", "ekor"):
        # A valid guess that did not match the flip.
        await event.edit(result + "\nKamu Salah, Coba Lagi...")
    else:
        # No guess (or gibberish): just announce the outcome.
        await event.edit(result)
@register(pattern=r"^\.slap(?: |$)(.*)", outgoing=True)
async def who(event):
    """Slap the replied-to user (or do nothing when no target is found)."""
    target = await get_user_from_event(event)
    if not target:
        return
    caption = await slap(target[0], event)
    try:
        await event.edit(caption)
    except BaseException:
        await event.edit(
            "`Tidak bisa slap orang ini, perlu mengambil beberapa meteor dan batu!`"
        )
async def slap(replied_user, event):
    """Build a random slap sentence aimed at *replied_user*.

    The optional argument after `.slap` selects the phrase set ("en", "id"
    or "jutsu"); anything else falls back to "en".  Returns the formatted
    caption string.
    """
    user_id = replied_user.id
    first_name = replied_user.first_name
    username = replied_user.username
    # Prefer an @mention; otherwise link the first name to the user id.
    if username:
        slapped = "@{}".format(username)
    else:
        slapped = f"[{first_name}](tg://user?id={user_id})"
    # locale -> (templates, items, hit verbs, throw verbs, places).
    # Replaces four copy-pasted if/elif branches (the "else" duplicated "en").
    phrase_sets = {
        "en": (SLAP_TEMPLATES_EN, ITEMS_EN, HIT_EN, THROW_EN, WHERE_EN),
        "id": (SLAP_TEMPLATES_ID, ITEMS_ID, HIT_ID, THROW_ID, WHERE_ID),
        "jutsu": (SLAP_TEMPLATES_Jutsu, ITEMS_Jutsu, HIT_Jutsu,
                  THROW_Jutsu, WHERE_Jutsu),
    }
    templates, items, hits, throws, places = phrase_sets.get(
        event.pattern_match.group(1), phrase_sets["en"])
    caption = "..." + choice(templates).format(
        victim=slapped,
        item=choice(items),
        hits=choice(hits),
        throws=choice(throws),
        where=choice(places),
    )
    return caption
@register(outgoing=True, pattern=r"^\.boobs(?: |$)(.*)")
async def boobs(e):
    """Fetch a random NSFW picture from the oboobs.ru API and send it."""
    await e.edit("`Berdosa, Mendapatkan Gambar Boobs...`")
    await sleep(3)
    await e.edit("`Mengirim Gambar Boobs...`")
    # The API answers a JSON list whose entries expose the relative media
    # path under "preview"; the original looked up a translated key
    # ("Gambar Boobs") that the API never returns, so it always raised
    # KeyError.  Download straight to the final name instead of writing a
    # literal "*.jpg" file and renaming it.
    nsfw = requests.get("http://api.oboobs.ru/noise/1").json()[0]["preview"]
    urllib.request.urlretrieve(
        "http://media.oboobs.ru/{}".format(nsfw), "boobs.jpg")
    await e.client.send_file(e.chat_id, "boobs.jpg")
    os.remove("boobs.jpg")
    await e.delete()
@register(outgoing=True, pattern=r"^\.pantat(?: |$)(.*)")
async def butts(e):
    """Fetch a random NSFW picture from the obutts.ru API and send it."""
    await e.edit("`Berdosa, Mendapatkan Gambar Pantat Yang Indah...`")
    await sleep(3)
    await e.edit("`Mengirim Gambar Pantat Indah...`")
    # Same fix as .boobs: the API exposes the media path under "preview";
    # the translated key "Gambar Pantat" never exists in the response.
    nsfw = requests.get("http://api.obutts.ru/noise/1").json()[0]["preview"]
    urllib.request.urlretrieve(
        "http://media.obutts.ru/{}".format(nsfw), "butts.jpg")
    await e.client.send_file(e.chat_id, "butts.jpg")
    os.remove("butts.jpg")
    await e.delete()
@register(outgoing=True, pattern=r"^\.(yes|no|maybe|decide)$")
async def decide(event):
    """Answer with a GIF from yesno.wtf.

    `.decide` lets the API pick randomly; `.yes`/`.no`/`.maybe` force that
    outcome.  Replies to the same message the command replied to, if any.
    """
    decision = event.pattern_match.group(1).lower()
    message_id = event.reply_to_msg_id  # already None when not a reply
    if decision != "decide":
        r = requests.get(f"https://yesno.wtf/api?force={decision}").json()
    else:
        r = requests.get("https://yesno.wtf/api").json()
    await event.delete()
    await event.client.send_message(event.chat_id,
                                    str(r["answer"]).upper(),
                                    reply_to=message_id,
                                    file=r["image"])
# Replace the command with a facepalm emoji.
@register(outgoing=True, pattern=r"^\.fp$")
async def facepalm(e):
    """ Facepalm 🤦♂ """
    await e.edit("🤦♂")
# Replace the command with a random crying kaomoji from CRI.
@register(outgoing=True, pattern=r"^\.cry$")
async def cry(e):
    """ y u du dis, i cry everytime !! """
    await e.edit(choice(CRI))
# Replace the command with a random insult from INSULT_STRINGS.
@register(outgoing=True, pattern=r"^\.insult$")
async def insult(e):
    """ I make you cry !! """
    await e.edit(choice(INSULT_STRINGS))
@register(outgoing=True, pattern=r"^\.cp(?: |$)(.*)")
async def copypasta(cp_e):
    """Copypasta the famous meme.

    Takes the argument text (or the replied-to message) and rebuilds it with
    random case, emoji separators, and one randomly chosen letter replaced
    everywhere by 🅱️.
    """
    textx = await cp_e.get_reply_message()
    message = cp_e.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        return await cp_e.edit("`😂🅱️AhHH👐MaNtAp👅Bro👅UnTuk✌️MeMbuAT👌Ku👐TeRliHat👀LuCu💞HaHAhaA!💦`")
    # Build the reply in a list and join once — the original concatenated
    # with += per character, which is quadratic on long inputs.
    parts = [choice(EMOJIS)]
    # choose a random character in the message to be substituted with 🅱️
    b_char = choice(message).lower()
    for owo in message:
        if owo == " ":
            parts.append(choice(EMOJIS))
        elif owo in EMOJIS:
            parts.append(owo)
            parts.append(choice(EMOJIS))
        elif owo.lower() == b_char:
            parts.append("🅱️")
        else:
            parts.append(owo.upper() if getrandbits(1) else owo.lower())
    parts.append(choice(EMOJIS))
    await cp_e.edit("".join(parts))
@register(outgoing=True, pattern=r"^\.vapor(?: |$)(.*)")
async def vapor(vpr):
    """Vaporize everything: map printable ASCII to its fullwidth form."""
    quoted = await vpr.get_reply_message()
    message = vpr.pattern_match.group(1)
    if not message:
        if quoted:
            message = quoted.text
        else:
            return await vpr.edit("`B e r i k a n S e b u a h T e k s U n t u k Vapor!`")

    def widen(symbol):
        code = ord(symbol)
        if 0x21 <= code <= 0x7F:
            # Shift into the U+FF01..U+FF5F fullwidth block.
            return chr(code + 0xFEE0)
        if code == 0x20:
            # Regular space becomes an ideographic space.
            return chr(0x3000)
        return symbol

    await vpr.edit("".join(widen(symbol) for symbol in message))
@register(outgoing=True, pattern=r"^\.str(?: |$)(.*)")
async def stretch(stret):
    """Randomly stretch the vowels of the given (or replied-to) text."""
    textx = await stret.get_reply_message()
    # The original also assigned `message = stret.text` here and immediately
    # overwrote it — dead code, removed.
    message = stret.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        return await stret.edit("`Beriiiiiiiiikaaannnn sebuuuuuuuuuah teeeeeeeks!`")
    # Repeat every Latin or Cyrillic vowel 3-10 times (the original class
    # listed the Latin vowels twice; a character class ignores duplicates).
    count = randint(3, 10)
    reply_text = sub(r"([aeiouAEIOUаеиоуюяыэё])", (r"\1" * count), message)
    await stret.edit(reply_text)
@register(outgoing=True, pattern=r"^\.zal(?: |$)(.*)")
async def zal(zgfy):
    """Invoke the feeling of chaos: stack zalgo marks on every letter."""
    quoted = await zgfy.get_reply_message()
    message = zgfy.pattern_match.group(1)
    if not message:
        if quoted:
            message = quoted.text
        else:
            return await zgfy.edit(
                "`b̜́ͨe͒͜r̠͂ͬi̷̱̋k͖͒ͤa̋ͫ͑n͕͂͗ t̢͘͟e͂̽̈́k͎͂͠s̤͚ͭ m̪͔͑è͜͡n͈ͮḁ͞ͅk̲̮͛u̺͂ͩt̬̗́k͍̙̮á ̺n̨̹ͪ`"
            )
    haunted = []
    for symbol in message:
        if not symbol.isalpha():
            haunted.append(symbol)
            continue
        # Pile three random combining marks onto the letter; the lists'
        # entries carry a leading space that gets stripped here.
        for _ in range(0, 3):
            bucket = ZALG_LIST[randint(0, 2)]
            symbol = symbol.strip() + choice(bucket).strip()
        haunted.append(symbol)
    await zgfy.edit("".join(haunted))
# Replace the command with a random greeting from HELLOSTR.
@register(outgoing=True, pattern=r"^\.hi$")
async def hoi(hello):
    """ Greet everyone! """
    await hello.edit(choice(HELLOSTR))
@register(outgoing=True, pattern=r"^\.owo(?: |$)(.*)")
async def faces(owo):
    """UwU-ify the given (or replied-to) text."""
    quoted = await owo.get_reply_message()
    message = owo.pattern_match.group(1)
    if not message:
        if quoted:
            message = quoted.text
        else:
            return await owo.edit("` Mohon Berikan Teks UwU! `")
    # Apply the simple letter substitutions in order.
    result = message
    for pattern, replacement in (
        (r"(r|l)", "w"),
        (r"(R|L)", "W"),
        (r"n([aeiou])", r"ny\1"),
        (r"N([aeiouAEIOU])", r"Ny\1"),
    ):
        result = sub(pattern, replacement, result)
    # One kaomoji replaces every run of "!", another one is appended.
    result = sub(r"\!+", " " + choice(UWUS), result)
    result = result.replace("ove", "uv")
    result += " " + choice(UWUS)
    await owo.edit(result)
# Replace the command with a random kaomoji from FACEREACTS.
@register(outgoing=True, pattern=r"^\.react$")
async def react_meme(react):
    """ Make your userbot react to everything. """
    await react.edit(choice(FACEREACTS))
# Replace the command with a random shrug from SHGS.
@register(outgoing=True, pattern=r"^\.shg$")
async def shrugger(shg):
    r""" ¯\_(ツ)_/¯ """
    await shg.edit(choice(SHGS))
# Replace the command with a random chase line from CHASE_STR.
@register(outgoing=True, pattern=r"^\.chase$")
async def police(chase):
    """Run bro run, I will catch you soon!!"""
    await chase.edit(choice(CHASE_STR))
# Replace the command with a random fleeing line from RUNS_STR.
@register(outgoing=True, pattern=r"^\.run$")
async def runner_lol(run):
    """Run, run, RUN!"""
    await run.edit(choice(RUNS_STR))
# Replace the command with a random "me too" line from METOOSTR.
@register(outgoing=True, pattern=r"^\.metoo$")
async def metoo(hahayes):
    """ Haha yes """
    await hahayes.edit(choice(METOOSTR))
@register(outgoing=True, pattern=r"^\.oem$")
async def oem(e):
    """Reply with a drawn-out "Oeee...em"."""
    # The original grew the string in a 16-step loop; the result is simply
    # "O" followed by 17 "e"s and an "m".
    await e.edit("O" + "e" * 17 + "m")
@register(outgoing=True, pattern=r"^\.Oem$")
async def Oem(e):
    """Capitalized twin of .oem — produces the identical string."""
    await e.edit("O" + "e" * 17 + "m")
@register(outgoing=True, pattern=r"^\.10iq$")
async def iqless(e):
    """Reply to `.10iq` with a wheelchair emoji."""
    await e.edit("♿")
@register(outgoing=True, pattern=r"^\.fuck$")
async def middle_finger(e):
    """Draw a middle finger made of 🖕 emoji.

    Renamed from `iqless`: the module defined two coroutines with the same
    name, so this one silently shadowed the handler above at module level
    (both still fired only because registration happens at decoration time).
    The pattern's leading dot is now escaped for consistency with the other
    handlers.
    """
    await e.edit("🖕🖕🖕🖕🖕🖕🖕🖕\n🖕🖕🖕🖕🖕🖕🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕🖕🖕🖕🖕\n🖕🖕🖕🖕🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕")
# Animate a ring of moon-phase emoji: 32 frames, 0.1 s apart (~3.2 s).
@register(outgoing=True, pattern=r"^\.moon$")
async def moon(event):
    deq = deque(list("🌗🌘🌑🌒🌓🌔🌕🌖"))
    try:
        for x in range(32):
            await sleep(0.1)
            await event.edit("".join(deq))
            deq.rotate(1)
    # NOTE(review): broad swallow — presumably to stop quietly on Telegram
    # flood-wait / edit errors; confirm before narrowing.
    except BaseException:
        return
@register(outgoing=True, pattern=r"^\.bunga$")
async def bunga(event):
    """Animate a rotating ring of flower emoji (35 frames, 0.1 s apart).

    Renamed from `moon`: four animation handlers shared that name and
    shadowed each other at module level.
    """
    deq = deque(list("🌼🌻🌺🌹🌸🌷"))
    try:
        for _ in range(35):
            await sleep(0.1)
            await event.edit("".join(deq))
            deq.rotate(1)
    except BaseException:
        # Stop quietly on edit/flood-wait errors.
        return
@register(outgoing=True, pattern=r"^\.waktu$")
async def waktu(event):
    """Animate a day/night cycle (100 frames, 0.1 s apart). Renamed from `moon`."""
    deq = deque(list("🎑🌄🌅🌇🌆🌃🌌"))
    try:
        for _ in range(100):
            await sleep(0.1)
            await event.edit("".join(deq))
            deq.rotate(1)
    except BaseException:
        return
@register(outgoing=True, pattern=r"^\.buah$")
async def buah(event):
    """Animate a rotating ring of fruit emoji (35 frames). Renamed from `moon`."""
    deq = deque(list("🍉🍓🍇🍎🍍🍐🍌"))
    try:
        for _ in range(35):
            await sleep(0.1)
            await event.edit("".join(deq))
            deq.rotate(1)
    except BaseException:
        return
@register(outgoing=True, pattern=r"^\.clock$")
async def clock(event):
    """Animate a rotating ring of clock-face emoji (32 frames, 0.1 s apart)."""
    deq = deque(list("🕙🕘🕗🕖🕕🕔🕓🕒🕑🕐🕛"))
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(deq))
            deq.rotate(1)
    except BaseException:
        # Stop quietly on edit/flood-wait errors.
        return
@register(outgoing=True, pattern=r"^\.rain$")
async def rain(event):
    """Animate a weather cycle.

    Pattern fix: the dot was unescaped ("^.rain$"), so any single leading
    character matched; now escaped like the other handlers.
    """
    deq = deque(list("☀️🌤⛅️🌥☁️🌧⛈"))
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(deq))
            deq.rotate(1)
    except BaseException:
        return
@register(outgoing=True, pattern=r"^\.love$")
async def love(event):
    """Animate a rotating ring of heart emoji (pattern dot escaped, as above)."""
    deq = deque(list("❤️🧡💛💚💙💜🖤💕💞💓💗💖💘💝"))
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(deq))
            deq.rotate(1)
    except BaseException:
        return
@register(outgoing=True, pattern=r"^\.earth$")
async def earth(event):
    """Animate a spinning globe (pattern dot escaped, as above)."""
    deq = deque(list("🌏🌍🌎🌎🌍🌏🌍🌎"))
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(deq))
            deq.rotate(1)
    except BaseException:
        return
@register(outgoing=True, pattern=r"^\.hati$")
async def hati(event):
    """Animate a rotating ring of heart emoji (32 frames, 0.1 s apart).

    Renamed from `earth`: three handlers in a row reused that name and
    shadowed each other at module level.  The pattern's dot is also escaped
    now (the original "^.hati$" matched any leading character).
    """
    deq = deque(list("🖤💜💙💚💛🧡❤️🤍"))
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(deq))
            deq.rotate(1)
    except BaseException:
        # Stop quietly on edit/flood-wait errors.
        return
@register(outgoing=True, pattern=r"^\.monyet$")
async def monyet(event):
    """Animate alternating monkey emoji. Renamed from `earth`; pattern escaped."""
    deq = deque(list("🙈🙉🙈🙉🙈🙉🙈🙉"))
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(deq))
            deq.rotate(1)
    except BaseException:
        return
@register(outgoing=True, pattern=r"^\.emo$")
async def emo(event):
    """Animate a cycle of face emoji. Renamed from `earth`; pattern escaped."""
    deq = deque(list("🙂😁😄😃😂🤣😭🐵🙊🙉🙈"))
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(deq))
            deq.rotate(1)
    except BaseException:
        return
@register(outgoing=True, pattern=r"^\.mock(?: |$)(.*)")
async def spongemocktext(mock):
    """sPoNgEbOb-mock the given (or replied-to) text."""
    quoted = await mock.get_reply_message()
    message = mock.pattern_match.group(1)
    if not message:
        if quoted:
            message = quoted.text
        else:
            return await mock.edit("`bEriKan PeSan UnTuK MoCk!`")
    # Flip the case of roughly half of the letters; everything else passes
    # through untouched.  (randint is only consulted for alphabetic chars.)
    mocked = "".join(
        (symbol.upper() if symbol.islower() else symbol.lower())
        if symbol.isalpha() and randint(0, 1)
        else symbol
        for symbol in message
    )
    await mock.edit(mocked)
@register(outgoing=True, pattern=r"^\.weeb(?: |$)(.*)")
async def weebify(e):
    """Rewrite the given (or replied-to) text using CJK look-alike glyphs.

    Fixes: the original crashed with AttributeError when `.weeb` was sent
    without text and without a reply (`get.text` on None), and it ran
    `' '.join(args)` on a *string*, which inserted a space between every
    character of the output — almost certainly a leftover from when the
    argument was a list.
    """
    args = e.pattern_match.group(1)
    if not args:
        reply = await e.get_reply_message()
        args = reply.text if reply else None
    if not args:
        await e.edit("`Apa Yang Anda Lakukan Tuan ツ`")
        return
    # One C-level pass via str.translate instead of 26 chained .replace calls.
    weeb_table = str.maketrans("".join(normiefont), "".join(weebyfont))
    await e.edit(args.lower().translate(weeb_table))
@register(outgoing=True, pattern=r"^\.clap(?: |$)(.*)")
async def claptext(memereview):
    """Praise people: wrap and separate every word with 👏."""
    quoted = await memereview.get_reply_message()
    message = memereview.pattern_match.group(1)
    if not message:
        if quoted:
            message = quoted.text
        else:
            return await memereview.edit("`Tuan, Mohon Balas Ke Pesan Orang Yang Ingin Anda Puji ツ`")
    # Joining on " 👏 " is equivalent to replacing every single space.
    clapped = " 👏 ".join(message.split(" "))
    await memereview.edit(f"👏 {clapped} 👏")
# Posts a wall of /slash-prefixed words (which Telegram renders as blue
# command links) — only when replying inside a group.
@register(outgoing=True, pattern=r"^\.teksbiru$")
async def bluetext(bt_e):
    """ Believe me, you will find this useful. """
    if await bt_e.get_reply_message() and bt_e.is_group:
        await bt_e.edit(
            "/TEKSBIRU /APAKAH /ANDA.\n"
            "/SEDANG /GABUT /KARNA /TERTARIK /MELIHAT /TEKS /BIRU /PASTI /ANDA /BOSAN?")
@register(outgoing=True, pattern=r"^\.f (.*)")
async def payf(event):
    """Draw a big letter "F" built from repetitions of the given text."""
    paytext = event.pattern_match.group(1)
    # Each row of the glyph is the text repeated this many times.
    row_widths = (8, 8, 2, 2, 2, 6, 6, 2, 2, 2, 2, 2)
    await event.edit("\n".join(paytext * width for width in row_widths))
@register(outgoing=True, pattern=r"^\.lfy (.*)")
async def let_me_google_that_for_you(lmgtfy_q):
    """Reply with a shortened lmgtfy link for the query (or replied text).

    Fixes: when neither an argument nor a reply was present, `query` was
    never assigned and the handler crashed with NameError; the final edit
    also reused double quotes inside an f-string, which is a SyntaxError on
    Python < 3.12 (PEP 701).
    """
    textx = await lmgtfy_q.get_reply_message()
    qry = lmgtfy_q.pattern_match.group(1)
    if qry:
        query = str(qry)
    elif textx:
        query = textx.message
    else:
        return await lmgtfy_q.edit("`Berikan kueri untuk dicari!`")
    query_encoded = query.replace(" ", "+")
    lfy_url = f"http://lmgtfy.com/?s=g&iie=1&q={query_encoded}"
    payload = {'format': 'json', 'url': lfy_url}
    # is.gd shortens the lmgtfy URL and returns JSON with a "shorturl" key.
    r = requests.get('http://is.gd/create.php', params=payload)
    short_url = r.json()["shorturl"]
    await lmgtfy_q.edit("Ini Dia, Bantu Dirimu Sendiri."
                        f"\n[{query}]({short_url})")
# Replace the command with an emoji-art "HI" banner.
@register(outgoing=True, pattern=r"^\.sayhi$")
async def sayhi(e):
    await e.edit(
        "\n💰💰💰💰💰💰💰💰💰💰💰💰"
        "\n💰🔷💰💰💰🔷💰💰🔷🔷🔷💰"
        "\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
        "\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
        "\n💰🔷🔷🔷🔷🔷💰💰💰🔷💰💰"
        "\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
        "\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
        "\n💰🔷💰💰💰🔷💰💰🔷🔷🔷💰"
        "\n💰💰💰💰💰💰💰💰💰💰💰💰")
@register(pattern=r"^\.scam(?: |$)(.*)", outgoing=True)
async def scam(event):
    """Fake a chat action (typing, uploading, ...) for a while, for fun.

    Usage: `.scam [action] [seconds]` — omitted parts are chosen randomly.
    Fixes: the one-argument branch had its try/except inverted (the str
    branch can never raise ValueError, so a lone number was treated as an
    action and crashed later); the pattern is now anchored with an escaped
    dot, consistent with the other handlers.
    """
    options = [
        'mengetik', 'kontak', 'game', 'lokasi', 'suara', 'bulat', 'video',
        'foto', 'dokumen', 'batal'
    ]
    args = event.pattern_match.group(1).split()
    if len(args) == 0:  # Let bot decide action and time
        scam_action = choice(options)
        scam_time = randint(30, 60)
    elif len(args) == 1:  # User decides time OR action, bot decides the other.
        try:
            scam_time = int(args[0])
            scam_action = choice(options)
        except ValueError:
            scam_action = str(args[0]).lower()
            scam_time = randint(30, 60)
    elif len(args) == 2:  # User decides both action and time
        scam_action = str(args[0]).lower()
        scam_time = int(args[1])
    else:
        await event.edit("`Tidak Valid`")
        return
    try:
        # For long runs, remove the command message before starting.
        if scam_time > 300:
            await event.delete()
        async with event.client.action(event.chat_id, scam_action):
            await sleep(scam_time)
    except BaseException:
        # Invalid action names / flood-waits end the prank silently.
        return
@register(pattern=r".type(?: |$)(.*)", outgoing=True)
async def typewriter(typew):
    """Re-type the given (or replied-to) text one character at a time,
    with a trailing '|' cursor, like a typewriter."""
    replied = await typew.get_reply_message()
    message = typew.pattern_match.group(1)
    if not message:
        if replied:
            message = replied.text
        else:
            return await typew.edit("`Berikan Sebuah Teks Untuk Type!`")
    delay = 0.03
    cursor = "|"
    typed = ""
    await typew.edit(cursor)
    await sleep(delay)
    for ch in message:
        typed += ch
        # Show the cursor, pause, then settle without it.
        await typew.edit(typed + cursor)
        await sleep(delay)
        await typew.edit(typed)
        await sleep(delay)
@register(outgoing=True, pattern=r"^\.leave$")
async def leave(e):
    """Announce that the owner has left the group."""
    first = e.text[0]
    if not first.isalpha() and first not in ("/", "#", "@", "!"):
        await e.edit("`Tuan Telah Meninggalkan Grup ツ`")
@register(outgoing=True, pattern=r"^\.fail$")
async def fail(e):
    """Draw the classic 'FAIL' block art."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ `"
                     "`\n████▌▄▌▄▐▐▌█████ `"
                     "`\n████▌▄▌▄▐▐▌▀████ `"
                     "`\n▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ `")
@register(outgoing=True, pattern=r"^\.lol$")
async def lol(e):
    """Draw 'LOL' in box-drawing art."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n╱┏┓╱╱╱╭━━━╮┏┓╱╱╱╱ `"
                     "`\n╱┃┃╱╱╱┃╭━╮┃┃┃╱╱╱╱ `"
                     "`\n╱┃┗━━┓┃╰━╯┃┃┗━━┓╱ `"
                     "`\n╱┗━━━┛╰━━━╯┗━━━┛╱ `")
@register(outgoing=True, pattern=r"^\.rock$")
async def rock(e):
    """Draw a 'rock on' hand in box-drawing art.

    Renamed from the duplicate `lol` — this function shadowed the `.lol`
    handler's module-level name (the decorator kept both working, but
    the collision was confusing and hid the earlier definition).
    """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n┈╭╮┈┈┈┈┈┈┈┈┈┈┈┈ `"
                     "`\n┈┃┃┈╭╮┈┏╮╭╮╭╮┃╭ `"
                     "`\n┈┃┃┈┃┃┈┣┫┃┃┃┈┣┫ `"
                     "`\n┈┃┣┳┫┃┈┃╰╰╯╰╯┃╰ `"
                     "`\n╭┻┻┻┫┃┈┈╭╮┃┃━┳━ `"
                     "`\n┃╱╭━╯┃┈┈┃┃┃┃┈┃┈ `"
                     "`\n╰╮╱╱╱┃┈┈╰╯╰╯┈┃┈ `")
@register(outgoing=True, pattern=r"^\.lool$")
async def lool(e):
    """Draw the laughing-dog box art."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n╭╭━━━╮╮┈┈┈┈┈┈┈┈┈┈\n┈┃╭━━╯┈┈┈┈▕╲▂▂╱▏┈\n┈┃┃╱▔▔▔▔▔▔▔▏╱▋▋╮┈`"
                     "`\n┈┃╰▏┃╱╭╮┃╱╱▏╱╱▆┃┈\n┈╰━▏┗━╰╯┗━╱╱╱╰┻┫┈\n┈┈┈▏┏┳━━━━▏┏┳━━╯┈`"
                     "`\n┈┈┈▏┃┃┈┈┈┈▏┃┃┈┈┈┈ `")
@register(outgoing=True, pattern=r"^\.stfu$")
async def stfu(e):
    """Draw 'STFU' in block art."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n██████████████████████████████`"
                     "`\n██▀▀▀▀████▀▀▀▀████▀▀▀▀▀███▀▀██▀▀█`"
                     "`\n█──────██──────██───────██──██──█`"
                     "`\n█──██▄▄████──████──███▄▄██──██──█`"
                     "`\n█▄────▀████──████────█████──██──█`"
                     "`\n█▀▀██──████──████──███████──██──█`"
                     "`\n█──────████──████──███████──────█`"
                     "`\n██▄▄▄▄█████▄▄████▄▄████████▄▄▄▄██`"
                     "`\n█████████████████████████████████`")
@register(outgoing=True, pattern=r"^\.gtfo$")
async def gtfo(e):
    """Draw 'GTFO' in block art."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n███████████████████████████████ `"
                     "`\n█▀▀▀▀▀▀▀█▀▀▀▀▀▀█▀▀▀▀▀▀▀█▀▀▀▀▀▀█ `"
                     "`\n█───────█──────█───────█──────█ `"
                     "`\n█──███──███──███──███▄▄█──██──█ `"
                     "`\n█──███▄▄███──███─────███──██──█ `"
                     "`\n█──██───███──███──██████──██──█ `"
                     "`\n█──▀▀▀──███──███──██████──────█ `"
                     "`\n█▄▄▄▄▄▄▄███▄▄███▄▄██████▄▄▄▄▄▄█ `"
                     "`\n███████████████████████████████ `")
@register(outgoing=True, pattern=r"^\.nih$")
async def nih(e):
    """Bunny offering a heart... then taking it back."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n(\\_/)`"
                     "`\n(●_●)`"
                     "`\n />💖 *Ini Buat Kamu`"
                     "\n \n"
                     r"`(\_/)`"
                     "`\n(●_●)`"
                     "`\n💖<\\ *Tapi Bo'ong`")
@register(outgoing=True, pattern=r"^\.fag$")
async def fag(e):
    """Draw the 'STFU' billboard art.

    Renamed from the duplicate `gtfo` — this function shadowed the
    `.gtfo` handler's module-level name above.
    """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n█████████`"
                     "`\n█▄█████▄█`"
                     "`\n█▼▼▼▼▼`"
                     "`\n█ STFU FAGGOT'S`"
                     "`\n█▲▲▲▲▲`"
                     "`\n█████████`"
                     "`\n ██ ██`")
@register(outgoing=True, pattern=r"^\.tai$")
async def taco(e):
    """Bunny-offering-poop meme."""
    first = e.text[0]
    if not first.isalpha() and first not in ("/", "#", "@", "!"):
        await e.edit("\n{\\__/}"
                     "\n(●_●)"
                     "\n( >💩 Mau Tai Ku?")
@register(outgoing=True, pattern=r"^\.paw$")
async def paw(e):
    """Cat-paw kaomoji.

    Bug fix: the original string opened a markdown backtick without
    closing it, so a literal ` rendered in the message.
    """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`(=ↀωↀ=)`")
@register(outgoing=True, pattern=r"^\.tf$")
async def tf(e):
    """Sunglasses-stare kaomoji."""
    first = e.text[0]
    if not first.isalpha() and first not in ("/", "#", "@", "!"):
        await e.edit("(̿▀̿ ̿Ĺ̯̿̿▀̿ ̿)̄ ")
@register(outgoing=True, pattern=r"^\.gey$")
async def gey(e):
    """Pointing-figure box art with a teasing caption."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
                     "`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
                     "`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈Lu Bau Hehe`"
                     "\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")
@register(outgoing=True, pattern=r"^\.gay$")
async def gay(e):
    """Pointing-figure box art variant.

    Renamed from the duplicate `gey` — this function shadowed the
    `.gey` handler's module-level name above.
    """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
                     "`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
                     "`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈ANDA GAY`"
                     "\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")
@register(outgoing=True, pattern=r"^\.bot$")
async def bot(e):
    """Draw a robot face in box art."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("` \n ╲╲╭━━━━╮ \n╭╮┃▆┈┈▆┃╭╮ \n┃╰┫▽▽▽┣╯┃ \n╰━┫△△△┣━╯`"
                     "`\n╲╲┃┈┈┈┈┃ \n╲╲┃┈┏┓┈┃ `")
@register(outgoing=True, pattern=r"^\.hey$")
async def hey(e):
    """Waving-figure box art shouting HEY!."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("\n┈┈┈╱▔▔▔▔╲┈╭━━━━━\n┈┈▕▂▂▂▂▂▂▏┃HEY!┊😀`"
                     "`\n┈┈▕▔▇▔▔┳▔▏╰┳╮HEY!┊\n┈┈▕╭━╰╯━╮▏━╯╰━━━\n╱▔▔▏▅▅▅▅▕▔▔╲┈┈┈┈`"
                     "`\n▏┈┈╲▂▂▂▂╱┈┈┈▏┈┈┈`")
@register(outgoing=True, pattern=r"^\.nou$")
async def nou(e):
    """'No U' figure in box art."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n┈╭╮╭╮\n┈┃┃┃┃\n╭┻┗┻┗╮`"
                     "`\n┃┈▋┈▋┃\n┃┈╭▋━╮━╮\n┃┈┈╭╰╯╰╯╮`"
                     "`\n┫┈┈ NoU\n┃┈╰╰━━━━╯`"
                     "`\n┗━━┻━┛`")
@register(outgoing=True, pattern=r"^\.iwi(?: |$)(.*)")
async def faces(siwis):
    """IwI-ify text: every vowel becomes i/I, each run of '!' becomes a
    random shrug face, and one more face is appended at the end."""
    replied = await siwis.get_reply_message()
    message = siwis.pattern_match.group(1)
    if not message:
        if replied:
            message = replied.text
        else:
            await siwis.edit("` Anda Harus Memberikan Teks Ke IwI `")
            return
    result = sub(r"(a|i|u|e|o)", "i", message)
    result = sub(r"(A|I|U|E|O)", "I", result)
    result = sub(r"\!+", " " + choice(IWIS), result)
    await siwis.edit(result + " " + choice(IWIS))
@register(outgoing=True, pattern="^.koc$")
async def koc(e):
    """Play a crude animation frame-by-frame via successive edits."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        frames = (
            "8✊===D", "8=✊==D", "8==✊=D", "8===✊D",
            "8==✊=D", "8=✊==D", "8✊===D", "8=✊==D",
            "8==✊=D", "8===✊D", "8==✊=D", "8=✊==D",
            "8✊===D", "8=✊==D", "8==✊=D", "8===✊D",
            "8==✊=D", "8=✊==D",
            "8===✊D💦",
            "8==✊=D💦💦",
            "8=✊==D💦💦💦",
            "8✊===D💦💦💦💦",
            "8===✊D💦💦💦💦💦",
            "8==✊=D💦💦💦💦💦💦",
            "8=✊==D💦💦💦💦💦💦💦",
            "8✊===D💦💦💦💦💦💦💦💦",
            "8===✊D💦💦💦💦💦💦💦💦💦",
            "8==✊=D💦💦💦💦💦💦💦💦💦💦",
            "8=✊==D Lah Kok Habis?",
            "😭😭😭😭",
        )
        # Same 30 edits as the original hand-unrolled sequence.
        for frame in frames:
            await e.edit(frame)
@register(outgoing=True, pattern="^.gas$")
async def gas(e):
    """Animate an ambulance driving across the line, then show a
    random face from FACEREACTS."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        frames = (
            "___________________🚑",
            "________________🚑___",
            "______________🚑_____",
            "___________🚑________",
            "________🚑___________",
            "_____🚑______________",
            "__🚑_________________",
            "🚑___________________",
            "_____________________",
        )
        for frame in frames:
            await e.edit(frame)
        await e.edit(choice(FACEREACTS))
@register(outgoing=True, pattern=r"^\.shg$")
async def shrugger(shg):
    r"""Shrug — post a random ¯\_(ツ)_/¯ variant from SHGS."""
    face = choice(SHGS)
    await shg.edit(face)
@register(outgoing=True, pattern=r"^\.(?:penis|dick)\s?(.)?")
async def emoji_penis(e):
    """Send GAMBAR_TITIT, optionally redrawn with a user-chosen emoji."""
    custom = e.pattern_match.group(1)
    art = GAMBAR_TITIT
    if custom:
        art = art.replace('😋', custom)
    await e.edit(art)
@register(outgoing=True, pattern=r"^\.(?:kon|kontl)\s?(.)?")
async def emoji_kontl(e):
    """Send GAMBAR_KONTL, optionally redrawn with a user-chosen emoji."""
    custom = e.pattern_match.group(1)
    art = GAMBAR_KONTL
    if custom:
        art = art.replace('😂', custom)
    await e.edit(art)
@register(outgoing=True, pattern=r"^\.ok\s?(.)?")
async def emoji_oke(e):
    """Send GAMBAR_OK, optionally redrawn with a user-chosen emoji.

    Bug fix: the original pattern r"^\.ok$" had no capture group, so
    e.pattern_match.group(1) raised IndexError on every invocation.
    The pattern now matches an optional emoji argument, consistent with
    the sibling .penis/.kontl handlers.
    """
    emoji = e.pattern_match.group(1)
    oke = GAMBAR_OK
    if emoji:
        oke = oke.replace('😂', emoji)
    await e.edit(oke)
@register(outgoing=True, pattern=r"^\.skull\s?(.)?")
async def emoji_tengkorak(e):
    """Send GAMBAR_TENGKORAK, optionally redrawn with a custom emoji.

    Bug fix: the original pattern r"^\.skull$" had no capture group, so
    e.pattern_match.group(1) raised IndexError on every invocation.
    """
    emoji = e.pattern_match.group(1)
    tengkorak = GAMBAR_TENGKORAK
    if emoji:
        tengkorak = tengkorak.replace('😂', emoji)
    await e.edit(tengkorak)
CMD_HELP.update({
"memes":
">`.cowsay`"
"\nUsage: sapi yang mengatakan sesuatu."
"\n\n> .cp"
"\nUsage: Copy paste meme terkenal"
"\n\n>`.vapor`"
"\nUsage: Menguapkan semuanya!"
"\n\n>`.str`"
"\nUsage: Regangkan."
"\n\n>`.10iq`"
"\nUsage: Kamu mundur !!"
"\n\n>`.zal`"
"\nUsage: Munculkan perasaan kacau."
"\n\n>`.Oem`"
"\nPenggunaan: Oeeeem"
"\n\n>`.fp`"
"\nUsage: Telapak Tangan:P"
"\n\n>`.moon`"
"\nUsage: animasi bulan."
"\n\n>`.clock`"
"\nUsage: animasi jam."
"\n\n>`.hi`"
"\nUsage: Sapa semuanya!"
"\n\n>`.coinflip` <Kepala/Ekor>"
"\nUsage: Melempar koin !!"
"\n\n>`.owo`"
"\nUsage: UwU"
"\n\n>`.react`"
"\nUsage: Buat Userbot Anda bereaksi terhadap semuanya."
"\n\n>`.slap`"
"\nUsage: balas tampar mereka dengan benda acak !!"
"\n\n>`.cry`"
"\nUsage: jika kamu melakukan ini, aku akan menangis."
"\n\n>`.shg`"
"\nUsage: Angkat bahu!"
"\n\n>`.run`"
"\nUsage: Biarkan Aku Lari, Lari, LARI!"
"\n\n>`.chase`"
"\nUsage: Sebaiknya Anda mulai berlari"
"\n\n>`.metoo`"
"\nUsage: Haha ya"
"\n\n>`.mock`"
"\nUsage: Lakukan dan temukan kesenangan yang sesungguhnya."
"\n\n>`.clap`"
"\nUsage: Puji orang!"
"\n\n>`.f` <emoji/karakter>"
"\nUsage: F."
"\n\n>`.bt`"
"\nUsage: Percayalah, Anda akan menemukan ini berguna."
"\n\n>`.weeb`"
"\nUsage: Untuk Mengubah Teks Menjadi Weeb-ify."
"\n\n>`.type` <teks>"
"\nUsage: Hanya perintah kecil untuk membuat keyboard Anda menjadi mesin tik!"
"\n\n>`.lfy` <query>"
"\nUsage: Biar saya Google itu untuk Anda dengan cepat!"
"\n\n>`.decide` [Alternatif: (.yes, .no, .maybe)]"
"\nUsage: Buat keputusan cepat."
"\n\n> `.nou` `.bot` `.rock` `.gey` `.tf` `.paw` `.tai` `.nih`"
"\n> `.fag` `.gtfo`; `.stfu` `.lol` `.lool` `.fail` `.leave`"
"\n> `.iwi` `.sayhi` `.koc` `.gas` `.earth` `.love` `.rain`"
"\n> `.penis` `.emo` `.fuck` `.skull` `.monyet`\nUsage: Cobain aja"
"\n\n\n**Semoga Harimu Menyenangkan**\n➥ `Alvin`"
})
| # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
""" Userbot module for having some fun with people. """
import os
import urllib
import requests
from re import sub
from cowpy import cow
from asyncio import sleep
from collections import deque
from random import choice, getrandbits, randint
from userbot import bot, CMD_HELP
from userbot.events import register
from userbot.modules.admin import get_user_from_event
# ================= CONSTANT =================
METOOSTR = [
"Aku Juga Terimakasih",
"Haha Iya, Aku Juga",
"Sama Haha",
"Aku Juga Gabut",
"Sama Sini",
"Haha Iya",
"Aku Juga",
]
ZALG_LIST = [[
"̖",
" ̗",
" ̘",
" ̙",
" ̜",
" ̝",
" ̞",
" ̟",
" ̠",
" ̤",
" ̥",
" ̦",
" ̩",
" ̪",
" ̫",
" ̬",
" ̭",
" ̮",
" ̯",
" ̰",
" ̱",
" ̲",
" ̳",
" ̹",
" ̺",
" ̻",
" ̼",
" ͅ",
" ͇",
" ͈",
" ͉",
" ͍",
" ͎",
" ͓",
" ͔",
" ͕",
" ͖",
" ͙",
" ͚",
" ",
],
[
" ̍",
" ̎",
" ̄",
" ̅",
" ̿",
" ̑",
" ̆",
" ̐",
" ͒",
" ͗",
" ͑",
" ̇",
" ̈",
" ̊",
" ͂",
" ̓",
" ̈́",
" ͊",
" ͋",
" ͌",
" ̃",
" ̂",
" ̌",
" ͐",
" ́",
" ̋",
" ̏",
" ̽",
" ̉",
" ͣ",
" ͤ",
" ͥ",
" ͦ",
" ͧ",
" ͨ",
" ͩ",
" ͪ",
" ͫ",
" ͬ",
" ͭ",
" ͮ",
" ͯ",
" ̾",
" ͛",
" ͆",
" ̚",
],
[
" ̕",
" ̛",
" ̀",
" ́",
" ͘",
" ̡",
" ̢",
" ̧",
" ̨",
" ̴",
" ̵",
" ̶",
" ͜",
" ͝",
" ͞",
" ͟",
" ͠",
" ͢",
" ̸",
" ̷",
" ͡",
]]
EMOJIS = [
"😂",
"😂",
"👌",
"✌",
"💞",
"👍",
"👌",
"💯",
"🎶",
"👀",
"😂",
"👓",
"👏",
"👐",
"🍕",
"💥",
"🍴",
"💦",
"💦",
"🍑",
"🍆",
"😩",
"😏",
"👉👌",
"👀",
"👅",
"😩",
"🚰",
]
INSULT_STRINGS = [
"Jangan minum dan mengetik.",
"Saya pikir Anda harus pulang atau lebih baik ke rumah sakit jiwa.",
"Perintah tidak ditemukan. Sama seperti otak Anda.",
"Apakah kamu sadar bahwa kamu membodohi dirimu sendiri? Ternyata tidak.",
"Anda bisa mengetik lebih baik dari itu.",
"Bot aturan 544 bagian 9 mencegah saya membalas orang bodoh seperti Anda.",
"Maaf, kami tidak menjual otak.",
"Percayalah kamu tidak normal.",
"Saya yakin otak Anda terasa seperti baru, mengingat Anda tidak pernah menggunakannya.",
"Jika saya ingin bunuh diri, saya akan meningkatkan ego Anda dan melompat ke IQ Anda.",
"Zombie memakan otak ... kamu aman.",
"Anda tidak berevolusi dari kera, mereka berevolusi dari Anda.",
"Kembalilah dan bicara padaku ketika IQ mu melebihi umurmu.",
"Saya tidak mengatakan Anda bodoh, saya hanya mengatakan bahwa Anda tidak beruntung dalam hal berpikir.",
"Kamu berbicara bahasa apa? Karena terdengar seperti omong kosong.",
"Kebodohan bukanlah kejahatan jadi kamu bebas pergi.",
"Anda adalah bukti bahwa evolusi BISA mundur.",
"Aku akan bertanya berapa umurmu tapi aku tahu kamu tidak bisa menghitung setinggi itu.",
"Sebagai orang luar, apa pendapat Anda tentang umat manusia?",
"Otak bukanlah segalanya. Dalam kasusmu mereka bukan apa-apa.",
"Biasanya orang hidup dan belajar. Kamu hidup saja.",
"Aku tidak tahu apa yang membuatmu begitu bodoh, tapi itu benar-benar berhasil.",
"Teruslah berbicara, suatu hari nanti kamu akan mengatakan sesuatu yang cerdas! (Meskipun aku ragu)"
"Shock saya, katakan sesuatu yang cerdas.",
"IQ Anda lebih rendah dari ukuran sepatu Anda.",
"Aduh! Neurotransmiter Anda tidak lagi bekerja.",
"Apakah kamu gila kamu bodoh.",
"Setiap orang berhak untuk menjadi bodoh tetapi Anda menyalahgunakan hak istimewa tersebut.",
"Maaf aku menyakiti perasaanmu saat menyebutmu bodoh. Kupikir kamu sudah tahu itu.",
"Anda harus mencoba mencicipi sianida.",
"Enzim Anda dimaksudkan untuk mencerna racun tikus.",
"Kamu harus mencoba tidur selamanya.",
"Ambil pistol dan tembak dirimu sendiri.",
"Anda bisa membuat rekor dunia dengan melompat dari pesawat tanpa parasut.",
"Berhenti berbicara BS dan melompat di depan kereta peluru yang sedang berjalan.",
"Cobalah mandi dengan Hydrochloric Acid daripada air.",
"Coba ini: jika Anda menahan napas di bawah air selama satu jam, Anda dapat menahannya selamanya.",
"Go Green! Berhenti menghirup Oksigen.",
"Tuhan sedang mencarimu. Kamu harus pergi untuk bertemu dengannya.",
"berikan 100% mu. Sekarang, pergi donor darah.",
"Cobalah melompat dari gedung seratus lantai tetapi Anda hanya dapat melakukannya sekali.",
"Anda harus menyumbangkan otak Anda melihat bahwa Anda tidak pernah menggunakannya.",
"Relawan untuk target dalam jarak tembak.",
"Tembak kepala itu menyenangkan. Dapatkan dirimu sendiri.",
"Anda harus mencoba berenang dengan hiu putih besar.",
"Anda harus mengecat diri Anda dengan warna merah dan berlari dalam bull marathon.",
"Anda bisa tetap di bawah air selama sisa hidup Anda tanpa harus kembali lagi.",
"Bagaimana kalau kamu berhenti bernapas selama 1 hari? Itu akan bagus.",
"Cobalah memprovokasi harimau saat kalian berdua berada di dalam sangkar.",
"Sudahkah Anda mencoba menembak diri Anda sendiri setinggi 100m menggunakan kanon.",
"Anda harus mencoba menahan TNT di mulut Anda dan menyalakannya.",
"Cobalah bermain menangkap dan melempar dengan RDX itu menyenangkan.",
"Saya dengar phogine beracun tapi saya rasa Anda tidak keberatan menghirupnya untuk bersenang-senang.",
"Luncurkan diri Anda ke luar angkasa sambil melupakan oksigen di Bumi.",
"Kamu harus mencoba bermain ular tangga, dengan ular sungguhan dan tanpa tangga.",
"Menari telanjang di beberapa kabel HT.",
"Gunung Berapi Aktif adalah kolam renang terbaik untuk Anda.",
"Anda harus mencoba mandi air panas di gunung berapi.",
"Cobalah untuk menghabiskan satu hari di peti mati dan itu akan menjadi milikmu selamanya.",
"Pukul Uranium dengan neutron yang bergerak lambat di hadapanmu. Ini akan menjadi pengalaman yang berharga.",
"Anda bisa menjadi orang pertama yang menginjak matahari. Selamat mencoba.",
]
UWUS = [
"(・`ω´・)",
";;w;;",
"owo",
"UwU",
">w<",
"^w^",
r"\(^o\) (/o^)/",
"( ^ _ ^)∠☆",
"(ô_ô)",
"~:o",
";-;",
"(*^*)",
"(>_",
"(♥_♥)",
"*(^O^)*",
"((+_+))",
]
IWIS = [
"┐(´д`)┌",
"┐(´~`)┌",
"┐(´ー`)┌",
"┐( ̄ヘ ̄)┌",
"╮(╯∀╰)╭",
"╮(╯_╰)╭",
"┐(´д`)┌",
"┐(´∀`)┌",
"ʅ(́◡◝)ʃ",
"┐(゚~゚)┌",
"┐('д')┌",
"┐(‘~`;)┌",
"ヘ(´-`;)ヘ",
"┐( -“-)┌",
"ʅ(´◔౪◔)ʃ",
"ヽ(゜~゜o)ノ",
"ヽ(~~~ )ノ",
"┐(~ー~;)┌",
"┐(-。ー;)┌",
r"¯\_(ツ)_/¯",
r"¯\_(⊙_ʖ⊙)_/¯",
r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
]
FACEREACTS = [
"ʘ‿ʘ",
"ヾ(-_- )ゞ",
"(っ˘ڡ˘ς)",
"(´ж`ς)",
"( ಠ ʖ̯ ಠ)",
"(° ͜ʖ͡°)╭∩╮",
"(ᵟຶ︵ ᵟຶ)",
"(งツ)ว",
"ʚ(•`",
"(っ▀¯▀)つ",
"(◠﹏◠)",
"( ͡ಠ ʖ̯ ͡ಠ)",
"( ఠ ͟ʖ ఠ)",
"(∩`-´)⊃━☆゚.*・。゚",
"(⊃。•́‿•̀。)⊃",
"(._.)",
"{•̃_•̃}",
"(ᵔᴥᵔ)",
"♨_♨",
"⥀.⥀",
"ح˚௰˚づ ",
"(҂◡_◡)",
"ƪ(ړײ)ƪ",
"(っ•́。•́)♪♬",
"◖ᵔᴥᵔ◗ ♪ ♫ ",
"(☞゚ヮ゚)☞",
"[¬º-°]¬",
"(Ծ‸ Ծ)",
"(•̀ᴗ•́)و ̑̑",
"ヾ(´〇`)ノ♪♪♪",
"(ง'̀-'́)ง",
"ლ(•́•́ლ)",
"ʕ •́؈•̀ ₎",
"♪♪ ヽ(ˇ∀ˇ )ゞ",
"щ(゚Д゚щ)",
"( ˇ෴ˇ )",
"눈_눈",
"(๑•́ ₃ •̀๑) ",
"( ˘ ³˘)♥ ",
"ԅ(≖‿≖ԅ)",
"♥‿♥",
"◔_◔",
"⁽⁽ଘ( ˊᵕˋ )ଓ⁾⁾",
"乁( ◔ ౪◔)「 ┑( ̄Д  ̄)┍",
"( ఠൠఠ )ノ",
"٩(๏_๏)۶",
"┌(ㆆ㉨ㆆ)ʃ",
"ఠ_ఠ",
"(づ。◕‿‿◕。)づ",
"(ノಠ ∩ಠ)ノ彡( \\o°o)\\",
"“ヽ(´▽`)ノ”",
"༼ ༎ຶ ෴ ༎ຶ༽",
"。゚( ゚இ‸இ゚)゚。",
"(づ ̄ ³ ̄)づ",
"(⊙.☉)7",
"ᕕ( ᐛ )ᕗ",
"t(-_-t)",
"(ಥ⌣ಥ)",
"ヽ༼ ಠ益ಠ ༽ノ",
"༼∵༽ ༼⍨༽ ༼⍢༽ ༼⍤༽",
"ミ●﹏☉ミ",
"(⊙_◎)",
"¿ⓧ_ⓧﮌ",
"ಠ_ಠ",
"(´・_・`)",
"ᕦ(ò_óˇ)ᕤ",
"⊙﹏⊙",
"(╯°□°)╯︵ ┻━┻",
r"¯\_(⊙︿⊙)_/¯",
"٩◔̯◔۶",
"°‿‿°",
"ᕙ(⇀‸↼‶)ᕗ",
"⊂(◉‿◉)つ",
"V•ᴥ•V",
"q(❂‿❂)p",
"ಥ_ಥ",
"ฅ^•ﻌ•^ฅ",
"ಥ﹏ಥ",
"( ^_^)o自自o(^_^ )",
"ಠ‿ಠ",
"ヽ(´▽`)/",
"ᵒᴥᵒ#",
"( ͡° ͜ʖ ͡°)",
"┬─┬ ノ( ゜-゜ノ)",
"ヽ(´ー`)ノ",
"☜(⌒▽⌒)☞",
"ε=ε=ε=┌(;*´Д`)ノ",
"(╬ ಠ益ಠ)",
"┬─┬⃰͡ (ᵔᵕᵔ͜ )",
"┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻",
r"¯\_(ツ)_/¯",
"ʕᵔᴥᵔʔ",
"(`・ω・´)",
"ʕ•ᴥ•ʔ",
"ლ(`ー´ლ)",
"ʕʘ̅͜ʘ̅ʔ",
"( ゚Д゚)",
r"¯\(°_o)/¯",
"(。◕‿◕。)",
]
RUNS_STR = [
"Berlari ke Thanos..",
"Berlari jauh, jauh dari bumi..",
"Berlari lebih cepat dari Bolt karena aku pengguna bot !!",
"Berlari ke Mia Khalifa..",
"Grup ini terlalu berbahaya untuk ditangani, aku harus lari.",
"`Berlari Dari Orang Yang Bau Sawi 😬`",
"Aku sangat lelah untuk berlari dan mengejarmu 💔",
"Aku pergi dulu",
"Saya hanya berjalan pergi, karena saya terlalu gemuk untuk lari.",
"Saya Cape!",
"Larii Disini Bau Sawii 😭",
"Saya lari karena saya sangat gabut.",
"Lari... \nkarena diet bukanlah pilihan.",
"Berlari Cepat Dari Orang Gila",
"Jika kamu ingin menangkapku, kamu harus cepat... \nJika kamu ingin tinggal bersamaku, kamu harus menjadi orang yang baik... \nTapi jika kamu ingin melewati aku... \nKamu pasti bercanda. ",
"Siapapun dapat berlari seratus meter, itu hitungan empat puluh dua ribu dua ratus berikutnya.",
"Mengapa semua orang ini mengikuti saya?",
"Apakah anak-anak masih mengejarku?",
"Berlari Sekencang Super Dede.. Apakah Sopan Begitu?",
]
CHASE_STR = [
"Menurutmu kemana kamu akan pergi?",
"Hah? Apa? Apakah mereka lolos?",
"ZZzzZZzz... Hah? Apa? Oh, hanya mereka lagi, lupakan.",
"Kembali kesini!",
"Tidak terlalu cepat...",
"Awas ke dinding!",
"Jangan tinggalkan aku sendiri dengan mereka !!",
"Kamu lari, kamu mati.",
"Bercanda, aku ada dimana-mana",
"Kamu akan menyesali itu ...",
"Kamu juga bisa mencoba /kickme, kudengar itu menyenangkan.",
"Ganggu orang lain, tidak ada yang peduli.",
"Kamu bisa lari, tapi kamu tidak bisa bersembunyi.",
"Apakah hanya itu yang kamu punya?",
"Saya di belakang Anda...",
"Anda punya teman!",
"Kita bisa melakukan ini dengan cara mudah, atau cara sulit.",
"Anda tidak mengerti, bukan?",
"Ya, sebaiknya kau lari!",
"Tolong, ingatkan saya apakah saya peduli?",
"Aku akan lari lebih cepat jika jadi kamu.",
"Itu pasti droid yang kami cari.",
"Semoga peluang selalu menguntungkan Anda.",
"Kata-kata terakhir yang terkenal.",
"Dan mereka menghilang selamanya, tidak pernah terlihat lagi.",
"Oh, lihat aku! Saya sangat keren, saya bisa lari dari bot orang ini",
"Ya ya, cukup ketuk /kickme.",
"Ini, ambil cincin ini dan pergilah ke Mordor saat kamu melakukannya.",
"Legenda mengatakan, mereka masih berjalan...",
"Tidak seperti Harry Potter, orang tuamu tidak bisa melindungimu dariku.",
"Ketakutan menyebabkan kemarahan. Kemarahan mengarah pada kebencian. Kebencian menyebabkan penderitaan. Jika Anda terus berlari dalam ketakutan, Anda mungkin"
"jadilah Vader berikutnya.",
"Beberapa kalkulasi nanti, saya telah memutuskan minat saya pada kejahatan Anda tepat 0.",
"Legenda mengatakan, mereka masih berjalan.",
"Teruskan, kami tidak yakin kami menginginkanmu di sini.",
"Kamu seorang penyihir- Oh. Tunggu. Kamu bukan Harry, terus bergerak.",
"JANGAN BERLARI DI SINI!",
"Hasta la vista, sayang.",
"Siapa yang membiarkan anjing keluar?",
"Ini lucu, karena tidak ada yang peduli.",
"Ah, sayang sekali, Aku suka yang itu.",
"Terus terang, sayangku, aku tidak peduli.",
"Milkshake saya membawa semua anak laki-laki ke halaman... Jadi lari lebih cepat!",
"Anda tidak bisa MENANGANI kebenaran!",
"Dahulu kala, di galaksi yang sangat jauh... Seseorang akan peduli tentang itu, Tapi sekarang tidak lagi.",
"Hei, lihat mereka! Mereka lari dari palu yang tak terelakkan... Manis.",
"Han menembak lebih dulu, Aku juga.",
"Apa yang kamu kejar, kelinci putih?",
"Seperti yang dikatakan The Doctor... LARI!",
]
HELLOSTR = [
"Hai!",
"'Ello, bro!",
"Apa itu crackin?",
"Apa kabarmu?",
"Halo, apa kabar, apa kabar!",
"Halo, siapa di sana, saya sedang berbicara.",
"Kamu tahu siapa ini.",
"Yo!",
"Wassup.",
"Salam dan salam!",
"Halo, sinar matahari!",
"Hei, apa kabar, hai!",
"Apa yang menendang, ayam kecil?",
"Ciluk ba!",
"Halo-bagus!",
"Halo, mahasiswa baru!",
"Saya datang dengan damai!",
"Ahoy, sobat!",
"Hiya!",
]
SHGS = [
"┐(´д`)┌",
"┐(´~`)┌",
"┐(´ー`)┌",
"┐( ̄ヘ ̄)┌",
"╮(╯∀╰)╭",
"╮(╯_╰)╭",
"┐(´д`)┌",
"┐(´∀`)┌",
"ʅ(́◡◝)ʃ",
"┐(゚~゚)┌",
"┐('д')┌",
"┐(‘~`;)┌",
"ヘ(´-`;)ヘ",
"┐( -“-)┌",
"ʅ(´◔౪◔)ʃ",
"ヽ(゜~゜o)ノ",
"ヽ(~~~ )ノ",
"┐(~ー~;)┌",
"┐(-。ー;)┌",
r"¯\_(ツ)_/¯",
r"¯\_(⊙_ʖ⊙)_/¯",
r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
]
CRI = [
"أ‿أ",
"╥﹏╥",
"(;﹏;)",
"(ToT)",
"(┳Д┳)",
"(ಥ﹏ಥ)",
"(;へ:)",
"(T_T)",
"(πーπ)",
"(T▽T)",
"(⋟﹏⋞)",
"(iДi)",
"(´Д⊂ヽ",
"(;Д;)",
"(>﹏<)",
"(TдT)",
"(つ﹏⊂)",
"༼☯﹏☯༽",
"(ノ﹏ヽ)",
"(ノAヽ)",
"(╥_╥)",
"(T⌓T)",
"(༎ຶ⌑༎ຶ)",
"(☍﹏⁰)。",
"(ಥ_ʖಥ)",
"(つд⊂)",
"(≖͞_≖̥)",
"(இ﹏இ`。)",
"༼ಢ_ಢ༽",
"༼ ༎ຶ ෴ ༎ຶ༽",
]
SLAP_TEMPLATES_EN = [
"{hits} {victim} dengan {item}.",
"{hits} {victim} di wajah dengan {item}.",
"{hits} {victim} sekitar sedikit dengan {item}.",
"{throws} {item} ke {Victim}.",
"mengambil {item} dan {throws} ke wajah {victim}.",
"Menusuk {victim} dengan tombak cinta.",
"{throws} beberapa {item} ke {victim}.",
"mengambil {item} dan {throws} ke wajah {victim}.",
"meluncurkan {item} ke arah umum {korban}.",
"duduk di wajah {victim} sambil membanting {item}.",
"mulai menampar {victim} dengan konyol dengan {item}.",
"pin {victim} ke bawah dan berulang kali {hits} mereka dengan {item}.",
"mengambil {item} dan {hits} {victim} dengannya.",
"mulai menampar {victim} dengan konyol dengan {item}.",
"menahan {victim} dan berulang kali {hits} mereka dengan {item}.",
"memukul {victim} dengan {item}.",
"mengambil {item} dan {hits} {victim} dengannya.",
"mengikat {victim} ke kursi dan {throws} {item} padanya.",
"{hits} {victim} {where} dengan {item}.",
"mengikat {victim} ke tiang dan mencambuk mereka {where} dengan {item}."
"memberikan dorongan ramah untuk membantu {victim} belajar berenang di lahar.",
"mengirim {victim} ke /laut /lahar.",
"mengirim {victim} ke lubang memori.",
"memenggal {victim}.",
"melemparkan {victim} dari sebuah gedung.",
"mengganti semua musik {victim} dengan lagu iri bilang bos.",
"spam email {victim}.",
"membuat {victim} depresi.",
"menampar {victim} tanpa apa-apa.",
"pukul {victim} dengan pesawat garuda.",
"memukul kepala {victim}.",
"taruh {victim} di tong sampah.",
"Menendang {victim} dan melemparnya ke sungai.",
"letakkan {victim} di rumah hantu.",
"menampar {victim} dengan tongkat besi!"]
ITEMS_EN = [
"Tabung Gas",
"Televisi 42 In",
"Raket",
"Raket Nyamuk",
"Kaca",
"Buku",
"Ringgis",
"Telur",
"Jarum",
"Monitor Tabung",
"Obeng",
"Almunium",
"Emas",
"Printer",
"Speaker",
"Gas Lpg",
"Tangki Bensin",
"Tandon Air",
"Bola Boling",
"Laptop",
"Hardisk Rusak",
"Wajan Panas",
"Virus Corona",
"Meja Kantor",
"Meja Arsip",
"Lemari",
"Ember Besi",
"Besi Beton",
"Timah Panas",
"Harimau",
"Batu Krikil",
"Makanan Basi",
"Pesawat AirBus",
"Roket Nasa",
"Satelit Nasa",
"Matahari",
"Meteor",
"Berkas Kantor",
"Beton panas",
"Cermin",
"Batu Giok",
"Botol",
"Nezuko",
"Kaset Pita",
"Tiang Jemuran",
"Pisau Lipat",
"Bongkahan Es ",
"Asteroid",
]
THROW_EN = [
"melempar",
"melemparkan",
]
HIT_EN = [
"memukul",
"menendang",
"menampar",
"memukul",
"melempar",
]
WHERE_EN = ["di pipi", "di kepala", "di pantat", "di badan"]
SLAP_TEMPLATES_ID = [
"{hits} {victim} dengan {item}.",
"{throws} sebuah {item} kepada {victim}.",
"mengambil {item} dan {hits} {victim} .",
"Mengambil Sebuah {item} dan {hits} {victim} Dengan itu.",
"Menjatuhkan {victim} Ke Lava.",
"Mengirimkan {victim} ke Kawah.",
"Membuang {victim} Ke Laut.",
"Mengeluarkan {victim} Dari Bumi.",
"Melempar {victim} Ke luar angkasa.",
"Menaruh {victim} di Pluto.",
"Melemparkan sebuah {item} ke {victim}.",
"Melemparkan {item} kepada {victim}.",
"Menampar {victim} menggunakan {item}.",
"Membuang {victim} Ke udara.",
"Menghapus {victim} Dari Daftar Teman.",
"Melemparkan {item} {where} {victim}.",
"Meletakan {item} {where} {victim}.",
"Menyerang {victim} menggunakan {anime}.",
"Mengehack Seluruh akun {victim}"
]
ITEMS_ID = [
"Tabung Gas",
"Televisi 42 In",
"Raket",
"Raket Nyamuk",
"Kaca",
"Buku",
"Ringgis",
"Telur",
"Jarum",
"Monitor Tabung",
"Obeng",
"Almunium",
"Emas",
"Printer",
"Speaker",
"Gas Lpg",
"Tangki Bensin",
"Tandon Air",
"Bola Boling",
"Laptop",
"Hardisk Rusak",
"Wajan Panas",
"Virus Corona",
"Meja Kantor",
"Meja Arsip",
"Lemari",
"Ember Besi",
"Besi Beton",
"Timah Panas",
"Harimau",
"Batu Krikil",
"Makanan Basi",
"Pesawat AirBus",
"Roket Nasa",
"Satelit Nasa",
"Matahari",
"Meteor",
"Berkas Kantor",
"Beton panas",
"Cermin",
"Batu Giok",
"Botol",
"Nezuko",
"Kaset Pita",
"Tiang Jemuran",
"Pisau Lipat",
"Bongkahan Es ",
"Asteroid",
]
THROW_ID = [
"Melempar",
"Melemparkan",
]
HIT_ID = [
"Memukul",
"melemparkan",
"Memukuli",
]
WHERE_ID = ["di pipi", "di kepala", "di bokong", "di badan"]
SLAP_TEMPLATES_Jutsu = [
"Menyerang {victim} Menggunakan {hits}.",
"Menyerang {victim} Menggunakan {item}.",
"Melemparkan {throws} kepada {victim} .",
"Melemparkan {throws} {where} {victim}."
]
ITEMS_Jutsu = [
"KAA MEE HAA MEE HAA",
"Chibaku Tensei",
]
THROW_Jutsu = [
"Futon Rasen Shuriken",
"Shuriken",
]
HIT_Jutsu = [
"Rasengan",
"Chidori",
]
GAMBAR_TITIT = """
😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋😋
😋😋😋😋😋😋
😋😋😋 😋😋😋
😋😋 😋😋
"""
GAMBAR_OK = """
░▐▀▀▀▀▀▀▀▀▌▐▀▌▄▄▄▀▀▓▀
░▐▌▓▀▀▀▀▓▌▌▐▐▌▀▌▄▄▀░░
░▐▐▌▐▀▀▌▐▐▌▐▌▐▓▄▀░░░░
░▐▌▌▐▄▄▌▐▌▌▐▐▌▓▀▄░░░░
░▐▐▓▄▄▄▄▓▐▌▐▌▌▄▌▀▀▄░░
░▐▄▄▄▄▄▄▄▄▌▐▄▌▀▀▀▄▄▓▄
"""
GAMBAR_TENGKORAK = """
░░░░░░░░░░░░░▄▐░░░░
░░░░░░░▄▄▄░░▄██▄░░░
░░░░░░▐▀█▀▌░░░░▀█▄░
░░░░░░▐█▄█▌░░░░░░▀█▄
░░░░░░░▀▄▀░░░▄▄▄▄▄▀▀
░░░░░▄▄▄██▀▀▀▀░░░░░
░░░░█▀▄▄▄█░▀▀░░░░░░
░░░░▌░▄▄▄▐▌▀▀▀░░░░░
░▄░▐░░░▄▄░█░▀▀░░░░░
░▀█▌░░░▄░▀█▀░▀░░░░░
░░░░░░░░▄▄▐▌▄▄░░░░░
░░░░░░░░▀███▀█▄░░░░
░░░░░░░▐▌▀▄▀▄▀▐░░░░
░░░░░░░▐▀░░░░░░▐▌░░
░░░░░░░█░░░░░░░░█░░
░░░░░░▐▌░░░░░░░░░█░
"""
GAMBAR_KONTL = """
⣠⡶⠚⠛⠲⢄⡀
⣼⠁ ⠀⠀⠀ ⠳⢤⣄
⢿⠀⢧⡀⠀⠀⠀⠀⠀⢈⡇
⠈⠳⣼⡙⠒⠶⠶⠖⠚⠉⠳⣄
⠀⠀⠈⣇⠀⠀⠀⠀⠀⠀⠀⠈⠳⣄
⠀⠀⠀⠘⣆ ⠀⠀⠀⠀ ⠀⠈⠓⢦⣀
⠀⠀⠀⠀⠈⢳⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⠲⢤
⠀⠀⠀⠀⠀⠀⠙⢦⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⢧
⠀⠀⠀⠀⠀⠀⠀⡴⠋⠓⠦⣤⡀⠀⠀⠀⠀⠀⠀⠀⠈⣇
⠀⠀⠀⠀⠀⠀⣸⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⡄
⠀⠀⠀⠀⠀⠀⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⡇
⠀⠀⠀⠀⠀⠀⢹⡄⠀⠀⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⠃
⠀⠀⠀⠀⠀⠀⠀⠙⢦⣀⣳⡀⠀⠀⠀⠀⠀⠀⠀⠀⣰⠏
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⠛⢦⣀⣀⣀⣀⣠⡴⠚⠁⠉⠉⠉
"""
WHERE_Jutsu = ["Di Pipi", "Di Kepala", "Di Bokong", "Di Badan ,Di Pantat"]
normiefont = [
'a',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z']
weebyfont = [
'卂',
'乃',
'匚',
'刀',
'乇',
'下',
'厶',
'卄',
'工',
'丁',
'长',
'乚',
'从',
'𠘨',
'口',
'尸',
'㔿',
'尺',
'丂',
'丅',
'凵',
'リ',
'山',
'乂',
'丫',
'乙']
# ===========================================
@register(outgoing=True, pattern=r"^\.(\w+)say (.*)")
async def univsaye(cowmsg):
    """Userbot wrapper around cowpy: have a chosen 'cowacter' say text."""
    character = cowmsg.pattern_match.group(1).lower()
    text = cowmsg.pattern_match.group(2)
    if character == "cow":
        character = "default"
    if character not in cow.COWACTERS:
        return
    speaker = cow.get_cow(character)()
    # Replace backticks so the balloon doesn't break monospace markup.
    await cowmsg.edit(f"`{speaker.milk(text).replace('`', '´')}`")
@register(outgoing=True, pattern=r"^\.coinflip (.*)")
async def coin(event):
    """Flip a coin; if the user guessed a side, say whether they won.

    Bug fix: the guess is lower-cased before comparison, but it was then
    compared against the capitalized literals "Kepala"/"Ekor", so a
    correct guess was never recognized. Compare lowercase to lowercase.
    """
    r = choice(["Kepala", "Ekor"])
    input_str = event.pattern_match.group(1)
    if input_str:
        input_str = input_str.lower()
    if r == "Kepala":
        if input_str == "kepala":
            await event.edit(
                "Koin Itu Mendarat Di: **Kepala**.\nKamu Benar.")
        elif input_str == "ekor":
            await event.edit(
                "Koin Itu Mendarat Di: **Kepala**.\nKamu Salah, Coba Lagi..."
            )
        else:
            await event.edit("Koin Itu Mendarat Di: **Kepala**.")
    elif r == "Ekor":
        if input_str == "ekor":
            await event.edit(
                "Koin Itu Mendarat Di: **Ekor**.\nKamu Benar.")
        elif input_str == "kepala":
            await event.edit(
                "Koin Itu Mendarat Di: **Ekor**.\nKamu Salah, Coba Lagi..."
            )
        else:
            await event.edit("Koin Itu Mendarat Di: **Ekor**.")
@register(pattern=r"^\.slap(?: |$)(.*)", outgoing=True)
async def who(event):
    """Slap the replied-to user with a randomly built sentence."""
    replied_user = await get_user_from_event(event)
    if not replied_user:
        return
    caption = await slap(replied_user[0], event)
    try:
        await event.edit(caption)
    except BaseException:
        await event.edit(
            "`Tidak bisa slap orang ini, perlu mengambil beberapa meteor dan batu!`"
        )
async def slap(replied_user, event):
    """Build a random slap sentence aimed at *replied_user*.

    The template set is selected by the command argument ("en", "id" or
    "jutsu"); anything else falls back to the English set.
    """
    user_id = replied_user.id
    first_name = replied_user.first_name
    username = replied_user.username
    if username:
        slapped = "@{}".format(username)
    else:
        slapped = f"[{first_name}](tg://user?id={user_id})"
    # (templates, items, hits, throws, wheres) per language keyword.
    pools = {
        "en": (SLAP_TEMPLATES_EN, ITEMS_EN, HIT_EN, THROW_EN, WHERE_EN),
        "id": (SLAP_TEMPLATES_ID, ITEMS_ID, HIT_ID, THROW_ID, WHERE_ID),
        "jutsu": (SLAP_TEMPLATES_Jutsu, ITEMS_Jutsu, HIT_Jutsu,
                  THROW_Jutsu, WHERE_Jutsu),
    }
    slap_str = event.pattern_match.group(1)
    templates, items, hits, throws, wheres = pools.get(slap_str, pools["en"])
    temp = choice(templates)
    caption = "..." + temp.format(
        victim=slapped,
        item=choice(items),
        hits=choice(hits),
        throws=choice(throws),
        where=choice(wheres))
    return caption
@register(outgoing=True, pattern=r"^\.boobs(?: |$)(.*)")
async def boobs(e):
    """Fetch a random NSFW image from api.oboobs.ru and send it.

    Bug fix: the API returns JSON objects whose image path is under the
    "preview" key; the original indexed a translated key
    ("Gambar Boobs") that does not exist, raising KeyError every call.
    Also downloads straight to boobs.jpg instead of creating a literal
    "*.jpg" file and renaming it.
    """
    await e.edit("`Berdosa, Mendapatkan Gambar Boobs...`")
    await sleep(3)
    await e.edit("`Mengirim Gambar Boobs...`")
    # NOTE(review): key assumed to be "preview" per the oboobs API —
    # confirm against a live response.
    nsfw = requests.get('http://api.oboobs.ru/noise/1').json()[0]["preview"]
    urllib.request.urlretrieve(
        "http://media.oboobs.ru/{}".format(nsfw), "boobs.jpg")
    await e.client.send_file(e.chat_id, "boobs.jpg")
    os.remove("boobs.jpg")
    await e.delete()
@register(outgoing=True, pattern=r"^\.pantat(?: |$)(.*)")
async def butts(e):
    """Fetch a random NSFW image from api.obutts.ru and send it.

    Bug fix: same as the boobs handler — the API's image path lives
    under "preview", not the translated key "Gambar Pantat" (KeyError).
    Downloads directly to butts.jpg instead of "*.jpg" + rename.
    """
    await e.edit("`Berdosa, Mendapatkan Gambar Pantat Yang Indah...`")
    await sleep(3)
    await e.edit("`Mengirim Gambar Pantat Indah...`")
    # NOTE(review): key assumed to be "preview" per the obutts API —
    # confirm against a live response.
    nsfw = requests.get('http://api.obutts.ru/noise/1').json()[0]["preview"]
    urllib.request.urlretrieve(
        "http://media.obutts.ru/{}".format(nsfw), "butts.jpg")
    await e.client.send_file(e.chat_id, "butts.jpg")
    os.remove("butts.jpg")
    await e.delete()
@register(outgoing=True, pattern=r"^\.(yes|no|maybe|decide)$")
async def decide(event):
    """Answer via yesno.wtf: .yes/.no/.maybe force that answer,
    .decide gets a random one. Replies to the quoted message if any."""
    decision = event.pattern_match.group(1).lower()
    message_id = event.reply_to_msg_id if event.reply_to_msg_id else None
    url = "https://yesno.wtf/api"
    if decision != "decide":
        url += f"?force={decision}"
    r = requests.get(url).json()
    await event.delete()
    await event.client.send_message(
        event.chat_id,
        str(r["answer"]).upper(),
        reply_to=message_id,
        file=r["image"])
@register(outgoing=True, pattern=r"^\.fp$")
async def facepalm(e):
    """Replace the message with a facepalm emoji."""
    await e.edit("🤦♂")
@register(outgoing=True, pattern=r"^\.cry$")
async def cry(e):
    """y u du dis — post a random crying kaomoji from CRI."""
    face = choice(CRI)
    await e.edit(face)
@register(outgoing=True, pattern=r"^\.insult$")
async def insult(e):
    """Post a random insult from INSULT_STRINGS."""
    line = choice(INSULT_STRINGS)
    await e.edit(line)
@register(outgoing=True, pattern=r"^\.cp(?: |$)(.*)")
async def copypasta(cp_e):
    """Copypasta the famous meme: sprinkle random emojis, randomize
    letter case, and replace one random character with 🅱️."""
    replied = await cp_e.get_reply_message()
    message = cp_e.pattern_match.group(1)
    if not message:
        if replied:
            message = replied.text
        else:
            return await cp_e.edit(
                "`😂🅱️AhHH👐MaNtAp👅Bro👅UnTuk✌️MeMbuAT👌Ku👐TeRliHat👀LuCu💞HaHAhaA!💦`")
    reply_text = choice(EMOJIS)
    # choose a random character in the message to be substituted with 🅱️
    b_char = choice(message).lower()
    for ch in message:
        if ch == " ":
            reply_text += choice(EMOJIS)
        elif ch in EMOJIS:
            reply_text += ch + choice(EMOJIS)
        elif ch.lower() == b_char:
            reply_text += "🅱️"
        else:
            reply_text += ch.upper() if getrandbits(1) else ch.lower()
    reply_text += choice(EMOJIS)
    await cp_e.edit(reply_text)
@register(outgoing=True, pattern=r"^\.vapor(?: |$)(.*)")
async def vapor(vpr):
    """Vaporize everything: convert text to fullwidth characters.

    Printable ASCII 0x21-0x7E maps into the fullwidth block by adding
    0xFEE0, and a space becomes an ideographic space (U+3000).  The old
    upper bound of 0x7F (DEL) produced the unassigned code point
    U+FFDF, so the range is capped at 0x7E ('~') instead.
    """
    textx = await vpr.get_reply_message()
    message = vpr.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        return await vpr.edit("`B e r i k a n S e b u a h T e k s U n t u k Vapor!`")
    reply_text = []
    for charac in message:
        code = ord(charac)
        if 0x21 <= code <= 0x7E:  # printable ASCII except space
            reply_text.append(chr(code + 0xFEE0))
        elif code == 0x20:  # ASCII space -> ideographic space
            reply_text.append(chr(0x3000))
        else:
            reply_text.append(charac)
    await vpr.edit("".join(reply_text))
@register(outgoing=True, pattern=r"^\.str(?: |$)(.*)")
async def stretch(stret):
    """Streeeetch the vowels of the given (or replied-to) text.

    Every vowel (Latin or Cyrillic) is repeated a random 3-10 times.
    The dead assignment ``message = stret.text`` — immediately
    overwritten by the pattern group — has been removed.
    """
    textx = await stret.get_reply_message()
    message = stret.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        return await stret.edit("`Beriiiiiiiiikaaannnn sebuuuuuuuuuah teeeeeeeks!`")
    count = randint(3, 10)
    reply_text = sub(r"([aeiouAEIOUaeiouAEIOUаеиоуюяыэё])", (r"\1" * count),
                     message)
    await stret.edit(reply_text)
@register(outgoing=True, pattern=r"^\.zal(?: |$)(.*)")
async def zal(zgfy):
    """Invoke the feeling of chaos: zalgo-fy the given (or replied-to)
    text by stacking three random combining marks on every letter.
    """
    reply_text = list()
    textx = await zgfy.get_reply_message()
    message = zgfy.pattern_match.group(1)
    if message:
        pass
    elif textx:
        # Fall back to the replied-to message's text.
        message = textx.text
    else:
        return await zgfy.edit(
            "`b̜́ͨe͒͜r̠͂ͬi̷̱̋k͖͒ͤa̋ͫ͑n͕͂͗ t̢͘͟e͂̽̈́k͎͂͠s̤͚ͭ m̪͔͑è͜͡n͈ͮḁ͞ͅk̲̮͛u̺͂ͩt̬̗́k͍̙̮á ̺n̨̹ͪ`"
        )
    for charac in message:
        # Non-letters pass through untouched.
        if not charac.isalpha():
            reply_text.append(charac)
            continue
        # Stack three combining marks, one drawn from each tier of
        # ZALG_LIST (above / middle / below the glyph).
        for _ in range(0, 3):
            rand = randint(0, 2)
            if rand == 0:
                charac = charac.strip() + \
                    choice(ZALG_LIST[0]).strip()
            elif rand == 1:
                charac = charac.strip() + \
                    choice(ZALG_LIST[1]).strip()
            else:
                charac = charac.strip() + \
                    choice(ZALG_LIST[2]).strip()
        reply_text.append(charac)
    await zgfy.edit("".join(reply_text))
@register(outgoing=True, pattern=r"^\.hi$")
async def hoi(hello):
    """Greet everyone with a random line from HELLOSTR."""
    greeting = choice(HELLOSTR)
    await hello.edit(greeting)
@register(outgoing=True, pattern=r"^\.owo(?: |$)(.*)")
async def faces(owo):
    """UwU-ify the given text (or the replied-to message)."""
    replied = await owo.get_reply_message()
    text = owo.pattern_match.group(1)
    if not text:
        if replied:
            text = replied.text
        else:
            return await owo.edit("` Mohon Berikan Teks UwU! `")
    # Same substitution chain (and random-choice order) as before.
    uwu = sub(r"(r|l)", "w", text)
    uwu = sub(r"(R|L)", "W", uwu)
    uwu = sub(r"n([aeiou])", r"ny\1", uwu)
    uwu = sub(r"N([aeiouAEIOU])", r"Ny\1", uwu)
    uwu = sub(r"\!+", " " + choice(UWUS), uwu)
    uwu = uwu.replace("ove", "uv")
    uwu += " " + choice(UWUS)
    await owo.edit(uwu)
@register(outgoing=True, pattern=r"^\.react$")
async def react_meme(react):
    """Replace the command with a random face reaction."""
    face = choice(FACEREACTS)
    await react.edit(face)


@register(outgoing=True, pattern=r"^\.shg$")
async def shrugger(shg):
    r"""Shrug it off: ¯\_(ツ)_/¯ (random pick from SHGS)."""
    shrug = choice(SHGS)
    await shg.edit(shrug)


@register(outgoing=True, pattern=r"^\.chase$")
async def police(chase):
    """Post a random 'chase' line from CHASE_STR."""
    taunt = choice(CHASE_STR)
    await chase.edit(taunt)


@register(outgoing=True, pattern=r"^\.run$")
async def runner_lol(run):
    """Post a random 'run' line from RUNS_STR."""
    dash = choice(RUNS_STR)
    await run.edit(dash)


@register(outgoing=True, pattern=r"^\.metoo$")
async def metoo(hahayes):
    """Haha yes — random pick from METOOSTR."""
    agreement = choice(METOOSTR)
    await hahayes.edit(agreement)
@register(outgoing=True, pattern=r"^\.oem$")
async def oem(e):
    """Post 'Oeee...em' — exactly what the original 16-step loop built
    (each pass dropped the trailing 'm' and appended 'em', netting one
    extra 'e'; 1 + 16 = 17 of them)."""
    await e.edit("O" + "e" * 17 + "m")


@register(outgoing=True, pattern=r"^\.Oem$")
async def Oem(e):
    """Capitalised-command twin of `.oem`; same output string."""
    await e.edit("O" + "e" * 17 + "m")


@register(outgoing=True, pattern=r"^\.10iq$")
async def iqless(e):
    """Replace the command with the wheelchair emoji."""
    await e.edit("♿")
# NOTE(review): this handler reuses the name `iqless` from the `.10iq`
# handler above.  Both were registered at decoration time so both still
# fire, but the module attribute `iqless` now refers only to this one.
@register(outgoing=True, pattern="^.fuck$")
async def iqless(e):
    # Multi-line middle-finger emoji art.
    await e.edit("🖕🖕🖕🖕🖕🖕🖕🖕\n🖕🖕🖕🖕🖕🖕🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕🖕🖕🖕🖕\n🖕🖕🖕🖕🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕")
async def _emoji_spinner(event, frames, rounds=32):
    """Shared engine for the emoji-animation commands below: edit the
    message every 0.1 s, rotating the frame ring one step per tick.
    Any error (e.g. Telegram edit flood limits) silently stops the
    animation, exactly as the originals did.
    """
    ring = deque(frames)
    try:
        for _ in range(rounds):
            await sleep(0.1)
            await event.edit("".join(ring))
            ring.rotate(1)
    except BaseException:
        return


@register(outgoing=True, pattern=r"^\.moon$")
async def moon(event):
    """Moon-phase animation."""
    await _emoji_spinner(event, "🌗🌘🌑🌒🌓🌔🌕🌖")


@register(outgoing=True, pattern=r"^\.bunga$")
async def moon(event):  # NOTE: name reuse kept from the original module
    """Flower animation."""
    await _emoji_spinner(event, "🌼🌻🌺🌹🌸🌷", rounds=35)


@register(outgoing=True, pattern=r"^\.waktu$")
async def moon(event):  # NOTE: name reuse kept from the original module
    """Time-of-day animation."""
    await _emoji_spinner(event, "🎑🌄🌅🌇🌆🌃🌌", rounds=100)


@register(outgoing=True, pattern=r"^\.buah$")
async def moon(event):  # NOTE: name reuse kept from the original module
    """Fruit animation."""
    await _emoji_spinner(event, "🍉🍓🍇🍎🍍🍐🍌", rounds=35)


@register(outgoing=True, pattern=r"^\.clock$")
async def clock(event):
    """Clock-face animation."""
    await _emoji_spinner(event, "🕙🕘🕗🕖🕕🕔🕓🕒🕑🕐🕛")


@register(outgoing=True, pattern="^.rain$")
async def rain(event):
    """Weather animation."""
    await _emoji_spinner(event, "☀️🌤⛅️🌥☁️🌧⛈")


@register(outgoing=True, pattern="^.love$")
async def love(event):
    """Heart animation."""
    await _emoji_spinner(event, "❤️🧡💛💚💙💜🖤💕💞💓💗💖💘💝")


@register(outgoing=True, pattern="^.earth$")
async def earth(event):
    """Globe animation."""
    await _emoji_spinner(event, "🌏🌍🌎🌎🌍🌏🌍🌎")


@register(outgoing=True, pattern="^.hati$")
async def earth(event):  # NOTE: name reuse kept from the original module
    """Coloured-heart animation."""
    await _emoji_spinner(event, "🖤💜💙💚💛🧡❤️🤍")


@register(outgoing=True, pattern="^.monyet$")
async def earth(event):  # NOTE: name reuse kept from the original module
    """Monkey animation."""
    await _emoji_spinner(event, "🙈🙉🙈🙉🙈🙉🙈🙉")


@register(outgoing=True, pattern="^.emo$")
async def earth(event):  # NOTE: name reuse kept from the original module
    """Smiley animation."""
    await _emoji_spinner(event, "🙂😁😄😃😂🤣😭🐵🙊🙉🙈")
@register(outgoing=True, pattern=r"^\.mock(?: |$)(.*)")
async def spongemocktext(mock):
    """sPonGeBoB-mOcK the given text: flip the case of each letter at
    random (the random stream is consulted only for alphabetic
    characters, exactly as before)."""
    replied = await mock.get_reply_message()
    text = mock.pattern_match.group(1)
    if not text:
        if replied:
            text = replied.text
        else:
            return await mock.edit("`bEriKan PeSan UnTuK MoCk!`")
    out = []
    for ch in text:
        if ch.isalpha() and randint(0, 1):
            out.append(ch.upper() if ch.islower() else ch.lower())
        else:
            out.append(ch)
    await mock.edit("".join(out))
@register(outgoing=True, pattern=r"^\.weeb(?: |$)(.*)")
async def weebify(e):
    """Rewrite text using the 'weeby' lookalike font.

    Fixes two bugs: using the command with neither an argument nor a
    reply crashed with AttributeError (``get_reply_message()`` returns
    None), and ``' '.join(args)`` inserted a space between every single
    character of the input instead of leaving it intact.
    """
    args = e.pattern_match.group(1)
    if not args:
        reply = await e.get_reply_message()
        args = reply.text if reply else None
    if not args:
        await e.edit("`Apa Yang Anda Lakukan Tuan ツ`")
        return
    string = args.lower()
    # Map character by character so unmapped characters pass through,
    # instead of repeated whole-string replace() calls.
    out = []
    for ch in string:
        if ch in normiefont:
            out.append(weebyfont[normiefont.index(ch)])
        else:
            out.append(ch)
    await e.edit("".join(out))
@register(outgoing=True, pattern=r"^\.clap(?: |$)(.*)")
async def claptext(memereview):
    """Praise people: put 👏 around and between every word."""
    replied = await memereview.get_reply_message()
    text = memereview.pattern_match.group(1)
    if not text:
        if replied:
            text = replied.text
        else:
            return await memereview.edit("`Tuan, Mohon Balas Ke Pesan Orang Yang Ingin Anda Puji ツ`")
    clapped = "👏 " + text.replace(" ", " 👏 ") + " 👏"
    await memereview.edit(clapped)
@register(outgoing=True, pattern=r"^\.teksbiru$")
async def bluetext(bt_e):
    """Mock blue-text clickers — only when replying inside a group."""
    replied = await bt_e.get_reply_message()
    if replied and bt_e.is_group:
        await bt_e.edit(
            "/TEKSBIRU /APAKAH /ANDA.\n"
            "/SEDANG /GABUT /KARNA /TERTARIK /MELIHAT /TEKS /BIRU /PASTI /ANDA /BOSAN?")
@register(outgoing=True, pattern=r"^\.f (.*)")
async def payf(event):
    """Draw a big 'F' (pay respects) out of the given character/emoji."""
    ch = event.pattern_match.group(1)
    # Row widths of the "F" glyph, top to bottom — same 12 rows as the
    # original format() call.
    widths = (8, 8, 2, 2, 2, 6, 6, 2, 2, 2, 2, 2)
    await event.edit("\n".join(ch * w for w in widths))
@register(outgoing=True, pattern=r"^\.lfy (.*)")
async def let_me_google_that_for_you(lmgtfy_q):
    """Reply with a shortened 'Let Me Google That For You' link.

    Fixes: when the command carried no argument and was not a reply,
    ``query`` was never assigned and the handler died with NameError —
    it now asks for a query instead.
    """
    textx = await lmgtfy_q.get_reply_message()
    qry = lmgtfy_q.pattern_match.group(1)
    if qry:
        query = str(qry)
    elif textx:
        query = textx.message
    else:
        return await lmgtfy_q.edit("`Berikan query atau balas sebuah pesan.`")
    query_encoded = query.replace(" ", "+")
    lfy_url = f"http://lmgtfy.com/?s=g&iie=1&q={query_encoded}"
    payload = {'format': 'json', 'url': lfy_url}
    # Shorten through is.gd, then post the markdown link.
    r = requests.get('http://is.gd/create.php', params=payload)
    await lmgtfy_q.edit("Ini Dia, Bantu Dirimu Sendiri."
                        f"\n[{query}]({r.json()['shorturl']})")
@register(outgoing=True, pattern=r"^\.sayhi$")
async def sayhi(e):
    # Emoji-block art spelling "HI".
    await e.edit(
        "\n💰💰💰💰💰💰💰💰💰💰💰💰"
        "\n💰🔷💰💰💰🔷💰💰🔷🔷🔷💰"
        "\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
        "\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
        "\n💰🔷🔷🔷🔷🔷💰💰💰🔷💰💰"
        "\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
        "\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
        "\n💰🔷💰💰💰🔷💰💰🔷🔷🔷💰"
        "\n💰💰💰💰💰💰💰💰💰💰💰💰")
@register(pattern=r".scam(?: |$)(.*)", outgoing=True)
async def scam(event):
    """Fake a chat action (typing, uploading, ...) for a while.

    ``.scam [action] [seconds]``.  With a single argument, a number is
    now correctly treated as the duration and a word as the action: the
    original tried ``str(args[0]).lower()`` first, which can never raise
    ValueError, so the duration branch was unreachable.  A non-numeric
    second argument now reports an error instead of crashing with an
    uncaught ValueError.
    """
    options = [
        'mengetik', 'kontak', 'game', 'lokasi', 'suara', 'bulat', 'video',
        'foto', 'dokumen', 'batal'
    ]
    args = event.pattern_match.group(1).split()
    if len(args) == 0:  # Let bot decide action and time
        scam_action = choice(options)
        scam_time = randint(30, 60)
    elif len(args) == 1:  # Number -> duration; anything else -> action
        try:
            scam_time = int(args[0])
            scam_action = choice(options)
        except ValueError:
            scam_action = args[0].lower()
            scam_time = randint(30, 60)
    elif len(args) == 2:  # User decides both action and time
        scam_action = args[0].lower()
        try:
            scam_time = int(args[1])
        except ValueError:
            return await event.edit("`Tidak Valid`")
    else:
        await event.edit("`Tidak Valid`")
        return
    try:
        # Long fakes delete the trigger message first (as before).
        if scam_time > 300:
            await event.delete()
        async with event.client.action(event.chat_id, scam_action):
            await sleep(scam_time)
    except BaseException:
        return
@register(pattern=r".type(?: |$)(.*)", outgoing=True)
async def typewriter(typew):
    """Retype the message character by character, typewriter style,
    alternating a trailing '|' cursor on and off."""
    replied = await typew.get_reply_message()
    text = typew.pattern_match.group(1)
    if not text:
        if replied:
            text = replied.text
        else:
            return await typew.edit("`Berikan Sebuah Teks Untuk Type!`")
    delay = 0.03
    cursor = "|"
    typed = ""
    await typew.edit(cursor)
    await sleep(delay)
    for ch in text:
        typed += ch
        await typew.edit(typed + cursor)
        await sleep(delay)
        await typew.edit(typed)
        await sleep(delay)
@register(outgoing=True, pattern=r"^\.leave$")
async def leave(e):
    """Pretend the user left the group."""
    first = e.text[0]
    if not first.isalpha() and first not in ("/", "#", "@", "!"):
        await e.edit("`Tuan Telah Meninggalkan Grup ツ`")
# The handlers below each post a fixed piece of box-drawing/emoji art.
# They all share the same guard: only fire when the message starts with
# the '.' command prefix (not a letter or another bot's prefix).

# .fail — "FAIL" banner.
@register(outgoing=True, pattern=r"^\.fail$")
async def fail(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ `"
                     "`\n████▌▄▌▄▐▐▌█████ `"
                     "`\n████▌▄▌▄▐▐▌▀████ `"
                     "`\n▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ `")


# .lol — "LOL" banner.
@register(outgoing=True, pattern=r"^\.lol$")
async def lol(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n╱┏┓╱╱╱╭━━━╮┏┓╱╱╱╱ `"
                     "`\n╱┃┃╱╱╱┃╭━╮┃┃┃╱╱╱╱ `"
                     "`\n╱┃┗━━┓┃╰━╯┃┃┗━━┓╱ `"
                     "`\n╱┗━━━┛╰━━━╯┗━━━┛╱ `")


# .rock — NOTE(review): reuses the function name `lol` from above; both
# stay registered, the module attribute now points at this one.
@register(outgoing=True, pattern=r"^\.rock$")
async def lol(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n┈╭╮┈┈┈┈┈┈┈┈┈┈┈┈ `"
                     "`\n┈┃┃┈╭╮┈┏╮╭╮╭╮┃╭ `"
                     "`\n┈┃┃┈┃┃┈┣┫┃┃┃┈┣┫ `"
                     "`\n┈┃┣┳┫┃┈┃╰╰╯╰╯┃╰ `"
                     "`\n╭┻┻┻┫┃┈┈╭╮┃┃━┳━ `"
                     "`\n┃╱╭━╯┃┈┈┃┃┃┃┈┃┈ `"
                     "`\n╰╮╱╱╱┃┈┈╰╯╰╯┈┃┈ `")


# .lool — dog-face art.
@register(outgoing=True, pattern=r"^\.lool$")
async def lool(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n╭╭━━━╮╮┈┈┈┈┈┈┈┈┈┈\n┈┃╭━━╯┈┈┈┈▕╲▂▂╱▏┈\n┈┃┃╱▔▔▔▔▔▔▔▏╱▋▋╮┈`"
                     "`\n┈┃╰▏┃╱╭╮┃╱╱▏╱╱▆┃┈\n┈╰━▏┗━╰╯┗━╱╱╱╰┻┫┈\n┈┈┈▏┏┳━━━━▏┏┳━━╯┈`"
                     "`\n┈┈┈▏┃┃┈┈┈┈▏┃┃┈┈┈┈ `")


# .stfu — "STFU" banner.
@register(outgoing=True, pattern=r"^\.stfu$")
async def stfu(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n██████████████████████████████`"
                     "`\n██▀▀▀▀████▀▀▀▀████▀▀▀▀▀███▀▀██▀▀█`"
                     "`\n█──────██──────██───────██──██──█`"
                     "`\n█──██▄▄████──████──███▄▄██──██──█`"
                     "`\n█▄────▀████──████────█████──██──█`"
                     "`\n█▀▀██──████──████──███████──██──█`"
                     "`\n█──────████──████──███████──────█`"
                     "`\n██▄▄▄▄█████▄▄████▄▄████████▄▄▄▄██`"
                     "`\n█████████████████████████████████`")


# .gtfo — "GTFO" banner.
@register(outgoing=True, pattern=r"^\.gtfo$")
async def gtfo(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n███████████████████████████████ `"
                     "`\n█▀▀▀▀▀▀▀█▀▀▀▀▀▀█▀▀▀▀▀▀▀█▀▀▀▀▀▀█ `"
                     "`\n█───────█──────█───────█──────█ `"
                     "`\n█──███──███──███──███▄▄█──██──█ `"
                     "`\n█──███▄▄███──███─────███──██──█ `"
                     "`\n█──██───███──███──██████──██──█ `"
                     "`\n█──▀▀▀──███──███──██████──────█ `"
                     "`\n█▄▄▄▄▄▄▄███▄▄███▄▄██████▄▄▄▄▄▄█ `"
                     "`\n███████████████████████████████ `")


# .nih — bunny "this is for you ... just kidding" art.
@register(outgoing=True, pattern=r"^\.nih$")
async def nih(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n(\\_/)`"
                     "`\n(●_●)`"
                     "`\n />💖 *Ini Buat Kamu`"
                     "\n \n"
                     r"`(\_/)`"
                     "`\n(●_●)`"
                     "`\n💖<\\ *Tapi Bo'ong`")


# .fag — NOTE(review): reuses the function name `gtfo` from above; both
# stay registered, the module attribute now points at this one.
@register(outgoing=True, pattern=r"^\.fag$")
async def gtfo(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n█████████`"
                     "`\n█▄█████▄█`"
                     "`\n█▼▼▼▼▼`"
                     "`\n█ STFU FAGGOT'S`"
                     "`\n█▲▲▲▲▲`"
                     "`\n█████████`"
                     "`\n ██ ██`")


# .tai — bunny offering 💩.
@register(outgoing=True, pattern=r"^\.tai$")
async def taco(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("\n{\\__/}"
                     "\n(●_●)"
                     "\n( >💩 Mau Tai Ku?")


# .paw — cat face.  NOTE(review): the string opens a markdown backtick
# without closing it — presumably a typo; confirm intended rendering
# before changing the literal.
@register(outgoing=True, pattern=r"^\.paw$")
async def paw(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`(=ↀωↀ=)")


# .tf — "look of disapproval" face.
@register(outgoing=True, pattern=r"^\.tf$")
async def tf(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("(̿▀̿ ̿Ĺ̯̿̿▀̿ ̿)̄ ")


# .gey — teasing art.
@register(outgoing=True, pattern=r"^\.gey$")
async def gey(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
                     "`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
                     "`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈Lu Bau Hehe`"
                     "\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")


# .gay — NOTE(review): reuses the function name `gey` from above; both
# stay registered, the module attribute now points at this one.
@register(outgoing=True, pattern=r"^\.gay$")
async def gey(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
                     "`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
                     "`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈ANDA GAY`"
                     "\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")


# .bot — robot face.
@register(outgoing=True, pattern=r"^\.bot$")
async def bot(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("` \n ╲╲╭━━━━╮ \n╭╮┃▆┈┈▆┃╭╮ \n┃╰┫▽▽▽┣╯┃ \n╰━┫△△△┣━╯`"
                     "`\n╲╲┃┈┈┈┈┃ \n╲╲┃┈┏┓┈┃ `")


# .hey — waving figure.
@register(outgoing=True, pattern=r"^\.hey$")
async def hey(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("\n┈┈┈╱▔▔▔▔╲┈╭━━━━━\n┈┈▕▂▂▂▂▂▂▏┃HEY!┊😀`"
                     "`\n┈┈▕▔▇▔▔┳▔▏╰┳╮HEY!┊\n┈┈▕╭━╰╯━╮▏━╯╰━━━\n╱▔▔▏▅▅▅▅▕▔▔╲┈┈┈┈`"
                     "`\n▏┈┈╲▂▂▂▂╱┈┈┈▏┈┈┈`")


# .nou — "No U" figure.
@register(outgoing=True, pattern=r"^\.nou$")
async def nou(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n┈╭╮╭╮\n┈┃┃┃┃\n╭┻┗┻┗╮`"
                     "`\n┃┈▋┈▋┃\n┃┈╭▋━╮━╮\n┃┈┈╭╰╯╰╯╮`"
                     "`\n┫┈┈ NoU\n┃┈╰╰━━━━╯`"
                     "`\n┗━━┻━┛`")
@register(outgoing=True, pattern=r"^\.iwi(?: |$)(.*)")
async def faces(siwis):
    """IwI-ify text: every vowel becomes 'i' (case preserved)."""
    replied = await siwis.get_reply_message()
    text = siwis.pattern_match.group(1)
    if not text:
        if replied:
            text = replied.text
        else:
            await siwis.edit("` Anda Harus Memberikan Teks Ke IwI `")
            return
    # Same substitution chain (and random-choice order) as before.
    iwi = sub(r"(a|i|u|e|o)", "i", text)
    iwi = sub(r"(A|I|U|E|O)", "I", iwi)
    iwi = sub(r"\!+", " " + choice(IWIS), iwi)
    iwi += " " + choice(IWIS)
    await siwis.edit(iwi)
# .koc — frame-by-frame edit "animation"; each edit is one frame.
@register(outgoing=True, pattern="^.koc$")
async def koc(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("8✊===D")
        await e.edit("8=✊==D")
        await e.edit("8==✊=D")
        await e.edit("8===✊D")
        await e.edit("8==✊=D")
        await e.edit("8=✊==D")
        await e.edit("8✊===D")
        await e.edit("8=✊==D")
        await e.edit("8==✊=D")
        await e.edit("8===✊D")
        await e.edit("8==✊=D")
        await e.edit("8=✊==D")
        await e.edit("8✊===D")
        await e.edit("8=✊==D")
        await e.edit("8==✊=D")
        await e.edit("8===✊D")
        await e.edit("8==✊=D")
        await e.edit("8=✊==D")
        await e.edit("8===✊D💦")
        await e.edit("8==✊=D💦💦")
        await e.edit("8=✊==D💦💦💦")
        await e.edit("8✊===D💦💦💦💦")
        await e.edit("8===✊D💦💦💦💦💦")
        await e.edit("8==✊=D💦💦💦💦💦💦")
        await e.edit("8=✊==D💦💦💦💦💦💦💦")
        await e.edit("8✊===D💦💦💦💦💦💦💦💦")
        await e.edit("8===✊D💦💦💦💦💦💦💦💦💦")
        await e.edit("8==✊=D💦💦💦💦💦💦💦💦💦💦")
        await e.edit("8=✊==D Lah Kok Habis?")
        await e.edit("😭😭😭😭")


# .gas — ambulance drives across the line, then a random face reaction.
@register(outgoing=True, pattern="^.gas$")
async def gas(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("___________________🚑")
        await e.edit("________________🚑___")
        await e.edit("______________🚑_____")
        await e.edit("___________🚑________")
        await e.edit("________🚑___________")
        await e.edit("_____🚑______________")
        await e.edit("__🚑_________________")
        await e.edit("🚑___________________")
        await e.edit("_____________________")
        await e.edit(choice(FACEREACTS))
# NOTE(review): exact duplicate of the `.shg` handler defined earlier in
# this module; both registrations fire, so the shrug may be processed twice.
@register(outgoing=True, pattern=r"^\.shg$")
async def shrugger(shg):
    r""" ¯\_(ツ)_/¯ """
    await shg.edit(choice(SHGS))
@register(outgoing=True, pattern=r"^\.(?:penis|dick)\s?(.)?")
async def emoji_penis(e):
    """Post the emoji picture, substituting a custom emoji for 😋 if given."""
    custom = e.pattern_match.group(1)
    art = GAMBAR_TITIT
    if custom:
        art = art.replace('😋', custom)
    await e.edit(art)


@register(outgoing=True, pattern=r"^\.(?:kon|kontl)\s?(.)?")
async def emoji_kontl(e):
    """Post the emoji picture, substituting a custom emoji for 😂 if given."""
    custom = e.pattern_match.group(1)
    art = GAMBAR_KONTL
    if custom:
        art = art.replace('😂', custom)
    await e.edit(art)
@register(outgoing=True, pattern=r"^\.ok\s?(.)?$")
async def emoji_oke(e):
    """Post the OK picture, substituting a custom emoji for 😂 if given.

    Fixes: the old pattern ``r"^\\.ok$"`` had no capture group, so
    ``e.pattern_match.group(1)`` raised IndexError on every invocation.
    The new pattern mirrors the `.penis`/`.kon` handlers above and keeps
    the plain ``.ok`` form working.
    """
    emoji = e.pattern_match.group(1)
    oke = GAMBAR_OK
    if emoji:
        oke = oke.replace('😂', emoji)
    await e.edit(oke)


@register(outgoing=True, pattern=r"^\.skull\s?(.)?$")
async def emoji_tengkorak(e):
    """Post the skull picture, substituting a custom emoji for 😂 if given.

    Fixes: the old pattern ``r"^\\.skull$"`` had no capture group, so
    ``e.pattern_match.group(1)`` raised IndexError on every invocation.
    """
    emoji = e.pattern_match.group(1)
    tengkorak = GAMBAR_TENGKORAK
    if emoji:
        tengkorak = tengkorak.replace('😂', emoji)
    await e.edit(tengkorak)
# Help text for every command in this memes module (shown by the
# userbot's `.help memes`).  Pure data — keep the markdown backticks
# intact; they control formatting in the Telegram help message.
CMD_HELP.update({
    "memes":
    ">`.cowsay`"
    "\nUsage: sapi yang mengatakan sesuatu."
    "\n\n> .cp"
    "\nUsage: Copy paste meme terkenal"
    "\n\n>`.vapor`"
    "\nUsage: Menguapkan semuanya!"
    "\n\n>`.str`"
    "\nUsage: Regangkan."
    "\n\n>`.10iq`"
    "\nUsage: Kamu mundur !!"
    "\n\n>`.zal`"
    "\nUsage: Munculkan perasaan kacau."
    "\n\n>`.Oem`"
    "\nPenggunaan: Oeeeem"
    "\n\n>`.fp`"
    "\nUsage: Telapak Tangan:P"
    "\n\n>`.moon`"
    "\nUsage: animasi bulan."
    "\n\n>`.clock`"
    "\nUsage: animasi jam."
    "\n\n>`.hi`"
    "\nUsage: Sapa semuanya!"
    "\n\n>`.coinflip` <Kepala/Ekor>"
    "\nUsage: Melempar koin !!"
    "\n\n>`.owo`"
    "\nUsage: UwU"
    "\n\n>`.react`"
    "\nUsage: Buat Userbot Anda bereaksi terhadap semuanya."
    "\n\n>`.slap`"
    "\nUsage: balas tampar mereka dengan benda acak !!"
    "\n\n>`.cry`"
    "\nUsage: jika kamu melakukan ini, aku akan menangis."
    "\n\n>`.shg`"
    "\nUsage: Angkat bahu!"
    "\n\n>`.run`"
    "\nUsage: Biarkan Aku Lari, Lari, LARI!"
    "\n\n>`.chase`"
    "\nUsage: Sebaiknya Anda mulai berlari"
    "\n\n>`.metoo`"
    "\nUsage: Haha ya"
    "\n\n>`.mock`"
    "\nUsage: Lakukan dan temukan kesenangan yang sesungguhnya."
    "\n\n>`.clap`"
    "\nUsage: Puji orang!"
    "\n\n>`.f` <emoji/karakter>"
    "\nUsage: F."
    "\n\n>`.bt`"
    "\nUsage: Percayalah, Anda akan menemukan ini berguna."
    "\n\n>`.weeb`"
    "\nUsage: Untuk Mengubah Teks Menjadi Weeb-ify."
    "\n\n>`.type` <teks>"
    "\nUsage: Hanya perintah kecil untuk membuat keyboard Anda menjadi mesin tik!"
    "\n\n>`.lfy` <query>"
    "\nUsage: Biar saya Google itu untuk Anda dengan cepat!"
    "\n\n>`.decide` [Alternatif: (.yes, .no, .maybe)]"
    "\nUsage: Buat keputusan cepat."
    "\n\n> `.nou` `.bot` `.rock` `.gey` `.tf` `.paw` `.tai` `.nih`"
    "\n> `.fag` `.gtfo`; `.stfu` `.lol` `.lool` `.fail` `.leave`"
    "\n> `.iwi` `.sayhi` `.koc` `.gas` `.earth` `.love` `.rain`"
    "\n> `.penis` `.emo` `.fuck` `.skull` `.monyet`\nUsage: Cobain aja"
    "\n\n\n**Semoga Harimu Menyenangkan**\n➥ `Alvin`"
})
|
import math
import lavalink
import ksoftapi
import discord
from discord.ext import commands
class Music(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.kclient = bot.kclient
if not hasattr(bot, 'lavalink'):
bot.lavalink = lavalink.Client(bot.user.id)
bot.lavalink.add_node('localhost', 1616, 'proto', 'in', 'default-node') # Host, Port, Password, Region, Name
bot.add_listener(bot.lavalink.voice_update_handler, 'on_socket_response')
lavalink.add_event_hook(self.track_hook)
def cog_unload(self):
""" Cog unload handler. This removes any event hooks that were registered. """
self.bot.lavalink._event_hooks.clear()
async def cog_command_error(self, ctx, error):
if isinstance(error, commands.CommandInvokeError):
await ctx.send(error.original)
async def track_hook(self, event):
if isinstance(event, lavalink.events.QueueEndEvent):
guild_id = int(event.player.guild_id)
await self.connect_to(guild_id, None)
await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(name="Nothing"))
async def cog_before_invoke(self, ctx):
""" Command before-invoke handler. """
guild_check = ctx.guild is not None
if guild_check:
await self.ensure_voice(ctx)
# Ensure that the bot and command author share a mutual voicechannel.
return guild_check
async def ensure_voice(self, ctx):
""" This check ensures that the bot and command author are in the same voicechannel. """
player = self.bot.lavalink.player_manager.create(ctx.guild.id, endpoint=str(ctx.guild.region))
should_connect = ctx.command.name in ('play',)
if not ctx.author.voice or not ctx.author.voice.channel:
raise commands.CommandInvokeError('Join a voice channel first :loud_sound:')
if not player.is_connected:
if not should_connect:
raise commands.CommandInvokeError('Not connected :mute:')
permissions = ctx.author.voice.channel.permissions_for(ctx.me)
if not permissions.connect or not permissions.speak: # Check user limit too?
raise commands.CommandInvokeError('I need the `CONNECT` and `SPEAK` permissions. :disappointed_relieved:')
player.store('channel', ctx.channel.id)
await self.connect_to(ctx.guild.id, str(ctx.author.voice.channel.id))
else:
if int(player.channel_id) != ctx.author.voice.channel.id:
raise commands.CommandInvokeError('You need to be in my voice channel :loud_sound:')
async def connect_to(self, guild_id: int, channel_id: str):
""" Connects to the given voicechannel ID. A channel_id of `None` means disconnect. """
ws = self.bot._connection._get_websocket(guild_id)
await ws.voice_state(str(guild_id), channel_id)
@commands.command(name='play', aliases=['p', 'sing'])
async def play(self, ctx, *, query):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
query = query.strip('<>')
if not query.startswith('http'):
query = f'ytsearch:{query}'
results = await player.node.get_tracks(query)
if not results or not results['tracks']:
return await ctx.send('Song not found :x: Please try again :mag_right:')
em = discord.Embed(colour=discord.Colour(0x59FFC8))
if results['loadType'] == 'PLAYLIST_LOADED':
tracks = results['tracks']
for track in tracks:
# Add all of the tracks from the playlist to the queue.
player.add(requester=ctx.author.id, track=track)
em.title = 'Playlist Enqueued!'
em.description = f'{results['playlistInfo']['name']} - {len(tracks)} tracks'
else:
track = results['tracks'][0]
em.title = 'Track Enqueued'
em.description = f'[{track['info']['title']}]({track['info']['uri']})'
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{track["info"]["identifier"]}/hqdefault.jpg")
em.add_field(name='Channel', value=track['info']['author'])
if track['info']['isStream']:
duration = 'Live'
else:
duration = lavalink.format_time(track['info']['length']).lstrip('00:')
em.add_field(name='Duration', value=duration)
track = lavalink.models.AudioTrack(track, ctx.author.id, recommended=True)
player.add(requester=ctx.author.id, track=track)
msg = await ctx.send(embed=em)
if not player.is_playing:
await player.play()
await player.reset_equalizer()
await msg.delete(delay=1)
await self.now(ctx)
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=player.current.title))
@commands.command(name='seek')
async def seek(self, ctx, seconds=None):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
if not seconds:
return await ctx.send('You need to specify the amount of seconds to seek :fast_forward:')
try:
track_time = player.position + int(seconds) * 1000
await player.seek(track_time)
except ValueError:
return await ctx.send('Specify valid amount of seconds :clock3:')
await ctx.send(f'Moved track to **{lavalink.format_time(track_time)}**')
@commands.command(name='skip', aliases=['forceskip', 'fs', 'next'])
async def skip(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
await ctx.send('⏭ | Skipped.')
await player.skip()
@commands.command(name='now', aliases=['current', 'currentsong', 'playing', 'np'])
async def now(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
song = 'Nothing'
if player.current:
if player.current.stream:
dur = 'LIVE'
pos = ''
count = total = 1
else:
count = player.position
pos = lavalink.format_time(count)
total = player.current.duration
dur = lavalink.format_time(total)
if pos == dur: # When called immediatly after enqueue
count = 0
pos = '00:00:00'
dur = dur.lstrip('00:')
pos = pos[-len(dur):]
bar_len = 30 # bar length
filled_len = int(bar_len * count // float(total))
bar = '═' * filled_len + '◈' + '─' * (bar_len - filled_len)
song = f'[{player.current.title}]({player.current.uri})\n`{pos} {bar} {dur}`'
em = discord.Embed(colour=discord.Colour(0x59FFC8), description=song)
em.set_author(name="Now Playing 🎵", icon_url="https://i.ibb.co/DGsmTvh/star.gif")
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{player.current.identifier}/hqdefault.jpg")
requester = ctx.guild.get_member(player.current.requester)
em.set_footer(text=f"Requested by: {requester}", icon_url=requester.avatar_url)
await ctx.send(embed=em)
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=player.current.title))
else:
await ctx.send('Not playing anything :mute:')
@commands.command(name='save', aliases=['star'])
async def savetodm(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if player.current:
if player.current.stream:
dur = 'Live'
else:
dur = lavalink.format_time(player.current.duration).lstrip('00:')
song = f'[{player.current.title}]({player.current.uri})'
em = discord.Embed(colour=discord.Colour(0x59FFC8), description=song)
em.set_author(name="Now Playing 🎵", icon_url="https://i.ibb.co/DGsmTvh/star.gif")
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{player.current.identifier}/hqdefault.jpg")
em.add_field(name='Channel', value=player.current.author)
em.add_field(name='Duration', value=dur)
user = ctx.author
await user.send(embed=em)
await ctx.send(f"Current song has been sent to you {ctx.author.mention} :floppy_disk:")
else:
await ctx.send('Not playing anything :mute:')
@commands.command(name='queue', aliases=['q', 'playlist'])
async def queue(self, ctx, page: int=1):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.queue:
return await ctx.send('Queue empty! Why not queue something? :cd:')
items_per_page = 10
pages = math.ceil(len(player.queue) / items_per_page)
start = (page - 1) * items_per_page
end = start + items_per_page
queue_list = ''
for i, track in enumerate(player.queue[start:end], start=start):
queue_list += f'`{i + 1}.` [**{track.title}**]({track.uri})\n'
embed = discord.Embed(colour=ctx.guild.me.top_role.colour,
description=f'**{len(player.queue)} tracks**\n\n{queue_list}')
embed.set_footer(text=f'Viewing page {page}/{pages}')
await ctx.send(embed=embed)
@commands.command(name='pause', aliases=['resume'])
async def pause(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
if player.paused:
await player.set_pause(False)
await ctx.message.add_reaction('▶')
else:
await player.set_pause(True)
await ctx.message.add_reaction('⏸')
@commands.command(name='volume', aliases=['vol'])
async def volume(self, ctx, volume: int=None):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not volume:
return await ctx.send(f'🔈 | {player.volume}%')
await player.set_volume(volume)
await ctx.send(f'🔈 | Set to {player.volume}%')
@commands.command(name='shuffle')
async def shuffle(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
player.shuffle = not player.shuffle
await ctx.send('🔀 | Shuffle ' + ('enabled' if player.shuffle else 'disabled'))
@commands.command(name='repeat')
async def repeat(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
player.repeat = not player.repeat
await ctx.send('🔁 | Repeat ' + ('enabled' if player.repeat else 'disabled'))
@commands.command(name='remove', aliases=['dequeue', 'pop'])
async def remove(self, ctx, index: int):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.queue:
return await ctx.send('Nothing queued :cd:')
if index > len(player.queue) or index < 1:
return await ctx.send('Index has to be >=1 and <=queue size')
index = index - 1
removed = player.queue.pop(index)
await ctx.send('Removed **' + removed.title + '** from the queue.')
@commands.command(name='disconnect', aliases=['dis', 'stop', 'leave'])
async def disconnect(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):
return await ctx.send('You\'re not in my voice channel :loud_sound:')
if not player.is_connected:
return await ctx.send('Not connected :mute:')
player.queue.clear()
# Stop the current track so Lavalink consumes less resources.
await player.stop()
# Disconnect from the voice channel.
await self.connect_to(ctx.guild.id, None)
await ctx.send('Disconnected :mute:')
await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(name="Nothing"))
@commands.command(name='lyrics', aliases=['ly'])
async def get_lyrics(self, ctx, *, query: str=""):
"""Get lyrics of current song"""
if not query:
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('I\'m not currently playing anything :warning:')
query = player.current.title
try:
async with ctx.typing():
results = await self.kclient.music.lyrics(query, limit=1)
except ksoftapi.NoResults:
await ctx.send(f'No lyrics found for `{query}`')
else:
lyrics = results[0].lyrics
result = results[0]
embed = discord.Embed(title=f'{result.name} - {result.artist}', color=discord.Color(0xCCFF00), description=lyrics[:2048])
embed.set_thumbnail(url=result.album_art)
embed.set_author(name="Lyrics:")
lyrics = lyrics[2048:]
embeds = [embed] # create embeds' list for long lyrics
while len(lyrics) > 0 and len(embeds) < 10: # limiting embeds to 10
embed = discord.Embed(color=discord.Color(0xCCFF00), description=lyrics[:2048])
lyrics = lyrics[len(embeds)*2048:]
embeds.append(embed)
embeds[-1].set_footer(text="Source: KSoft.Si") # set footer for last embed
for embed in embeds:
await ctx.send(embed=embed)
    @commands.command(name='equalizer', aliases=['eq'])
    async def equalizer(self, ctx, *args):
        """Equalizer: apply a named preset (1 arg) or set one `band gain` pair (2 args)."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if len(args) == 0:
            await ctx.send('Specify `band gain` or `preset` to change frequencies :control_knobs:')
        elif len(args) == 1:
            # 15-band gain tables; 'reset' maps to a placeholder string and is
            # handled specially below (it only exists so `--list` shows it).
            presets ={
                'reset': 'Default',
                'bassboost': [0.08, 0.12, 0.2, 0.18, 0.15, 0.1, 0.05, 0.0, 0.02, -0.04, -0.06, -0.08, -0.10, -0.12, -0.14],
                'jazz': [-0.13, -0.11, -0.1, -0.1, 0.14, 0.2, -0.18, 0.0, 0.24, 0.22, 0.2, 0.0, 0.0, 0.0, 0.0],
                'pop': [-0.02, -0.01, 0.08, 0.1, 0.15, 0.1, 0.03, -0.02, -0.035, -0.05, -0.05, -0.05, -0.05, -0.05, -0.05],
                'treble': [-0.1, -0.12, -0.12, -0.12, -0.08, -0.04, 0.0, 0.3, 0.34, 0.4, 0.35, 0.3, 0.3, 0.3, 0.3]
            }
            preset = args[0].lower()
            if preset in ['reset', 'default']:
                await player.reset_equalizer()
            elif preset in presets:
                # enumerate() yields (band_index, gain) pairs, the shape set_gains expects.
                gain_list = enumerate(presets[preset])
                await player.set_gains(*gain_list)
            elif preset == '--list':
                em = discord.Embed(title=':control_knobs: EQ presets:', color=discord.Color(0xFF6EFF), description='\n'.join(presets.keys()))
                return await ctx.send(embed=em)
            else:
                return await ctx.send('Invalid preset specified :control_knobs:\nType `~eq --list` for all presets')
        elif len(args) == 2:
            try:
                band = int(args[0])
                gain = float(args[1])
                await player.set_gain(band, gain)
            except ValueError:
                return await ctx.send('Specify valid `band gain` values :control_knobs:')
        else:
            return await ctx.send('Specify `band gain` or `preset` :control_knobs:')
        # Print final EQ settings
        eq_frequencies = [f"`{gain}`" for gain in player.equalizer]
        await ctx.send(":level_slider: Current Values:\n" + ' '.join(eq_frequencies))
def setup(bot):
bot.add_cog(Music(bot)) | import math
import lavalink
import ksoftapi
import discord
from discord.ext import commands
class Music(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.kclient = bot.kclient
if not hasattr(bot, 'lavalink'):
bot.lavalink = lavalink.Client(bot.user.id)
bot.lavalink.add_node('localhost', 1616, 'proto', 'in', 'default-node') # Host, Port, Password, Region, Name
bot.add_listener(bot.lavalink.voice_update_handler, 'on_socket_response')
lavalink.add_event_hook(self.track_hook)
def cog_unload(self):
""" Cog unload handler. This removes any event hooks that were registered. """
self.bot.lavalink._event_hooks.clear()
async def cog_command_error(self, ctx, error):
if isinstance(error, commands.CommandInvokeError):
await ctx.send(error.original)
async def track_hook(self, event):
if isinstance(event, lavalink.events.QueueEndEvent):
guild_id = int(event.player.guild_id)
await self.connect_to(guild_id, None)
await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(name="Nothing"))
async def cog_before_invoke(self, ctx):
""" Command before-invoke handler. """
guild_check = ctx.guild is not None
if guild_check:
await self.ensure_voice(ctx)
# Ensure that the bot and command author share a mutual voicechannel.
return guild_check
async def ensure_voice(self, ctx):
""" This check ensures that the bot and command author are in the same voicechannel. """
player = self.bot.lavalink.player_manager.create(ctx.guild.id, endpoint=str(ctx.guild.region))
should_connect = ctx.command.name in ('play',)
if not ctx.author.voice or not ctx.author.voice.channel:
raise commands.CommandInvokeError('Join a voice channel first :loud_sound:')
if not player.is_connected:
if not should_connect:
raise commands.CommandInvokeError('Not connected :mute:')
permissions = ctx.author.voice.channel.permissions_for(ctx.me)
if not permissions.connect or not permissions.speak: # Check user limit too?
raise commands.CommandInvokeError('I need the `CONNECT` and `SPEAK` permissions. :disappointed_relieved:')
player.store('channel', ctx.channel.id)
await self.connect_to(ctx.guild.id, str(ctx.author.voice.channel.id))
else:
if int(player.channel_id) != ctx.author.voice.channel.id:
raise commands.CommandInvokeError('You need to be in my voice channel :loud_sound:')
    async def connect_to(self, guild_id: int, channel_id: str):
        """ Connects to the given voicechannel ID. A channel_id of `None` means disconnect. """
        # Uses discord.py's internal gateway websocket for this guild's shard:
        # lavalink needs the raw voice-state update, not a VoiceClient.
        ws = self.bot._connection._get_websocket(guild_id)
        await ws.voice_state(str(guild_id), channel_id)
@commands.command(name='play', aliases=['p', 'sing'])
async def play(self, ctx, *, query):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
query = query.strip('<>')
if not query.startswith('http'):
query = f'ytsearch:{query}'
results = await player.node.get_tracks(query)
if not results or not results['tracks']:
return await ctx.send('Song not found :x: Please try again :mag_right:')
em = discord.Embed(colour=discord.Colour(0x59FFC8))
if results['loadType'] == 'PLAYLIST_LOADED':
tracks = results['tracks']
for track in tracks:
# Add all of the tracks from the playlist to the queue.
player.add(requester=ctx.author.id, track=track)
em.title = 'Playlist Enqueued!'
em.description = f'{results["playlistInfo"]["name"]} - {len(tracks)} tracks'
else:
track = results['tracks'][0]
em.title = 'Track Enqueued'
em.description = f'[{track["info"]["title"]}]({track["info"]["uri"]})'
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{track['info']['identifier']}/hqdefault.jpg")
em.add_field(name='Channel', value=track['info']['author'])
if track['info']['isStream']:
duration = 'Live'
else:
duration = lavalink.format_time(track['info']['length']).lstrip('00:')
em.add_field(name='Duration', value=duration)
track = lavalink.models.AudioTrack(track, ctx.author.id, recommended=True)
player.add(requester=ctx.author.id, track=track)
msg = await ctx.send(embed=em)
if not player.is_playing:
await player.play()
await player.reset_equalizer()
await msg.delete(delay=1)
await self.now(ctx)
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=player.current.title))
    @commands.command(name='seek')
    async def seek(self, ctx, seconds=None):
        """Move the current track forward/backward by *seconds* relative to now."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if not player.is_playing:
            return await ctx.send('Not playing anything :mute:')
        if not seconds:
            return await ctx.send('You need to specify the amount of seconds to seek :fast_forward:')
        try:
            # Lavalink positions are in milliseconds; int() may raise on bad input.
            track_time = player.position + int(seconds) * 1000
            await player.seek(track_time)
        except ValueError:
            return await ctx.send('Specify valid amount of seconds :clock3:')
        await ctx.send(f'Moved track to **{lavalink.format_time(track_time)}**')
    @commands.command(name='skip', aliases=['forceskip', 'fs', 'next'])
    async def skip(self, ctx):
        """Skip the currently playing track."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if not player.is_playing:
            return await ctx.send('Not playing anything :mute:')
        # Acknowledge first so the reply arrives before track-change events.
        await ctx.send('⏭ | Skipped.')
        await player.skip()
@commands.command(name='now', aliases=['current', 'currentsong', 'playing', 'np'])
async def now(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
song = 'Nothing'
if player.current:
if player.current.stream:
dur = 'LIVE'
pos = ''
count = total = 1
else:
count = player.position
pos = lavalink.format_time(count)
total = player.current.duration
dur = lavalink.format_time(total)
if pos == dur: # When called immediatly after enqueue
count = 0
pos = '00:00:00'
dur = dur.lstrip('00:')
pos = pos[-len(dur):]
bar_len = 30 # bar length
filled_len = int(bar_len * count // float(total))
bar = '═' * filled_len + '◈' + '─' * (bar_len - filled_len)
song = f'[{player.current.title}]({player.current.uri})\n`{pos} {bar} {dur}`'
em = discord.Embed(colour=discord.Colour(0x59FFC8), description=song)
em.set_author(name="Now Playing 🎵", icon_url="https://i.ibb.co/DGsmTvh/star.gif")
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{player.current.identifier}/hqdefault.jpg")
requester = ctx.guild.get_member(player.current.requester)
em.set_footer(text=f"Requested by: {requester}", icon_url=requester.avatar_url)
await ctx.send(embed=em)
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=player.current.title))
else:
await ctx.send('Not playing anything :mute:')
@commands.command(name='save', aliases=['star'])
async def savetodm(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if player.current:
if player.current.stream:
dur = 'Live'
else:
dur = lavalink.format_time(player.current.duration).lstrip('00:')
song = f'[{player.current.title}]({player.current.uri})'
em = discord.Embed(colour=discord.Colour(0x59FFC8), description=song)
em.set_author(name="Now Playing 🎵", icon_url="https://i.ibb.co/DGsmTvh/star.gif")
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{player.current.identifier}/hqdefault.jpg")
em.add_field(name='Channel', value=player.current.author)
em.add_field(name='Duration', value=dur)
user = ctx.author
await user.send(embed=em)
await ctx.send(f"Current song has been sent to you {ctx.author.mention} :floppy_disk:")
else:
await ctx.send('Not playing anything :mute:')
    @commands.command(name='queue', aliases=['q', 'playlist'])
    async def queue(self, ctx, page: int=1):
        """Show the queued tracks, 10 per page (1-based *page*)."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if not player.queue:
            return await ctx.send('Queue empty! Why not queue something? :cd:')
        items_per_page = 10
        pages = math.ceil(len(player.queue) / items_per_page)
        # Slice out the requested page; out-of-range pages render an empty list.
        start = (page - 1) * items_per_page
        end = start + items_per_page
        queue_list = ''
        for i, track in enumerate(player.queue[start:end], start=start):
            queue_list += f'`{i + 1}.` [**{track.title}**]({track.uri})\n'
        embed = discord.Embed(colour=ctx.guild.me.top_role.colour,
                              description=f'**{len(player.queue)} tracks**\n\n{queue_list}')
        embed.set_footer(text=f'Viewing page {page}/{pages}')
        await ctx.send(embed=embed)
    @commands.command(name='pause', aliases=['resume'])
    async def pause(self, ctx):
        """Toggle pause/resume on the current track, confirming via a reaction."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if not player.is_playing:
            return await ctx.send('Not playing anything :mute:')
        if player.paused:
            await player.set_pause(False)
            await ctx.message.add_reaction('▶')
        else:
            await player.set_pause(True)
            await ctx.message.add_reaction('⏸')
@commands.command(name='volume', aliases=['vol'])
async def volume(self, ctx, volume: int=None):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not volume:
return await ctx.send(f'🔈 | {player.volume}%')
await player.set_volume(volume)
await ctx.send(f'🔈 | Set to {player.volume}%')
    @commands.command(name='shuffle')
    async def shuffle(self, ctx):
        """Toggle shuffle mode on the player."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if not player.is_playing:
            return await ctx.send('Not playing anything :mute:')
        player.shuffle = not player.shuffle
        await ctx.send('🔀 | Shuffle ' + ('enabled' if player.shuffle else 'disabled'))
    @commands.command(name='repeat')
    async def repeat(self, ctx):
        """Toggle repeat mode on the player."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if not player.is_playing:
            return await ctx.send('Not playing anything :mute:')
        player.repeat = not player.repeat
        await ctx.send('🔁 | Repeat ' + ('enabled' if player.repeat else 'disabled'))
@commands.command(name='remove', aliases=['dequeue', 'pop'])
async def remove(self, ctx, index: int):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.queue:
return await ctx.send('Nothing queued :cd:')
if index > len(player.queue) or index < 1:
return await ctx.send('Index has to be >=1 and <=queue size')
index = index - 1
removed = player.queue.pop(index)
await ctx.send('Removed **' + removed.title + '** from the queue.')
@commands.command(name='disconnect', aliases=['dis', 'stop', 'leave'])
async def disconnect(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):
return await ctx.send('You\'re not in my voice channel :loud_sound:')
if not player.is_connected:
return await ctx.send('Not connected :mute:')
player.queue.clear()
# Stop the current track so Lavalink consumes less resources.
await player.stop()
# Disconnect from the voice channel.
await self.connect_to(ctx.guild.id, None)
await ctx.send('Disconnected :mute:')
await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(name="Nothing"))
@commands.command(name='lyrics', aliases=['ly'])
async def get_lyrics(self, ctx, *, query: str=""):
"""Get lyrics of current song"""
if not query:
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('I\'m not currently playing anything :warning:')
query = player.current.title
try:
async with ctx.typing():
results = await self.kclient.music.lyrics(query, limit=1)
except ksoftapi.NoResults:
await ctx.send(f'No lyrics found for `{query}`')
else:
lyrics = results[0].lyrics
result = results[0]
embed = discord.Embed(title=f'{result.name} - {result.artist}', color=discord.Color(0xCCFF00), description=lyrics[:2048])
embed.set_thumbnail(url=result.album_art)
embed.set_author(name="Lyrics:")
lyrics = lyrics[2048:]
embeds = [embed] # create embeds' list for long lyrics
while len(lyrics) > 0 and len(embeds) < 10: # limiting embeds to 10
embed = discord.Embed(color=discord.Color(0xCCFF00), description=lyrics[:2048])
lyrics = lyrics[len(embeds)*2048:]
embeds.append(embed)
embeds[-1].set_footer(text="Source: KSoft.Si") # set footer for last embed
for embed in embeds:
await ctx.send(embed=embed)
    @commands.command(name='equalizer', aliases=['eq'])
    async def equalizer(self, ctx, *args):
        """Equalizer: apply a named preset (1 arg) or set one `band gain` pair (2 args)."""
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        if len(args) == 0:
            await ctx.send('Specify `band gain` or `preset` to change frequencies :control_knobs:')
        elif len(args) == 1:
            # 15-band gain tables; 'reset' maps to a placeholder string and is
            # handled specially below (it only exists so `--list` shows it).
            presets ={
                'reset': 'Default',
                'bassboost': [0.08, 0.12, 0.2, 0.18, 0.15, 0.1, 0.05, 0.0, 0.02, -0.04, -0.06, -0.08, -0.10, -0.12, -0.14],
                'jazz': [-0.13, -0.11, -0.1, -0.1, 0.14, 0.2, -0.18, 0.0, 0.24, 0.22, 0.2, 0.0, 0.0, 0.0, 0.0],
                'pop': [-0.02, -0.01, 0.08, 0.1, 0.15, 0.1, 0.03, -0.02, -0.035, -0.05, -0.05, -0.05, -0.05, -0.05, -0.05],
                'treble': [-0.1, -0.12, -0.12, -0.12, -0.08, -0.04, 0.0, 0.3, 0.34, 0.4, 0.35, 0.3, 0.3, 0.3, 0.3]
            }
            preset = args[0].lower()
            if preset in ['reset', 'default']:
                await player.reset_equalizer()
            elif preset in presets:
                # enumerate() yields (band_index, gain) pairs, the shape set_gains expects.
                gain_list = enumerate(presets[preset])
                await player.set_gains(*gain_list)
            elif preset == '--list':
                em = discord.Embed(title=':control_knobs: EQ presets:', color=discord.Color(0xFF6EFF), description='\n'.join(presets.keys()))
                return await ctx.send(embed=em)
            else:
                return await ctx.send('Invalid preset specified :control_knobs:\nType `~eq --list` for all presets')
        elif len(args) == 2:
            try:
                band = int(args[0])
                gain = float(args[1])
                await player.set_gain(band, gain)
            except ValueError:
                return await ctx.send('Specify valid `band gain` values :control_knobs:')
        else:
            return await ctx.send('Specify `band gain` or `preset` :control_knobs:')
        # Print final EQ settings
        eq_frequencies = [f"`{gain}`" for gain in player.equalizer]
        await ctx.send(":level_slider: Current Values:\n" + ' '.join(eq_frequencies))
def setup(bot):
bot.add_cog(Music(bot)) |
from werkzeug.wrappers import Request
from flask import Flask, redirect, url_for, request, flash
from flask_sqlalchemy import SQLAlchemy
import os
import requests
import random
from contact_form import ContactForm
from flask_dance.contrib.github import make_github_blueprint, github
from flask_dance.contrib.gitlab import make_gitlab_blueprint, gitlab
from discord_webhook import DiscordWebhook
import flask
from os import path
from flask_dance.consumer import oauth_authorized
# Flask application and OAuth/database configuration.
app = Flask(__name__, template_folder="templates", static_folder='static')
# Various environmental variables
app.secret_key = os.environ.get("FLASK_SECRET")
discord_url = os.environ.get("WEBHOOK")
FLASK_HOST = os.environ.get("FLASK_HOST")
app.config["GITHUB_OAUTH_CLIENT_ID"] = os.environ.get(
    "REPOSI_GITHUB_CLIENT_ID")
app.config["GITHUB_OAUTH_CLIENT_SECRET"] = os.environ.get(
    "REPOSI_GITHUB_SECRET")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
# Github blueprint
github_bp = make_github_blueprint()
github_bp.redirect_url = FLASK_HOST+"/docs"
app.register_blueprint(github_bp, url_prefix="/login")
app.config["GITLAB_OAUTH_CLIENT_ID"] = os.environ.get(
    "REPOSI_GITLAB_ID")
app.config["GITLAB_OAUTH_CLIENT_SECRET"] = os.environ.get(
    "REPOSI_GITLAB_SECRET")
gitlab_bp = make_gitlab_blueprint()
app.register_blueprint(gitlab_bp, url_prefix="/login")
# NOTE(review): duplicate of the SQLALCHEMY_TRACK_MODIFICATIONS line above.
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
# Database model & connection
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db.sqlite"
db = SQLAlchemy(app)
git_token = os.environ.get("GITHUB_TOKEN")
# NOTE(review): this prints a secret API token to stdout/logs — consider removing.
print(git_token)
@oauth_authorized.connect
def redirect_to_docs(blueprint, token):
    """OAuth success handler: persist the token, ensure a User row, redirect to /docs.

    Creates the user (with a fresh random API token) and pings the Discord
    webhook on first login.
    """
    blueprint.token = token
    resp = github.get("/user")
    login = resp.json()['login']
    user = User.query.filter_by(username=login).first()
    if not user:
        user = User(username=login,
                    github_hash=str(random.getrandbits(128)))
        db.session.add(user)
        db.session.commit()
        DiscordWebhook(url=discord_url, content=f"New user: {login}. Check out profile at https://github.com/{login}").execute()
    git_hash = user.github_hash
    # BUG FIX: the original f-strings nested double quotes inside a
    # double-quoted literal ({resp.json()["login"]}), a SyntaxError on
    # Python < 3.12. Hoisting `login` removes the nesting entirely.
    return redirect(f"/docs?username={login}&token={git_hash}")
class User(db.Model):
    """A registered user and the token that guards their widget endpoint."""
    id = db.Column(db.Integer, primary_key=True)
    # GitHub login name.
    username = db.Column(db.String(80), unique=True, nullable=False)
    # Random 128-bit token (as decimal string) required to render the widget.
    github_hash = db.Column(db.String(80), unique=True, nullable=True)
    # gitlab_hash = db.Column(db.String(80), unique=True, nullable=True)
    def __repr__(self):
        return '<User %r>' % self.username
# Create the SQLite schema on first run; reuse the existing file otherwise.
if path.exists("db.sqlite") == True:
    print("Database exists")
else:
    print("Creating database")
    db.create_all()
# Routing and repository parsing
@app.route("/signup")
def signup():
    """Start GitHub OAuth if needed, then redirect the user to their /docs page."""
    # BUG FIX: check authorization *before* calling the GitHub API; the
    # original issued `github.get("/user")` first, i.e. an unauthenticated
    # request whenever the visitor had not logged in yet.
    if not github.authorized:
        return redirect(url_for("github.login"))
    resp = github.get("/user")
    print(resp)
    assert resp.ok
    user = User.query.filter_by(username=resp.json()['login']).first()
    username = resp.json()['login']
    github_hash = user.github_hash
    return redirect(f"/docs?username={username}&token={github_hash}")
def parseGithubRepos(repos):
    """Convert raw GitHub API repository dicts into the widget's repo format.

    Honours the current request's `forks=hidden` query parameter by dropping
    forked repositories. Returns the repos sorted by star count, descending.
    """
    parsedRepos = []
    displayForks = request.args.get('forks')
    for repo in repos:
        parsedRepo = {
            'name': repo['full_name'],
            'description': repo['description'],
            'issues': repo['open_issues'],
            'owner': repo['owner']['login'],
            'stars': repo['stargazers_count'],
            'forks': repo['forks_count'],
            'url': repo['html_url'],
            'size': repo['size'],
            'language': repo['language']
        }
        # Idiom fix: `is None` instead of `== None`.
        if parsedRepo['description'] is None:
            parsedRepo['description'] = "No description provided"
        # Keep the repo unless forks are hidden and this one is a fork
        # (collapses the original nested if/else into a single condition).
        if displayForks != 'hidden' or not repo['fork']:
            parsedRepos.append(parsedRepo)
    parsedRepos.sort(key=lambda repo: repo["stars"], reverse=True)
    return parsedRepos
@app.route("/widget/<username>")
def thing(username):
    """Render the repository widget for *username* when the API token matches.

    Pages through the GitHub API (100 repos per request) until an empty page
    or an error payload is returned.
    """
    token = request.args.get('token')
    db.session.commit()
    user = User.query.filter_by(username=username).first()
    theme = request.args.get('theme')
    if theme != 'dark':
        theme = 'light'
    # Guard clauses instead of the original nested if/else pyramid.
    if user is None:
        return "User not found"
    if user.github_hash != token:
        return "You do not have a valid api token"
    repos = []
    page = 1
    resp = requests.get(
        f"https://api.github.com/users/{username}/repos?per_page=100&page={page}", auth=("Uzay-G", git_token)).json()
    while resp != []:
        print(resp, "\n\n\n")
        repos += parseGithubRepos(resp)
        page += 1
        resp = requests.get(
            f"https://api.github.com/users/{username}/repos?per_page=100&page={page}", auth=("Uzay-G", git_token)).json()
        # A dict (rather than a list) is the GitHub error payload.
        if isinstance(resp, dict):
            # BUG FIX: the original `f'ERROR: {resp['message']}'` nested single
            # quotes inside a single-quoted literal — a SyntaxError on Python < 3.12.
            return f"ERROR: {resp['message']}"
    return flask.render_template('widget.html', repos=repos, theme=theme)
@app.route("/")
def serveMain():
    """Render the landing page with the contact form."""
    form = ContactForm()
    return flask.render_template('index.html', form=form)
@app.route("/docs")
def docs():
    """Render the docs page, passing through the username/token query parameters."""
    form = ContactForm()
    return flask.render_template('docs.html', username=request.args.get('username'), token=request.args.get("token"), hostname=FLASK_HOST, form=form)
@app.route("/contact", methods=['POST'])
def contact():
    """Validate the contact form and forward submissions to the Discord webhook."""
    form = ContactForm()
    if form.validate_on_submit():
        flash('Your message was received')
        DiscordWebhook(url=discord_url, content=f"Contact @hackathon: name: {form.name.data}, email: {form.email.data}, message: {form.message.data}").execute()
    else:
        flash('Your message was not transferred correctly.')
    return redirect('/')
if __name__ == '__main__':
app.run(debug=True)
# @app.route("/signup_gitlab")
# def signup_gitlab():
# resp = gitlab.get("/user")
# if not gitlab.authorized:
# return redirect(url_for("gitlab.login"))
# print(resp)
# assert resp.ok
# user = User.query.filter_by(username=resp.json()['login']).first()
# username = resp.json()['login']
# gitlab_hash = user.gitlab_hash
# return redirect(f"/docs?username={username}&token={gitlab_hash}")
# def getGitlabRepoLanguage(repo):
# resp = requests.get(f"https://gitlab.com/api/v4/projects/{repo["id"]}/languages").json()
# return next(iter(resp))
# def parseGitlabRepos(repos):
# parsedRepos = []
# for repo in repos:
# parsedRepo = {}
# parsedRepo['name'] = repo['name']
# if repo['description'] == None:
# parsedRepo['description'] = "No description provided"
# else:
# parsedRepo['description'] = repo['description']
# try:
# parsedRepo['issues'] = repo['open_issues_count']
# except:
# parsedRepo['issues'] = 0
# parsedRepo['owner'] = repo['namespace']['name']
# parsedRepo['stars'] = repo['star_count']
# parsedRepo['forks'] = repo['forks_count']
# parsedRepo['url'] = repo['web_url']
# try:
# parsedRepo['size'] = repo['statistics']['repository_size'],
# except:
# parsedRepo['size'] = None
# parsedRepo['language'] = getGitlabRepoLanguage(repo)
# parsedRepos.append(parsedRepo)
# return parsedRepos
| from werkzeug.wrappers import Request
from flask import Flask, redirect, url_for, request, flash
from flask_sqlalchemy import SQLAlchemy
import os
import requests
import random
from contact_form import ContactForm
from flask_dance.contrib.github import make_github_blueprint, github
from flask_dance.contrib.gitlab import make_gitlab_blueprint, gitlab
from discord_webhook import DiscordWebhook
import flask
from os import path
from flask_dance.consumer import oauth_authorized
app = Flask(__name__, template_folder="templates", static_folder='static')
# Various environmental variables
app.secret_key = os.environ.get("FLASK_SECRET")
discord_url = os.environ.get("WEBHOOK")
FLASK_HOST = os.environ.get("FLASK_HOST")
app.config["GITHUB_OAUTH_CLIENT_ID"] = os.environ.get(
"REPOSI_GITHUB_CLIENT_ID")
app.config["GITHUB_OAUTH_CLIENT_SECRET"] = os.environ.get(
"REPOSI_GITHUB_SECRET")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
# Github blueprint
github_bp = make_github_blueprint()
github_bp.redirect_url = FLASK_HOST+"/docs"
app.register_blueprint(github_bp, url_prefix="/login")
app.config["GITLAB_OAUTH_CLIENT_ID"] = os.environ.get(
"REPOSI_GITLAB_ID")
app.config["GITLAB_OAUTH_CLIENT_SECRET"] = os.environ.get(
"REPOSI_GITLAB_SECRET")
gitlab_bp = make_gitlab_blueprint()
app.register_blueprint(gitlab_bp, url_prefix="/login")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
# Database model & connection
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db.sqlite"
db = SQLAlchemy(app)
git_token = os.environ.get("GITHUB_TOKEN")
print(git_token)
@oauth_authorized.connect
def redirect_to_docs(blueprint, token):
blueprint.token = token
user = []
git_hash = []
resp = github.get("/user")
user = User.query.filter_by(username=resp.json()['login']).first()
if not user:
user = User(username=resp.json()['login'],
github_hash=str(random.getrandbits(128)))
db.session.add(user)
db.session.commit()
DiscordWebhook(url=discord_url, content=f"New user: {resp.json()['login']}. Check out profile at https://github.com/{resp.json()['login']}").execute()
git_hash = user.github_hash
return redirect(f"/docs?username={resp.json()['login']}&token={git_hash}")
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
github_hash = db.Column(db.String(80), unique=True, nullable=True)
# gitlab_hash = db.Column(db.String(80), unique=True, nullable=True)
def __repr__(self):
return '<User %r>' % self.username
if path.exists("db.sqlite") == True:
print("Database exists")
else:
print("Creating database")
db.create_all()
# Routing and repository parsing
@app.route("/signup")
def signup():
    """Start GitHub OAuth if needed, then redirect the user to their /docs page."""
    # BUG FIX: check authorization *before* calling the GitHub API; the
    # original issued `github.get("/user")` first, i.e. an unauthenticated
    # request whenever the visitor had not logged in yet.
    if not github.authorized:
        return redirect(url_for("github.login"))
    resp = github.get("/user")
    print(resp)
    assert resp.ok
    user = User.query.filter_by(username=resp.json()['login']).first()
    username = resp.json()['login']
    github_hash = user.github_hash
    return redirect(f"/docs?username={username}&token={github_hash}")
def parseGithubRepos(repos):
parsedRepos = []
displayForks = request.args.get('forks')
for repo in repos:
parsedRepo = {
'name': repo['full_name'],
'description': repo['description'],
'issues': repo['open_issues'],
'owner': repo['owner']['login'],
'stars': repo['stargazers_count'],
'forks': repo['forks_count'],
'url': repo['html_url'],
'size': repo['size'],
'language': repo['language']
}
if parsedRepo['description'] == None:
parsedRepo['description'] = "No description provided"
if displayForks == 'hidden':
if repo['fork'] == False:
parsedRepos.append(parsedRepo)
else:
parsedRepos.append(parsedRepo)
# if repo['fork'] == False: parsedRepos.append(parsedRepo)
parsedRepos.sort(key=lambda repo: repo["stars"], reverse=True)
return parsedRepos
@app.route("/widget/<username>")
def thing(username):
token = request.args.get('token')
db.session.commit()
user = User.query.filter_by(username=username).first()
resp = {}
theme = request.args.get('theme')
if theme != 'dark': theme = 'light'
if user == None:
return "User not found"
else:
repos = []
if user.github_hash == token:
page = 1
resp = requests.get(
f"https://api.github.com/users/{username}/repos?per_page=100&page=1", auth=("Uzay-G", git_token)).json()
while resp != []:
print(resp, "\n\n\n")
repos += parseGithubRepos(resp)
page += 1
resp = requests.get(
f"https://api.github.com/users/{username}/repos?per_page=100&page={page}", auth=("Uzay-G", git_token)).json()
if type(resp) is dict:
return f'ERROR: {resp["message"]}'
return flask.render_template('widget.html', repos=repos, theme=theme)
else:
return "You do not have a valid api token"
@app.route("/")
def serveMain():
form = ContactForm()
return flask.render_template('index.html', form=form)
@app.route("/docs")
def docs():
form = ContactForm()
return flask.render_template('docs.html', username=request.args.get('username'), token=request.args.get("token"), hostname=FLASK_HOST, form=form)
@app.route("/contact", methods=['POST'])
def contact():
form = ContactForm()
if form.validate_on_submit():
flash('Your message was received')
DiscordWebhook(url=discord_url, content=f"Contact @hackathon: name: {form.name.data}, email: {form.email.data}, message: {form.message.data}").execute()
else:
flash('Your message was not transferred correctly.')
return redirect('/')
if __name__ == '__main__':
app.run(debug=True)
# @app.route("/signup_gitlab")
# def signup_gitlab():
# resp = gitlab.get("/user")
# if not gitlab.authorized:
# return redirect(url_for("gitlab.login"))
# print(resp)
# assert resp.ok
# user = User.query.filter_by(username=resp.json()['login']).first()
# username = resp.json()['login']
# gitlab_hash = user.gitlab_hash
# return redirect(f"/docs?username={username}&token={gitlab_hash}")
# def getGitlabRepoLanguage(repo):
# resp = requests.get(f"https://gitlab.com/api/v4/projects/{repo['id']}/languages").json()
# return next(iter(resp))
# def parseGitlabRepos(repos):
# parsedRepos = []
# for repo in repos:
# parsedRepo = {}
# parsedRepo['name'] = repo['name']
# if repo['description'] == None:
# parsedRepo['description'] = "No description provided"
# else:
# parsedRepo['description'] = repo['description']
# try:
# parsedRepo['issues'] = repo['open_issues_count']
# except:
# parsedRepo['issues'] = 0
# parsedRepo['owner'] = repo['namespace']['name']
# parsedRepo['stars'] = repo['star_count']
# parsedRepo['forks'] = repo['forks_count']
# parsedRepo['url'] = repo['web_url']
# try:
# parsedRepo['size'] = repo['statistics']['repository_size'],
# except:
# parsedRepo['size'] = None
# parsedRepo['language'] = getGitlabRepoLanguage(repo)
# parsedRepos.append(parsedRepo)
# return parsedRepos
|
from colab_ssh.utils.packages.installer import create_deb_installer
from colab_ssh.utils.ui.render_html import render_template
from subprocess import Popen, PIPE
import shlex
from colab_ssh._command import run_command, run_with_pipe
import os
import time
from colab_ssh.get_tunnel_config import get_argo_tunnel_config
from .utils.expose_env_variable import expose_env_variable
import importlib
import sys
import signal
deb_install = create_deb_installer()
def launch_ssh_cloudflared(
        password="",
        verbose=False,
        prevent_interrupt=False,
        kill_other_processes=False):
    """Expose the machine's SSH server through a cloudflared tunnel.

    Downloads cloudflared on first use, installs/configures openssh-server,
    sets the root password, then retries tunnel creation up to 10 times with
    a growing back-off. Returns the tunnel connection info on success and
    raises if no tunnel could be established.
    """
    # Kill any cloudflared process if running
    if kill_other_processes:
        os.system("kill -9 $(ps aux | grep 'cloudflared' | awk '{print $2}')")
    # Download cloudflared
    if not os.path.isfile("cloudflared"):
        run_command(
            "wget -q -nc https://bin.equinox.io/c/VdrWdbjqyF/cloudflared-stable-linux-amd64.tgz")
        run_command("tar zxf cloudflared-stable-linux-amd64.tgz")
    else:
        if verbose:
            print("DEBUG: Skipping cloudflared installation")
    # Install the openssh server
    deb_install("openssh-server", verbose=verbose)
    # Set the password
    run_with_pipe("echo root:{} | chpasswd".format(password))
    # Configure the openSSH server
    run_command("mkdir -p /var/run/sshd")
    os.system("echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config")
    if password:
        os.system('echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config')
    # Re-export the runtime's environment variables into SSH sessions.
    expose_env_variable("LD_LIBRARY_PATH")
    expose_env_variable("COLAB_TPU_ADDR")
    expose_env_variable("COLAB_GPU")
    expose_env_variable("TBE_CREDS_ADDR")
    expose_env_variable("TF_FORCE_GPU_ALLOW_GROWTH")
    expose_env_variable("TPU_NAME")
    expose_env_variable("XRT_TPU_CONFIG")
    os.system('service ssh start')
    extra_params = []
    info = None
    # Prepare the cloudflared command.
    # BUG FIX: the original f-string nested single quotes inside a
    # single-quoted literal ({' '.join(...)}), a SyntaxError on Python < 3.12.
    popen_command = (
        './cloudflared tunnel --url ssh://localhost:22 '
        '--logfile ./cloudflared.log --metrics localhost:45678 '
        + ' '.join(extra_params))
    preexec_fn = None
    if prevent_interrupt:
        # nohup + a new process group keep the tunnel alive across interrupts.
        popen_command = 'nohup ' + popen_command
        preexec_fn = os.setpgrp
    popen_command = shlex.split(popen_command)
    # Initial sleep time before polling the tunnel's metrics endpoint.
    sleep_time = 2.0
    # Create tunnel and retry if failed.
    for _ in range(10):
        proc = Popen(popen_command, stdout=PIPE, preexec_fn=preexec_fn)
        if verbose:
            print(f"DEBUG: Cloudflared process: PID={proc.pid}")
        time.sleep(sleep_time)
        try:
            info = get_argo_tunnel_config()
            break
        except Exception as e:
            os.kill(proc.pid, signal.SIGKILL)
            if verbose:
                print(f"DEBUG: Exception: {e.args[0]}")
                print(f"DEBUG: Killing {proc.pid}. Retrying...")
            # Increase the sleep time and try again.
            sleep_time *= 1.5
    if verbose:
        print("DEBUG:", info)
    if info:
        # stdout is intentionally left open: the tunnel keeps running.
        return info
    print(proc.stdout.readlines())
    # BUG FIX: close the pipe *before* raising — the original close() sat
    # after the raise statement and was unreachable.
    proc.stdout.close()
    raise Exception(
        "It looks like something went wrong, please make sure your token is valid")
| from colab_ssh.utils.packages.installer import create_deb_installer
from colab_ssh.utils.ui.render_html import render_template
from subprocess import Popen, PIPE
import shlex
from colab_ssh._command import run_command, run_with_pipe
import os
import time
from colab_ssh.get_tunnel_config import get_argo_tunnel_config
from .utils.expose_env_variable import expose_env_variable
import importlib
import sys
import signal
deb_install = create_deb_installer()
def launch_ssh_cloudflared(
        password="",
        verbose=False,
        prevent_interrupt=False,
        kill_other_processes=False):
    """Set up an SSH server on this host and expose it through a Cloudflare tunnel.

    Args:
        password: root password to set; password authentication is enabled
            only when this is truthy.
        verbose: print DEBUG information about the tunnel process.
        prevent_interrupt: run cloudflared under nohup in its own process
            group so a notebook interrupt does not kill the tunnel.
        kill_other_processes: kill any already-running cloudflared first.

    Returns:
        The tunnel connection info from get_argo_tunnel_config().

    Raises:
        Exception: if no tunnel could be established after 10 attempts.
    """
    # Kill any cloudflared process if running
    if kill_other_processes:
        os.system("kill -9 $(ps aux | grep 'cloudflared' | awk '{print $2}')")

    # Download cloudflared only once; the binary is cached in the CWD.
    if not os.path.isfile("cloudflared"):
        run_command(
            "wget -q -nc https://bin.equinox.io/c/VdrWdbjqyF/cloudflared-stable-linux-amd64.tgz")
        run_command("tar zxf cloudflared-stable-linux-amd64.tgz")
    elif verbose:
        print("DEBUG: Skipping cloudflared installation")

    # Install and configure the openssh server.
    deb_install("openssh-server", verbose=verbose)
    # Set the password
    run_with_pipe("echo root:{} | chpasswd".format(password))
    run_command("mkdir -p /var/run/sshd")
    os.system("echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config")
    if password:
        os.system('echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config')

    # Forward useful Colab/TPU environment variables into SSH sessions.
    expose_env_variable("LD_LIBRARY_PATH")
    expose_env_variable("COLAB_TPU_ADDR")
    expose_env_variable("COLAB_GPU")
    expose_env_variable("TBE_CREDS_ADDR")
    expose_env_variable("TF_FORCE_GPU_ALLOW_GROWTH")
    expose_env_variable("TPU_NAME")
    expose_env_variable("XRT_TPU_CONFIG")

    os.system('service ssh start')

    extra_params = []
    info = None

    # Prepare the cloudflared command
    popen_command = f'./cloudflared tunnel --url ssh://localhost:22 --logfile ./cloudflared.log --metrics localhost:45678 {" ".join(extra_params)}'
    preexec_fn = None
    if prevent_interrupt:
        popen_command = 'nohup ' + popen_command
        preexec_fn = os.setpgrp
    popen_command = shlex.split(popen_command)

    # Create the tunnel, retrying with exponential backoff on failure.
    sleep_time = 2.0
    for _ in range(10):
        proc = Popen(popen_command, stdout=PIPE, preexec_fn=preexec_fn)
        if verbose:
            print(f"DEBUG: Cloudflared process: PID={proc.pid}")
        time.sleep(sleep_time)
        try:
            info = get_argo_tunnel_config()
            break
        except Exception as e:
            os.kill(proc.pid, signal.SIGKILL)
            # BUG FIX: close the killed process's pipe so retries do not leak
            # file descriptors.
            proc.stdout.close()
            if verbose:
                print(f"DEBUG: Exception: {e.args[0]}")
                print(f"DEBUG: Killing {proc.pid}. Retrying...")
        # Increase the sleep time and try again
        sleep_time *= 1.5

    if verbose:
        print("DEBUG:", info)
    if info:
        # The tunnel process stays alive; its stdout pipe is intentionally
        # left open while the tunnel is in use.
        return info
    # BUG FIX: the original called proc.stdout.close() *after* the raise,
    # which was unreachable; close the pipe before raising instead.
    print(proc.stdout.readlines())
    proc.stdout.close()
    raise Exception(
        "It looks like something went wrong, please make sure your token is valid")
|
import logging
import os
import subprocess
import tempfile
from argparse import Namespace
from pathlib import Path
from .error import EvalError
from .manifest import Repo, load_manifest, update_lock_file
from .path import EVALREPO_PATH, LOCK_PATH, MANIFEST_PATH, nixpkgs_path
from .prefetch import prefetch
logger = logging.getLogger(__name__)
def eval_repo(repo: Repo, repo_path: Path) -> None:
    """Evaluate a repository's nix expressions in a restricted sandbox.

    Writes a small ``default.nix`` wrapper into a temporary directory and
    runs ``nix-env`` over it with restrict-eval enabled.

    Raises:
        EvalError: if the evaluation fails or does not finish within 10s.
    """
    with tempfile.TemporaryDirectory() as d:
        eval_path = Path(d).joinpath("default.nix")
        with open(eval_path, "w") as f:
            # Nix ignores indentation, so the exact layout of this template
            # is not significant.
            f.write(
                f"""
with import <nixpkgs> {{}};
import {EVALREPO_PATH} {{
  name = "{repo.name}";
  url = "{repo.url}";
  src = {repo_path.joinpath(repo.file)};
  inherit pkgs lib;
}}
"""
            )
        # fmt: off
        cmd = [
            "nix-env",
            "-f", str(eval_path),
            "-qa", "*",
            "--meta",
            "--xml",
            "--allowed-uris", "https://static.rust-lang.org",
            "--option", "restrict-eval", "true",
            "--option", "allow-import-from-derivation", "true",
            "--drv-path",
            "--show-trace",
            "-I", f"nixpkgs={nixpkgs_path()}",
            "-I", str(repo_path),
            "-I", str(eval_path),
            "-I", str(EVALREPO_PATH),
        ]
        # fmt: on
        logger.info(f"Evaluate repository {repo.name}")
        env = dict(PATH=os.environ["PATH"], NIXPKGS_ALLOW_UNSUPPORTED_SYSTEM="1")
        proc = subprocess.Popen(cmd, env=env, stdout=subprocess.DEVNULL)
        try:
            res = proc.wait(10)
        except subprocess.TimeoutExpired:
            # BUG FIX: kill the evaluator instead of leaving it running after
            # the timeout (also fixes the "timed out of after" typo).
            proc.kill()
            raise EvalError(f"evaluation for {repo.name} timed out after 10 seconds")
        if res != 0:
            # BUG FIX: the original nested double quotes inside a
            # double-quoted f-string — a SyntaxError before Python 3.12.
            raise EvalError(f"{repo.name} does not evaluate:\n$ {' '.join(cmd)}")
def update(repo: Repo) -> Repo:
    """Prefetch *repo*, evaluate it when a source tree is available, and record its lock."""
    prefetched, version, source_path = prefetch(repo)
    # A falsy path means nothing new was fetched, so there is nothing to evaluate.
    if source_path:
        eval_repo(prefetched, source_path)
    prefetched.locked_version = version
    return prefetched
def update_command(args: Namespace) -> None:
    """Update every repository in the manifest and rewrite the lock file.

    Evaluation failures are fatal only for repositories that are not yet in
    the lock file (i.e. newly added ones); known repositories just log the
    error and keep their previous locked version.
    """
    logging.basicConfig(level=logging.INFO)
    manifest = load_manifest(MANIFEST_PATH, LOCK_PATH)
    for repo in manifest.repos:
        try:
            update(repo)
        except EvalError as err:
            if repo.locked_version is None:
                # likely a repository added in a pull request, make it fatal then
                logger.error(
                    f"repository {repo.name} failed to evaluate: {err}. This repo is not yet in our lock file!!!!"
                )
                raise
            # Do not print stack traces
            logger.error(f"repository {repo.name} failed to evaluate: {err}")
        except Exception:
            # for non-evaluation errors we want the stack trace
            # BUG FIX: "Failed to updated" -> "Failed to update".
            logger.exception(f"Failed to update repository {repo.name}")
    update_lock_file(manifest.repos, LOCK_PATH)
| import logging
import os
import subprocess
import tempfile
from argparse import Namespace
from pathlib import Path
from .error import EvalError
from .manifest import Repo, load_manifest, update_lock_file
from .path import EVALREPO_PATH, LOCK_PATH, MANIFEST_PATH, nixpkgs_path
from .prefetch import prefetch
logger = logging.getLogger(__name__)
def eval_repo(repo: Repo, repo_path: Path) -> None:
    """Evaluate a repository's nix expressions with ``nix-env`` under restrict-eval.

    Raises:
        EvalError: if evaluation fails or exceeds the 10 second budget.
    """
    with tempfile.TemporaryDirectory() as d:
        eval_path = Path(d).joinpath("default.nix")
        with open(eval_path, "w") as f:
            # Nix ignores whitespace, so template layout is cosmetic only.
            f.write(
                f"""
with import <nixpkgs> {{}};
import {EVALREPO_PATH} {{
  name = "{repo.name}";
  url = "{repo.url}";
  src = {repo_path.joinpath(repo.file)};
  inherit pkgs lib;
}}
"""
            )
        # fmt: off
        cmd = [
            "nix-env",
            "-f", str(eval_path),
            "-qa", "*",
            "--meta",
            "--xml",
            "--allowed-uris", "https://static.rust-lang.org",
            "--option", "restrict-eval", "true",
            "--option", "allow-import-from-derivation", "true",
            "--drv-path",
            "--show-trace",
            "-I", f"nixpkgs={nixpkgs_path()}",
            "-I", str(repo_path),
            "-I", str(eval_path),
            "-I", str(EVALREPO_PATH),
        ]
        # fmt: on
        logger.info(f"Evaluate repository {repo.name}")
        env = dict(PATH=os.environ["PATH"], NIXPKGS_ALLOW_UNSUPPORTED_SYSTEM="1")
        proc = subprocess.Popen(cmd, env=env, stdout=subprocess.DEVNULL)
        try:
            res = proc.wait(10)
        except subprocess.TimeoutExpired:
            # BUG FIX: terminate the still-running evaluator on timeout
            # (and fix the "timed out of after" typo in the message).
            proc.kill()
            raise EvalError(f"evaluation for {repo.name} timed out after 10 seconds")
        if res != 0:
            raise EvalError(f"{repo.name} does not evaluate:\n$ {' '.join(cmd)}")
def update(repo: Repo) -> Repo:
    """Prefetch *repo*, evaluate it if a source tree was fetched, and store the locked version."""
    repo, locked_version, repo_path = prefetch(repo)
    # repo_path is falsy when prefetch found nothing new to evaluate.
    if repo_path:
        eval_repo(repo, repo_path)
    repo.locked_version = locked_version
    return repo
def update_command(args: Namespace) -> None:
    """CLI entry point: update all repositories in the manifest and write the lock file."""
    logging.basicConfig(level=logging.INFO)
    manifest = load_manifest(MANIFEST_PATH, LOCK_PATH)
    for repo in manifest.repos:
        try:
            update(repo)
        except EvalError as err:
            # A repo without a locked version was never successfully evaluated
            # before (likely just added in a PR), so its failure is fatal.
            if repo.locked_version is None:
                # likely a repository added in a pull request, make it fatal then
                logger.error(
                    f"repository {repo.name} failed to evaluate: {err}. This repo is not yet in our lock file!!!!"
                )
                raise
            # Do not print stack traces
            logger.error(f"repository {repo.name} failed to evaluate: {err}")
        except Exception:
            # for non-evaluation errors we want the stack trace
            logger.exception(f"Failed to updated repository {repo.name}")
    update_lock_file(manifest.repos, LOCK_PATH)
|
import json
import os
import httpx
import time
def get_cities(cfg):
    """Return the names of all cities configured under cfg['cities']."""
    return cfg["cities"].keys()
def get_usable_bounding_boxes(nominal_boxes, cfg):
    """Recursively split bounding boxes until each is small and sparse enough to page through.

    A box is subdivided along its longer axis when it is larger than
    cfg['max_area'], or when it still holds at least cfg['density_limit']
    images and is larger than cfg['min_area'].  Empty boxes are dropped.

    Args:
        nominal_boxes: list of [min_lon, min_lat, max_lon, max_lat] boxes.
        cfg: configuration mapping with the area/density/paging knobs.

    Returns:
        The list of leaf boxes worth querying for images.
    """
    FLICKR_PUBLIC = get_secret('flickr_api_key')
    FLICKR_SECRET = get_secret('flickr_api_secret')
    flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
    boxes = []
    working = nominal_boxes.copy()
    license = "1,2,3,4,5,6,7,8,9,10"
    extras = 'description,license,date_upload,date_taken,original_format,'
    extras += 'last_update,geo,tags, machine_tags, o_dims, media,'
    extras += 'url_m,url_n,url_z,url_c,url_l,url_o'
    city_total = 0
    while len(working) > 0:
        box = working.pop()
        str_box = ",".join(map(str, box))
        box_area = est_area(box)
        divide_flag = False
        if box_area > cfg["max_area"]:
            # Too big to query at all; force a split without hitting the API.
            total_imgs = -1
            divide_flag = True
        else:
            time.sleep(cfg["time_delay"])  # API rate limiting
            try:
                box_pics = flickr.photos.search(
                    privacy_filter=PRIVACY_FILTER, bbox=str_box,
                    content_type=CONTENT_TYPE,
                    has_geo=HAS_GEO, geo_context=GEO_CTX,
                    license=license, extras=extras, per_page=cfg["page_size"])
                total_imgs = int(box_pics['photos']['total'])
                divide_flag = (total_imgs >= cfg["density_limit"] and box_area > cfg["min_area"])
            except FlickrError as err:
                # BUG FIX: the original printed the undefined name `bbox`
                # (NameError) and left total_imgs unbound, crashing in the
                # elif below; drop the box on error instead.
                total_imgs = 0
                print(f'Error retrieving intitial page for bounding box {str_box}')
                print(f'{err}')
        if divide_flag:
            new_box_1 = box.copy()
            new_box_2 = box.copy()
            if box[2] - box[0] > box[3] - box[1]:  # wide: split on longitude
                border = (box[0] + box[2]) / 2
                new_box_1[2] = border
                new_box_2[0] = border
            else:  # tall: split on latitude
                border = (box[1] + box[3]) / 2
                new_box_1[3] = border
                new_box_2[1] = border
            working.append(new_box_1)
            working.append(new_box_2)
        elif total_imgs == 0:
            continue
        else:
            city_total += total_imgs
            boxes.append(box)
    print(city_total)
    return boxes
def read_metadata(file_root, cities, url_field):
    """Load previously saved per-city metadata.json files.

    Args:
        file_root: directory holding one sub-directory per city.
        cities: iterable of city names (used verbatim as directory names).
        url_field: image field whose value identifies an image's URL.

    Returns:
        (metadata, urls): metadata maps city -> loaded JSON blob (only for
        cities that have a metadata file); urls maps every city to the set
        of image URLs already known for it.
    """
    metadata = {}
    urls = {}
    for city in cities:
        urls[city] = set()
        file_path = f'{file_root}/{city}/metadata.json'
        if os.path.exists(file_path):
            with open(file_path, 'r') as f:
                loaded = json.load(f)
            for img in loaded['images']:
                # BUG FIX: the original checked membership against the `urls`
                # dict (keyed by city names) instead of this city's URL set,
                # so a URL equal to a city name was wrongly skipped.
                if url_field in img and img[url_field] not in urls[city]:
                    urls[city].add(img[url_field])
            metadata[city] = loaded
    return metadata, urls
def get_known_urls(file_root, cities):
    """Read each city's urls.txt (if present) into a set, keyed by the raw city name."""
    urls = {}
    for key in cities:
        # Directory names replace spaces with underscores.
        folder = key.replace(" ", "_")
        path = f'{file_root}/{folder}/urls.txt'
        known = set()
        if os.path.exists(path):
            with open(path, 'r') as handle:
                known = {entry.strip() for entry in handle.readlines()}
        urls[key] = known
    return urls
def write_urls(urls, cfg):
    """Write each city's known URLs to /data/<city>/urls, one per line.

    Cities configured with download == 'photos' are skipped.
    """
    for key in cfg['cities']:
        city = key.replace(" ", "_")
        directory = os.path.join('/data', city)
        if not os.path.exists(directory):
            os.mkdir(directory)
        file_path = os.path.join(directory, 'urls')
        if cfg['cities'][key]['download'] != 'photos':
            # BUG FIX: callers key `urls` by the raw config key, not by the
            # underscored directory name; the old urls[city] lookup raised
            # KeyError for city names containing spaces. Fall back to the
            # underscored name for backward compatibility.
            city_urls = urls[key] if key in urls else urls.get(city, set())
            print(f"printing {len(city_urls)} urls for city {city} at {file_path}")
            try:
                # The with-block flushes and closes the file; the original's
                # explicit flush()/close() calls were redundant.
                with open(file_path, 'w') as f:
                    for url in city_urls:
                        f.write(f'{url}\n')
            except Exception as err:
                print(f"error: {err} opening file {file_path}")
def get_metadata(cfg, file_root):
    """Load cached metadata, optionally refresh it from Flickr, and persist the results.

    Returns:
        Mapping of city -> metadata blob.
    """
    cities = get_cities(cfg)
    url_field = cfg['url_field']
    # BUG FIX: the original also called get_known_urls() here and immediately
    # discarded its result; read_metadata() is the source of truth for URLs.
    metadata, urls = read_metadata(file_root, cities, url_field)
    if cfg['refresh_metadata']:
        print('fetching metadata')
        metadata, urls = fetch_metadata(cfg, metadata, urls)
        print('writing metadata')
        write_metadata(metadata, cfg, file_root)
        print('writing url list')
        write_urls(urls, cfg)
    return metadata
def fetch_metadata(cfg, metadata, urls):
    """Query Flickr for geo-tagged images in every configured city.

    Images whose URL is not already in `urls` are appended to
    metadata[city]['images']; the per-city URL sets are updated in place.

    Returns:
        The updated (metadata, urls) pair.
    """
    FLICKR_PUBLIC = get_secret('flickr_api_key')
    FLICKR_SECRET = get_secret('flickr_api_secret')
    flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
    license = "1,2,3,4,5,6,7,8,9,10"
    extras = 'description,license,date_upload,date_taken,original_format,'
    extras += 'last_update,geo,tags, machine_tags, o_dims, media,'
    extras += 'url_m,url_n,url_z,url_c,url_l,url_o'
    for key in cfg['cities']:
        count = 0
        dl_limit = cfg['cities'][key]['download_limit']
        # Only subdivide bounding boxes for large downloads; a single Flickr
        # query cannot page past a few thousand results.
        if dl_limit != -1 and dl_limit > 1000:
            boxes = get_usable_bounding_boxes(list(cfg['cities'][key]['bounding_boxes']), cfg)
        else:
            boxes = list(cfg['cities'][key]['bounding_boxes'])
        city_urls = urls[key]
        if key not in metadata:
            metadata[key] = {}
            metadata[key]['image_count'] = 0
            metadata[key]['images'] = []
        total = 0
        for bbox in tqdm(boxes, desc=key):
            bbox_str = ",".join(map(str, bbox))
            time.sleep(cfg["time_delay"])  # API rate limiting
            total_pages = 0
            try:
                city_pics = flickr.photos.search(
                    privacy_filter=PRIVACY_FILTER, bbox=bbox_str,
                    content_type=CONTENT_TYPE,
                    has_geo=HAS_GEO, geo_context=GEO_CTX,
                    license=license, extras=extras, per_page=cfg["page_size"])
                total_pages = city_pics['photos']['pages']
                total += int(city_pics['photos']['total'])
            except FlickrError as err:
                print(f'Error retrieving intitial page for bounding box {bbox}')
                print(f'{err}')
            # BUG FIX: Flickr pages are 1-indexed, so range(1, total_pages)
            # skipped the final page; iterate through total_pages inclusive.
            for p in range(1, total_pages + 1):
                try:
                    time.sleep(cfg["time_delay"])
                    city_pics = flickr.photos.search(
                        privacy_filter=PRIVACY_FILTER, bbox=bbox_str,
                        content_type=CONTENT_TYPE,
                        has_geo=HAS_GEO, geo_context=GEO_CTX,
                        license=license, extras=extras, per_page=cfg["page_size"],
                        page=p)
                    for ph in city_pics['photos']['photo']:
                        if dl_limit != -1 and count > dl_limit:
                            break
                        if cfg["url_field"] in ph and ph[cfg["url_field"]] not in city_urls:
                            metadata[key]['images'].append(ph)
                            city_urls.add(ph[cfg["url_field"]])
                            metadata[key]['image_count'] += 1
                            count += 1
                except FlickrError as err:
                    print(f'Error retrieving page {p} for bounding box {bbox}')
                    print(f'{err}')
    return metadata, urls
def write_metadata(metadata, cfg, file_root):
    """Dump each city's metadata blob to <file_root>/<city>/metadata.json."""
    for key in metadata:
        directory = os.path.join(file_root, key.replace(" ", "_"))
        if not os.path.exists(directory):
            os.mkdir(directory)
        # Cities configured to download only photos get no metadata file.
        if cfg['cities'][key]['download'] != 'photos':
            with open(os.path.join(directory, 'metadata.json'), 'w') as f:
                json.dump(metadata[key], f, indent=2)
| import json
import os
import httpx
import time
def get_cities(cfg):
    """Return a view of the configured city names."""
    cities = cfg['cities']
    return cities.keys()
def get_usable_bounding_boxes(nominal_boxes, cfg):
    """Split bounding boxes until each is small/sparse enough to page through Flickr results.

    Boxes above cfg['max_area'], or dense boxes above cfg['min_area'], are
    bisected along their longer axis; empty boxes are dropped.

    Returns:
        The list of leaf boxes worth querying for images.
    """
    FLICKR_PUBLIC = get_secret('flickr_api_key')
    FLICKR_SECRET = get_secret('flickr_api_secret')
    flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
    boxes = []
    working = nominal_boxes.copy()
    license = "1,2,3,4,5,6,7,8,9,10"
    extras = 'description,license,date_upload,date_taken,original_format,'
    extras += 'last_update,geo,tags, machine_tags, o_dims, media,'
    extras += 'url_m,url_n,url_z,url_c,url_l,url_o'
    city_total = 0
    while len(working) > 0:
        box = working.pop()
        str_box = ",".join(map(str, box))
        box_area = est_area(box)
        divide_flag = False
        if box_area > cfg["max_area"]:
            # Too big to query; force a split without hitting the API.
            total_imgs = -1
            divide_flag = True
        else:
            time.sleep(cfg["time_delay"])  # API rate limiting
            try:
                box_pics = flickr.photos.search(
                    privacy_filter=PRIVACY_FILTER, bbox=str_box,
                    content_type=CONTENT_TYPE,
                    has_geo=HAS_GEO, geo_context=GEO_CTX,
                    license=license, extras=extras, per_page=cfg["page_size"])
                total_imgs = int(box_pics['photos']['total'])
                divide_flag = (total_imgs >= cfg["density_limit"] and box_area > cfg["min_area"])
            except FlickrError as err:
                # BUG FIX: the original referenced the undefined name `bbox`
                # (NameError) and left total_imgs unbound for the elif below;
                # drop the box on error instead.
                total_imgs = 0
                print(f'Error retrieving intitial page for bounding box {str_box}')
                print(f'{err}')
        if divide_flag:
            new_box_1 = box.copy()
            new_box_2 = box.copy()
            if box[2] - box[0] > box[3] - box[1]:  # wide: split on longitude
                border = (box[0] + box[2]) / 2
                new_box_1[2] = border
                new_box_2[0] = border
            else:  # tall: split on latitude
                border = (box[1] + box[3]) / 2
                new_box_1[3] = border
                new_box_2[1] = border
            working.append(new_box_1)
            working.append(new_box_2)
        elif total_imgs == 0:
            continue
        else:
            city_total += total_imgs
            boxes.append(box)
    print(city_total)
    return boxes
def read_metadata(file_root, cities, url_field):
    """Load saved per-city metadata.json files.

    Returns:
        (metadata, urls): metadata maps city -> loaded JSON blob (only for
        cities with a metadata file); urls maps every city to the set of
        image URLs already known for it.
    """
    metadata = {}
    urls = {}
    for city in cities:
        urls[city] = set()
        file_path = f'{file_root}/{city}/metadata.json'
        if os.path.exists(file_path):
            with open(file_path, 'r') as f:
                loaded = json.load(f)
            for img in loaded['images']:
                # BUG FIX: membership must be tested against this city's URL
                # set, not against the `urls` dict of city names.
                if url_field in img and img[url_field] not in urls[city]:
                    urls[city].add(img[url_field])
            metadata[city] = loaded
    return metadata, urls
def get_known_urls(file_root, cities):
    """Return a dict mapping each city key to the set of URLs listed in its urls.txt file."""
    urls = {}
    for key in cities:
        # Directory names use underscores in place of spaces.
        city = key.replace(" ", "_")
        file_path = f'{file_root}/{city}/urls.txt'
        city_urls = set()
        if os.path.exists(file_path):
            with open(file_path, 'r') as f:
                lines = f.readlines()
                for line in lines:
                    city_urls.add(line.strip())
        # Keyed by the original (possibly space-containing) city key.
        urls[key] = city_urls
    return urls
def write_urls(urls, cfg):
    """Write each city's known URLs to /data/<city>/urls, one per line.

    Cities with download == 'photos' are skipped.
    """
    for key in cfg['cities']:
        city = key.replace(" ", "_")
        directory = os.path.join('/data', city)
        if not os.path.exists(directory):
            os.mkdir(directory)
        file_path = os.path.join(directory, 'urls')
        if cfg['cities'][key]['download'] != 'photos':
            # BUG FIX: `urls` is keyed by the raw config key; the old
            # urls[city] lookup raised KeyError for names with spaces.
            city_urls = urls[key] if key in urls else urls.get(city, set())
            print(f"printing {len(city_urls)} urls for city {city} at {file_path}")
            try:
                # with-block handles flushing/closing; the original's manual
                # flush()/close() were redundant.
                with open(file_path, 'w') as f:
                    for url in city_urls:
                        f.write(f'{url}\n')
            except Exception as err:
                print(f"error: {err} opening file {file_path}")
def get_metadata(cfg, file_root):
    """Load cached metadata, optionally refresh it from Flickr, and persist the results."""
    cities = get_cities(cfg)
    url_field = cfg['url_field']
    # BUG FIX: dropped the get_known_urls() call whose result was discarded
    # immediately; read_metadata() supplies the authoritative URL sets.
    metadata, urls = read_metadata(file_root, cities, url_field)
    if cfg['refresh_metadata']:
        print('fetching metadata')
        metadata, urls = fetch_metadata(cfg, metadata, urls)
        print('writing metadata')
        write_metadata(metadata, cfg, file_root)
        print('writing url list')
        write_urls(urls, cfg)
    return metadata
def fetch_metadata(cfg, metadata, urls):
    """Query Flickr for geo-tagged images in every configured city.

    New images (URL not already in `urls`) are appended to
    metadata[city]['images']; the per-city URL sets are updated in place.

    Returns:
        The updated (metadata, urls) pair.
    """
    FLICKR_PUBLIC = get_secret('flickr_api_key')
    FLICKR_SECRET = get_secret('flickr_api_secret')
    flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
    license = "1,2,3,4,5,6,7,8,9,10"
    extras = 'description,license,date_upload,date_taken,original_format,'
    extras += 'last_update,geo,tags, machine_tags, o_dims, media,'
    extras += 'url_m,url_n,url_z,url_c,url_l,url_o'
    for key in cfg['cities']:
        count = 0
        dl_limit = cfg['cities'][key]['download_limit']
        # Subdivide bounding boxes only for large downloads; a single Flickr
        # query cannot page past a few thousand results.
        if dl_limit != -1 and dl_limit > 1000:
            boxes = get_usable_bounding_boxes(list(cfg['cities'][key]['bounding_boxes']), cfg)
        else:
            boxes = list(cfg['cities'][key]['bounding_boxes'])
        city_urls = urls[key]
        if key not in metadata:
            metadata[key] = {}
            metadata[key]['image_count'] = 0
            metadata[key]['images'] = []
        total = 0
        for bbox in tqdm(boxes, desc=key):
            bbox_str = ",".join(map(str, bbox))
            time.sleep(cfg["time_delay"])  # API rate limiting
            total_pages = 0
            try:
                city_pics = flickr.photos.search(
                    privacy_filter=PRIVACY_FILTER, bbox=bbox_str,
                    content_type=CONTENT_TYPE,
                    has_geo=HAS_GEO, geo_context=GEO_CTX,
                    license=license, extras=extras, per_page=cfg["page_size"])
                total_pages = city_pics['photos']['pages']
                total += int(city_pics['photos']['total'])
            except FlickrError as err:
                print(f'Error retrieving intitial page for bounding box {bbox}')
                print(f'{err}')
            # BUG FIX: pages are 1-indexed, so range(1, total_pages) skipped
            # the final page; iterate through total_pages inclusive.
            for p in range(1, total_pages + 1):
                try:
                    time.sleep(cfg["time_delay"])
                    city_pics = flickr.photos.search(
                        privacy_filter=PRIVACY_FILTER, bbox=bbox_str,
                        content_type=CONTENT_TYPE,
                        has_geo=HAS_GEO, geo_context=GEO_CTX,
                        license=license, extras=extras, per_page=cfg["page_size"],
                        page=p)
                    for ph in city_pics['photos']['photo']:
                        if dl_limit != -1 and count > dl_limit:
                            break
                        if cfg["url_field"] in ph and ph[cfg["url_field"]] not in city_urls:
                            metadata[key]['images'].append(ph)
                            city_urls.add(ph[cfg["url_field"]])
                            metadata[key]['image_count'] += 1
                            count += 1
                except FlickrError as err:
                    print(f'Error retrieving page {p} for bounding box {bbox}')
                    print(f'{err}')
    return metadata, urls
def write_metadata(metadata, cfg, file_root):
    """Persist per-city metadata as pretty-printed JSON under *file_root*."""
    for key, blob in metadata.items():
        directory = os.path.join(file_root, key.replace(" ", "_"))
        if not os.path.exists(directory):
            os.mkdir(directory)
        # Photo-only cities do not get a metadata file.
        if cfg['cities'][key]['download'] != 'photos':
            file_path = os.path.join(directory, 'metadata.json')
            with open(file_path, 'w') as f:
                json.dump(blob, f, indent=2)
|
from sklearn.cluster import MiniBatchKMeans
import numpy as np
import torch
from models import TransformerModel, Seq2SeqTransformer, generate_square_subsequent_mask
from models import LM_NAME, MLM_NAME, MT_NAME, NLAYERS, NUM2WORD
import os
from data_preprocessing import DATA_DIR_DEV, SAVE_DATA_MT_TRAIN
from data_preprocessing import SAVE_VOCAB_SRC, SAVE_VOCAB_TRG, PAD_WORD
import pickle
from torchtext.legacy.data import Dataset, BucketIterator
import pandas as pd
from analytics_helper import MostFreqToken, GetInter, GetMI, GetInterValues
from analytics_helper import MIN_SAMPLE_SIZE_DEV, MIN_SAMPLE_SIZE_FULL
from analytics_helper import N_FREQUENT_DEV, N_FREQUENT_FULL
from analytics_helper import N_CLUSTER_DEV, N_CLUSTER_FULL
from data_preprocessing import SAVE_MODEL_PATH, DEVELOPMENT_MODE
from MT_helpers import patch_trg, create_mask
# Run the analysis on GPU when one is available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Pick sample-size / clustering knobs depending on dev vs full mode.
if DEVELOPMENT_MODE:
    min_sample_size = MIN_SAMPLE_SIZE_DEV
    N_frequent = N_FREQUENT_DEV
    N_cluster = N_CLUSTER_DEV
    data_dir = DATA_DIR_DEV
else:
    min_sample_size = MIN_SAMPLE_SIZE_FULL
    N_frequent = N_FREQUENT_FULL
    N_cluster = N_CLUSTER_FULL
    # NOTE(review): DATA_DIR_FULL is never imported above (only DATA_DIR_DEV
    # is), so this branch raises NameError — confirm the intended import.
    data_dir = DATA_DIR_FULL
# Mutual-information results per model, keyed by model file stem. The MLM
# gets separate input entries for unmasked (SAME) and masked (DIFF) inputs.
# BUG FIX: the original nested double quotes inside double-quoted f-strings,
# which is a SyntaxError before Python 3.12.
MI_results_INP = {LM_NAME.split('.')[0]: [],
                  f"{MLM_NAME.split('.')[0]}_SAME": [],
                  f"{MLM_NAME.split('.')[0]}_DIFF": [],
                  MT_NAME.split('.')[0]: []}
MI_results_OUT = {LM_NAME.split('.')[0]: [],
                  MLM_NAME.split('.')[0]: []}
# Models whose *input* representations we analyse.
MODELS_INP = [LM_NAME, MLM_NAME, MT_NAME]
# Load the vocabulary fields and MT training examples produced by
# data_preprocessing.
vocab_pkl_src = os.path.join(data_dir, SAVE_VOCAB_SRC)
vocab_pkl_trg = os.path.join(data_dir, SAVE_VOCAB_TRG)
train_pkl = os.path.join(data_dir, SAVE_DATA_MT_TRAIN)
field_src = pickle.load(open(vocab_pkl_src, 'rb'))
field_trg = pickle.load(open(vocab_pkl_trg, 'rb'))
src_pad_idx = field_src.vocab.stoi[PAD_WORD]
trg_pad_idx = field_trg.vocab.stoi[PAD_WORD]
train_examples = pickle.load(open(train_pkl, 'rb'))
fields = {'src': field_src, 'trg': field_trg}
train = Dataset(examples=train_examples, fields=fields)
# batch_size=1 and shuffle=False so every model sees identical batches.
train_iter = BucketIterator(train, batch_size=1, device=device, train=True, shuffle=False)
# Restrict the analysis to the N_frequent most frequent source tokens that
# occur at least min_sample_size times.
frequent_vocab = MostFreqToken(field_src, N_frequent, min_sample_size)
# token_reps_list saves NLAYERS dicts, for ith dict, the key is the token ID,
# the value is the representation of the ID in the ith layer.
# Per-model per-layer containers: token_reps_model_*[model][layer][token_id]
# collects representation vectors for each frequent token.
token_reps_model_INP = {}
token_reps_model_OUT = {}
for this_model_name in MODELS_INP:
    token_reps_list = []
    for _ in range(NLAYERS):
        token_reps_list.append({token_id: [] for token_id in frequent_vocab})
    if this_model_name.startswith("MLM"):
        # BUG FIX: the original used double quotes inside double-quoted
        # f-strings (SyntaxError before Python 3.12).
        # NOTE(review): all three keys share the *same* list object, so
        # SAME/DIFF/OUT representations are pooled together — confirm this
        # aliasing is intentional.
        token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_SAME"] = token_reps_list
        token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_DIFF"] = token_reps_list
        token_reps_model_OUT[this_model_name.split('.')[0]] = token_reps_list
    elif this_model_name.startswith("LM"):
        token_reps_model_INP[this_model_name.split('.')[0]] = token_reps_list
        token_reps_model_OUT[this_model_name.split('.')[0]] = token_reps_list
    elif this_model_name.startswith("MT"):
        token_reps_model_INP[this_model_name.split('.')[0]] = token_reps_list
# Per-model counters of how many representation samples have been collected
# for each frequent token.
sample_size_dict_INP = {}
sample_size_dict_OUT = {}
for this_model_name in MODELS_INP:
    stem = this_model_name.split('.')[0]
    if this_model_name.startswith("MLM"):
        # BUG FIX: the original nested double quotes inside double-quoted
        # f-strings (SyntaxError before Python 3.12).
        sample_size_dict_INP[f"{stem}_SAME"] = {tok: 0 for tok in frequent_vocab}
        sample_size_dict_INP[f"{stem}_DIFF"] = {tok: 0 for tok in frequent_vocab}
        sample_size_dict_OUT[stem] = {tok: 0 for tok in frequent_vocab}
    elif this_model_name.startswith("LM"):
        sample_size_dict_INP[stem] = {tok: 0 for tok in frequent_vocab}
        sample_size_dict_OUT[stem] = {tok: 0 for tok in frequent_vocab}
    elif this_model_name.startswith("MT"):
        sample_size_dict_INP[stem] = {tok: 0 for tok in frequent_vocab}
for batch in train_iter:
    # --- Build the input/target variants each model architecture expects ---
    # MT input: the raw source sequence.
    src_seq_MT = batch.src.to(device)
    target_sample_INP_MT = GetInter(src_seq_MT.detach().numpy(), frequent_vocab)
    # MLM "SAME" input: the unmasked source sequence.
    src_seq_MLM_SAME = batch.src.to(device)
    target_sample_INP_MLM_SAME = GetInter(src_seq_MLM_SAME.detach().numpy(), frequent_vocab)
    src_seq = batch.src.to(device)
    # MLM "DIFF" input: 15% of non-pad tokens replaced with the mask id 103.
    src_seq_MLM_DIFF = src_seq.clone()
    src_mask = generate_square_subsequent_mask(src_seq.size(0))
    rand_value = torch.rand(src_seq.shape)
    # BUG FIX: the original compared the *builtin* `input` function against
    # the pad index, which is always True — padding tokens were masked too.
    rand_mask = (rand_value < 0.15) * (src_seq != src_pad_idx)
    mask_idx = (rand_mask.flatten() == True).nonzero().view(-1)
    src_seq_MLM_DIFF = src_seq_MLM_DIFF.flatten()
    src_seq_MLM_DIFF[mask_idx] = 103
    src_seq_MLM_DIFF = src_seq_MLM_DIFF.view(src_seq.size())
    target_sample_INP_MLM_DIFF = GetInter(src_seq_MLM_DIFF.detach().numpy(), frequent_vocab)
    # LM input drops the last token; the LM target is shifted by one.
    src_seq_LM = batch.src[:-1]
    target_sample_INP_LM = GetInter(src_seq_LM.detach().numpy(), frequent_vocab)
    trg = batch.trg
    trg_seq_MT, gold = map(lambda x: x.to(device), patch_trg(trg, trg_pad_idx))
    trg_seq_MT = trg_seq_MT.to(device)
    trg_seq_LM = src_seq[1:].to(device)
    target_sample_OUT_LM = GetInter(trg_seq_LM.detach().numpy(), frequent_vocab)
    # MLM output targets are the original (unmasked) tokens.
    trg_seq_MLM = src_seq
    target_sample_OUT_MLM = GetInter(trg_seq_MLM.detach().numpy(), frequent_vocab)
for this_model_name in MODELS_INP:
this_model = torch.load(os.path.join(SAVE_MODEL_PATH,this_model_name))
this_model.eval()
if this_model_name.startswith("MT") and len(target_sample_INP_MT)>0:
src_mask, trg_mask, src_padding_mask, trg_padding_mask = create_mask(src_seq_MT, trg_seq_MT, src_pad_idx, trg_pad_idx)
_ = this_model(src=src_seq_MT,
src_mask=src_mask,
trg=trg_seq_MT,
tgt_mask=trg_mask,
src_padding_mask=src_padding_mask,
tgt_padding_mask=trg_padding_mask,
memory_key_padding_mask=src_padding_mask)
token_reps_list=token_reps_model_INP[MT_NAME.split('.')[0]]
this_sample_size_dict=sample_size_dict_INP[this_model_name.split('.')[0]]
GetInterValues(this_model, target_sample_INP_MT, NUM2WORD, token_reps_list, this_sample_size_dict, min_sample_size, NLAYERS)
elif this_model_name.startswith("MLM"):
if len(target_sample_INP_MLM_SAME)>0:
src_mask = generate_square_subsequent_mask(src_seq_MLM_SAME.size(0))
src_padding_mask = (src_seq_MLM_SAME == src_pad_idx).transpose(0, 1)
_ = this_model(src_seq_MLM_SAME, src_mask.to(device),src_padding_mask.to(device))
token_reps_list=token_reps_model_INP[f"{MLM_NAME.split(".")[0]}_SAME"]
this_sample_size_dict=sample_size_dict_INP[f"{this_model_name.split(".")[0]}_SAME"]
GetInterValues(this_model, target_sample_INP_MLM_SAME, NUM2WORD, token_reps_list, this_sample_size_dict, min_sample_size, NLAYERS)
if len(target_sample_INP_MLM_DIFF)>0 and len(target_sample_OUT_MLM)>0:
src_mask = generate_square_subsequent_mask(src_seq_MLM_DIFF.size(0))
src_padding_mask = (src_seq_MLM_DIFF == src_pad_idx).transpose(0, 1)
_ = this_model(src_seq_MLM_DIFF.to(device), src_mask.to(device),src_padding_mask.to(device))
token_reps_list_INP=token_reps_model_INP[f"{MLM_NAME.split(".")[0]}_DIFF"]
this_sample_size_dict_INP=sample_size_dict_INP[f"{this_model_name.split(".")[0]}_DIFF"]
token_reps_list_OUT=token_reps_model_OUT[MLM_NAME.split('.')[0]]
this_sample_size_dict_OUT=sample_size_dict_OUT[this_model_name.split('.')[0]]
GetInterValues(this_model, target_sample_INP_MLM_DIFF, NUM2WORD, token_reps_list_INP, this_sample_size_dict_INP, min_sample_size, NLAYERS)
GetInterValues(this_model, target_sample_OUT_MLM, NUM2WORD, token_reps_list_OUT, this_sample_size_dict_OUT, min_sample_size, NLAYERS)
elif this_model_name.startswith("LM") and len(target_sample_INP_LM)>0 and len(target_sample_OUT_LM)>0:
src_mask = generate_square_subsequent_mask(src_seq_LM.size(0))
src_padding_mask = (src_seq_LM == src_pad_idx).transpose(0, 1)
_ = this_model(src_seq_LM, src_mask.to(device),src_padding_mask.to(device))
token_reps_list_INP=token_reps_model_INP[this_model_name.split('.')[0]]
token_reps_list_OUT=token_reps_model_OUT[this_model_name.split('.')[0]]
this_sample_size_dict_INP=sample_size_dict_INP[this_model_name.split('.')[0]]
this_sample_size_dict_OUT=sample_size_dict_OUT[this_model_name.split('.')[0]]
GetInterValues(this_model, target_sample_INP_LM, NUM2WORD, token_reps_list_INP, this_sample_size_dict_INP, min_sample_size, NLAYERS)
GetInterValues(this_model, target_sample_OUT_LM, NUM2WORD, token_reps_list_OUT, this_sample_size_dict_OUT, min_sample_size, NLAYERS)
# we only need to keep the minimum sample size that has been collected
this_min_sample_size_inp=float('inf')
this_min_sample_size_out=float('inf')
for model_name, this_sample_size_dict in sample_size_dict_INP.items():
for token_id, size in this_sample_size_dict.items():
if size<this_min_sample_size_inp:
this_min_sample_size_inp=size
for model_name, this_sample_size_dict in sample_size_dict_OUT.items():
for token_id, size in this_sample_size_dict.items():
if size<this_min_sample_size_out:
this_min_sample_size_out=size
is_enough=True
if this_min_sample_size_inp>=min_sample_size and this_min_sample_size_out>=min_sample_size:
for model_name, reps_dict in token_reps_model_INP.items():
if is_enough is False:
break
for this_layer in reps_dict:
if is_enough is False:
break
for token_id, rep_list in this_layer.items():
if len(rep_list)<min_sample_size:
is_enough=False
break
for model_name, reps_list in token_reps_model_OUT.items():
if is_enough is False:
break
for this_layer in reps_dict:
if is_enough is False:
break
for token_id, rep_list in this_layer.items():
if len(rep_list)<min_sample_size:
is_enough=False
break
else:
is_enough=False
if is_enough:
break
# Abort when the corpus ran out before enough samples were gathered.
# BUG FIX: replaced `assert 1==0` (stripped under `python -O`) with a raise.
if is_enough is False:
    raise RuntimeError("We have not collected enough data!")
# --- Cluster the collected representations and compute MI per layer ---
for this_model_name in MODELS_INP:
    if this_model_name.startswith("MLM"):
        # BUG FIX: quote nesting in the f-string keys (SyntaxError < 3.12).
        token_reps_list = token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_SAME"]
        result_list = MI_results_INP[f"{MLM_NAME.split('.')[0]}_SAME"]
        GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
        token_reps_list = token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_DIFF"]
        result_list = MI_results_INP[f"{MLM_NAME.split('.')[0]}_DIFF"]
        GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
        token_reps_list = token_reps_model_OUT[MLM_NAME.split('.')[0]]
        result_list = MI_results_OUT[MLM_NAME.split('.')[0]]
        GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
    elif this_model_name.startswith("MT"):
        token_reps_list = token_reps_model_INP[this_model_name.split('.')[0]]
        result_list = MI_results_INP[this_model_name.split('.')[0]]
        GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
    elif this_model_name.startswith("LM"):
        token_reps_list = token_reps_model_INP[this_model_name.split('.')[0]]
        result_list = MI_results_INP[this_model_name.split('.')[0]]
        GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
        # BUG FIX: the LM branch read token_reps_model_OUT[MLM...] — a
        # copy-paste slip; use this model's own OUT representations.
        token_reps_list = token_reps_model_OUT[this_model_name.split('.')[0]]
        result_list = MI_results_OUT[this_model_name.split('.')[0]]
        GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
print("result", MI_results_INP)
print("result", MI_results_OUT)
import os
import pickle

import numpy as np
import pandas as pd
import torch
from sklearn.cluster import MiniBatchKMeans
from torchtext.legacy.data import Dataset, BucketIterator

from analytics_helper import MostFreqToken, GetInter, GetMI, GetInterValues
from analytics_helper import MIN_SAMPLE_SIZE_DEV, MIN_SAMPLE_SIZE_FULL
from analytics_helper import N_FREQUENT_DEV, N_FREQUENT_FULL
from analytics_helper import N_CLUSTER_DEV, N_CLUSTER_FULL
# NOTE(review): DATA_DIR_FULL added — it is referenced when DEVELOPMENT_MODE
# is False but was never imported (NameError). Assumes data_preprocessing
# defines it alongside DATA_DIR_DEV — confirm.
from data_preprocessing import DATA_DIR_DEV, DATA_DIR_FULL, SAVE_DATA_MT_TRAIN
from data_preprocessing import SAVE_VOCAB_SRC, SAVE_VOCAB_TRG, PAD_WORD
from data_preprocessing import SAVE_MODEL_PATH, DEVELOPMENT_MODE
from models import TransformerModel, Seq2SeqTransformer, generate_square_subsequent_mask
from models import LM_NAME, MLM_NAME, MT_NAME, NLAYERS, NUM2WORD
from MT_helpers import patch_trg, create_mask
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Development mode trades statistical power for speed: fewer required samples,
# fewer tracked tokens, fewer clusters, and the small development data set.
min_sample_size = MIN_SAMPLE_SIZE_DEV if DEVELOPMENT_MODE else MIN_SAMPLE_SIZE_FULL
N_frequent = N_FREQUENT_DEV if DEVELOPMENT_MODE else N_FREQUENT_FULL
N_cluster = N_CLUSTER_DEV if DEVELOPMENT_MODE else N_CLUSTER_FULL
data_dir = DATA_DIR_DEV if DEVELOPMENT_MODE else DATA_DIR_FULL
# Result accumulators for the mutual-information scores, keyed by model name
# with the file extension stripped. The MLM input side is measured twice:
# once on the unmasked sentence (_SAME) and once on the masked one (_DIFF).
MI_results_INP={LM_NAME.split('.')[0]:[],
                f"{MLM_NAME.split('.')[0]}_SAME":[],
                f"{MLM_NAME.split('.')[0]}_DIFF":[],
                MT_NAME.split('.')[0]:[]}
MI_results_OUT={LM_NAME.split('.')[0]:[],
                MLM_NAME.split('.')[0]:[]}
MODELS_INP=[LM_NAME, MLM_NAME, MT_NAME]

# Load the pickled source/target vocabularies and the MT training examples.
vocab_pkl_src = os.path.join(data_dir, SAVE_VOCAB_SRC)
vocab_pkl_trg = os.path.join(data_dir, SAVE_VOCAB_TRG)
train_pkl = os.path.join(data_dir, SAVE_DATA_MT_TRAIN)
# BUG FIX: the original used pickle.load(open(...)) and never closed the
# files; context managers release the handles deterministically.
with open(vocab_pkl_src, 'rb') as f:
    field_src = pickle.load(f)
with open(vocab_pkl_trg, 'rb') as f:
    field_trg = pickle.load(f)
src_pad_idx = field_src.vocab.stoi[PAD_WORD]
trg_pad_idx = field_trg.vocab.stoi[PAD_WORD]
with open(train_pkl, 'rb') as f:
    train_examples = pickle.load(f)
fields = {'src':field_src , 'trg':field_trg}
train = Dataset(examples=train_examples, fields=fields)
# batch_size=1 and shuffle=False so every model sees the same sentences in
# the same order across the analysis.
train_iter = BucketIterator(train, batch_size=1, device=device, train=True, shuffle=False)
# IDs of the N_frequent most frequent source tokens that occur at least
# min_sample_size times (presumed semantics of MostFreqToken — confirm).
frequent_vocab = MostFreqToken(field_src, N_frequent, min_sample_size)
# token_reps_model_*[key] holds NLAYERS dicts; the i-th dict maps a frequent
# token ID to the list of that token's representations collected at layer i.
def _new_token_reps():
    """Build a fresh per-layer {token_id: []} structure (one dict per layer)."""
    return [{token_id: [] for token_id in frequent_vocab} for _ in range(NLAYERS)]

token_reps_model_INP={}
token_reps_model_OUT={}
for this_model_name in MODELS_INP:
    if this_model_name.startswith("MLM"):
        # BUG FIX: the original built token_reps_list once and assigned the
        # SAME object to all three keys, so the _SAME, _DIFF, and OUT streams
        # all appended into the same lists and became indistinguishable.
        # Each key now receives its own independent structure.
        token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_SAME"]=_new_token_reps()
        token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_DIFF"]=_new_token_reps()
        token_reps_model_OUT[this_model_name.split('.')[0]]=_new_token_reps()
    elif this_model_name.startswith("LM"):
        # Same aliasing fix: LM input and output each get their own copy.
        token_reps_model_INP[this_model_name.split('.')[0]]=_new_token_reps()
        token_reps_model_OUT[this_model_name.split('.')[0]]=_new_token_reps()
    elif this_model_name.startswith("MT"):
        token_reps_model_INP[this_model_name.split('.')[0]]=_new_token_reps()
# Per-model sample counters: sample_size_dict_*[model_key][token_id] records
# how many representations of each frequent token have been collected so far.
sample_size_dict_INP={}
sample_size_dict_OUT={}
for this_model_name in MODELS_INP:
    model_key = this_model_name.split('.')[0]
    if this_model_name.startswith("MLM"):
        # The MLM has two input streams (unmasked / masked) plus an output one.
        sample_size_dict_INP[f"{model_key}_SAME"] = {tid: 0 for tid in frequent_vocab}
        sample_size_dict_INP[f"{model_key}_DIFF"] = {tid: 0 for tid in frequent_vocab}
        sample_size_dict_OUT[model_key] = {tid: 0 for tid in frequent_vocab}
    elif this_model_name.startswith("LM"):
        sample_size_dict_INP[model_key] = {tid: 0 for tid in frequent_vocab}
        sample_size_dict_OUT[model_key] = {tid: 0 for tid in frequent_vocab}
    elif this_model_name.startswith("MT"):
        # Only the encoder input side is tracked for the MT model.
        sample_size_dict_INP[model_key] = {tid: 0 for tid in frequent_vocab}
# Stream the training set one sentence per batch and build each model's input
# and target variants; the rest of this loop body runs the models on them.
for batch in train_iter:
    # MT encoder input: the raw source sentence.
    src_seq_MT = batch.src.to(device)
    # .cpu() added: .numpy() raises on CUDA tensors.
    target_sample_INP_MT = GetInter(src_seq_MT.detach().cpu().numpy(), frequent_vocab)
    # MLM "SAME" input: the unmasked sentence.
    src_seq_MLM_SAME = batch.src.to(device)
    target_sample_INP_MLM_SAME = GetInter(src_seq_MLM_SAME.detach().cpu().numpy(), frequent_vocab)
    src_seq = batch.src.to(device)
    # MLM "DIFF" input: BERT-style masking of ~15% of non-pad positions.
    # (An unused src_mask computed here in the original was dropped; every
    # branch below recomputes its own mask before use.)
    src_seq_MLM_DIFF = src_seq.clone()
    rand_value = torch.rand(src_seq.shape, device=src_seq.device)
    # BUG FIX: the original wrote `(input != src_pad_idx)` — comparing the
    # BUILTIN `input` function to an int, which is always True — so padding
    # positions could be masked too. Compare the actual token tensor instead.
    rand_mask = (rand_value < 0.15) & (src_seq != src_pad_idx)
    mask_idx = rand_mask.flatten().nonzero().view(-1)
    src_seq_MLM_DIFF = src_seq_MLM_DIFF.flatten()
    src_seq_MLM_DIFF[mask_idx] = 103  # presumably the [MASK] token id — TODO confirm against the vocab
    src_seq_MLM_DIFF = src_seq_MLM_DIFF.view(src_seq.size())
    target_sample_INP_MLM_DIFF = GetInter(src_seq_MLM_DIFF.detach().cpu().numpy(), frequent_vocab)
    # LM input: every token but the last; LM target: every token but the first.
    src_seq_LM = batch.src[:-1]
    target_sample_INP_LM = GetInter(src_seq_LM.detach().cpu().numpy(), frequent_vocab)
    trg = batch.trg
    trg_seq_MT, gold = map(lambda x: x.to(device), patch_trg(trg, trg_pad_idx))
    trg_seq_MT = trg_seq_MT.to(device)
    trg_seq_LM = src_seq[1:].to(device)
    target_sample_OUT_LM = GetInter(trg_seq_LM.detach().cpu().numpy(), frequent_vocab)
    # The MLM reconstruction target is the unmasked sentence itself.
    trg_seq_MLM = src_seq
    target_sample_OUT_MLM = GetInter(trg_seq_MLM.detach().cpu().numpy(), frequent_vocab)
    # Run every model on this batch and harvest per-layer representations of
    # the frequent tokens via GetInterValues.
    # NOTE(review): torch.load() runs for every model on EVERY batch; hoisting
    # the model loads above the batch loop would avoid repeated disk reads —
    # left unchanged here to preserve behavior.
    for this_model_name in MODELS_INP:
        this_model = torch.load(os.path.join(SAVE_MODEL_PATH,this_model_name))
        this_model.eval()  # inference mode: disable dropout etc.
        if this_model_name.startswith("MT") and len(target_sample_INP_MT)>0:
            # Seq2seq MT model: build all four masks and run a full forward
            # pass so the hooks/intermediates are populated for collection.
            src_mask, trg_mask, src_padding_mask, trg_padding_mask = create_mask(src_seq_MT, trg_seq_MT, src_pad_idx, trg_pad_idx)
            _ = this_model(src=src_seq_MT,
                           src_mask=src_mask,
                           trg=trg_seq_MT,
                           tgt_mask=trg_mask,
                           src_padding_mask=src_padding_mask,
                           tgt_padding_mask=trg_padding_mask,
                           memory_key_padding_mask=src_padding_mask)
            token_reps_list=token_reps_model_INP[MT_NAME.split('.')[0]]
            this_sample_size_dict=sample_size_dict_INP[this_model_name.split('.')[0]]
            GetInterValues(this_model, target_sample_INP_MT, NUM2WORD, token_reps_list, this_sample_size_dict, min_sample_size, NLAYERS)
        elif this_model_name.startswith("MLM"):
            # Pass 1: the unmasked sentence ("SAME") — input-side reps only.
            if len(target_sample_INP_MLM_SAME)>0:
                src_mask = generate_square_subsequent_mask(src_seq_MLM_SAME.size(0))
                src_padding_mask = (src_seq_MLM_SAME == src_pad_idx).transpose(0, 1)
                _ = this_model(src_seq_MLM_SAME, src_mask.to(device),src_padding_mask.to(device))
                token_reps_list=token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_SAME"]
                this_sample_size_dict=sample_size_dict_INP[f"{this_model_name.split('.')[0]}_SAME"]
                GetInterValues(this_model, target_sample_INP_MLM_SAME, NUM2WORD, token_reps_list, this_sample_size_dict, min_sample_size, NLAYERS)
            # Pass 2: the randomly-masked sentence ("DIFF") — collects both
            # input-side reps and output-side reps (targets = unmasked tokens).
            if len(target_sample_INP_MLM_DIFF)>0 and len(target_sample_OUT_MLM)>0:
                src_mask = generate_square_subsequent_mask(src_seq_MLM_DIFF.size(0))
                src_padding_mask = (src_seq_MLM_DIFF == src_pad_idx).transpose(0, 1)
                _ = this_model(src_seq_MLM_DIFF.to(device), src_mask.to(device),src_padding_mask.to(device))
                token_reps_list_INP=token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_DIFF"]
                this_sample_size_dict_INP=sample_size_dict_INP[f"{this_model_name.split('.')[0]}_DIFF"]
                token_reps_list_OUT=token_reps_model_OUT[MLM_NAME.split('.')[0]]
                this_sample_size_dict_OUT=sample_size_dict_OUT[this_model_name.split('.')[0]]
                GetInterValues(this_model, target_sample_INP_MLM_DIFF, NUM2WORD, token_reps_list_INP, this_sample_size_dict_INP, min_sample_size, NLAYERS)
                GetInterValues(this_model, target_sample_OUT_MLM, NUM2WORD, token_reps_list_OUT, this_sample_size_dict_OUT, min_sample_size, NLAYERS)
        # Causal LM: shifted input/target pair; collect both sides in one pass.
        elif this_model_name.startswith("LM") and len(target_sample_INP_LM)>0 and len(target_sample_OUT_LM)>0:
            src_mask = generate_square_subsequent_mask(src_seq_LM.size(0))
            src_padding_mask = (src_seq_LM == src_pad_idx).transpose(0, 1)
            _ = this_model(src_seq_LM, src_mask.to(device),src_padding_mask.to(device))
            token_reps_list_INP=token_reps_model_INP[this_model_name.split('.')[0]]
            token_reps_list_OUT=token_reps_model_OUT[this_model_name.split('.')[0]]
            this_sample_size_dict_INP=sample_size_dict_INP[this_model_name.split('.')[0]]
            this_sample_size_dict_OUT=sample_size_dict_OUT[this_model_name.split('.')[0]]
            GetInterValues(this_model, target_sample_INP_LM, NUM2WORD, token_reps_list_INP, this_sample_size_dict_INP, min_sample_size, NLAYERS)
            GetInterValues(this_model, target_sample_OUT_LM, NUM2WORD, token_reps_list_OUT, this_sample_size_dict_OUT, min_sample_size, NLAYERS)
# we only need to keep the minimum sample size that has been collected
this_min_sample_size_inp=float('inf')
this_min_sample_size_out=float('inf')
for model_name, this_sample_size_dict in sample_size_dict_INP.items():
for token_id, size in this_sample_size_dict.items():
if size<this_min_sample_size_inp:
this_min_sample_size_inp=size
for model_name, this_sample_size_dict in sample_size_dict_OUT.items():
for token_id, size in this_sample_size_dict.items():
if size<this_min_sample_size_out:
this_min_sample_size_out=size
is_enough=True
if this_min_sample_size_inp>=min_sample_size and this_min_sample_size_out>=min_sample_size:
for model_name, reps_dict in token_reps_model_INP.items():
if is_enough is False:
break
for this_layer in reps_dict:
if is_enough is False:
break
for token_id, rep_list in this_layer.items():
if len(rep_list)<min_sample_size:
is_enough=False
break
for model_name, reps_list in token_reps_model_OUT.items():
if is_enough is False:
break
for this_layer in reps_dict:
if is_enough is False:
break
for token_id, rep_list in this_layer.items():
if len(rep_list)<min_sample_size:
is_enough=False
break
else:
is_enough=False
if is_enough:
break
# Fail loudly if the data ran out before enough samples were collected.
# (The original used `assert 1==0`, which is silently stripped under -O.)
if not is_enough:
    raise RuntimeError("We have not collected enough data!")
# Cluster each collected representation set and compute the per-layer mutual
# information, appending into the MI_results_* accumulators.
for this_model_name in MODELS_INP:
    if this_model_name.startswith("MLM"):
        token_reps_list=token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_SAME"]
        result_list=MI_results_INP[f"{MLM_NAME.split('.')[0]}_SAME"]
        GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
        token_reps_list=token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_DIFF"]
        result_list=MI_results_INP[f"{MLM_NAME.split('.')[0]}_DIFF"]
        GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
        token_reps_list=token_reps_model_OUT[MLM_NAME.split('.')[0]]
        result_list=MI_results_OUT[MLM_NAME.split('.')[0]]
        GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
    elif this_model_name.startswith("MT"):
        token_reps_list=token_reps_model_INP[this_model_name.split('.')[0]]
        result_list=MI_results_INP[this_model_name.split('.')[0]]
        GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
    elif this_model_name.startswith("LM"):
        token_reps_list=token_reps_model_INP[this_model_name.split('.')[0]]
        result_list=MI_results_INP[this_model_name.split('.')[0]]
        GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
        # BUG FIX: the original read token_reps_model_OUT[MLM_NAME...] here,
        # computing the LM's output MI from the MLM's representations while
        # writing into the LM's result list; use the LM's own key.
        token_reps_list=token_reps_model_OUT[this_model_name.split('.')[0]]
        result_list=MI_results_OUT[this_model_name.split('.')[0]]
        GetMI(token_reps_list, N_frequent, N_cluster, NLAYERS, result_list)
print("result",MI_results_INP)
print("result",MI_results_OUT)
# NOTE(review): the lines below are dataset-viewer boilerplate accidentally
# captured with this source file; they are not Python code and have been
# commented out so the module remains importable.
# | Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.