hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bfa12344ac2672d04af709f36d9c0e347ff905c3 | 5,698 | py | Python | core/loaders.py | nishantb21/yolov3 | 0305b01d805b7b2429e8304d5f27eff52bbd262e | [
"MIT"
] | null | null | null | core/loaders.py | nishantb21/yolov3 | 0305b01d805b7b2429e8304d5f27eff52bbd262e | [
"MIT"
] | null | null | null | core/loaders.py | nishantb21/yolov3 | 0305b01d805b7b2429e8304d5f27eff52bbd262e | [
"MIT"
] | null | null | null | import os
import torch
from tqdm import tqdm
from torchvision import transforms, datasets
from PIL import ImageOps
import numpy as np
class DatasetLoader:
    """Base class for dataset loaders.

    Subclasses are expected to construct a torch DataLoader in their
    constructor and store it on ``self.dataset_loader``.
    """

    def __init__(self):
        # No loader exists until a subclass builds one.
        self.dataset_loader = None

    def get_loader(self):
        """Return the wrapped DataLoader (None if not built yet)."""
        return self.dataset_loader
class ImageNetDatasetLoader(DatasetLoader):
    """Wraps an ImageNet-style image folder in a shuffling DataLoader."""

    def __init__(self, root_dir, height, width, batch_size):
        super(ImageNetDatasetLoader, self).__init__()
        # Standard ImageNet training pipeline:
        # random crop/flip augmentation + per-channel normalization.
        augmentation = transforms.Compose([
            transforms.RandomResizedCrop((height, width)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        folder_dataset = datasets.ImageFolder(
            root=root_dir, transform=augmentation
        )
        self.dataset_loader = torch.utils.data.DataLoader(
            folder_dataset, batch_size=batch_size, shuffle=True,
            num_workers=10
        )
class COCODataset(datasets.CocoDetection):
    """COCO detection dataset that emits YOLO-style target matrices.

    Each item is an (image_tensor, targets) pair, where the targets are one
    matrix per output scale encoding box offsets, sizes, objectness, and a
    one-hot class vector for each of the 3 anchors assigned to that scale.
    """

    def __init__(self, root, annFile, height, width, number_classes,
                 anchors, scaling_factors, transform):
        # BUG FIX: the super() calls in this class previously referenced the
        # unrelated COCODatasetLoader class, which raises a TypeError
        # ("obj must be an instance or subtype of type") at runtime.
        super(COCODataset, self).__init__(root, annFile)
        self.height = height
        self.width = width
        self.number_classes = number_classes  # (was assigned twice before)
        self.anchors = anchors  # (N, 2) array of anchor (width, height)
        self.scaling_factors = scaling_factors  # stride per output scale
        self.transform = transform

    def __len__(self):
        return super(COCODataset, self).__len__()

    def resize_image(self, image, bboxes):
        """Letterbox-resize a PIL image to (self.width, self.height).

        Preserves aspect ratio, pads the short side with black, and shifts
        and scales the [x, y, w, h] bboxes to match the new geometry.
        """
        curr_width, curr_height = image.size
        bboxes = np.array(bboxes)
        if curr_width > curr_height:
            # Landscape: fit width, pad top/bottom.
            scale = self.width / curr_width
            new_width = self.width
            new_height = int(scale * curr_height)
            padding = (self.height - new_height) // 2
            padding_tuple = (0, padding, 0, padding)
        else:
            # Portrait (or square): fit height, pad left/right.
            scale = self.height / curr_height
            new_height = self.height
            new_width = int(scale * curr_width)
            padding = (self.width - new_width) // 2
            padding_tuple = (padding, 0, padding, 0)
        image = image.resize((new_width, new_height))
        image = ImageOps.expand(image, border=padding_tuple, fill=0)
        # Only the (left, top) padding offsets the box origins.
        padding_tuple = np.array(padding_tuple[:2])
        bboxes = bboxes * scale
        bboxes[..., :2] += padding_tuple
        return image, bboxes

    def get_closest_anchor(self, bbox):
        """Return the index of the anchor closest (L1) to the bbox w/h."""
        def map_fn(anchor):
            return np.absolute(anchor[0] - bbox[2]) + np.absolute(
                anchor[1] - bbox[3])
        output = np.apply_along_axis(map_fn, 1, self.anchors)
        return np.argmin(output)

    def create_output_matrices(self, bboxes, classes):
        """Build one YOLO target matrix per output scale from the boxes."""
        output_sizes = [
            np.array([self.width, self.height]) // i
            for i in self.scaling_factors
        ]
        # 3 anchors per scale, each with (x, y, w, h, objectness, classes).
        outputs = [
            np.zeros(np.append(size, (4 + 1 + self.number_classes) * 3))
            for size in output_sizes
        ]
        for i in range(bboxes.shape[0]):
            bbox = bboxes[i]
            anchor_idx = self.get_closest_anchor(bboxes[i])
            factor_idx = anchor_idx // 3
            factor = self.scaling_factors[factor_idx]
            # BUG FIX: convert the top-left corner to the box center
            # (x + w/2, y + h/2). The previous code divided the corner by
            # half the size, which produced dimensionless nonsense for the
            # grid-cell index computation below.
            bbox[:2] = bbox[:2] + (bbox[2:] / 2)
            softmax_classes = np.zeros(self.number_classes)
            softmax_classes[classes[i] - 1] = 1.0  # category ids are 1-based
            scaled = bbox[:2] / factor
            indices = np.array(np.floor_divide(bbox[:2], factor), dtype="int")
            offset = (anchor_idx % 3) * (4 + 1 + self.number_classes)
            # Cell-relative center offset in [0, 1).
            outputs[factor_idx][
                indices[0], indices[1], offset:(offset + 2)
            ] = scaled - indices
            # Log-space size relative to the assigned anchor.
            outputs[factor_idx][
                indices[0], indices[1], (offset + 2):(offset + 4)
            ] = np.log(bbox[2:4] / self.anchors[anchor_idx])
            outputs[factor_idx][indices[0], indices[1], (offset + 4)] = 1.0
            outputs[factor_idx][
                indices[0], indices[1],
                (offset + 5):(offset + 5 + self.number_classes)
            ] = softmax_classes
        return outputs

    def __getitem__(self, idx):
        image, metadata = super(COCODataset, self).__getitem__(idx)
        # Keep only annotations whose category fits our class count.
        metadata = [
            (i["bbox"], i["category_id"])
            for i in metadata
            if i["category_id"] <= self.number_classes
        ]
        bboxes, classes = zip(*metadata)
        image, bboxes = self.resize_image(image, bboxes)
        outputs = self.create_output_matrices(bboxes, classes)
        # NOTE(review): ToTensor expects a PIL image or ndarray; `outputs`
        # is a *list* of ndarrays here -- confirm this conversion works as
        # intended for the training pipeline.
        return self.transform(image), transforms.ToTensor()(outputs)
class COCODatasetLoader(DatasetLoader):
    # NOTE(review): This class appears unfinished. The bare `data_transform`
    # expression below raises NameError at runtime, and
    # `super(DatasetLoader).__init__()` is almost certainly meant to be
    # `super(COCODatasetLoader, self).__init__()`. The __main__ block below
    # also indexes instances of this class (obj[0]), which neither this
    # class nor DatasetLoader supports -- confirm whether COCODataset was
    # the intended class there.
    def __init__(self, root, annFile, height, width, number_classes, anchors, scaling_factors):
        super(DatasetLoader).__init__()
        data_transform  # NameError: undefined name -- unfinished placeholder?
if __name__ == "__main__":
    # Manual smoke test: build a loader over a local COCO val2017 copy and
    # inspect the target matrices produced for the first sample.
    # NOTE(review): COCODatasetLoader.__init__ above is unfinished (the bare
    # `data_transform` raises NameError), so this block cannot currently run.
    root = "/data/nishantb/coco/val2017"
    annFile = "/data/nishantb/coco/annotations/instances_val2017.json"
    # Anchor boxes as (width, height) pixel pairs; 3 anchors per scale.
    anchors = np.array([[ 20.1142666 , 23.5920818 ],
                        [ 61.54272606, 64.4243818 ],
                        [ 78.43208441, 147.16067311],
                        [173.79737463, 100.61049946],
                        [130.99313932, 262.08766581],
                        [419.42090078, 136.95277798],
                        [267.46770012, 219.12108881],
                        [258.48081984, 397.93091738],
                        [461.50743771, 331.21772574]])
    factors = [8, 16, 32]  # stride of each output scale
    obj = COCODatasetLoader(root, annFile, 512, 512, 80, anchors, factors)
    outputs = obj[0][1]  # targets of the first sample, one matrix per scale
    for y in outputs:
        for j in range(3):  # 3 anchors per scale
            offset = (4 + 1 + 80) * j
            # Grid cells where the objectness flag is set for this anchor.
            idx, idy = np.where(y[..., offset + 4] > 0)
            print(idx.shape, idy.shape)
            print(np.squeeze(np.dstack((idx, idy))).shape)
            output = y[idx, idy, offset: offset + 4]
print(output.shape) | 41.897059 | 129 | 0.62285 |
c294ba377d1e35ec870bb075369fe4ec1dec6b13 | 3,911 | py | Python | pvdn/metrics/convert.py | larsOhne/pvdn | 0b6a8d0463909009152973f2edddbe49a054d4a2 | [
"CC0-1.0"
] | 13 | 2021-01-02T06:08:01.000Z | 2021-12-15T17:23:19.000Z | pvdn/metrics/convert.py | larsOhne/pvdn | 0b6a8d0463909009152973f2edddbe49a054d4a2 | [
"CC0-1.0"
] | 2 | 2021-01-28T07:53:28.000Z | 2021-07-29T15:50:40.000Z | pvdn/metrics/convert.py | larsOhne/pvdn | 0b6a8d0463909009152973f2edddbe49a054d4a2 | [
"CC0-1.0"
] | 1 | 2021-02-11T10:58:39.000Z | 2021-02-11T10:58:39.000Z | import json
import numpy as np
from tqdm import tqdm
import os
from warnings import warn
def gt_to_results_format(gt_bbox_dir):
    """
    Converts the ground truth bounding box annotation files to the format used for the custom
    evaluation metric.
    :param gt_bbox_dir: Directory path of the stored bounding box annotation files -> str
    :return: Dictionary containing the bounding boxes in the custom format, where each key is the
        unique image id containing a list of bounding boxes of the shape [nbr_boxes, 4]
    """
    path = os.path.abspath(gt_bbox_dir)
    box_files = os.listdir(path)
    output_dict = {}
    for file in tqdm(box_files):
        # Image id = filename stem with zero-padding removed.
        # BUG FIX: a plain lstrip("0") maps an all-zero name (image id 0)
        # to the empty string, so fall back to "0" in that case.
        image_id = file.split(".")[0].lstrip("0") or "0"
        with open(os.path.join(path, file), "r") as f:
            annots = json.load(f)
        boxes = np.array(annots["bounding_boxes"])
        labels = np.array(annots["labels"])
        output_dict[image_id] = {
            "boxes": boxes.tolist(),
            "scores": labels.tolist(),
        }
    return output_dict
def coco_to_results_format(coco_path: str, output_path: str = None,
                           coco_img_size: tuple = (960, 960)) -> None:
    """
    Converts the results saved in the coco detection results format to the format used in the
    custom bounding box evaluation metric.
    Boxes are converted from coco [x, y, w, h] to [x1, y1, x2, y2] and
    rescaled from coco_img_size to the fixed PVDN resolution of 1280x960.
    :param coco_path: Path of the coco .json file -> str
    :param output_path: Path under which the final results file has to be saved. If no path is
        provided, the result format is returned as a dictionary. -> str
    :param coco_img_size: Size (height, width) of the images which were used to generate the
        results in the coco file. -> tuple
    :return: None, if the output path is specified. If the output path is not specified, the
        result format is returned as a dictionary.
    """
    if output_path:
        if not os.path.splitext(output_path)[-1] == ".json":
            raise AttributeError(f"The output path extension should be .json, not "
                                 f"{os.path.splitext(output_path)[-1]}.")
    if not os.path.isfile(coco_path):
        raise FileNotFoundError(f"File {coco_path} does not exist.")
    if not os.path.splitext(coco_path)[-1] == ".json":
        raise AttributeError(f"The coco prediction file should have the extension .json, not "
                             f"{os.path.splitext(coco_path)[-1]}.")
    coco_path = os.path.abspath(coco_path)
    with open(coco_path, "r") as f:
        preds = json.load(f)

    pvdn_format = {}
    # Renamed from the misleading "fault_counter": this flag simply records
    # whether the predictions list was empty.
    no_predictions = True
    h_orig, w_orig = coco_img_size
    for pred in tqdm(preds):
        no_predictions = False
        k = str(pred["image_id"])
        bbox = np.array(pred["bbox"])
        # coco bbox is [x, y, w, h]; convert to corner format [x1, y1, x2, y2]
        bbox[2:] += bbox[:2]
        # Rescale to the fixed PVDN image resolution (1280 x 960).
        bbox[0] = (bbox[0] / w_orig) * 1280
        bbox[2] = (bbox[2] / w_orig) * 1280
        bbox[1] = (bbox[1] / h_orig) * 960
        bbox[3] = (bbox[3] / h_orig) * 960
        if k in pvdn_format:
            pvdn_format[k]["boxes"].append(bbox.tolist())
            pvdn_format[k]["scores"].append(pred["score"])
        else:
            pvdn_format[k] = {"boxes": [bbox.tolist()], "scores": [pred["score"]]}

    if no_predictions:
        warn(f"There were no predictions found in the provided file {coco_path}.")

    if output_path:
        with open(os.path.abspath(output_path), "w") as f:
            json.dump(pvdn_format, f)
    else:
        return pvdn_format
def result_to_coco_format(results: dict) -> list:
    """
    Converts detections from the custom PVDN results format back into the
    coco detection results format.
    :param results: Dictionary mapping each image id to a dict with keys
        "boxes" (list of [x1, y1, x2, y2] boxes) and "scores" (list of
        confidence scores of the same length). -> dict
    :return: List of coco-style prediction dicts with keys "image_id",
        "category_id", "bbox" ([x, y, w, h]) and "score". -> list
    """
    coco_preds = []
    for image_id, items in results.items():
        for bbox, score in zip(items["boxes"], items["scores"]):
            bbox = np.array(bbox)
            # Corner format [x1, y1, x2, y2] -> coco format [x, y, w, h].
            bbox[2:] = bbox[2:] - bbox[:2]
            coco_preds.append({"image_id": image_id, "category_id": 0,
                               "bbox": bbox.tolist(), "score": score})
    return coco_preds
| 39.908163 | 97 | 0.613909 |
067f232d9187e859e5843245d72e94f4353d64ec | 488,688 | py | Python | seleniumbase/fixtures/base_case.py | aichemydev/SeleniumBase | 34479f7377217013a1bbff23cfd12abe8363bc13 | [
"MIT"
] | null | null | null | seleniumbase/fixtures/base_case.py | aichemydev/SeleniumBase | 34479f7377217013a1bbff23cfd12abe8363bc13 | [
"MIT"
] | null | null | null | seleniumbase/fixtures/base_case.py | aichemydev/SeleniumBase | 34479f7377217013a1bbff23cfd12abe8363bc13 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
r""" ------------> ------------> ------------> ------------>
______ __ _ ____
/ ____/__ / /__ ____ (_)_ ______ ___ / _ \____ ________
\__ \/ _ \/ / _ \/ __ \/ / / / / __ `__ \/ /_) / __ \/ ___/ _ \
___/ / __/ / __/ / / / / /_/ / / / / / / /_) / (_/ /__ / __/
/____/\___/_/\___/_/ /_/_/\__,_/_/ /_/ /_/_____/\__,_/____/\___/
------------> ------------> ------------> ------------>
The BaseCase class is the main gateway for using The SeleniumBase Framework.
It inherits Python's unittest.TestCase class, and runs with Pytest or Nose.
All tests using BaseCase automatically launch WebDriver browsers for tests.
Usage:
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_anything(self):
# Write your code here. Example:
self.open("https://github.com/")
self.type("input.header-search-input", "SeleniumBase\n")
self.click('a[href="/seleniumbase/SeleniumBase"]')
self.assert_element("div.repository-content")
....
SeleniumBase methods expand and improve on existing WebDriver commands.
Improvements include making WebDriver more robust, reliable, and flexible.
Page elements are given enough time to load before WebDriver acts on them.
Code becomes greatly simplified and easier to maintain.
"""
import codecs
import json
import logging
import os
import re
import sys
import time
import unittest
import urllib3
from selenium.common.exceptions import (
ElementClickInterceptedException as ECI_Exception,
ElementNotInteractableException as ENI_Exception,
MoveTargetOutOfBoundsException,
StaleElementReferenceException,
WebDriverException,
)
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.remote.remote_connection import LOGGER
from seleniumbase import config as sb_config
from seleniumbase.config import settings
from seleniumbase.core import log_helper
from seleniumbase.fixtures import constants
from seleniumbase.fixtures import css_to_xpath
from seleniumbase.fixtures import js_utils
from seleniumbase.fixtures import page_actions
from seleniumbase.fixtures import page_utils
from seleniumbase.fixtures import shared_utils
from seleniumbase.fixtures import xpath_to_css
logging.getLogger("requests").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
urllib3.disable_warnings()
LOGGER.setLevel(logging.WARNING)
if sys.version_info[0] < 3:
reload(sys) # noqa: F821
sys.setdefaultencoding("utf8")
class BaseCase(unittest.TestCase):
""" <Class seleniumbase.BaseCase> """
def __init__(self, *args, **kwargs):
    """Initialize all per-test state used by the SeleniumBase methods.

    The actual browser/WebDriver setup happens later (in setUp()); this
    only seeds default values so every helper method can assume that the
    attributes exist.
    """
    super(BaseCase, self).__init__(*args, **kwargs)
    # Driver / environment handles (populated during test setup).
    self.driver = None
    self.environment = None
    self.env = None  # Add a shortened version of self.environment
    # Internal bookkeeping for page sources, recorder actions, and timing.
    self.__page_sources = []
    self.__extra_actions = []
    self.__js_start_time = 0
    self.__set_c_from_switch = False
    self.__called_setup = False
    self.__called_teardown = False
    self.__start_time_ms = None
    self.__requests_timeout = None
    self.__screenshot_count = 0
    self.__will_be_skipped = False
    self.__passed_then_skipped = False
    # "Last seen" page state, used for logs/reports on failure.
    self.__last_url_of_deferred_assert = "data:,"
    self.__last_page_load_url = "data:,"
    self.__last_page_screenshot = None
    self.__last_page_screenshot_png = None
    self.__last_page_url = None
    self.__last_page_source = None
    self.__skip_reason = None
    # Recorder-Mode flags controlling which actions get recorded.
    self.__dont_record_open = False
    self.__dont_record_js_click = False
    self.__new_window_on_rec_open = True
    self.__overrided_default_timeouts = False
    self.__added_pytest_html_extra = None
    # Deferred-assert state (failures collected, reported at the end).
    self.__deferred_assert_count = 0
    self.__deferred_assert_failures = []
    # Mobile-emulation device metrics (set when emulating a device).
    self.__device_width = None
    self.__device_height = None
    self.__device_pixel_ratio = None
    self.__driver_browser_map = {}
    # jquery-confirm dialog theme state (restored after overrides).
    self.__changed_jqc_theme = False
    self.__jqc_default_theme = None
    self.__jqc_default_color = None
    self.__jqc_default_width = None
    # Requires self._* instead of self.__* for external class use
    self._language = "English"
    self._presentation_slides = {}
    self._presentation_transition = {}
    self._rec_overrides_switch = True  # Recorder-Mode uses set_c vs switch
    self._sb_test_identifier = None
    self._html_report_extra = []  # (Used by pytest_plugin.py)
    self._default_driver = None
    self._drivers_list = []
    # Chart-maker state (self.create_pie_chart(), etc.)
    self._chart_data = {}
    self._chart_count = 0
    self._chart_label = {}
    self._chart_xcount = 0
    self._chart_first_series = {}
    self._chart_series_count = {}
    self._tour_steps = {}
def open(self, url):
    """Navigates the current browser window to the specified page.

    Raises an Exception for anything that doesn't look like a page URL.
    In Recorder Mode, the navigation is also saved for later playback.
    """
    self.__check_scope()
    if type(url) is str:
        url = url.strip()  # Remove leading and trailing whitespace
    if (type(url) is not str) or not self.__looks_like_a_page_url(url):
        # url should start with one of the following:
        # "http:", "https:", "://", "data:", "file:",
        # "about:", "chrome:", "opera:", or "edge:".
        msg = 'Did you forget to prefix your URL with "http:" or "https:"?'
        raise Exception('Invalid URL: "%s"\n%s' % (url, msg))
    self.__last_page_load_url = None
    js_utils.clear_out_console_logs(self.driver)
    if url.startswith("://"):
        # Convert URLs such as "://google.com" into "https://google.com"
        url = "https" + url
    if self.recorder_mode and not self.__dont_record_open:
        # Recorder Mode: save this navigation as a replayable action.
        time_stamp = self.execute_script("return Date.now();")
        origin = self.get_origin()
        action = ["_url_", origin, url, time_stamp]
        self.__extra_actions.append(action)
    if self.recorder_mode and self.__new_window_on_rec_open:
        # Recorder Mode: if navigating to a different domain than the
        # current page, open the URL in a new window instead.
        c_url = self.driver.current_url
        if ("http:") in c_url or ("https:") in c_url or ("file:") in c_url:
            if self.get_domain_url(url) != self.get_domain_url(c_url):
                self.open_new_window(switch_to=True)
    self.driver.get(url)
    if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
        self.wait_for_ready_state_complete()
    self.__demo_mode_pause_if_active()
def get(self, url):
    """Open "url" in the browser if it looks like a page URL;
    otherwise treat it as a selector and return the matching element.
    Examples:
        self.get("https://seleniumbase.io")  # Navigates to the URL
        self.get("input.class")  # Finds and returns the WebElement
    """
    self.__check_scope()
    if not self.__looks_like_a_page_url(url):
        # Not a URL: treat the argument as an element selector.
        return self.get_element(url)
    self.open(url)
def click(
    self, selector, by=By.CSS_SELECTOR, timeout=None, delay=0, scroll=True
):
    """Click on the element matching the given selector.

    Waits for the element to be visible, scrolls to it, clicks it, and
    falls back to JS/jQuery clicks (and per-browser workarounds) if the
    native click fails. Also handles anchor elements that open new tabs.
    @Params
    selector - the selector of the element to click
    by - the type of selector to search by (Default: CSS Selector)
    timeout - how long to wait for the selector to be visible
    delay - seconds to sleep before clicking
    scroll - if True, scroll to the element before clicking
    """
    self.__check_scope()
    if not timeout:
        timeout = settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    original_selector = selector
    original_by = by
    selector, by = self.__recalculate_selector(selector, by)
    if delay and (type(delay) in [int, float]) and delay > 0:
        time.sleep(delay)
    if page_utils.is_link_text_selector(selector) or by == By.LINK_TEXT:
        if not self.is_link_text_visible(selector):
            # Handle a special case of links hidden in dropdowns
            self.click_link_text(selector, timeout=timeout)
            return
    if (
        page_utils.is_partial_link_text_selector(selector)
        or by == By.PARTIAL_LINK_TEXT
    ):
        if not self.is_partial_link_text_visible(selector):
            # Handle a special case of partial links hidden in dropdowns
            self.click_partial_link_text(selector, timeout=timeout)
            return
    if self.__is_shadow_selector(selector):
        # Shadow-DOM elements require their own click mechanism.
        self.__shadow_click(selector)
        return
    element = page_actions.wait_for_element_visible(
        self.driver, selector, by, timeout=timeout
    )
    self.__demo_mode_highlight_if_active(original_selector, original_by)
    if scroll and not self.demo_mode and not self.slow_mode:
        self.__scroll_to_element(element, selector, by)
    # Remember the pre-click state to detect navigation / new windows.
    pre_action_url = self.driver.current_url
    pre_window_count = len(self.driver.window_handles)
    try:
        if self.browser == "ie" and by == By.LINK_TEXT:
            # An issue with clicking Link Text on IE means using jquery
            self.__jquery_click(selector, by=by)
        elif self.browser == "safari":
            # Safari's native click is unreliable; use JS/jQuery clicks.
            if by == By.LINK_TEXT:
                self.__jquery_click(selector, by=by)
            else:
                self.__js_click(selector, by=by)
        else:
            href = None
            new_tab = False
            onclick = None
            try:
                if self.headless and element.tag_name == "a":
                    # Handle a special case of opening a new tab (headless)
                    href = element.get_attribute("href").strip()
                    onclick = element.get_attribute("onclick")
                    target = element.get_attribute("target")
                    if target == "_blank":
                        new_tab = True
                    if new_tab and self.__looks_like_a_page_url(href):
                        if onclick:
                            try:
                                self.execute_script(onclick)
                            except Exception:
                                pass
                        current_window = self.driver.current_window_handle
                        self.open_new_window()
                        try:
                            self.open(href)
                        except Exception:
                            pass
                        self.switch_to_window(current_window)
                        return
            except Exception:
                pass
            # Normal click
            element.click()
    except StaleElementReferenceException:
        # The page changed under us: refetch the element and retry.
        self.wait_for_ready_state_complete()
        time.sleep(0.16)
        element = page_actions.wait_for_element_visible(
            self.driver, selector, by, timeout=timeout
        )
        try:
            self.__scroll_to_element(element, selector, by)
        except Exception:
            pass
        if self.browser == "safari":
            if by == By.LINK_TEXT:
                self.__jquery_click(selector, by=by)
            else:
                self.__js_click(selector, by=by)
        else:
            element.click()
    except ENI_Exception:
        # Element not interactable: wait, refetch, then retry the click.
        self.wait_for_ready_state_complete()
        time.sleep(0.1)
        element = page_actions.wait_for_element_visible(
            self.driver, selector, by, timeout=timeout
        )
        href = None
        new_tab = False
        onclick = None
        try:
            if element.tag_name == "a":
                # Handle a special case of opening a new tab (non-headless)
                href = element.get_attribute("href").strip()
                onclick = element.get_attribute("onclick")
                target = element.get_attribute("target")
                if target == "_blank":
                    new_tab = True
                if new_tab and self.__looks_like_a_page_url(href):
                    if onclick:
                        try:
                            self.execute_script(onclick)
                        except Exception:
                            pass
                    current_window = self.driver.current_window_handle
                    self.open_new_window()
                    try:
                        self.open(href)
                    except Exception:
                        pass
                    self.switch_to_window(current_window)
                    return
        except Exception:
            pass
        self.__scroll_to_element(element, selector, by)
        if self.browser == "firefox" or self.browser == "safari":
            if by == By.LINK_TEXT or "contains(" in selector:
                self.__jquery_click(selector, by=by)
            else:
                self.__js_click(selector, by=by)
        else:
            element.click()
    except (WebDriverException, MoveTargetOutOfBoundsException):
        # Generic failure: try JS click, then jQuery, then one last
        # native click attempt.
        self.wait_for_ready_state_complete()
        try:
            self.__js_click(selector, by=by)
        except Exception:
            try:
                self.__jquery_click(selector, by=by)
            except Exception:
                # One more attempt to click on the element
                element = page_actions.wait_for_element_visible(
                    self.driver, selector, by, timeout=timeout
                )
                element.click()
    latest_window_count = len(self.driver.window_handles)
    if (
        latest_window_count > pre_window_count
        and (
            self.recorder_mode
            or (
                settings.SWITCH_TO_NEW_TABS_ON_CLICK
                and self.driver.current_url == pre_action_url
            )
        )
    ):
        # The click opened a new window/tab: switch to it if non-blank.
        self.__switch_to_newest_window_if_not_blank()
    if settings.WAIT_FOR_RSC_ON_CLICKS:
        self.wait_for_ready_state_complete()
    if self.demo_mode:
        if self.driver.current_url != pre_action_url:
            self.__demo_mode_pause_if_active()
        else:
            self.__demo_mode_pause_if_active(tiny=True)
    elif self.slow_mode:
        self.__slow_mode_pause_if_active()
def slow_click(self, selector, by=By.CSS_SELECTOR, timeout=None):
    """Like click(), but sleeps briefly before performing the click.
    When combined with a custom user-agent (``--agent=AGENT``), this can
    often bypass bot-detection on websites that block automation tools,
    e.g. GitHub's:
    ``You have triggered an abuse detection mechanism...``
    """
    self.__check_scope()
    if not timeout:
        timeout = settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    # Slow Mode and Demo Mode already add their own pauses,
    # so the extra delay can be shorter in those modes.
    if self.slow_mode:
        pre_click_delay = 0.65
    elif self.demo_mode:
        pre_click_delay = 0.25
    else:
        pre_click_delay = 1.05
    self.click(selector, by=by, timeout=timeout, delay=pre_click_delay)
def double_click(self, selector, by=By.CSS_SELECTOR, timeout=None):
    """Double-click on the element matching the given selector.

    Uses ActionChains when possible; falls back to dispatching a JS
    "dblclick" event (or a jQuery dblclick) on browsers where
    ActionChains fails (e.g. Safari).
    """
    from selenium.webdriver.common.action_chains import ActionChains
    self.__check_scope()
    if not timeout:
        timeout = settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    original_selector = selector
    original_by = by
    selector, by = self.__recalculate_selector(selector, by)
    element = page_actions.wait_for_element_visible(
        self.driver, selector, by, timeout=timeout
    )
    self.__demo_mode_highlight_if_active(original_selector, original_by)
    if not self.demo_mode and not self.slow_mode:
        self.__scroll_to_element(element, selector, by)
        self.wait_for_ready_state_complete()
        # Find the element one more time in case scrolling hid it
        element = page_actions.wait_for_element_visible(
            self.driver, selector, by, timeout=timeout
        )
    pre_action_url = self.driver.current_url
    try:
        if self.browser == "safari":
            # Jump to the "except" block where the other script should work
            raise Exception("This Exception will be caught.")
        actions = ActionChains(self.driver)
        actions.double_click(element).perform()
    except Exception:
        # Fallback: dispatch a synthetic "dblclick" event via JavaScript.
        css_selector = self.convert_to_css_selector(selector, by=by)
        css_selector = re.escape(css_selector)  # Add "\\" to special chars
        css_selector = self.__escape_quotes_if_needed(css_selector)
        double_click_script = (
            """var targetElement1 = document.querySelector('%s');
            var clickEvent1 = document.createEvent('MouseEvents');
            clickEvent1.initEvent('dblclick', true, true);
            targetElement1.dispatchEvent(clickEvent1);"""
            % css_selector
        )
        if ":contains\\(" not in css_selector:
            self.execute_script(double_click_script)
        else:
            # querySelector can't handle ":contains()"; use jQuery instead.
            double_click_script = (
                """jQuery('%s').dblclick();""" % css_selector
            )
            self.safe_execute_script(double_click_script)
    if settings.WAIT_FOR_RSC_ON_CLICKS:
        self.wait_for_ready_state_complete()
    if self.demo_mode:
        if self.driver.current_url != pre_action_url:
            self.__demo_mode_pause_if_active()
        else:
            self.__demo_mode_pause_if_active(tiny=True)
    elif self.slow_mode:
        self.__slow_mode_pause_if_active()
def click_chain(
    self, selectors_list, by=By.CSS_SELECTOR, timeout=None, spacing=0
):
    """Click each selector in the given list, one after another.
    @Params
    selectors_list - The list of selectors to click on.
    by - The type of selector to search by (Default: CSS_Selector).
    timeout - How long to wait for the selector to be visible.
    spacing - The amount of time to wait between clicks (in seconds).
    """
    self.__check_scope()
    if not timeout:
        timeout = settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    for current_selector in selectors_list:
        self.click(current_selector, by=by, timeout=timeout)
        if spacing > 0:
            # Pause between clicks (also after the final one).
            time.sleep(spacing)
def update_text(
    self, selector, text, by=By.CSS_SELECTOR, timeout=None, retry=False
):
    """This method updates an element's text field with new text.
    Has multiple parts:
    * Waits for the element to be visible.
    * Waits for the element to be interactive.
    * Clears the text field.
    * Types in the new text.
    * Hits Enter/Submit (if the text ends in "\n").
    @Params
    selector - the selector of the text field
    text - the new text to type into the text field
    by - the type of selector to search by (Default: CSS Selector)
    timeout - how long to wait for the selector to be visible
    retry - if True, use JS if the Selenium text update fails
    """
    self.__check_scope()
    if not timeout:
        timeout = settings.LARGE_TIMEOUT
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    selector, by = self.__recalculate_selector(selector, by)
    if self.__is_shadow_selector(selector):
        # Shadow-DOM fields require their own typing mechanism.
        self.__shadow_type(selector, text)
        return
    element = self.wait_for_element_visible(
        selector, by=by, timeout=timeout
    )
    self.__demo_mode_highlight_if_active(selector, by)
    if not self.demo_mode and not self.slow_mode:
        self.__scroll_to_element(element, selector, by)
    try:
        element.clear()  # May need https://stackoverflow.com/a/50691625
        backspaces = Keys.BACK_SPACE * 42  # Is the answer to everything
        element.send_keys(backspaces)  # In case autocomplete keeps text
    except (StaleElementReferenceException, ENI_Exception):
        # The element went stale or was not interactable: refetch it.
        self.wait_for_ready_state_complete()
        time.sleep(0.16)
        element = self.wait_for_element_visible(
            selector, by=by, timeout=timeout
        )
        try:
            element.clear()
        except Exception:
            pass  # Clearing the text field first might not be necessary
    except Exception:
        pass  # Clearing the text field first might not be necessary
    self.__demo_mode_pause_if_active(tiny=True)
    pre_action_url = self.driver.current_url
    if type(text) is int or type(text) is float:
        text = str(text)
    try:
        if not text.endswith("\n"):
            element.send_keys(text)
            if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
                self.wait_for_ready_state_complete()
        else:
            # A trailing newline means: type the text, then press Enter.
            element.send_keys(text[:-1])
            element.send_keys(Keys.RETURN)
            if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
                self.wait_for_ready_state_complete()
    except (StaleElementReferenceException, ENI_Exception):
        # Retry the typing once after refetching the element.
        self.wait_for_ready_state_complete()
        time.sleep(0.16)
        element = self.wait_for_element_visible(
            selector, by=by, timeout=timeout
        )
        element.clear()
        if not text.endswith("\n"):
            element.send_keys(text)
        else:
            element.send_keys(text[:-1])
            element.send_keys(Keys.RETURN)
            if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
                self.wait_for_ready_state_complete()
    if (
        retry
        and element.get_attribute("value") != text
        and not text.endswith("\n")
    ):
        # The typed value didn't stick; set it directly via JavaScript.
        logging.debug("update_text() is falling back to JavaScript!")
        self.set_value(selector, text, by=by)
    if self.demo_mode:
        if self.driver.current_url != pre_action_url:
            self.__demo_mode_pause_if_active()
        else:
            self.__demo_mode_pause_if_active(tiny=True)
    elif self.slow_mode:
        self.__slow_mode_pause_if_active()
def add_text(self, selector, text, by=By.CSS_SELECTOR, timeout=None):
    """The more-reliable version of driver.send_keys()
    Similar to update_text(), but won't clear the text field first.
    @Params
    selector - the selector of the text field
    text - the text to append into the text field
    by - the type of selector to search by (Default: CSS Selector)
    timeout - how long to wait for the selector to be visible
    """
    self.__check_scope()
    if not timeout:
        timeout = settings.LARGE_TIMEOUT
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    selector, by = self.__recalculate_selector(selector, by)
    if self.__is_shadow_selector(selector):
        # Shadow-DOM fields require their own typing mechanism.
        self.__shadow_type(selector, text, clear_first=False)
        return
    element = self.wait_for_element_visible(
        selector, by=by, timeout=timeout
    )
    self.__demo_mode_highlight_if_active(selector, by)
    if not self.demo_mode and not self.slow_mode:
        self.__scroll_to_element(element, selector, by)
    pre_action_url = self.driver.current_url
    if type(text) is int or type(text) is float:
        text = str(text)
    try:
        if not text.endswith("\n"):
            element.send_keys(text)
        else:
            # A trailing newline means: type the text, then press Enter.
            element.send_keys(text[:-1])
            element.send_keys(Keys.RETURN)
            if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
                self.wait_for_ready_state_complete()
    except (StaleElementReferenceException, ENI_Exception):
        # Retry the typing once after refetching the element.
        self.wait_for_ready_state_complete()
        time.sleep(0.16)
        element = self.wait_for_element_visible(
            selector, by=by, timeout=timeout
        )
        if not text.endswith("\n"):
            element.send_keys(text)
        else:
            element.send_keys(text[:-1])
            element.send_keys(Keys.RETURN)
            if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
                self.wait_for_ready_state_complete()
    if self.demo_mode:
        if self.driver.current_url != pre_action_url:
            self.__demo_mode_pause_if_active()
        else:
            self.__demo_mode_pause_if_active(tiny=True)
    elif self.slow_mode:
        self.__slow_mode_pause_if_active()
def type(
    self, selector, text, by=By.CSS_SELECTOR, timeout=None, retry=False
):
    """Alias of self.update_text(): clears the field, then types the
    given text (pressing Enter/Submit when the text ends in "\n").
    @Params
    selector - the selector of the text field
    text - the new text to type into the text field
    by - the type of selector to search by (Default: CSS Selector)
    timeout - how long to wait for the selector to be visible
    retry - if True, use JS if the Selenium text update fails
    Note: Do NOT confuse self.type() with Python's built-in type()!
    They are completely different.
    """
    self.__check_scope()
    timeout = timeout or settings.LARGE_TIMEOUT
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    selector, by = self.__recalculate_selector(selector, by)
    self.update_text(selector, text, by=by, timeout=timeout, retry=retry)
def submit(self, selector, by=By.CSS_SELECTOR):
    """Submit the form containing the matching element.
    (Alternative to self.driver.find_element_by_*(SELECTOR).submit())"""
    self.__check_scope()
    selector, by = self.__recalculate_selector(selector, by)
    form_field = self.wait_for_element_visible(
        selector, by=by, timeout=settings.SMALL_TIMEOUT
    )
    form_field.submit()
    self.__demo_mode_pause_if_active()
def clear(self, selector, by=By.CSS_SELECTOR, timeout=None):
    """This method clears an element's text field.
    A clear() is already included with most methods that type text,
    such as self.type(), self.update_text(), etc.
    Does not use Demo Mode highlights, mainly because we expect
    that some users will be calling an unnecessary clear() before
    calling a method that already includes clear() as part of it.
    In case websites trigger an autofill after clearing a field,
    add backspaces to make sure autofill doesn't undo the clear.
    @Params
    selector - the selector of the text field
    by - the type of selector to search by (Default: CSS Selector)
    timeout - how long to wait for the selector to be visible
    """
    self.__check_scope()
    if not timeout:
        timeout = settings.LARGE_TIMEOUT
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    selector, by = self.__recalculate_selector(selector, by)
    if self.__is_shadow_selector(selector):
        # Shadow-DOM fields require their own clearing mechanism.
        self.__shadow_clear(selector)
        return
    element = self.wait_for_element_visible(
        selector, by=by, timeout=timeout
    )
    self.scroll_to(selector, by=by, timeout=timeout)
    try:
        element.clear()
        backspaces = Keys.BACK_SPACE * 42  # Autofill Defense
        element.send_keys(backspaces)
    except (StaleElementReferenceException, ENI_Exception):
        # The element went stale or was not interactable: refetch it.
        self.wait_for_ready_state_complete()
        time.sleep(0.16)
        element = self.wait_for_element_visible(
            selector, by=by, timeout=timeout
        )
        element.clear()
        try:
            backspaces = Keys.BACK_SPACE * 42  # Autofill Defense
            element.send_keys(backspaces)
        except Exception:
            pass
    except Exception:
        # Last resort: try a plain clear() without the backspaces.
        element.clear()
def focus(self, selector, by=By.CSS_SELECTOR, timeout=None):
"""Make the current page focus on an interactable element.
If the element is not interactable, only scrolls to it.
The "tab" key is another way of setting the page focus."""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
self.scroll_to(selector, by=by, timeout=timeout)
try:
element.send_keys(Keys.NULL)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.12)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
try:
element.send_keys(Keys.NULL)
except ENI_Exception:
# Non-interactable element. Skip focus and continue.
pass
self.__demo_mode_pause_if_active()
def refresh_page(self):
self.__check_scope()
self.__last_page_load_url = None
js_utils.clear_out_console_logs(self.driver)
self.driver.refresh()
self.wait_for_ready_state_complete()
def refresh(self):
""" The shorter version of self.refresh_page() """
self.refresh_page()
def get_current_url(self):
self.__check_scope()
current_url = self.driver.current_url
if "%" in current_url and sys.version_info[0] >= 3:
try:
from urllib.parse import unquote
current_url = unquote(current_url, errors="strict")
except Exception:
pass
return current_url
def get_origin(self):
self.__check_scope()
return self.execute_script("return window.location.origin;")
def get_page_source(self):
self.wait_for_ready_state_complete()
return self.driver.page_source
def get_page_title(self):
self.wait_for_ready_state_complete()
self.wait_for_element_present("title", timeout=settings.SMALL_TIMEOUT)
time.sleep(0.03)
return self.driver.title
def get_title(self):
""" The shorter version of self.get_page_title() """
return self.get_page_title()
def get_user_agent(self):
self.__check_scope()
user_agent = self.driver.execute_script("return navigator.userAgent;")
return user_agent
def get_locale_code(self):
self.__check_scope()
locale_code = self.driver.execute_script(
"return navigator.language || navigator.languages[0];"
)
return locale_code
def go_back(self):
self.__check_scope()
self.__last_page_load_url = None
self.driver.back()
if self.browser == "safari":
self.wait_for_ready_state_complete()
self.driver.refresh()
self.wait_for_ready_state_complete()
self.__demo_mode_pause_if_active()
def go_forward(self):
self.__check_scope()
self.__last_page_load_url = None
self.driver.forward()
self.wait_for_ready_state_complete()
self.__demo_mode_pause_if_active()
def open_start_page(self):
"""Navigates the current browser window to the start_page.
You can set the start_page on the command-line in three ways:
'--start_page=URL', '--start-page=URL', or '--url=URL'.
If the start_page is not set, then "data:," will be used."""
self.__check_scope()
start_page = self.start_page
if type(start_page) is str:
start_page = start_page.strip() # Remove extra whitespace
if start_page and len(start_page) >= 4:
if page_utils.is_valid_url(start_page):
self.open(start_page)
else:
new_start_page = "https://" + start_page
if page_utils.is_valid_url(new_start_page):
self.__dont_record_open = True
self.open(new_start_page)
self.__dont_record_open = False
else:
logging.info('Invalid URL: "%s"!' % start_page)
self.open("data:,")
else:
self.open("data:,")
def open_if_not_url(self, url):
""" Opens the url in the browser if it's not the current url. """
self.__check_scope()
if self.driver.current_url != url:
self.open(url)
def is_element_present(self, selector, by=By.CSS_SELECTOR):
self.wait_for_ready_state_complete()
selector, by = self.__recalculate_selector(selector, by)
return page_actions.is_element_present(self.driver, selector, by)
def is_element_visible(self, selector, by=By.CSS_SELECTOR):
self.wait_for_ready_state_complete()
selector, by = self.__recalculate_selector(selector, by)
return page_actions.is_element_visible(self.driver, selector, by)
def is_element_enabled(self, selector, by=By.CSS_SELECTOR):
self.wait_for_ready_state_complete()
selector, by = self.__recalculate_selector(selector, by)
return page_actions.is_element_enabled(self.driver, selector, by)
def is_text_visible(self, text, selector="html", by=By.CSS_SELECTOR):
self.wait_for_ready_state_complete()
time.sleep(0.01)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.is_text_visible(self.driver, text, selector, by)
def is_attribute_present(
self, selector, attribute, value=None, by=By.CSS_SELECTOR
):
"""Returns True if the element attribute/value is found.
If the value is not specified, the attribute only needs to exist."""
self.wait_for_ready_state_complete()
time.sleep(0.01)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.is_attribute_present(
self.driver, selector, attribute, value, by
)
def is_link_text_visible(self, link_text):
self.wait_for_ready_state_complete()
time.sleep(0.01)
return page_actions.is_element_visible(
self.driver, link_text, by=By.LINK_TEXT
)
def is_partial_link_text_visible(self, partial_link_text):
self.wait_for_ready_state_complete()
time.sleep(0.01)
return page_actions.is_element_visible(
self.driver, partial_link_text, by=By.PARTIAL_LINK_TEXT
)
def is_link_text_present(self, link_text):
"""Returns True if the link text appears in the HTML of the page.
The element doesn't need to be visible,
such as elements hidden inside a dropdown selection."""
self.wait_for_ready_state_complete()
soup = self.get_beautiful_soup()
html_links = soup.find_all("a")
for html_link in html_links:
if html_link.text.strip() == link_text.strip():
return True
return False
def is_partial_link_text_present(self, link_text):
"""Returns True if the partial link appears in the HTML of the page.
The element doesn't need to be visible,
such as elements hidden inside a dropdown selection."""
self.wait_for_ready_state_complete()
soup = self.get_beautiful_soup()
html_links = soup.find_all("a")
for html_link in html_links:
if link_text.strip() in html_link.text.strip():
return True
return False
def get_link_attribute(self, link_text, attribute, hard_fail=True):
"""Finds a link by link text and then returns the attribute's value.
If the link text or attribute cannot be found, an exception will
get raised if hard_fail is True (otherwise None is returned)."""
self.wait_for_ready_state_complete()
soup = self.get_beautiful_soup()
html_links = soup.find_all("a")
for html_link in html_links:
if html_link.text.strip() == link_text.strip():
if html_link.has_attr(attribute):
attribute_value = html_link.get(attribute)
return attribute_value
if hard_fail:
raise Exception(
"Unable to find attribute {%s} from link text {%s}!"
% (attribute, link_text)
)
else:
return None
if hard_fail:
raise Exception("Link text {%s} was not found!" % link_text)
else:
return None
def get_link_text_attribute(self, link_text, attribute, hard_fail=True):
"""Same as self.get_link_attribute()
Finds a link by link text and then returns the attribute's value.
If the link text or attribute cannot be found, an exception will
get raised if hard_fail is True (otherwise None is returned)."""
return self.get_link_attribute(link_text, attribute, hard_fail)
def get_partial_link_text_attribute(
self, link_text, attribute, hard_fail=True
):
"""Finds a link by partial link text and then returns the attribute's
value. If the partial link text or attribute cannot be found, an
exception will get raised if hard_fail is True (otherwise None
is returned)."""
self.wait_for_ready_state_complete()
soup = self.get_beautiful_soup()
html_links = soup.find_all("a")
for html_link in html_links:
if link_text.strip() in html_link.text.strip():
if html_link.has_attr(attribute):
attribute_value = html_link.get(attribute)
return attribute_value
if hard_fail:
raise Exception(
"Unable to find attribute {%s} from "
"partial link text {%s}!" % (attribute, link_text)
)
else:
return None
if hard_fail:
raise Exception(
"Partial Link text {%s} was not found!" % link_text
)
else:
return None
def click_link_text(self, link_text, timeout=None):
""" This method clicks link text on a page """
# If using phantomjs, might need to extract and open the link directly
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
pre_action_url = self.driver.current_url
pre_window_count = len(self.driver.window_handles)
if self.browser == "phantomjs":
if self.is_link_text_visible(link_text):
element = self.wait_for_link_text_visible(
link_text, timeout=timeout
)
element.click()
return
self.open(self.__get_href_from_link_text(link_text))
return
if self.browser == "safari":
if self.demo_mode:
self.wait_for_link_text_present(link_text, timeout=timeout)
try:
self.__jquery_slow_scroll_to(link_text, by=By.LINK_TEXT)
except Exception:
element = self.wait_for_link_text_visible(
link_text, timeout=timeout
)
self.__slow_scroll_to_element(element)
o_bs = "" # original_box_shadow
loops = settings.HIGHLIGHTS
selector = self.convert_to_css_selector(
link_text, by=By.LINK_TEXT
)
selector = self.__make_css_match_first_element_only(selector)
try:
selector = re.escape(selector)
selector = self.__escape_quotes_if_needed(selector)
self.__highlight_with_jquery(selector, loops, o_bs)
except Exception:
pass # JQuery probably couldn't load. Skip highlighting.
self.__jquery_click(link_text, by=By.LINK_TEXT)
return
if not self.is_link_text_present(link_text):
self.wait_for_link_text_present(link_text, timeout=timeout)
pre_action_url = self.get_current_url()
try:
element = self.wait_for_link_text_visible(link_text, timeout=0.2)
self.__demo_mode_highlight_if_active(link_text, by=By.LINK_TEXT)
try:
element.click()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.16)
element = self.wait_for_link_text_visible(
link_text, timeout=timeout
)
element.click()
except Exception:
found_css = False
text_id = self.get_link_attribute(link_text, "id", False)
if text_id:
link_css = '[id="%s"]' % link_text
found_css = True
if not found_css:
href = self.__get_href_from_link_text(link_text, False)
if href:
if href.startswith("/") or page_utils.is_valid_url(href):
link_css = '[href="%s"]' % href
found_css = True
if not found_css:
ngclick = self.get_link_attribute(link_text, "ng-click", False)
if ngclick:
link_css = '[ng-click="%s"]' % ngclick
found_css = True
if not found_css:
onclick = self.get_link_attribute(link_text, "onclick", False)
if onclick:
link_css = '[onclick="%s"]' % onclick
found_css = True
success = False
if found_css:
if self.is_element_visible(link_css):
self.click(link_css)
success = True
else:
# The link text might be hidden under a dropdown menu
success = self.__click_dropdown_link_text(
link_text, link_css
)
if not success:
element = self.wait_for_link_text_visible(
link_text, timeout=settings.MINI_TIMEOUT
)
element.click()
latest_window_count = len(self.driver.window_handles)
if (
latest_window_count > pre_window_count
and (
self.recorder_mode
or (
settings.SWITCH_TO_NEW_TABS_ON_CLICK
and self.driver.current_url == pre_action_url
)
)
):
self.__switch_to_newest_window_if_not_blank()
if settings.WAIT_FOR_RSC_ON_CLICKS:
self.wait_for_ready_state_complete()
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def click_partial_link_text(self, partial_link_text, timeout=None):
""" This method clicks the partial link text on a page. """
# If using phantomjs, might need to extract and open the link directly
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if self.browser == "phantomjs":
if self.is_partial_link_text_visible(partial_link_text):
element = self.wait_for_partial_link_text(partial_link_text)
element.click()
return
soup = self.get_beautiful_soup()
html_links = soup.fetch("a")
for html_link in html_links:
if partial_link_text in html_link.text:
for html_attribute in html_link.attrs:
if html_attribute[0] == "href":
href = html_attribute[1]
if href.startswith("//"):
link = "http:" + href
elif href.startswith("/"):
url = self.driver.current_url
domain_url = self.get_domain_url(url)
link = domain_url + href
else:
link = href
self.open(link)
return
raise Exception(
"Could not parse link from partial link_text "
"{%s}" % partial_link_text
)
raise Exception(
"Partial link text {%s} was not found!" % partial_link_text
)
if not self.is_partial_link_text_present(partial_link_text):
self.wait_for_partial_link_text_present(
partial_link_text, timeout=timeout
)
pre_action_url = self.driver.current_url
pre_window_count = len(self.driver.window_handles)
try:
element = self.wait_for_partial_link_text(
partial_link_text, timeout=0.2
)
self.__demo_mode_highlight_if_active(
partial_link_text, by=By.LINK_TEXT
)
try:
element.click()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.16)
element = self.wait_for_partial_link_text(
partial_link_text, timeout=timeout
)
element.click()
except Exception:
found_css = False
text_id = self.get_partial_link_text_attribute(
partial_link_text, "id", False
)
if text_id:
link_css = '[id="%s"]' % partial_link_text
found_css = True
if not found_css:
href = self.__get_href_from_partial_link_text(
partial_link_text, False
)
if href:
if href.startswith("/") or page_utils.is_valid_url(href):
link_css = '[href="%s"]' % href
found_css = True
if not found_css:
ngclick = self.get_partial_link_text_attribute(
partial_link_text, "ng-click", False
)
if ngclick:
link_css = '[ng-click="%s"]' % ngclick
found_css = True
if not found_css:
onclick = self.get_partial_link_text_attribute(
partial_link_text, "onclick", False
)
if onclick:
link_css = '[onclick="%s"]' % onclick
found_css = True
success = False
if found_css:
if self.is_element_visible(link_css):
self.click(link_css)
success = True
else:
# The link text might be hidden under a dropdown menu
success = self.__click_dropdown_partial_link_text(
partial_link_text, link_css
)
if not success:
element = self.wait_for_partial_link_text(
partial_link_text, timeout=settings.MINI_TIMEOUT
)
element.click()
latest_window_count = len(self.driver.window_handles)
if (
latest_window_count > pre_window_count
and (
self.recorder_mode
or (
settings.SWITCH_TO_NEW_TABS_ON_CLICK
and self.driver.current_url == pre_action_url
)
)
):
self.__switch_to_newest_window_if_not_blank()
if settings.WAIT_FOR_RSC_ON_CLICKS:
self.wait_for_ready_state_complete()
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def get_text(self, selector, by=By.CSS_SELECTOR, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
return self.__get_shadow_text(selector)
self.wait_for_ready_state_complete()
time.sleep(0.01)
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout
)
try:
element_text = element.text
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.14)
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout
)
element_text = element.text
return element_text
def get_attribute(
self,
selector,
attribute,
by=By.CSS_SELECTOR,
timeout=None,
hard_fail=True,
):
""" This method uses JavaScript to get the value of an attribute. """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
time.sleep(0.01)
element = page_actions.wait_for_element_present(
self.driver, selector, by, timeout
)
try:
attribute_value = element.get_attribute(attribute)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.14)
element = page_actions.wait_for_element_present(
self.driver, selector, by, timeout
)
attribute_value = element.get_attribute(attribute)
if attribute_value is not None:
return attribute_value
else:
if hard_fail:
raise Exception(
"Element {%s} has no attribute {%s}!"
% (selector, attribute)
)
else:
return None
def set_attribute(
self,
selector,
attribute,
value,
by=By.CSS_SELECTOR,
timeout=None,
scroll=False,
):
"""This method uses JavaScript to set/update an attribute.
Only the first matching selector from querySelector() is used."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if scroll and self.is_element_visible(selector, by=by):
try:
self.scroll_to(selector, by=by, timeout=timeout)
except Exception:
pass
attribute = re.escape(attribute)
attribute = self.__escape_quotes_if_needed(attribute)
value = re.escape(value)
value = self.__escape_quotes_if_needed(value)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector) # Add "\\" to special chars
css_selector = self.__escape_quotes_if_needed(css_selector)
script = (
"""document.querySelector('%s').setAttribute('%s','%s');"""
% (css_selector, attribute, value)
)
self.execute_script(script)
def set_attributes(self, selector, attribute, value, by=By.CSS_SELECTOR):
"""This method uses JavaScript to set/update a common attribute.
All matching selectors from querySelectorAll() are used.
Example => (Make all links on a website redirect to Google):
self.set_attributes("a", "href", "https://google.com")"""
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
attribute = re.escape(attribute)
attribute = self.__escape_quotes_if_needed(attribute)
value = re.escape(value)
value = self.__escape_quotes_if_needed(value)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector) # Add "\\" to special chars
css_selector = self.__escape_quotes_if_needed(css_selector)
script = """var $elements = document.querySelectorAll('%s');
var index = 0, length = $elements.length;
for(; index < length; index++){
$elements[index].setAttribute('%s','%s');}""" % (
css_selector,
attribute,
value,
)
try:
self.execute_script(script)
except Exception:
pass
def set_attribute_all(
self, selector, attribute, value, by=By.CSS_SELECTOR
):
"""Same as set_attributes(), but using querySelectorAll naming scheme.
This method uses JavaScript to set/update a common attribute.
All matching selectors from querySelectorAll() are used.
Example => (Make all links on a website redirect to Google):
self.set_attribute_all("a", "href", "https://google.com")"""
self.set_attributes(selector, attribute, value, by=by)
def remove_attribute(
self, selector, attribute, by=By.CSS_SELECTOR, timeout=None
):
"""This method uses JavaScript to remove an attribute.
Only the first matching selector from querySelector() is used."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.is_element_visible(selector, by=by):
try:
self.scroll_to(selector, by=by, timeout=timeout)
except Exception:
pass
attribute = re.escape(attribute)
attribute = self.__escape_quotes_if_needed(attribute)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector) # Add "\\" to special chars
css_selector = self.__escape_quotes_if_needed(css_selector)
script = """document.querySelector('%s').removeAttribute('%s');""" % (
css_selector,
attribute,
)
self.execute_script(script)
def remove_attributes(self, selector, attribute, by=By.CSS_SELECTOR):
"""This method uses JavaScript to remove a common attribute.
All matching selectors from querySelectorAll() are used."""
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
attribute = re.escape(attribute)
attribute = self.__escape_quotes_if_needed(attribute)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector) # Add "\\" to special chars
css_selector = self.__escape_quotes_if_needed(css_selector)
script = """var $elements = document.querySelectorAll('%s');
var index = 0, length = $elements.length;
for(; index < length; index++){
$elements[index].removeAttribute('%s');}""" % (
css_selector,
attribute,
)
try:
self.execute_script(script)
except Exception:
pass
def get_property_value(
self, selector, property, by=By.CSS_SELECTOR, timeout=None
):
"""Returns the property value of a page element's computed style.
Example:
opacity = self.get_property_value("html body a", "opacity")
self.assertTrue(float(opacity) > 0, "Element not visible!")"""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
page_actions.wait_for_element_present(
self.driver, selector, by, timeout
)
try:
selector = self.convert_to_css_selector(selector, by=by)
except Exception:
# Don't run action if can't convert to CSS_Selector for JavaScript
raise Exception(
"Exception: Could not convert {%s}(by=%s) to CSS_SELECTOR!"
% (selector, by)
)
selector = re.escape(selector)
selector = self.__escape_quotes_if_needed(selector)
script = """var $elm = document.querySelector('%s');
$val = window.getComputedStyle($elm).getPropertyValue('%s');
return $val;""" % (
selector,
property,
)
value = self.execute_script(script)
if value is not None:
return value
else:
return "" # Return an empty string if the property doesn't exist
def get_image_url(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Extracts the URL from an image element on the page. """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.get_attribute(
selector, attribute="src", by=by, timeout=timeout
)
def find_elements(self, selector, by=By.CSS_SELECTOR, limit=0):
"""Returns a list of matching WebElements.
Elements could be either hidden or visible on the page.
If "limit" is set and > 0, will only return that many elements."""
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
time.sleep(0.05)
elements = self.driver.find_elements(by=by, value=selector)
if limit and limit > 0 and len(elements) > limit:
elements = elements[:limit]
return elements
def find_visible_elements(self, selector, by=By.CSS_SELECTOR, limit=0):
"""Returns a list of matching WebElements that are visible.
If "limit" is set and > 0, will only return that many elements."""
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
time.sleep(0.05)
v_elems = page_actions.find_visible_elements(self.driver, selector, by)
if limit and limit > 0 and len(v_elems) > limit:
v_elems = v_elems[:limit]
return v_elems
def click_visible_elements(
self, selector, by=By.CSS_SELECTOR, limit=0, timeout=None
):
"""Finds all matching page elements and clicks visible ones in order.
If a click reloads or opens a new page, the clicking will stop.
If no matching elements appear, an Exception will be raised.
If "limit" is set and > 0, will only click that many elements.
Also clicks elements that become visible from previous clicks.
Works best for actions such as clicking all checkboxes on a page.
Example: self.click_visible_elements('input[type="checkbox"]')"""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_element_present(selector, by=by, timeout=timeout)
elements = self.find_elements(selector, by=by)
if self.browser == "safari":
if not limit:
limit = 0
num_elements = len(elements)
if num_elements == 0:
raise Exception(
"No matching elements found for selector {%s}!" % selector
)
elif num_elements < limit or limit == 0:
limit = num_elements
selector, by = self.__recalculate_selector(selector, by)
css_selector = self.convert_to_css_selector(selector, by=by)
last_css_chunk = css_selector.split(" ")[-1]
if ":" in last_css_chunk:
self.__js_click_all(css_selector)
self.wait_for_ready_state_complete()
return
else:
for i in range(1, limit + 1):
new_selector = css_selector + ":nth-of-type(%s)" % str(i)
if self.is_element_visible(new_selector):
self.__js_click(new_selector)
self.wait_for_ready_state_complete()
return
pre_action_url = self.driver.current_url
pre_window_count = len(self.driver.window_handles)
click_count = 0
for element in elements:
if limit and limit > 0 and click_count >= limit:
return
try:
if element.is_displayed():
self.__scroll_to_element(element)
element.click()
click_count += 1
self.wait_for_ready_state_complete()
except ECI_Exception:
continue # ElementClickInterceptedException (Overlay likely)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.12)
try:
if element.is_displayed():
self.__scroll_to_element(element)
element.click()
click_count += 1
self.wait_for_ready_state_complete()
except (StaleElementReferenceException, ENI_Exception):
latest_window_count = len(self.driver.window_handles)
if (
latest_window_count > pre_window_count
and (
self.recorder_mode
or (
settings.SWITCH_TO_NEW_TABS_ON_CLICK
and self.driver.current_url == pre_action_url
)
)
):
self.__switch_to_newest_window_if_not_blank()
return # Probably on new page / Elements are all stale
latest_window_count = len(self.driver.window_handles)
if (
latest_window_count > pre_window_count
and (
self.recorder_mode
or (
settings.SWITCH_TO_NEW_TABS_ON_CLICK
and self.driver.current_url == pre_action_url
)
)
):
self.__switch_to_newest_window_if_not_blank()
def click_nth_visible_element(
self, selector, number, by=By.CSS_SELECTOR, timeout=None
):
"""Finds all matching page elements and clicks the nth visible one.
Example: self.click_nth_visible_element('[type="checkbox"]', 5)
(Clicks the 5th visible checkbox on the page.)"""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
self.wait_for_element_present(selector, by=by, timeout=timeout)
elements = self.find_visible_elements(selector, by=by)
if len(elements) < number:
raise Exception(
"Not enough matching {%s} elements of type {%s} to "
"click number %s!" % (selector, by, number)
)
number = number - 1
if number < 0:
number = 0
element = elements[number]
pre_action_url = self.driver.current_url
pre_window_count = len(self.driver.window_handles)
try:
self.__scroll_to_element(element)
element.click()
except (StaleElementReferenceException, ENI_Exception):
time.sleep(0.12)
self.wait_for_ready_state_complete()
self.wait_for_element_present(selector, by=by, timeout=timeout)
elements = self.find_visible_elements(selector, by=by)
if len(elements) < number:
raise Exception(
"Not enough matching {%s} elements of type {%s} to "
"click number %s!" % (selector, by, number)
)
number = number - 1
if number < 0:
number = 0
element = elements[number]
element.click()
latest_window_count = len(self.driver.window_handles)
if (
latest_window_count > pre_window_count
and (
self.recorder_mode
or (
settings.SWITCH_TO_NEW_TABS_ON_CLICK
and self.driver.current_url == pre_action_url
)
)
):
self.__switch_to_newest_window_if_not_blank()
def click_if_visible(self, selector, by=By.CSS_SELECTOR):
"""If the page selector exists and is visible, clicks on the element.
This method only clicks on the first matching element found.
(Use click_visible_elements() to click all matching elements.)"""
self.wait_for_ready_state_complete()
if self.is_element_visible(selector, by=by):
self.click(selector, by=by)
def click_active_element(self):
self.wait_for_ready_state_complete()
pre_action_url = self.driver.current_url
pre_window_count = len(self.driver.window_handles)
self.execute_script("document.activeElement.click();")
latest_window_count = len(self.driver.window_handles)
if (
latest_window_count > pre_window_count
and (
self.recorder_mode
or (
settings.SWITCH_TO_NEW_TABS_ON_CLICK
and self.driver.current_url == pre_action_url
)
)
):
self.__switch_to_newest_window_if_not_blank()
if settings.WAIT_FOR_RSC_ON_CLICKS:
self.wait_for_ready_state_complete()
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def is_checked(self, selector, by=By.CSS_SELECTOR, timeout=None):
"""Determines if a checkbox or a radio button element is checked.
Returns True if the element is checked.
Returns False if the element is not checked.
If the element is not present on the page, raises an exception.
If the element is not a checkbox or radio, raises an exception."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
kind = self.get_attribute(selector, "type", by=by, timeout=timeout)
if kind != "checkbox" and kind != "radio":
raise Exception("Expecting a checkbox or a radio button element!")
is_checked = self.get_attribute(
selector, "checked", by=by, timeout=timeout, hard_fail=False
)
if is_checked:
return True
else: # (NoneType)
return False
def is_selected(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Same as is_checked() """
return self.is_checked(selector, by=by, timeout=timeout)
def check_if_unchecked(self, selector, by=By.CSS_SELECTOR):
""" If a checkbox or radio button is not checked, will check it. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
if not self.is_checked(selector, by=by):
if self.is_element_visible(selector, by=by):
self.click(selector, by=by)
else:
selector = self.convert_to_css_selector(selector, by=by)
self.__dont_record_js_click = True
self.js_click(selector, by=By.CSS_SELECTOR)
self.__dont_record_js_click = False
    def select_if_unselected(self, selector, by=By.CSS_SELECTOR):
        """Same as check_if_unchecked().
        Checks a checkbox or radio button if not already checked."""
        self.check_if_unchecked(selector, by=by)
def uncheck_if_checked(self, selector, by=By.CSS_SELECTOR):
""" If a checkbox is checked, will uncheck it. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
if self.is_checked(selector, by=by):
if self.is_element_visible(selector, by=by):
self.click(selector, by=by)
else:
selector = self.convert_to_css_selector(selector, by=by)
self.__dont_record_js_click = True
self.js_click(selector, by=By.CSS_SELECTOR)
self.__dont_record_js_click = False
    def unselect_if_selected(self, selector, by=By.CSS_SELECTOR):
        """Same as uncheck_if_checked().
        Unchecks a checkbox if it is currently checked."""
        self.uncheck_if_checked(selector, by=by)
def is_element_in_an_iframe(self, selector, by=By.CSS_SELECTOR):
"""Returns True if the selector's element is located in an iframe.
Otherwise returns False."""
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
if self.is_element_present(selector, by=by):
return False
soup = self.get_beautiful_soup()
iframe_list = soup.select("iframe")
for iframe in iframe_list:
iframe_identifier = None
if iframe.has_attr("name") and len(iframe["name"]) > 0:
iframe_identifier = iframe["name"]
elif iframe.has_attr("id") and len(iframe["id"]) > 0:
iframe_identifier = iframe["id"]
elif iframe.has_attr("class") and len(iframe["class"]) > 0:
iframe_class = " ".join(iframe["class"])
iframe_identifier = '[class="%s"]' % iframe_class
else:
continue
self.switch_to_frame(iframe_identifier)
if self.is_element_present(selector, by=by):
self.switch_to_default_content()
return True
self.switch_to_default_content()
return False
    def switch_to_frame_of_element(self, selector, by=By.CSS_SELECTOR):
        """Set driver control to the iframe containing element (assuming the
        element is in a single-nested iframe) and returns the iframe name.
        If element is not in an iframe, returns None, and nothing happens.
        May not work if multiple iframes are nested within each other.
        @Params
        selector - the selector of the element to search for
        by - the type of selector being used (By.CSS_SELECTOR by default)
        """
        self.__check_scope()
        selector, by = self.__recalculate_selector(selector, by)
        if self.is_element_present(selector, by=by):
            # Reachable from the default content: no frame switch needed.
            return None
        soup = self.get_beautiful_soup()
        iframe_list = soup.select("iframe")
        for iframe in iframe_list:
            # Identify each iframe by name, id, or class (in that order).
            iframe_identifier = None
            if iframe.has_attr("name") and len(iframe["name"]) > 0:
                iframe_identifier = iframe["name"]
            elif iframe.has_attr("id") and len(iframe["id"]) > 0:
                iframe_identifier = iframe["id"]
            elif iframe.has_attr("class") and len(iframe["class"]) > 0:
                iframe_class = " ".join(iframe["class"])
                iframe_identifier = '[class="%s"]' % iframe_class
            else:
                continue
            try:
                self.switch_to_frame(iframe_identifier, timeout=1)
                if self.is_element_present(selector, by=by):
                    # Found it: stay in this frame and report the identifier.
                    return iframe_identifier
            except Exception:
                pass
            self.switch_to_default_content()
        try:
            # Last resort: maybe the selector identifies the iframe itself.
            self.switch_to_frame(selector, timeout=1)
            return selector
        except Exception:
            if self.is_element_present(selector, by=by):
                return ""
            raise Exception(
                "Could not switch to iframe containing "
                "element {%s}!" % selector
            )
def hover_on_element(self, selector, by=By.CSS_SELECTOR):
self.__check_scope()
original_selector = selector
original_by = by
selector, by = self.__recalculate_selector(selector, by)
if page_utils.is_xpath_selector(selector):
selector = self.convert_to_css_selector(selector, By.XPATH)
by = By.CSS_SELECTOR
self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT
)
self.__demo_mode_highlight_if_active(original_selector, original_by)
self.scroll_to(selector, by=by)
time.sleep(0.05) # Settle down from scrolling before hovering
if self.browser != "chrome":
return page_actions.hover_on_element(self.driver, selector)
# Using Chrome
# (Pure hover actions won't work on early chromedriver versions)
try:
return page_actions.hover_on_element(self.driver, selector)
except WebDriverException as e:
driver_capabilities = self.driver.__dict__["capabilities"]
if "version" in driver_capabilities:
chrome_version = driver_capabilities["version"]
else:
chrome_version = driver_capabilities["browserVersion"]
major_chrome_version = chrome_version.split(".")[0]
chrome_dict = self.driver.__dict__["capabilities"]["chrome"]
chromedriver_version = chrome_dict["chromedriverVersion"]
chromedriver_version = chromedriver_version.split(" ")[0]
major_chromedriver_version = chromedriver_version.split(".")[0]
install_sb = (
"seleniumbase install chromedriver %s" % major_chrome_version
)
if major_chromedriver_version < major_chrome_version:
# Upgrading the driver is required for performing hover actions
message = (
"\n"
"You need a newer chromedriver to perform hover actions!\n"
"Your version of chromedriver is: %s\n"
"And your version of Chrome is: %s\n"
"You can fix this issue by running:\n>>> %s\n"
% (chromedriver_version, chrome_version, install_sb)
)
raise Exception(message)
else:
raise Exception(e)
    def hover_and_click(
        self,
        hover_selector,
        click_selector,
        hover_by=By.CSS_SELECTOR,
        click_by=By.CSS_SELECTOR,
        timeout=None,
    ):
        """When you want to hover over an element or dropdown menu,
        and then click an element that appears after that.
        @Params
        hover_selector - the selector of the element to hover over
        click_selector - the selector of the element to click afterwards
        hover_by - the selector type for hover_selector (CSS by default)
        click_by - the selector type for click_selector (CSS by default)
        timeout - time to wait for elements (settings.SMALL_TIMEOUT by
                  default, scaled by the timeout multiplier if set)
        Returns the clicked element (when determinable).
        """
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        original_selector = hover_selector
        original_by = hover_by
        hover_selector, hover_by = self.__recalculate_selector(
            hover_selector, hover_by
        )
        # The hover selector is normalized to CSS for the JS interactions.
        hover_selector = self.convert_to_css_selector(hover_selector, hover_by)
        hover_by = By.CSS_SELECTOR
        click_selector, click_by = self.__recalculate_selector(
            click_selector, click_by
        )
        dropdown_element = self.wait_for_element_visible(
            hover_selector, by=hover_by, timeout=timeout
        )
        self.__demo_mode_highlight_if_active(original_selector, original_by)
        self.scroll_to(hover_selector, by=hover_by)
        # Save pre-action state to detect new tabs / page changes later.
        pre_action_url = self.driver.current_url
        pre_window_count = len(self.driver.window_handles)
        if self.recorder_mode:
            # Record this hover-and-click ("ho_cl") if the Recorder is on.
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        the_selectors = [hover_selector, click_selector]
                        action = ["ho_cl", the_selectors, origin, time_stamp]
                        self.__extra_actions.append(action)
        outdated_driver = False
        element = None
        try:
            if self.mobile_emulator:
                # On mobile, click to hover the element
                dropdown_element.click()
            elif self.browser == "safari":
                # Use the workaround for hover-clicking on Safari
                raise Exception("This Exception will be caught.")
            else:
                page_actions.hover_element(self.driver, dropdown_element)
        except Exception:
            # Workaround path: skip the hover and activate the click
            # target directly (via its href, or via a JS click).
            outdated_driver = True
            element = self.wait_for_element_present(
                click_selector, click_by, timeout
            )
            if click_by == By.LINK_TEXT:
                self.open(self.__get_href_from_link_text(click_selector))
            elif click_by == By.PARTIAL_LINK_TEXT:
                self.open(
                    self.__get_href_from_partial_link_text(click_selector)
                )
            else:
                self.__dont_record_js_click = True
                self.js_click(click_selector, by=click_by)
                self.__dont_record_js_click = False
        if outdated_driver:
            pass  # Already did the click workaround
        elif self.mobile_emulator:
            self.click(click_selector, by=click_by)
        elif not outdated_driver:
            element = page_actions.hover_and_click(
                self.driver,
                hover_selector,
                click_selector,
                hover_by,
                click_by,
                timeout,
            )
        # If the click opened a new window/tab, switch to it if applicable.
        latest_window_count = len(self.driver.window_handles)
        if (
            latest_window_count > pre_window_count
            and (
                self.recorder_mode
                or (
                    settings.SWITCH_TO_NEW_TABS_ON_CLICK
                    and self.driver.current_url == pre_action_url
                )
            )
        ):
            self.__switch_to_newest_window_if_not_blank()
        if self.demo_mode:
            if self.driver.current_url != pre_action_url:
                self.__demo_mode_pause_if_active()
            else:
                self.__demo_mode_pause_if_active(tiny=True)
        elif self.slow_mode:
            self.__slow_mode_pause_if_active()
        return element
    def hover_and_double_click(
        self,
        hover_selector,
        click_selector,
        hover_by=By.CSS_SELECTOR,
        click_by=By.CSS_SELECTOR,
        timeout=None,
    ):
        """When you want to hover over an element or dropdown menu,
        and then double-click an element that appears after that.
        @Params
        hover_selector - the selector of the element to hover over
        click_selector - the selector of the element to double-click
        hover_by - the selector type for hover_selector (CSS by default)
        click_by - the selector type for click_selector (CSS by default)
        timeout - time to wait for elements (settings.SMALL_TIMEOUT by
                  default, scaled by the timeout multiplier if set)
        Returns the double-clicked element (when determinable).
        """
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        original_selector = hover_selector
        original_by = hover_by
        hover_selector, hover_by = self.__recalculate_selector(
            hover_selector, hover_by
        )
        # The hover selector is normalized to CSS for the JS interactions.
        hover_selector = self.convert_to_css_selector(hover_selector, hover_by)
        hover_by = By.CSS_SELECTOR
        click_selector, click_by = self.__recalculate_selector(
            click_selector, click_by
        )
        dropdown_element = self.wait_for_element_visible(
            hover_selector, by=hover_by, timeout=timeout
        )
        self.__demo_mode_highlight_if_active(original_selector, original_by)
        self.scroll_to(hover_selector, by=hover_by)
        # Save pre-action state to detect new tabs / page changes later.
        pre_action_url = self.driver.current_url
        pre_window_count = len(self.driver.window_handles)
        outdated_driver = False
        element = None
        try:
            page_actions.hover_element(self.driver, dropdown_element)
        except Exception:
            # Workaround path: skip the hover and activate the click
            # target directly (via its href, or via a JS click).
            # NOTE(review): this fallback performs a single click —
            # presumably acceptable when hovering is unsupported; confirm.
            outdated_driver = True
            element = self.wait_for_element_present(
                click_selector, click_by, timeout
            )
            if click_by == By.LINK_TEXT:
                self.open(self.__get_href_from_link_text(click_selector))
            elif click_by == By.PARTIAL_LINK_TEXT:
                self.open(
                    self.__get_href_from_partial_link_text(click_selector)
                )
            else:
                self.__dont_record_js_click = True
                self.js_click(click_selector, click_by)
                self.__dont_record_js_click = False
        if not outdated_driver:
            element = page_actions.hover_element_and_double_click(
                self.driver,
                dropdown_element,
                click_selector,
                click_by=By.CSS_SELECTOR,
                timeout=timeout,
            )
        # If the click opened a new window/tab, switch to it if applicable.
        latest_window_count = len(self.driver.window_handles)
        if (
            latest_window_count > pre_window_count
            and (
                self.recorder_mode
                or (
                    settings.SWITCH_TO_NEW_TABS_ON_CLICK
                    and self.driver.current_url == pre_action_url
                )
            )
        ):
            self.__switch_to_newest_window_if_not_blank()
        if self.demo_mode:
            if self.driver.current_url != pre_action_url:
                self.__demo_mode_pause_if_active()
            else:
                self.__demo_mode_pause_if_active(tiny=True)
        elif self.slow_mode:
            self.__slow_mode_pause_if_active()
        return element
def drag_and_drop(
self,
drag_selector,
drop_selector,
drag_by=By.CSS_SELECTOR,
drop_by=By.CSS_SELECTOR,
timeout=None,
):
""" Drag and drop an element from one selector to another. """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
drag_selector, drag_by = self.__recalculate_selector(
drag_selector, drag_by
)
drop_selector, drop_by = self.__recalculate_selector(
drop_selector, drop_by
)
drag_element = self.wait_for_element_visible(
drag_selector, by=drag_by, timeout=timeout
)
self.__demo_mode_highlight_if_active(drag_selector, drag_by)
self.wait_for_element_visible(
drop_selector, by=drop_by, timeout=timeout
)
self.__demo_mode_highlight_if_active(drop_selector, drop_by)
self.scroll_to(drag_selector, by=drag_by)
drag_selector = self.convert_to_css_selector(drag_selector, drag_by)
drop_selector = self.convert_to_css_selector(drop_selector, drop_by)
drag_and_drop_script = js_utils.get_drag_and_drop_script()
self.safe_execute_script(
drag_and_drop_script
+ (
"$('%s').simulateDragDrop("
"{dropTarget: "
"'%s'});" % (drag_selector, drop_selector)
)
)
if self.demo_mode:
self.__demo_mode_pause_if_active()
elif self.slow_mode:
self.__slow_mode_pause_if_active()
return drag_element
def drag_and_drop_with_offset(
self, selector, x, y, by=By.CSS_SELECTOR, timeout=None
):
""" Drag and drop an element to an {X,Y}-offset location. """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
css_selector = self.convert_to_css_selector(selector, by=by)
element = self.wait_for_element_visible(css_selector, timeout=timeout)
self.__demo_mode_highlight_if_active(css_selector, By.CSS_SELECTOR)
css_selector = re.escape(css_selector) # Add "\\" to special chars
css_selector = self.__escape_quotes_if_needed(css_selector)
script = js_utils.get_drag_and_drop_with_offset_script(
css_selector, x, y
)
self.safe_execute_script(script)
if self.demo_mode:
self.__demo_mode_pause_if_active()
elif self.slow_mode:
self.__slow_mode_pause_if_active()
return element
    def __select_option(
        self,
        dropdown_selector,
        option,
        dropdown_by=By.CSS_SELECTOR,
        option_by="text",
        timeout=None,
    ):
        """Selects an HTML <select> option by specification.
        Option specifications are by "text", "index", or "value".
        Defaults to "text" if option_by is unspecified or unknown.
        @Params
        dropdown_selector - the selector of the <select> element
        option - the option to select (visible text, index, or value)
        dropdown_by - the selector type for dropdown_selector
        option_by - how to match the option: "text", "index", or "value"
        timeout - time to wait for the dropdown (settings.SMALL_TIMEOUT
                  by default, scaled by the timeout multiplier if set)
        """
        from selenium.webdriver.support.ui import Select
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        dropdown_selector, dropdown_by = self.__recalculate_selector(
            dropdown_selector, dropdown_by
        )
        self.wait_for_ready_state_complete()
        element = self.wait_for_element_present(
            dropdown_selector, by=dropdown_by, timeout=timeout
        )
        if self.is_element_visible(dropdown_selector, by=dropdown_by):
            self.__demo_mode_highlight_if_active(
                dropdown_selector, dropdown_by
            )
        # Save pre-action state to detect new tabs / page changes later.
        pre_action_url = self.driver.current_url
        pre_window_count = len(self.driver.window_handles)
        try:
            if option_by == "index":
                Select(element).select_by_index(option)
            elif option_by == "value":
                Select(element).select_by_value(option)
            else:
                Select(element).select_by_visible_text(option)
        except (StaleElementReferenceException, ENI_Exception):
            # The page changed under us: wait, re-find the element, retry.
            self.wait_for_ready_state_complete()
            time.sleep(0.14)
            element = self.wait_for_element_present(
                dropdown_selector, by=dropdown_by, timeout=timeout
            )
            if option_by == "index":
                Select(element).select_by_index(option)
            elif option_by == "value":
                Select(element).select_by_value(option)
            else:
                Select(element).select_by_visible_text(option)
        # If selecting opened a new window/tab, switch to it if applicable.
        latest_window_count = len(self.driver.window_handles)
        if (
            latest_window_count > pre_window_count
            and (
                self.recorder_mode
                or (
                    settings.SWITCH_TO_NEW_TABS_ON_CLICK
                    and self.driver.current_url == pre_action_url
                )
            )
        ):
            self.__switch_to_newest_window_if_not_blank()
        if settings.WAIT_FOR_RSC_ON_CLICKS:
            self.wait_for_ready_state_complete()
        if self.demo_mode:
            if self.driver.current_url != pre_action_url:
                self.__demo_mode_pause_if_active()
            else:
                self.__demo_mode_pause_if_active(tiny=True)
        elif self.slow_mode:
            self.__slow_mode_pause_if_active()
def select_option_by_text(
self,
dropdown_selector,
option,
dropdown_by=By.CSS_SELECTOR,
timeout=None,
):
"""Selects an HTML <select> option by option text.
@Params
dropdown_selector - the <select> selector.
option - the text of the option.
"""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__select_option(
dropdown_selector,
option,
dropdown_by=dropdown_by,
option_by="text",
timeout=timeout,
)
def select_option_by_index(
self,
dropdown_selector,
option,
dropdown_by=By.CSS_SELECTOR,
timeout=None,
):
"""Selects an HTML <select> option by option index.
@Params
dropdown_selector - the <select> selector.
option - the index number of the option.
"""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__select_option(
dropdown_selector,
option,
dropdown_by=dropdown_by,
option_by="index",
timeout=timeout,
)
def select_option_by_value(
self,
dropdown_selector,
option,
dropdown_by=By.CSS_SELECTOR,
timeout=None,
):
"""Selects an HTML <select> option by option value.
@Params
dropdown_selector - the <select> selector.
option - the value property of the option.
"""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__select_option(
dropdown_selector,
option,
dropdown_by=dropdown_by,
option_by="value",
timeout=timeout,
)
    def load_html_string(self, html_string, new_page=True):
        """Loads an HTML string into the web browser.
        If new_page==True, the page will switch to: "data:text/html,"
        If new_page==False, will load HTML into the current page.
        @Params
        html_string - the HTML string to load into the browser
        new_page - whether to replace the page with "data:text/html,"
        """
        self.__check_scope()
        soup = self.get_beautiful_soup(html_string)
        # Look for a canonical link to derive a base href, so that
        # relative URLs in the loaded HTML can still resolve.
        found_base = False
        links = soup.findAll("link")
        href = None
        for link in links:
            if link.get("rel") == ["canonical"] and link.get("href"):
                found_base = True
                href = link.get("href")
                href = self.get_domain_url(href)
        if (
            found_base
            and html_string.count("<head>") == 1
            and html_string.count("<base") == 0
        ):
            # Inject a <base> tag so relative links resolve from the origin.
            html_string = html_string.replace(
                "<head>", '<head><base href="%s">' % href
            )
        elif not found_base:
            bases = soup.findAll("base")
            for base in bases:
                if base.get("href"):
                    href = base.get("href")
        if href:
            html_string = html_string.replace('base: "."', 'base: "%s"' % href)
        soup = self.get_beautiful_soup(html_string)
        # Strip non-JSON <script> tags out of the static HTML.
        # (They get re-injected via JS at the end of this method.)
        scripts = soup.findAll("script")
        for script in scripts:
            if script.get("type") != "application/json":
                html_string = html_string.replace(str(script), "")
        soup = self.get_beautiful_soup(html_string)
        found_head = False
        found_body = False
        html_head = None
        html_body = None
        if soup.head and len(str(soup.head)) > 12:
            found_head = True
            html_head = str(soup.head)
            html_head = re.escape(html_head)
            html_head = self.__escape_quotes_if_needed(html_head)
            html_head = html_head.replace("\\ ", " ")
        if soup.body and len(str(soup.body)) > 12:
            found_body = True
            html_body = str(soup.body)
            # Repair common UTF-8-as-Latin-1 mojibake byte pairs:
            html_body = html_body.replace("\xc2\xa0", " ")
            html_body = html_body.replace("\xc2\xa1", "¡")
            html_body = html_body.replace("\xc2\xa9", "©")
            html_body = html_body.replace("\xc2\xb7", "·")
            html_body = html_body.replace("\xc2\xbf", "¿")
            html_body = html_body.replace("\xc3\x97", "×")
            html_body = html_body.replace("\xc3\xb7", "÷")
            html_body = re.escape(html_body)
            html_body = self.__escape_quotes_if_needed(html_body)
            html_body = html_body.replace("\\ ", " ")
        html_string = re.escape(html_string)
        html_string = self.__escape_quotes_if_needed(html_string)
        html_string = html_string.replace("\\ ", " ")
        if new_page:
            self.open("data:text/html,")
        inner_head = """document.getElementsByTagName("head")[0].innerHTML"""
        inner_body = """document.getElementsByTagName("body")[0].innerHTML"""
        # Write the head/body content into the live DOM via JS:
        if not found_body:
            self.execute_script('''%s = \"%s\"''' % (inner_body, html_string))
        elif found_body and not found_head:
            self.execute_script('''%s = \"%s\"''' % (inner_body, html_body))
        elif found_body and found_head:
            self.execute_script('''%s = \"%s\"''' % (inner_head, html_head))
            self.execute_script('''%s = \"%s\"''' % (inner_body, html_body))
        else:
            raise Exception("Logic Error!")
        # Re-inject the scripts that were stripped out earlier:
        for script in scripts:
            js_code = script.string
            js_src = script.get("src")
            if js_code and script.get("type") != "application/json":
                js_code_lines = js_code.split("\n")
                new_lines = []
                for line in js_code_lines:
                    line = line.strip()
                    new_lines.append(line)
                js_code = "\n".join(new_lines)
                js_code = re.escape(js_code)
                js_utils.add_js_code(self.driver, js_code)
            elif js_src:
                js_utils.add_js_link(self.driver, js_src)
            else:
                pass
    def set_content(self, html_string, new_page=False):
        """Same as load_html_string(), but "new_page" defaults to False.
        (By default, loads the HTML string into the current page.)"""
        self.load_html_string(html_string, new_page=new_page)
def load_html_file(self, html_file, new_page=True):
"""Loads a local html file into the browser from a relative file path.
If new_page==True, the page will switch to: "data:text/html,"
If new_page==False, will load HTML into the current page.
Local images and other local src content WILL BE IGNORED.
"""
self.__check_scope()
if self.__looks_like_a_page_url(html_file):
self.open(html_file)
return
if len(html_file) < 6 or not html_file.endswith(".html"):
raise Exception('Expecting a ".html" file!')
abs_path = os.path.abspath(".")
file_path = None
if abs_path in html_file:
file_path = html_file
else:
file_path = abs_path + "/%s" % html_file
html_string = None
with open(file_path, "r") as f:
html_string = f.read().strip()
self.load_html_string(html_string, new_page)
def open_html_file(self, html_file):
"""Opens a local html file into the browser from a relative file path.
The URL displayed in the web browser will start with "file://".
"""
self.__check_scope()
if self.__looks_like_a_page_url(html_file):
self.open(html_file)
return
if len(html_file) < 6 or not html_file.endswith(".html"):
raise Exception('Expecting a ".html" file!')
abs_path = os.path.abspath(".")
file_path = None
if abs_path in html_file:
file_path = html_file
else:
file_path = abs_path + "/%s" % html_file
self.open("file://" + file_path)
    def execute_script(self, script, *args, **kwargs):
        """Executes JavaScript in the active browser window and returns
        the result. (A thin wrapper around driver.execute_script().)"""
        self.__check_scope()
        return self.driver.execute_script(script, *args, **kwargs)
def execute_async_script(self, script, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.EXTREME_TIMEOUT
return js_utils.execute_async_script(self.driver, script, timeout)
    def safe_execute_script(self, script, *args, **kwargs):
        """When executing a script that contains a jQuery command,
        it's important that the jQuery library has been loaded first.
        This method will load jQuery if it wasn't already loaded.
        Returns the result of the executed script."""
        self.__check_scope()
        if not js_utils.is_jquery_activated(self.driver):
            self.activate_jquery()  # Inject jQuery into the current page
        return self.driver.execute_script(script, *args, **kwargs)
    def set_window_rect(self, x, y, width, height):
        """Sets the browser window's position (x, y) and size (w, h)."""
        self.__check_scope()
        self.driver.set_window_rect(x, y, width, height)
        self.__demo_mode_pause_if_active()
    def set_window_size(self, width, height):
        """Sets the browser window's size (width x height in pixels)."""
        self.__check_scope()
        self.driver.set_window_size(width, height)
        self.__demo_mode_pause_if_active()
    def maximize_window(self):
        """Maximizes the browser window."""
        self.__check_scope()
        self.driver.maximize_window()
        self.__demo_mode_pause_if_active()
    def switch_to_frame(self, frame, timeout=None):
        """Wait for an iframe to appear, and switch to it. This should be
        usable as a drop-in replacement for driver.switch_to.frame().
        The iframe identifier can be a selector, an index, an id, a name,
        or a web element, but scrolling to the iframe first will only occur
        for visible iframes with a string selector.
        @Params
        frame - the frame element, name, id, index, or selector
        timeout - the time to wait for the alert in seconds
        """
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        if type(frame) is str and self.is_element_visible(frame):
            try:
                # Best effort: bring the iframe into view before switching.
                self.scroll_to(frame, timeout=1)
            except Exception:
                pass
        if self.recorder_mode and self._rec_overrides_switch:
            # Recorder mode overrides frame-switching: it swaps the page
            # content to the frame's content via set_content_to_frame(),
            # and logs "sk_op" / "sw_fr" actions for the recording.
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    r_a = self.get_session_storage_item("recorder_activated")
                    if r_a == "yes":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["sk_op", "", origin, time_stamp]
                        self.__extra_actions.append(action)
                        self.__set_c_from_switch = True
                        self.set_content_to_frame(frame, timeout=timeout)
                        self.__set_c_from_switch = False
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["sw_fr", frame, origin, time_stamp]
                        self.__extra_actions.append(action)
                        return
        page_actions.switch_to_frame(self.driver, frame, timeout)
    def switch_to_default_content(self):
        """Brings driver control outside the current iframe.
        (If the driver control is inside an iframe, the driver control
        will be set to one level above the current frame. If the driver
        control is not currently in an iframe, nothing will happen.)"""
        self.__check_scope()
        if self.recorder_mode and self._rec_overrides_switch:
            # Recorder mode swapped page content instead of switching
            # frames, so restore the saved content here instead, and log
            # a "sw_dc" action for the recording.
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    r_a = self.get_session_storage_item("recorder_activated")
                    if r_a == "yes":
                        self.__set_c_from_switch = True
                        self.set_content_to_default()
                        self.__set_c_from_switch = False
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["sw_dc", "", origin, time_stamp]
                        self.__extra_actions.append(action)
                        return
        self.driver.switch_to.default_content()
    def set_content_to_frame(self, frame, timeout=None):
        """Replaces the page html with an iframe's html from that page.
        If the iFrame contains an "src" field that includes a valid URL,
        then instead of replacing the current html, this method will then
        open up the "src" URL of the iFrame in a new browser tab.
        To return to default content, use: self.set_content_to_default().
        This method also sets the state of the browser window so that the
        self.set_content_to_default() method can bring the user back to
        the original content displayed, which is similar to how the methods
        self.switch_to_frame(frame) and self.switch_to_default_content()
        work together to get the user into frames and out of all of them.
        """
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        # Save the current state so set_content_to_default() can restore it.
        current_url = self.get_current_url()
        c_tab = self.driver.current_window_handle
        current_page_source = self.get_page_source()
        self.execute_script("document.cframe_swap = 0;")
        # Grab the iframe's html, then return to the default content.
        page_actions.switch_to_frame(self.driver, frame, timeout)
        iframe_html = self.get_page_source()
        self.driver.switch_to.default_content()
        self.wait_for_ready_state_complete()
        frame_found = False
        o_frame = frame
        if self.is_element_present(frame):
            frame_found = True
        elif " " not in frame:
            # Retry with the frame string treated as an iframe "name".
            frame = 'iframe[name="%s"]' % frame
            if self.is_element_present(frame):
                frame_found = True
        url = None
        if frame_found:
            url = self.execute_script(
                """return document.querySelector('%s').src;""" % frame
            )
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    pass
                else:
                    url = None  # Not a URL usable in a new browser tab
        cframe_tab = False
        if url:
            cframe_tab = True
        self.__page_sources.append([current_url, current_page_source, c_tab])
        if self.recorder_mode and not self.__set_c_from_switch:
            # Log a skipped-operation action for the Recorder.
            time_stamp = self.execute_script("return Date.now();")
            origin = self.get_origin()
            action = ["sk_op", "", origin, time_stamp]
            self.__extra_actions.append(action)
        if cframe_tab:
            # The iframe has a valid src URL: open it in a new browser tab.
            self.execute_script("document.cframe_tab = 1;")
            self.open_new_window(switch_to=True)
            self.open(url)
            self.execute_script("document.cframe_tab = 1;")
        else:
            # Replace the current page html with the iframe's html,
            # and track the swap depth in document.cframe_swap.
            self.set_content(iframe_html)
            if not self.execute_script("return document.cframe_swap;"):
                self.execute_script("document.cframe_swap = 1;")
            else:
                self.execute_script("document.cframe_swap += 1;")
        if self.recorder_mode and not self.__set_c_from_switch:
            # Log the set-content-to-frame action for the Recorder.
            time_stamp = self.execute_script("return Date.now();")
            origin = self.get_origin()
            action = ["s_c_f", o_frame, origin, time_stamp]
            self.__extra_actions.append(action)
    def set_content_to_default(self, nested=True):
        """After using self.set_content_to_frame(), this reverts the page back.
        If self.set_content_to_frame() hasn't been called here, only refreshes.
        If "nested" is set to False when the content was set to nested iFrames,
        then the control will only move above the last iFrame that was entered.
        """
        self.__check_scope()
        # cframe_swap / cframe_tab track how the content was swapped in
        # by set_content_to_frame() (in-place swap vs. new-tab open).
        swap_cnt = self.execute_script("return document.cframe_swap;")
        tab_sta = self.execute_script("return document.cframe_tab;")
        if self.recorder_mode and not self.__set_c_from_switch:
            # Log a skipped-operation action for the Recorder.
            time_stamp = self.execute_script("return Date.now();")
            origin = self.get_origin()
            action = ["sk_op", "", origin, time_stamp]
            self.__extra_actions.append(action)
        if nested:
            # Restore all the way back to the originally saved content.
            if (
                len(self.__page_sources) > 0
                and (
                    (swap_cnt and int(swap_cnt) > 0)
                    or (tab_sta and int(tab_sta) > 0)
                )
            ):
                past_content = self.__page_sources[0]
                past_url = past_content[0]
                past_source = past_content[1]
                past_tab = past_content[2]
                current_tab = self.driver.current_window_handle
                if not current_tab == past_tab:
                    if past_tab in self.driver.window_handles:
                        self.switch_to_window(past_tab)
                url_of_past_tab = self.get_current_url()
                if url_of_past_tab == past_url:
                    self.set_content(past_source)
                else:
                    self.refresh_page()
            else:
                self.refresh_page()
            self.execute_script("document.cframe_swap = 0;")
            self.__page_sources = []
        else:
            # Only move up one level (above the last entered iframe).
            just_refresh = False
            if swap_cnt and int(swap_cnt) > 0 and len(self.__page_sources) > 0:
                self.execute_script("document.cframe_swap -= 1;")
                current_url = self.get_current_url()
                past_content = self.__page_sources.pop()
                past_url = past_content[0]
                past_source = past_content[1]
                if current_url == past_url:
                    self.set_content(past_source)
                else:
                    just_refresh = True
            elif tab_sta and int(tab_sta) > 0 and len(self.__page_sources) > 0:
                past_content = self.__page_sources.pop()
                past_tab = past_content[2]
                if past_tab in self.driver.window_handles:
                    self.switch_to_window(past_tab)
                else:
                    just_refresh = True
            else:
                just_refresh = True
            if just_refresh:
                # Couldn't restore saved content: refresh and reset state.
                self.refresh_page()
                self.execute_script("document.cframe_swap = 0;")
                self.__page_sources = []
        if self.recorder_mode and not self.__set_c_from_switch:
            # Log the set-content-to-default action for the Recorder.
            time_stamp = self.execute_script("return Date.now();")
            origin = self.get_origin()
            action = ["s_c_d", nested, origin, time_stamp]
            self.__extra_actions.append(action)
def open_new_window(self, switch_to=True):
""" Opens a new browser tab/window and switches to it by default. """
self.__check_scope()
self.driver.execute_script("window.open('');")
time.sleep(0.01)
if switch_to:
self.switch_to_newest_window()
time.sleep(0.01)
if self.browser == "safari":
self.wait_for_ready_state_complete()
def switch_to_window(self, window, timeout=None):
""" Switches control of the browser to the specified window.
The window can be an integer: 0 -> 1st tab, 1 -> 2nd tab, etc...
Or it can be a list item from self.driver.window_handles """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
page_actions.switch_to_window(self.driver, window, timeout)
    def switch_to_default_window(self):
        """Switches control back to the first tab (window index 0)."""
        self.switch_to_window(0)
def __switch_to_newest_window_if_not_blank(self):
current_window = self.driver.current_window_handle
try:
self.switch_to_window(len(self.driver.window_handles) - 1)
if self.get_current_url() == "about:blank":
self.switch_to_window(current_window)
except Exception:
self.switch_to_window(current_window)
    def switch_to_newest_window(self):
        """Switches control to the most recently opened window/tab."""
        self.switch_to_window(len(self.driver.window_handles) - 1)
    def get_new_driver(
        self,
        browser=None,
        headless=None,
        locale_code=None,
        protocol=None,
        servername=None,
        port=None,
        proxy=None,
        agent=None,
        switch_to=True,
        cap_file=None,
        cap_string=None,
        recorder_ext=None,
        disable_csp=None,
        enable_ws=None,
        enable_sync=None,
        use_auto_ext=None,
        no_sandbox=None,
        disable_gpu=None,
        incognito=None,
        guest_mode=None,
        devtools=None,
        remote_debug=None,
        swiftshader=None,
        ad_block_on=None,
        block_images=None,
        chromium_arg=None,
        firefox_arg=None,
        firefox_pref=None,
        user_data_dir=None,
        extension_zip=None,
        extension_dir=None,
        is_mobile=None,
        d_width=None,
        d_height=None,
        d_p_r=None,
    ):
        """This method spins up an extra browser for tests that require
        more than one. The first browser is already provided by tests
        that import base_case.BaseCase from seleniumbase. If parameters
        aren't specified, the method uses the same as the default driver.
        @Params
        browser - the browser to use. (Ex: "chrome", "firefox")
        headless - the option to run webdriver in headless mode
        locale_code - the Language Locale Code for the web browser
        protocol - if using a Selenium Grid, set the host protocol here
        servername - if using a Selenium Grid, set the host address here
        port - if using a Selenium Grid, set the host port here
        proxy - if using a proxy server, specify the "host:port" combo here
        agent - the user agent string to use for the web browser
        switch_to - the option to switch to the new driver (default = True)
        cap_file - the file containing desired capabilities for the browser
        cap_string - the string with desired capabilities for the browser
        recorder_ext - the option to enable the SBase Recorder extension
        disable_csp - an option to disable Chrome's Content Security Policy
        enable_ws - the option to enable the Web Security feature (Chrome)
        enable_sync - the option to enable the Chrome Sync feature (Chrome)
        use_auto_ext - the option to enable Chrome's Automation Extension
        no_sandbox - the option to enable the "No-Sandbox" feature (Chrome)
        disable_gpu - the option to enable Chrome's "Disable GPU" feature
        incognito - the option to enable Chrome's Incognito mode (Chrome)
        guest_mode - the option to enable Chrome's Guest mode (Chrome)
        devtools - the option to open Chrome's DevTools on start (Chrome)
        remote_debug - the option to enable Chrome's Remote Debugger
        swiftshader - the option to use Chrome's swiftshader (Chrome-only)
        ad_block_on - the option to block ads from loading (Chromium-only)
        block_images - the option to block images from loading (Chrome)
        chromium_arg - the option to add a Chromium arg to Chrome/Edge
        firefox_arg - the option to add a Firefox arg to Firefox runs
        firefox_pref - the option to add a Firefox pref:value set (Firefox)
        user_data_dir - Chrome's User Data Directory to use (Chrome-only)
        extension_zip - A Chrome Extension ZIP file to use (Chrome-only)
        extension_dir - A Chrome Extension folder to use (Chrome-only)
        is_mobile - the option to use the mobile emulator (Chrome-only)
        d_width - the device width of the mobile emulator (Chrome-only)
        d_height - the device height of the mobile emulator (Chrome-only)
        d_p_r - the device pixel ratio of the mobile emulator (Chrome-only)
        """
        self.__check_scope()
        # Reject impossible remote configurations before doing any work
        if self.browser == "remote" and self.servername == "localhost":
            raise Exception(
                'Cannot use "remote" browser driver on localhost!'
                " Did you mean to connect to a remote Grid server"
                " such as BrowserStack or Sauce Labs? In that"
                ' case, you must specify the "server" and "port"'
                " parameters on the command line! "
                "Example: "
                "--server=user:key@hub.browserstack.com --port=80"
            )
        browserstack_ref = "https://browserstack.com/automate/capabilities"
        sauce_labs_ref = (
            "https://wiki.saucelabs.com/display/DOCS/Platform+Configurator#/"
        )
        if self.browser == "remote" and not (self.cap_file or self.cap_string):
            raise Exception(
                "Need to specify a desired capabilities file when "
                'using "--browser=remote". Add "--cap_file=FILE". '
                "File should be in the Python format used by: "
                "%s OR "
                "%s "
                "See SeleniumBase/examples/sample_cap_file_BS.py "
                "and SeleniumBase/examples/sample_cap_file_SL.py"
                % (browserstack_ref, sauce_labs_ref)
            )
        # Any argument left as None falls back to the current test's setting
        if browser is None:
            browser = self.browser
        browser_name = browser
        if headless is None:
            headless = self.headless
        if locale_code is None:
            locale_code = self.locale_code
        if protocol is None:
            protocol = self.protocol
        if servername is None:
            servername = self.servername
        if port is None:
            port = self.port
        use_grid = False
        if servername != "localhost":
            # Use Selenium Grid (Use "127.0.0.1" for localhost Grid)
            use_grid = True
        proxy_string = proxy
        if proxy_string is None:
            proxy_string = self.proxy_string
        user_agent = agent
        if user_agent is None:
            user_agent = self.user_agent
        if recorder_ext is None:
            recorder_ext = self.recorder_ext
        if disable_csp is None:
            disable_csp = self.disable_csp
        if enable_ws is None:
            enable_ws = self.enable_ws
        if enable_sync is None:
            enable_sync = self.enable_sync
        if use_auto_ext is None:
            use_auto_ext = self.use_auto_ext
        if no_sandbox is None:
            no_sandbox = self.no_sandbox
        if disable_gpu is None:
            disable_gpu = self.disable_gpu
        if incognito is None:
            incognito = self.incognito
        if guest_mode is None:
            guest_mode = self.guest_mode
        if devtools is None:
            devtools = self.devtools
        if remote_debug is None:
            remote_debug = self.remote_debug
        if swiftshader is None:
            swiftshader = self.swiftshader
        if ad_block_on is None:
            ad_block_on = self.ad_block_on
        if block_images is None:
            block_images = self.block_images
        if chromium_arg is None:
            chromium_arg = self.chromium_arg
        if firefox_arg is None:
            firefox_arg = self.firefox_arg
        if firefox_pref is None:
            firefox_pref = self.firefox_pref
        if user_data_dir is None:
            user_data_dir = self.user_data_dir
        if extension_zip is None:
            extension_zip = self.extension_zip
        if extension_dir is None:
            extension_dir = self.extension_dir
        test_id = self.__get_test_id()
        if cap_file is None:
            cap_file = self.cap_file
        if cap_string is None:
            cap_string = self.cap_string
        if is_mobile is None:
            is_mobile = self.mobile_emulator
        if d_width is None:
            d_width = self.__device_width
        if d_height is None:
            d_height = self.__device_height
        if d_p_r is None:
            d_p_r = self.__device_pixel_ratio
        valid_browsers = constants.ValidBrowsers.valid_browsers
        if browser_name not in valid_browsers:
            raise Exception(
                "Browser: {%s} is not a valid browser option. "
                "Valid options = {%s}" % (browser, valid_browsers)
            )
        # Launch a web browser
        from seleniumbase.core import browser_launcher

        new_driver = browser_launcher.get_driver(
            browser_name=browser_name,
            headless=headless,
            locale_code=locale_code,
            use_grid=use_grid,
            protocol=protocol,
            servername=servername,
            port=port,
            proxy_string=proxy_string,
            user_agent=user_agent,
            cap_file=cap_file,
            cap_string=cap_string,
            recorder_ext=recorder_ext,
            disable_csp=disable_csp,
            enable_ws=enable_ws,
            enable_sync=enable_sync,
            use_auto_ext=use_auto_ext,
            no_sandbox=no_sandbox,
            disable_gpu=disable_gpu,
            incognito=incognito,
            guest_mode=guest_mode,
            devtools=devtools,
            remote_debug=remote_debug,
            swiftshader=swiftshader,
            ad_block_on=ad_block_on,
            block_images=block_images,
            chromium_arg=chromium_arg,
            firefox_arg=firefox_arg,
            firefox_pref=firefox_pref,
            user_data_dir=user_data_dir,
            extension_zip=extension_zip,
            extension_dir=extension_dir,
            test_id=test_id,
            mobile_emulator=is_mobile,
            device_width=d_width,
            device_height=d_height,
            device_pixel_ratio=d_p_r,
        )
        # Track the new driver so it can be closed during tearDown()
        self._drivers_list.append(new_driver)
        self.__driver_browser_map[new_driver] = browser_name
        if switch_to:
            self.driver = new_driver
            self.browser = browser_name
            if self.headless or self.xvfb:
                # Make sure the invisible browser window is big enough
                width = settings.HEADLESS_START_WIDTH
                height = settings.HEADLESS_START_HEIGHT
                try:
                    self.driver.set_window_size(width, height)
                    self.wait_for_ready_state_complete()
                except Exception:
                    # This shouldn't fail, but in case it does,
                    # get safely through setUp() so that
                    # WebDrivers can get closed during tearDown().
                    pass
            else:
                # Each browser gets its own preferred startup geometry
                if self.browser == "chrome" or self.browser == "edge":
                    width = settings.CHROME_START_WIDTH
                    height = settings.CHROME_START_HEIGHT
                    try:
                        if self.maximize_option:
                            self.driver.maximize_window()
                        else:
                            self.driver.set_window_size(width, height)
                        self.wait_for_ready_state_complete()
                    except Exception:
                        pass  # Keep existing browser resolution
                elif self.browser == "firefox":
                    width = settings.CHROME_START_WIDTH
                    try:
                        if self.maximize_option:
                            self.driver.maximize_window()
                        else:
                            self.driver.set_window_size(width, 720)
                        self.wait_for_ready_state_complete()
                    except Exception:
                        pass  # Keep existing browser resolution
                elif self.browser == "safari":
                    width = settings.CHROME_START_WIDTH
                    if self.maximize_option:
                        try:
                            self.driver.maximize_window()
                            self.wait_for_ready_state_complete()
                        except Exception:
                            pass  # Keep existing browser resolution
                    else:
                        try:
                            self.driver.set_window_rect(10, 30, width, 630)
                        except Exception:
                            pass
                elif self.browser == "opera":
                    width = settings.CHROME_START_WIDTH
                    if self.maximize_option:
                        try:
                            self.driver.maximize_window()
                            self.wait_for_ready_state_complete()
                        except Exception:
                            pass  # Keep existing browser resolution
                    else:
                        try:
                            self.driver.set_window_rect(10, 30, width, 700)
                        except Exception:
                            pass
            # Open the start page (if one was set) in the new browser
            if self.start_page and len(self.start_page) >= 4:
                if page_utils.is_valid_url(self.start_page):
                    self.open(self.start_page)
                else:
                    new_start_page = "https://" + self.start_page
                    if page_utils.is_valid_url(new_start_page):
                        self.__dont_record_open = True
                        self.open(new_start_page)
                        self.__dont_record_open = False
        return new_driver
def switch_to_driver(self, driver):
"""Switches control of the browser to the specified driver.
Also sets the self.driver variable to the specified driver.
You may need this if using self.get_new_driver() in your code."""
self.__check_scope()
self.driver = driver
if self.driver in self.__driver_browser_map:
self.browser = self.__driver_browser_map[self.driver]
def switch_to_default_driver(self):
""" Sets self.driver to the default/original driver. """
self.__check_scope()
self.driver = self._default_driver
if self.driver in self.__driver_browser_map:
self.browser = self.__driver_browser_map[self.driver]
def save_screenshot(
self, name, folder=None, selector=None, by=By.CSS_SELECTOR
):
"""
Saves a screenshot of the current page.
If no folder is specified, uses the folder where pytest was called.
The screenshot will include the entire page unless a selector is given.
If a provided selector is not found, then takes a full-page screenshot.
If the folder provided doesn't exist, it will get created.
The screenshot will be in PNG format: (*.png)
"""
self.wait_for_ready_state_complete()
if selector and by:
selector, by = self.__recalculate_selector(selector, by)
if page_actions.is_element_present(self.driver, selector, by):
return page_actions.save_screenshot(
self.driver, name, folder, selector, by
)
return page_actions.save_screenshot(self.driver, name, folder)
def save_screenshot_to_logs(
self, name=None, selector=None, by=By.CSS_SELECTOR
):
"""Saves a screenshot of the current page to the "latest_logs" folder.
Naming is automatic:
If NO NAME provided: "_1_screenshot.png", "_2_screenshot.png", etc.
If NAME IS provided, it becomes: "_1_name.png", "_2_name.png", etc.
The screenshot will include the entire page unless a selector is given.
If a provided selector is not found, then takes a full-page screenshot.
(The last_page / failure screenshot is always "screenshot.png")
The screenshot will be in PNG format."""
self.wait_for_ready_state_complete()
test_id = self.__get_test_id()
test_logpath = self.log_path + "/" + test_id
self.__create_log_path_as_needed(test_logpath)
if name:
name = str(name)
self.__screenshot_count += 1
if not name or len(name) == 0:
name = "_%s_screenshot.png" % self.__screenshot_count
else:
pre_name = "_%s_" % self.__screenshot_count
if len(name) >= 4 and name[-4:].lower() == ".png":
name = name[:-4]
if len(name) == 0:
name = "screenshot"
name = "%s%s.png" % (pre_name, name)
if selector and by:
selector, by = self.__recalculate_selector(selector, by)
if page_actions.is_element_present(self.driver, selector, by):
return page_actions.save_screenshot(
self.driver, name, test_logpath, selector, by
)
return page_actions.save_screenshot(self.driver, name, test_logpath)
def save_page_source(self, name, folder=None):
"""Saves the page HTML to the current directory (or given subfolder).
If the folder specified doesn't exist, it will get created.
@Params
name - The file name to save the current page's HTML to.
folder - The folder to save the file to. (Default = current folder)
"""
self.wait_for_ready_state_complete()
return page_actions.save_page_source(self.driver, name, folder)
def save_cookies(self, name="cookies.txt"):
""" Saves the page cookies to the "saved_cookies" folder. """
self.wait_for_ready_state_complete()
cookies = self.driver.get_cookies()
json_cookies = json.dumps(cookies)
if name.endswith("/"):
raise Exception("Invalid filename for Cookies!")
if "/" in name:
name = name.split("/")[-1]
if len(name) < 1:
raise Exception("Filename for Cookies is too short!")
if not name.endswith(".txt"):
name = name + ".txt"
folder = constants.SavedCookies.STORAGE_FOLDER
abs_path = os.path.abspath(".")
file_path = abs_path + "/%s" % folder
if not os.path.exists(file_path):
os.makedirs(file_path)
cookies_file_path = "%s/%s" % (file_path, name)
cookies_file = codecs.open(cookies_file_path, "w+", encoding="utf-8")
cookies_file.writelines(json_cookies)
cookies_file.close()
def load_cookies(self, name="cookies.txt"):
""" Loads the page cookies from the "saved_cookies" folder. """
self.wait_for_ready_state_complete()
if name.endswith("/"):
raise Exception("Invalid filename for Cookies!")
if "/" in name:
name = name.split("/")[-1]
if len(name) < 1:
raise Exception("Filename for Cookies is too short!")
if not name.endswith(".txt"):
name = name + ".txt"
folder = constants.SavedCookies.STORAGE_FOLDER
abs_path = os.path.abspath(".")
file_path = abs_path + "/%s" % folder
cookies_file_path = "%s/%s" % (file_path, name)
json_cookies = None
with open(cookies_file_path, "r") as f:
json_cookies = f.read().strip()
cookies = json.loads(json_cookies)
for cookie in cookies:
if "expiry" in cookie:
del cookie["expiry"]
self.driver.add_cookie(cookie)
def delete_all_cookies(self):
"""Deletes all cookies in the web browser.
Does NOT delete the saved cookies file."""
self.wait_for_ready_state_complete()
self.driver.delete_all_cookies()
def delete_saved_cookies(self, name="cookies.txt"):
"""Deletes the cookies file from the "saved_cookies" folder.
Does NOT delete the cookies from the web browser."""
self.wait_for_ready_state_complete()
if name.endswith("/"):
raise Exception("Invalid filename for Cookies!")
if "/" in name:
name = name.split("/")[-1]
if len(name) < 1:
raise Exception("Filename for Cookies is too short!")
if not name.endswith(".txt"):
name = name + ".txt"
folder = constants.SavedCookies.STORAGE_FOLDER
abs_path = os.path.abspath(".")
file_path = abs_path + "/%s" % folder
cookies_file_path = "%s/%s" % (file_path, name)
if os.path.exists(cookies_file_path):
if cookies_file_path.endswith(".txt"):
os.remove(cookies_file_path)
    def wait_for_ready_state_complete(self, timeout=None):
        """Waits for the page to reach a ready/complete state
        (via js_utils.wait_for_ready_state_complete), then waits for
        AngularJS, optionally asserts no JS errors, and applies the
        ad-blocker on new page loads when ad_block_on is set.
        @Params
        timeout - seconds to wait (Default: EXTREME_TIMEOUT)
        Returns the readiness result from js_utils."""
        self.__check_scope()
        if not timeout:
            timeout = settings.EXTREME_TIMEOUT
        if self.timeout_multiplier and timeout == settings.EXTREME_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        is_ready = js_utils.wait_for_ready_state_complete(self.driver, timeout)
        self.wait_for_angularjs(timeout=settings.MINI_TIMEOUT)
        if self.js_checking_on:
            self.assert_no_js_errors()
        if self.ad_block_on and (self.headless or not self.is_chromium()):
            # For Chromium browsers in headed mode, the extension is used
            current_url = self.get_current_url()
            if not current_url == self.__last_page_load_url:
                # Only run the ad_block routine once per new page URL
                if page_actions.is_element_present(
                    self.driver, "iframe", By.CSS_SELECTOR
                ):
                    self.ad_block()
                self.__last_page_load_url = current_url
        return is_ready
def wait_for_angularjs(self, timeout=None, **kwargs):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
js_utils.wait_for_angularjs(self.driver, timeout, **kwargs)
def sleep(self, seconds):
self.__check_scope()
if not sb_config.time_limit:
time.sleep(seconds)
elif seconds <= 0.3:
shared_utils.check_if_time_limit_exceeded()
time.sleep(seconds)
shared_utils.check_if_time_limit_exceeded()
else:
start_ms = time.time() * 1000.0
stop_ms = start_ms + (seconds * 1000.0)
for x in range(int(seconds * 5)):
shared_utils.check_if_time_limit_exceeded()
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.2)
def install_addon(self, xpi_file):
"""Installs a Firefox add-on instantly at run-time.
@Params
xpi_file - A file archive in .xpi format.
"""
self.wait_for_ready_state_complete()
if self.browser != "firefox":
raise Exception(
"install_addon(xpi_file) is for Firefox ONLY!\n"
"To load a Chrome extension, use the comamnd-line:\n"
"--extension_zip=CRX_FILE OR --extension_dir=DIR"
)
xpi_path = os.path.abspath(xpi_file)
self.driver.install_addon(xpi_path, temporary=True)
def activate_demo_mode(self):
self.demo_mode = True
def deactivate_demo_mode(self):
self.demo_mode = False
def activate_design_mode(self):
# Activate Chrome's Design Mode, which lets you edit a site directly.
# See: https://twitter.com/sulco/status/1177559150563344384
self.wait_for_ready_state_complete()
script = """document.designMode = 'on';"""
self.execute_script(script)
def deactivate_design_mode(self):
# Deactivate Chrome's Design Mode.
self.wait_for_ready_state_complete()
script = """document.designMode = 'off';"""
self.execute_script(script)
    def activate_recorder(self):
        """Injects the SeleniumBase Recorder JS into the current page so
        that user actions get recorded (Chromium-only). Prints a status
        message and posts it on the page. No-op if the Recorder browser
        extension is already active, or on special browser pages.
        Raises Exception if the browser is not Chromium-based."""
        from seleniumbase.js_code.recorder_js import recorder_js
        if not self.is_chromium():
            raise Exception(
                "The Recorder is only for Chromium browsers: (Chrome or Edge)")
        url = self.driver.current_url
        # The Recorder JS can't be injected into internal browser pages
        if (
            url.startswith("data:") or url.startswith("about:")
            or url.startswith("chrome:") or url.startswith("edge:")
        ):
            message = (
                'The URL in Recorder-Mode cannot start with: '
                '"data:", "about:", "chrome:", or "edge:"!')
            print("\n" + message)
            return
        if self.recorder_ext:
            return  # The Recorder extension is already active
        try:
            # sessionStorage flag marks pages where the JS already runs
            recorder_on = self.get_session_storage_item("recorder_activated")
            if not recorder_on == "yes":
                self.execute_script(recorder_js)
            self.recorder_mode = True
            message = "Recorder Mode ACTIVE. [ESC]: Pause. [~`]: Resume."
            print("\n" + message)
            p_msg = "Recorder Mode ACTIVE.<br>[ESC]: Pause. [~`]: Resume."
            self.post_message(p_msg, pause=False, style="error")
        except Exception:
            # Best-effort: activation failures are non-fatal
            pass
def __get_recorded_actions_on_active_tab(self):
url = self.driver.current_url
if (
url.startswith("data:") or url.startswith("about:")
or url.startswith("chrome:") or url.startswith("edge:")
):
return []
actions = self.get_session_storage_item('recorded_actions')
if actions:
actions = json.loads(actions)
return actions
else:
return []
def __process_recorded_actions(self):
import colorama
raw_actions = [] # All raw actions from sessionStorage
srt_actions = []
cleaned_actions = []
sb_actions = []
used_actions = []
action_dict = {}
for window in self.driver.window_handles:
self.switch_to_window(window)
tab_actions = self.__get_recorded_actions_on_active_tab()
for action in tab_actions:
if action not in used_actions:
used_actions.append(action)
raw_actions.append(action)
for action in self.__extra_actions:
if action not in used_actions:
used_actions.append(action)
raw_actions.append(action)
for action in raw_actions:
if self._reuse_session:
if int(action[3]) < int(self.__js_start_time):
continue
# Use key for sorting and preventing duplicates
key = str(action[3]) + "-" + str(action[0])
action_dict[key] = action
for key in sorted(action_dict):
# print(action_dict[key]) # For debugging purposes
srt_actions.append(action_dict[key])
for n in range(len(srt_actions)):
if (
(srt_actions[n][0] == "begin" or srt_actions[n][0] == "_url_")
and n > 0
and srt_actions[n-1][0] == "sk_op"
):
srt_actions[n][0] = "_skip"
for n in range(len(srt_actions)):
if (
(srt_actions[n][0] == "begin" or srt_actions[n][0] == "_url_")
and n > 0
and (
srt_actions[n-1][0] == "click"
or srt_actions[n-1][0] == "js_cl"
or srt_actions[n-1][0] == "js_ca"
)
):
url1 = srt_actions[n-1][2]
if (
srt_actions[n-1][0] == "js_cl"
or srt_actions[n-1][0] == "js_ca"
):
url1 = srt_actions[n-1][2][0]
if url1.endswith("/#/"):
url1 = url1[:-3]
elif url1.endswith("/"):
url1 = url1[:-1]
url2 = srt_actions[n][2]
if url2.endswith("/#/"):
url2 = url1[:-3]
elif url2.endswith("/"):
url2 = url2[:-1]
if url1 == url2:
srt_actions[n][0] = "f_url"
for n in range(len(srt_actions)):
if (
(srt_actions[n][0] == "begin" or srt_actions[n][0] == "_url_")
and n > 0
and (
srt_actions[n-1][0] == "begin"
or srt_actions[n-1][0] == "_url_"
)
):
url1 = srt_actions[n-1][2]
if url1.endswith("/#/"):
url1 = url1[:-3]
elif url1.endswith("/"):
url1 = url1[:-1]
url2 = srt_actions[n][2]
if url2.endswith("/#/"):
url2 = url1[:-3]
elif url2.endswith("/"):
url2 = url2[:-1]
if url1 == url2:
srt_actions[n-1][0] = "_skip"
elif url2.startswith(url1):
srt_actions[n][0] = "f_url"
for n in range(len(srt_actions)):
if (
srt_actions[n][0] == "input"
and n > 0
and srt_actions[n-1][0] == "input"
and srt_actions[n-1][2] == ""
):
srt_actions[n-1][0] = "_skip"
for n in range(len(srt_actions)):
if (
(srt_actions[n][0] == "begin" or srt_actions[n][0] == "_url_")
and n > 0
and (
srt_actions[n-1][0] == "click"
or srt_actions[n-1][0] == "js_cl"
or srt_actions[n-1][0] == "js_ca"
or srt_actions[n-1][0] == "input"
)
and (int(srt_actions[n][3]) - int(srt_actions[n-1][3]) < 6500)
):
if (
srt_actions[n-1][0] == "click"
or srt_actions[n-1][0] == "js_cl"
or srt_actions[n-1][0] == "js_ca"
):
if (
srt_actions[n-1][1].startswith("input")
or srt_actions[n-1][1].startswith("button")
):
srt_actions[n][0] = "f_url"
elif srt_actions[n-1][0] == "input":
if srt_actions[n-1][2].endswith("\n"):
srt_actions[n][0] = "f_url"
for n in range(len(srt_actions)):
if (
srt_actions[n][0] == "cho_f"
and n > 0
and srt_actions[n-1][0] == "chfil"
):
srt_actions[n-1][0] = "_skip"
srt_actions[n][2] = srt_actions[n-1][1][1]
origins = []
for n in range(len(srt_actions)):
if (
srt_actions[n][0] == "begin"
or srt_actions[n][0] == "_url_"
or srt_actions[n][0] == "f_url"
):
origin = srt_actions[n][1]
if origin.endswith("/"):
origin = origin[0:-1]
if origin not in origins:
origins.append(origin)
for n in range(len(srt_actions)):
if (
srt_actions[n][0] == "click"
and n > 0
and srt_actions[n-1][0] == "ho_cl"
and srt_actions[n-1][2] in origins
):
srt_actions[n-1][0] = "_skip"
srt_actions[n][0] = "h_clk"
srt_actions[n][1] = srt_actions[n-1][1][0]
srt_actions[n][2] = srt_actions[n-1][1][1]
for n in range(len(srt_actions)):
if srt_actions[n][0] == "chfil" and srt_actions[n][2] in origins:
srt_actions[n][0] = "cho_f"
srt_actions[n][2] = srt_actions[n][1][1]
srt_actions[n][1] = srt_actions[n][1][0]
for n in range(len(srt_actions)):
if (
srt_actions[n][0] == "sh_fc"
and n > 0
and srt_actions[n-1][0] == "sh_fc"
):
srt_actions[n-1][0] = "_skip"
ext_actions = []
ext_actions.append("_url_")
ext_actions.append("js_cl")
ext_actions.append("js_ca")
ext_actions.append("js_ty")
ext_actions.append("as_el")
ext_actions.append("as_ep")
ext_actions.append("asenv")
ext_actions.append("hi_li")
ext_actions.append("as_lt")
ext_actions.append("as_ti")
ext_actions.append("as_df")
ext_actions.append("do_fi")
ext_actions.append("as_at")
ext_actions.append("as_te")
ext_actions.append("as_et")
ext_actions.append("sw_fr")
ext_actions.append("sw_dc")
ext_actions.append("s_c_f")
ext_actions.append("s_c_d")
ext_actions.append("sh_fc")
ext_actions.append("c_l_s")
for n in range(len(srt_actions)):
if srt_actions[n][0] in ext_actions:
origin = srt_actions[n][2]
if (
srt_actions[n][0] == "js_cl"
or srt_actions[n][0] == "js_ca"
):
origin = srt_actions[n][2][1]
if origin.endswith("/"):
origin = origin[0:-1]
if srt_actions[n][0] == "js_ty":
srt_actions[n][2] = srt_actions[n][1][1]
srt_actions[n][1] = srt_actions[n][1][0]
if srt_actions[n][0] == "_url_" and origin not in origins:
origins.append(origin)
if origin not in origins:
srt_actions[n][0] = "_skip"
for n in range(len(srt_actions)):
if (
srt_actions[n][0] == "input"
and n > 0
and srt_actions[n-1][0] == "js_ty"
and srt_actions[n][2] == srt_actions[n-1][2]
):
srt_actions[n][0] = "_skip"
for n in range(len(srt_actions)):
cleaned_actions.append(srt_actions[n])
for action in srt_actions:
if action[0] == "begin" or action[0] == "_url_":
sb_actions.append('self.open("%s")' % action[2])
elif action[0] == "f_url":
sb_actions.append('self.open_if_not_url("%s")' % action[2])
elif action[0] == "click":
method = "click"
if '"' not in action[1]:
sb_actions.append('self.%s("%s")' % (method, action[1]))
else:
sb_actions.append("self.%s('%s')" % (method, action[1]))
elif action[0] == "js_cl":
method = "js_click"
if '"' not in action[1]:
sb_actions.append('self.%s("%s")' % (method, action[1]))
else:
sb_actions.append("self.%s('%s')" % (method, action[1]))
elif action[0] == "js_ca":
method = "js_click_all"
if '"' not in action[1]:
sb_actions.append('self.%s("%s")' % (method, action[1]))
else:
sb_actions.append("self.%s('%s')" % (method, action[1]))
elif action[0] == "input" or action[0] == "js_ty":
method = "type"
if action[0] == "js_ty":
method = "js_type"
text = action[2].replace("\n", "\\n")
if '"' not in action[1] and '"' not in text:
sb_actions.append('self.%s("%s", "%s")' % (
method, action[1], text))
elif '"' not in action[1] and '"' in text:
sb_actions.append('self.%s("%s", \'%s\')' % (
method, action[1], text))
elif '"' in action[1] and '"' not in text:
sb_actions.append('self.%s(\'%s\', "%s")' % (
method, action[1], text))
elif '"' in action[1] and '"' in text:
sb_actions.append("self.%s('%s', '%s')" % (
method, action[1], text))
elif action[0] == "h_clk":
method = "hover_and_click"
if '"' not in action[1] and '"' not in action[2]:
sb_actions.append('self.%s("%s", "%s")' % (
method, action[1], action[2]))
elif '"' not in action[1] and '"' in action[2]:
sb_actions.append('self.%s("%s", \'%s\')' % (
method, action[1], action[2]))
elif '"' in action[1] and '"' not in action[2]:
sb_actions.append('self.%s(\'%s\', "%s")' % (
method, action[1], action[2]))
elif '"' in action[1] and '"' in action[2]:
sb_actions.append("self.%s('%s', '%s')" % (
method, action[1], action[2]))
elif action[0] == "ddrop":
method = "drag_and_drop"
if '"' not in action[1] and '"' not in action[2]:
sb_actions.append('self.%s("%s", "%s")' % (
method, action[1], action[2]))
elif '"' not in action[1] and '"' in action[2]:
sb_actions.append('self.%s("%s", \'%s\')' % (
method, action[1], action[2]))
elif '"' in action[1] and '"' not in action[2]:
sb_actions.append('self.%s(\'%s\', "%s")' % (
method, action[1], action[2]))
elif '"' in action[1] and '"' in action[2]:
sb_actions.append("self.%s('%s', '%s')" % (
method, action[1], action[2]))
elif action[0] == "s_opt":
method = "select_option_by_text"
if '"' not in action[1] and '"' not in action[2]:
sb_actions.append('self.%s("%s", "%s")' % (
method, action[1], action[2]))
elif '"' not in action[1] and '"' in action[2]:
sb_actions.append('self.%s("%s", \'%s\')' % (
method, action[1], action[2]))
elif '"' in action[1] and '"' not in action[2]:
sb_actions.append('self.%s(\'%s\', "%s")' % (
method, action[1], action[2]))
elif '"' in action[1] and '"' in action[2]:
sb_actions.append("self.%s('%s', '%s')" % (
method, action[1], action[2]))
elif action[0] == "set_v":
method = "set_value"
if '"' not in action[1] and '"' not in action[2]:
sb_actions.append('self.%s("%s", "%s")' % (
method, action[1], action[2]))
elif '"' not in action[1] and '"' in action[2]:
sb_actions.append('self.%s("%s", \'%s\')' % (
method, action[1], action[2]))
elif '"' in action[1] and '"' not in action[2]:
sb_actions.append('self.%s(\'%s\', "%s")' % (
method, action[1], action[2]))
elif '"' in action[1] and '"' in action[2]:
sb_actions.append("self.%s('%s', '%s')" % (
method, action[1], action[2]))
elif action[0] == "cho_f":
method = "choose_file"
action[2] = action[2].replace("\\", "\\\\")
if '"' not in action[1] and '"' not in action[2]:
sb_actions.append('self.%s("%s", "%s")' % (
method, action[1], action[2]))
elif '"' not in action[1] and '"' in action[2]:
sb_actions.append('self.%s("%s", \'%s\')' % (
method, action[1], action[2]))
elif '"' in action[1] and '"' not in action[2]:
sb_actions.append('self.%s(\'%s\', "%s")' % (
method, action[1], action[2]))
elif '"' in action[1] and '"' in action[2]:
sb_actions.append("self.%s('%s', '%s')" % (
method, action[1], action[2]))
elif action[0] == "sw_fr":
method = "switch_to_frame"
if '"' not in action[1]:
sb_actions.append('self.%s("%s")' % (method, action[1]))
else:
sb_actions.append("self.%s('%s')" % (method, action[1]))
elif action[0] == "sw_dc":
sb_actions.append("self.switch_to_default_content()")
elif action[0] == "s_c_f":
method = "set_content_to_frame"
if '"' not in action[1]:
sb_actions.append('self.%s("%s")' % (method, action[1]))
else:
sb_actions.append("self.%s('%s')" % (method, action[1]))
elif action[0] == "s_c_d":
method = "set_content_to_default"
nested = action[1]
if nested:
sb_actions.append("self.%s()" % method)
else:
sb_actions.append("self.%s(nested=False)" % method)
elif action[0] == "as_el":
method = "assert_element"
if '"' not in action[1]:
sb_actions.append('self.%s("%s")' % (method, action[1]))
else:
sb_actions.append("self.%s('%s')" % (method, action[1]))
elif action[0] == "as_ep":
method = "assert_element_present"
if '"' not in action[1]:
sb_actions.append('self.%s("%s")' % (method, action[1]))
else:
sb_actions.append("self.%s('%s')" % (method, action[1]))
elif action[0] == "asenv":
method = "assert_element_not_visible"
if '"' not in action[1]:
sb_actions.append('self.%s("%s")' % (method, action[1]))
else:
sb_actions.append("self.%s('%s')" % (method, action[1]))
elif action[0] == "hi_li":
method = "highlight"
if '"' not in action[1]:
sb_actions.append('self.%s("%s")' % (method, action[1]))
else:
sb_actions.append("self.%s('%s')" % (method, action[1]))
elif action[0] == "as_lt":
method = "assert_link_text"
if '"' not in action[1]:
sb_actions.append('self.%s("%s")' % (method, action[1]))
else:
sb_actions.append("self.%s('%s')" % (method, action[1]))
elif action[0] == "as_ti":
method = "assert_title"
if '"' not in action[1]:
sb_actions.append('self.%s("%s")' % (method, action[1]))
else:
sb_actions.append("self.%s('%s')" % (method, action[1]))
elif action[0] == "as_df":
method = "assert_downloaded_file"
if '"' not in action[1]:
sb_actions.append('self.%s("%s")' % (method, action[1]))
else:
sb_actions.append("self.%s('%s')" % (method, action[1]))
elif action[0] == "do_fi":
method = "download_file"
file_url = action[1][0]
dest = action[1][1]
if not dest:
sb_actions.append('self.%s("%s")' % (
method, file_url))
else:
sb_actions.append('self.%s("%s", "%s")' % (
method, file_url, dest))
elif action[0] == "as_at":
method = "assert_attribute"
if ('"' not in action[1][0]) and action[1][2]:
sb_actions.append('self.%s("%s", "%s", "%s")' % (
method, action[1][0], action[1][1], action[1][2]))
elif ('"' not in action[1][0]) and not action[1][2]:
sb_actions.append('self.%s("%s", "%s")' % (
method, action[1][0], action[1][1]))
elif ('"' in action[1][0]) and action[1][2]:
sb_actions.append('self.%s(\'%s\', "%s", "%s")' % (
method, action[1][0], action[1][1], action[1][2]))
else:
sb_actions.append('self.%s(\'%s\', "%s")' % (
method, action[1][0], action[1][1]))
elif action[0] == "as_te" or action[0] == "as_et":
method = "assert_text"
if action[0] == "as_et":
method = "assert_exact_text"
if action[1][1] != "html":
if '"' not in action[1][0] and '"' not in action[1][1]:
sb_actions.append('self.%s("%s", "%s")' % (
method, action[1][0], action[1][1]))
elif '"' not in action[1][0] and '"' in action[1][1]:
sb_actions.append('self.%s("%s", \'%s\')' % (
method, action[1][0], action[1][1]))
elif '"' in action[1] and '"' not in action[1][1]:
sb_actions.append('self.%s(\'%s\', "%s")' % (
method, action[1][0], action[1][1]))
elif '"' in action[1] and '"' in action[1][1]:
sb_actions.append("self.%s('%s', '%s')" % (
method, action[1][0], action[1][1]))
else:
if '"' not in action[1][0]:
sb_actions.append('self.%s("%s")' % (
method, action[1][0]))
else:
sb_actions.append("self.%s('%s')" % (
method, action[1][0]))
elif action[0] == "sh_fc":
cb_method = "show_file_choosers"
sb_actions.append('self.%s()' % cb_method)
elif action[0] == "c_l_s":
sb_actions.append("self.clear_local_storage()")
elif action[0] == "c_box":
cb_method = "check_if_unchecked"
if action[2] == "no":
cb_method = "uncheck_if_checked"
if '"' not in action[1]:
sb_actions.append('self.%s("%s")' % (cb_method, action[1]))
else:
sb_actions.append("self.%s('%s')" % (cb_method, action[1]))
filename = self.__get_filename()
new_file = False
data = []
if filename not in sb_config._recorded_actions:
new_file = True
sb_config._recorded_actions[filename] = []
data.append("from seleniumbase import BaseCase")
data.append("")
data.append("")
data.append("class %s(BaseCase):" % self.__class__.__name__)
else:
data = sb_config._recorded_actions[filename]
data.append(" def %s(self):" % self._testMethodName)
if len(sb_actions) > 0:
for action in sb_actions:
data.append(" " + action)
else:
data.append(" pass")
data.append("")
sb_config._recorded_actions[filename] = data
recordings_folder = constants.Recordings.SAVED_FOLDER
if recordings_folder.endswith("/"):
recordings_folder = recordings_folder[:-1]
if not os.path.exists(recordings_folder):
try:
os.makedirs(recordings_folder)
except Exception:
pass
file_name = self.__class__.__module__.split(".")[-1] + "_rec.py"
file_path = "%s/%s" % (recordings_folder, file_name)
out_file = codecs.open(file_path, "w+", "utf-8")
out_file.writelines("\r\n".join(data))
out_file.close()
rec_message = ">>> RECORDING SAVED as: "
if not new_file:
rec_message = ">>> RECORDING ADDED to: "
star_len = len(rec_message) + len(file_path)
try:
terminal_size = os.get_terminal_size().columns
if terminal_size > 30 and star_len > terminal_size:
star_len = terminal_size
except Exception:
pass
stars = "*" * star_len
c1 = ""
c2 = ""
cr = ""
if "linux" not in sys.platform:
colorama.init(autoreset=True)
c1 = colorama.Fore.RED + colorama.Back.LIGHTYELLOW_EX
c2 = colorama.Fore.LIGHTRED_EX + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
rec_message = rec_message.replace(">>>", c2 + ">>>" + cr)
print("\n\n%s%s%s%s\n%s" % (rec_message, c1, file_path, cr, stars))
def activate_jquery(self):
"""If "jQuery is not defined", use this method to activate it for use.
This happens because jQuery is not always defined on web sites."""
self.wait_for_ready_state_complete()
js_utils.activate_jquery(self.driver)
self.wait_for_ready_state_complete()
    def __are_quotes_escaped(self, string):
        # Returns whether the quotes in the string are already
        # backslash-escaped. (Delegates to js_utils.)
        return js_utils.are_quotes_escaped(string)
    def __escape_quotes_if_needed(self, string):
        # Escapes quotes in the string unless they are already escaped,
        # so the string can be embedded safely in a JS snippet.
        return js_utils.escape_quotes_if_needed(string)
def bring_to_front(self, selector, by=By.CSS_SELECTOR):
"""Updates the Z-index of a page element to bring it into view.
Useful when getting a WebDriverException, such as the one below:
{ Element is not clickable at point (#, #).
Other element would receive the click: ... }"""
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT
)
try:
selector = self.convert_to_css_selector(selector, by=by)
except Exception:
# Don't run action if can't convert to CSS_Selector for JavaScript
return
selector = re.escape(selector)
selector = self.__escape_quotes_if_needed(selector)
script = (
"""document.querySelector('%s').style.zIndex = '999999';"""
% selector
)
self.execute_script(script)
def highlight_click(
self, selector, by=By.CSS_SELECTOR, loops=3, scroll=True
):
self.__check_scope()
if not self.demo_mode:
self.highlight(selector, by=by, loops=loops, scroll=scroll)
self.click(selector, by=by)
def highlight_update_text(
self, selector, text, by=By.CSS_SELECTOR, loops=3, scroll=True
):
self.__check_scope()
if not self.demo_mode:
self.highlight(selector, by=by, loops=loops, scroll=scroll)
self.update_text(selector, text, by=by)
    def highlight(self, selector, by=By.CSS_SELECTOR, loops=None, scroll=True):
        """This method uses fancy JavaScript to highlight an element.
        Used during demo_mode.
        @Params
        selector - the selector of the element to find
        by - the type of selector to search by (Default: CSS)
        loops - # of times to repeat the highlight animation
                (Default: 4. Each loop lasts for about 0.18s)
        scroll - the option to scroll to the element first (Default: True)
        """
        self.__check_scope()
        selector, by = self.__recalculate_selector(selector, by, xp_ok=False)
        element = self.wait_for_element_visible(
            selector, by=by, timeout=settings.SMALL_TIMEOUT
        )
        if not loops:
            loops = settings.HIGHLIGHTS
        if scroll:
            try:
                # Pick slow jQuery-scroll for long distances (or Safari),
                # and the plain slow-scroll for short ones.
                if self.browser != "safari":
                    scroll_distance = js_utils.get_scroll_distance_to_element(
                        self.driver, element
                    )
                    if abs(scroll_distance) > constants.Values.SSMD:
                        self.__jquery_slow_scroll_to(selector, by)
                    else:
                        self.__slow_scroll_to_element(element)
                else:
                    self.__jquery_slow_scroll_to(selector, by)
            except Exception:
                # Scrolling failed (possibly a stale element):
                # wait, re-find the element, and retry once.
                self.wait_for_ready_state_complete()
                time.sleep(0.12)
                element = self.wait_for_element_visible(
                    selector, by=by, timeout=settings.SMALL_TIMEOUT
                )
                self.__slow_scroll_to_element(element)
        try:
            selector = self.convert_to_css_selector(selector, by=by)
        except Exception:
            # Don't highlight if can't convert to CSS_SELECTOR
            return
        if self.highlights:
            # A class-level override of the number of animation loops.
            loops = self.highlights
        if self.browser == "ie":
            loops = 1  # Override previous setting because IE is slow
        loops = int(loops)
        o_bs = ""  # original_box_shadow
        try:
            style = element.get_attribute("style")
        except Exception:
            # Element may have gone stale; re-find it and retry.
            self.wait_for_ready_state_complete()
            time.sleep(0.12)
            element = self.wait_for_element_visible(
                selector, by=By.CSS_SELECTOR, timeout=settings.SMALL_TIMEOUT
            )
            style = element.get_attribute("style")
        if style:
            # Remember the original box-shadow so it can be restored
            # by the highlight scripts after the animation ends.
            if "box-shadow: " in style:
                box_start = style.find("box-shadow: ")
                box_end = style.find(";", box_start) + 1
                original_box_shadow = style[box_start:box_end]
                o_bs = original_box_shadow
        orig_selector = selector
        if ":contains" not in selector and ":first" not in selector:
            # Pure JS can handle this selector directly.
            selector = re.escape(selector)
            selector = self.__escape_quotes_if_needed(selector)
            self.__highlight_with_js(selector, loops, o_bs)
        else:
            # ":contains"/":first" are jQuery-only pseudo-selectors.
            selector = self.__make_css_match_first_element_only(selector)
            selector = re.escape(selector)
            selector = self.__escape_quotes_if_needed(selector)
            try:
                self.__highlight_with_jquery(selector, loops, o_bs)
            except Exception:
                pass  # JQuery probably couldn't load. Skip highlighting.
        if self.recorder_mode:
            # Log a "hi_li" action for the Recorder (unless paused).
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["hi_li", orig_selector, origin, time_stamp]
                        self.__extra_actions.append(action)
        time.sleep(0.065)
    def __highlight_with_js(self, selector, loops, o_bs):
        # Run the pure-JS highlight animation on the selector.
        # o_bs: the element's original box-shadow (passed to js_utils).
        self.wait_for_ready_state_complete()
        js_utils.highlight_with_js(self.driver, selector, loops, o_bs)
    def __highlight_with_jquery(self, selector, loops, o_bs):
        # Run the jQuery-based highlight animation (used for selectors
        # with jQuery-only syntax such as ":contains"/":first").
        # o_bs: the element's original box-shadow (passed to js_utils).
        self.wait_for_ready_state_complete()
        js_utils.highlight_with_jquery(self.driver, selector, loops, o_bs)
def press_up_arrow(self, selector="html", times=1, by=By.CSS_SELECTOR):
"""Simulates pressing the UP Arrow on the keyboard.
By default, "html" will be used as the CSS Selector target.
You can specify how many times in-a-row the action happens."""
self.__check_scope()
if times < 1:
return
element = self.wait_for_element_present(selector)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode and not self.slow_mode:
self.__scroll_to_element(element, selector, by)
for i in range(int(times)):
try:
element.send_keys(Keys.ARROW_UP)
except Exception:
self.wait_for_ready_state_complete()
element = self.wait_for_element_visible(selector)
element.send_keys(Keys.ARROW_UP)
time.sleep(0.01)
if self.slow_mode:
time.sleep(0.1)
def press_down_arrow(self, selector="html", times=1, by=By.CSS_SELECTOR):
"""Simulates pressing the DOWN Arrow on the keyboard.
By default, "html" will be used as the CSS Selector target.
You can specify how many times in-a-row the action happens."""
self.__check_scope()
if times < 1:
return
element = self.wait_for_element_present(selector)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode and not self.slow_mode:
self.__scroll_to_element(element, selector, by)
for i in range(int(times)):
try:
element.send_keys(Keys.ARROW_DOWN)
except Exception:
self.wait_for_ready_state_complete()
element = self.wait_for_element_visible(selector)
element.send_keys(Keys.ARROW_DOWN)
time.sleep(0.01)
if self.slow_mode:
time.sleep(0.1)
def press_left_arrow(self, selector="html", times=1, by=By.CSS_SELECTOR):
"""Simulates pressing the LEFT Arrow on the keyboard.
By default, "html" will be used as the CSS Selector target.
You can specify how many times in-a-row the action happens."""
self.__check_scope()
if times < 1:
return
element = self.wait_for_element_present(selector)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode and not self.slow_mode:
self.__scroll_to_element(element, selector, by)
for i in range(int(times)):
try:
element.send_keys(Keys.ARROW_LEFT)
except Exception:
self.wait_for_ready_state_complete()
element = self.wait_for_element_visible(selector)
element.send_keys(Keys.ARROW_LEFT)
time.sleep(0.01)
if self.slow_mode:
time.sleep(0.1)
def press_right_arrow(self, selector="html", times=1, by=By.CSS_SELECTOR):
"""Simulates pressing the RIGHT Arrow on the keyboard.
By default, "html" will be used as the CSS Selector target.
You can specify how many times in-a-row the action happens."""
self.__check_scope()
if times < 1:
return
element = self.wait_for_element_present(selector)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode and not self.slow_mode:
self.__scroll_to_element(element, selector, by)
for i in range(int(times)):
try:
element.send_keys(Keys.ARROW_RIGHT)
except Exception:
self.wait_for_ready_state_complete()
element = self.wait_for_element_visible(selector)
element.send_keys(Keys.ARROW_RIGHT)
time.sleep(0.01)
if self.slow_mode:
time.sleep(0.1)
def scroll_to(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Fast scroll to destination """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if self.demo_mode or self.slow_mode:
self.slow_scroll_to(selector, by=by, timeout=timeout)
return
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
try:
self.__scroll_to_element(element, selector, by)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.12)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
self.__scroll_to_element(element, selector, by)
    def scroll_to_element(self, selector, by=By.CSS_SELECTOR, timeout=None):
        """Same as self.scroll_to() - scrolls to the element instantly."""
        self.scroll_to(selector, by=by, timeout=timeout)
def slow_scroll_to(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Slow motion scroll to destination """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
try:
scroll_distance = js_utils.get_scroll_distance_to_element(
self.driver, element
)
if abs(scroll_distance) > constants.Values.SSMD:
self.__jquery_slow_scroll_to(selector, by)
else:
self.__slow_scroll_to_element(element)
except Exception:
self.wait_for_ready_state_complete()
time.sleep(0.12)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout
)
self.__slow_scroll_to_element(element)
    def slow_scroll_to_element(
        self, selector, by=By.CSS_SELECTOR, timeout=None
    ):
        """Same as self.slow_scroll_to() - scrolls in slow motion."""
        self.slow_scroll_to(selector, by=by, timeout=timeout)
def scroll_to_top(self):
""" Scroll to the top of the page. """
self.__check_scope()
scroll_script = "window.scrollTo(0, 0);"
try:
self.execute_script(scroll_script)
time.sleep(0.012)
return True
except Exception:
return False
def scroll_to_bottom(self):
""" Scroll to the bottom of the page. """
self.__check_scope()
scroll_script = "window.scrollTo(0, 10000);"
try:
self.execute_script(scroll_script)
time.sleep(0.012)
return True
except Exception:
return False
    def click_xpath(self, xpath):
        """Clicks an element located by the given XPath selector."""
        # Technically self.click() will automatically detect an xpath selector,
        # so self.click_xpath() is just a longer name for the same action.
        self.click(xpath, by=By.XPATH)
    def js_click(
        self, selector, by=By.CSS_SELECTOR, all_matches=False, scroll=True
    ):
        """Clicks an element using JavaScript.
        Can be used to click hidden / invisible elements.
        If "all_matches" is False, only the first match is clicked.
        If "scroll" is False, won't scroll unless running in Demo Mode."""
        self.wait_for_ready_state_complete()
        selector, by = self.__recalculate_selector(selector, by, xp_ok=False)
        if by == By.LINK_TEXT:
            # JS can't click by Link Text; fall back to a WebDriver click.
            message = (
                "Pure JavaScript doesn't support clicking by Link Text. "
                "You may want to use self.jquery_click() instead, which "
                "allows this with :contains(), assuming jQuery isn't blocked. "
                "For now, self.js_click() will use a regular WebDriver click."
            )
            logging.debug(message)
            self.click(selector, by=by)
            return
        element = self.wait_for_element_present(
            selector, by=by, timeout=settings.SMALL_TIMEOUT
        )
        if self.is_element_visible(selector, by=by):
            self.__demo_mode_highlight_if_active(selector, by)
            if scroll and not self.demo_mode and not self.slow_mode:
                success = js_utils.scroll_to_element(self.driver, element)
                if not success:
                    # Scrolling failed; wait and re-find the element.
                    self.wait_for_ready_state_complete()
                    timeout = settings.SMALL_TIMEOUT
                    element = page_actions.wait_for_element_present(
                        self.driver, selector, by, timeout=timeout
                    )
        css_selector = self.convert_to_css_selector(selector, by=by)
        css_selector = re.escape(css_selector)  # Add "\\" to special chars
        css_selector = self.__escape_quotes_if_needed(css_selector)
        action = None
        # Remember the URL and window count so that a click that spawns
        # a new window/tab can be detected (and switched to) afterwards.
        pre_action_url = self.driver.current_url
        pre_window_count = len(self.driver.window_handles)
        if self.recorder_mode and not self.__dont_record_js_click:
            # Build a "js_cl"/"js_ca" action entry for the Recorder.
            time_stamp = self.execute_script("return Date.now();")
            tag_name = None
            href = ""
            if ":contains\\(" not in css_selector:
                tag_name = self.execute_script(
                    "return document.querySelector('%s').tagName.toLowerCase()"
                    % css_selector
                )
            if tag_name == "a":
                href = self.execute_script(
                    "return document.querySelector('%s').href" % css_selector
                )
            origin = self.get_origin()
            href_origin = [href, origin]
            action = ["js_cl", selector, href_origin, time_stamp]
            if all_matches:
                action[0] = "js_ca"
        if not all_matches:
            # ":contains(...)" needs jQuery; everything else uses pure JS.
            if ":contains\\(" not in css_selector:
                self.__js_click(selector, by=by)
            else:
                click_script = """jQuery('%s')[0].click();""" % css_selector
                self.safe_execute_script(click_script)
        else:
            if ":contains\\(" not in css_selector:
                self.__js_click_all(selector, by=by)
            else:
                click_script = """jQuery('%s').click();""" % css_selector
                self.safe_execute_script(click_script)
        if self.recorder_mode and action:
            self.__extra_actions.append(action)
        latest_window_count = len(self.driver.window_handles)
        if (
            latest_window_count > pre_window_count
            and (
                self.recorder_mode
                or (
                    settings.SWITCH_TO_NEW_TABS_ON_CLICK
                    and self.driver.current_url == pre_action_url
                )
            )
        ):
            # The click opened a new window/tab: switch to it if not blank.
            self.__switch_to_newest_window_if_not_blank()
        self.wait_for_ready_state_complete()
        self.__demo_mode_pause_if_active()
def js_click_all(self, selector, by=By.CSS_SELECTOR):
""" Clicks all matching elements using pure JS. (No jQuery) """
self.js_click(selector, by=By.CSS_SELECTOR, all_matches=True)
def jquery_click(self, selector, by=By.CSS_SELECTOR):
"""Clicks an element using jQuery. (Different from using pure JS.)
Can be used to click hidden / invisible elements."""
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by, xp_ok=False)
self.wait_for_element_present(
selector, by=by, timeout=settings.SMALL_TIMEOUT
)
if self.is_element_visible(selector, by=by):
self.__demo_mode_highlight_if_active(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
click_script = """jQuery('%s')[0].click();""" % selector
self.safe_execute_script(click_script)
self.__demo_mode_pause_if_active()
def jquery_click_all(self, selector, by=By.CSS_SELECTOR):
""" Clicks all matching elements using jQuery. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by, xp_ok=False)
self.wait_for_element_present(
selector, by=by, timeout=settings.SMALL_TIMEOUT
)
if self.is_element_visible(selector, by=by):
self.__demo_mode_highlight_if_active(selector, by)
css_selector = self.convert_to_css_selector(selector, by=by)
click_script = """jQuery('%s').click();""" % css_selector
self.safe_execute_script(click_script)
self.__demo_mode_pause_if_active()
def hide_element(self, selector, by=By.CSS_SELECTOR):
""" Hide the first element on the page that matches the selector. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
hide_script = """jQuery('%s').hide();""" % selector
self.safe_execute_script(hide_script)
def hide_elements(self, selector, by=By.CSS_SELECTOR):
""" Hide all elements on the page that match the selector. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
hide_script = """jQuery('%s').hide();""" % selector
self.safe_execute_script(hide_script)
def show_element(self, selector, by=By.CSS_SELECTOR):
""" Show the first element on the page that matches the selector. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
show_script = """jQuery('%s').show(0);""" % selector
self.safe_execute_script(show_script)
def show_elements(self, selector, by=By.CSS_SELECTOR):
""" Show all elements on the page that match the selector. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
show_script = """jQuery('%s').show(0);""" % selector
self.safe_execute_script(show_script)
def remove_element(self, selector, by=By.CSS_SELECTOR):
""" Remove the first element on the page that matches the selector. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
remove_script = """jQuery('%s').remove();""" % selector
self.safe_execute_script(remove_script)
def remove_elements(self, selector, by=By.CSS_SELECTOR):
""" Remove all elements on the page that match the selector. """
self.__check_scope()
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
remove_script = """jQuery('%s').remove();""" % selector
self.safe_execute_script(remove_script)
    def ad_block(self):
        """Block ads that appear on the current web page.
        Removes every DOM node matching a selector from the ad-block list."""
        from seleniumbase.config import ad_block_list
        self.__check_scope()  # Using wait_for_RSC would cause an infinite loop
        for css_selector in ad_block_list.AD_BLOCK_LIST:
            css_selector = re.escape(css_selector)  # Add "\\" to special chars
            css_selector = self.__escape_quotes_if_needed(css_selector)
            # Remove every element that matches this ad selector.
            script = (
                """var $elements = document.querySelectorAll('%s');
                var index = 0, length = $elements.length;
                for(; index < length; index++){
                $elements[index].remove();}"""
                % css_selector
            )
            try:
                self.execute_script(script)
            except Exception:
                pass  # Don't fail test if ad_blocking fails
    def show_file_choosers(self):
        """Display hidden file-chooser input fields on sites if present.
        First tries jQuery's show(); then rewrites any 'hidden' class
        names to 'visible' on the matching inputs via plain JS."""
        css_selector = 'input[type="file"]'
        try:
            self.show_elements(css_selector)
        except Exception:
            pass
        css_selector = re.escape(css_selector)  # Add "\\" to special chars
        css_selector = self.__escape_quotes_if_needed(css_selector)
        # Swap 'hidden' for 'visible' in each file input's class attribute.
        script = (
            """var $elements = document.querySelectorAll('%s');
            var index = 0, length = $elements.length;
            for(; index < length; index++){
            the_class = $elements[index].getAttribute('class');
            new_class = the_class.replaceAll('hidden', 'visible');
            $elements[index].setAttribute('class', new_class);}"""
            % css_selector
        )
        try:
            self.execute_script(script)
        except Exception:
            pass
        if self.recorder_mode:
            # Log a "sh_fc" action for the Recorder (unless paused).
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["sh_fc", "", origin, time_stamp]
                        self.__extra_actions.append(action)
    def get_domain_url(self, url):
        """Returns the domain URL for the given URL.
        (Delegates to page_utils.get_domain_url().)"""
        self.__check_scope()
        return page_utils.get_domain_url(url)
def get_beautiful_soup(self, source=None):
"""BeautifulSoup is a toolkit for dissecting an HTML document
and extracting what you need. It's great for screen-scraping!
See: https://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
from bs4 import BeautifulSoup
if not source:
source = self.get_page_source()
soup = BeautifulSoup(source, "html.parser")
return soup
def get_unique_links(self):
"""Get all unique links in the html of the page source.
Page links include those obtained from:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src".
"""
page_url = self.get_current_url()
soup = self.get_beautiful_soup(self.get_page_source())
links = page_utils._get_unique_links(page_url, soup)
return links
def get_link_status_code(self, link, allow_redirects=False, timeout=5):
"""Get the status code of a link.
If the timeout is set to less than 1, it becomes 1.
If the timeout is exceeded by requests.get(), it will return a 404.
For a list of available status codes, see:
https://en.wikipedia.org/wiki/List_of_HTTP_status_codes
"""
if self.__requests_timeout:
timeout = self.__requests_timeout
if timeout < 1:
timeout = 1
status_code = page_utils._get_link_status_code(
link, allow_redirects=allow_redirects, timeout=timeout
)
return status_code
def assert_link_status_code_is_not_404(self, link):
status_code = str(self.get_link_status_code(link))
bad_link_str = 'Error: "%s" returned a 404!' % link
self.assertNotEqual(status_code, "404", bad_link_str)
def __get_link_if_404_error(self, link):
status_code = str(self.get_link_status_code(link))
if status_code == "404":
# Verify again to be sure. (In case of multi-threading overload.)
status_code = str(self.get_link_status_code(link))
if status_code == "404":
return link
else:
return None
else:
return None
    def assert_no_404_errors(self, multithreaded=True, timeout=None):
        """Assert no 404 errors from page links obtained from:
        "a"->"href", "img"->"src", "link"->"href", and "script"->"src".
        Timeout is on a per-link basis using the "requests" library.
        (A 404 error represents a broken link on a web page.)
        """
        all_links = self.get_unique_links()
        links = []
        # Skip non-HTTP schemes and Google Fonts static assets.
        for link in all_links:
            if (
                "javascript:" not in link
                and "mailto:" not in link
                and "data:" not in link
                and "://fonts.gstatic.com" not in link
            ):
                links.append(link)
        if timeout:
            if not type(timeout) is int and not type(timeout) is float:
                raise Exception('Expecting a numeric value for "timeout"!')
            if timeout < 0:
                raise Exception('The "timeout" cannot be a negative number!')
            # Stored on the instance so get_link_status_code() can see it.
            self.__requests_timeout = timeout
        broken_links = []
        if multithreaded:
            # Check up to 10 links concurrently (thread-based pool).
            from multiprocessing.dummy import Pool as ThreadPool
            pool = ThreadPool(10)
            results = pool.map(self.__get_link_if_404_error, links)
            pool.close()
            pool.join()
            for result in results:
                if result:
                    broken_links.append(result)
        else:
            broken_links = []
            for link in links:
                if self.__get_link_if_404_error(link):
                    broken_links.append(link)
        self.__requests_timeout = None  # Reset the requests.get() timeout
        if len(broken_links) > 0:
            bad_links_str = "\n".join(broken_links)
            if len(broken_links) == 1:
                self.fail("Broken link detected:\n%s" % bad_links_str)
            elif len(broken_links) > 1:
                self.fail("Broken links detected:\n%s" % bad_links_str)
        if self.demo_mode:
            # Show the translated assertion message in Demo Mode.
            a_t = "ASSERT NO 404 ERRORS"
            if self._language != "English":
                from seleniumbase.fixtures.words import SD
                a_t = SD.translate_assert_no_404_errors(self._language)
            messenger_post = "%s" % a_t
            self.__highlight_with_assert_success(messenger_post, "html")
def print_unique_links_with_status_codes(self):
"""Finds all unique links in the html of the page source
and then prints out those links with their status codes.
Format: ["link" -> "status_code"] (per line)
Page links include those obtained from:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src".
"""
page_url = self.get_current_url()
soup = self.get_beautiful_soup(self.get_page_source())
page_utils._print_unique_links_with_status_codes(page_url, soup)
def __fix_unicode_conversion(self, text):
""" Fixing Chinese characters when converting from PDF to HTML. """
text = text.replace("\u2f8f", "\u884c")
text = text.replace("\u2f45", "\u65b9")
text = text.replace("\u2f08", "\u4eba")
text = text.replace("\u2f70", "\u793a")
text = text.replace("\xe2\xbe\x8f", "\xe8\xa1\x8c")
text = text.replace("\xe2\xbd\xb0", "\xe7\xa4\xba")
text = text.replace("\xe2\xbe\x8f", "\xe8\xa1\x8c")
text = text.replace("\xe2\xbd\x85", "\xe6\x96\xb9")
return text
def get_pdf_text(
self,
pdf,
page=None,
maxpages=None,
password=None,
codec="utf-8",
wrap=False,
nav=False,
override=False,
):
"""Gets text from a PDF file.
PDF can be either a URL or a file path on the local file system.
@Params
pdf - The URL or file path of the PDF file.
page - The page number (or a list of page numbers) of the PDF.
If a page number is provided, looks only at that page.
(1 is the first page, 2 is the second page, etc.)
If no page number is provided, returns all PDF text.
maxpages - Instead of providing a page number, you can provide
the number of pages to use from the beginning.
password - If the PDF is password-protected, enter it here.
codec - The compression format for character encoding.
(The default codec used by this method is 'utf-8'.)
wrap - Replaces ' \n' with ' ' so that individual sentences
from a PDF don't get broken up into separate lines when
getting converted into text format.
nav - If PDF is a URL, navigates to the URL in the browser first.
(Not needed because the PDF will be downloaded anyway.)
override - If the PDF file to be downloaded already exists in the
downloaded_files/ folder, that PDF will be used
instead of downloading it again."""
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
from pdfminer.high_level import extract_text
if not password:
password = ""
if not maxpages:
maxpages = 0
if not pdf.lower().endswith(".pdf"):
raise Exception("%s is not a PDF file! (Expecting a .pdf)" % pdf)
file_path = None
if page_utils.is_valid_url(pdf):
from seleniumbase.core import download_helper
downloads_folder = download_helper.get_downloads_folder()
if nav:
if self.get_current_url() != pdf:
self.open(pdf)
file_name = pdf.split("/")[-1]
file_path = downloads_folder + "/" + file_name
if not os.path.exists(file_path):
self.download_file(pdf)
elif override:
self.download_file(pdf)
else:
if not os.path.exists(pdf):
raise Exception("%s is not a valid URL or file path!" % pdf)
file_path = os.path.abspath(pdf)
page_search = None # (Pages are delimited by '\x0c')
if type(page) is list:
pages = page
page_search = []
for page in pages:
page_search.append(page - 1)
elif type(page) is int:
page = page - 1
if page < 0:
page = 0
page_search = [page]
else:
page_search = None
pdf_text = extract_text(
file_path,
password="",
page_numbers=page_search,
maxpages=maxpages,
caching=False,
codec=codec,
)
pdf_text = self.__fix_unicode_conversion(pdf_text)
if wrap:
pdf_text = pdf_text.replace(" \n", " ")
pdf_text = pdf_text.strip() # Remove leading and trailing whitespace
return pdf_text
    def assert_pdf_text(
        self,
        pdf,
        text,
        page=None,
        maxpages=None,
        password=None,
        codec="utf-8",
        wrap=True,
        nav=False,
        override=False,
    ):
        """Asserts text in a PDF file.
        PDF can be either a URL or a file path on the local file system.
        Returns True if the text is found; raises Exception otherwise.
        @Params
        pdf - The URL or file path of the PDF file.
        text - The expected text to verify in the PDF.
        page - The page number of the PDF to use (optional).
                If a page number is provided, looks only at that page.
                    (1 is the first page, 2 is the second page, etc.)
                If no page number is provided, looks at all the pages.
        maxpages - Instead of providing a page number, you can provide
                   the number of pages to use from the beginning.
        password - If the PDF is password-protected, enter it here.
        codec - The compression format for character encoding.
                (The default codec used by this method is 'utf-8'.)
        wrap - Replaces ' \n' with ' ' so that individual sentences
               from a PDF don't get broken up into separate lines when
               getting converted into text format.
        nav - If PDF is a URL, navigates to the URL in the browser first.
              (Not needed because the PDF will be downloaded anyway.)
        override - If the PDF file to be downloaded already exists in the
                   downloaded_files/ folder, that PDF will be used
                   instead of downloading it again."""
        # Normalize the expected text the same way get_pdf_text() does.
        text = self.__fix_unicode_conversion(text)
        if not codec:
            codec = "utf-8"
        pdf_text = self.get_pdf_text(
            pdf,
            page=page,
            maxpages=maxpages,
            password=password,
            codec=codec,
            wrap=wrap,
            nav=nav,
            override=override,
        )
        if type(page) is int:
            # Include the page number in the error for single-page checks.
            if text not in pdf_text:
                raise Exception(
                    "PDF [%s] is missing expected text [%s] on "
                    "page [%s]!" % (pdf, text, page)
                )
        else:
            if text not in pdf_text:
                raise Exception(
                    "PDF [%s] is missing expected text [%s]!" % (pdf, text)
                )
        return True
def create_folder(self, folder):
""" Creates a folder of the given name if it doesn't already exist. """
if folder.endswith("/"):
folder = folder[:-1]
if len(folder) < 1:
raise Exception("Minimum folder name length = 1.")
if not os.path.exists(folder):
try:
os.makedirs(folder)
except Exception:
pass
    def choose_file(
        self, selector, file_path, by=By.CSS_SELECTOR, timeout=None
    ):
        """This method is used to choose a file to upload to a website.
        It works by populating a file-chooser "input" field of type="file".
        A relative file_path will get converted into an absolute file_path.
        Example usage:
            self.choose_file('input[type="file"]', "my_dir/my_file.txt")
        """
        self.__check_scope()
        if not timeout:
            timeout = settings.LARGE_TIMEOUT
        if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        selector, by = self.__recalculate_selector(selector, by)
        abs_path = os.path.abspath(file_path)
        element = self.wait_for_element_present(
            selector, by=by, timeout=timeout
        )
        if self.is_element_visible(selector, by=by):
            self.__demo_mode_highlight_if_active(selector, by)
            if not self.demo_mode and not self.slow_mode:
                self.__scroll_to_element(element, selector, by)
        else:
            # The file input is hidden: try to reveal it first so that
            # highlighting/scrolling can still happen.
            choose_file_selector = 'input[type="file"]'
            if self.is_element_present(choose_file_selector):
                if not self.is_element_visible(choose_file_selector):
                    self.show_file_choosers()
                    if self.is_element_visible(selector, by=by):
                        self.__demo_mode_highlight_if_active(selector, by)
                        if not self.demo_mode and not self.slow_mode:
                            self.__scroll_to_element(element, selector, by)
        pre_action_url = self.driver.current_url
        if self.recorder_mode:
            # Log a "chfil" action for the Recorder (unless paused).
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        sele_file_path = [selector, file_path]
                        action = ["chfil", sele_file_path, origin, time_stamp]
                        self.__extra_actions.append(action)
        if type(abs_path) is int or type(abs_path) is float:
            # NOTE(review): os.path.abspath() returns a string, so this
            # branch looks unreachable - presumably defensive; confirm.
            abs_path = str(abs_path)
        try:
            # Populating a file input is done by sending it the file path.
            element.send_keys(abs_path)
        except (StaleElementReferenceException, ENI_Exception):
            # The element went stale; wait, re-find it, and retry once.
            self.wait_for_ready_state_complete()
            time.sleep(0.16)
            element = self.wait_for_element_present(
                selector, by=by, timeout=timeout
            )
            element.send_keys(abs_path)
        if self.demo_mode:
            if self.driver.current_url != pre_action_url:
                self.__demo_mode_pause_if_active()
            else:
                self.__demo_mode_pause_if_active(tiny=True)
        elif self.slow_mode:
            self.__slow_mode_pause_if_active()
    def save_element_as_image_file(
        self, selector, file_name, folder=None, overlay_text=""
    ):
        """Take a screenshot of an element and save it as an image file.
        If no folder is specified, will save it to the current folder.
        If overlay_text is provided, will add that to the saved image.
        The ".png" extension is appended to file_name if missing."""
        element = self.wait_for_element_visible(selector)
        element_png = element.screenshot_as_png
        if len(file_name.split(".")[0]) < 1:
            raise Exception("Error: file_name length must be > 0.")
        if not file_name.endswith(".png"):
            file_name = file_name + ".png"
        image_file_path = None
        if folder:
            if folder.endswith("/"):
                folder = folder[:-1]
            if len(folder) > 0:
                self.create_folder(folder)
                image_file_path = "%s/%s" % (folder, file_name)
        if not image_file_path:
            image_file_path = file_name
        with open(image_file_path, "wb") as file:
            file.write(element_png)
        # Add a text overlay if given
        if type(overlay_text) is str and len(overlay_text) > 0:
            from PIL import Image, ImageDraw
            # Size a yellow banner to fit the overlay text, then draw
            # the text on top of it in blue.
            text_rows = overlay_text.split("\n")
            len_text_rows = len(text_rows)
            max_width = 0
            for text_row in text_rows:
                if len(text_row) > max_width:
                    max_width = len(text_row)
            image = Image.open(image_file_path)
            draw = ImageDraw.Draw(image)
            draw.rectangle(
                (0, 0, (max_width * 6) + 6, 16 * len_text_rows),
                fill=(236, 236, 28),
            )
            draw.text(
                (4, 2),  # Coordinates
                overlay_text,  # Text
                (8, 38, 176),  # Color
            )
            image.save(image_file_path, "PNG", quality=100, optimize=True)
def download_file(self, file_url, destination_folder=None):
"""Downloads the file from the url to the destination folder.
If no destination folder is specified, the default one is used.
(The default [Downloads Folder] = "./downloaded_files")"""
if not destination_folder:
destination_folder = constants.Files.DOWNLOADS_FOLDER
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
page_utils._download_file_to(file_url, destination_folder)
if self.recorder_mode:
url = self.get_current_url()
if url and len(url) > 0:
if ("http:") in url or ("https:") in url or ("file:") in url:
if self.get_session_storage_item("pause_recorder") == "no":
time_stamp = self.execute_script("return Date.now();")
origin = self.get_origin()
url_dest = [file_url, destination_folder]
action = ["do_fi", url_dest, origin, time_stamp]
self.__extra_actions.append(action)
def save_file_as(self, file_url, new_file_name, destination_folder=None):
"""Similar to self.download_file(), except that you get to rename the
file being downloaded to whatever you want."""
if not destination_folder:
destination_folder = constants.Files.DOWNLOADS_FOLDER
page_utils._download_file_to(
file_url, destination_folder, new_file_name
)
def save_data_as(self, data, file_name, destination_folder=None):
"""Saves the data specified to a file of the name specified.
If no destination folder is specified, the default one is used.
(The default [Downloads Folder] = "./downloaded_files")"""
if not destination_folder:
destination_folder = constants.Files.DOWNLOADS_FOLDER
page_utils._save_data_as(data, destination_folder, file_name)
def get_downloads_folder(self):
"""Returns the path of the SeleniumBase "downloaded_files/" folder.
Calling self.download_file(file_url) will put that file in here.
With the exception of Safari, IE, and Chromium Guest Mode,
any clicks that download files will also use this folder
rather than using the browser's default "downloads/" path."""
self.__check_scope()
from seleniumbase.core import download_helper
return download_helper.get_downloads_folder()
def get_browser_downloads_folder(self):
"""Returns the path that is used when a click initiates a download.
SeleniumBase overrides the system path to be "downloaded_files/"
The path can't be changed on Safari, IE, or Chromium Guest Mode.
The same problem occurs when using an out-of-date chromedriver.
"""
self.__check_scope()
if self.is_chromium() and self.guest_mode and not self.headless:
# Guest Mode (non-headless) can force the default downloads path
return os.path.join(os.path.expanduser("~"), "downloads")
elif self.browser == "safari" or self.browser == "ie":
# Can't change the system [Downloads Folder] on Safari or IE
return os.path.join(os.path.expanduser("~"), "downloads")
elif (
self.driver.capabilities["browserName"].lower() == "chrome"
and int(self.get_chromedriver_version().split(".")[0]) < 73
and self.headless
):
return os.path.join(os.path.expanduser("~"), "downloads")
else:
from seleniumbase.core import download_helper
return download_helper.get_downloads_folder()
return os.path.join(os.path.expanduser("~"), "downloads")
def get_path_of_downloaded_file(self, file, browser=False):
""" Returns the OS path of the downloaded file. """
if browser:
return os.path.join(self.get_browser_downloads_folder(), file)
else:
return os.path.join(self.get_downloads_folder(), file)
def is_downloaded_file_present(self, file, browser=False):
"""Returns True if the file exists in the pre-set [Downloads Folder].
For browser click-initiated downloads, SeleniumBase will override
the system [Downloads Folder] to be "./downloaded_files/",
but that path can't be overridden when using Safari, IE,
or Chromium Guest Mode, which keeps the default system path.
self.download_file(file_url) will always use "./downloaded_files/".
@Params
file - The filename of the downloaded file.
browser - If True, uses the path set by click-initiated downloads.
If False, uses the self.download_file(file_url) path.
Those paths are often the same. (browser-dependent)
(Default: False).
"""
return os.path.exists(
self.get_path_of_downloaded_file(file, browser=browser)
)
def delete_downloaded_file_if_present(self, file, browser=False):
"""Deletes the file from the [Downloads Folder] if the file exists.
For browser click-initiated downloads, SeleniumBase will override
the system [Downloads Folder] to be "./downloaded_files/",
but that path can't be overridden when using Safari, IE,
or Chromium Guest Mode, which keeps the default system path.
self.download_file(file_url) will always use "./downloaded_files/".
@Params
file - The filename to be deleted from the [Downloads Folder].
browser - If True, uses the path set by click-initiated downloads.
If False, uses the self.download_file(file_url) path.
Those paths are usually the same. (browser-dependent)
(Default: False).
"""
if self.is_downloaded_file_present(file, browser=browser):
file_path = self.get_path_of_downloaded_file(file, browser=browser)
try:
os.remove(file_path)
except Exception:
pass
def delete_downloaded_file(self, file, browser=False):
"""Same as self.delete_downloaded_file_if_present()
Deletes the file from the [Downloads Folder] if the file exists.
For browser click-initiated downloads, SeleniumBase will override
the system [Downloads Folder] to be "./downloaded_files/",
but that path can't be overridden when using Safari, IE,
or Chromium Guest Mode, which keeps the default system path.
self.download_file(file_url) will always use "./downloaded_files/".
@Params
file - The filename to be deleted from the [Downloads Folder].
browser - If True, uses the path set by click-initiated downloads.
If False, uses the self.download_file(file_url) path.
Those paths are usually the same. (browser-dependent)
(Default: False).
"""
if self.is_downloaded_file_present(file, browser=browser):
file_path = self.get_path_of_downloaded_file(file, browser=browser)
try:
os.remove(file_path)
except Exception:
pass
    def assert_downloaded_file(self, file, timeout=None, browser=False):
        """Asserts that the file exists in SeleniumBase's [Downloads Folder].
        For browser click-initiated downloads, SeleniumBase will override
        the system [Downloads Folder] to be "./downloaded_files/",
        but that path can't be overridden when using Safari, IE,
        or Chromium Guest Mode, which keeps the default system path.
        self.download_file(file_url) will always use "./downloaded_files/".
        @Params
        file - The filename of the downloaded file.
        timeout - The time (seconds) to wait for the download to complete.
        browser - If True, uses the path set by click-initiated downloads.
            If False, uses the self.download_file(file_url) path.
            Those paths are often the same. (browser-dependent)
            (Default: False).
        """
        self.__check_scope()
        if not timeout:
            timeout = settings.LARGE_TIMEOUT
        if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        start_ms = time.time() * 1000.0
        stop_ms = start_ms + (timeout * 1000.0)
        downloaded_file_path = self.get_path_of_downloaded_file(file, browser)
        found = False
        # Poll about once per second until the file appears or time runs out.
        for x in range(int(timeout)):
            shared_utils.check_if_time_limit_exceeded()
            try:
                self.assertTrue(
                    os.path.exists(downloaded_file_path),
                    "File [%s] was not found in the downloads folder [%s]!"
                    % (file, self.get_downloads_folder()),
                )
                found = True
                break
            except Exception:
                now_ms = time.time() * 1000.0
                if now_ms >= stop_ms:
                    break
                time.sleep(1)
        # Re-check existence once more before failing (download may have
        # just finished after the polling loop exited).
        if not found and not os.path.exists(downloaded_file_path):
            message = (
                "File {%s} was not found in the downloads folder {%s} "
                "after %s seconds! (Or the download didn't complete!)"
                % (file, self.get_downloads_folder(), timeout)
            )
            page_actions.timeout_exception("NoSuchFileException", message)
        if self.recorder_mode:
            # Record this assertion so exported tests can replay it.
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["as_df", file, origin, time_stamp]
                        self.__extra_actions.append(action)
        if self.demo_mode:
            # Demo Mode: show a success message overlay on the page.
            messenger_post = "ASSERT DOWNLOADED FILE: [%s]" % file
            try:
                js_utils.activate_jquery(self.driver)
                js_utils.post_messenger_success_message(
                    self.driver, messenger_post, self.message_duration
                )
            except Exception:
                # Best-effort UI message; the assertion already passed.
                pass
    def assert_true(self, expr, msg=None):
        """Asserts that the expression is True.
        Will raise an exception if the expression is False."""
        self.assertTrue(expr, msg=msg)
    def assert_false(self, expr, msg=None):
        """Asserts that the expression is False.
        Will raise an exception if the expression is True."""
        self.assertFalse(expr, msg=msg)
    def assert_equal(self, first, second, msg=None):
        """Asserts that the two values are equal.
        Will raise an exception if the values are not equal."""
        self.assertEqual(first, second, msg=msg)
    def assert_not_equal(self, first, second, msg=None):
        """Asserts that the two values are not equal.
        Will raise an exception if the values are equal."""
        self.assertNotEqual(first, second, msg=msg)
    def assert_in(self, first, second, msg=None):
        """Asserts that the first item is contained in the second.
        Will raise an exception if the first is not in the second."""
        self.assertIn(first, second, msg=msg)
    def assert_not_in(self, first, second, msg=None):
        """Asserts that the first item is not contained in the second.
        Will raise an exception if the first is in the second."""
        self.assertNotIn(first, second, msg=msg)
    def assert_raises(self, *args, **kwargs):
        """Asserts that the following block of code raises an exception.
        Will raise an exception if the block of code has no exception.
        Usage Example =>
            # Verify that the expected exception is raised.
            with self.assert_raises(Exception):
                raise Exception("Expected Exception!")
        """
        return self.assertRaises(*args, **kwargs)
    def wait_for_attribute(
        self, selector, attribute, value=None, by=By.CSS_SELECTOR, timeout=None
    ):
        """Raises an exception if the element attribute/value is not found.
        If the value is not specified, the attribute only needs to exist.
        Returns the element that contains the attribute if successful.
        Default timeout = LARGE_TIMEOUT."""
        self.__check_scope()
        if not timeout:
            timeout = settings.LARGE_TIMEOUT
        if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        selector, by = self.__recalculate_selector(selector, by)
        # Shadow DOM selectors (containing "::shadow ") take a special path.
        if self.__is_shadow_selector(selector):
            return self.__wait_for_shadow_attribute_present(
                selector, attribute, value=value, timeout=timeout
            )
        return page_actions.wait_for_attribute(
            self.driver,
            selector,
            attribute,
            value=value,
            by=by,
            timeout=timeout,
        )
def assert_attribute(
self, selector, attribute, value=None, by=By.CSS_SELECTOR, timeout=None
):
"""Raises an exception if the element attribute/value is not found.
If the value is not specified, the attribute only needs to exist.
Returns True if successful. Default timeout = SMALL_TIMEOUT."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_attribute(
selector, attribute, value=value, by=by, timeout=timeout
)
if (
self.demo_mode
and not self.__is_shadow_selector(selector)
and self.is_element_visible(selector, by=by)
):
a_a = "ASSERT ATTRIBUTE"
i_n = "in"
if self._language != "English":
from seleniumbase.fixtures.words import SD
a_a = SD.translate_assert_attribute(self._language)
i_n = SD.translate_in(self._language)
if not value:
messenger_post = "%s: {%s} %s %s: %s" % (
a_a,
attribute,
i_n,
by.upper(),
selector,
)
else:
messenger_post = '%s: {%s == "%s"} %s %s: %s' % (
a_a,
attribute,
value,
i_n,
by.upper(),
selector,
)
self.__highlight_with_assert_success(messenger_post, selector, by)
if self.recorder_mode:
url = self.get_current_url()
if url and len(url) > 0:
if ("http:") in url or ("https:") in url or ("file:") in url:
if self.get_session_storage_item("pause_recorder") == "no":
time_stamp = self.execute_script("return Date.now();")
origin = self.get_origin()
value = value.replace("\\", "\\\\")
sel_att_val = [selector, attribute, value]
action = ["as_at", sel_att_val, origin, time_stamp]
self.__extra_actions.append(action)
return True
    def assert_title(self, title):
        """Asserts that the web page title matches the expected title.
        When a web page initially loads, the title starts as the URL,
        but then the title switches over to the actual page title.
        In Recorder Mode, this assertion is skipped because the Recorder
        changes the page title to the selector of the hovered element.
        """
        self.wait_for_ready_state_complete()
        expected = title.strip()
        actual = self.get_page_title().strip()
        error = (
            "Expected page title [%s] does not match the actual title [%s]!"
        )
        try:
            if not self.recorder_mode:
                self.assertEqual(expected, actual, error % (expected, actual))
        except Exception:
            # The title may still be transitioning - wait and retry twice
            # before letting the assertion error raise for real.
            self.wait_for_ready_state_complete()
            self.sleep(settings.MINI_TIMEOUT)
            actual = self.get_page_title().strip()
            try:
                self.assertEqual(expected, actual, error % (expected, actual))
            except Exception:
                self.wait_for_ready_state_complete()
                self.sleep(settings.MINI_TIMEOUT)
                actual = self.get_page_title().strip()
                self.assertEqual(expected, actual, error % (expected, actual))
        if self.demo_mode and not self.recorder_mode:
            # Demo Mode: highlight the page with a success message.
            a_t = "ASSERT TITLE"
            if self._language != "English":
                from seleniumbase.fixtures.words import SD
                a_t = SD.translate_assert_title(self._language)
            messenger_post = "%s: {%s}" % (a_t, title)
            self.__highlight_with_assert_success(messenger_post, "html")
        if self.recorder_mode:
            # Record this assertion so exported tests can replay it.
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["as_ti", title, origin, time_stamp]
                        self.__extra_actions.append(action)
        return True
    def assert_no_js_errors(self):
        """Asserts that there are no JavaScript "SEVERE"-level page errors.
        Works ONLY on Chromium browsers (Chrome or Edge).
        Does NOT work on Firefox, IE, Safari, or some other browsers:
        * See https://github.com/SeleniumHQ/selenium/issues/1161
        Based on the following Stack Overflow solution:
        * https://stackoverflow.com/a/41150512/7058266
        """
        self.__check_scope()
        time.sleep(0.1)  # May take a moment for errors to appear after loads.
        try:
            browser_logs = self.driver.get_log("browser")
        except (ValueError, WebDriverException):
            # If unable to get browser logs, skip the assert and return.
            return
        # Errors from these CDN libraries are injected by SeleniumBase
        # itself (Demo Mode messages), so they are excluded below.
        messenger_library = "//cdnjs.cloudflare.com/ajax/libs/messenger"
        underscore_library = "//cdnjs.cloudflare.com/ajax/libs/underscore"
        errors = []
        for entry in browser_logs:
            if entry["level"] == "SEVERE":
                if (
                    messenger_library not in entry["message"]
                    and underscore_library not in entry["message"]
                ):
                    # Add errors if not caused by SeleniumBase dependencies
                    errors.append(entry)
        if len(errors) > 0:
            # Reformat known error shapes into compact one-line summaries.
            for n in range(len(errors)):
                f_t_l_r = " - Failed to load resource"
                u_c_t_e = " Uncaught TypeError: "
                if f_t_l_r in errors[n]["message"]:
                    url = errors[n]["message"].split(f_t_l_r)[0]
                    errors[n] = {"Error 404 (broken link)": url}
                elif u_c_t_e in errors[n]["message"]:
                    url = errors[n]["message"].split(u_c_t_e)[0]
                    error = errors[n]["message"].split(u_c_t_e)[1]
                    errors[n] = {"Uncaught TypeError (%s)" % error: url}
            er_str = str(errors)
            er_str = er_str.replace("[{", "[\n{").replace("}, {", "},\n{")
            current_url = self.get_current_url()
            raise Exception(
                "JavaScript errors found on %s => %s" % (current_url, er_str)
            )
        if self.demo_mode:
            # Demo Mode: show a success message (Chromium browsers only).
            if self.browser == "chrome" or self.browser == "edge":
                a_t = "ASSERT NO JS ERRORS"
                if self._language != "English":
                    from seleniumbase.fixtures.words import SD
                    a_t = SD.translate_assert_no_js_errors(self._language)
                messenger_post = "%s" % a_t
                self.__highlight_with_assert_success(messenger_post, "html")
    def __activate_html_inspector(self):
        # Injects the HTML-Inspector JS library into the current page.
        self.wait_for_ready_state_complete()
        time.sleep(0.05)
        js_utils.activate_html_inspector(self.driver)
    def inspect_html(self):
        """Inspects the Page HTML with HTML-Inspector.
        (https://github.com/philipwalton/html-inspector)
        (https://cdnjs.com/libraries/html-inspector)
        Prints the results and also returns them."""
        self.__activate_html_inspector()
        self.wait_for_ready_state_complete()
        script = """HTMLInspector.inspect();"""
        try:
            self.execute_script(script)
        except Exception:
            # If unable to load the JavaScript, skip inspection and return.
            msg = "(Unable to load HTML-Inspector JS! Inspection Skipped!)"
            print("\n" + msg)
            return msg
        time.sleep(0.1)
        # HTML-Inspector writes its findings to the browser console,
        # so the results are collected from the browser logs.
        browser_logs = []
        try:
            browser_logs = self.driver.get_log("browser")
        except (ValueError, WebDriverException):
            # If unable to get browser logs, skip the assert and return.
            msg = "(Unable to Inspect HTML! -> Only works on Chromium!)"
            print("\n" + msg)
            return msg
        messenger_library = "//cdnjs.cloudflare.com/ajax/libs/messenger"
        url = self.get_current_url()
        header = "\n* HTML Inspection Results: %s" % url
        results = [header]
        row_count = 0
        for entry in browser_logs:
            message = entry["message"]
            # Clean up the raw console-log formatting of each entry.
            if "0:6053 " in message:
                message = message.split("0:6053")[1]
            message = message.replace("\\u003C", "<")
            if message.startswith(' "') and message.count('"') == 2:
                message = message.split('"')[1]
            message = "X - " + message
            # Skip SeleniumBase's own injected-library messages and dupes.
            if messenger_library not in message:
                if message not in results:
                    results.append(message)
                    row_count += 1
        if row_count > 0:
            results.append("* (See the Console output for details!)")
        else:
            results.append("* (No issues detected!)")
        results = "\n".join(results)
        print(results)
        return results
def is_chromium(self):
""" Return True if the browser is Chrome, Edge, or Opera. """
self.__check_scope()
chromium = False
browser_name = self.driver.capabilities["browserName"]
if browser_name.lower() in ("chrome", "edge", "msedge", "opera"):
chromium = True
return chromium
def __fail_if_not_using_chrome(self, method):
chrome = False
browser_name = self.driver.capabilities["browserName"]
if browser_name.lower() == "chrome":
chrome = True
if not chrome:
from seleniumbase.common.exceptions import NotUsingChromeException
message = (
'Error: "%s" should only be called '
'by tests running with self.browser == "chrome"! '
'You should add an "if" statement to your code before calling '
"this method if using browsers that are Not Chrome! "
'The browser detected was: "%s".' % (method, browser_name)
)
raise NotUsingChromeException(message)
def get_chrome_version(self):
self.__check_scope()
self.__fail_if_not_using_chrome("get_chrome_version()")
driver_capabilities = self.driver.capabilities
if "version" in driver_capabilities:
chrome_version = driver_capabilities["version"]
else:
chrome_version = driver_capabilities["browserVersion"]
return chrome_version
def get_chromedriver_version(self):
self.__check_scope()
self.__fail_if_not_using_chrome("get_chromedriver_version()")
chrome_dict = self.driver.capabilities["chrome"]
chromedriver_version = chrome_dict["chromedriverVersion"]
chromedriver_version = chromedriver_version.split(" ")[0]
return chromedriver_version
def is_chromedriver_too_old(self):
"""There are known issues with chromedriver versions below 73.
This can impact tests that need to hover over an element, or ones
that require a custom downloads folder ("./downloaded_files").
Due to the situation that newer versions of chromedriver require
an exact match to the version of Chrome, an "old" version of
chromedriver is installed by default. It is then up to the user
to upgrade to the correct version of chromedriver from there.
This method can be used to change test behavior when trying
to perform an action that is impacted by having an old version
of chromedriver installed."""
self.__check_scope()
self.__fail_if_not_using_chrome("is_chromedriver_too_old()")
if int(self.get_chromedriver_version().split(".")[0]) < 73:
return True # chromedriver is too old! Please upgrade!
return False
    def get_totp_code(self, totp_key=None):
        """Returns a time-based one-time password based on the
        Google Authenticator algorithm. Works with Authy and Okta.
        If the "totp_key" is not specified, this method defaults
        to using the one provided in [seleniumbase/config/settings.py].
        Google Authenticator codes expire & change at 30-sec intervals.
        If the fetched password expires in the next 1.5 seconds, waits
        for a new one before returning it (may take up to 1.5 seconds).
        See https://pyotp.readthedocs.io/en/latest/ for details."""
        import pyotp
        if not totp_key:
            totp_key = settings.TOTP_KEY
        # cycle_lifespan is the fraction (0.0-1.0) of the current
        # 30-second TOTP window that has already elapsed.
        epoch_interval = time.time() / 30.0
        cycle_lifespan = float(epoch_interval) - int(epoch_interval)
        if float(cycle_lifespan) > 0.95:
            # Password expires in the next 1.5 seconds. Wait for a new one.
            for i in range(30):
                time.sleep(0.05)
                epoch_interval = time.time() / 30.0
                cycle_lifespan = float(epoch_interval) - int(epoch_interval)
                if not float(cycle_lifespan) > 0.95:
                    # The new password cycle has begun
                    break
        totp = pyotp.TOTP(totp_key)
        return str(totp.now())
    def convert_css_to_xpath(self, css):
        """Converts a CSS selector into an equivalent XPath selector."""
        return css_to_xpath.convert_css_to_xpath(css)
    def convert_xpath_to_css(self, xpath):
        """Converts an XPath selector into an equivalent CSS selector."""
        return xpath_to_css.convert_xpath_to_css(xpath)
def convert_to_css_selector(self, selector, by):
"""This method converts a selector to a CSS_SELECTOR.
jQuery commands require a CSS_SELECTOR for finding elements.
This method should only be used for jQuery/JavaScript actions.
Pure JavaScript doesn't support using a:contains("LINK_TEXT")."""
if by == By.CSS_SELECTOR:
return selector
elif by == By.ID:
return "#%s" % selector
elif by == By.CLASS_NAME:
return ".%s" % selector
elif by == By.NAME:
return '[name="%s"]' % selector
elif by == By.TAG_NAME:
return selector
elif by == By.XPATH:
return self.convert_xpath_to_css(selector)
elif by == By.LINK_TEXT:
return 'a:contains("%s")' % selector
elif by == By.PARTIAL_LINK_TEXT:
return 'a:contains("%s")' % selector
else:
raise Exception(
"Exception: Could not convert {%s}(by=%s) to CSS_SELECTOR!"
% (selector, by)
)
    def set_value(
        self, selector, text, by=By.CSS_SELECTOR, timeout=None, scroll=True
    ):
        """This method uses JavaScript to update a text field.
        @Params
        selector - the selector of the text field
        text - the new value to set (int/float values are converted to str)
        by - the type of selector to search by (Default: CSS)
        timeout - how long to wait for the selector to be present
        scroll - whether to scroll to the element first (Default: True)
        """
        self.__check_scope()
        if not timeout:
            timeout = settings.LARGE_TIMEOUT
        if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        selector, by = self.__recalculate_selector(selector, by, xp_ok=False)
        self.wait_for_ready_state_complete()
        self.wait_for_element_present(selector, by=by, timeout=timeout)
        orginal_selector = selector
        css_selector = self.convert_to_css_selector(selector, by=by)
        self.__demo_mode_highlight_if_active(orginal_selector, by)
        if scroll and not self.demo_mode and not self.slow_mode:
            self.scroll_to(orginal_selector, by=by, timeout=timeout)
        if type(text) is int or type(text) is float:
            text = str(text)
        # Escape the text and selector so they are safe inside the
        # single-quoted JS string literals built below.
        value = re.escape(text)
        value = self.__escape_quotes_if_needed(value)
        pre_escape_css_selector = css_selector
        css_selector = re.escape(css_selector)  # Add "\\" to special chars
        css_selector = self.__escape_quotes_if_needed(css_selector)
        the_type = None
        if ":contains\\(" not in css_selector:
            # ":contains()" is jQuery-only, so plain querySelector works here.
            get_type_script = (
                """return document.querySelector('%s').getAttribute('type');"""
                % css_selector
            )
            the_type = self.execute_script(get_type_script)  # Used later
            script = """document.querySelector('%s').value='%s';""" % (
                css_selector,
                value,
            )
            self.execute_script(script)
            if self.recorder_mode:
                # Record this action so exported tests can replay it.
                time_stamp = self.execute_script("return Date.now();")
                origin = self.get_origin()
                sel_tex = [pre_escape_css_selector, text]
                action = ["js_ty", sel_tex, origin, time_stamp]
                self.__extra_actions.append(action)
        else:
            # The selector uses ":contains()", which requires jQuery.
            script = """jQuery('%s')[0].value='%s';""" % (css_selector, value)
            self.safe_execute_script(script)
        if text.endswith("\n"):
            # A trailing newline means "press RETURN after typing".
            element = self.wait_for_element_present(
                orginal_selector, by=by, timeout=timeout
            )
            element.send_keys(Keys.RETURN)
            if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
                self.wait_for_ready_state_complete()
        else:
            if the_type == "range" and ":contains\\(" not in css_selector:
                # Some input sliders need a mouse event to trigger listeners.
                try:
                    mouse_move_script = (
                        """m_elm = document.querySelector('%s');"""
                        """m_evt = new Event('mousemove');"""
                        """m_elm.dispatchEvent(m_evt);"""
                        % css_selector
                    )
                    self.execute_script(mouse_move_script)
                except Exception:
                    pass
        self.__demo_mode_pause_if_active()
def js_update_text(self, selector, text, by=By.CSS_SELECTOR, timeout=None):
"""JavaScript + send_keys are used to update a text field.
Performs self.set_value() and triggers event listeners.
If text ends in "\n", set_value() presses RETURN after.
Works faster than send_keys() alone due to the JS call.
"""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if type(text) is int or type(text) is float:
text = str(text)
self.set_value(selector, text, by=by, timeout=timeout)
if not text.endswith("\n"):
try:
element = page_actions.wait_for_element_present(
self.driver, selector, by, timeout=0.2
)
element.send_keys(" " + Keys.BACK_SPACE)
except Exception:
pass
def js_type(self, selector, text, by=By.CSS_SELECTOR, timeout=None):
"""Same as self.js_update_text()
JavaScript + send_keys are used to update a text field.
Performs self.set_value() and triggers event listeners.
If text ends in "\n", set_value() presses RETURN after.
Works faster than send_keys() alone due to the JS call.
"""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.js_update_text(selector, text, by=by, timeout=timeout)
def set_text(self, selector, text, by=By.CSS_SELECTOR, timeout=None):
"""Same as self.js_update_text()
JavaScript + send_keys are used to update a text field.
Performs self.set_value() and triggers event listeners.
If text ends in "\n", set_value() presses RETURN after.
Works faster than send_keys() alone due to the JS call.
If not an input or textarea, sets textContent instead."""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
element = page_actions.wait_for_element_present(
self.driver, selector, by, timeout
)
if element.tag_name == "input" or element.tag_name == "textarea":
self.js_update_text(selector, text, by=by, timeout=timeout)
else:
self.set_text_content(selector, text, by=by, timeout=timeout)
    def set_text_content(
        self, selector, text, by=By.CSS_SELECTOR, timeout=None, scroll=False
    ):
        """This method uses JavaScript to set an element's textContent.
        If the element is an input or textarea, sets the value instead.
        @Params
        selector - the selector of the element
        text - the new textContent (int/float values are converted to str)
        by - the type of selector to search by (Default: CSS)
        timeout - how long to wait for the selector to be present
        scroll - whether to scroll to the element first (Default: False)
        """
        self.__check_scope()
        if not timeout:
            timeout = settings.LARGE_TIMEOUT
        if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        selector, by = self.__recalculate_selector(selector, by)
        self.wait_for_ready_state_complete()
        element = page_actions.wait_for_element_present(
            self.driver, selector, by, timeout
        )
        if element.tag_name == "input" or element.tag_name == "textarea":
            # Form fields use "value", not "textContent".
            self.js_update_text(selector, text, by=by, timeout=timeout)
            return
        orginal_selector = selector
        css_selector = self.convert_to_css_selector(selector, by=by)
        if scroll:
            self.__demo_mode_highlight_if_active(orginal_selector, by)
            if not self.demo_mode and not self.slow_mode:
                self.scroll_to(orginal_selector, by=by, timeout=timeout)
        if type(text) is int or type(text) is float:
            text = str(text)
        # Escape the text and selector so they are safe inside the
        # single-quoted JS string literals built below.
        value = re.escape(text)
        value = self.__escape_quotes_if_needed(value)
        css_selector = re.escape(css_selector)  # Add "\\" to special chars
        css_selector = self.__escape_quotes_if_needed(css_selector)
        if ":contains\\(" not in css_selector:
            script = """document.querySelector('%s').textContent='%s';""" % (
                css_selector,
                value,
            )
            self.execute_script(script)
        else:
            # The selector uses ":contains()", which requires jQuery.
            script = """jQuery('%s')[0].textContent='%s';""" % (
                css_selector,
                value,
            )
            self.safe_execute_script(script)
        self.__demo_mode_pause_if_active()
    def jquery_update_text(
        self, selector, text, by=By.CSS_SELECTOR, timeout=None
    ):
        """This method uses jQuery to update a text field.
        If the text string ends with the newline character,
        Selenium finishes the call, which simulates pressing
        {Enter/Return} after the text is entered."""
        self.__check_scope()
        if not timeout:
            timeout = settings.LARGE_TIMEOUT
        if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        selector, by = self.__recalculate_selector(selector, by, xp_ok=False)
        element = self.wait_for_element_visible(
            selector, by=by, timeout=timeout
        )
        self.__demo_mode_highlight_if_active(selector, by)
        self.scroll_to(selector, by=by)
        # jQuery needs a CSS selector that matches only the first element,
        # with special characters and quotes escaped for the JS string.
        selector = self.convert_to_css_selector(selector, by=by)
        selector = self.__make_css_match_first_element_only(selector)
        selector = self.__escape_quotes_if_needed(selector)
        text = re.escape(text)
        text = self.__escape_quotes_if_needed(text)
        update_text_script = """jQuery('%s').val('%s');""" % (selector, text)
        self.safe_execute_script(update_text_script)
        if text.endswith("\n"):
            # A trailing newline simulates pressing {Enter/Return}.
            element.send_keys("\n")
        self.__demo_mode_pause_if_active()
    def get_value(
        self, selector, by=By.CSS_SELECTOR, timeout=None
    ):
        """This method uses JavaScript to get the value of an input field.
        (Works on both input fields and textarea fields.)"""
        self.__check_scope()
        if not timeout:
            timeout = settings.LARGE_TIMEOUT
        if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        selector, by = self.__recalculate_selector(selector, by)
        self.wait_for_ready_state_complete()
        self.wait_for_element_present(selector, by=by, timeout=timeout)
        orginal_selector = selector
        css_selector = self.convert_to_css_selector(selector, by=by)
        self.__demo_mode_highlight_if_active(orginal_selector, by)
        if not self.demo_mode and not self.slow_mode:
            self.scroll_to(orginal_selector, by=by, timeout=timeout)
        # Escape the selector so it is safe inside the single-quoted
        # JS string literals built below.
        css_selector = re.escape(css_selector)  # Add "\\" to special chars
        css_selector = self.__escape_quotes_if_needed(css_selector)
        if ":contains\\(" not in css_selector:
            script = """return document.querySelector('%s').value;""" % (
                css_selector
            )
            value = self.execute_script(script)
        else:
            # The selector uses ":contains()", which requires jQuery.
            script = """return jQuery('%s')[0].value;""" % css_selector
            value = self.safe_execute_script(script)
        return value
def set_time_limit(self, time_limit):
self.__check_scope()
if time_limit:
try:
sb_config.time_limit = float(time_limit)
except Exception:
sb_config.time_limit = None
else:
sb_config.time_limit = None
if sb_config.time_limit and sb_config.time_limit > 0:
sb_config.time_limit_ms = int(sb_config.time_limit * 1000.0)
self.time_limit = sb_config.time_limit
else:
self.time_limit = None
sb_config.time_limit = None
sb_config.time_limit_ms = None
def set_default_timeout(self, timeout):
"""This method changes the default timeout values of test methods
for the duration of the current test.
Effected timeouts: (used by methods that wait for elements)
* settings.SMALL_TIMEOUT - (default value: 6 seconds)
* settings.LARGE_TIMEOUT - (default value: 10 seconds)
The minimum allowable default timeout is: 0.5 seconds.
The maximum allowable default timeout is: 60.0 seconds.
(Test methods can still override timeouts outside that range.)
"""
self.__check_scope()
if not type(timeout) is int and not type(timeout) is float:
raise Exception('Expecting a numeric value for "timeout"!')
if timeout < 0:
raise Exception('The "timeout" cannot be a negative number!')
timeout = float(timeout)
# Min default timeout: 0.5 seconds. Max default timeout: 60.0 seconds.
min_timeout = 0.5
max_timeout = 60.0
if timeout < min_timeout:
logging.info("Minimum default timeout = %s" % min_timeout)
timeout = min_timeout
elif timeout > max_timeout:
logging.info("Maximum default timeout = %s" % max_timeout)
timeout = max_timeout
self.__overrided_default_timeouts = True
sb_config._is_timeout_changed = True
settings.SMALL_TIMEOUT = timeout
settings.LARGE_TIMEOUT = timeout
def reset_default_timeout(self):
"""Reset default timeout values to the original from settings.py
This method reverts the changes made by set_default_timeout()"""
if self.__overrided_default_timeouts:
if sb_config._SMALL_TIMEOUT and sb_config._LARGE_TIMEOUT:
settings.SMALL_TIMEOUT = sb_config._SMALL_TIMEOUT
settings.LARGE_TIMEOUT = sb_config._LARGE_TIMEOUT
sb_config._is_timeout_changed = False
self.__overrided_default_timeouts = False
    def skip(self, reason=""):
        """Mark the current test as Skipped (with an optional reason).
        Also updates the dashboard results, DB-reporting state,
        and the test logs before calling unittest's skipTest()."""
        self.__check_scope()
        if self.dashboard:
            test_id = self.__get_test_id_2()
            if hasattr(self, "_using_sb_fixture"):
                test_id = sb_config._test_id
            if (
                test_id in sb_config._results.keys()
                and sb_config._results[test_id] == "Passed"
            ):
                # Duplicate tearDown() called where test already passed
                self.__passed_then_skipped = True
            self.__will_be_skipped = True
            sb_config._results[test_id] = "Skipped"
        if self.with_db_reporting:
            # Store the skip reason where the DB reporter will find it
            if self.is_pytest:
                self.__skip_reason = reason
            else:
                self._nose_skip_reason = reason
        # Add skip reason to the logs
        if not hasattr(self, "_using_sb_fixture"):
            test_id = self.__get_test_id()  # Recalculate the test id
            test_logpath = os.path.join(self.log_path, test_id)
            self.__create_log_path_as_needed(test_logpath)
            browser = self.browser
            if not reason:
                reason = "No skip reason given"
            log_helper.log_skipped_test_data(self, test_logpath, browser, reason)
        # Finally skip the test for real
        self.skipTest(reason)
############
# Shadow DOM / Shadow-root methods
    def __get_shadow_element(self, selector, timeout=None):
        """Resolve a "::shadow "-separated selector chain to an element.
        Splits the selector on "::shadow " and walks each shadow root
        in turn, waiting for each chain part to become present.
        A timeout of 0 becomes a quick 0.1-second check (used by the
        is_shadow_element_* predicate methods). Raises if any element
        in the chain lacks a shadow root or a part is never found."""
        self.wait_for_ready_state_complete()
        if timeout is None:
            timeout = settings.SMALL_TIMEOUT
        elif timeout == 0:
            timeout = 0.1  # Use for: is_shadow_element_* (* = present/visible)
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        self.__fail_if_invalid_shadow_selector_usage(selector)
        if "::shadow " not in selector:
            raise Exception(
                'A Shadow DOM selector must contain at least one "::shadow "!'
            )
        selectors = selector.split("::shadow ")
        # The first chain part is resolved in the regular (light) DOM
        element = self.get_element(selectors[0])
        selector_chain = selectors[0]
        for selector_part in selectors[1:]:
            shadow_root = self.execute_script(
                "return arguments[0].shadowRoot", element
            )
            if timeout == 0.1 and not shadow_root:
                # Quick-check mode: fail fast instead of waiting
                raise Exception(
                    "Element {%s} has no shadow root!" % selector_chain
                )
            elif not shadow_root:
                time.sleep(2)  # Wait two seconds for the shadow root to appear
                shadow_root = self.execute_script(
                    "return arguments[0].shadowRoot", element
                )
                if not shadow_root:
                    raise Exception(
                        "Element {%s} has no shadow root!" % selector_chain
                    )
            # Extend the chain (used only for error messages below)
            selector_chain += "::shadow "
            selector_chain += selector_part
            try:
                element = page_actions.wait_for_element_present(
                    shadow_root,
                    selector_part,
                    by=By.CSS_SELECTOR,
                    timeout=timeout,
                )
            except Exception:
                msg = (
                    "Shadow DOM Element {%s} was not present after %s seconds!"
                    % (selector_chain, timeout)
                )
                page_actions.timeout_exception("NoSuchElementException", msg)
        return element
def __fail_if_invalid_shadow_selector_usage(self, selector):
if selector.strip().endswith("::shadow"):
msg = (
"A Shadow DOM selector cannot end on a shadow root element!"
" End the selector with an element inside the shadow root!"
)
raise Exception(msg)
def __is_shadow_selector(self, selector):
self.__fail_if_invalid_shadow_selector_usage(selector)
if "::shadow " in selector:
return True
return False
def __shadow_click(self, selector):
element = self.__get_shadow_element(selector)
element.click()
def __shadow_type(self, selector, text, clear_first=True):
element = self.__get_shadow_element(selector)
if clear_first:
try:
element.clear()
backspaces = Keys.BACK_SPACE * 42 # Autofill Defense
element.send_keys(backspaces)
except Exception:
pass
if type(text) is int or type(text) is float:
text = str(text)
if not text.endswith("\n"):
element.send_keys(text)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
else:
element.send_keys(text[:-1])
element.send_keys(Keys.RETURN)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
def __shadow_clear(self, selector):
element = self.__get_shadow_element(selector)
try:
element.clear()
backspaces = Keys.BACK_SPACE * 42 # Autofill Defense
element.send_keys(backspaces)
except Exception:
pass
def __get_shadow_text(self, selector):
element = self.__get_shadow_element(selector)
return element.text
    def __wait_for_shadow_text_visible(self, text, selector):
        """Wait (up to SMALL_TIMEOUT) for text to appear within a
        Shadow DOM element. Inside the polling loop, a mismatch raises
        via timeout_exception() and is caught by the except-handler so
        the check retries every 0.1s; only the final check after the
        loop lets the timeout exception propagate to the caller."""
        start_ms = time.time() * 1000.0
        stop_ms = start_ms + (settings.SMALL_TIMEOUT * 1000.0)
        for x in range(int(settings.SMALL_TIMEOUT * 10)):
            try:
                actual_text = self.__get_shadow_text(selector).strip()
                text = text.strip()
                if text not in actual_text:
                    msg = (
                        "Expected text {%s} in element {%s} was not visible!"
                        % (text, selector)
                    )
                    page_actions.timeout_exception(
                        "ElementNotVisibleException", msg
                    )
                return True
            except Exception:
                # Not there yet: retry until the deadline passes
                now_ms = time.time() * 1000.0
                if now_ms >= stop_ms:
                    break
                time.sleep(0.1)
        # One last check; this time the exception is not caught
        actual_text = self.__get_shadow_text(selector).strip()
        text = text.strip()
        if text not in actual_text:
            msg = "Expected text {%s} in element {%s} was not visible!" % (
                text,
                selector,
            )
            page_actions.timeout_exception("ElementNotVisibleException", msg)
        return True
    def __wait_for_exact_shadow_text_visible(self, text, selector):
        """Wait (up to SMALL_TIMEOUT) for a Shadow DOM element's text to
        exactly equal the given text. Same retry pattern as
        __wait_for_shadow_text_visible(): in-loop failures raise and
        are caught so the check retries every 0.1s; the final check
        after the loop raises the real timeout exception."""
        start_ms = time.time() * 1000.0
        stop_ms = start_ms + (settings.SMALL_TIMEOUT * 1000.0)
        for x in range(int(settings.SMALL_TIMEOUT * 10)):
            try:
                actual_text = self.__get_shadow_text(selector).strip()
                text = text.strip()
                if text != actual_text:
                    msg = (
                        "Expected exact text {%s} in element {%s} not visible!"
                        "" % (text, selector)
                    )
                    page_actions.timeout_exception(
                        "ElementNotVisibleException", msg
                    )
                return True
            except Exception:
                # Not matching yet: retry until the deadline passes
                now_ms = time.time() * 1000.0
                if now_ms >= stop_ms:
                    break
                time.sleep(0.1)
        # One last check; this time the exception is not caught
        actual_text = self.__get_shadow_text(selector).strip()
        text = text.strip()
        if text != actual_text:
            msg = (
                "Expected exact text {%s} in element {%s} was not visible!"
                % (text, selector)
            )
            page_actions.timeout_exception("ElementNotVisibleException", msg)
        return True
def __assert_shadow_text_visible(self, text, selector):
self.__wait_for_shadow_text_visible(text, selector)
if self.demo_mode:
a_t = "ASSERT TEXT"
i_n = "in"
by = By.CSS_SELECTOR
if self._language != "English":
from seleniumbase.fixtures.words import SD
a_t = SD.translate_assert_text(self._language)
i_n = SD.translate_in(self._language)
messenger_post = "%s: {%s} %s %s: %s" % (
a_t,
text,
i_n,
by.upper(),
selector,
)
try:
js_utils.activate_jquery(self.driver)
js_utils.post_messenger_success_message(
self.driver, messenger_post, self.message_duration
)
except Exception:
pass
def __assert_exact_shadow_text_visible(self, text, selector):
self.__wait_for_exact_shadow_text_visible(text, selector)
if self.demo_mode:
a_t = "ASSERT EXACT TEXT"
i_n = "in"
by = By.CSS_SELECTOR
if self._language != "English":
from seleniumbase.fixtures.words import SD
a_t = SD.translate_assert_exact_text(self._language)
i_n = SD.translate_in(self._language)
messenger_post = "%s: {%s} %s %s: %s" % (
a_t,
text,
i_n,
by.upper(),
selector,
)
try:
js_utils.activate_jquery(self.driver)
js_utils.post_messenger_success_message(
self.driver, messenger_post, self.message_duration
)
except Exception:
pass
def __is_shadow_element_present(self, selector):
try:
element = self.__get_shadow_element(selector, timeout=0.1)
return element is not None
except Exception:
return False
def __is_shadow_element_visible(self, selector):
try:
element = self.__get_shadow_element(selector, timeout=0.1)
return element.is_displayed()
except Exception:
return False
def __wait_for_shadow_element_present(self, selector):
element = self.__get_shadow_element(selector)
return element
    def __wait_for_shadow_element_visible(self, selector):
        """Wait for a Shadow DOM element to be present and visible,
        then return it. Raises a timeout exception if it stays hidden."""
        element = self.__get_shadow_element(selector)
        if not element.is_displayed():
            msg = "Shadow DOM Element {%s} was not visible!" % selector
            # timeout_exception() raises, so the return below is skipped
            page_actions.timeout_exception("NoSuchElementException", msg)
        return element
    def __wait_for_shadow_attribute_present(
        self, selector, attribute, value=None, timeout=None
    ):
        """Wait for a Shadow DOM element, then verify it has the given
        attribute (and, if value is given, that the attribute equals it).
        Returns the element on success; raises NoSuchAttributeException
        otherwise. The waiting happens in __get_shadow_element(); the
        attribute itself is only checked once after the element appears."""
        element = self.__get_shadow_element(selector, timeout=timeout)
        actual_value = element.get_attribute(attribute)
        # NOTE(review): if timeout is None here, the error messages below
        # read "after None seconds" — __get_shadow_element() substitutes
        # SMALL_TIMEOUT internally but this local variable is unchanged.
        plural = "s"
        if timeout == 1:
            plural = ""
        if value is None:
            # The element attribute only needs to exist
            if actual_value is not None:
                return element
            else:
                # The element does not have the attribute
                message = (
                    "Expected attribute {%s} of element {%s} "
                    "was not present after %s second%s!"
                    % (attribute, selector, timeout, plural)
                )
                page_actions.timeout_exception(
                    "NoSuchAttributeException", message
                )
        else:
            if actual_value == value:
                return element
            else:
                message = (
                    "Expected value {%s} for attribute {%s} of element "
                    "{%s} was not present after %s second%s! "
                    "(The actual value was {%s})"
                    % (
                        value,
                        attribute,
                        selector,
                        timeout,
                        plural,
                        actual_value,
                    )
                )
                page_actions.timeout_exception(
                    "NoSuchAttributeException", message
                )
def __assert_shadow_element_present(self, selector):
self.__get_shadow_element(selector)
if self.demo_mode:
a_t = "ASSERT"
by = By.CSS_SELECTOR
if self._language != "English":
from seleniumbase.fixtures.words import SD
a_t = SD.translate_assert(self._language)
messenger_post = "%s %s: %s" % (a_t, by.upper(), selector)
try:
js_utils.activate_jquery(self.driver)
js_utils.post_messenger_success_message(
self.driver, messenger_post, self.message_duration
)
except Exception:
pass
def __assert_shadow_element_visible(self, selector):
element = self.__get_shadow_element(selector)
if not element.is_displayed():
msg = "Shadow DOM Element {%s} was not visible!" % selector
page_actions.timeout_exception("NoSuchElementException", msg)
if self.demo_mode:
a_t = "ASSERT"
by = By.CSS_SELECTOR
if self._language != "English":
from seleniumbase.fixtures.words import SD
a_t = SD.translate_assert(self._language)
messenger_post = "%s %s: %s" % (a_t, by.upper(), selector)
try:
js_utils.activate_jquery(self.driver)
js_utils.post_messenger_success_message(
self.driver, messenger_post, self.message_duration
)
except Exception:
pass
############
# Application "Local Storage" controls
def set_local_storage_item(self, key, value):
self.__check_scope()
self.execute_script(
"window.localStorage.setItem('{}', '{}');".format(key, value)
)
def get_local_storage_item(self, key):
self.__check_scope()
return self.execute_script(
"return window.localStorage.getItem('{}');".format(key)
)
def remove_local_storage_item(self, key):
self.__check_scope()
self.execute_script(
"window.localStorage.removeItem('{}');".format(key)
)
def clear_local_storage(self):
self.__check_scope()
self.execute_script("window.localStorage.clear();")
if self.recorder_mode:
time_stamp = self.execute_script("return Date.now();")
origin = self.get_origin()
action = ["c_l_s", "", origin, time_stamp]
self.__extra_actions.append(action)
    def get_local_storage_keys(self):
        """Return a list of all keys in the browser's Local Storage."""
        self.__check_scope()
        return self.execute_script(
            "var ls = window.localStorage, keys = []; "
            "for (var i = 0; i < ls.length; ++i) "
            "  keys[i] = ls.key(i); "
            "return keys;"
        )
    def get_local_storage_items(self):
        """Return a dict of all key/value pairs in Local Storage."""
        self.__check_scope()
        return self.execute_script(
            r"var ls = window.localStorage, items = {}; "
            "for (var i = 0, k; i < ls.length; ++i) "
            "  items[k = ls.key(i)] = ls.getItem(k); "
            "return items;"
        )
# Application "Session Storage" controls
def set_session_storage_item(self, key, value):
self.__check_scope()
self.execute_script(
"window.sessionStorage.setItem('{}', '{}');".format(key, value)
)
def get_session_storage_item(self, key):
self.__check_scope()
return self.execute_script(
"return window.sessionStorage.getItem('{}');".format(key)
)
def remove_session_storage_item(self, key):
self.__check_scope()
self.execute_script(
"window.sessionStorage.removeItem('{}');".format(key)
)
    def clear_session_storage(self):
        """Remove all key/value pairs from the browser's Session Storage."""
        self.__check_scope()
        self.execute_script("window.sessionStorage.clear();")
    def get_session_storage_keys(self):
        """Return a list of all keys in the browser's Session Storage."""
        self.__check_scope()
        return self.execute_script(
            "var ls = window.sessionStorage, keys = []; "
            "for (var i = 0; i < ls.length; ++i) "
            "  keys[i] = ls.key(i); "
            "return keys;"
        )
    def get_session_storage_items(self):
        """Return a dict of all key/value pairs in Session Storage."""
        self.__check_scope()
        return self.execute_script(
            r"var ls = window.sessionStorage, items = {}; "
            "for (var i = 0, k; i < ls.length; ++i) "
            "  items[k = ls.key(i)] = ls.getItem(k); "
            "return items;"
        )
############
# Duplicates (Avoids name confusion when migrating from other frameworks.)
    def open_url(self, url):
        """ Same as self.open() - Navigates the browser to the URL. """
        self.open(url)
    def visit(self, url):
        """ Same as self.open() - Navigates the browser to the URL. """
        self.open(url)
    def visit_url(self, url):
        """ Same as self.open() - Navigates the browser to the URL. """
        self.open(url)
    def goto(self, url):
        """ Same as self.open() - Navigates the browser to the URL. """
        self.open(url)
    def go_to(self, url):
        """ Same as self.open() - Navigates the browser to the URL. """
        self.open(url)
    def reload(self):
        """ Same as self.refresh_page() - Reloads the current page. """
        self.refresh_page()
    def reload_page(self):
        """ Same as self.refresh_page() - Reloads the current page. """
        self.refresh_page()
    def open_new_tab(self, switch_to=True):
        """ Same as self.open_new_window() - Opens a new browser tab. """
        self.open_new_window(switch_to=switch_to)
    def switch_to_tab(self, tab, timeout=None):
        """Same as self.switch_to_window()
        Switches control of the browser to the specified window.
        The window can be an integer: 0 -> 1st tab, 1 -> 2nd tab, etc...
        Or it can be a list item from self.driver.window_handles"""
        self.switch_to_window(window=tab, timeout=timeout)
    def switch_to_default_tab(self):
        """ Same as self.switch_to_default_window() - Uses the 1st tab. """
        self.switch_to_default_window()
    def switch_to_newest_tab(self):
        """ Same as self.switch_to_newest_window() - Uses the last tab. """
        self.switch_to_newest_window()
def input(
self, selector, text, by=By.CSS_SELECTOR, timeout=None, retry=False
):
""" Same as self.update_text() """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.update_text(selector, text, by=by, timeout=timeout, retry=retry)
def fill(
self, selector, text, by=By.CSS_SELECTOR, timeout=None, retry=False
):
""" Same as self.update_text() """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.update_text(selector, text, by=by, timeout=timeout, retry=retry)
def write(
self, selector, text, by=By.CSS_SELECTOR, timeout=None, retry=False
):
""" Same as self.update_text() """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.update_text(selector, text, by=by, timeout=timeout, retry=retry)
def send_keys(self, selector, text, by=By.CSS_SELECTOR, timeout=None):
""" Same as self.add_text() """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.add_text(selector, text, by=by, timeout=timeout)
def click_link(self, link_text, timeout=None):
""" Same as self.click_link_text() """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.click_link_text(link_text, timeout=timeout)
def click_partial_link(self, partial_link_text, timeout=None):
""" Same as self.click_partial_link_text() """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.click_partial_link_text(partial_link_text, timeout=timeout)
def wait_for_element_visible(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
""" Same as self.wait_for_element() """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
return self.__wait_for_shadow_element_visible(selector)
return page_actions.wait_for_element_visible(
self.driver, selector, by, timeout
)
def wait_for_element_not_present(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""Same as self.wait_for_element_absent()
Waits for an element to no longer appear in the HTML of a page.
A hidden element still counts as appearing in the page HTML.
If waiting for elements to be hidden instead of nonexistent,
use wait_for_element_not_visible() instead.
"""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_element_absent(
self.driver, selector, by, timeout
)
def assert_element_not_present(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""Same as self.assert_element_absent()
Will raise an exception if the element stays present.
A hidden element counts as a present element, which fails this assert.
If you want to assert that elements are hidden instead of nonexistent,
use assert_element_not_visible() instead.
(Note that hidden elements are still present in the HTML of the page.)
Returns True if successful. Default timeout = SMALL_TIMEOUT."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.wait_for_element_absent(selector, by=by, timeout=timeout)
return True
    def get_google_auth_password(self, totp_key=None):
        """ Same as self.get_totp_code() - Returns a time-based OTP. """
        return self.get_totp_code(totp_key=totp_key)
    def get_google_auth_code(self, totp_key=None):
        """ Same as self.get_totp_code() - Returns a time-based OTP. """
        return self.get_totp_code(totp_key=totp_key)
    def assert_no_broken_links(self, multithreaded=True):
        """ Same as self.assert_no_404_errors() - Checks page links. """
        self.assert_no_404_errors(multithreaded=multithreaded)
    def wait(self, seconds):
        """ Same as self.sleep() - Some JS frameworks use this method name. """
        self.sleep(seconds)
    def block_ads(self):
        """ Same as self.ad_block() - Blocks ads on the current page. """
        self.ad_block()
def _print(self, msg):
"""Same as Python's print(), but won't print during multithreaded runs
because overlapping print() commands may lead to unexpected output.
In most cases, the print() command won't print for multithreaded tests,
but there are some exceptions, and this will take care of those.
Here's an example of running tests multithreaded: "pytest -n=4".
To force a print during multithreaded tests, use: "sys.stderr.write()".
To print without the new-line character end, use: "sys.stdout.write()".
"""
if not sb_config._multithreaded:
print(msg)
    def start_tour(self, name=None, interval=0):
        """ Same as self.play_tour() - Plays a website tour. """
        self.play_tour(name=name, interval=interval)
############
    def add_css_link(self, css_link):
        """ Adds a CSS stylesheet link to the current page. """
        self.__check_scope()
        js_utils.add_css_link(self.driver, css_link)
    def add_js_link(self, js_link):
        """ Adds a JavaScript script link to the current page. """
        self.__check_scope()
        js_utils.add_js_link(self.driver, js_link)
    def add_css_style(self, css_style):
        """ Adds inline CSS style rules to the current page. """
        self.__check_scope()
        js_utils.add_css_style(self.driver, css_style)
    def add_js_code_from_link(self, js_link):
        """ Fetches JS code from a link and runs it on the current page. """
        self.__check_scope()
        js_utils.add_js_code_from_link(self.driver, js_link)
    def add_js_code(self, js_code):
        """ Adds inline JavaScript code to the current page. """
        self.__check_scope()
        js_utils.add_js_code(self.driver, js_code)
    def add_meta_tag(self, http_equiv=None, content=None):
        """ Adds a meta tag to the current page. """
        self.__check_scope()
        js_utils.add_meta_tag(
            self.driver, http_equiv=http_equiv, content=content
        )
############
def create_presentation(
self, name=None, theme="default", transition="default"
):
"""Creates a Reveal-JS presentation that you can add slides to.
@Params
name - If creating multiple presentations at the same time,
use this to specify the name of the current presentation.
theme - Set a theme with a unique style for the presentation.
Valid themes: "serif" (default), "sky", "white", "black",
"simple", "league", "moon", "night",
"beige", "blood", and "solarized".
transition - Set a transition between slides.
Valid transitions: "none" (default), "slide", "fade",
"zoom", "convex", and "concave".
"""
if not name:
name = "default"
if not theme or theme == "default":
theme = "serif"
valid_themes = [
"serif",
"white",
"black",
"beige",
"simple",
"sky",
"league",
"moon",
"night",
"blood",
"solarized",
]
theme = theme.lower()
if theme not in valid_themes:
raise Exception(
"Theme {%s} not found! Valid themes: %s"
% (theme, valid_themes)
)
if not transition or transition == "default":
transition = "none"
valid_transitions = [
"none",
"slide",
"fade",
"zoom",
"convex",
"concave",
]
transition = transition.lower()
if transition not in valid_transitions:
raise Exception(
"Transition {%s} not found! Valid transitions: %s"
% (transition, valid_transitions)
)
reveal_theme_css = None
if theme == "serif":
reveal_theme_css = constants.Reveal.SERIF_MIN_CSS
elif theme == "sky":
reveal_theme_css = constants.Reveal.SKY_MIN_CSS
elif theme == "white":
reveal_theme_css = constants.Reveal.WHITE_MIN_CSS
elif theme == "black":
reveal_theme_css = constants.Reveal.BLACK_MIN_CSS
elif theme == "simple":
reveal_theme_css = constants.Reveal.SIMPLE_MIN_CSS
elif theme == "league":
reveal_theme_css = constants.Reveal.LEAGUE_MIN_CSS
elif theme == "moon":
reveal_theme_css = constants.Reveal.MOON_MIN_CSS
elif theme == "night":
reveal_theme_css = constants.Reveal.NIGHT_MIN_CSS
elif theme == "beige":
reveal_theme_css = constants.Reveal.BEIGE_MIN_CSS
elif theme == "blood":
reveal_theme_css = constants.Reveal.BLOOD_MIN_CSS
elif theme == "solarized":
reveal_theme_css = constants.Reveal.SOLARIZED_MIN_CSS
else:
# Use the default if unable to determine the theme
reveal_theme_css = constants.Reveal.SERIF_MIN_CSS
new_presentation = (
"<html>\n"
"<head>\n"
'<meta charset="utf-8">\n'
'<meta http-equiv="Content-Type" content="text/html">\n'
'<meta name="viewport" content="shrink-to-fit=no">\n'
'<link rel="stylesheet" href="%s">\n'
'<link rel="stylesheet" href="%s">\n'
"<style>\n"
"pre{background-color:#fbe8d4;border-radius:8px;}\n"
"div[flex_div]{height:68vh;margin:0;align-items:center;"
"justify-content:center;}\n"
"img[rounded]{border-radius:16px;max-width:64%%;}\n"
"</style>\n"
"</head>\n\n"
"<body>\n"
"<!-- Generated by SeleniumBase - https://seleniumbase.io -->\n"
'<div class="reveal">\n'
'<div class="slides">\n'
% (constants.Reveal.MIN_CSS, reveal_theme_css)
)
self._presentation_slides[name] = []
self._presentation_slides[name].append(new_presentation)
self._presentation_transition[name] = transition
    def add_slide(
        self,
        content=None,
        image=None,
        code=None,
        iframe=None,
        content2=None,
        notes=None,
        transition=None,
        name=None,
    ):
        """Allows the user to add slides to a presentation.
        @Params
        content - The HTML content to display on the presentation slide.
        image - Attach an image (from a URL link) to the slide.
        code - Attach code of any programming language to the slide.
               Language-detection will be used to add syntax formatting.
        iframe - Attach an iFrame (from a URL link) to the slide.
        content2 - HTML content to display after adding an image or code.
        notes - Additional notes to include with the slide.
                ONLY SEEN if show_notes is set for the presentation.
        transition - Set a transition between slides. (overrides previous)
                     Valid transitions: "none" (default), "slide", "fade",
                     "zoom", "convex", and "concave".
        name - If creating multiple presentations at the same time,
               use this to select the presentation to add slides to.
        """
        if not name:
            name = "default"
        if name not in self._presentation_slides:
            # Create a presentation if it doesn't already exist
            self.create_presentation(name=name)
        if not content:
            content = ""
        if not content2:
            content2 = ""
        if not notes:
            notes = ""
        if not transition:
            # Inherit the transition chosen when the deck was created
            transition = self._presentation_transition[name]
        elif transition == "default":
            transition = "none"
        valid_transitions = [
            "none",
            "slide",
            "fade",
            "zoom",
            "convex",
            "concave",
        ]
        transition = transition.lower()
        if transition not in valid_transitions:
            raise Exception(
                "Transition {%s} not found! Valid transitions: %s"
                "" % (transition, valid_transitions)
            )
        add_line = ""
        if content.startswith("<"):
            # Start HTML content on its own line inside the <section>
            add_line = "\n"
        html = '\n<section data-transition="%s">%s%s' % (
            transition,
            add_line,
            content,
        )
        if image:
            html += '\n<div flex_div><img rounded src="%s" /></div>' % image
        if code:
            html += "\n<div></div>"
            html += '\n<pre class="prettyprint">\n%s</pre>' % code
        if iframe:
            html += (
                "\n<div></div>"
                '\n<iframe src="%s" style="width:92%%;height:550px;" '
                'title="iframe content"></iframe>' % iframe
            )
        add_line = ""
        if content2.startswith("<"):
            add_line = "\n"
        if content2:
            html += "%s%s" % (add_line, content2)
        # Notes are only visible when show_notes is set on save/begin
        html += '\n<aside class="notes">%s</aside>' % notes
        html += "\n</section>\n"
        self._presentation_slides[name].append(html)
def save_presentation(
self, name=None, filename=None, show_notes=False, interval=0
):
"""Saves a Reveal-JS Presentation to a file for later use.
@Params
name - If creating multiple presentations at the same time,
use this to select the one you wish to use.
filename - The name of the HTML file that you wish to
save the presentation to. (filename must end in ".html")
show_notes - When set to True, the Notes feature becomes enabled,
which allows presenters to see notes next to slides.
interval - The delay time between autoplaying slides. (in seconds)
If set to 0 (default), autoplay is disabled.
"""
if not name:
name = "default"
if not filename:
filename = "my_presentation.html"
if name not in self._presentation_slides:
raise Exception("Presentation {%s} does not exist!" % name)
if not filename.endswith(".html"):
raise Exception('Presentation file must end in ".html"!')
if not interval:
interval = 0
if interval == 0 and self.interval:
interval = float(self.interval)
if not type(interval) is int and not type(interval) is float:
raise Exception('Expecting a numeric value for "interval"!')
if interval < 0:
raise Exception('The "interval" cannot be a negative number!')
interval_ms = float(interval) * 1000.0
show_notes_str = "false"
if show_notes:
show_notes_str = "true"
the_html = ""
for slide in self._presentation_slides[name]:
the_html += slide
the_html += (
"\n</div>\n"
"</div>\n"
'<script src="%s"></script>\n'
'<script src="%s"></script>\n'
"<script>Reveal.initialize("
"{showNotes: %s, slideNumber: true, progress: true, hash: false, "
"autoSlide: %s,});"
"</script>\n"
"</body>\n"
"</html>\n"
% (
constants.Reveal.MIN_JS,
constants.PrettifyJS.RUN_PRETTIFY_JS,
show_notes_str,
interval_ms,
)
)
# Remove duplicate ChartMaker library declarations
chart_libs = """
<script src="%s"></script>
<script src="%s"></script>
<script src="%s"></script>
<script src="%s"></script>
""" % (
constants.HighCharts.HC_JS,
constants.HighCharts.EXPORTING_JS,
constants.HighCharts.EXPORT_DATA_JS,
constants.HighCharts.ACCESSIBILITY_JS,
)
if the_html.count(chart_libs) > 1:
chart_libs_comment = "<!-- HighCharts Libraries Imported -->"
the_html = the_html.replace(chart_libs, chart_libs_comment)
# Only need to import the HighCharts libraries once
the_html = the_html.replace(chart_libs_comment, chart_libs, 1)
saved_presentations_folder = constants.Presentations.SAVED_FOLDER
if saved_presentations_folder.endswith("/"):
saved_presentations_folder = saved_presentations_folder[:-1]
if not os.path.exists(saved_presentations_folder):
try:
os.makedirs(saved_presentations_folder)
except Exception:
pass
file_path = saved_presentations_folder + "/" + filename
out_file = codecs.open(file_path, "w+", encoding="utf-8")
out_file.writelines(the_html)
out_file.close()
print("\n>>> [%s] was saved!\n" % file_path)
return file_path
    def begin_presentation(
        self, name=None, filename=None, show_notes=False, interval=0
    ):
        """Begin a Reveal-JS Presentation in the web browser.
        @Params
        name - If creating multiple presentations at the same time,
               use this to select the one you wish to use.
        filename - The name of the HTML file that you wish to
                   save the presentation to. (filename must end in ".html")
        show_notes - When set to True, the Notes feature becomes enabled,
                     which allows presenters to see notes next to slides.
        interval - The delay time between autoplaying slides. (in seconds)
                   If set to 0 (default), autoplay is disabled.
        """
        if self.headless or self.xvfb:
            return  # Presentations should not run in headless mode.
        if not name:
            name = "default"
        if not filename:
            filename = "my_presentation.html"
        if name not in self._presentation_slides:
            raise Exception("Presentation {%s} does not exist!" % name)
        if not filename.endswith(".html"):
            raise Exception('Presentation file must end in ".html"!')
        if not interval:
            interval = 0
        if interval == 0 and self.interval:
            interval = float(self.interval)
        if not type(interval) is int and not type(interval) is float:
            raise Exception('Expecting a numeric value for "interval"!')
        if interval < 0:
            raise Exception('The "interval" cannot be a negative number!')
        # A sentinel end-slide marks when the presentation has finished
        end_slide = (
            '\n<section data-transition="none">\n'
            '<p class="End_Presentation_Now"> </p>\n</section>\n'
        )
        self._presentation_slides[name].append(end_slide)
        file_path = self.save_presentation(
            name=name,
            filename=filename,
            show_notes=show_notes,
            interval=interval,
        )
        # Remove the sentinel slide so the in-memory deck can be reused
        self._presentation_slides[name].pop()
        self.open_html_file(file_path)
        presentation_folder = constants.Presentations.SAVED_FOLDER
        try:
            # Block until the tab closes, the browser navigates away,
            # or the sentinel end-slide becomes visible
            while (
                len(self.driver.window_handles) > 0
                and presentation_folder in self.get_current_url()
            ):
                time.sleep(0.05)
                if self.is_element_visible(
                    "section.present p.End_Presentation_Now"
                ):
                    break
                time.sleep(0.05)
        except Exception:
            pass
############
def create_pie_chart(
self,
chart_name=None,
title=None,
subtitle=None,
data_name=None,
unit=None,
libs=True,
labels=True,
legend=True,
):
"""Creates a JavaScript pie chart using "HighCharts".
@Params
chart_name - If creating multiple charts,
use this to select which one.
title - The title displayed for the chart.
subtitle - The subtitle displayed for the chart.
data_name - The series name. Useful for multi-series charts.
If no data_name, will default to using "Series 1".
unit - The description label given to the chart's y-axis values.
libs - The option to include Chart libraries (JS and CSS files).
Should be set to True (default) for the first time creating
a chart on a web page. If creating multiple charts on the
same web page, you won't need to re-import the libraries
when creating additional charts.
labels - If True, displays labels on the chart for data points.
legend - If True, displays the data point legend on the chart.
"""
if not chart_name:
chart_name = "default"
if not data_name:
data_name = ""
style = "pie"
self.__create_highchart(
chart_name=chart_name,
title=title,
subtitle=subtitle,
style=style,
data_name=data_name,
unit=unit,
libs=libs,
labels=labels,
legend=legend,
)
def create_bar_chart(
self,
chart_name=None,
title=None,
subtitle=None,
data_name=None,
unit=None,
libs=True,
labels=True,
legend=True,
):
"""Creates a JavaScript bar chart using "HighCharts".
@Params
chart_name - If creating multiple charts,
use this to select which one.
title - The title displayed for the chart.
subtitle - The subtitle displayed for the chart.
data_name - The series name. Useful for multi-series charts.
If no data_name, will default to using "Series 1".
unit - The description label given to the chart's y-axis values.
libs - The option to include Chart libraries (JS and CSS files).
Should be set to True (default) for the first time creating
a chart on a web page. If creating multiple charts on the
same web page, you won't need to re-import the libraries
when creating additional charts.
labels - If True, displays labels on the chart for data points.
legend - If True, displays the data point legend on the chart.
"""
if not chart_name:
chart_name = "default"
if not data_name:
data_name = ""
style = "bar"
self.__create_highchart(
chart_name=chart_name,
title=title,
subtitle=subtitle,
style=style,
data_name=data_name,
unit=unit,
libs=libs,
labels=labels,
legend=legend,
)
def create_column_chart(
    self,
    chart_name=None,
    title=None,
    subtitle=None,
    data_name=None,
    unit=None,
    libs=True,
    labels=True,
    legend=True,
):
    """Generate a JavaScript column chart with the "HighCharts" library.
    @Params
    chart_name - Selects which chart to use when multiple charts
        are being created at the same time.
    title - The chart's displayed title.
    subtitle - The chart's displayed subtitle.
    data_name - The series name (useful for multi-series charts).
        When not given, "Series 1" is used.
    unit - The description label for the chart's y-axis values.
    libs - Whether to include the Chart libraries (JS and CSS files).
        Keep True (default) for the first chart on a web page;
        later charts on the same page can skip the re-import.
    labels - If True, data-point labels are shown on the chart.
    legend - If True, the data-point legend is shown on the chart.
    """
    # Delegate to the shared HighCharts builder with the "column" style.
    self.__create_highchart(
        chart_name=chart_name or "default",
        title=title,
        subtitle=subtitle,
        style="column",
        data_name=data_name or "",
        unit=unit,
        libs=libs,
        labels=labels,
        legend=legend,
    )
def create_line_chart(
    self,
    chart_name=None,
    title=None,
    subtitle=None,
    data_name=None,
    unit=None,
    zero=False,
    libs=True,
    labels=True,
    legend=True,
):
    """Generate a JavaScript line chart with the "HighCharts" library.
    @Params
    chart_name - Selects which chart to use when multiple charts
        are being created at the same time.
    title - The chart's displayed title.
    subtitle - The chart's displayed subtitle.
    data_name - The series name (useful for multi-series charts).
        When not given, "Series 1" is used.
    unit - The description label for the chart's y-axis values.
    zero - If True, the y-axis always starts at 0. (Default: False).
    libs - Whether to include the Chart libraries (JS and CSS files).
        Keep True (default) for the first chart on a web page;
        later charts on the same page can skip the re-import.
    labels - If True, data-point labels are shown on the chart.
    legend - If True, the data-point legend is shown on the chart.
    """
    # Delegate to the shared HighCharts builder with the "line" style.
    self.__create_highchart(
        chart_name=chart_name or "default",
        title=title,
        subtitle=subtitle,
        style="line",
        data_name=data_name or "",
        unit=unit,
        zero=zero,
        libs=libs,
        labels=labels,
        legend=legend,
    )
def create_area_chart(
    self,
    chart_name=None,
    title=None,
    subtitle=None,
    data_name=None,
    unit=None,
    zero=False,
    libs=True,
    labels=True,
    legend=True,
):
    """Generate a JavaScript area chart with the "HighCharts" library.
    @Params
    chart_name - Selects which chart to use when multiple charts
        are being created at the same time.
    title - The chart's displayed title.
    subtitle - The chart's displayed subtitle.
    data_name - The series name (useful for multi-series charts).
        When not given, "Series 1" is used.
    unit - The description label for the chart's y-axis values.
    zero - If True, the y-axis always starts at 0. (Default: False).
    libs - Whether to include the Chart libraries (JS and CSS files).
        Keep True (default) for the first chart on a web page;
        later charts on the same page can skip the re-import.
    labels - If True, data-point labels are shown on the chart.
    legend - If True, the data-point legend is shown on the chart.
    """
    # Delegate to the shared HighCharts builder with the "area" style.
    self.__create_highchart(
        chart_name=chart_name or "default",
        title=title,
        subtitle=subtitle,
        style="area",
        data_name=data_name or "",
        unit=unit,
        zero=zero,
        libs=libs,
        labels=labels,
        legend=legend,
    )
def __create_highchart(
    self,
    chart_name=None,
    title=None,
    subtitle=None,
    style=None,
    data_name=None,
    unit=None,
    zero=False,
    libs=True,
    labels=True,
    legend=True,
):
    """Creates a JavaScript chart using the "HighCharts" library.
    Shared builder behind create_pie_chart(), create_bar_chart(),
    create_column_chart(), create_line_chart(), and create_area_chart().
    Assembles HTML/JS fragments and stores them in self._chart_data;
    the fragments are later completed by save_chart()/extract_chart(),
    which also fill in the "xAxis: { }," placeholder emitted below.
    """
    # Apply defaults for all unset options.
    if not chart_name:
        chart_name = "default"
    if not title:
        title = ""
    if not subtitle:
        subtitle = ""
    if not style:
        style = "pie"
    if not data_name:
        data_name = "Series 1"
    if not unit:
        unit = "Values"
    # Convert Python booleans to lowercase strings for the JS template.
    if labels:
        labels = "true"
    else:
        labels = "false"
    if legend:
        legend = "true"
    else:
        legend = "false"
    # Escape single quotes so values are safe inside JS '...' strings.
    title = title.replace("'", "\\'")
    subtitle = subtitle.replace("'", "\\'")
    unit = unit.replace("'", "\\'")
    # Each chart on the page gets a unique container-id number.
    self._chart_count += 1
    # If chart_libs format is changed, also change: save_presentation()
    chart_libs = """
        <script src="%s"></script>
        <script src="%s"></script>
        <script src="%s"></script>
        <script src="%s"></script>
        """ % (
        constants.HighCharts.HC_JS,
        constants.HighCharts.EXPORTING_JS,
        constants.HighCharts.EXPORT_DATA_JS,
        constants.HighCharts.ACCESSIBILITY_JS,
    )
    # Library imports are only needed for the first chart on a page.
    if not libs:
        chart_libs = ""
    chart_css = """
        <style>
        .highcharts-figure, .highcharts-data-table table {
            min-width: 320px;
            max-width: 660px;
            margin: 1em auto;
        }
        .highcharts-data-table table {
            font-family: Verdana, sans-serif;
            border-collapse: collapse;
            border: 1px solid #EBEBEB;
            margin: 10px auto;
            text-align: center;
            width: 100%;
            max-width: 500px;
        }
        .highcharts-data-table caption {
            padding: 1em 0;
            font-size: 1.2em;
            color: #555;
        }
        .highcharts-data-table th {
            font-weight: 600;
            padding: 0.5em;
        }
        .highcharts-data-table td, .highcharts-data-table th,
        .highcharts-data-table caption {
            padding: 0.5em;
        }
        .highcharts-data-table thead tr,
        .highcharts-data-table tr:nth-child(even) {
            background: #f8f8f8;
        }
        .highcharts-data-table tr:hover {
            background: #f1f7ff;
        }
        </style>
        """
    if not libs:
        chart_css = ""
    chart_description = ""
    chart_figure = """
        <figure class="highcharts-figure">
            <div id="chartcontainer_num_%s"></div>
            <p class="highcharts-description">%s</p>
        </figure>
        """ % (
        self._chart_count,
        chart_description,
    )
    # Optionally pin the y-axis minimum at zero (line/area charts).
    min_zero = ""
    if zero:
        min_zero = "min: 0,"
    chart_init_1 = """
        <script>
        // Build the chart
        Highcharts.chart('chartcontainer_num_%s', {
        credits: {
            enabled: false
        },
        title: {
            text: '%s'
        },
        subtitle: {
            text: '%s'
        },
        xAxis: { },
        yAxis: {
            %s
            title: {
                text: '%s',
                style: {
                    fontSize: '14px'
                }
            },
            labels: {
                useHTML: true,
                style: {
                    fontSize: '14px'
                }
            }
        },
        chart: {
            renderTo: 'statusChart',
            plotBackgroundColor: null,
            plotBorderWidth: null,
            plotShadow: false,
            type: '%s'
        },
        """ % (
        self._chart_count,
        title,
        subtitle,
        min_zero,
        unit,
        style,
    )
    # "{series.name}:"
    # Pie charts show the raw value plus the percentage in tooltips;
    # other chart styles show only the raw value.
    point_format = (
        r"<b>{point.y}</b><br />" r"<b>{point.percentage:.1f}%</b>"
    )
    if style != "pie":
        point_format = r"<b>{point.y}</b>"
    chart_init_2 = (
        """
        tooltip: {
            enabled: true,
            useHTML: true,
            style: {
                padding: '6px',
                fontSize: '14px'
            },
            backgroundColor: {
                linearGradient: {
                    x1: 0,
                    y1: 0,
                    x2: 0,
                    y2: 1
                },
                stops: [
                    [0, 'rgba(255, 255, 255, 0.78)'],
                    [0.5, 'rgba(235, 235, 235, 0.76)'],
                    [1, 'rgba(244, 252, 255, 0.74)']
                ]
            },
            hideDelay: 40,
            pointFormat: '%s'
        },
        """
        % point_format
    )
    # Pie charts and non-pie charts use different plotOptions blocks.
    # (The "%%" sequences below are literal "%" signs after formatting.)
    chart_init_3 = """
        accessibility: {
            point: {
                valueSuffix: '%%'
            }
        },
        plotOptions: {
            series: {
                states: {
                    inactive: {
                        opacity: 0.85
                    }
                }
            },
            pie: {
                size: "95%%",
                allowPointSelect: true,
                animation: false,
                cursor: 'pointer',
                dataLabels: {
                    enabled: %s,
                    formatter: function() {
                        if (this.y > 0) {
                        return this.point.name + ': ' + this.point.y
                        }
                    }
                },
                states: {
                    hover: {
                        enabled: true
                    }
                },
                showInLegend: %s
            }
        },
        """ % (
        labels,
        legend,
    )
    if style != "pie":
        chart_init_3 = """
        allowPointSelect: true,
        cursor: 'pointer',
        legend: {
            layout: 'vertical',
            align: 'right',
            verticalAlign: 'middle'
        },
        states: {
            hover: {
                enabled: true
            }
        },
        plotOptions: {
            series: {
                dataLabels: {
                    enabled: %s
                },
                showInLegend: %s,
                animation: false,
                shadow: false,
                lineWidth: 3,
                fillOpacity: 0.5,
                marker: {
                    enabled: true
                }
            }
        },
        """ % (
            labels,
            legend,
        )
    chart_init = chart_init_1 + chart_init_2 + chart_init_3
    # Pie slices get individual colors; other styles use one series color.
    color_by_point = "true"
    if style != "pie":
        color_by_point = "false"
    # The series block is left open; data points are appended later by
    # add_data_point() and the block is closed in save_chart().
    series = """
        series: [{
        name: '%s',
        colorByPoint: %s,
        data: [
        """ % (
        data_name,
        color_by_point,
    )
    new_chart = chart_libs + chart_css + chart_figure + chart_init + series
    # Reset per-chart bookkeeping for this chart_name.
    self._chart_data[chart_name] = []
    self._chart_label[chart_name] = []
    self._chart_data[chart_name].append(new_chart)
    self._chart_first_series[chart_name] = True
    self._chart_series_count[chart_name] = 1
def add_series_to_chart(self, data_name=None, chart_name=None):
    """Attach an additional data series to an existing chart,
    which allows a single chart to hold multiple data sets.
    @Params
    data_name - The name for the new series. When not given,
        an auto-numbered "Series N" name is used.
    chart_name - Selects which chart to use when multiple charts
        are being created at the same time.
    """
    chart_name = chart_name or "default"
    self._chart_series_count[chart_name] += 1
    if not data_name:
        data_name = "Series %s" % self._chart_series_count[chart_name]
    # Close the previous series' data list and open a new series block.
    self._chart_data[chart_name].append(
        """
            ]
        },
        {
        name: '%s',
        colorByPoint: false,
        data: [
        """
        % data_name
    )
    # New data points now belong to this series, not the first one.
    self._chart_first_series[chart_name] = False
def add_data_point(self, label, value, color=None, chart_name=None):
    """Add a data point to a SeleniumBase-generated chart.
    @Params
    label - The label name for the data point.
    value - The numeric value of the data point.
    color - The HTML color of the data point.
        Can be an RGB color. Eg: "#55ACDC".
        Can also be a named color. Eg: "Teal".
    chart_name - If creating multiple charts,
        use this to select which one.
    """
    if not chart_name:
        chart_name = "default"
    if chart_name not in self._chart_data:
        # Create a chart if it doesn't already exist
        self.create_pie_chart(chart_name=chart_name)
    if not value:
        # Falsy values (eg. None) are charted as zero.
        value = 0
    # isinstance() is the idiomatic type check; bool is excluded
    # explicitly because True would otherwise pass as an int, while
    # the strict type() check this replaces rejected it.
    if isinstance(value, bool) or not isinstance(value, (int, float)):
        raise Exception('Expecting a numeric value for "value"!')
    if not color:
        color = ""
    # Escape single quotes so values are safe inside the JS template.
    label = label.replace("'", "\\'")
    color = color.replace("'", "\\'")
    data_point = """
        {
        name: '%s',
        y: %s,
        color: '%s'
        },
        """ % (
        label,
        value,
        color,
    )
    self._chart_data[chart_name].append(data_point)
    if self._chart_first_series[chart_name]:
        # Only the first series defines the x-axis category labels.
        self._chart_label[chart_name].append(label)
def save_chart(self, chart_name=None, filename=None, folder=None):
    """Saves a SeleniumBase-generated chart to a file for later use.
    @Params
    chart_name - If creating multiple charts at the same time,
        use this to select the one you wish to use.
    filename - The name of the HTML file that you wish to
        save the chart to. (filename must end in ".html")
    folder - The name of the folder where you wish to
        save the HTML file. (Default: "./saved_charts/")
    Returns the path of the saved file.
    """
    if not chart_name:
        chart_name = "default"
    if not filename:
        filename = "my_chart.html"
    if chart_name not in self._chart_data:
        raise Exception("Chart {%s} does not exist!" % chart_name)
    if not filename.endswith(".html"):
        raise Exception('Chart file must end in ".html"!')
    the_html = '<meta charset="utf-8">\n'
    the_html += '<meta http-equiv="Content-Type" content="text/html">\n'
    the_html += '<meta name="viewport" content="shrink-to-fit=no">\n'
    # Concatenate all accumulated fragments: libs/css/figure/init/series
    # from __create_highchart() plus the data points added afterwards.
    for chart_data_point in self._chart_data[chart_name]:
        the_html += chart_data_point
    # Close the final data list, series array, and the chart script.
    the_html += """
        ]
        }]
        });
        </script>
        """
    # Build the xAxis categories from the first series' labels and
    # substitute them for the "xAxis: { }," placeholder that
    # __create_highchart() emitted into the chart template.
    axis = "xAxis: {\n"
    axis += "                labels: {\n"
    axis += "                    useHTML: true,\n"
    axis += "                    style: {\n"
    axis += "                        fontSize: '14px',\n"
    axis += "                    },\n"
    axis += "                },\n"
    axis += "            categories: ["
    for label in self._chart_label[chart_name]:
        axis += "'%s'," % label
    axis += "], crosshair: false},"
    the_html = the_html.replace("xAxis: { },", axis)
    if not folder:
        saved_charts_folder = constants.Charts.SAVED_FOLDER
    else:
        saved_charts_folder = folder
    if saved_charts_folder.endswith("/"):
        saved_charts_folder = saved_charts_folder[:-1]
    if not os.path.exists(saved_charts_folder):
        try:
            os.makedirs(saved_charts_folder)
        except Exception:
            # Best-effort: a parallel process may have created it already.
            pass
    file_path = saved_charts_folder + "/" + filename
    # codecs.open() guarantees UTF-8 output on both Python 2 and 3.
    out_file = codecs.open(file_path, "w+", encoding="utf-8")
    out_file.writelines(the_html)
    out_file.close()
    print("\n>>> [%s] was saved!" % file_path)
    return file_path
def display_chart(self, chart_name=None, filename=None, interval=0):
    """Displays a SeleniumBase-generated chart in the browser window.
    @Params
    chart_name - If creating multiple charts at the same time,
        use this to select the one you wish to use.
    filename - The name of the HTML file that you wish to
        save the chart to. (filename must end in ".html")
    interval - The delay time for auto-advancing charts. (in seconds)
        If set to 0 (default), auto-advancing is disabled.
    """
    if self.headless or self.xvfb:
        interval = 1  # Race through chart if running in headless mode
    if not chart_name:
        chart_name = "default"
    if not filename:
        filename = "my_chart.html"
    if not interval:
        interval = 0
    # A command-line/global interval setting overrides the default of 0.
    if interval == 0 and self.interval:
        interval = float(self.interval)
    if not type(interval) is int and not type(interval) is float:
        raise Exception('Expecting a numeric value for "interval"!')
    if interval < 0:
        raise Exception('The "interval" cannot be a negative number!')
    if chart_name not in self._chart_data:
        raise Exception("Chart {%s} does not exist!" % chart_name)
    if not filename.endswith(".html"):
        raise Exception('Chart file must end in ".html"!')
    # Save the chart to disk and open it in the browser window.
    file_path = self.save_chart(chart_name=chart_name, filename=filename)
    self.open_html_file(file_path)
    chart_folder = constants.Charts.SAVED_FOLDER
    if interval == 0:
        # Manual mode: block until the window is closed or the user
        # navigates away from the saved-charts folder.
        try:
            print("\n*** Close the browser window to continue ***")
            # Will also continue if manually navigating to a new page
            while len(self.driver.window_handles) > 0 and (
                chart_folder in self.get_current_url()
            ):
                time.sleep(0.05)
        except Exception:
            pass
    else:
        # Auto-advance mode: poll every 0.1s until the interval elapses,
        # the window closes, or the user navigates away.
        try:
            start_ms = time.time() * 1000.0
            stop_ms = start_ms + (interval * 1000.0)
            for x in range(int(interval * 10)):
                now_ms = time.time() * 1000.0
                if now_ms >= stop_ms:
                    break
                if len(self.driver.window_handles) == 0:
                    break
                if chart_folder not in self.get_current_url():
                    break
                time.sleep(0.1)
        except Exception:
            pass
def extract_chart(self, chart_name=None):
    """Extracts the HTML from a SeleniumBase-generated chart.
    @Params
    chart_name - If creating multiple charts at the same time,
        use this to select the one you wish to use.
    Returns the chart's full HTML as a string.
    """
    if not chart_name:
        chart_name = "default"
    if chart_name not in self._chart_data:
        raise Exception("Chart {%s} does not exist!" % chart_name)
    the_html = ""
    # Concatenate all accumulated fragments for this chart.
    for chart_data_point in self._chart_data[chart_name]:
        the_html += chart_data_point
    # Close the final data list, series array, and the chart script.
    the_html += """
        ]
        }]
        });
        </script>
        """
    # Build the xAxis categories from the first series' labels and
    # substitute them for the "xAxis: { }," placeholder emitted by
    # __create_highchart().
    axis = "xAxis: {\n"
    axis += "                labels: {\n"
    axis += "                    useHTML: true,\n"
    axis += "                    style: {\n"
    axis += "                        fontSize: '14px',\n"
    axis += "                    },\n"
    axis += "                },\n"
    axis += "            categories: ["
    for label in self._chart_label[chart_name]:
        axis += "'%s'," % label
    axis += "], crosshair: false},"
    the_html = the_html.replace("xAxis: { },", axis)
    # Renumber the container id so multiple extracted charts can be
    # embedded on the same page without id collisions.
    self._chart_xcount += 1
    the_html = the_html.replace(
        "chartcontainer_num_", "chartcontainer_%s_" % self._chart_xcount
    )
    return the_html
############
def create_tour(self, name=None, theme=None):
    """Create a guided tour for any website.
    The default tour library is IntroJS.
    @Params
    name - Selects which tour to add steps to when creating
        multiple tours at the same time.
    theme - The default theme for the website tour. Available themes:
        "Bootstrap", "DriverJS", "Hopscotch", "IntroJS", "Shepherd".
        The "Shepherd" library also has multiple variation themes:
        "light"/"arrows", "dark", "default", "square", "square-dark".
    """
    name = name or "default"
    # Map each Shepherd-flavored theme keyword to its variation name.
    shepherd_variants = {
        "shepherd": "light",
        "light": "light",
        "arrows": "light",
        "dark": "dark",
        "square": "square",
        "square-dark": "square-dark",
        "default": "default",
    }
    theme_key = theme.lower() if theme else None
    if theme_key == "bootstrap":
        self.create_bootstrap_tour(name)
    elif theme_key == "hopscotch":
        self.create_hopscotch_tour(name)
    elif theme_key in ("driver", "driverjs"):
        self.create_driverjs_tour(name)
    elif theme_key in shepherd_variants:
        self.create_shepherd_tour(name, theme=shepherd_variants[theme_key])
    else:
        # Covers "intro"/"introjs", unknown themes, and no theme at all.
        self.create_introjs_tour(name)
def create_shepherd_tour(self, name=None, theme=None):
    """Create a Shepherd JS website tour.
    @Params
    name - Selects which tour to add steps to when creating
        multiple tours at the same time.
    theme - The default theme for the tour. One of "light"/"arrows",
        "dark", "default", "square", or "square-dark".
        ("light" is used when no theme is given.)
    """
    # Map theme keywords to their Shepherd CSS class names.
    theme_classes = {
        "default": "shepherd-theme-default",
        "dark": "shepherd-theme-dark",
        "light": "shepherd-theme-arrows",
        "arrows": "shepherd-theme-arrows",
        "square": "shepherd-theme-square",
        "square-dark": "shepherd-theme-square-dark",
    }
    shepherd_theme = "shepherd-theme-arrows"
    if theme:
        shepherd_theme = theme_classes.get(theme.lower(), shepherd_theme)
    tour_key = name if name else "default"
    new_tour = (
        """
        // Shepherd Tour
        var tour = new Shepherd.Tour({
            defaults: {
                classes: '%s',
                scrollTo: true
            }
        });
        var allButtons = {
            skip: {
                text: "Skip",
                action: tour.cancel,
                classes: 'shepherd-button-secondary tour-button-left'
            },
            back: {
                text: "Back",
                action: tour.back,
                classes: 'shepherd-button-secondary'
            },
            next: {
                text: "Next",
                action: tour.next,
                classes: 'shepherd-button-primary tour-button-right'
            },
        };
        var firstStepButtons = [allButtons.skip, allButtons.next];
        var midTourButtons = [allButtons.back, allButtons.next];
        """
        % shepherd_theme
    )
    # Start a fresh step list for this tour name.
    self._tour_steps[tour_key] = [new_tour]
def create_bootstrap_tour(self, name=None):
    """Create a Bootstrap tour for a website.
    @Params
    name - Selects which tour to add steps to when creating
        multiple tours at the same time.
    """
    tour_key = name if name else "default"
    # The step list stays open: steps get appended by add_tour_step(),
    # and the tour is closed out when it gets played/exported.
    new_tour = """
        // Bootstrap Tour
        var tour = new Tour({
            container: 'body',
            animation: true,
            keyboard: true,
            orphan: true,
            smartPlacement: true,
            autoscroll: true,
            backdrop: true,
            backdropContainer: 'body',
            backdropPadding: 3,
        });
        tour.addSteps([
        """
    self._tour_steps[tour_key] = [new_tour]
def create_driverjs_tour(self, name=None):
    """Create a DriverJS tour for a website.
    @Params
    name - Selects which tour to add steps to when creating
        multiple tours at the same time.
    """
    tour_key = name if name else "default"
    # The step list stays open: steps get appended by add_tour_step().
    new_tour = """
        // DriverJS Tour
        var tour = new Driver({
            opacity: 0.24, // Background opacity (0: no popover / overlay)
            padding: 6, // Distance of element from around the edges
            allowClose: false, // Whether clicking on overlay should close
            overlayClickNext: false, // Move to next step on overlay click
            doneBtnText: 'Done', // Text that appears on the Done button
            closeBtnText: 'Close', // Text appearing on the Close button
            nextBtnText: 'Next', // Text that appears on the Next button
            prevBtnText: 'Previous', // Text appearing on Previous button
            showButtons: true, // This shows control buttons in the footer
            keyboardControl: true, // (escape to close, arrow keys to move)
            animate: true, // Animate while changing highlighted element
        });
        tour.defineSteps([
        """
    self._tour_steps[tour_key] = [new_tour]
def create_hopscotch_tour(self, name=None):
    """Create a Hopscotch tour for a website.
    @Params
    name - Selects which tour to add steps to when creating
        multiple tours at the same time.
    """
    tour_key = name if name else "default"
    # The step list stays open: steps get appended by add_tour_step().
    new_tour = """
        // Hopscotch Tour
        var tour = {
        id: "hopscotch_tour",
        steps: [
        """
    self._tour_steps[tour_key] = [new_tour]
def create_introjs_tour(self, name=None):
    """Create an IntroJS tour for a website.
    @Params
    name - Selects which tour to add steps to when creating
        multiple tours at the same time.
    """
    # Make sure the IntroJS theme colors have been initialized.
    if not hasattr(sb_config, "introjs_theme_color"):
        sb_config.introjs_theme_color = constants.TourColor.theme_color
    if not hasattr(sb_config, "introjs_hover_color"):
        sb_config.introjs_hover_color = constants.TourColor.hover_color
    tour_key = name if name else "default"
    # The step list stays open: steps get appended by add_tour_step().
    new_tour = """
        // IntroJS Tour
        function startIntro(){
        var intro = introJs();
        intro.setOptions({
        steps: [
        """
    self._tour_steps[tour_key] = [new_tour]
def set_introjs_colors(self, theme_color=None, hover_color=None):
    """Set the theme colors used by IntroJS tours.
    Args must be hex color values that start with a "#" sign.
    If a color isn't specified, the color resets to the default.
    The border color of buttons is set to the hover color.
    @Params
    theme_color - The color of buttons.
    hover_color - The color of buttons after hovering over them.
    """
    # Make sure the defaults exist before (re)assigning either color.
    if not hasattr(sb_config, "introjs_theme_color"):
        sb_config.introjs_theme_color = constants.TourColor.theme_color
    if not hasattr(sb_config, "introjs_hover_color"):
        sb_config.introjs_hover_color = constants.TourColor.hover_color

    def _validated(color):
        # Accept only "#"-prefixed 3- or 6-digit hex color values.
        if not re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', color):
            raise Exception(
                'Expecting a hex value color that starts with "#"!')
        return color

    if theme_color:
        sb_config.introjs_theme_color = _validated(theme_color)
    else:
        sb_config.introjs_theme_color = constants.TourColor.theme_color
    if hover_color:
        sb_config.introjs_hover_color = _validated(hover_color)
    else:
        sb_config.introjs_hover_color = constants.TourColor.hover_color
def add_tour_step(
    self,
    message,
    selector=None,
    name=None,
    title=None,
    theme=None,
    alignment=None,
    duration=None,
):
    """Allows the user to add tour steps for a website.
    @Params
    message - The message to display.
    selector - The CSS Selector of the Element to attach to.
    name - If creating multiple tours at the same time,
        use this to select the tour you wish to add steps to.
    title - Additional header text that appears above the message.
    theme - (Shepherd Tours ONLY) The styling of the tour step.
        Choose from "light"/"arrows", "dark", "default", "square",
        and "square-dark". ("arrows" is used if None is selected.)
    alignment - Choose from "top", "bottom", "left", and "right".
        ("top" is default, except for Hopscotch and DriverJS).
    duration - (Bootstrap Tours ONLY) The amount of time, in seconds,
        before automatically advancing to the next tour step.
    """
    # No selector means the step attaches to the whole page.
    if not selector:
        selector = "html"
    if page_utils.is_name_selector(selector):
        # NOTE(review): a name= selector overwrites the tour "name"
        # parameter here, redirecting which tour the step is added to.
        # Looks suspicious — confirm this reuse is intentional.
        name = page_utils.get_name_from_selector(selector)
        selector = '[name="%s"]' % name
    if page_utils.is_xpath_selector(selector):
        # Tour libraries need CSS selectors, so convert XPath first.
        selector = self.convert_to_css_selector(selector, By.XPATH)
    selector = self.__escape_quotes_if_needed(selector)
    if not name:
        name = "default"
    if name not in self._tour_steps:
        # By default, will create an IntroJS tour if no tours exist
        self.create_tour(name=name, theme="introjs")
    if not title:
        title = ""
    title = self.__escape_quotes_if_needed(title)
    if message:
        message = self.__escape_quotes_if_needed(message)
    else:
        message = ""
    if not alignment or alignment not in [
        "top",
        "bottom",
        "left",
        "right",
    ]:
        # Default alignment is "top", except Hopscotch and DriverJS
        # default to "bottom".
        t_name = self._tour_steps[name][0]
        if "Hopscotch" not in t_name and "DriverJS" not in t_name:
            alignment = "top"
        else:
            alignment = "bottom"
    # Dispatch on the library marker embedded in the tour's first step.
    if "Bootstrap" in self._tour_steps[name][0]:
        self.__add_bootstrap_tour_step(
            message,
            selector=selector,
            name=name,
            title=title,
            alignment=alignment,
            duration=duration,
        )
    elif "DriverJS" in self._tour_steps[name][0]:
        self.__add_driverjs_tour_step(
            message,
            selector=selector,
            name=name,
            title=title,
            alignment=alignment,
        )
    elif "Hopscotch" in self._tour_steps[name][0]:
        self.__add_hopscotch_tour_step(
            message,
            selector=selector,
            name=name,
            title=title,
            alignment=alignment,
        )
    elif "IntroJS" in self._tour_steps[name][0]:
        self.__add_introjs_tour_step(
            message,
            selector=selector,
            name=name,
            title=title,
            alignment=alignment,
        )
    else:
        # Shepherd is the fallback when no other marker matches.
        self.__add_shepherd_tour_step(
            message,
            selector=selector,
            name=name,
            title=title,
            theme=theme,
            alignment=alignment,
        )
def __add_shepherd_tour_step(
    self,
    message,
    selector=None,
    name=None,
    title=None,
    theme=None,
    alignment=None,
):
    """Appends a Shepherd tour step to the named tour.
    @Params
    message - The message to display.
    selector - The CSS Selector of the Element to attach to.
    name - If creating multiple tours at the same time,
        use this to select the tour you wish to add steps to.
    title - Additional header text that appears above the message.
    theme - The styling of the tour step.
        Choose from "light"/"arrows", "dark", "default", "square",
        and "square-dark". ("arrows" is used if None is selected.)
    alignment - Choose from "top", "bottom", "left", and "right".
        ("top" is the default alignment).
    """
    if theme == "default":
        shepherd_theme = "shepherd-theme-default"
    elif theme == "dark":
        shepherd_theme = "shepherd-theme-dark"
    elif theme == "light":
        shepherd_theme = "shepherd-theme-arrows"
    elif theme == "arrows":
        shepherd_theme = "shepherd-theme-arrows"
    elif theme == "square":
        shepherd_theme = "shepherd-theme-square"
    elif theme == "square-dark":
        shepherd_theme = "shepherd-theme-square-dark"
    else:
        # No explicit theme: reuse the theme the tour was created with,
        # pulled out of the "classes: '...'" line of the stored opening
        # script via regex.
        shepherd_base_theme = re.search(
            r"[\S\s]+classes: '([\S\s]+)',[\S\s]+",
            self._tour_steps[name][0],
        ).group(1)
        shepherd_theme = shepherd_base_theme
    shepherd_classes = shepherd_theme
    if selector == "html":
        # Page-wide steps have no anchor element, so mark them orphan.
        shepherd_classes += " shepherd-orphan"
    # The first step offers Skip/Next; later steps offer Back/Next.
    buttons = "firstStepButtons"
    if len(self._tour_steps[name]) > 1:
        buttons = "midTourButtons"
    step = """tour.addStep('%s', {
        title: '%s',
        classes: '%s',
        text: '%s',
        attachTo: {element: '%s', on: '%s'},
        buttons: %s,
        advanceOn: '.docs-link click'
    });""" % (
        name,
        title,
        shepherd_classes,
        message,
        selector,
        alignment,
        buttons,
    )
    self._tour_steps[name].append(step)
def __add_bootstrap_tour_step(
    self,
    message,
    selector=None,
    name=None,
    title=None,
    alignment=None,
    duration=None,
):
    """Appends a Bootstrap tour step to the named tour.
    @Params
    message - The message to display.
    selector - The CSS Selector of the Element to attach to.
    name - If creating multiple tours at the same time,
        use this to select the tour you wish to add steps to.
    title - Additional header text that appears above the message.
    alignment - Choose from "top", "bottom", "left", and "right".
        ("top" is the default alignment).
    duration - The time, in seconds, before auto-advancing
        to the next tour step.
    """
    element_row = ""
    if selector != "html":
        # Anchor only to the first element matched by the selector.
        selector = self.__make_css_match_first_element_only(selector)
        element_row = "element: '%s'," % selector
    # Bootstrap expects the duration in milliseconds.
    duration = str(float(duration) * 1000.0) if duration else "0"
    # Page-wide ("html") steps get no backdrop.
    backdrop_row = (
        "backdrop: false," if selector == "html" else "backdrop: true,"
    )
    step = """{
        %s
        title: '%s',
        content: '%s',
        orphan: true,
        autoscroll: true,
        %s
        placement: 'auto %s',
        smartPlacement: true,
        duration: %s,
    },""" % (
        element_row,
        title,
        message,
        backdrop_row,
        alignment,
        duration,
    )
    self._tour_steps[name].append(step)
def __add_driverjs_tour_step(
    self, message, selector=None, name=None, title=None, alignment=None
):
    """Appends a DriverJS tour step to the named tour.
    @Params
    message - The message to display.
    selector - The CSS Selector of the Element to attach to.
    name - If creating multiple tours at the same time,
        use this to select the tour you wish to add steps to.
    title - Additional header text that appears above the message.
    alignment - Choose from "top", "bottom", "left", and "right".
        ("top" is the default alignment).
    """
    message = (
        '<font size="3" color="#33477B"><b>' + message + "</b></font>"
    )
    if title:
        title_row = "title: '%s'," % title
    else:
        # With no title, the styled message becomes the popover title.
        title_row = "title: '%s'," % message
        message = ""
    # Steps without a real anchor attach to <body>, centered, unanimated.
    if not selector or selector in ("html", "body"):
        selector = "body"
        ani_row = "animate: false,"
        align_row = "position: '%s'," % "mid-center"
    else:
        ani_row = "animate: true,"
        align_row = "position: '%s'," % alignment
    element_row = "element: '%s'," % selector
    desc_row = "description: '%s'," % message
    step = """{
        %s
        %s
        popover: {
            className: 'popover-class',
            %s
            %s
            %s
        }
    },""" % (
        element_row,
        ani_row,
        title_row,
        desc_row,
        align_row,
    )
    self._tour_steps[name].append(step)
def __add_hopscotch_tour_step(
    self, message, selector=None, name=None, title=None, alignment=None
):
    """Appends a Hopscotch tour step to the named tour.
    @Params
    message - The message to display.
    selector - The CSS Selector of the Element to attach to.
    name - If creating multiple tours at the same time,
        use this to select the tour you wish to add steps to.
    title - Additional header text that appears above the message.
    alignment - Choose from "top", "bottom", "left", and "right".
        ("bottom" is the default alignment).
    """
    arrow_offset_row = ""
    if not selector or selector == "html":
        # Page-wide steps anchor to <head> with an offset arrow.
        selector = "head"
        alignment = "bottom"
        arrow_offset_row = "arrowOffset: '200',"
    step = """{
        target: '%s',
        title: '%s',
        content: '%s',
        %s
        showPrevButton: 'true',
        scrollDuration: '550',
        placement: '%s'},
    """ % (
        selector,
        title,
        message,
        arrow_offset_row,
        alignment,
    )
    self._tour_steps[name].append(step)
def __add_introjs_tour_step(
    self, message, selector=None, name=None, title=None, alignment=None
):
    """Appends an IntroJS tour step to the named tour.
    @Params
    message - The message to display.
    selector - The CSS Selector of the Element to attach to.
    name - If creating multiple tours at the same time,
        use this to select the tour you wish to add steps to.
    title - Additional header text that appears above the message.
    alignment - Choose from "top", "bottom", "left", and "right".
        ("top" is the default alignment).
    """
    # Page-wide ("html") steps have no anchor element.
    element_row = (
        "" if selector == "html" else "element: '%s'," % selector
    )
    if title:
        # Prepend the title as a centered header above the message.
        message = "<center><b>" + title + "</b></center><hr>" + message
    message = '<font size="3" color="#33477B">' + message + "</font>"
    step = """{%s
        intro: '%s',
        position: '%s'},""" % (
        element_row,
        message,
        alignment,
    )
    self._tour_steps[name].append(step)
def play_tour(self, name=None, interval=0):
    """Plays a tour on the current website.
    @Params
    name - If creating multiple tours at the same time,
        use this to select the tour you wish to add steps to.
    interval - The delay time between autoplaying tour steps. (Seconds)
        If set to 0 (default), the tour is fully manual control.
    """
    from seleniumbase.core import tour_helper
    if self.headless or self.xvfb:
        return  # Tours should not run in headless mode.
    self.wait_for_ready_state_complete()
    if not interval:
        interval = 0
    # A command-line/global interval setting overrides the default of 0.
    if interval == 0 and self.interval:
        interval = float(self.interval)
    if not name:
        name = "default"
    if name not in self._tour_steps:
        raise Exception("Tour {%s} does not exist!" % name)
    # Dispatch on the library marker embedded in the tour's first step.
    # (Note: the Shepherd player takes no "browser" argument.)
    if "Bootstrap" in self._tour_steps[name][0]:
        tour_helper.play_bootstrap_tour(
            self.driver,
            self._tour_steps,
            self.browser,
            self.message_duration,
            name=name,
            interval=interval,
        )
    elif "DriverJS" in self._tour_steps[name][0]:
        tour_helper.play_driverjs_tour(
            self.driver,
            self._tour_steps,
            self.browser,
            self.message_duration,
            name=name,
            interval=interval,
        )
    elif "Hopscotch" in self._tour_steps[name][0]:
        tour_helper.play_hopscotch_tour(
            self.driver,
            self._tour_steps,
            self.browser,
            self.message_duration,
            name=name,
            interval=interval,
        )
    elif "IntroJS" in self._tour_steps[name][0]:
        tour_helper.play_introjs_tour(
            self.driver,
            self._tour_steps,
            self.browser,
            self.message_duration,
            name=name,
            interval=interval,
        )
    else:
        # "Shepherd"
        tour_helper.play_shepherd_tour(
            self.driver,
            self._tour_steps,
            self.message_duration,
            name=name,
            interval=interval,
        )
def export_tour(self, name=None, filename="my_tour.js", url=None):
    """Export a tour as a JS file.
    Can be called anywhere that self.play_tour() would normally be
    used to play a website tour. The exported file also includes
    necessary resources such as jQuery, so the tour can be copied
    directly into any web browser's Console and played outside of
    SeleniumBase runs.
    @Params
    name - If creating multiple tours at the same time,
        use this to select the tour you wish to add steps to.
    filename - The name of the JavaScript file that you wish to
        save the tour to.
    url - The URL where the tour starts. If not specified, the URL
        of the current page will be used.
    """
    from seleniumbase.core import tour_helper
    # Default to the page currently loaded in the browser.
    url = url or self.get_current_url()
    tour_helper.export_tour(
        self._tour_steps, name=name, filename=filename, url=url
    )
############
def activate_jquery_confirm(self):
""" See https://craftpip.github.io/jquery-confirm/ for usage. """
self.__check_scope()
js_utils.activate_jquery_confirm(self.driver)
self.wait_for_ready_state_complete()
def set_jqc_theme(self, theme, color=None, width=None):
""" Sets the default jquery-confirm theme and width (optional).
Available themes: "bootstrap", "modern", "material", "supervan",
"light", "dark", and "seamless".
Available colors: (This sets the BORDER color, NOT the button color.)
"blue", "default", "green", "red", "purple", "orange", "dark".
Width can be set using percent or pixels. Eg: "36.0%", "450px".
"""
if not self.__changed_jqc_theme:
self.__jqc_default_theme = constants.JqueryConfirm.DEFAULT_THEME
self.__jqc_default_color = constants.JqueryConfirm.DEFAULT_COLOR
self.__jqc_default_width = constants.JqueryConfirm.DEFAULT_WIDTH
valid_themes = [
"bootstrap",
"modern",
"material",
"supervan",
"light",
"dark",
"seamless",
]
if theme.lower() not in valid_themes:
raise Exception(
"%s is not a valid jquery-confirm theme! "
"Select from %s" % (theme.lower(), valid_themes)
)
constants.JqueryConfirm.DEFAULT_THEME = theme.lower()
if color:
valid_colors = [
"blue",
"default",
"green",
"red",
"purple",
"orange",
"dark",
]
if color.lower() not in valid_colors:
raise Exception(
"%s is not a valid jquery-confirm border color! "
"Select from %s" % (color.lower(), valid_colors)
)
constants.JqueryConfirm.DEFAULT_COLOR = color.lower()
if width:
if type(width) is int or type(width) is float:
# Convert to a string if a number is given
width = str(width)
if width.isnumeric():
if int(width) <= 0:
raise Exception("Width must be set to a positive number!")
elif int(width) <= 100:
width = str(width) + "%"
else:
width = str(width) + "px" # Use pixels if width is > 100
if not width.endswith("%") and not width.endswith("px"):
raise Exception(
"jqc width must end with %% for percent or px for pixels!"
)
value = None
if width.endswith("%"):
value = width[:-1]
if width.endswith("px"):
value = width[:-2]
try:
value = float(value)
except Exception:
raise Exception("%s is not a numeric value!" % value)
if value <= 0:
raise Exception("%s is not a positive number!" % value)
constants.JqueryConfirm.DEFAULT_WIDTH = width
def reset_jqc_theme(self):
""" Resets the jqc theme settings to factory defaults. """
if self.__changed_jqc_theme:
constants.JqueryConfirm.DEFAULT_THEME = self.__jqc_default_theme
constants.JqueryConfirm.DEFAULT_COLOR = self.__jqc_default_color
constants.JqueryConfirm.DEFAULT_WIDTH = self.__jqc_default_width
self.__changed_jqc_theme = False
    def get_jqc_button_input(self, message, buttons, options=None):
        """
        Pop up a jquery-confirm box and return the text of the button clicked.
        If running in headless mode, the last button text is returned.
        @Params
        message: The message to display in the jquery-confirm dialog.
        buttons: A list of tuples for text and color.
        Example: [("Yes!", "green"), ("No!", "red")]
        Available colors: blue, green, red, orange, purple, default, dark.
        A simple text string also works: "My Button". (Uses default color.)
        options: A list of tuples for options to set.
        Example: [("theme", "bootstrap"), ("width", "450px")]
        Available theme options: bootstrap, modern, material, supervan,
        light, dark, and seamless.
        Available colors: (For the BORDER color, NOT the button color.)
        "blue", "default", "green", "red", "purple", "orange", "dark".
        Example option for changing the border color: ("color", "default")
        Width can be set using percent or pixels. Eg: "36.0%", "450px".
        """
        from seleniumbase.core import jqc_helper
        if message and type(message) is not str:
            raise Exception('Expecting a string for arg: "message"!')
        if not type(buttons) is list and not type(buttons) is tuple:
            raise Exception('Expecting a list or tuple for arg: "button"!')
        if len(buttons) < 1:
            raise Exception('List "buttons" requires at least one button!')
        # Normalize each button spec into "text" or ("text", "color") form.
        new_buttons = []
        for button in buttons:
            if (
                (type(button) is list or type(button) is tuple) and (
                    len(button) == 1)
            ):
                new_buttons.append(button[0])
            elif (
                (type(button) is list or type(button) is tuple) and (
                    len(button) > 1)
            ):
                new_buttons.append((button[0], str(button[1]).lower()))
            else:
                new_buttons.append((str(button), ""))
        buttons = new_buttons
        if options:
            for option in options:
                if not type(option) is list and not type(option) is tuple:
                    raise Exception('"options" should be a list of tuples!')
        if self.headless or self.xvfb:
            # No visible dialog in headless mode: return the last button text
            return buttons[-1][0]
        jqc_helper.jquery_confirm_button_dialog(
            self.driver, message, buttons, options
        )
        self.sleep(0.02)
        # Focus the dialog so keyboard interaction works immediately
        jf = "document.querySelector('.jconfirm-box').focus();"
        try:
            self.execute_script(jf)
        except Exception:
            pass  # Focusing is best-effort only
        # Poll until the dialog is dismissed (no jconfirm instances remain)
        waiting_for_response = True
        while waiting_for_response:
            self.sleep(0.05)
            jqc_open = self.execute_script(
                "return jconfirm.instances.length"
            )
            if str(jqc_open) == "0":
                break
        self.sleep(0.1)
        status = None
        try:
            # $jqc_status is set in the page by the dialog's click handler
            status = self.execute_script("return $jqc_status")
        except Exception:
            # Fall back to jconfirm's own record of the last button clicked
            status = self.execute_script(
                "return jconfirm.lastButtonText"
            )
        return status
    def get_jqc_text_input(self, message, button=None, options=None):
        """
        Pop up a jquery-confirm box and return the text submitted by the input.
        If running in headless mode, the text returned is "" by default.
        @Params
        message: The message to display in the jquery-confirm dialog.
        button: A 2-item list or tuple for text and color. Or just the text.
        Example: ["Submit", "blue"] -> (default button if not specified)
        Available colors: blue, green, red, orange, purple, default, dark.
        A simple text string also works: "My Button". (Uses default color.)
        options: A list of tuples for options to set.
        Example: [("theme", "bootstrap"), ("width", "450px")]
        Available theme options: bootstrap, modern, material, supervan,
        light, dark, and seamless.
        Available colors: (For the BORDER color, NOT the button color.)
        "blue", "default", "green", "red", "purple", "orange", "dark".
        Example option for changing the border color: ("color", "default")
        Width can be set using percent or pixels. Eg: "36.0%", "450px".
        """
        from seleniumbase.core import jqc_helper
        if message and type(message) is not str:
            raise Exception('Expecting a string for arg: "message"!')
        # Normalize the button spec into a ("text", "color") tuple.
        if button:
            if (
                (type(button) is list or type(button) is tuple) and (
                    len(button) == 1)
            ):
                button = (str(button[0]), "")
            elif (
                (type(button) is list or type(button) is tuple) and (
                    len(button) > 1)
            ):
                valid_colors = [
                    "blue",
                    "default",
                    "green",
                    "red",
                    "purple",
                    "orange",
                    "dark",
                ]
                detected_color = str(button[1]).lower()
                if str(button[1]).lower() not in valid_colors:
                    raise Exception(
                        "%s is an invalid jquery-confirm button color!\n"
                        "Select from %s" % (detected_color, valid_colors)
                    )
                button = (str(button[0]), str(button[1]).lower())
            else:
                button = (str(button), "")
        else:
            button = ("Submit", "blue")
        if options:
            for option in options:
                if not type(option) is list and not type(option) is tuple:
                    raise Exception('"options" should be a list of tuples!')
        if self.headless or self.xvfb:
            # No visible dialog in headless mode: return an empty string
            return ""
        jqc_helper.jquery_confirm_text_dialog(
            self.driver, message, button, options
        )
        self.sleep(0.02)
        # Focus the text input so the user can type right away
        jf = "document.querySelector('.jconfirm-box input.jqc_input').focus();"
        try:
            self.execute_script(jf)
        except Exception:
            pass  # Focusing is best-effort only
        # Poll until the dialog is dismissed (no jconfirm instances remain)
        waiting_for_response = True
        while waiting_for_response:
            self.sleep(0.05)
            jqc_open = self.execute_script(
                "return jconfirm.instances.length"
            )
            if str(jqc_open) == "0":
                break
        self.sleep(0.1)
        status = None
        try:
            # $jqc_input is set in the page when the dialog is submitted
            status = self.execute_script("return $jqc_input")
        except Exception:
            # Fall back to jconfirm's own record of the last input text
            status = self.execute_script(
                "return jconfirm.lastInputText"
            )
        return status
    def get_jqc_form_inputs(self, message, buttons, options=None):
        """
        Pop up a jquery-confirm box and return the input/button texts as tuple.
        If running in headless mode, returns the ("", buttons[-1][0]) tuple.
        @Params
        message: The message to display in the jquery-confirm dialog.
        buttons: A list of tuples for text and color.
        Example: [("Yes!", "green"), ("No!", "red")]
        Available colors: blue, green, red, orange, purple, default, dark.
        A simple text string also works: "My Button". (Uses default color.)
        options: A list of tuples for options to set.
        Example: [("theme", "bootstrap"), ("width", "450px")]
        Available theme options: bootstrap, modern, material, supervan,
        light, dark, and seamless.
        Available colors: (For the BORDER color, NOT the button color.)
        "blue", "default", "green", "red", "purple", "orange", "dark".
        Example option for changing the border color: ("color", "default")
        Width can be set using percent or pixels. Eg: "36.0%", "450px".
        """
        from seleniumbase.core import jqc_helper
        if message and type(message) is not str:
            raise Exception('Expecting a string for arg: "message"!')
        if not type(buttons) is list and not type(buttons) is tuple:
            raise Exception('Expecting a list or tuple for arg: "button"!')
        if len(buttons) < 1:
            raise Exception('List "buttons" requires at least one button!')
        # Normalize each button spec into "text" or ("text", "color") form.
        new_buttons = []
        for button in buttons:
            if (
                (type(button) is list or type(button) is tuple) and (
                    len(button) == 1)
            ):
                new_buttons.append(button[0])
            elif (
                (type(button) is list or type(button) is tuple) and (
                    len(button) > 1)
            ):
                new_buttons.append((button[0], str(button[1]).lower()))
            else:
                new_buttons.append((str(button), ""))
        buttons = new_buttons
        if options:
            for option in options:
                if not type(option) is list and not type(option) is tuple:
                    raise Exception('"options" should be a list of tuples!')
        if self.headless or self.xvfb:
            # No visible dialog in headless mode:
            # return empty input text with the last button's text
            return ("", buttons[-1][0])
        jqc_helper.jquery_confirm_full_dialog(
            self.driver, message, buttons, options
        )
        self.sleep(0.02)
        # Focus the text input so the user can type right away
        jf = "document.querySelector('.jconfirm-box input.jqc_input').focus();"
        try:
            self.execute_script(jf)
        except Exception:
            pass  # Focusing is best-effort only
        # Poll until the dialog is dismissed (no jconfirm instances remain)
        waiting_for_response = True
        while waiting_for_response:
            self.sleep(0.05)
            jqc_open = self.execute_script(
                "return jconfirm.instances.length"
            )
            if str(jqc_open) == "0":
                break
        self.sleep(0.1)
        text_status = None
        button_status = None
        try:
            # $jqc_input / $jqc_status are set in the page by the dialog
            text_status = self.execute_script("return $jqc_input")
            button_status = self.execute_script("return $jqc_status")
        except Exception:
            # Fall back to jconfirm's own record of the last submission
            text_status = self.execute_script(
                "return jconfirm.lastInputText"
            )
            button_status = self.execute_script(
                "return jconfirm.lastButtonText"
            )
        return (text_status, button_status)
############
def activate_messenger(self):
self.__check_scope()
js_utils.activate_messenger(self.driver)
self.wait_for_ready_state_complete()
def set_messenger_theme(
self, theme="default", location="default", max_messages="default"
):
"""Sets a theme for posting messages.
Themes: ["flat", "future", "block", "air", "ice"]
Locations: ["top_left", "top_center", "top_right",
"bottom_left", "bottom_center", "bottom_right"]
max_messages is the limit of concurrent messages to display.
"""
self.__check_scope()
if not theme:
theme = "default" # "flat"
if not location:
location = "default" # "bottom_right"
if not max_messages:
max_messages = "default" # "8"
else:
max_messages = str(max_messages) # Value must be in string format
js_utils.set_messenger_theme(
self.driver,
theme=theme,
location=location,
max_messages=max_messages,
)
def post_message(self, message, duration=None, pause=True, style="info"):
"""Post a message on the screen with Messenger.
Arguments:
message: The message to display.
duration: The time until the message vanishes. (Default: 2.55s)
pause: If True, the program waits until the message completes.
style: "info", "success", or "error".
You can also post messages by using =>
self.execute_script('Messenger().post("My Message")')
"""
self.__check_scope()
if style not in ["info", "success", "error"]:
style = "info"
if not duration:
if not self.message_duration:
duration = settings.DEFAULT_MESSAGE_DURATION
else:
duration = self.message_duration
try:
js_utils.post_message(self.driver, message, duration, style=style)
except Exception:
print(" * %s message: %s" % (style.upper(), message))
if pause:
duration = float(duration) + 0.15
time.sleep(float(duration))
def post_message_and_highlight(
self, message, selector, by=By.CSS_SELECTOR
):
"""Post a message on the screen and highlight an element.
Arguments:
message: The message to display.
selector: The selector of the Element to highlight.
by: The type of selector to search by. (Default: CSS Selector)
"""
self.__check_scope()
self.__highlight_with_assert_success(message, selector, by=by)
def post_success_message(self, message, duration=None, pause=True):
"""Post a success message on the screen with Messenger.
Arguments:
message: The success message to display.
duration: The time until the message vanishes. (Default: 2.55s)
pause: If True, the program waits until the message completes.
"""
self.__check_scope()
if not duration:
if not self.message_duration:
duration = settings.DEFAULT_MESSAGE_DURATION
else:
duration = self.message_duration
try:
js_utils.post_message(
self.driver, message, duration, style="success"
)
except Exception:
print(" * SUCCESS message: %s" % message)
if pause:
duration = float(duration) + 0.15
time.sleep(float(duration))
def post_error_message(self, message, duration=None, pause=True):
"""Post an error message on the screen with Messenger.
Arguments:
message: The error message to display.
duration: The time until the message vanishes. (Default: 2.55s)
pause: If True, the program waits until the message completes.
"""
self.__check_scope()
if not duration:
if not self.message_duration:
duration = settings.DEFAULT_MESSAGE_DURATION
else:
duration = self.message_duration
try:
js_utils.post_message(
self.driver, message, duration, style="error"
)
except Exception:
print(" * ERROR message: %s" % message)
if pause:
duration = float(duration) + 0.15
time.sleep(float(duration))
############
    def generate_referral(self, start_page, destination_page, selector=None):
        """This method opens the start_page, creates a referral link there,
        and clicks on that link, which goes to the destination_page.
        If a selector is given, clicks that on the destination_page,
        which can prevent an artificial rise in website bounce-rate.
        (This generates real traffic for testing analytics software.)"""
        self.__check_scope()
        if not page_utils.is_valid_url(destination_page):
            raise Exception(
                "Exception: destination_page {%s} is not a valid URL!"
                % destination_page
            )
        if start_page:
            if not page_utils.is_valid_url(start_page):
                raise Exception(
                    "Exception: start_page {%s} is not a valid URL! "
                    "(Use an empty string or None to start from current page.)"
                    % start_page
                )
            self.open(start_page)
            time.sleep(0.08)
            self.wait_for_ready_state_complete()
        # Replace the page body with a single styled link that points
        # at the destination page, so clicking it creates the referral.
        referral_link = (
            """<body>"""
            """<a class='analytics referral test' href='%s' """
            """style='font-family: Arial,sans-serif; """
            """font-size: 30px; color: #18a2cd'>"""
            """Magic Link Button</a></body>""" % destination_page
        )
        self.execute_script(
            '''document.body.outerHTML = \"%s\"''' % referral_link
        )
        # Now click the generated button
        self.click("a.analytics.referral.test", timeout=2)
        time.sleep(0.15)
        if selector:
            # Click one more element to avoid inflating the bounce-rate
            self.click(selector)
            time.sleep(0.15)
def generate_traffic(
self, start_page, destination_page, loops=1, selector=None
):
"""Similar to generate_referral(), but can do multiple loops.
If a selector is given, clicks that on the destination_page,
which can prevent an artificial rise in website bounce-rate."""
self.__check_scope()
for loop in range(loops):
self.generate_referral(
start_page, destination_page, selector=selector
)
time.sleep(0.05)
def generate_referral_chain(self, pages):
"""Use this method to chain the action of creating button links on
one website page that will take you to the next page.
(When you want to create a referral to a website for traffic
generation without increasing the bounce rate, you'll want to visit
at least one additional page on that site with a button click.)"""
self.__check_scope()
if not type(pages) is tuple and not type(pages) is list:
raise Exception(
"Exception: Expecting a list of website pages for chaining!"
)
if len(pages) < 2:
raise Exception(
"Exception: At least two website pages required for chaining!"
)
for page in pages:
# Find out if any of the web pages are invalid before continuing
if not page_utils.is_valid_url(page):
raise Exception(
"Exception: Website page {%s} is not a valid URL!" % page
)
for page in pages:
self.generate_referral(None, page)
def generate_traffic_chain(self, pages, loops=1):
""" Similar to generate_referral_chain(), but for multiple loops. """
self.__check_scope()
for loop in range(loops):
self.generate_referral_chain(pages)
time.sleep(0.05)
############
def wait_for_element_present(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""Waits for an element to appear in the HTML of a page.
The element does not need be visible (it may be hidden)."""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
return self.__wait_for_shadow_element_present(selector)
return page_actions.wait_for_element_present(
self.driver, selector, by, timeout
)
def wait_for_element(self, selector, by=By.CSS_SELECTOR, timeout=None):
"""Waits for an element to appear in the HTML of a page.
The element must be visible (it cannot be hidden)."""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
return self.__wait_for_shadow_element_visible(selector)
return page_actions.wait_for_element_visible(
self.driver, selector, by, timeout
)
def get_element(self, selector, by=By.CSS_SELECTOR, timeout=None):
"""Same as wait_for_element_present() - returns the element.
The element does not need be visible (it may be hidden)."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return self.wait_for_element_present(selector, by=by, timeout=timeout)
def assert_element_present(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""Similar to wait_for_element_present(), but returns nothing.
Waits for an element to appear in the HTML of a page.
The element does not need be visible (it may be hidden).
Returns True if successful. Default timeout = SMALL_TIMEOUT."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if type(selector) is list:
self.assert_elements_present(selector, by=by, timeout=timeout)
return True
if self.__is_shadow_selector(selector):
self.__assert_shadow_element_present(selector)
return True
self.wait_for_element_present(selector, by=by, timeout=timeout)
if self.recorder_mode:
url = self.get_current_url()
if url and len(url) > 0:
if ("http:") in url or ("https:") in url or ("file:") in url:
if self.get_session_storage_item("pause_recorder") == "no":
time_stamp = self.execute_script("return Date.now();")
origin = self.get_origin()
action = ["as_ep", selector, origin, time_stamp]
self.__extra_actions.append(action)
return True
def assert_elements_present(self, *args, **kwargs):
"""Similar to self.assert_element_present(),
but can assert that multiple elements are present in the HTML.
The input is a list of elements.
Optional kwargs include "by" and "timeout" (used by all selectors).
Raises an exception if any of the elements are not visible.
Examples:
self.assert_elements_present("head", "style", "script", "body")
OR
self.assert_elements_present(["head", "body", "h1", "h2"])
"""
self.__check_scope()
selectors = []
timeout = None
by = By.CSS_SELECTOR
for kwarg in kwargs:
if kwarg == "timeout":
timeout = kwargs["timeout"]
elif kwarg == "by":
by = kwargs["by"]
elif kwarg == "selector":
selector = kwargs["selector"]
if type(selector) is str:
selectors.append(selector)
elif type(selector) is list:
selectors_list = selector
for selector in selectors_list:
if type(selector) is str:
selectors.append(selector)
else:
raise Exception('Unknown kwarg: "%s"!' % kwarg)
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
for arg in args:
if type(arg) is list:
for selector in arg:
if type(selector) is str:
selectors.append(selector)
elif type(arg) is str:
selectors.append(arg)
for selector in selectors:
if self.__is_shadow_selector(selector):
self.__assert_shadow_element_visible(selector)
continue
self.wait_for_element_present(selector, by=by, timeout=timeout)
continue
return True
def find_element(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Same as wait_for_element_visible() - returns the element """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_element_visible(selector, by=by, timeout=timeout)
    def assert_element(self, selector, by=By.CSS_SELECTOR, timeout=None):
        """Similar to wait_for_element_visible(), but returns nothing.
        As above, will raise an exception if nothing can be found.
        Returns True if successful. Default timeout = SMALL_TIMEOUT."""
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        # A list of selectors means assert that all of them are visible
        if type(selector) is list:
            self.assert_elements(selector, by=by, timeout=timeout)
            return True
        if self.__is_shadow_selector(selector):
            self.__assert_shadow_element_visible(selector)
            return True
        self.wait_for_element_visible(selector, by=by, timeout=timeout)
        if self.demo_mode:
            # Highlight the element with an on-screen success message
            selector, by = self.__recalculate_selector(
                selector, by, xp_ok=False
            )
            a_t = "ASSERT"
            if self._language != "English":
                from seleniumbase.fixtures.words import SD
                a_t = SD.translate_assert(self._language)
            messenger_post = "%s %s: %s" % (a_t, by.upper(), selector)
            self.__highlight_with_assert_success(messenger_post, selector, by)
        if self.recorder_mode:
            # Save this assertion so the recorder can replay it later
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["as_el", selector, origin, time_stamp]
                        self.__extra_actions.append(action)
        return True
def assert_element_visible(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""Same as self.assert_element()
As above, will raise an exception if nothing can be found."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.assert_element(selector, by=by, timeout=timeout)
return True
def assert_elements(self, *args, **kwargs):
"""Similar to self.assert_element(), but can assert multiple elements.
The input is a list of elements.
Optional kwargs include "by" and "timeout" (used by all selectors).
Raises an exception if any of the elements are not visible.
Examples:
self.assert_elements("h1", "h2", "h3")
OR
self.assert_elements(["h1", "h2", "h3"])"""
self.__check_scope()
selectors = []
timeout = None
by = By.CSS_SELECTOR
for kwarg in kwargs:
if kwarg == "timeout":
timeout = kwargs["timeout"]
elif kwarg == "by":
by = kwargs["by"]
elif kwarg == "selector":
selector = kwargs["selector"]
if type(selector) is str:
selectors.append(selector)
elif type(selector) is list:
selectors_list = selector
for selector in selectors_list:
if type(selector) is str:
selectors.append(selector)
else:
raise Exception('Unknown kwarg: "%s"!' % kwarg)
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
for arg in args:
if type(arg) is list:
for selector in arg:
if type(selector) is str:
selectors.append(selector)
elif type(arg) is str:
selectors.append(arg)
for selector in selectors:
if self.__is_shadow_selector(selector):
self.__assert_shadow_element_visible(selector)
continue
self.wait_for_element_visible(selector, by=by, timeout=timeout)
if self.demo_mode:
selector, by = self.__recalculate_selector(selector, by)
a_t = "ASSERT"
if self._language != "English":
from seleniumbase.fixtures.words import SD
a_t = SD.translate_assert(self._language)
messenger_post = "%s %s: %s" % (a_t, by.upper(), selector)
self.__highlight_with_assert_success(
messenger_post, selector, by
)
continue
return True
def assert_elements_visible(self, *args, **kwargs):
"""Same as self.assert_elements()
Raises an exception if any element cannot be found."""
return self.assert_elements(*args, **kwargs)
############
def wait_for_text_visible(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
return self.__wait_for_shadow_text_visible(text, selector)
return page_actions.wait_for_text_visible(
self.driver, text, selector, by, timeout
)
def wait_for_exact_text_visible(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.__is_shadow_selector(selector):
return self.__wait_for_exact_shadow_text_visible(text, selector)
return page_actions.wait_for_exact_text_visible(
self.driver, text, selector, by, timeout
)
def wait_for_text(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
""" The shorter version of wait_for_text_visible() """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_text_visible(
text, selector, by=by, timeout=timeout
)
def find_text(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
""" Same as wait_for_text_visible() - returns the element """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_text_visible(
text, selector, by=by, timeout=timeout
)
def assert_text_visible(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
""" Same as assert_text() """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.assert_text(text, selector, by=by, timeout=timeout)
    def assert_text(
        self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
    ):
        """Similar to wait_for_text_visible()
        Raises an exception if the element or the text is not found.
        Returns True if successful. Default timeout = SMALL_TIMEOUT."""
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        selector, by = self.__recalculate_selector(selector, by)
        if self.__is_shadow_selector(selector):
            self.__assert_shadow_text_visible(text, selector)
            return True
        self.wait_for_text_visible(text, selector, by=by, timeout=timeout)
        if self.demo_mode:
            # Highlight the element with an on-screen success message
            a_t = "ASSERT TEXT"
            i_n = "in"
            if self._language != "English":
                from seleniumbase.fixtures.words import SD
                a_t = SD.translate_assert_text(self._language)
                i_n = SD.translate_in(self._language)
            messenger_post = "%s: {%s} %s %s: %s" % (
                a_t,
                text,
                i_n,
                by.upper(),
                selector,
            )
            self.__highlight_with_assert_success(messenger_post, selector, by)
        if self.recorder_mode:
            # Save this assertion so the recorder can replay it later
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        text_selector = [text, selector]
                        action = ["as_te", text_selector, origin, time_stamp]
                        self.__extra_actions.append(action)
        return True
    def assert_exact_text(
        self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
    ):
        """Similar to assert_text(), but the text must be exact, rather than
        exist as a subset of the full text.
        (Extra whitespace at the beginning or the end doesn't count.)
        Raises an exception if the element or the text is not found.
        Returns True if successful. Default timeout = SMALL_TIMEOUT."""
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        selector, by = self.__recalculate_selector(selector, by)
        if self.__is_shadow_selector(selector):
            self.__assert_exact_shadow_text_visible(text, selector)
            return True
        self.wait_for_exact_text_visible(
            text, selector, by=by, timeout=timeout
        )
        if self.demo_mode:
            # Highlight the element with an on-screen success message
            a_t = "ASSERT EXACT TEXT"
            i_n = "in"
            if self._language != "English":
                from seleniumbase.fixtures.words import SD
                a_t = SD.translate_assert_exact_text(self._language)
                i_n = SD.translate_in(self._language)
            messenger_post = "%s: {%s} %s %s: %s" % (
                a_t,
                text,
                i_n,
                by.upper(),
                selector,
            )
            self.__highlight_with_assert_success(messenger_post, selector, by)
        if self.recorder_mode:
            # Save this assertion so the recorder can replay it later
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        text_selector = [text, selector]
                        action = ["as_et", text_selector, origin, time_stamp]
                        self.__extra_actions.append(action)
        return True
############
def wait_for_link_text_present(self, link_text, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 5)):
shared_utils.check_if_time_limit_exceeded()
try:
if not self.is_link_text_present(link_text):
raise Exception(
"Link text {%s} was not found!" % link_text
)
return
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.2)
message = "Link text {%s} was not present after %s seconds!" % (
link_text,
timeout,
)
page_actions.timeout_exception("NoSuchElementException", message)
def wait_for_partial_link_text_present(self, link_text, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 5)):
shared_utils.check_if_time_limit_exceeded()
try:
if not self.is_partial_link_text_present(link_text):
raise Exception(
"Partial Link text {%s} was not found!" % link_text
)
return
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.2)
message = (
"Partial Link text {%s} was not present after %s seconds!"
"" % (link_text, timeout)
)
page_actions.timeout_exception("NoSuchElementException", message)
def wait_for_link_text_visible(self, link_text, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_element_visible(
link_text, by=By.LINK_TEXT, timeout=timeout
)
def wait_for_link_text(self, link_text, timeout=None):
""" The shorter version of wait_for_link_text_visible() """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_link_text_visible(link_text, timeout=timeout)
def find_link_text(self, link_text, timeout=None):
""" Same as wait_for_link_text_visible() - returns the element """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_link_text_visible(link_text, timeout=timeout)
    def assert_link_text(self, link_text, timeout=None):
        """Similar to wait_for_link_text_visible(), but returns nothing.
        As above, will raise an exception if nothing can be found.
        Returns True if successful. Default timeout = SMALL_TIMEOUT.
        In Demo Mode, also highlights the link with a (translated) message.
        In Recorder Mode, logs the assertion as an extra recorded action."""
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        self.wait_for_link_text_visible(link_text, timeout=timeout)
        if self.demo_mode:
            # Demo Mode: highlight the asserted link on the page.
            a_t = "ASSERT LINK TEXT"
            if self._language != "English":
                from seleniumbase.fixtures.words import SD
                a_t = SD.translate_assert_link_text(self._language)
            messenger_post = "%s: {%s}" % (a_t, link_text)
            self.__highlight_with_assert_success(
                messenger_post, link_text, by=By.LINK_TEXT
            )
        if self.recorder_mode:
            # Recorder Mode: save this assertion (unless recording is paused).
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["as_lt", link_text, origin, time_stamp]
                        self.__extra_actions.append(action)
        return True
def wait_for_partial_link_text(self, partial_link_text, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_element_visible(
partial_link_text, by=By.PARTIAL_LINK_TEXT, timeout=timeout
)
def find_partial_link_text(self, partial_link_text, timeout=None):
""" Same as wait_for_partial_link_text() - returns the element """
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_partial_link_text(
partial_link_text, timeout=timeout
)
    def assert_partial_link_text(self, partial_link_text, timeout=None):
        """Similar to wait_for_partial_link_text(), but returns nothing.
        As above, will raise an exception if nothing can be found.
        Returns True if successful. Default timeout = SMALL_TIMEOUT.
        In Demo Mode, also highlights the link with a (translated) message."""
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        self.wait_for_partial_link_text(partial_link_text, timeout=timeout)
        if self.demo_mode:
            # Demo Mode: highlight the asserted link on the page.
            a_t = "ASSERT PARTIAL LINK TEXT"
            if self._language != "English":
                from seleniumbase.fixtures.words import SD
                a_t = SD.translate_assert_link_text(self._language)
            messenger_post = "%s: {%s}" % (a_t, partial_link_text)
            self.__highlight_with_assert_success(
                messenger_post, partial_link_text, by=By.PARTIAL_LINK_TEXT
            )
        return True
############
def wait_for_element_absent(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""Waits for an element to no longer appear in the HTML of a page.
A hidden element counts as a present element, which fails this assert.
If waiting for elements to be hidden instead of nonexistent,
use wait_for_element_not_visible() instead.
"""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_element_absent(
self.driver, selector, by, timeout
)
def assert_element_absent(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""Similar to wait_for_element_absent()
As above, will raise an exception if the element stays present.
A hidden element counts as a present element, which fails this assert.
If you want to assert that elements are hidden instead of nonexistent,
use assert_element_not_visible() instead.
(Note that hidden elements are still present in the HTML of the page.)
Returns True if successful. Default timeout = SMALL_TIMEOUT."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.wait_for_element_absent(selector, by=by, timeout=timeout)
return True
############
def wait_for_element_not_visible(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""Waits for an element to no longer be visible on a page.
The element can be non-existent in the HTML or hidden on the page
to qualify as not visible."""
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_element_not_visible(
self.driver, selector, by, timeout
)
    def assert_element_not_visible(
        self, selector, by=By.CSS_SELECTOR, timeout=None
    ):
        """Similar to wait_for_element_not_visible()
        As above, will raise an exception if the element stays visible.
        Returns True if successful. Default timeout = SMALL_TIMEOUT.
        In Recorder Mode, logs the assertion as an extra recorded action."""
        self.__check_scope()
        if not timeout:
            timeout = settings.SMALL_TIMEOUT
        if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
            timeout = self.__get_new_timeout(timeout)
        self.wait_for_element_not_visible(selector, by=by, timeout=timeout)
        if self.recorder_mode:
            # Recorder Mode: save this assertion (unless recording is paused).
            url = self.get_current_url()
            if url and len(url) > 0:
                if ("http:") in url or ("https:") in url or ("file:") in url:
                    if self.get_session_storage_item("pause_recorder") == "no":
                        time_stamp = self.execute_script("return Date.now();")
                        origin = self.get_origin()
                        action = ["asenv", selector, origin, time_stamp]
                        self.__extra_actions.append(action)
        return True
############
def wait_for_text_not_visible(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_text_not_visible(
self.driver, text, selector, by, timeout
)
def assert_text_not_visible(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
"""Similar to wait_for_text_not_visible()
Raises an exception if the text is still visible after timeout.
Returns True if successful. Default timeout = SMALL_TIMEOUT."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_text_not_visible(
text, selector, by=by, timeout=timeout
)
############
def wait_for_attribute_not_present(
self, selector, attribute, value=None, by=By.CSS_SELECTOR, timeout=None
):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_attribute_not_present(
self.driver, selector, attribute, value, by, timeout
)
def assert_attribute_not_present(
self, selector, attribute, value=None, by=By.CSS_SELECTOR, timeout=None
):
"""Similar to wait_for_attribute_not_present()
Raises an exception if the attribute is still present after timeout.
Returns True if successful. Default timeout = SMALL_TIMEOUT."""
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_attribute_not_present(
selector, attribute, value=value, by=by, timeout=timeout
)
############
def wait_for_and_accept_alert(self, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_accept_alert(self.driver, timeout)
def wait_for_and_dismiss_alert(self, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_dismiss_alert(self.driver, timeout)
def wait_for_and_switch_to_alert(self, timeout=None):
self.__check_scope()
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_switch_to_alert(self.driver, timeout)
############
def accept_alert(self, timeout=None):
""" Same as wait_for_and_accept_alert(), but smaller default T_O """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_accept_alert(self.driver, timeout)
def dismiss_alert(self, timeout=None):
""" Same as wait_for_and_dismiss_alert(), but smaller default T_O """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_dismiss_alert(self.driver, timeout)
def switch_to_alert(self, timeout=None):
""" Same as wait_for_and_switch_to_alert(), but smaller default T_O """
self.__check_scope()
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_switch_to_alert(self.driver, timeout)
############
    def __assert_eq(self, *args, **kwargs):
        """Minified assert_equal() using only the list diff.
        Runs unittest's assertEqual(); on failure, rebuilds a trimmed
        failure message that keeps only the "First differing element"
        section header (plus a few lines after it) and the +/-/? diff
        lines, then re-raises that smaller message as an Exception."""
        minified_exception = None
        try:
            self.assertEqual(*args, **kwargs)
        except Exception as e:
            str_e = str(e)
            minified_exception = "\nAssertionError:\n"
            lines = str_e.split("\n")
            # countdown = number of lines still to copy after a section
            # header line was matched; skip_lines suppresses copying for
            # the redundant "First list" section.
            countdown = 3
            countdown_on = False
            first_differing = False
            skip_lines = False
            for line in lines:
                if countdown_on:
                    if not skip_lines:
                        minified_exception += line + "\n"
                    countdown = countdown - 1
                    if countdown == 0:
                        countdown_on = False
                        skip_lines = False
                elif line.startswith("First differing"):
                    first_differing = True
                    countdown_on = True
                    countdown = 3
                    minified_exception += line + "\n"
                elif line.startswith("First list"):
                    countdown_on = True
                    countdown = 3
                    # Only keep "First list" if "First differing" was absent
                    # (avoids duplicating the same diff information).
                    if not first_differing:
                        minified_exception += line + "\n"
                    else:
                        skip_lines = True
                elif line.startswith("F"):
                    countdown_on = True
                    countdown = 3
                    minified_exception += line + "\n"
                elif line.startswith("+") or line.startswith("-"):
                    # Unified-diff style added/removed lines.
                    minified_exception += line + "\n"
                elif line.startswith("?"):
                    # Diff marker line pointing at changed characters.
                    minified_exception += line + "\n"
                elif line.strip().startswith("*"):
                    minified_exception += line + "\n"
        if minified_exception:
            raise Exception(minified_exception)
    def check_window(
        self,
        name="default",
        level=0,
        baseline=False,
        check_domain=True,
        full_diff=False,
    ):
        """*** Automated Visual Testing with SeleniumBase ***
        The first time a test calls self.check_window() for a unique "name"
        parameter provided, it will set a visual baseline, meaning that it
        creates a folder, saves the URL to a file, saves the current window
        screenshot to a file, and creates the following three files
        with the listed data saved:
        tags_level1.txt  ->  HTML tags from the window
        tags_level2.txt  ->  HTML tags + attributes from the window
        tags_level3.txt  ->  HTML tags + attributes/values from the window
        Baseline folders are named based on the test name and the name
        parameter passed to self.check_window(). The same test can store
        multiple baseline folders.
        If the baseline is being set/reset, the "level" doesn't matter.
        After the first run of self.check_window(), it will compare the
        HTML tags of the latest window to the one from the initial run.
        Here's how the level system works:
        * level=0 ->
            DRY RUN ONLY - Will perform comparisons to the baseline (and
                           print out any differences that are found) but
                           won't fail the test even if differences exist.
        * level=1 ->
            HTML tags are compared to tags_level1.txt
        * level=2 ->
            HTML tags are compared to tags_level1.txt and
            HTML tags + attributes are compared to tags_level2.txt
        * level=3 ->
            HTML tags are compared to tags_level1.txt and
            HTML tags + attributes are compared to tags_level2.txt and
            HTML tags + attributes/values are compared to tags_level3.txt
        As shown, Level-3 is the most strict, Level-1 is the least strict.
        If the comparisons from the latest window to the existing baseline
        don't match, the current test will fail, except for Level-0 tests.
        You can reset the visual baseline on the command line by using:
            --visual_baseline
        As long as "--visual_baseline" is used on the command line while
        running tests, the self.check_window() method cannot fail because
        it will rebuild the visual baseline rather than comparing the html
        tags of the latest run to the existing baseline. If there are any
        expected layout changes to a website that you're testing, you'll
        need to reset the baseline to prevent unnecessary failures.
        self.check_window() will fail with "Page Domain Mismatch Failure"
        if the page domain doesn't match the domain of the baseline,
        unless "check_domain" is set to False when calling check_window().
        If you want to use self.check_window() to compare a web page to
        a later version of itself from within the same test run, you can
        add the parameter "baseline=True" to the first time you call
        self.check_window() in a test to use that as the baseline. This
        only makes sense if you're calling self.check_window() more than
        once with the same name parameter in the same test.
        If "full_diff" is set to False, the error output will only
        include the first differing element in the list comparison.
        Set "full_diff" to True if you want to see the full output.
        Automated Visual Testing with self.check_window() is not very
        effective for websites that have dynamic content that changes
        the layout and structure of web pages. For those, you're much
        better off using regular SeleniumBase functional testing.
        Example usage:
            self.check_window(name="testing", level=0)
            self.check_window(name="xkcd_home", level=1)
            self.check_window(name="github_page", level=2)
            self.check_window(name="wikipedia_page", level=3)
        """
        self.wait_for_ready_state_complete()
        # Normalize string levels ("0".."3") to ints; reject anything else.
        if level == "0":
            level = 0
        if level == "1":
            level = 1
        if level == "2":
            level = 2
        if level == "3":
            level = 3
        if level != 0 and level != 1 and level != 2 and level != 3:
            raise Exception('Parameter "level" must be set to 0, 1, 2, or 3!')
        if self.demo_mode:
            message = (
                "WARNING: Using check_window() from Demo Mode may lead "
                "to unexpected results caused by Demo Mode HTML changes."
            )
            logging.info(message)
        # Baseline folders are keyed by test id + the "name" parameter.
        test_id = self.__get_display_id().split("::")[-1]
        if not name or len(name) < 1:
            name = "default"
        name = str(name)
        from seleniumbase.core import visual_helper
        visual_helper.visual_baseline_folder_setup()
        baseline_dir = constants.VisualBaseline.STORAGE_FOLDER
        visual_baseline_path = baseline_dir + "/" + test_id + "/" + name
        page_url_file = visual_baseline_path + "/page_url.txt"
        screenshot_file = visual_baseline_path + "/screenshot.png"
        level_1_file = visual_baseline_path + "/tags_level_1.txt"
        level_2_file = visual_baseline_path + "/tags_level_2.txt"
        level_3_file = visual_baseline_path + "/tags_level_3.txt"
        # (Re)set the baseline if requested, or if any baseline file
        # from a previous run is missing.
        set_baseline = False
        if baseline or self.visual_baseline:
            set_baseline = True
        if not os.path.exists(visual_baseline_path):
            set_baseline = True
            try:
                os.makedirs(visual_baseline_path)
            except Exception:
                pass  # Only reachable during multi-threaded test runs
        if not os.path.exists(page_url_file):
            set_baseline = True
        if not os.path.exists(screenshot_file):
            set_baseline = True
        if not os.path.exists(level_1_file):
            set_baseline = True
        if not os.path.exists(level_2_file):
            set_baseline = True
        if not os.path.exists(level_3_file):
            set_baseline = True
        # Extract the three levels of tag data from the current page.
        page_url = self.get_current_url()
        soup = self.get_beautiful_soup()
        html_tags = soup.body.find_all()
        level_1 = [[tag.name] for tag in html_tags]
        level_1 = json.loads(json.dumps(level_1))  # Tuples become lists
        level_2 = [[tag.name, sorted(tag.attrs.keys())] for tag in html_tags]
        level_2 = json.loads(json.dumps(level_2))  # Tuples become lists
        level_3 = [[tag.name, sorted(tag.attrs.items())] for tag in html_tags]
        level_3 = json.loads(json.dumps(level_3))  # Tuples become lists
        if set_baseline:
            # Save the current page state as the new baseline.
            self.save_screenshot("screenshot.png", visual_baseline_path)
            out_file = codecs.open(page_url_file, "w+", encoding="utf-8")
            out_file.writelines(page_url)
            out_file.close()
            out_file = codecs.open(level_1_file, "w+", encoding="utf-8")
            out_file.writelines(json.dumps(level_1))
            out_file.close()
            out_file = codecs.open(level_2_file, "w+", encoding="utf-8")
            out_file.writelines(json.dumps(level_2))
            out_file.close()
            out_file = codecs.open(level_3_file, "w+", encoding="utf-8")
            out_file.writelines(json.dumps(level_3))
            out_file.close()
        if not set_baseline:
            # Load the stored baseline and compare against the current page.
            f = open(page_url_file, "r")
            page_url_data = f.read().strip()
            f.close()
            f = open(level_1_file, "r")
            level_1_data = json.loads(f.read())
            f.close()
            f = open(level_2_file, "r")
            level_2_data = json.loads(f.read())
            f.close()
            f = open(level_3_file, "r")
            level_3_data = json.loads(f.read())
            f.close()
            domain_fail = (
                "\n*\nPage Domain Mismatch Failure: "
                "Current Page Domain doesn't match the Page Domain of the "
                "Baseline! Can't compare two completely different sites! "
                "Run with --visual_baseline to reset the baseline!"
            )
            level_1_failure = (
                "\n*\n*** Exception: <Level 1> Visual Diff Failure:\n"
                "* HTML tags don't match the baseline!"
            )
            level_2_failure = (
                "\n*\n*** Exception: <Level 2> Visual Diff Failure:\n"
                "* HTML tag attribute names don't match the baseline!"
            )
            level_3_failure = (
                "\n*\n*** Exception: <Level 3> Visual Diff Failure:\n"
                "* HTML tag attribute values don't match the baseline!"
            )
            page_domain = self.get_domain_url(page_url)
            page_data_domain = self.get_domain_url(page_url_data)
            unittest.TestCase.maxDiff = 3200
            if level != 0 and check_domain:
                self.assertEqual(page_data_domain, page_domain, domain_fail)
            unittest.TestCase.maxDiff = 6400  # Use `None` for no limit
            if level == 3:
                if not full_diff:
                    self.__assert_eq(level_3_data, level_3, level_3_failure)
                else:
                    self.assertEqual(level_3_data, level_3, level_3_failure)
            unittest.TestCase.maxDiff = 3200
            if level == 2:
                if not full_diff:
                    self.__assert_eq(level_2_data, level_2, level_2_failure)
                else:
                    self.assertEqual(level_2_data, level_2, level_2_failure)
            if level == 1:
                if not full_diff:
                    self.__assert_eq(level_1_data, level_1, level_1_failure)
                else:
                    self.assertEqual(level_1_data, level_1, level_1_failure)
            unittest.TestCase.maxDiff = 6400  # Use `None` for no limit
            if level == 0:
                # Level-0 Dry Run: run all three comparisons, but only
                # print the differences instead of failing the test.
                try:
                    unittest.TestCase.maxDiff = 3200
                    if check_domain:
                        self.assertEqual(
                            page_domain, page_data_domain, domain_fail
                        )
                    try:
                        if not full_diff:
                            self.__assert_eq(
                                level_1_data, level_1, level_1_failure
                            )
                        else:
                            self.assertEqual(
                                level_1_data, level_1, level_1_failure
                            )
                    except Exception as e:
                        print(e)
                    try:
                        if not full_diff:
                            self.__assert_eq(
                                level_2_data, level_2, level_2_failure
                            )
                        else:
                            self.assertEqual(
                                level_2_data, level_2, level_2_failure
                            )
                    except Exception as e:
                        print(e)
                    unittest.TestCase.maxDiff = 6400  # Use `None` for no limit
                    if not full_diff:
                        self.__assert_eq(
                            level_3_data, level_3, level_3_failure
                        )
                    else:
                        self.assertEqual(
                            level_3_data, level_3, level_3_failure
                        )
                except Exception as e:
                    print(e)  # Level-0 Dry Run (Only print the differences)
        unittest.TestCase.maxDiff = None  # Reset unittest.TestCase.maxDiff
############
def __get_new_timeout(self, timeout):
""" When using --timeout_multiplier=#.# """
import math
self.__check_scope()
try:
timeout_multiplier = float(self.timeout_multiplier)
if timeout_multiplier <= 0.5:
timeout_multiplier = 0.5
timeout = int(math.ceil(timeout_multiplier * timeout))
return timeout
except Exception:
# Wrong data type for timeout_multiplier (expecting int or float)
return timeout
############
    def __check_scope(self):
        """Verify that this method was called through a test class's `self`
        object after setUp() has initialized the test variables.
        Raises OutOfScopeException with a detailed explanation otherwise."""
        if hasattr(self, "browser"):  # self.browser stores the type of browser
            return  # All good: setUp() already initialized variables in "self"
        else:
            from seleniumbase.common.exceptions import OutOfScopeException
            message = (
                "\n It looks like you are trying to call a SeleniumBase method"
                "\n from outside the scope of your test class's `self` object,"
                "\n which is initialized by calling BaseCase's setUp() method."
                "\n The `self` object is where all test variables are defined."
                "\n If you created a custom setUp() method (that overrided the"
                "\n the default one), make sure to call super().setUp() in it."
                "\n When using page objects, be sure to pass the `self` object"
                "\n from your test class into your page object methods so that"
                "\n they can call BaseCase class methods with all the required"
                "\n variables, which are initialized during the setUp() method"
                "\n that runs automatically before all tests called by pytest."
            )
            raise OutOfScopeException(message)
############
def __get_exception_message(self):
"""This method extracts the message from an exception if there
was an exception that occurred during the test, assuming
that the exception was in a try/except block and not thrown."""
exception_info = sys.exc_info()[1]
if hasattr(exception_info, "msg"):
exc_message = exception_info.msg
elif hasattr(exception_info, "message"):
exc_message = exception_info.message
else:
exc_message = sys.exc_info()
return exc_message
def __add_deferred_assert_failure(self):
""" Add a deferred_assert failure to a list for future processing. """
self.__check_scope()
current_url = self.driver.current_url
message = self.__get_exception_message()
self.__deferred_assert_failures.append(
"CHECK #%s: (%s)\n %s"
% (self.__deferred_assert_count, current_url, message)
)
############
def deferred_assert_element(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
"""A non-terminating assertion for an element on a page.
Failures will be saved until the process_deferred_asserts()
method is called from inside a test, likely at the end of it."""
self.__check_scope()
if not timeout:
timeout = settings.MINI_TIMEOUT
if self.timeout_multiplier and timeout == settings.MINI_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__deferred_assert_count += 1
try:
url = self.get_current_url()
if url == self.__last_url_of_deferred_assert:
timeout = 1
else:
self.__last_url_of_deferred_assert = url
except Exception:
pass
try:
self.wait_for_element_visible(selector, by=by, timeout=timeout)
return True
except Exception:
self.__add_deferred_assert_failure()
return False
def deferred_assert_text(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
"""A non-terminating assertion for text from an element on a page.
Failures will be saved until the process_deferred_asserts()
method is called from inside a test, likely at the end of it."""
self.__check_scope()
if not timeout:
timeout = settings.MINI_TIMEOUT
if self.timeout_multiplier and timeout == settings.MINI_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__deferred_assert_count += 1
try:
url = self.get_current_url()
if url == self.__last_url_of_deferred_assert:
timeout = 1
else:
self.__last_url_of_deferred_assert = url
except Exception:
pass
try:
self.wait_for_text_visible(text, selector, by=by, timeout=timeout)
return True
except Exception:
self.__add_deferred_assert_failure()
return False
def deferred_check_window(
self,
name="default",
level=0,
baseline=False,
check_domain=True,
full_diff=False,
):
"""A non-terminating assertion for the check_window() method.
Failures will be saved until the process_deferred_asserts()
method is called from inside a test, likely at the end of it."""
self.__check_scope()
self.__deferred_assert_count += 1
try:
self.check_window(
name=name,
level=level,
baseline=baseline,
check_domain=check_domain,
full_diff=full_diff,
)
return True
except Exception:
self.__add_deferred_assert_failure()
return False
def process_deferred_asserts(self, print_only=False):
"""To be used with any test that uses deferred_asserts, which are
non-terminating verifications that only raise exceptions
after this method is called.
This is useful for pages with multiple elements to be checked when
you want to find as many bugs as possible in a single test run
before having all the exceptions get raised simultaneously.
Might be more useful if this method is called after processing all
the deferred asserts on a single html page so that the failure
screenshot matches the location of the deferred asserts.
If "print_only" is set to True, the exception won't get raised."""
if self.__deferred_assert_failures:
exception_output = ""
exception_output += "\n*** DEFERRED ASSERTION FAILURES FROM: "
exception_output += "%s\n" % self.id()
all_failing_checks = self.__deferred_assert_failures
self.__deferred_assert_failures = []
for tb in all_failing_checks:
exception_output += "%s\n" % tb
if print_only:
print(exception_output)
else:
raise Exception(exception_output.replace("\\n", "\n"))
############
# Alternate naming scheme for the "deferred_assert" methods.
def delayed_assert_element(
self, selector, by=By.CSS_SELECTOR, timeout=None
):
""" Same as self.deferred_assert_element() """
return self.deferred_assert_element(
selector=selector, by=by, timeout=timeout
)
def delayed_assert_text(
self, text, selector="html", by=By.CSS_SELECTOR, timeout=None
):
""" Same as self.deferred_assert_text() """
return self.deferred_assert_text(
text=text, selector=selector, by=by, timeout=timeout
)
def delayed_check_window(
self,
name="default",
level=0,
baseline=False,
check_domain=True,
full_diff=False
):
""" Same as self.deferred_check_window() """
return self.deferred_check_window(
name=name,
level=level,
baseline=baseline,
check_domain=check_domain,
full_diff=full_diff,
)
def process_delayed_asserts(self, print_only=False):
""" Same as self.process_deferred_asserts() """
self.process_deferred_asserts(print_only=print_only)
############
    def __js_click(self, selector, by=By.CSS_SELECTOR):
        """Clicks an element using pure JS (a dispatched MouseEvent).
        Does not use jQuery. Only the first matching element is clicked."""
        selector, by = self.__recalculate_selector(selector, by)
        css_selector = self.convert_to_css_selector(selector, by=by)
        css_selector = re.escape(css_selector)  # Add "\\" to special chars
        css_selector = self.__escape_quotes_if_needed(css_selector)
        # Dispatch a synthetic bubbling "click" MouseEvent on the element.
        script = (
            """var simulateClick = function (elem) {
                   var evt = new MouseEvent('click', {
                       bubbles: true,
                       cancelable: true,
                       view: window
                   });
                   var canceled = !elem.dispatchEvent(evt);
               };
               var someLink = document.querySelector('%s');
               simulateClick(someLink);"""
            % css_selector
        )
        self.execute_script(script)
    def __js_click_all(self, selector, by=By.CSS_SELECTOR):
        """Clicks ALL matching elements using pure JS (dispatched
        MouseEvents). Does not use jQuery."""
        selector, by = self.__recalculate_selector(selector, by)
        css_selector = self.convert_to_css_selector(selector, by=by)
        css_selector = re.escape(css_selector)  # Add "\\" to special chars
        css_selector = self.__escape_quotes_if_needed(css_selector)
        # Dispatch a synthetic bubbling "click" MouseEvent on each match.
        script = (
            """var simulateClick = function (elem) {
                   var evt = new MouseEvent('click', {
                       bubbles: true,
                       cancelable: true,
                       view: window
                   });
                   var canceled = !elem.dispatchEvent(evt);
               };
               var $elements = document.querySelectorAll('%s');
               var index = 0, length = $elements.length;
               for(; index < length; index++){
                   simulateClick($elements[index]);}"""
            % css_selector
        )
        self.execute_script(script)
    def __jquery_slow_scroll_to(self, selector, by=By.CSS_SELECTOR):
        """Smooth-scrolls the page to the element using a jQuery animation,
        then sleeps long enough for the animation to complete.
        Falls back to a non-jQuery slow scroll if jQuery isn't active."""
        selector, by = self.__recalculate_selector(selector, by)
        element = self.wait_for_element_present(
            selector, by=by, timeout=settings.SMALL_TIMEOUT
        )
        dist = js_utils.get_scroll_distance_to_element(self.driver, element)
        # Stretch the animation time for longer scroll distances,
        # capped at an extra 950ms.
        time_offset = 0
        try:
            if dist and abs(dist) > constants.Values.SSMD:
                time_offset = int(
                    float(abs(dist) - constants.Values.SSMD) / 12.5
                )
                if time_offset > 950:
                    time_offset = 950
        except Exception:
            time_offset = 0
        scroll_time_ms = 550 + time_offset
        sleep_time = 0.625 + (float(time_offset) / 1000.0)
        selector = self.convert_to_css_selector(selector, by=by)
        selector = self.__make_css_match_first_element_only(selector)
        # Animate scrollTop to 130px above the element's page offset.
        scroll_script = (
            """jQuery([document.documentElement, document.body]).animate({"""
            """scrollTop: jQuery('%s').offset().top - 130}, %s);"""
            % (selector, scroll_time_ms)
        )
        if js_utils.is_jquery_activated(self.driver):
            self.execute_script(scroll_script)
        else:
            self.__slow_scroll_to_element(element)
        self.sleep(sleep_time)
def __jquery_click(self, selector, by=By.CSS_SELECTOR):
""" Clicks an element using jQuery. Different from using pure JS. """
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_element_present(
selector, by=by, timeout=settings.SMALL_TIMEOUT
)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
click_script = """jQuery('%s')[0].click();""" % selector
self.safe_execute_script(click_script)
def __get_href_from_link_text(self, link_text, hard_fail=True):
href = self.get_link_attribute(link_text, "href", hard_fail)
if not href:
return None
if href.startswith("//"):
link = "http:" + href
elif href.startswith("/"):
url = self.driver.current_url
domain_url = self.get_domain_url(url)
link = domain_url + href
else:
link = href
return link
    def __click_dropdown_link_text(self, link_text, link_css):
        """When a link may be hidden under a dropdown menu, use this.
        Scans all "li" elements that have a class, hovers over the ones
        whose text contains the link text, and clicks the revealed link.
        If hovering fails (likely an outdated driver), navigates directly
        to the link's href instead. Returns True if a click (or direct
        navigation) happened; False otherwise."""
        soup = self.get_beautiful_soup()
        drop_down_list = []
        for item in soup.select("li[class]"):
            drop_down_list.append(item)
        # csstype: the attribute name used in link_css, e.g. "class" or "id".
        csstype = link_css.split("[")[1].split("=")[0]
        for item in drop_down_list:
            item_text_list = item.text.split("\n")
            if link_text in item_text_list and csstype in item.decode():
                # Build a CSS selector from the li's tag name + classes.
                dropdown_css = ""
                try:
                    for css_class in item["class"]:
                        dropdown_css += "."
                        dropdown_css += css_class
                except Exception:
                    continue
                dropdown_css = item.name + dropdown_css
                matching_dropdowns = self.find_visible_elements(dropdown_css)
                for dropdown in matching_dropdowns:
                    # The same class names might be used for multiple dropdowns
                    if dropdown.is_displayed():
                        try:
                            try:
                                page_actions.hover_element(
                                    self.driver,
                                    dropdown,
                                )
                            except Exception:
                                # If hovering fails, driver is likely outdated
                                # Time to go directly to the hidden link text
                                self.open(
                                    self.__get_href_from_link_text(link_text)
                                )
                                return True
                            page_actions.hover_element_and_click(
                                self.driver,
                                dropdown,
                                link_text,
                                click_by=By.LINK_TEXT,
                                timeout=0.12,
                            )
                            return True
                        except Exception:
                            pass
        return False
def __get_href_from_partial_link_text(self, link_text, hard_fail=True):
href = self.get_partial_link_text_attribute(
link_text, "href", hard_fail
)
if not href:
return None
if href.startswith("//"):
link = "http:" + href
elif href.startswith("/"):
url = self.driver.current_url
domain_url = self.get_domain_url(url)
link = domain_url + href
else:
link = href
return link
def __click_dropdown_partial_link_text(self, link_text, link_css):
""" When a partial link may be hidden under a dropdown, use this. """
soup = self.get_beautiful_soup()
drop_down_list = []
for item in soup.select("li[class]"):
drop_down_list.append(item)
csstype = link_css.split("[")[1].split("=")[0]
for item in drop_down_list:
item_text_list = item.text.split("\n")
if link_text in item_text_list and csstype in item.decode():
dropdown_css = ""
try:
for css_class in item["class"]:
dropdown_css += "."
dropdown_css += css_class
except Exception:
continue
dropdown_css = item.name + dropdown_css
matching_dropdowns = self.find_visible_elements(dropdown_css)
for dropdown in matching_dropdowns:
# The same class names might be used for multiple dropdowns
if dropdown.is_displayed():
try:
try:
page_actions.hover_element(
self.driver, dropdown
)
except Exception:
# If hovering fails, driver is likely outdated
# Time to go directly to the hidden link text
self.open(
self.__get_href_from_partial_link_text(
link_text
)
)
return True
page_actions.hover_element_and_click(
self.driver,
dropdown,
link_text,
click_by=By.LINK_TEXT,
timeout=0.12,
)
return True
except Exception:
pass
return False
def __recalculate_selector(self, selector, by, xp_ok=True):
"""Use autodetection to return the correct selector with "by" updated.
If "xp_ok" is False, don't call convert_css_to_xpath(), which is
used to make the ":contains()" selector valid outside JS calls."""
_type = type(selector) # First make sure the selector is a string
not_string = False
if sys.version_info[0] < 3:
if _type is not str and _type is not unicode: # noqa: F821
not_string = True
else:
if _type is not str:
not_string = True
if not_string:
msg = "Expecting a selector of type: \"<class 'str'>\" (string)!"
raise Exception('Invalid selector type: "%s"\n%s' % (_type, msg))
if page_utils.is_xpath_selector(selector):
by = By.XPATH
if page_utils.is_link_text_selector(selector):
selector = page_utils.get_link_text_from_selector(selector)
by = By.LINK_TEXT
if page_utils.is_partial_link_text_selector(selector):
selector = page_utils.get_partial_link_text_from_selector(selector)
by = By.PARTIAL_LINK_TEXT
if page_utils.is_name_selector(selector):
name = page_utils.get_name_from_selector(selector)
selector = '[name="%s"]' % name
by = By.CSS_SELECTOR
if xp_ok:
if ":contains(" in selector and by == By.CSS_SELECTOR:
selector = self.convert_css_to_xpath(selector)
by = By.XPATH
return (selector, by)
def __looks_like_a_page_url(self, url):
"""Returns True if the url parameter looks like a URL. This method
is slightly more lenient than page_utils.is_valid_url(url) due to
possible typos when calling self.get(url), which will try to
navigate to the page if a URL is detected, but will instead call
self.get_element(URL_AS_A_SELECTOR) if the input in not a URL."""
if (
url.startswith("http:")
or url.startswith("https:")
or url.startswith("://")
or url.startswith("chrome:")
or url.startswith("about:")
or url.startswith("data:")
or url.startswith("file:")
or url.startswith("edge:")
or url.startswith("opera:")
):
return True
else:
return False
    def __make_css_match_first_element_only(self, selector):
        """Transform a CSS selector so that it matches only the first
        element found (delegates to the page_utils implementation)."""
        # Only get the first match
        return page_utils.make_css_match_first_element_only(selector)
def __demo_mode_pause_if_active(self, tiny=False):
if self.demo_mode:
wait_time = settings.DEFAULT_DEMO_MODE_TIMEOUT
if self.demo_sleep:
wait_time = float(self.demo_sleep)
if not tiny:
time.sleep(wait_time)
else:
time.sleep(wait_time / 3.4)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def __slow_mode_pause_if_active(self):
if self.slow_mode:
wait_time = settings.DEFAULT_DEMO_MODE_TIMEOUT
if self.demo_sleep:
wait_time = float(self.demo_sleep)
time.sleep(wait_time)
    def __demo_mode_scroll_if_active(self, selector, by):
        """Slowly scroll to the element when Demo Mode is active."""
        if self.demo_mode:
            self.slow_scroll_to(selector, by=by)
    def __demo_mode_highlight_if_active(self, selector, by):
        """In Demo Mode, highlight the element (which also slow-scrolls).
        In Slow Mode, only perform the slow-scroll part of highlight()."""
        if self.demo_mode:
            # Includes self.slow_scroll_to(selector, by=by) by default
            self.highlight(selector, by=by)
        elif self.slow_mode:
            # Just do the slow scroll part of the highlight() method
            time.sleep(0.08)
            selector, by = self.__recalculate_selector(selector, by)
            element = self.wait_for_element_visible(
                selector, by=by, timeout=settings.SMALL_TIMEOUT
            )
            try:
                scroll_distance = js_utils.get_scroll_distance_to_element(
                    self.driver, element
                )
                if abs(scroll_distance) > constants.Values.SSMD:
                    # Too far for a smooth scroll: use the jQuery scroll
                    self.__jquery_slow_scroll_to(selector, by)
                else:
                    self.__slow_scroll_to_element(element)
            except (StaleElementReferenceException, ENI_Exception):
                # The element reference went stale: wait and retry once
                self.wait_for_ready_state_complete()
                time.sleep(0.12)
                element = self.wait_for_element_visible(
                    selector, by=by, timeout=settings.SMALL_TIMEOUT
                )
                self.__slow_scroll_to_element(element)
            time.sleep(0.12)
    def __scroll_to_element(self, element, selector=None, by=By.CSS_SELECTOR):
        """Instantly scroll to the given element via JavaScript.
        If the JS scroll fails and a selector was provided, wait for the
        page to be ready and re-locate the element before continuing."""
        success = js_utils.scroll_to_element(self.driver, element)
        if not success and selector:
            self.wait_for_ready_state_complete()
            element = page_actions.wait_for_element_visible(
                self.driver, selector, by, timeout=settings.SMALL_TIMEOUT
            )
        self.__demo_mode_pause_if_active(tiny=True)
    def __slow_scroll_to_element(self, element):
        """Smooth-scroll the page to the given element."""
        try:
            js_utils.slow_scroll_to_element(self.driver, element, self.browser)
        except Exception:
            # Scroll to the element instantly if the slow scroll fails
            js_utils.scroll_to_element(self.driver, element)
    def __highlight_with_assert_success(
        self, message, selector, by=By.CSS_SELECTOR
    ):
        """Highlight an element and display an assert-success message.
        Scrolls to the element first, then briefly applies a highlight
        effect via JS (or jQuery for ":contains"/":first" selectors)."""
        selector, by = self.__recalculate_selector(selector, by, xp_ok=False)
        element = self.wait_for_element_visible(
            selector, by=by, timeout=settings.SMALL_TIMEOUT
        )
        try:
            scroll_distance = js_utils.get_scroll_distance_to_element(
                self.driver, element
            )
            if abs(scroll_distance) > constants.Values.SSMD:
                # Too far for a smooth scroll: use the jQuery scroll
                self.__jquery_slow_scroll_to(selector, by)
            else:
                self.__slow_scroll_to_element(element)
        except Exception:
            # The element may have gone stale: wait and retry once
            self.wait_for_ready_state_complete()
            time.sleep(0.12)
            element = self.wait_for_element_visible(
                selector, by=by, timeout=settings.SMALL_TIMEOUT
            )
            self.__slow_scroll_to_element(element)
        try:
            selector = self.convert_to_css_selector(selector, by=by)
        except Exception:
            # Don't highlight if can't convert to CSS_SELECTOR
            return
        o_bs = ""  # original_box_shadow
        try:
            style = element.get_attribute("style")
        except Exception:
            self.wait_for_ready_state_complete()
            time.sleep(0.12)
            element = self.wait_for_element_visible(
                selector, by=By.CSS_SELECTOR, timeout=settings.SMALL_TIMEOUT
            )
            style = element.get_attribute("style")
        if style:
            if "box-shadow: " in style:
                # Save the original box-shadow so it can be restored later
                box_start = style.find("box-shadow: ")
                box_end = style.find(";", box_start) + 1
                original_box_shadow = style[box_start:box_end]
                o_bs = original_box_shadow
        if ":contains" not in selector and ":first" not in selector:
            selector = re.escape(selector)
            selector = self.__escape_quotes_if_needed(selector)
            self.__highlight_with_js_2(message, selector, o_bs)
        else:
            selector = self.__make_css_match_first_element_only(selector)
            selector = re.escape(selector)
            selector = self.__escape_quotes_if_needed(selector)
            try:
                self.__highlight_with_jquery_2(message, selector, o_bs)
            except Exception:
                pass  # JQuery probably couldn't load. Skip highlighting.
        time.sleep(0.065)
    def __highlight_with_js_2(self, message, selector, o_bs):
        """Highlight the element via JS and show an assert-success message.
        ("o_bs" is the element's original box-shadow, restored afterward.)"""
        js_utils.highlight_with_js_2(
            self.driver, message, selector, o_bs, self.message_duration
        )
    def __highlight_with_jquery_2(self, message, selector, o_bs):
        """Highlight the element via jQuery and show an assert-success
        message. (Used for selectors that JS querySelector can't handle.)"""
        js_utils.highlight_with_jquery_2(
            self.driver, message, selector, o_bs, self.message_duration
        )
    ############
    # Deprecated API kept for backwards-compatibility:
    from seleniumbase.common import decorators
    @decorators.deprecated("You should use re.escape() instead.")
    def jq_format(self, code):
        # DEPRECATED - re.escape() already performs the intended action.
        return js_utils._jq_format(code)
    ############
    def setUp(self, masterqa_mode=False):
        """
        Be careful if a subclass of BaseCase overrides setUp()
        You'll need to add the following line to the subclass setUp() method:
        super(SubClassOfBaseCase, self).setUp()
        """
        if not hasattr(self, "_using_sb_fixture") and self.__called_setup:
            # This test already called setUp()
            return
        self.__called_setup = True
        self.__called_teardown = False
        self.masterqa_mode = masterqa_mode
        self.is_pytest = None
        try:
            # This raises an exception if the test is not coming from pytest
            self.is_pytest = sb_config.is_pytest
        except Exception:
            # Not using pytest (probably nosetests)
            self.is_pytest = False
        if self.is_pytest:
            # pytest-specific code
            test_id = self.__get_test_id()
            self.test_id = test_id
            if hasattr(self, "_using_sb_fixture"):
                self.test_id = sb_config._test_id
            # Copy the test configuration from pytest's sb_config
            self.browser = sb_config.browser
            self.data = sb_config.data
            self.var1 = sb_config.var1
            self.var2 = sb_config.var2
            self.var3 = sb_config.var3
            self.slow_mode = sb_config.slow_mode
            self.demo_mode = sb_config.demo_mode
            self.demo_sleep = sb_config.demo_sleep
            self.highlights = sb_config.highlights
            self.time_limit = sb_config._time_limit
            sb_config.time_limit = sb_config._time_limit  # Reset between tests
            self.environment = sb_config.environment
            self.env = self.environment  # Add a shortened version
            self.with_selenium = sb_config.with_selenium  # Should be True
            self.headless = sb_config.headless
            self.headless_active = False
            self.headed = sb_config.headed
            self.xvfb = sb_config.xvfb
            self.locale_code = sb_config.locale_code
            self.interval = sb_config.interval
            self.start_page = sb_config.start_page
            self.log_path = sb_config.log_path
            self.with_testing_base = sb_config.with_testing_base
            self.with_basic_test_info = sb_config.with_basic_test_info
            self.with_screen_shots = sb_config.with_screen_shots
            self.with_page_source = sb_config.with_page_source
            self.with_db_reporting = sb_config.with_db_reporting
            self.with_s3_logging = sb_config.with_s3_logging
            self.protocol = sb_config.protocol
            self.servername = sb_config.servername
            self.port = sb_config.port
            self.proxy_string = sb_config.proxy_string
            self.user_agent = sb_config.user_agent
            self.mobile_emulator = sb_config.mobile_emulator
            self.device_metrics = sb_config.device_metrics
            self.cap_file = sb_config.cap_file
            self.cap_string = sb_config.cap_string
            self.settings_file = sb_config.settings_file
            self.database_env = sb_config.database_env
            self.message_duration = sb_config.message_duration
            self.js_checking_on = sb_config.js_checking_on
            self.ad_block_on = sb_config.ad_block_on
            self.block_images = sb_config.block_images
            self.chromium_arg = sb_config.chromium_arg
            self.firefox_arg = sb_config.firefox_arg
            self.firefox_pref = sb_config.firefox_pref
            self.verify_delay = sb_config.verify_delay
            self.recorder_mode = sb_config.recorder_mode
            # NOTE(review): assigned from recorder_mode, not recorder_ext —
            # confirm this isn't meant to be sb_config.recorder_ext
            self.recorder_ext = sb_config.recorder_mode
            self.disable_csp = sb_config.disable_csp
            self.disable_ws = sb_config.disable_ws
            self.enable_ws = sb_config.enable_ws
            if not self.disable_ws:
                self.enable_ws = True
            self.enable_sync = sb_config.enable_sync
            self.use_auto_ext = sb_config.use_auto_ext
            self.no_sandbox = sb_config.no_sandbox
            self.disable_gpu = sb_config.disable_gpu
            self.incognito = sb_config.incognito
            self.guest_mode = sb_config.guest_mode
            self.devtools = sb_config.devtools
            self.remote_debug = sb_config.remote_debug
            self._multithreaded = sb_config._multithreaded
            self._reuse_session = sb_config.reuse_session
            self._crumbs = sb_config.crumbs
            self.dashboard = sb_config.dashboard
            self._dash_initialized = sb_config._dashboard_initialized
            if self.dashboard and self._multithreaded:
                # Multiple processes may write dashboard files concurrently
                import fasteners
                self.dash_lock = fasteners.InterProcessLock(
                    constants.Dashboard.LOCKFILE
                )
            self.swiftshader = sb_config.swiftshader
            self.user_data_dir = sb_config.user_data_dir
            self.extension_zip = sb_config.extension_zip
            self.extension_dir = sb_config.extension_dir
            self.maximize_option = sb_config.maximize_option
            self.save_screenshot_after_test = sb_config.save_screenshot
            self.visual_baseline = sb_config.visual_baseline
            self.timeout_multiplier = sb_config.timeout_multiplier
            self.pytest_html_report = sb_config.pytest_html_report
            self.report_on = False
            if self.pytest_html_report:
                self.report_on = True
            self.use_grid = False
            if self.servername != "localhost":
                # Use Selenium Grid (Use --server="127.0.0.1" for a local Grid)
                self.use_grid = True
            if self.with_db_reporting:
                # Record test execution and results in the results database
                import getpass
                import uuid
                from seleniumbase.core.application_manager import (
                    ApplicationManager,
                )
                from seleniumbase.core.testcase_manager import (
                    ExecutionQueryPayload,
                )
                from seleniumbase.core.testcase_manager import (
                    TestcaseDataPayload,
                )
                from seleniumbase.core.testcase_manager import TestcaseManager
                self.execution_guid = str(uuid.uuid4())
                self.testcase_guid = None
                self.execution_start_time = 0
                self.case_start_time = 0
                self.application = None
                self.testcase_manager = None
                self.error_handled = False
                self.testcase_manager = TestcaseManager(self.database_env)
                #
                exec_payload = ExecutionQueryPayload()
                exec_payload.execution_start_time = int(time.time() * 1000)
                self.execution_start_time = exec_payload.execution_start_time
                exec_payload.guid = self.execution_guid
                exec_payload.username = getpass.getuser()
                self.testcase_manager.insert_execution_data(exec_payload)
                #
                data_payload = TestcaseDataPayload()
                self.testcase_guid = str(uuid.uuid4())
                data_payload.guid = self.testcase_guid
                data_payload.execution_guid = self.execution_guid
                if self.with_selenium:
                    data_payload.browser = self.browser
                else:
                    data_payload.browser = "N/A"
                data_payload.test_address = test_id
                application = ApplicationManager.generate_application_string(
                    self._testMethodName
                )
                data_payload.env = application.split(".")[0]
                data_payload.start_time = application.split(".")[1]
                data_payload.state = constants.State.UNTESTED
                self.__skip_reason = None
                self.testcase_manager.insert_testcase_data(data_payload)
                self.case_start_time = int(time.time() * 1000)
            if self.headless or self.xvfb:
                # Start a virtual display for headless/Xvfb runs
                width = settings.HEADLESS_START_WIDTH
                height = settings.HEADLESS_START_HEIGHT
                try:
                    # from pyvirtualdisplay import Display  # Skip for own lib
                    from sbvirtualdisplay import Display
                    self.display = Display(visible=0, size=(width, height))
                    self.display.start()
                    self.headless_active = True
                except Exception:
                    # pyvirtualdisplay might not be necessary anymore because
                    # Chrome and Firefox now have built-in headless displays
                    pass
        else:
            # (Nosetests / Not Pytest)
            pass  # Setup performed in plugins
        # Verify that SeleniumBase is installed successfully
        if not hasattr(self, "browser"):
            raise Exception(
                'SeleniumBase plugins DID NOT load! * Please REINSTALL!\n'
                '*** Either install SeleniumBase in Dev Mode from a clone:\n'
                '    >>> "pip install -e ."     (Run in DIR with setup.py)\n'
                '*** Or install the latest SeleniumBase version from PyPI:\n'
                '    >>> "pip install -U seleniumbase"    (Run in any DIR)'
            )
        if not hasattr(sb_config, "_is_timeout_changed"):
            # Should only be reachable from pure Python runs
            sb_config._is_timeout_changed = False
            sb_config._SMALL_TIMEOUT = settings.SMALL_TIMEOUT
            sb_config._LARGE_TIMEOUT = settings.LARGE_TIMEOUT
        if sb_config._is_timeout_changed:
            if sb_config._SMALL_TIMEOUT and sb_config._LARGE_TIMEOUT:
                settings.SMALL_TIMEOUT = sb_config._SMALL_TIMEOUT
                settings.LARGE_TIMEOUT = sb_config._LARGE_TIMEOUT
        if not hasattr(sb_config, "_recorded_actions"):
            # Only filled when Recorder Mode is enabled
            sb_config._recorded_actions = {}
        if not hasattr(settings, "SWITCH_TO_NEW_TABS_ON_CLICK"):
            # If using an older settings file, set the new definitions manually
            settings.SWITCH_TO_NEW_TABS_ON_CLICK = True
        # Parse the settings file
        if self.settings_file:
            from seleniumbase.core import settings_parser
            settings_parser.set_settings(self.settings_file)
        # Set variables that may be useful to developers
        self.log_abspath = os.path.abspath(self.log_path)
        self.data_path = os.path.join(self.log_path, self.__get_test_id())
        self.data_abspath = os.path.abspath(self.data_path)
        # Mobile Emulator device metrics: CSS Width, CSS Height, & Pixel-Ratio
        if self.device_metrics:
            metrics_string = self.device_metrics
            metrics_string = metrics_string.replace(" ", "")
            metrics_list = metrics_string.split(",")
            exception_string = (
                "Invalid input for Mobile Emulator device metrics!\n"
                "Expecting a comma-separated string with three\n"
                "integer values for Width, Height, and Pixel-Ratio.\n"
                'Example: --metrics="411,731,3" '
            )
            if len(metrics_list) != 3:
                raise Exception(exception_string)
            try:
                self.__device_width = int(metrics_list[0])
                self.__device_height = int(metrics_list[1])
                self.__device_pixel_ratio = int(metrics_list[2])
                self.mobile_emulator = True
            except Exception:
                raise Exception(exception_string)
        if self.mobile_emulator:
            if not self.user_agent:
                # Use the Pixel 4 user agent by default if not specified
                self.user_agent = (
                    "Mozilla/5.0 (Linux; Android 11; Pixel 4 XL) "
                    "AppleWebKit/537.36 (KHTML, like Gecko) "
                    "Chrome/89.0.4389.105 Mobile Safari/537.36"
                )
        if self.browser in ["firefox", "ie", "safari", "opera"]:
            # The Recorder Mode browser extension is only for Chrome/Edge.
            if self.recorder_mode:
                message = (
                    "Recorder Mode ONLY supports Chrome and Edge!\n"
                    '(Your browser choice was: "%s")' % self.browser)
                raise Exception(message)
        # Dashboard pre-processing:
        if self.dashboard:
            if self._multithreaded:
                with self.dash_lock:
                    sb_config._sbase_detected = True
                    sb_config._only_unittest = False
                    if not self._dash_initialized:
                        sb_config._dashboard_initialized = True
                        self._dash_initialized = True
                        self.__process_dashboard(False, init=True)
            else:
                sb_config._sbase_detected = True
                sb_config._only_unittest = False
                if not self._dash_initialized:
                    sb_config._dashboard_initialized = True
                    self._dash_initialized = True
                    self.__process_dashboard(False, init=True)
        # Set the JS start time for Recorder Mode if reusing the session.
        # Use this to skip saving recorded actions from previous tests.
        if self.recorder_mode and self._reuse_session:
            self.__js_start_time = int(time.time() * 1000.0)
        has_url = False
        if self._reuse_session:
            if not hasattr(sb_config, "shared_driver"):
                sb_config.shared_driver = None
            if sb_config.shared_driver:
                try:
                    # Reuse the browser session from the previous test
                    self._default_driver = sb_config.shared_driver
                    self.driver = sb_config.shared_driver
                    self._drivers_list = [sb_config.shared_driver]
                    url = self.get_current_url()
                    if url is not None:
                        has_url = True
                    # Close any extra windows left over from the last test
                    if len(self.driver.window_handles) > 1:
                        while len(self.driver.window_handles) > 1:
                            self.switch_to_window(
                                len(self.driver.window_handles) - 1
                            )
                            self.driver.close()
                        self.switch_to_window(0)
                    if self._crumbs:
                        self.driver.delete_all_cookies()
                except Exception:
                    pass
        if self._reuse_session and sb_config.shared_driver and has_url:
            good_start_page = False
            if self.recorder_ext:
                self.__js_start_time = int(time.time() * 1000.0)
            if self.start_page and len(self.start_page) >= 4:
                if page_utils.is_valid_url(self.start_page):
                    good_start_page = True
                    self.__new_window_on_rec_open = False
                    self.open(self.start_page)
                    self.__new_window_on_rec_open = True
                else:
                    # Try again with "https://" prepended to the start page
                    new_start_page = "https://" + self.start_page
                    if page_utils.is_valid_url(new_start_page):
                        good_start_page = True
                        self.__dont_record_open = True
                        self.open(new_start_page)
                        self.__dont_record_open = False
            if self.recorder_ext or (self._crumbs and not good_start_page):
                if self.get_current_url() != "data:,":
                    self.__new_window_on_rec_open = False
                    self.open("data:,")
                    self.__new_window_on_rec_open = True
                if self.recorder_ext:
                    self.__js_start_time = int(time.time() * 1000.0)
        else:
            # Launch WebDriver for both Pytest and Nosetests
            self.driver = self.get_new_driver(
                browser=self.browser,
                headless=self.headless,
                locale_code=self.locale_code,
                protocol=self.protocol,
                servername=self.servername,
                port=self.port,
                proxy=self.proxy_string,
                agent=self.user_agent,
                switch_to=True,
                cap_file=self.cap_file,
                cap_string=self.cap_string,
                recorder_ext=self.recorder_ext,
                disable_csp=self.disable_csp,
                enable_ws=self.enable_ws,
                enable_sync=self.enable_sync,
                use_auto_ext=self.use_auto_ext,
                no_sandbox=self.no_sandbox,
                disable_gpu=self.disable_gpu,
                incognito=self.incognito,
                guest_mode=self.guest_mode,
                devtools=self.devtools,
                remote_debug=self.remote_debug,
                swiftshader=self.swiftshader,
                ad_block_on=self.ad_block_on,
                block_images=self.block_images,
                chromium_arg=self.chromium_arg,
                firefox_arg=self.firefox_arg,
                firefox_pref=self.firefox_pref,
                user_data_dir=self.user_data_dir,
                extension_zip=self.extension_zip,
                extension_dir=self.extension_dir,
                is_mobile=self.mobile_emulator,
                d_width=self.__device_width,
                d_height=self.__device_height,
                d_p_r=self.__device_pixel_ratio,
            )
            self._default_driver = self.driver
            if self._reuse_session:
                sb_config.shared_driver = self.driver
        if self.browser in ["firefox", "ie", "safari", "opera"]:
            # Only Chrome and Edge browsers have the mobile emulator.
            # Some actions such as hover-clicking are different on mobile.
            self.mobile_emulator = False
        # Configure the test time limit (if used).
        self.set_time_limit(self.time_limit)
        # Set the start time for the test (in ms).
        # Although the pytest clock starts before setUp() begins,
        # the time-limit clock starts at the end of the setUp() method.
        sb_config.start_time_ms = int(time.time() * 1000.0)
        if not self.__start_time_ms:
            # Call this once in case of multiple setUp() calls in the same test
            self.__start_time_ms = sb_config.start_time_ms
    def __set_last_page_screenshot(self):
        """self.__last_page_screenshot is only for pytest html report logs.
        self.__last_page_screenshot_png is for all screenshot log files.
        (Only captures once; later calls are no-ops if already set.)"""
        if not self.__last_page_screenshot and (
            not self.__last_page_screenshot_png
        ):
            try:
                element = self.driver.find_element(
                    by=By.TAG_NAME, value="body"
                )
                if self.is_pytest and self.report_on:
                    # pytest-html needs a base64 screenshot for the report
                    self.__last_page_screenshot_png = (
                        self.driver.get_screenshot_as_png()
                    )
                    self.__last_page_screenshot = element.screenshot_as_base64
                else:
                    self.__last_page_screenshot_png = element.screenshot_as_png
            except Exception:
                # Fall back to full-window screenshots (or placeholders)
                if not self.__last_page_screenshot:
                    if self.is_pytest and self.report_on:
                        try:
                            self.__last_page_screenshot = (
                                self.driver.get_screenshot_as_base64()
                            )
                        except Exception:
                            self.__last_page_screenshot = (
                                constants.Warnings.SCREENSHOT_UNDEFINED
                            )
                if not self.__last_page_screenshot_png:
                    try:
                        self.__last_page_screenshot_png = (
                            self.driver.get_screenshot_as_png()
                        )
                    except Exception:
                        self.__last_page_screenshot_png = (
                            constants.Warnings.SCREENSHOT_UNDEFINED
                        )
def __set_last_page_url(self):
if not self.__last_page_url:
try:
self.__last_page_url = log_helper.get_last_page(self.driver)
except Exception:
self.__last_page_url = None
    def __set_last_page_source(self):
        """Cache the last page's HTML source (with a <base href> injected)
        for the logs (only captures once)."""
        if not self.__last_page_source:
            try:
                self.__last_page_source = (
                    log_helper.get_html_source_with_base_href(
                        self.driver, self.driver.page_source
                    )
                )
            except Exception:
                # Couldn't get the page source: save a placeholder warning
                self.__last_page_source = (
                    constants.Warnings.PAGE_SOURCE_UNDEFINED
                )
    def __get_exception_info(self):
        """Return a string with the exception from the test that just ran.
        (Best-effort: looks in unittest's "_outcome" on Python 3, and falls
        back to sys.last_value otherwise.)"""
        exc_message = None
        if (
            sys.version_info[0] >= 3
            and hasattr(self, "_outcome")
            and (hasattr(self._outcome, "errors") and self._outcome.errors)
        ):
            try:
                exc_message = self._outcome.errors[0][1][1]
            except Exception:
                exc_message = "(Unknown Exception)"
        else:
            try:
                exc_message = sys.last_value
            except Exception:
                exc_message = "(Unknown Exception)"
        return str(exc_message)
    def __insert_test_result(self, state, err):
        """Save the test result and error details to the results database.
        (Only used when DB reporting is enabled.)"""
        from seleniumbase.core.testcase_manager import TestcaseDataPayload
        data_payload = TestcaseDataPayload()
        data_payload.runtime = int(time.time() * 1000) - self.case_start_time
        data_payload.guid = self.testcase_guid
        data_payload.execution_guid = self.execution_guid
        data_payload.state = state
        if err:
            import traceback
            tb_string = traceback.format_exc()
            # Keep only the most relevant portion of the traceback
            if "Message: " in tb_string:
                data_payload.message = (
                    "Message: " + tb_string.split("Message: ")[-1]
                )
            elif "Exception: " in tb_string:
                data_payload.message = tb_string.split("Exception: ")[-1]
            elif "Error: " in tb_string:
                data_payload.message = tb_string.split("Error: ")[-1]
            else:
                data_payload.message = self.__get_exception_info()
        else:
            # No error: record a skip reason if the test was skipped
            test_id = self.__get_test_id_2()
            if (
                self.is_pytest
                and test_id in sb_config._results.keys()
                and (sb_config._results[test_id] == "Skipped")
            ):
                if self.__skip_reason:
                    data_payload.message = "Skipped: " + self.__skip_reason
                else:
                    data_payload.message = "Skipped: (no reason given)"
        self.testcase_manager.update_testcase_data(data_payload)
    def __add_pytest_html_extra(self):
        """Attach the last page's URL and a screenshot to the pytest-html
        report (only added once per test)."""
        if not self.__added_pytest_html_extra:
            try:
                if self.with_selenium:
                    if not self.__last_page_screenshot:
                        self.__set_last_page_screenshot()
                        self.__set_last_page_url()
                        self.__set_last_page_source()
                    if self.report_on:
                        # Build the "extra" entries for the pytest-html report
                        extra_url = {}
                        extra_url["name"] = "URL"
                        extra_url["format"] = "url"
                        extra_url["content"] = self.get_current_url()
                        extra_url["mime_type"] = None
                        extra_url["extension"] = None
                        extra_image = {}
                        extra_image["name"] = "Screenshot"
                        extra_image["format"] = "image"
                        extra_image["content"] = self.__last_page_screenshot
                        extra_image["mime_type"] = "image/png"
                        extra_image["extension"] = "png"
                        self.__added_pytest_html_extra = True
                        # Skip attaching if the screenshot capture failed
                        if self.__last_page_screenshot != (
                            constants.Warnings.SCREENSHOT_UNDEFINED
                        ):
                            self._html_report_extra.append(extra_url)
                            self._html_report_extra.append(extra_image)
            except Exception:
                pass
    def __quit_all_drivers(self):
        """Quit all open browser drivers. When reusing the session, the
        shared driver is kept alive and only the extra drivers are quit."""
        if self._reuse_session and sb_config.shared_driver:
            if len(self._drivers_list) > 0:
                if self._drivers_list[0] != sb_config.shared_driver:
                    if sb_config.shared_driver in self._drivers_list:
                        self._drivers_list.remove(sb_config.shared_driver)
                    # Move the shared driver to the front of the list
                    self._drivers_list.insert(0, sb_config.shared_driver)
                self._default_driver = self._drivers_list[0]
                self.switch_to_default_driver()
            # Drop the shared driver from the list so it isn't quit below
            if len(self._drivers_list) > 1:
                self._drivers_list = self._drivers_list[1:]
            else:
                self._drivers_list = []
        # Close all open browser windows
        self._drivers_list.reverse()  # Last In, First Out
        for driver in self._drivers_list:
            try:
                driver.quit()
            except AttributeError:
                pass
            except Exception:
                pass
        self.driver = None
        self._default_driver = None
        self._drivers_list = []
    def __has_exception(self):
        """Return True if the test that just ran raised an exception.
        (Handles Python-version and SB-fixture differences.)"""
        has_exception = False
        if hasattr(sys, "last_traceback") and sys.last_traceback is not None:
            has_exception = True
        elif sys.version_info[0] >= 3 and hasattr(self, "_outcome"):
            if hasattr(self._outcome, "errors") and self._outcome.errors:
                has_exception = True
        else:
            if sys.version_info[0] >= 3:
                has_exception = sys.exc_info()[1] is not None
            else:
                if not hasattr(self, "_using_sb_fixture_class") and (
                    not hasattr(self, "_using_sb_fixture_no_class")
                ):
                    has_exception = sys.exc_info()[1] is not None
                else:
                    has_exception = len(str(sys.exc_info()[1]).strip()) > 0
        if hasattr(self, "_using_sb_fixture") and self.__will_be_skipped:
            # Skipped tests don't count as having an exception
            has_exception = False
        return has_exception
def __get_test_id(self):
""" The id used in various places such as the test log path. """
test_id = "%s.%s.%s" % (
self.__class__.__module__,
self.__class__.__name__,
self._testMethodName,
)
if self._sb_test_identifier and len(str(self._sb_test_identifier)) > 6:
test_id = self._sb_test_identifier
return test_id
def __get_test_id_2(self):
""" The id for SeleniumBase Dashboard entries. """
if "PYTEST_CURRENT_TEST" in os.environ:
return os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]
test_id = "%s.%s.%s" % (
self.__class__.__module__.split(".")[-1],
self.__class__.__name__,
self._testMethodName,
)
if self._sb_test_identifier and len(str(self._sb_test_identifier)) > 6:
test_id = self._sb_test_identifier
if test_id.count(".") > 1:
test_id = ".".join(test_id.split(".")[1:])
return test_id
def __get_display_id(self):
""" The id for running a test from pytest. (Displayed on Dashboard) """
if "PYTEST_CURRENT_TEST" in os.environ:
return os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]
test_id = "%s.py::%s::%s" % (
self.__class__.__module__.replace(".", "/"),
self.__class__.__name__,
self._testMethodName,
)
if self._sb_test_identifier and len(str(self._sb_test_identifier)) > 6:
test_id = self._sb_test_identifier
if hasattr(self, "_using_sb_fixture_class"):
if test_id.count(".") >= 2:
parts = test_id.split(".")
full = parts[-3] + ".py::" + parts[-2] + "::" + parts[-1]
test_id = full
elif hasattr(self, "_using_sb_fixture_no_class"):
if test_id.count(".") >= 1:
parts = test_id.split(".")
full = parts[-2] + ".py::" + parts[-1]
test_id = full
return test_id
def __get_filename(self):
""" The filename of the current SeleniumBase test. (NOT Path) """
filename = None
if "PYTEST_CURRENT_TEST" in os.environ:
test_id = os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]
filename = test_id.split("::")[0].split("/")[-1]
else:
filename = self.__class__.__module__.split(".")[-1] + ".py"
return filename
def __create_log_path_as_needed(self, test_logpath):
if not os.path.exists(test_logpath):
try:
os.makedirs(test_logpath)
except Exception:
pass # Only reachable during multi-threaded runs
def __process_dashboard(self, has_exception, init=False):
""" SeleniumBase Dashboard Processing """
if self._multithreaded:
existing_res = sb_config._results # For recording "Skipped" tests
abs_path = os.path.abspath(".")
dash_json_loc = constants.Dashboard.DASH_JSON
dash_jsonpath = os.path.join(abs_path, dash_json_loc)
if not init and os.path.exists(dash_jsonpath):
with open(dash_jsonpath, "r") as f:
dash_json = f.read().strip()
dash_data, d_id, dash_rt, tlp, d_stats = json.loads(dash_json)
num_passed, num_failed, num_skipped, num_untested = d_stats
sb_config._results = dash_data
sb_config._display_id = d_id
sb_config._duration = dash_rt # Dashboard Run Time
sb_config._d_t_log_path = tlp # Test Log Path
sb_config.item_count_passed = num_passed
sb_config.item_count_failed = num_failed
sb_config.item_count_skipped = num_skipped
sb_config.item_count_untested = num_untested
if len(sb_config._extra_dash_entries) > 0:
# First take care of existing entries from non-SeleniumBase tests
for test_id in sb_config._extra_dash_entries:
if test_id in sb_config._results.keys():
if sb_config._results[test_id] == "Skipped":
sb_config.item_count_skipped += 1
sb_config.item_count_untested -= 1
elif sb_config._results[test_id] == "Failed":
sb_config.item_count_failed += 1
sb_config.item_count_untested -= 1
elif sb_config._results[test_id] == "Passed":
sb_config.item_count_passed += 1
sb_config.item_count_untested -= 1
else: # Mark "Skipped" if unknown
sb_config.item_count_skipped += 1
sb_config.item_count_untested -= 1
sb_config._extra_dash_entries = [] # Reset the list to empty
# Process new entries
log_dir = self.log_path
ft_id = self.__get_test_id() # Full test id with path to log files
test_id = self.__get_test_id_2() # The test id used by the DashBoard
dud = "seleniumbase/plugins/pytest_plugin.py::BaseClass::base_method"
dud2 = "pytest_plugin.BaseClass.base_method"
if hasattr(self, "_using_sb_fixture") and self.__will_be_skipped:
test_id = sb_config._test_id
if not init:
duration_ms = int(time.time() * 1000) - self.__start_time_ms
duration = float(duration_ms) / 1000.0
duration = "{:.2f}".format(duration)
sb_config._duration[test_id] = duration
if (
has_exception
or self.save_screenshot_after_test
or self.__screenshot_count > 0
or self.__will_be_skipped
):
sb_config._d_t_log_path[test_id] = os.path.join(log_dir, ft_id)
else:
sb_config._d_t_log_path[test_id] = None
if test_id not in sb_config._display_id.keys():
sb_config._display_id[test_id] = self.__get_display_id()
if sb_config._display_id[test_id] == dud:
return
if (
hasattr(self, "_using_sb_fixture")
and test_id not in sb_config._results.keys()
):
if test_id.count(".") > 1:
alt_test_id = ".".join(test_id.split(".")[1:])
if alt_test_id in sb_config._results.keys():
sb_config._results.pop(alt_test_id)
elif test_id.count(".") == 1:
alt_test_id = sb_config._display_id[test_id]
alt_test_id = alt_test_id.replace(".py::", ".")
alt_test_id = alt_test_id.replace("::", ".")
if alt_test_id in sb_config._results.keys():
sb_config._results.pop(alt_test_id)
if test_id in sb_config._results.keys() and (
sb_config._results[test_id] == "Skipped"
):
if self.__passed_then_skipped:
# Multiple calls of setUp() and tearDown() in the same test
sb_config.item_count_passed -= 1
sb_config.item_count_untested += 1
self.__passed_then_skipped = False
sb_config._results[test_id] = "Skipped"
sb_config.item_count_skipped += 1
sb_config.item_count_untested -= 1
elif (
self._multithreaded
and test_id in existing_res.keys()
and existing_res[test_id] == "Skipped"
):
sb_config._results[test_id] = "Skipped"
sb_config.item_count_skipped += 1
sb_config.item_count_untested -= 1
elif has_exception:
if test_id not in sb_config._results.keys():
sb_config._results[test_id] = "Failed"
sb_config.item_count_failed += 1
sb_config.item_count_untested -= 1
elif not sb_config._results[test_id] == "Failed":
# tearDown() was called more than once in the test
if sb_config._results[test_id] == "Passed":
# Passed earlier, but last run failed
sb_config._results[test_id] = "Failed"
sb_config.item_count_failed += 1
sb_config.item_count_passed -= 1
else:
sb_config._results[test_id] = "Failed"
sb_config.item_count_failed += 1
sb_config.item_count_untested -= 1
else:
# pytest-rerunfailures caused a duplicate failure
sb_config._results[test_id] = "Failed"
else:
if (
test_id in sb_config._results.keys()
and sb_config._results[test_id] == "Failed"
):
# pytest-rerunfailures reran a test that failed
sb_config._d_t_log_path[test_id] = os.path.join(
log_dir, ft_id
)
sb_config.item_count_failed -= 1
sb_config.item_count_untested += 1
elif (
test_id in sb_config._results.keys()
and sb_config._results[test_id] == "Passed"
):
# tearDown() was called more than once in the test
sb_config.item_count_passed -= 1
sb_config.item_count_untested += 1
sb_config._results[test_id] = "Passed"
sb_config.item_count_passed += 1
sb_config.item_count_untested -= 1
else:
pass # Only initialize the Dashboard on the first processing
num_passed = sb_config.item_count_passed
num_failed = sb_config.item_count_failed
num_skipped = sb_config.item_count_skipped
num_untested = sb_config.item_count_untested
self.create_pie_chart(title=constants.Dashboard.TITLE)
self.add_data_point("Passed", num_passed, color="#84d474")
self.add_data_point("Untested", num_untested, color="#eaeaea")
self.add_data_point("Skipped", num_skipped, color="#efd8b4")
self.add_data_point("Failed", num_failed, color="#f17476")
style = (
'<link rel="stylesheet" charset="utf-8" '
'href="%s">' % constants.Dashboard.STYLE_CSS
)
auto_refresh_html = ""
if num_untested > 0:
# Refresh every X seconds when waiting for more test results
auto_refresh_html = constants.Dashboard.META_REFRESH_HTML
else:
# The tests are complete
if sb_config._using_html_report:
# Add the pie chart to the pytest html report
sb_config._saved_dashboard_pie = self.extract_chart()
if self._multithreaded:
abs_path = os.path.abspath(".")
dash_pie = json.dumps(sb_config._saved_dashboard_pie)
dash_pie_loc = constants.Dashboard.DASH_PIE
pie_path = os.path.join(abs_path, dash_pie_loc)
pie_file = codecs.open(pie_path, "w+", encoding="utf-8")
pie_file.writelines(dash_pie)
pie_file.close()
head = (
'<head><meta charset="utf-8">'
'<meta name="viewport" content="shrink-to-fit=no">'
'<link rel="shortcut icon" href="%s">'
"%s"
"<title>Dashboard</title>"
"%s</head>"
% (constants.Dashboard.DASH_PIE_PNG_1, auto_refresh_html, style)
)
table_html = (
"<div></div>"
'<table border="1px solid #e6e6e6;" width="100%;" padding: 5px;'
' font-size="12px;" text-align="left;" id="results-table">'
'<thead id="results-table-head">'
'<tr style="background-color: #F7F7FD;">'
'<th col="result">Result</th><th col="name">Test</th>'
'<th col="duration">Duration</th><th col="links">Links</th>'
"</tr></thead>"
)
the_failed = []
the_skipped = []
the_passed_hl = [] # Passed and has logs
the_passed_nl = [] # Passed and no logs
the_untested = []
if dud2 in sb_config._results.keys():
sb_config._results.pop(dud2)
for key in sb_config._results.keys():
t_res = sb_config._results[key]
t_dur = sb_config._duration[key]
t_d_id = sb_config._display_id[key]
t_l_path = sb_config._d_t_log_path[key]
res_low = t_res.lower()
if sb_config._results[key] == "Failed":
if not sb_config._d_t_log_path[key]:
sb_config._d_t_log_path[key] = os.path.join(log_dir, ft_id)
the_failed.append([res_low, t_res, t_d_id, t_dur, t_l_path])
elif sb_config._results[key] == "Skipped":
the_skipped.append([res_low, t_res, t_d_id, t_dur, t_l_path])
elif sb_config._results[key] == "Passed" and t_l_path:
the_passed_hl.append([res_low, t_res, t_d_id, t_dur, t_l_path])
elif sb_config._results[key] == "Passed" and not t_l_path:
the_passed_nl.append([res_low, t_res, t_d_id, t_dur, t_l_path])
elif sb_config._results[key] == "Untested":
the_untested.append([res_low, t_res, t_d_id, t_dur, t_l_path])
for row in the_failed:
row = (
'<tbody class="%s results-table-row">'
'<tr style="background-color: #FFF8F8;">'
'<td class="col-result">%s</td><td>%s</td><td>%s</td>'
'<td><a href="%s">Logs</a> / <a href="%s/">Data</a>'
"</td></tr></tbody>"
"" % (row[0], row[1], row[2], row[3], log_dir, row[4])
)
table_html += row
for row in the_skipped:
if not row[4]:
row = (
'<tbody class="%s results-table-row">'
'<tr style="background-color: #FEFEF9;">'
'<td class="col-result">%s</td><td>%s</td><td>%s</td>'
"<td>-</td></tr></tbody>"
% (row[0], row[1], row[2], row[3])
)
else:
row = (
'<tbody class="%s results-table-row">'
'<tr style="background-color: #FEFEF9;">'
'<td class="col-result">%s</td><td>%s</td><td>%s</td>'
'<td><a href="%s">Logs</a> / <a href="%s/">Data</a>'
"</td></tr></tbody>"
"" % (row[0], row[1], row[2], row[3], log_dir, row[4])
)
table_html += row
for row in the_passed_hl:
# Passed and has logs
row = (
'<tbody class="%s results-table-row">'
'<tr style="background-color: #F8FFF8;">'
'<td class="col-result">%s</td><td>%s</td><td>%s</td>'
'<td><a href="%s">Logs</a> / <a href="%s/">Data</a>'
"</td></tr></tbody>"
"" % (row[0], row[1], row[2], row[3], log_dir, row[4])
)
table_html += row
for row in the_passed_nl:
# Passed and no logs
row = (
'<tbody class="%s results-table-row">'
'<tr style="background-color: #F8FFF8;">'
'<td class="col-result">%s</td><td>%s</td><td>%s</td>'
"<td>-</td></tr></tbody>" % (row[0], row[1], row[2], row[3])
)
table_html += row
for row in the_untested:
row = (
'<tbody class="%s results-table-row"><tr>'
'<td class="col-result">%s</td><td>%s</td><td>%s</td>'
"<td>-</td></tr></tbody>" % (row[0], row[1], row[2], row[3])
)
table_html += row
table_html += "</table>"
add_more = "<br /><b>Last updated:</b> "
timestamp, the_date, the_time = log_helper.get_master_time()
last_updated = "%s at %s" % (the_date, the_time)
add_more = add_more + "%s" % last_updated
status = "<p></p><div><b>Status:</b> Awaiting results..."
status += " (Refresh the page for updates)"
if num_untested == 0:
status = "<p></p><div><b>Status:</b> Test Run Complete:"
if num_failed == 0:
if num_passed > 0:
if num_skipped == 0:
status += " <b>Success!</b> (All tests passed)"
else:
status += " <b>Success!</b> (No failing tests)"
else:
status += " All tests were skipped!"
else:
latest_logs_dir = "latest_logs/"
log_msg = "See latest logs for details"
if num_failed == 1:
status += (
" <b>1 test failed!</b> --- "
'(<b><a href="%s">%s</a></b>)'
"" % (latest_logs_dir, log_msg)
)
else:
status += (
" <b>%s tests failed!</b> --- "
'(<b><a href="%s">%s</a></b>)'
"" % (num_failed, latest_logs_dir, log_msg)
)
status += "</div><p></p>"
add_more = add_more + status
gen_by = (
'<p><div>Generated by: <b><a href="https://seleniumbase.io/">'
"SeleniumBase</a></b></div></p><p></p>"
)
add_more = add_more + gen_by
# Have dashboard auto-refresh on updates when using an http server
refresh_line = (
'<script type="text/javascript" src="%s">'
"</script>" % constants.Dashboard.LIVE_JS
)
if num_untested == 0 and sb_config._using_html_report:
sb_config._dash_final_summary = status
add_more = add_more + refresh_line
the_html = (
'<html lang="en">'
+ head
+ self.extract_chart()
+ table_html
+ add_more
)
abs_path = os.path.abspath(".")
file_path = os.path.join(abs_path, "dashboard.html")
out_file = codecs.open(file_path, "w+", encoding="utf-8")
out_file.writelines(the_html)
out_file.close()
sb_config._dash_html = the_html
if self._multithreaded:
d_stats = (num_passed, num_failed, num_skipped, num_untested)
_results = sb_config._results
_display_id = sb_config._display_id
_rt = sb_config._duration # Run Time (RT)
_tlp = sb_config._d_t_log_path # Test Log Path (TLP)
dash_json = json.dumps((_results, _display_id, _rt, _tlp, d_stats))
dash_json_loc = constants.Dashboard.DASH_JSON
dash_jsonpath = os.path.join(abs_path, dash_json_loc)
dash_json_file = codecs.open(dash_jsonpath, "w+", encoding="utf-8")
dash_json_file.writelines(dash_json)
dash_json_file.close()
def has_exception(self):
"""(This method should ONLY be used in custom tearDown() methods.)
This method returns True if the test failed or raised an exception.
This is useful for performing additional steps in your tearDown()
method (based on whether or not the test passed or failed).
Example use cases:
* Performing cleanup steps if a test didn't complete.
* Sending test data and/or results to a dashboard service.
"""
return self.__has_exception()
def save_teardown_screenshot(self):
"""(Should ONLY be used at the start of custom tearDown() methods.)
This method takes a screenshot of the current web page for a
failing test (or when running your tests with --save-screenshot).
That way your tearDown() method can navigate away from the last
page where the test failed, and still get the correct screenshot
before performing tearDown() steps on other pages. If this method
is not included in your custom tearDown() method, a screenshot
will still be taken after the last step of your tearDown(), where
you should be calling "super(SubClassOfBaseCase, self).tearDown()"
"""
try:
self.__check_scope()
except Exception:
return
if self.__has_exception() or self.save_screenshot_after_test:
test_id = self.__get_test_id()
test_logpath = self.log_path + "/" + test_id
self.__create_log_path_as_needed(test_logpath)
self.__set_last_page_screenshot()
self.__set_last_page_url()
self.__set_last_page_source()
if self.is_pytest:
self.__add_pytest_html_extra()
def tearDown(self):
"""
Be careful if a subclass of BaseCase overrides setUp()
You'll need to add the following line to the subclass's tearDown():
super(SubClassOfBaseCase, self).tearDown()
"""
if not hasattr(self, "_using_sb_fixture") and self.__called_teardown:
# This test already called tearDown()
return
if self.recorder_mode:
self.__process_recorded_actions()
self.__called_teardown = True
self.__called_setup = False
try:
is_pytest = self.is_pytest # This fails if overriding setUp()
if is_pytest:
with_selenium = self.with_selenium
except Exception:
sub_class_name = (
str(self.__class__.__bases__[0]).split(".")[-1].split("'")[0]
)
sub_file_name = str(self.__class__.__bases__[0]).split(".")[-2]
sub_file_name = sub_file_name + ".py"
class_name = str(self.__class__).split(".")[-1].split("'")[0]
file_name = str(self.__class__).split(".")[-2] + ".py"
class_name_used = sub_class_name
file_name_used = sub_file_name
if sub_class_name == "BaseCase":
class_name_used = class_name
file_name_used = file_name
fix_setup = "super(%s, self).setUp()" % class_name_used
fix_teardown = "super(%s, self).tearDown()" % class_name_used
message = (
"You're overriding SeleniumBase's BaseCase setUp() "
"method with your own setUp() method, which breaks "
"SeleniumBase. You can fix this by going to your "
"%s class located in your %s file and adding the "
"following line of code AT THE BEGINNING of your "
"setUp() method:\n%s\n\nAlso make sure "
"you have added the following line of code AT THE "
"END of your tearDown() method:\n%s\n"
% (class_name_used, file_name_used, fix_setup, fix_teardown)
)
raise Exception(message)
# *** Start tearDown() officially ***
self.__slow_mode_pause_if_active()
has_exception = self.__has_exception()
if self.__overrided_default_timeouts:
# Reset default timeouts in case there are more tests
# These were changed in set_default_timeout()
if sb_config._SMALL_TIMEOUT and sb_config._LARGE_TIMEOUT:
settings.SMALL_TIMEOUT = sb_config._SMALL_TIMEOUT
settings.LARGE_TIMEOUT = sb_config._LARGE_TIMEOUT
sb_config._is_timeout_changed = False
self.__overrided_default_timeouts = False
if self.__deferred_assert_failures:
print(
"\nWhen using self.deferred_assert_*() methods in your tests, "
"remember to call self.process_deferred_asserts() afterwards. "
"Now calling in tearDown()...\nFailures Detected:"
)
if not has_exception:
self.process_deferred_asserts()
else:
self.process_deferred_asserts(print_only=True)
if self.is_pytest:
# pytest-specific code
test_id = self.__get_test_id()
if with_selenium:
# Save a screenshot if logging is on when an exception occurs
if has_exception:
self.__add_pytest_html_extra()
sb_config._has_exception = True
if (
self.with_testing_base
and not has_exception
and self.save_screenshot_after_test
):
test_logpath = self.log_path + "/" + test_id
self.__create_log_path_as_needed(test_logpath)
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
self.__set_last_page_url()
self.__set_last_page_source()
log_helper.log_screenshot(
test_logpath,
self.driver,
self.__last_page_screenshot_png,
)
self.__add_pytest_html_extra()
if self.with_testing_base and has_exception:
test_logpath = self.log_path + "/" + test_id
self.__create_log_path_as_needed(test_logpath)
if (
not self.with_screen_shots
and not self.with_basic_test_info
and not self.with_page_source
):
# Log everything if nothing specified (if testing_base)
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
self.__set_last_page_url()
self.__set_last_page_source()
log_helper.log_screenshot(
test_logpath,
self.driver,
self.__last_page_screenshot_png,
)
log_helper.log_test_failure_data(
self,
test_logpath,
self.driver,
self.browser,
self.__last_page_url,
)
log_helper.log_page_source(
test_logpath, self.driver, self.__last_page_source
)
else:
if self.with_screen_shots:
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
self.__set_last_page_url()
self.__set_last_page_source()
log_helper.log_screenshot(
test_logpath,
self.driver,
self.__last_page_screenshot_png,
)
if self.with_basic_test_info:
log_helper.log_test_failure_data(
self,
test_logpath,
self.driver,
self.browser,
self.__last_page_url,
)
if self.with_page_source:
log_helper.log_page_source(
test_logpath,
self.driver,
self.__last_page_source,
)
if self.dashboard:
if self._multithreaded:
with self.dash_lock:
self.__process_dashboard(has_exception)
else:
self.__process_dashboard(has_exception)
# (Pytest) Finally close all open browser windows
self.__quit_all_drivers()
if self.headless or self.xvfb:
if self.headless_active:
try:
self.display.stop()
except AttributeError:
pass
except Exception:
pass
self.display = None
if self.with_db_reporting:
if has_exception:
self.__insert_test_result(constants.State.FAILED, True)
else:
test_id = self.__get_test_id_2()
if test_id in sb_config._results.keys() and (
sb_config._results[test_id] == "Skipped"
):
self.__insert_test_result(
constants.State.SKIPPED, False
)
else:
self.__insert_test_result(
constants.State.PASSED, False
)
runtime = int(time.time() * 1000) - self.execution_start_time
self.testcase_manager.update_execution_data(
self.execution_guid, runtime
)
if self.with_s3_logging and has_exception:
""" If enabled, upload logs to S3 during test exceptions. """
import uuid
from seleniumbase.core.s3_manager import S3LoggingBucket
s3_bucket = S3LoggingBucket()
guid = str(uuid.uuid4().hex)
path = "%s/%s" % (self.log_path, test_id)
uploaded_files = []
for logfile in os.listdir(path):
logfile_name = "%s/%s/%s" % (
guid,
test_id,
logfile.split(path)[-1],
)
s3_bucket.upload_file(
logfile_name, "%s/%s" % (path, logfile)
)
uploaded_files.append(logfile_name)
s3_bucket.save_uploaded_file_names(uploaded_files)
index_file = s3_bucket.upload_index_file(test_id, guid)
print("\n\n*** Log files uploaded: ***\n%s\n" % index_file)
logging.info(
"\n\n*** Log files uploaded: ***\n%s\n" % index_file
)
if self.with_db_reporting:
from seleniumbase.core.testcase_manager import (
TestcaseDataPayload,
)
from seleniumbase.core.testcase_manager import (
TestcaseManager,
)
self.testcase_manager = TestcaseManager(self.database_env)
data_payload = TestcaseDataPayload()
data_payload.guid = self.testcase_guid
data_payload.logURL = index_file
self.testcase_manager.update_testcase_log_url(data_payload)
else:
# (Nosetests)
if has_exception:
test_id = self.__get_test_id()
test_logpath = self.log_path + "/" + test_id
self.__create_log_path_as_needed(test_logpath)
log_helper.log_test_failure_data(
self,
test_logpath,
self.driver,
self.browser,
self.__last_page_url,
)
if len(self._drivers_list) > 0:
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
self.__set_last_page_url()
self.__set_last_page_source()
log_helper.log_screenshot(
test_logpath,
self.driver,
self.__last_page_screenshot_png,
)
log_helper.log_page_source(
test_logpath, self.driver, self.__last_page_source
)
elif self.save_screenshot_after_test:
test_id = self.__get_test_id()
test_logpath = self.log_path + "/" + test_id
self.__create_log_path_as_needed(test_logpath)
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
self.__set_last_page_url()
self.__set_last_page_source()
log_helper.log_screenshot(
test_logpath, self.driver, self.__last_page_screenshot_png
)
if self.report_on:
self._last_page_screenshot = self.__last_page_screenshot_png
try:
self._last_page_url = self.get_current_url()
except Exception:
self._last_page_url = "(Error: Unknown URL)"
# Finally close all open browser windows
self.__quit_all_drivers()
| 42.505697 | 79 | 0.562692 |
a241b3b62ae171f83d428f85cc6d0ebc7acfc555 | 474 | py | Python | revolt/errors.py | MutedByte/revolt.py | be2af590af458e926dd6364f6fc4a16008840e47 | [
"MIT"
] | 28 | 2021-08-17T23:25:48.000Z | 2021-12-16T00:13:05.000Z | revolt/errors.py | MutedByte/revolt.py | be2af590af458e926dd6364f6fc4a16008840e47 | [
"MIT"
] | 14 | 2021-08-31T06:00:12.000Z | 2021-12-28T22:59:08.000Z | revolt/errors.py | MutedByte/revolt.py | be2af590af458e926dd6364f6fc4a16008840e47 | [
"MIT"
] | 12 | 2021-08-18T00:55:53.000Z | 2021-11-20T16:58:18.000Z | __all__ = (
"RevoltError",
"HTTPError",
"ServerError",
"FeatureDisabled",
"AutumnDisabled",
)
class RevoltError(Exception):
"Base exception for revolt"
class HTTPError(RevoltError):
"Base exception for http errors"
class ServerError(RevoltError):
"Internal server error"
class FeatureDisabled(RevoltError):
"""Base class for any any feature disabled"""
class AutumnDisabled(FeatureDisabled):
"""The autumn feature is disabled"""
| 20.608696 | 49 | 0.706751 |
2faf28ac5183c73554cd5b7a991dbc47c54f0338 | 383 | py | Python | app/migrations/0004_auto_20210714_1444.py | peterken674/instagram-clone | 950ff7902d8fd29ea9f94bf17b1ae0d69e0b2614 | [
"MIT"
] | null | null | null | app/migrations/0004_auto_20210714_1444.py | peterken674/instagram-clone | 950ff7902d8fd29ea9f94bf17b1ae0d69e0b2614 | [
"MIT"
] | null | null | null | app/migrations/0004_auto_20210714_1444.py | peterken674/instagram-clone | 950ff7902d8fd29ea9f94bf17b1ae0d69e0b2614 | [
"MIT"
] | 1 | 2022-03-07T13:04:35.000Z | 2022-03-07T13:04:35.000Z | # Generated by Django 3.0 on 2021-07-14 11:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0003_auto_20210714_1436'),
]
operations = [
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(upload_to='posts/'),
),
]
| 20.157895 | 56 | 0.590078 |
03b03a537ca78a666a3633ded71c8d2514238fff | 1,884 | py | Python | lib/app/reportdatasources/report_objectdetaillinks.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | lib/app/reportdatasources/report_objectdetaillinks.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | lib/app/reportdatasources/report_objectdetaillinks.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ----------------------------------------------------------------------
# ReportObjectDetailLinks datasource
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from pymongo import ReadPreference
# NOC modules
from .base import BaseReportColumn
from noc.core.mongo.connection import get_db
class ReportObjectDetailLinks(BaseReportColumn):
"""Report for MO links detail"""
name = "objectdetaillink"
builtin_sorted = True
unknown_value = ([],)
ATTRS = ["Links"]
def extract(self):
match = {"int.managed_object": {"$in": self.sync_ids}}
group = {
"_id": "$int.managed_object",
"links": {
"$push": {
"link": "$_id",
"iface_n": "$int.name",
"iface_id": "$int._id",
"mo": "$int.managed_object",
}
},
}
value = (
get_db()["noc.links"]
.with_options(read_preference=ReadPreference.SECONDARY_PREFERRED)
.aggregate(
[
{"$unwind": "$interfaces"},
{
"$lookup": {
"from": "noc.interfaces",
"localField": "interfaces",
"foreignField": "_id",
"as": "int",
}
},
{"$match": match},
{"$group": group},
{"$sort": {"_id": 1}},
],
allowDiskUse=True,
)
)
for val in value:
yield val["_id"][0], val["links"]
| 31.4 | 77 | 0.381635 |
80f995a8b87b3a58afd9f02109f0a44679396b54 | 374 | py | Python | profile/generate-singly-bbn.py | gitter-badger/py-bbn | badb5073bb1640e376f6d9db871e91365fd5d20f | [
"Apache-2.0"
] | 48 | 2017-03-21T21:13:39.000Z | 2022-03-11T08:13:01.000Z | profile/generate-singly-bbn.py | SourangshuGhosh/py-bbn | f1e296ea4bb9d1392ec585fbafe34011ea2f85fd | [
"Apache-2.0"
] | 7 | 2018-07-18T16:01:15.000Z | 2022-02-10T14:42:33.000Z | profile/generate-singly-bbn.py | SourangshuGhosh/py-bbn | f1e296ea4bb9d1392ec585fbafe34011ea2f85fd | [
"Apache-2.0"
] | 14 | 2017-09-19T18:34:08.000Z | 2022-03-04T11:52:51.000Z | import json
import numpy as np
from pybbn.generator.bbngenerator import generate_singly_bbn, convert_for_exact_inference
from pybbn.graph.dag import Bbn
np.random.seed(37)
g, p = generate_singly_bbn(900, max_iter=10)
s_bbn = convert_for_exact_inference(g, p)
with open('singly-bbn.json', 'w') as f:
f.write(json.dumps(Bbn.to_dict(s_bbn), sort_keys=True, indent=2))
| 24.933333 | 89 | 0.775401 |
67c73c85324abb5087b68a0a9d15b9d78f35e7fe | 159 | py | Python | 06_Django_Basico/blog/views.py | Joao-Inacio/Curso-de-Python3 | 179d85f43f77dced640ffb143a87214538254cf3 | [
"MIT"
] | 1 | 2021-07-19T12:31:49.000Z | 2021-07-19T12:31:49.000Z | 06_Django_Basico/blog/views.py | Joao-Inacio/Curso-de-Python3 | 179d85f43f77dced640ffb143a87214538254cf3 | [
"MIT"
] | null | null | null | 06_Django_Basico/blog/views.py | Joao-Inacio/Curso-de-Python3 | 179d85f43f77dced640ffb143a87214538254cf3 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return HttpResponse('Olá mundo')
| 17.666667 | 36 | 0.773585 |
3411a962236bde964d5306c9112f576e46b17d73 | 1,278 | py | Python | bustime/wsocket_cmd.py | norn/bustime | 5817951ce6b569589182784c890e064e4713ded7 | [
"MIT"
] | 52 | 2015-12-22T06:39:34.000Z | 2022-01-15T11:41:08.000Z | bustime/wsocket_cmd.py | oostap/bustime | 5817951ce6b569589182784c890e064e4713ded7 | [
"MIT"
] | null | null | null | bustime/wsocket_cmd.py | oostap/bustime | 5817951ce6b569589182784c890e064e4713ded7 | [
"MIT"
] | 14 | 2015-10-06T03:52:46.000Z | 2021-06-30T06:25:52.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from autobahn.asyncio import wamp, websocket
from autobahn.wamp import types
try:
import asyncio
except ImportError:
import trollius as asyncio
def magic_box(sproto, extra):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
component_config = types.ComponentConfig(realm="realm1", extra=extra)
session_factory = wamp.ApplicationSessionFactory(config=component_config)
session_factory.session = sproto
transport_factory = websocket.WampWebSocketClientFactory(session_factory)
coro = loop.create_connection(transport_factory, '127.0.0.1', 9002)
loop.run_until_complete(coro)
loop.run_forever()
loop.close()
return
class CommandProtocol(wamp.ApplicationSession):
@asyncio.coroutine
def onJoin(self, details):
us_id, cmd, params = self.config.extra
serialized = {"us_cmd": cmd, "params":params}
channel = "ru.bustime.us__%s" % us_id
if us_id == "public":
channel = "ru.bustime.public"
self.publish(channel, serialized)
self.disconnect()
def onDisconnect(self):
asyncio.get_event_loop().stop()
def wsocket_cmd(us_id, cmd, params):
magic_box(CommandProtocol, [us_id, cmd, params])
| 28.4 | 77 | 0.701095 |
b38d447be23e79b6f9ab3cab28d873c2464e24d1 | 577 | py | Python | 001.Two Sum/solution.py | lovexln001/LeetCode | 5ca0e743a4f10ac5f6cdf3314f6c81d356181f81 | [
"MIT"
] | null | null | null | 001.Two Sum/solution.py | lovexln001/LeetCode | 5ca0e743a4f10ac5f6cdf3314f6c81d356181f81 | [
"MIT"
] | null | null | null | 001.Two Sum/solution.py | lovexln001/LeetCode | 5ca0e743a4f10ac5f6cdf3314f6c81d356181f81 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
d = {}
for index, num in enumerate(nums):
if num in d:
return [d[num], index]
d[target - num] = index
if __name__ == "__main__":
s = Solution()
nums = [2, 7, 11, 13]
target = 13
assert (s.twoSum(nums, target) == [0, 2])
nums = [3, 2, 8]
target = 11
assert (s.twoSum(nums, target) == [0, 2])
| 21.37037 | 45 | 0.481802 |
fab69b8191cdc6642dc09e3cd37bbbbca1eb6def | 2,790 | py | Python | Software/Estadística/MCMC/Paper/graficar_todo.py | matiasleize/tesis_licenciatura | 5df6e341314583702b466b8ed7977d410f0ee457 | [
"MIT"
] | null | null | null | Software/Estadística/MCMC/Paper/graficar_todo.py | matiasleize/tesis_licenciatura | 5df6e341314583702b466b8ed7977d410f0ee457 | [
"MIT"
] | null | null | null | Software/Estadística/MCMC/Paper/graficar_todo.py | matiasleize/tesis_licenciatura | 5df6e341314583702b466b8ed7977d410f0ee457 | [
"MIT"
] | null | null | null | import numpy as np
from getdist import plots, MCSamples
from matplotlib import pyplot as plt
import emcee
import sys
import os
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/Clases')
from funciones_graficador import Graficador
#%% Importo las cadenas
os.chdir(path_datos_global+'/Resultados_cadenas/Paper/LCDM')
filename_1 = "sample_LCDM_CC+SN_4params.h5"
reader_1 = emcee.backends.HDFBackend(filename_1)
filename_2 = "sample_LCDM_CC+SN+AGN_3params.h5"
reader_2 = emcee.backends.HDFBackend(filename_2)
filename_3 = "sample_LCDM_CC+SN+BAO_3params.h5"
reader_3 = emcee.backends.HDFBackend(filename_3)
filename_4 = "sample_LCDM_CC+SN+BAO+AGN_3params.h5"
reader_4 = emcee.backends.HDFBackend(filename_4)
# Algunos valores
samples_1 = reader_1.get_chain()
burnin_1 = int(0.2*len(samples_1[:,0])) #Burnin del 20%
samples_2 = reader_2.get_chain()
burnin_2 = int(0.2*len(samples_2[:,0])) #Burnin del 20%
samples_3 = reader_3.get_chain()
burnin_3 = int(0.2*len(samples_3[:,0])) #Burnin del 20%
samples_4 = reader_4.get_chain()
burnin_4 = int(0.2*len(samples_4[:,0])) #Burnin del 20%
thin = 1
#%%
samples_1 = reader_1.get_chain(flat=True, discard=burnin_1,thin=1)
samples_2 = reader_2.get_chain(flat=True, discard=burnin_2,thin=1)
samples_3 = reader_3.get_chain(flat=True, discard=burnin_3,thin=1)
samples_4 = reader_4.get_chain(flat=True, discard=burnin_4,thin=1)
#%%
%matplotlib qt5
ndim = 3
names = ['M_abs','\omega_m','H0']
labels=names
samples1 = MCSamples(samples=samples_1,names = names, labels = labels)
samples1 = samples1.copy(label=r'Lowest-order with $0.3\sigma$ smoothing',
settings={'mult_bias_correction_order':0,'smooth_scale_2D':0.3, 'smooth_scale_1D':0.3})
samples2 = MCSamples(samples=samples_2,names = names, labels = labels)
samples2 = samples2.copy(label=r'Lowest-order with $0.3\sigma$ smoothing',
settings={'mult_bias_correction_order':0,'smooth_scale_2D':0.3, 'smooth_scale_1D':0.3})
samples3 = MCSamples(samples=samples_3,names = names, labels = labels)
samples3 = samples3.copy(label=r'Lowest-order with $0.3\sigma$ smoothing',
settings={'mult_bias_correction_order':0,'smooth_scale_2D':0.3, 'smooth_scale_1D':0.3})
samples4 = MCSamples(samples=samples_4,names = names, labels = labels)
samples4 = samples4.copy(label=r'Lowest-order with $0.3\sigma$ smoothing',
settings={'mult_bias_correction_order':0,'smooth_scale_2D':0.3, 'smooth_scale_1D':0.3})
g = plots.get_subplot_plotter()
g.triangle_plot([samples1, samples2, samples3, samples4], filled=True, params = ['M_abs','\omega_m','H0'] ,legend_labels = ['CC+SN', 'CC+SN+AGN', 'CC+SN+BAO','CC+SN+BAO+AGN'])
plt.savefig('/home/matias/Desktop/contornos_LCDM.png')
| 37.2 | 175 | 0.751613 |
f68a38f7515f47e2f64f38306c78ac41d741d30a | 28,606 | py | Python | test/test_punt.py | artem-belov/vpp | 09267f705f408c061e03d07a559efba661900f2d | [
"Apache-2.0"
] | null | null | null | test/test_punt.py | artem-belov/vpp | 09267f705f408c061e03d07a559efba661900f2d | [
"Apache-2.0"
] | null | null | null | test/test_punt.py | artem-belov/vpp | 09267f705f408c061e03d07a559efba661900f2d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import binascii
import random
import socket
import os
import threading
import struct
from struct import unpack, unpack_from
try:
import unittest2 as unittest
except ImportError:
import unittest
from util import ppp, ppc
from re import compile
import scapy.compat
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP
import scapy.layers.inet6 as inet6
from scapy.layers.inet6 import IPv6, ICMPv6DestUnreach
import six
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto
from vpp_ip_route import VppIpRoute, VppRoutePath
NUM_PKTS = 67
# Format MAC Address
def get_mac_addr(bytes_addr):
    """Render a raw 6-byte MAC address as colon-separated lowercase hex."""
    octets = ['%02x' % scapy.compat.orb(octet) for octet in bytes_addr]
    return ':'.join(octets)
# Format IP Address
def ipv4(bytes_addr):
    """Render a raw 4-byte IPv4 address in dotted-decimal notation."""
    parts = ['%d' % scapy.compat.orb(octet) for octet in bytes_addr]
    return '.'.join(parts)
# Unpack Ethernet Frame
def ethernet_frame(data):
    """Split a frame into (dest MAC, src MAC, byte-swapped ethertype, payload)."""
    header, payload = data[:14], data[14:]
    dest_mac, src_mac, proto = struct.unpack('! 6s 6s H', header)
    # htons() flips the ethertype so callers compare against swapped constants.
    return dest_mac, src_mac, socket.htons(proto), payload
# Unpack IPv4 Packets
def ipv4_packet(data):
    """Return (protocol, src addr, dst addr, payload) from an IPv4 header."""
    header = data[:20]
    proto, src, target = struct.unpack('! 8x 1x B 2x 4s 4s', header)
    return proto, src, target, data[20:]
# Unpack IPv6 Packets
def ipv6_packet(data):
    """Return (next header, src addr, dst addr, payload) from an IPv6 header."""
    header = data[:40]
    nh, src, target = struct.unpack('! 6x B 1x 16s 16s', header)
    return nh, src, target, data[40:]
# Unpacks any UDP Packet
def udp_seg(data):
    """Return (src port, dst port, length, payload) from a UDP segment."""
    header = data[:8]
    src_port, dest_port, size = struct.unpack('! H H 2x H', header)
    return src_port, dest_port, size, data[8:]
# Unpacks any TCP Packet
def tcp_seg(data):
    """Return (src port, dst port, seq, payload); the data-offset field locates the payload."""
    src_port, dest_port, seq, flag = struct.unpack('! H H L 4x H', data[:14])
    # Upper 4 bits of the flags word are the header length in 32-bit words.
    header_len = (flag >> 12) * 4
    return src_port, dest_port, seq, data[header_len:]
def receivePackets(sock, counters):
    # Blocking receive loop for a punt-socket client.  For every punted
    # datagram it parses the L2/L3/L4 headers and, for UDP, zeroes the
    # expected-packet counter for the destination port so the test can
    # assert that traffic for that port arrived.
    # Wait for some packets on socket
    while True:
        data = sock.recv(65536)
        # punt socket metadata
        # packet_desc = data[0:8]
        # Ethernet
        _, _, eth_proto, data = ethernet_frame(data[8:])
        # Ipv4 (8 == htons(0x0800), ethertype byte-swapped by ethernet_frame)
        if eth_proto == 8:
            proto, _, _, data = ipv4_packet(data)
            # TCP -- only the ports are needed, so the UDP unpacker is reused
            if proto == 6:
                _, dst_port, _, data = udp_seg(data)
            # UDP
            elif proto == 17:
                _, dst_port, _, data = udp_seg(data)
                counters[dst_port] = 0
        # Ipv6 (0xdd86 == byte-swapped ethertype 0x86dd)
        elif eth_proto == 0xdd86:
            nh, _, _, data = ipv6_packet(data)
            # TCP -- same port-only shortcut as above
            if nh == 6:
                _, dst_port, _, data = udp_seg(data)
            # UDP
            elif nh == 17:
                _, dst_port, _, data = udp_seg(data)
                counters[dst_port] = 0
class serverSocketThread(threading.Thread):
    """ Socket server thread

    Binds a unix datagram socket at *sockName* and consumes punted packets
    forever via receivePackets(), recording seen UDP ports in *counters*.
    """

    def __init__(self, threadID, sockName, counters):
        """Store the thread id, socket path and shared counters dict."""
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.sockName = sockName
        self.sock = None
        self.counters = counters

    def run(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        try:
            # Remove a stale socket file left by a previous run, if any.
            os.unlink(self.sockName)
        except OSError:
            # Narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; only a missing file is expected.
            pass
        self.sock.bind(self.sockName)
        receivePackets(self.sock, self.counters)
class TestPuntSocket(VppTestCase):
    """ Punt Socket

    Common base for the IPv4/IPv6 punt-socket tests: starts VPP with a punt
    socket configured, creates two packet-generator interfaces, and manages
    the per-test unix-socket client threads.

    NOTE: ports/sock_servers/portsCheck are class-level mutable attributes,
    so they are shared by all instances and subclasses.
    """
    ports = [1111, 2222, 3333, 4444]
    sock_servers = list()
    portsCheck = dict()
    nr_packets = 256
    @classmethod
    def setUpClass(cls):
        super(TestPuntSocket, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        super(TestPuntSocket, cls).tearDownClass()
    @classmethod
    def setUpConstants(cls):
        # Inject the punt-socket stanza into VPP's startup configuration.
        cls.extra_vpp_punt_config = [
            "punt", "{", "socket", cls.tempdir+"/socket_punt", "}"]
        super(TestPuntSocket, cls).setUpConstants()
    def setUp(self):
        super(TestPuntSocket, self).setUp()
        random.seed()
        self.create_pg_interfaces(range(2))
        for i in self.pg_interfaces:
            i.admin_up()
    def tearDown(self):
        # Drop references to the client threads; sockets are closed by
        # socket_client_close() from the individual tests.
        del self.sock_servers[:]
        super(TestPuntSocket, self).tearDown()
    def socket_client_create(self, sock_name, id=None):
        # Spawn a daemonless receiver thread bound to sock_name that zeroes
        # portsCheck entries as packets arrive.
        thread = serverSocketThread(id, sock_name, self.portsCheck)
        self.sock_servers.append(thread)
        thread.start()
    def socket_client_close(self):
        for thread in self.sock_servers:
            thread.sock.close()
class TestIP4PuntSocket(TestPuntSocket):
    """ Punt Socket for IPv4

    Exercises registration/deregistration of IPv4 punt sockets and verifies
    that punted UDP traffic reaches the unix-socket clients instead of
    being forwarded back out of the packet-generator interface.
    """
    @classmethod
    def setUpClass(cls):
        super(TestIP4PuntSocket, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        super(TestIP4PuntSocket, cls).tearDownClass()
    def setUp(self):
        super(TestIP4PuntSocket, self).setUp()
        for i in self.pg_interfaces:
            i.config_ip4()
            i.resolve_arp()
    def tearDown(self):
        super(TestIP4PuntSocket, self).tearDown()
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.admin_down()
    def test_punt_socket_dump(self):
        """ Punt socket registration/deregistration"""
        punts = self.vapi.punt_socket_dump(is_ip6=0)
        self.assertEqual(len(punts), 0)
        #
        # configure a punt socket
        #
        self.vapi.punt_socket_register(1111, b"%s/socket_punt_1111" %
                                       six.ensure_binary(self.tempdir))
        self.vapi.punt_socket_register(2222, b"%s/socket_punt_2222" %
                                       six.ensure_binary(self.tempdir))
        punts = self.vapi.punt_socket_dump(is_ip6=0)
        self.assertEqual(len(punts), 2)
        self.assertEqual(punts[0].punt.l4_port, 1111)
        self.assertEqual(punts[1].punt.l4_port, 2222)
        #
        # deregister a punt socket
        #
        self.vapi.punt_socket_deregister(1111)
        punts = self.vapi.punt_socket_dump(is_ip6=0)
        self.assertEqual(len(punts), 1)
        #
        # configure a punt socket again
        #
        self.vapi.punt_socket_register(1111, b"%s/socket_punt_1111" %
                                       six.ensure_binary(self.tempdir))
        self.vapi.punt_socket_register(3333, b"%s/socket_punt_3333" %
                                       six.ensure_binary(self.tempdir))
        punts = self.vapi.punt_socket_dump(is_ip6=0)
        self.assertEqual(len(punts), 3)
        #
        # deregister all punt socket
        #
        self.vapi.punt_socket_deregister(1111)
        self.vapi.punt_socket_deregister(2222)
        self.vapi.punt_socket_deregister(3333)
        punts = self.vapi.punt_socket_dump(is_ip6=0)
        self.assertEqual(len(punts), 0)
    def test_punt_socket_traffic_single_port_single_socket(self):
        """ Punt socket traffic single port single socket"""
        port = self.ports[0]
        p = (Ether(src=self.pg0.remote_mac,
                   dst=self.pg0.local_mac) /
             IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
             UDP(sport=9876, dport=port) /
             Raw('\xa5' * 100))
        pkts = p * self.nr_packets
        # portsCheck tracks the expected count; the receiver thread zeroes
        # the entry once traffic for the port has been seen.
        self.portsCheck[port] = self.nr_packets
        punts = self.vapi.punt_socket_dump(is_ip6=0)
        self.assertEqual(len(punts), 0)
        #
        # expect ICMP - port unreachable for all packets
        #
        self.vapi.cli("clear trace")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # FIXME - when punt socket deregister is implemented
        # rx = self.pg0.get_capture(self.nr_packets)
        # for p in rx:
        #     self.assertEqual(int(p[IP].proto), 1)   # ICMP
        #     self.assertEqual(int(p[ICMP].code), 3)  # unreachable
        #
        # configure a punt socket
        #
        self.socket_client_create(b"%s/socket_%d" % (
            six.ensure_binary(self.tempdir), port))
        self.vapi.punt_socket_register(port, b"%s/socket_%d" % (
            six.ensure_binary(self.tempdir), port))
        punts = self.vapi.punt_socket_dump(is_ip6=0)
        self.assertEqual(len(punts), 1)
        self.logger.debug("Sending %s packets to port %d",
                          str(self.portsCheck[port]), port)
        #
        # expect punt socket and no packets on pg0
        #
        self.vapi.cli("clear errors")
        self.vapi.cli("clear trace")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        self.pg0.get_capture(0)
        self.logger.info(self.vapi.cli("show trace"))
        self.socket_client_close()
        self.assertEqual(self.portsCheck[port], 0)
        #
        # remove punt socket. expect ICMP - port unreachable for all packets
        #
        self.vapi.punt_socket_deregister(port)
        punts = self.vapi.punt_socket_dump(is_ip6=0)
        self.assertEqual(len(punts), 0)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # FIXME - when punt socket deregister is implemented
        # self.pg0.get_capture(nr_packets)
    def test_punt_socket_traffic_multi_port_multi_sockets(self):
        """ Punt socket traffic multi ports and multi sockets"""
        for p in self.ports:
            self.portsCheck[p] = 0
        #
        # create stream with random packets count per given ports
        #
        pkts = list()
        for _ in range(0, self.nr_packets):
            # choose port from port list
            p = random.choice(self.ports)
            pkts.append((
                Ether(src=self.pg0.remote_mac,
                      dst=self.pg0.local_mac) /
                IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
                UDP(sport=9876, dport=p) /
                Raw('\xa5' * 100)))
            self.portsCheck[p] += 1
        #
        # no punt socket
        #
        punts = self.vapi.punt_socket_dump(is_ip6=0)
        self.assertEqual(len(punts), 0)
        #
        # configure a punt socket
        #
        for p in self.ports:
            self.socket_client_create(b"%s/socket_%d" % (
                six.ensure_binary(self.tempdir), p))
            self.vapi.punt_socket_register(p, b"%s/socket_%d" % (
                six.ensure_binary(self.tempdir), p))
        punts = self.vapi.punt_socket_dump(is_ip6=0)
        self.assertEqual(len(punts), len(self.ports))
        for p in self.ports:
            self.logger.debug("Sending %s packets to port %d",
                              str(self.portsCheck[p]), p)
        #
        # expect punt socket and no packets on pg0
        #
        self.vapi.cli("clear errors")
        self.vapi.cli("clear trace")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        self.pg0.get_capture(0)
        self.logger.info(self.vapi.cli("show trace"))
        self.socket_client_close()
        for p in self.ports:
            self.assertEqual(self.portsCheck[p], 0)
            self.vapi.punt_socket_deregister(p)
        punts = self.vapi.punt_socket_dump(is_ip6=0)
        self.assertEqual(len(punts), 0)
    def test_punt_socket_traffic_multi_ports_single_socket(self):
        """ Punt socket traffic multi ports and single socket"""
        for p in self.ports:
            self.portsCheck[p] = 0
        #
        # create stream with random packets count per given ports
        #
        pkts = list()
        for _ in range(0, self.nr_packets):
            # choose port from port list
            p = random.choice(self.ports)
            pkts.append((
                Ether(src=self.pg0.remote_mac,
                      dst=self.pg0.local_mac) /
                IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
                UDP(sport=9876, dport=p) /
                Raw('\xa5' * 100)))
            self.portsCheck[p] += 1
        #
        # no punt socket
        #
        punts = self.vapi.punt_socket_dump(is_ip6=0)
        self.assertEqual(len(punts), 0)
        # configure a punt socket
        #
        self.socket_client_create(b"%s/socket_multi" %
                                  six.ensure_binary(self.tempdir))
        for p in self.ports:
            self.vapi.punt_socket_register(p,
                                           b"%s/socket_multi" %
                                           six.ensure_binary(self.tempdir))
        punts = self.vapi.punt_socket_dump(is_ip6=0)
        self.assertEqual(len(punts), len(self.ports))
        for p in self.ports:
            self.logger.debug("Sending %s packets to port %d",
                              str(self.portsCheck[p]), p)
        #
        # expect punt socket and no packets on pg0
        #
        self.vapi.cli("clear errors")
        self.vapi.cli("clear trace")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        self.pg0.get_capture(0)
        self.logger.info(self.vapi.cli("show trace"))
        self.socket_client_close()
        for p in self.ports:
            self.assertEqual(self.portsCheck[p], 0)
            self.vapi.punt_socket_deregister(p)
        punts = self.vapi.punt_socket_dump(is_ip6=0)
        self.assertEqual(len(punts), 0)
class TestIP6PuntSocket(TestPuntSocket):
    """ Punt Socket for IPv6

    IPv6 counterpart of TestIP4PuntSocket: registration/deregistration
    (is_ip4=0) and punted IPv6/UDP traffic delivery to unix-socket clients.
    """
    @classmethod
    def setUpClass(cls):
        super(TestIP6PuntSocket, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        super(TestIP6PuntSocket, cls).tearDownClass()
    def setUp(self):
        super(TestIP6PuntSocket, self).setUp()
        for i in self.pg_interfaces:
            i.config_ip6()
            i.resolve_ndp()
    def tearDown(self):
        super(TestIP6PuntSocket, self).tearDown()
        for i in self.pg_interfaces:
            i.unconfig_ip6()
            i.admin_down()
    def test_punt_socket_dump(self):
        """ Punt socket registration """
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
        #
        # configure a punt socket
        #
        self.vapi.punt_socket_register(1111, b"%s/socket_1111" %
                                       six.ensure_binary(self.tempdir),
                                       is_ip4=0)
        self.vapi.punt_socket_register(2222, b"%s/socket_2222" %
                                       six.ensure_binary(self.tempdir),
                                       is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 2)
        self.assertEqual(punts[0].punt.l4_port, 1111)
        self.assertEqual(punts[1].punt.l4_port, 2222)
        #
        # deregister a punt socket
        #
        self.vapi.punt_socket_deregister(1111, is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 1)
        #
        # configure a punt socket again
        #
        self.vapi.punt_socket_register(1111, b"%s/socket_1111" %
                                       six.ensure_binary(self.tempdir),
                                       is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 2)
        #
        # deregister all punt socket
        #
        # NOTE(review): unlike the IPv4 variant, port 3333 is never
        # registered here yet it is deregistered below -- confirm whether a
        # punt_socket_register(3333, ...) call was intended above.
        self.vapi.punt_socket_deregister(1111, is_ip4=0)
        self.vapi.punt_socket_deregister(2222, is_ip4=0)
        self.vapi.punt_socket_deregister(3333, is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
    def test_punt_socket_traffic_single_port_single_socket(self):
        """ Punt socket traffic single port single socket"""
        port = self.ports[0]
        p = (Ether(src=self.pg0.remote_mac,
                   dst=self.pg0.local_mac) /
             IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6) /
             inet6.UDP(sport=9876, dport=port) /
             Raw('\xa5' * 100))
        pkts = p * self.nr_packets
        # Expected-packet counter; zeroed by the receiver thread on arrival.
        self.portsCheck[port] = self.nr_packets
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
        #
        # expect ICMPv6 - destination unreachable for all packets
        #
        self.vapi.cli("clear trace")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # FIXME - when punt socket deregister is implemented
        # rx = self.pg0.get_capture(self.nr_packets)
        # for p in rx:
        #     self.assertEqual(int(p[IPv6].nh), 58)                # ICMPv6
        #     self.assertEqual(int(p[ICMPv6DestUnreach].code),4)  # unreachable
        #
        # configure a punt socket
        #
        self.socket_client_create(b"%s/socket_%d" % (
            six.ensure_binary(self.tempdir), port))
        self.vapi.punt_socket_register(port, b"%s/socket_%d" % (
            six.ensure_binary(self.tempdir), port), is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 1)
        self.logger.debug("Sending %s packets to port %d",
                          str(self.portsCheck[port]), port)
        #
        # expect punt socket and no packets on pg0
        #
        self.vapi.cli("clear errors")
        self.vapi.cli("clear trace")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        self.pg0.get_capture(0)
        self.logger.info(self.vapi.cli("show trace"))
        self.socket_client_close()
        self.assertEqual(self.portsCheck[port], 0)
        #
        # remove punt socket. expect ICMP - dest. unreachable for all packets
        #
        self.vapi.punt_socket_deregister(port, is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # FIXME - when punt socket deregister is implemented
        # self.pg0.get_capture(nr_packets)
    def test_punt_socket_traffic_multi_port_multi_sockets(self):
        """ Punt socket traffic multi ports and multi sockets"""
        for p in self.ports:
            self.portsCheck[p] = 0
        #
        # create stream with random packets count per given ports
        #
        pkts = list()
        for _ in range(0, self.nr_packets):
            # choose port from port list
            p = random.choice(self.ports)
            pkts.append((
                Ether(src=self.pg0.remote_mac,
                      dst=self.pg0.local_mac) /
                IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6) /
                inet6.UDP(sport=9876, dport=p) /
                Raw('\xa5' * 100)))
            self.portsCheck[p] += 1
        #
        # no punt socket
        #
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
        #
        # configure a punt socket
        #
        for p in self.ports:
            self.socket_client_create(b"%s/socket_%d" % (
                six.ensure_binary(self.tempdir), p))
            self.vapi.punt_socket_register(p, b"%s/socket_%d" % (
                six.ensure_binary(self.tempdir), p), is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), len(self.ports))
        for p in self.ports:
            self.logger.debug("Sending %s packets to port %d",
                              str(self.portsCheck[p]), p)
        #
        # expect punt socket and no packets on pg0
        #
        self.vapi.cli("clear errors")
        self.vapi.cli("clear trace")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        self.pg0.get_capture(0)
        self.logger.info(self.vapi.cli("show trace"))
        self.socket_client_close()
        for p in self.ports:
            self.assertEqual(self.portsCheck[p], 0)
            self.vapi.punt_socket_deregister(p, is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
    def test_punt_socket_traffic_multi_ports_single_socket(self):
        """ Punt socket traffic multi ports and single socket"""
        for p in self.ports:
            self.portsCheck[p] = 0
        #
        # create stream with random packets count per given ports
        #
        pkts = list()
        for _ in range(0, self.nr_packets):
            # choose port from port list
            p = random.choice(self.ports)
            pkts.append((
                Ether(src=self.pg0.remote_mac,
                      dst=self.pg0.local_mac) /
                IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6) /
                inet6.UDP(sport=9876, dport=p) /
                Raw('\xa5' * 100)))
            self.portsCheck[p] += 1
        #
        # no punt socket
        #
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
        #
        # configure a punt socket
        #
        self.socket_client_create(b"%s/socket_multi" %
                                  six.ensure_binary(self.tempdir))
        for p in self.ports:
            self.vapi.punt_socket_register(p,
                                           b"%s/socket_multi" %
                                           six.ensure_binary(self.tempdir),
                                           is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), len(self.ports))
        for p in self.ports:
            self.logger.debug("Send %s packets to port %d",
                              str(self.portsCheck[p]), p)
        #
        # expect punt socket and no packets on pg0
        #
        self.vapi.cli("clear errors")
        self.vapi.cli("clear trace")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        self.pg0.get_capture(0)
        self.logger.info(self.vapi.cli("show trace"))
        self.socket_client_close()
        for p in self.ports:
            self.assertEqual(self.portsCheck[p], 0)
            self.vapi.punt_socket_deregister(p, is_ip4=0)
        punts = self.vapi.punt_socket_dump(is_ip6=1)
        self.assertEqual(len(punts), 0)
class TestPunt(VppTestCase):
    """ Punt Test Case

    Tests the generic punt/exception-path infrastructure: packets dropped
    by a test ACL are handed to registered punt clients and re-emitted out
    of the registered interfaces, IP headers unmodified.
    """
    @classmethod
    def setUpClass(cls):
        super(TestPunt, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        super(TestPunt, cls).tearDownClass()
    def setUp(self):
        super(TestPunt, self).setUp()
        self.create_pg_interfaces(range(4))
        for i in self.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.resolve_arp()
            i.config_ip6()
            i.resolve_ndp()
    def tearDown(self):
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.ip6_disable()
            i.admin_down()
        super(TestPunt, self).tearDown()
    def test_punt(self):
        """ Exception Path testing """
        #
        # Using the test CLI we will hook in a exception path to
        # send ACL deny packets out of pg0 and pg1.
        # the ACL is src,dst = 1.1.1.1,1.1.1.2
        #
        ip_1_1_1_2 = VppIpRoute(self, "1.1.1.2", 32,
                                [VppRoutePath(self.pg3.remote_ip4,
                                              self.pg3.sw_if_index)])
        ip_1_1_1_2.add_vpp_config()
        ip_1_2 = VppIpRoute(self, "1::2", 128,
                            [VppRoutePath(self.pg3.remote_ip6,
                                          self.pg3.sw_if_index,
                                          proto=DpoProto.DPO_PROTO_IP6)],
                            is_ip6=1)
        ip_1_2.add_vpp_config()
        p4 = (Ether(src=self.pg2.remote_mac,
                    dst=self.pg2.local_mac) /
              IP(src="1.1.1.1", dst="1.1.1.2") /
              UDP(sport=1234, dport=1234) /
              Raw('\xa5' * 100))
        p6 = (Ether(src=self.pg2.remote_mac,
                    dst=self.pg2.local_mac) /
              IPv6(src="1::1", dst="1::2") /
              UDP(sport=1234, dport=1234) /
              Raw('\xa5' * 100))
        # Baseline: without the punt feature the packets are routed to pg3.
        self.send_and_expect(self.pg2, p4*1, self.pg3)
        self.send_and_expect(self.pg2, p6*1, self.pg3)
        #
        # apply the punting features
        #
        self.vapi.cli("test punt pg2")
        #
        # pkts now dropped
        #
        self.send_and_assert_no_replies(self.pg2, p4*NUM_PKTS)
        self.send_and_assert_no_replies(self.pg2, p6*NUM_PKTS)
        #
        # Check state:
        #  1 - node error counters
        #  2 - per-reason counters
        #    2, 3 are the index of the assigned punt reason
        #
        stats = self.statistics.get_counter(
            "/err/punt-dispatch/No registrations")
        self.assertEqual(stats, 2*NUM_PKTS)
        stats = self.statistics.get_counter("/net/punt")
        self.assertEqual(stats[0][7]['packets'], NUM_PKTS)
        self.assertEqual(stats[0][8]['packets'], NUM_PKTS)
        #
        # use the test CLI to test a client that punts exception
        # packets out of pg0
        #
        self.vapi.cli("test punt pg0 %s" % self.pg0.remote_ip4)
        self.vapi.cli("test punt pg0 %s" % self.pg0.remote_ip6)
        rx4s = self.send_and_expect(self.pg2, p4*NUM_PKTS, self.pg0)
        rx6s = self.send_and_expect(self.pg2, p6*NUM_PKTS, self.pg0)
        #
        # check the packets come out IP unmodified but destined to pg0 host
        #
        for rx in rx4s:
            self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
            self.assertEqual(rx[Ether].src, self.pg0.local_mac)
            self.assertEqual(p4[IP].dst, rx[IP].dst)
            self.assertEqual(p4[IP].ttl, rx[IP].ttl)
        for rx in rx6s:
            self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
            self.assertEqual(rx[Ether].src, self.pg0.local_mac)
            self.assertEqual(p6[IPv6].dst, rx[IPv6].dst)
            self.assertEqual(p6[IPv6].hlim, rx[IPv6].hlim)
        stats = self.statistics.get_counter("/net/punt")
        self.assertEqual(stats[0][7]['packets'], 2*NUM_PKTS)
        self.assertEqual(stats[0][8]['packets'], 2*NUM_PKTS)
        #
        # add another registration for the same reason to send packets
        # out of pg1
        #
        self.vapi.cli("test punt pg1 %s" % self.pg1.remote_ip4)
        self.vapi.cli("test punt pg1 %s" % self.pg1.remote_ip6)
        self.vapi.cli("clear trace")
        self.pg2.add_stream(p4 * NUM_PKTS)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        # With two registrations each punted packet is replicated to both
        # pg0 and pg1.
        rxd = self.pg0.get_capture(NUM_PKTS)
        for rx in rxd:
            self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
            self.assertEqual(rx[Ether].src, self.pg0.local_mac)
            self.assertEqual(p4[IP].dst, rx[IP].dst)
            self.assertEqual(p4[IP].ttl, rx[IP].ttl)
        rxd = self.pg1.get_capture(NUM_PKTS)
        for rx in rxd:
            self.assertEqual(rx[Ether].dst, self.pg1.remote_mac)
            self.assertEqual(rx[Ether].src, self.pg1.local_mac)
            self.assertEqual(p4[IP].dst, rx[IP].dst)
            self.assertEqual(p4[IP].ttl, rx[IP].ttl)
        self.vapi.cli("clear trace")
        self.pg2.add_stream(p6 * NUM_PKTS)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        rxd = self.pg0.get_capture(NUM_PKTS)
        for rx in rxd:
            self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
            self.assertEqual(rx[Ether].src, self.pg0.local_mac)
            self.assertEqual(p6[IPv6].dst, rx[IPv6].dst)
            self.assertEqual(p6[IPv6].hlim, rx[IPv6].hlim)
        rxd = self.pg1.get_capture(NUM_PKTS)
        for rx in rxd:
            self.assertEqual(rx[Ether].dst, self.pg1.remote_mac)
            self.assertEqual(rx[Ether].src, self.pg1.local_mac)
            self.assertEqual(p6[IPv6].dst, rx[IPv6].dst)
            self.assertEqual(p6[IPv6].hlim, rx[IPv6].hlim)
        stats = self.statistics.get_counter("/net/punt")
        self.assertEqual(stats[0][7]['packets'], 3*NUM_PKTS)
        self.assertEqual(stats[0][8]['packets'], 3*NUM_PKTS)
        self.logger.info(self.vapi.cli("show vlib graph punt-dispatch"))
        self.logger.info(self.vapi.cli("show punt client"))
        self.logger.info(self.vapi.cli("show punt reason"))
        self.logger.info(self.vapi.cli("show punt stats"))
        self.logger.info(self.vapi.cli("show punt db"))
# Allow running this module directly under VPP's test runner.
if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
| 33.575117 | 79 | 0.575718 |
06ea958434bbb60aa993e32b18da405a48eee728 | 3,695 | py | Python | vk/bot_framework/dispatcher/storage.py | PythonReforker/vk.py | 71e4d914140d0ed054d47912e1303132caf87c51 | [
"MIT"
] | 2 | 2020-02-28T11:31:11.000Z | 2020-09-16T06:11:11.000Z | vk/bot_framework/dispatcher/storage.py | Cl0ckHvH/vk.py-1.0.0a6 | 02128b6e5043d6ebf98724ec8b09dce09aa2a669 | [
"MIT"
] | null | null | null | vk/bot_framework/dispatcher/storage.py | Cl0ckHvH/vk.py-1.0.0a6 | 02128b6e5043d6ebf98724ec8b09dce09aa2a669 | [
"MIT"
] | 1 | 2020-03-03T20:49:57.000Z | 2020-03-03T20:49:57.000Z | """
A simple utility for the dispatcher to store your data, e.g. a database connection or message counters.
"""
import typing
from abc import ABC
from abc import abstractmethod
class AbstractStorage(ABC):
    """Synchronous key/value storage interface used by the dispatcher."""
    @abstractmethod
    def place(self, key: typing.AnyStr, value: typing.Any) -> None:
        """
        Place value to storage.
        :param key: storage key; must not already exist
        :param value: value to store under the key
        :return: None
        """
    @abstractmethod
    def get(
        self, key: typing.AnyStr, default: typing.Any = None
    ) -> typing.Optional[typing.Any]:
        """
        Get value by the key from storage or get default value.
        :param key: storage key to look up
        :param default: value returned when the key is absent
        :return: stored value or ``default``
        """
    @abstractmethod
    def delete(self, key: typing.AnyStr) -> None:
        """
        Delete key/value from storage by key
        :param key: storage key; implementations may raise if it is absent
        :return: None
        """
    @abstractmethod
    def update(self, key: typing.AnyStr, value: typing.Any):
        """
        Update value in storage by the key.
        :param key: existing storage key
        :param value: replacement value
        :return: None
        """
    @abstractmethod
    def exists(self, key: typing.AnyStr):
        """
        Check that the key exists in storage.
        :param key: storage key to test
        :return: True when present
        """
class AbstractAsyncStorage(ABC):
    """Asynchronous (coroutine-based) counterpart of AbstractStorage."""
    @abstractmethod
    async def place(self, key: typing.AnyStr, value: typing.Any) -> None:
        """
        Place value to storage.
        :param key: storage key; must not already exist
        :param value: value to store under the key
        :return: None
        """
    @abstractmethod
    async def get(
        self, key: typing.AnyStr, default: typing.Any = None
    ) -> typing.Optional[typing.Any]:
        """
        Get value by the key from storage or get default value.
        :param key: storage key to look up
        :param default: value returned when the key is absent
        :return: stored value or ``default``
        """
    @abstractmethod
    async def delete(self, key: typing.AnyStr) -> None:
        """
        Delete key/value from storage by key
        :param key: storage key; implementations may raise if it is absent
        :return: None
        """
    @abstractmethod
    async def update(self, key: typing.AnyStr, value: typing.Any):
        """
        Update value in storage by the key.
        :param key: existing storage key
        :param value: replacement value
        :return: None
        """
    @abstractmethod
    async def exists(self, key: typing.AnyStr):
        """
        Check that the key exists in storage.
        :param key: storage key to test
        :return: True when present
        """
class AbstractExpiredStorage(AbstractStorage):
    """Synchronous storage whose entries expire after a given time."""
    @abstractmethod
    def place(
        self, key: typing.AnyStr, value: typing.Any, expire: int
    ) -> None:  # noqa
        # Widens the base signature with an `expire` (seconds) argument.
        pass
class AbstractAsyncExpiredStorage(AbstractAsyncStorage):
    """Asynchronous storage whose entries expire after a given time."""
    @abstractmethod
    async def place(
        self, key: typing.AnyStr, value: typing.Any, expire: int
    ) -> None:  # noqa
        # Widens the base signature with an `expire` (seconds) argument.
        pass
class Storage(AbstractStorage):
    """
    Basic storage

    In-memory key/value store backed by a plain dict. ``place`` refuses to
    overwrite an existing key and ``update`` refuses to create a new one,
    keeping the two operations explicit.
    """

    def __init__(self):
        self._dct = {}

    def place(self, key: typing.AnyStr, value: typing.Any) -> None:
        """Store *value* under *key*; raises RuntimeError if the key exists."""
        if key in self._dct:
            raise RuntimeError("Storage already have this key.")
        self._dct[key] = value

    def get(
        self, key: typing.AnyStr, default: typing.Any = None
    ) -> typing.Optional[typing.Any]:
        """Return the value for *key*, or *default* when the key is absent."""
        # Single dict lookup instead of a membership test followed by indexing.
        return self._dct.get(key, default)

    def delete(self, key: typing.AnyStr) -> None:
        """Remove *key*; raises RuntimeError when it does not exist."""
        if key not in self._dct:
            raise RuntimeError("Undefined key.")
        del self._dct[key]

    def update(self, key: typing.AnyStr, value: typing.Any) -> None:
        """Replace the value of an existing *key*; raises RuntimeError otherwise."""
        if key not in self._dct:
            raise RuntimeError("Storage don`t have this key.")
        self._dct[key] = value

    def exists(self, key: typing.AnyStr) -> bool:
        """Return True when *key* is present in the storage."""
        return key in self._dct
| 24.150327 | 93 | 0.564547 |
6dc42d07e203879ea2e1816469bd740b334feb39 | 2,844 | py | Python | djasana/admin.py | zaptim/django-asana | ab6d7166f28945292d5632ef766fc13cc2ea4cf3 | [
"MIT"
] | null | null | null | djasana/admin.py | zaptim/django-asana | ab6d7166f28945292d5632ef766fc13cc2ea4cf3 | [
"MIT"
] | null | null | null | djasana/admin.py | zaptim/django-asana | ab6d7166f28945292d5632ef766fc13cc2ea4cf3 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib import admin
from django.contrib.admin import widgets
from django.utils.safestring import mark_safe
from djasana import models
def asana_link(obj):
    """Admin list/readonly helper rendering a link to the object's Asana page."""
    url = obj.asana_url()
    anchor = '<a href="{}" target="_blank">View on Asana</a>'.format(url)
    return mark_safe(anchor)
class ParentRawIdWidget(widgets.ForeignKeyRawIdWidget):
    """Raw-id widget that narrows the parent-task popup to the task's project."""

    def url_parameters(self):
        """Extend the lookup-popup query string with a project filter."""
        params = super().url_parameters()
        instance = self.attrs.get('object', None)
        if instance:
            # Filter parent choices by project
            params['projects__id__exact'] = instance.projects.first().pk
        return params
@admin.register(models.Attachment)
class AttachmentAdmin(admin.ModelAdmin):
    """Admin for Asana attachments with a link through to the Asana UI."""
    list_display = ('__str__', 'name', 'parent', asana_link)
    raw_id_fields = ('parent',)
    readonly_fields = (asana_link, 'gid')
@admin.register(models.Project)
class ProjectAdmin(admin.ModelAdmin):
    """Admin for Asana projects, filterable by workspace/team/archived state."""
    date_hierarchy = 'created_at'
    list_display = ('__str__', 'owner', 'archived', asana_link)
    list_filter = ('workspace', 'team', 'archived')
    readonly_fields = ('workspace', 'team', asana_link, 'gid')
    search_fields = ('remote_id', 'name')
class TaskForm(forms.ModelForm):
    """Task change form that restricts parent-task choices to the task's project."""
    class Meta:
        fields = ('name', 'assignee', 'completed', 'completed_at',
                  'due_at', 'due_on', 'parent', 'notes', 'projects')
    def __init__(self, *args, **kwargs):
        """Swap the plain raw-id widget for ParentRawIdWidget on saved tasks."""
        super().__init__(*args, **kwargs)
        # Only a saved instance has a project to filter by.
        if self.instance.pk:
            self.fields['parent'].widget = ParentRawIdWidget(
                rel=self.instance._meta.get_field('parent').remote_field,
                admin_site=admin.site,
                # Pass the object to attrs
                attrs={'object': self.instance}
            )
@admin.register(models.Task)
class TaskAdmin(admin.ModelAdmin):
    """Admin for Asana tasks using TaskForm's project-aware parent widget."""
    date_hierarchy = 'created_at'
    form = TaskForm
    list_display = ('name', 'assignee', 'completed', 'due', asana_link)
    list_filter = ('completed', 'projects__workspace', 'projects__team', 'assignee', 'projects')
    raw_id_fields = ('assignee', 'parent')
    readonly_fields = (asana_link, 'gid')
    search_fields = ('remote_id', 'name')
@admin.register(models.Team)
class TeamAdmin(admin.ModelAdmin):
    """Admin for Asana teams."""
    list_display = ('__str__', asana_link)
    readonly_fields = (asana_link, 'gid')
class UserAdmin(admin.ModelAdmin):
list_display = ('__str__',)
readonly_fields = (asana_link, 'gid')
@admin.register(models.Webhook)
class WebhookAdmin(admin.ModelAdmin):
list_display = ('__str__', 'project')
readonly_fields = ('secret', 'project')
def has_add_permission(self, request):
return False
@admin.register(models.Workspace)
class WorkspaceAdmin(admin.ModelAdmin):
list_display = ('__str__', asana_link)
readonly_fields = (asana_link, 'gid')
| 30.580645 | 96 | 0.669128 |
cacc92b2885500112ab062a72dd0bcb33924ce34 | 5,489 | py | Python | houdini/handlers/games/__init__.py | AllinolCP/houdini-asyncio | a2ae7957eaa26b9a2e96da37eee8982034cd2c6c | [
"MIT"
] | null | null | null | houdini/handlers/games/__init__.py | AllinolCP/houdini-asyncio | a2ae7957eaa26b9a2e96da37eee8982034cd2c6c | [
"MIT"
] | null | null | null | houdini/handlers/games/__init__.py | AllinolCP/houdini-asyncio | a2ae7957eaa26b9a2e96da37eee8982034cd2c6c | [
"MIT"
] | null | null | null | import random
import time
from sqlalchemy.dialects.postgresql import insert
from houdini import handlers
from houdini.constants import ClientType
from houdini.converters import OptionalConverter
from houdini.data.game import PenguinGameData
from houdini.data.room import Room
from houdini.handlers import XTPacket
from houdini.handlers.play.moderation import cheat_ban
from houdini.handlers.play.navigation import handle_join_room
default_score_games = {904, 905, 906, 912, 916, 917, 918, 919, 950, 952}


def determine_coins_earned(p, score):
    """Convert a game score into coins: listed rooms pay 1:1, all others 1/10."""
    if p.room.id in default_score_games:
        return score
    return score // 10
async def determine_coins_overdose(p, coins):
    """Return True when *coins* exceeds the per-minute earning cap since play began."""
    redis_key = f'{p.id}.overdose'
    started_at = await p.server.redis.get(redis_key)
    if started_at is None:
        # No game-start timestamp recorded: treat the earnings as suspicious.
        return True
    elapsed_minutes = ((time.time() - float(started_at)) // 60) + 1
    allowed = p.server.config.max_coins_per_min * elapsed_minutes
    return coins > allowed
@handlers.handler(XTPacket('j', 'jr'), before=handle_join_room)
async def handle_overdose_key(p, room: Room):
    # Runs before a room join: keep the per-penguin overdose timestamp in
    # redis in sync with whether the penguin is inside a game room.
    if p.room.game and not room.game:
        # Leaving a game room for a normal room: drop the timestamp.
        overdose_key = f'{p.id}.overdose'
        await p.server.redis.delete(overdose_key)
    elif room.game:
        # Entering a game room: record when play started.
        overdose_key = f'{p.id}.overdose'
        await p.server.redis.set(overdose_key, time.time())
@handlers.disconnected
@handlers.player_attribute(joined_world=True)
async def disconnect_overdose_key(p):
    # Clean up the overdose timestamp if the penguin disconnects mid-game.
    if p.room.game:
        overdose_key = f'{p.id}.overdose'
        await p.server.redis.delete(overdose_key)
async def game_over_cooling(p):
    # Cooldown callback for rate-limited 'zo' packets: acknowledge the game
    # over with the current coin total and empty stamp information.
    await p.send_xt('zo', p.coins, '', 0, 0, 0)
@handlers.handler(XTPacket('m', ext='z'))
@handlers.player_in_room(802)
async def handle_send_move_puck(p, _, x: int, y: int, speed_x: int, speed_y: int):
    # Ice-rink (room 802) puck movement: remember the position server-wide
    # and relay the move to everyone in the room.
    p.server.puck = (x, y)
    await p.room.send_xt('zm', p.id, x, y, speed_x, speed_y)
@handlers.handler(XTPacket('gz', ext='z'))
@handlers.player_in_room(802)
async def handle_get_puck(p):
    # Send the current puck coordinates to a client entering the rink.
    await p.send_xt('gz', *p.server.puck)
@handlers.handler(XTPacket('zo', ext='z'))
@handlers.cooldown(10, callback=game_over_cooling)
async def handle_get_game_over(p, score: int):
    # Game-over: convert the reported score to coins, enforce the anti-cheat
    # earning cap, apply the stamp-completion bonus, then award the coins.
    # Only free-roaming single-player games pay out here; waddle and table
    # games are excluded.
    if p.room.game and not p.waddle and not p.table:
        coins_earned = determine_coins_earned(p, score)
        if await determine_coins_overdose(p, coins_earned):
            # Earning faster than the configured per-minute cap -> auto-ban.
            return await cheat_ban(p, p.id, comment='Coins overdose')
        collected_stamps_string, total_collected_stamps, total_game_stamps, total_stamps = '', 0, 0, 0
        if p.room.stamp_group:
            game_stamps = [stamp for stamp in p.server.stamps.values() if stamp.group_id == p.room.stamp_group]
            collected_stamps = [stamp for stamp in game_stamps if stamp.id in p.stamps]
            total_stamps = len([stamp for stamp in p.stamps.values() if p.server.stamps[stamp.stamp_id].group_id])
            total_collected_stamps = len(collected_stamps)
            total_game_stamps = len(game_stamps)
            collected_stamps_string = '|'.join(str(stamp.id) for stamp in collected_stamps)
            # Collecting every stamp of the game doubles the payout.
            if total_collected_stamps == total_game_stamps:
                coins_earned *= 2
        # Clamp the balance at the server-wide coin ceiling.
        await p.update(coins=min(p.coins + coins_earned, p.server.config.max_coins)).apply()
        await p.send_xt('zo', p.coins,
                        collected_stamps_string,
                        total_collected_stamps,
                        total_game_stamps,
                        total_stamps)
@handlers.handler(XTPacket('ggd', ext='z'), client=ClientType.Vanilla)
async def handle_get_game_data(p, index: int = 0):
    # Fetch the saved game-data blob for this penguin/room/slot index,
    # sending an empty string when nothing has been stored.
    game_data = await PenguinGameData.select('data').where((PenguinGameData.penguin_id == p.id) &
                                                           (PenguinGameData.room_id == p.room.id) &
                                                           (PenguinGameData.index == index)).gino.scalar()
    await p.send_xt('ggd', game_data or '')
@handlers.handler(XTPacket('sgd', ext='z'), client=ClientType.Vanilla)
@handlers.cooldown(5)
async def handle_set_game_data(p, index: OptionalConverter(int) = 0, *, game_data: str):
    """Persist per-penguin, per-room game data, keyed on (penguin, room, index).

    Rate-limited to one save per 5 seconds. Only applies while the room hosts a game.
    """
    if p.room.game:
        data_insert = insert(PenguinGameData).values(penguin_id=p.id, room_id=p.room.id, index=index, data=game_data)
        # Upsert: on primary-key conflict, overwrite the existing row's data.
        data_insert = data_insert.on_conflict_do_update(
            constraint='penguin_game_data_pkey',
            set_=dict(data=game_data),
            where=((PenguinGameData.penguin_id == p.id)
                   & (PenguinGameData.room_id == p.room.id)
                   & (PenguinGameData.index == index))
        )
        await data_insert.gino.scalar()
@handlers.handler(XTPacket('zr', ext='z'), client=ClientType.Vanilla)
@handlers.player_attribute(agent_status=True)
async def handle_get_game_again(p):
    """Offer three distinct random game ids (from 1-10) plus a random value in 1-6."""
    pool = list(range(1, 11))
    # Draw three ids without replacement. The pop(randrange(len(pool))) sequence
    # consumes the RNG exactly like the original inline f-string did.
    picks = [pool.pop(random.randrange(len(pool))) for _ in range(3)]
    await p.send_xt('zr', ','.join(str(game_id) for game_id in picks), random.randint(1, 6))
@handlers.handler(XTPacket('zc', ext='z'), client=ClientType.Vanilla)
@handlers.player_attribute(agent_status=True)
@handlers.cooldown(5)
async def handle_game_complete(p, medals: int):
    """Award agent medals for a completed game (agents only, rate-limited).

    The client-reported medal count is clamped to at most 6 to guard against
    inflated values.
    """
    medals = min(6, medals)
    await p.update(career_medals=p.career_medals + medals,
                   agent_medals=p.agent_medals + medals).apply()
| 38.384615 | 117 | 0.672982 |
f9fa6a993550b6b1307afe3086f925ecd30cf451 | 7,172 | py | Python | Liquid-job-benchmarks/perfzero/lib/benchmark.py | PasaLab/YAO | 2e70203197cd79f9522d65731ee5dc0eb236b005 | [
"Apache-2.0"
] | 2 | 2021-08-30T14:12:09.000Z | 2022-01-20T02:14:22.000Z | Liquid-job-benchmarks/perfzero/lib/benchmark.py | PasaLab/YAO | 2e70203197cd79f9522d65731ee5dc0eb236b005 | [
"Apache-2.0"
] | null | null | null | Liquid-job-benchmarks/perfzero/lib/benchmark.py | PasaLab/YAO | 2e70203197cd79f9522d65731ee5dc0eb236b005 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Execute benchmark."""
from __future__ import print_function
import argparse
import json
import logging
import multiprocessing
import os
import re
import sys
import time
import perfzero.benchmark_method_runner as benchmark_method_runner
import perfzero.perfzero_config as perfzero_config
import perfzero.utils as utils
class BenchmarkRunner(object):
  """Execute benchmark and report results.

  Directory layout (relative to the PerfZero project root):
    workspace/site-packages  -- checked-out benchmark repositories
    workspace/output         -- per-run benchmark output
  """

  def __init__(self, config):
    # config: a PerfZeroConfig holding flags (workspace, data downloads, etc.).
    self.config = config
    # Project root is two directories above this file.
    self.project_dir = os.path.abspath(
        os.path.dirname(os.path.dirname(__file__)))
    self.workspace_dir = os.path.join(self.project_dir, config.workspace)
    self.site_packages_dir = os.path.join(self.workspace_dir, 'site-packages')
    self.root_output_dir = os.path.join(self.workspace_dir, 'output')
    # Maps operation/benchmark name -> wall-clock seconds; printed at the end.
    self.benchmark_execution_time = {}

  def _setup(self):
    """Prepare the environment: gcloud auth, data downloads, git checkouts, TPU.

    Returns:
      site_package_info: metadata about the checked-out git repositories,
        as returned by utils.checkout_git_repos.
    """
    # Activate gcloud service
    start_time = time.time()
    utils.setup_python_path(self.site_packages_dir, self.config.python_path_str)
    utils.active_gcloud_service(self.config.gcloud_key_file_url,
                                self.workspace_dir)
    utils.make_dir_if_not_exist(self.root_output_dir)
    self.benchmark_execution_time['activate_gcloud_service'] = (
        time.time() - start_time)

    # Download data from GCS and other configured sources
    start_time = time.time()
    utils.download_data(utils.parse_data_downloads_str(
        self.config.root_data_dir, self.config.gcs_downloads_str))
    utils.download_data(utils.parse_data_downloads_str(
        self.config.root_data_dir, self.config.data_downloads_str))
    self.benchmark_execution_time['download_data'] = time.time() - start_time

    # Checkout git repositories containing the benchmark code
    start_time = time.time()
    site_package_info = utils.checkout_git_repos(
        self.config.get_git_repos(self.site_packages_dir),
        self.config.use_cached_site_packages)
    self.benchmark_execution_time['checkout_repository'] = (
        time.time() - start_time)

    # Start cloud TPU, only when TPU parameters are configured.
    if self.config.tpu_parameters is not None:
      start_time = time.time()
      utils.setup_tpu(self.config.tpu_parameters)
      self.benchmark_execution_time['start_tpu'] = time.time() - start_time

    # Mirror log records to stdout for the rest of the run.
    self.stream_handler = logging.StreamHandler(sys.stdout)
    self.stream_handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
    logging.getLogger().addHandler(self.stream_handler)

    return site_package_info

  def _get_benchmark_methods(self):
    """Returns list of benchmark methods to execute.

    Each configured pattern is either a fully-qualified method name, or a
    'ClassPath.filter:<regex>' entry that expands to every method of the class
    whose name matches <regex>.
    """
    filter_prefix = 'filter:'
    benchmark_methods = []
    for benchmark_method_pattern in self.config.benchmark_method_patterns:
      if filter_prefix not in benchmark_method_pattern:
        benchmark_methods.append(benchmark_method_pattern)
      else:
        # Split '<class>.<filter_prefix><regex>' into class path and regex.
        index = benchmark_method_pattern.find(filter_prefix)
        benchmark_class = benchmark_method_pattern[:index - 1]
        pattern = benchmark_method_pattern[index + len(filter_prefix):]
        # Instantiate with throw-away output settings just to enumerate methods.
        class_instance = utils.instantiate_benchmark_class(benchmark_class,
                                                           '/dev/null',
                                                           '',
                                                           None,
                                                           {})
        for benchmark_method_name in dir(class_instance):
          if re.match(pattern, benchmark_method_name):
            benchmark_methods.append(benchmark_class + '.' +
                                     benchmark_method_name)
    logging.info('The following benchmark methods will be executed: %s',
                 benchmark_methods)

    return benchmark_methods

  def run_benchmark(self):
    """Run every configured benchmark method and report aggregate results.

    Exits the process with status 1 if any benchmark raised an exception or
    TPU cleanup failed.
    """
    harness_info = utils.get_git_repo_info(self.project_dir)
    site_package_info = self._setup()
    has_exception = False
    benchmark_success_results = {}
    benchmark_output_dirs = {}

    try:
      for benchmark_method in self._get_benchmark_methods():
        # Run the benchmark method in a separate process so that its memory
        # usage will not affect the execution of other benchmark methods.
        # This is a workaround until all memory leak issues in TensorFlow
        # are fixed.
        queue = multiprocessing.Queue()
        process = multiprocessing.Process(target=benchmark_method_runner.run,
                                          args=(benchmark_method,
                                                harness_info,
                                                site_package_info,
                                                self.root_output_dir,
                                                self.config, queue))
        process.start()
        process.join()

        # The child reports its outcome through the queue.
        method_has_exception, method_execution_time, succeeded, output_dir = queue.get()  # pylint: disable=line-too-long
        has_exception |= method_has_exception
        self.benchmark_execution_time[benchmark_method] = method_execution_time
        benchmark_success_results[benchmark_method] = succeeded
        benchmark_output_dirs[benchmark_method] = output_dir
    finally:
      # Always tear down the TPU (when one was started) and print summaries.
      if self.config.tpu_parameters is not None:
        has_exception |= utils.cleanup_tpu(self.config.tpu_parameters)
      print('Benchmark execution time in seconds by operation:\n {}'.format(
          json.dumps(self.benchmark_execution_time, indent=2)))
      print('Benchmark success results:\n{}'.format(
          json.dumps(benchmark_success_results, indent=2)))
      print('Benchmark local output directories:\n{}'.format(
          json.dumps(benchmark_output_dirs, indent=2)))
    if has_exception:
      sys.exit(1)
if __name__ == '__main__':
  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  perfzero_config.add_benchmark_parser_arguments(parser)
  # parse_known_args() so unexpected flags can be reported explicitly below.
  FLAGS, unparsed = parser.parse_known_args()

  level = logging.DEBUG if FLAGS.debug else logging.INFO
  logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                      level=level)
  # Unrecognized flags are a hard error rather than being silently ignored.
  if unparsed:
    logging.error('Arguments %s are not recognized', unparsed)
    sys.exit(1)

  config_ = perfzero_config.PerfZeroConfig(mode='flags', flags=FLAGS)
  benchmark_runner = BenchmarkRunner(config_)
  benchmark_runner.run_benchmark()
| 42.946108 | 122 | 0.657278 |
8f54fbd630e2a314b2535bfbe2b20396b48c315a | 2,809 | py | Python | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DescribeEniMonitorDataRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DescribeEniMonitorDataRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DescribeEniMonitorDataRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeEniMonitorDataRequest(RpcRequest):
    """RPC request for the Aliyun ECS ``DescribeEniMonitorData`` API (version 2014-05-26).

    Generated-style SDK request class: every API parameter is exposed through a
    get_/set_ pair that reads from / writes to the request's query parameters.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeEniMonitorData','ecs')
        # Attach endpoint routing data when the base class supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    # --- Query-parameter accessors (boilerplate getter/setter pattern) ---

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self,ResourceOwnerId):
        self.add_query_param('ResourceOwnerId',ResourceOwnerId)

    def get_StartTime(self):
        return self.get_query_params().get('StartTime')

    def set_StartTime(self,StartTime):
        self.add_query_param('StartTime',StartTime)

    def get_Period(self):
        return self.get_query_params().get('Period')

    def set_Period(self,Period):
        self.add_query_param('Period',Period)

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self,OwnerAccount):
        self.add_query_param('OwnerAccount',OwnerAccount)

    def get_EndTime(self):
        return self.get_query_params().get('EndTime')

    def set_EndTime(self,EndTime):
        self.add_query_param('EndTime',EndTime)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self,OwnerId):
        self.add_query_param('OwnerId',OwnerId)

    def get_InstanceId(self):
        return self.get_query_params().get('InstanceId')

    def set_InstanceId(self,InstanceId):
        self.add_query_param('InstanceId',InstanceId)

    def get_EniId(self):
        return self.get_query_params().get('EniId')

    def set_EniId(self,EniId):
        self.add_query_param('EniId',EniId)
bc12665de4a31310b536606837e1750925d5217a | 12,207 | py | Python | glance/gateway.py | kfwang/Glance-OVA-OVF | e983c3c79987e59d644917646edc6b0b7fd219d0 | [
"Apache-2.0"
] | null | null | null | glance/gateway.py | kfwang/Glance-OVA-OVF | e983c3c79987e59d644917646edc6b0b7fd219d0 | [
"Apache-2.0"
] | null | null | null | glance/gateway.py | kfwang/Glance-OVA-OVF | e983c3c79987e59d644917646edc6b0b7fd219d0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance_store
from oslo_log import log as logging
from glance.api import authorization
from glance.api import policy
from glance.api import property_protections
from glance.common import exception
from glance.common import property_utils
from glance.common import store_utils
import glance.db
import glance.domain
from glance.i18n import _LE
import glance.location
import glance.notifier
import glance.quota
# glance.search has optional dependencies (e.g. elasticsearch). Fall back to
# None when it cannot be imported; Gateway.get_catalog_search_repo checks for
# this and raises SearchNotAvailable instead of crashing at import time.
try:
    import glance.search
    glance_search = glance.search
except ImportError:
    glance_search = None

LOG = logging.getLogger(__name__)
class Gateway(object):
    """Factory for fully-wrapped Glance domain objects.

    Each ``get_*`` method builds a bare domain object (factory or repo) and
    wraps it in a stack of proxies, innermost to outermost, typically:
    storage/quota -> policy -> notifier -> (optional property protection)
    -> authorization. Callers receive the outermost proxy.
    """

    def __init__(self, db_api=None, store_api=None, notifier=None,
                 policy_enforcer=None, es_api=None):
        # Fall back to process-wide defaults when a collaborator is not injected.
        self.db_api = db_api or glance.db.get_api()
        self.store_api = store_api or glance_store
        self.store_utils = store_utils
        self.notifier = notifier or glance.notifier.Notifier()
        self.policy = policy_enforcer or policy.Enforcer()
        if es_api:
            self.es_api = es_api
        else:
            # glance_search is None when the optional search deps are missing.
            self.es_api = glance_search.get_api() if glance_search else None

    def get_image_factory(self, context):
        """Return the image factory wrapped in store, quota, policy, notifier,
        optional property-protection, and authorization proxies."""
        image_factory = glance.domain.ImageFactory()
        store_image_factory = glance.location.ImageFactoryProxy(
            image_factory, context, self.store_api, self.store_utils)
        quota_image_factory = glance.quota.ImageFactoryProxy(
            store_image_factory, context, self.db_api, self.store_utils)
        policy_image_factory = policy.ImageFactoryProxy(
            quota_image_factory, context, self.policy)
        notifier_image_factory = glance.notifier.ImageFactoryProxy(
            policy_image_factory, context, self.notifier)
        if property_utils.is_property_protection_enabled():
            property_rules = property_utils.PropertyRules(self.policy)
            pif = property_protections.ProtectedImageFactoryProxy(
                notifier_image_factory, context, property_rules)
            authorized_image_factory = authorization.ImageFactoryProxy(
                pif, context)
        else:
            authorized_image_factory = authorization.ImageFactoryProxy(
                notifier_image_factory, context)
        return authorized_image_factory

    def get_image_member_factory(self, context):
        """Return the image-member factory wrapped in quota, policy and
        authorization proxies."""
        image_factory = glance.domain.ImageMemberFactory()
        quota_image_factory = glance.quota.ImageMemberFactoryProxy(
            image_factory, context, self.db_api, self.store_utils)
        policy_member_factory = policy.ImageMemberFactoryProxy(
            quota_image_factory, context, self.policy)
        authorized_image_factory = authorization.ImageMemberFactoryProxy(
            policy_member_factory, context)
        return authorized_image_factory

    def get_repo(self, context):
        """Return the image repo wrapped in store, quota, policy, notifier,
        optional property-protection, and authorization proxies."""
        image_repo = glance.db.ImageRepo(context, self.db_api)
        store_image_repo = glance.location.ImageRepoProxy(
            image_repo, context, self.store_api, self.store_utils)
        quota_image_repo = glance.quota.ImageRepoProxy(
            store_image_repo, context, self.db_api, self.store_utils)
        policy_image_repo = policy.ImageRepoProxy(
            quota_image_repo, context, self.policy)
        notifier_image_repo = glance.notifier.ImageRepoProxy(
            policy_image_repo, context, self.notifier)
        if property_utils.is_property_protection_enabled():
            property_rules = property_utils.PropertyRules(self.policy)
            pir = property_protections.ProtectedImageRepoProxy(
                notifier_image_repo, context, property_rules)
            authorized_image_repo = authorization.ImageRepoProxy(
                pir, context)
        else:
            authorized_image_repo = authorization.ImageRepoProxy(
                notifier_image_repo, context)
        return authorized_image_repo

    def get_task_factory(self, context):
        """Return the task factory wrapped in policy, notifier and
        authorization proxies."""
        task_factory = glance.domain.TaskFactory()
        policy_task_factory = policy.TaskFactoryProxy(
            task_factory, context, self.policy)
        notifier_task_factory = glance.notifier.TaskFactoryProxy(
            policy_task_factory, context, self.notifier)
        authorized_task_factory = authorization.TaskFactoryProxy(
            notifier_task_factory, context)
        return authorized_task_factory

    def get_task_repo(self, context):
        """Return the task repo wrapped in policy, notifier and authorization
        proxies."""
        task_repo = glance.db.TaskRepo(context, self.db_api)
        policy_task_repo = policy.TaskRepoProxy(
            task_repo, context, self.policy)
        notifier_task_repo = glance.notifier.TaskRepoProxy(
            policy_task_repo, context, self.notifier)
        authorized_task_repo = authorization.TaskRepoProxy(
            notifier_task_repo, context)
        return authorized_task_repo

    def get_task_stub_repo(self, context):
        """Return the task-stub repo wrapped in policy, notifier and
        authorization proxies."""
        task_stub_repo = glance.db.TaskRepo(context, self.db_api)
        policy_task_stub_repo = policy.TaskStubRepoProxy(
            task_stub_repo, context, self.policy)
        notifier_task_stub_repo = glance.notifier.TaskStubRepoProxy(
            policy_task_stub_repo, context, self.notifier)
        authorized_task_stub_repo = authorization.TaskStubRepoProxy(
            notifier_task_stub_repo, context)
        return authorized_task_stub_repo

    def get_task_executor_factory(self, context):
        """Return a TaskExecutorFactory built from the wrapped task repo,
        image repo and image factory."""
        task_repo = self.get_task_repo(context)
        image_repo = self.get_repo(context)
        image_factory = self.get_image_factory(context)
        return glance.domain.TaskExecutorFactory(task_repo,
                                                 image_repo,
                                                 image_factory)

    def get_metadef_namespace_factory(self, context):
        """Return the metadef namespace factory wrapped in policy, notifier
        and authorization proxies."""
        ns_factory = glance.domain.MetadefNamespaceFactory()
        policy_ns_factory = policy.MetadefNamespaceFactoryProxy(
            ns_factory, context, self.policy)
        notifier_ns_factory = glance.notifier.MetadefNamespaceFactoryProxy(
            policy_ns_factory, context, self.notifier)
        authorized_ns_factory = authorization.MetadefNamespaceFactoryProxy(
            notifier_ns_factory, context)
        return authorized_ns_factory

    def get_metadef_namespace_repo(self, context):
        """Return the metadef namespace repo wrapped in policy, notifier and
        authorization proxies."""
        ns_repo = glance.db.MetadefNamespaceRepo(context, self.db_api)
        policy_ns_repo = policy.MetadefNamespaceRepoProxy(
            ns_repo, context, self.policy)
        notifier_ns_repo = glance.notifier.MetadefNamespaceRepoProxy(
            policy_ns_repo, context, self.notifier)
        authorized_ns_repo = authorization.MetadefNamespaceRepoProxy(
            notifier_ns_repo, context)
        return authorized_ns_repo

    def get_metadef_object_factory(self, context):
        """Return the metadef object factory wrapped in policy, notifier and
        authorization proxies."""
        object_factory = glance.domain.MetadefObjectFactory()
        policy_object_factory = policy.MetadefObjectFactoryProxy(
            object_factory, context, self.policy)
        notifier_object_factory = glance.notifier.MetadefObjectFactoryProxy(
            policy_object_factory, context, self.notifier)
        authorized_object_factory = authorization.MetadefObjectFactoryProxy(
            notifier_object_factory, context)
        return authorized_object_factory

    def get_metadef_object_repo(self, context):
        """Return the metadef object repo wrapped in policy, notifier and
        authorization proxies."""
        object_repo = glance.db.MetadefObjectRepo(context, self.db_api)
        policy_object_repo = policy.MetadefObjectRepoProxy(
            object_repo, context, self.policy)
        notifier_object_repo = glance.notifier.MetadefObjectRepoProxy(
            policy_object_repo, context, self.notifier)
        authorized_object_repo = authorization.MetadefObjectRepoProxy(
            notifier_object_repo, context)
        return authorized_object_repo

    def get_metadef_resource_type_factory(self, context):
        """Return the metadef resource-type factory wrapped in policy,
        notifier and authorization proxies."""
        resource_type_factory = glance.domain.MetadefResourceTypeFactory()
        policy_resource_type_factory = policy.MetadefResourceTypeFactoryProxy(
            resource_type_factory, context, self.policy)
        notifier_resource_type_factory = (
            glance.notifier.MetadefResourceTypeFactoryProxy(
                policy_resource_type_factory, context, self.notifier)
        )
        authorized_resource_type_factory = (
            authorization.MetadefResourceTypeFactoryProxy(
                notifier_resource_type_factory, context)
        )
        return authorized_resource_type_factory

    def get_metadef_resource_type_repo(self, context):
        """Return the metadef resource-type repo wrapped in policy, notifier
        and authorization proxies."""
        resource_type_repo = glance.db.MetadefResourceTypeRepo(
            context, self.db_api)
        policy_object_repo = policy.MetadefResourceTypeRepoProxy(
            resource_type_repo, context, self.policy)
        notifier_object_repo = glance.notifier.MetadefResourceTypeRepoProxy(
            policy_object_repo, context, self.notifier)
        authorized_object_repo = authorization.MetadefResourceTypeRepoProxy(
            notifier_object_repo, context)
        return authorized_object_repo

    def get_metadef_property_factory(self, context):
        """Return the metadef property factory wrapped in policy, notifier
        and authorization proxies."""
        prop_factory = glance.domain.MetadefPropertyFactory()
        policy_prop_factory = policy.MetadefPropertyFactoryProxy(
            prop_factory, context, self.policy)
        notifier_prop_factory = glance.notifier.MetadefPropertyFactoryProxy(
            policy_prop_factory, context, self.notifier)
        authorized_prop_factory = authorization.MetadefPropertyFactoryProxy(
            notifier_prop_factory, context)
        return authorized_prop_factory

    def get_metadef_property_repo(self, context):
        """Return the metadef property repo wrapped in policy, notifier and
        authorization proxies."""
        prop_repo = glance.db.MetadefPropertyRepo(context, self.db_api)
        policy_prop_repo = policy.MetadefPropertyRepoProxy(
            prop_repo, context, self.policy)
        notifier_prop_repo = glance.notifier.MetadefPropertyRepoProxy(
            policy_prop_repo, context, self.notifier)
        authorized_prop_repo = authorization.MetadefPropertyRepoProxy(
            notifier_prop_repo, context)
        return authorized_prop_repo

    def get_metadef_tag_factory(self, context):
        """Return the metadef tag factory wrapped in policy, notifier and
        authorization proxies."""
        tag_factory = glance.domain.MetadefTagFactory()
        policy_tag_factory = policy.MetadefTagFactoryProxy(
            tag_factory, context, self.policy)
        notifier_tag_factory = glance.notifier.MetadefTagFactoryProxy(
            policy_tag_factory, context, self.notifier)
        authorized_tag_factory = authorization.MetadefTagFactoryProxy(
            notifier_tag_factory, context)
        return authorized_tag_factory

    def get_metadef_tag_repo(self, context):
        """Return the metadef tag repo wrapped in policy, notifier and
        authorization proxies."""
        tag_repo = glance.db.MetadefTagRepo(context, self.db_api)
        policy_tag_repo = policy.MetadefTagRepoProxy(
            tag_repo, context, self.policy)
        notifier_tag_repo = glance.notifier.MetadefTagRepoProxy(
            policy_tag_repo, context, self.notifier)
        authorized_tag_repo = authorization.MetadefTagRepoProxy(
            notifier_tag_repo, context)
        return authorized_tag_repo

    def get_catalog_search_repo(self, context):
        """Return the policy-wrapped catalog search repo.

        Raises:
            exception.SearchNotAvailable: when the optional search backend
                (e.g. elasticsearch) could not be imported (es_api is None).
        """
        if self.es_api is None:
            LOG.error(_LE('The search and index services are not available. '
                          'Ensure you have the necessary prerequisite '
                          'dependencies installed like elasticsearch to use '
                          'these services.'))
            raise exception.SearchNotAvailable()
        search_repo = glance.search.CatalogSearchRepo(context, self.es_api)
        policy_search_repo = policy.CatalogSearchRepoProxy(
            search_repo, context, self.policy)
        return policy_search_repo
| 46.770115 | 78 | 0.709921 |
16b395970659274a4f2a8cf357348497720aef6d | 23,154 | py | Python | src/datasets/formatting/formatting.py | Jebrankhan/datasets | 920faf40c0766cb71d2dbba71554181881cc3e7b | [
"Apache-2.0"
] | null | null | null | src/datasets/formatting/formatting.py | Jebrankhan/datasets | 920faf40c0766cb71d2dbba71554181881cc3e7b | [
"Apache-2.0"
] | null | null | null | src/datasets/formatting/formatting.py | Jebrankhan/datasets | 920faf40c0766cb71d2dbba71554181881cc3e7b | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
# Lint as: python3
from typing import Any, Callable, Dict, Generic, Iterable, List, MutableMapping, Optional, TypeVar, Union
import numpy as np
import pandas as pd
import pyarrow as pa
from ..features import _ArrayXDExtensionType, _is_zero_copy_only, decode_nested_example, pandas_types_mapper
from ..table import Table
from ..utils import no_op_if_value_is_null
# Generic placeholders used by the extractor/formatter classes below:
# T for element types, and one TypeVar per output form (row / column / batch),
# e.g. dict / np.ndarray / pd.DataFrame depending on the concrete subclass.
T = TypeVar("T")

RowFormat = TypeVar("RowFormat")
ColumnFormat = TypeVar("ColumnFormat")
BatchFormat = TypeVar("BatchFormat")
def _is_range_contiguous(key: range) -> bool:
return key.step == 1 and key.stop >= key.start
def _raise_bad_key_type(key: Any):
raise TypeError(
f"Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable."
)
def _query_table_with_indices_mapping(
    table: Table, key: Union[int, slice, range, str, Iterable], indices: Table
) -> pa.Table:
    """
    Query a pyarrow Table to extract the subtable that corresponds to the given key.

    The :obj:`indices` parameter is an indices mapping used to take into account a
    shuffling or an indices selection, for example.
    The indices table must contain one column named "indices" of type uint64.

    Note: the dispatch below is order-sensitive — a slice is normalized into a
    range, and a non-contiguous range deliberately falls through to the
    Iterable branch.
    """
    if isinstance(key, int):
        # Negative ints wrap around via the modulo; look up the real row index
        # in the mapping, then query the underlying table with it.
        key = indices.fast_slice(key % indices.num_rows, 1).column(0)[0].as_py()

        return _query_table(table, key)
    if isinstance(key, slice):
        key = range(*key.indices(indices.num_rows))
    if isinstance(key, range):
        if _is_range_contiguous(key) and key.start >= 0:
            # Contiguous span: resolve the mapped indices in one slice.
            return _query_table(
                table, [i.as_py() for i in indices.fast_slice(key.start, key.stop - key.start).column(0)]
            )
        else:
            pass  # treat as an iterable
    if isinstance(key, str):
        # Column query: drop every other column, then gather all mapped rows.
        table = table.drop([column for column in table.column_names if column != key])
        return _query_table(table, indices.column(0).to_pylist())
    if isinstance(key, Iterable):
        # Resolve each requested position through the indices mapping.
        return _query_table(table, [indices.fast_slice(i, 1).column(0)[0].as_py() for i in key])

    _raise_bad_key_type(key)
def _query_table(table: Table, key: Union[int, slice, range, str, Iterable]) -> pa.Table:
    """
    Query a pyarrow Table to extract the subtable that corresponds to the given key.

    Supports int (single row, negative wraps around), slice/range (contiguous
    spans use a fast slice; others fall through to the Iterable branch),
    str (single-column selection) and Iterable of row indices.
    """
    if isinstance(key, int):
        return table.fast_slice(key % table.num_rows, 1)
    if isinstance(key, slice):
        key = range(*key.indices(table.num_rows))
    if isinstance(key, range):
        if _is_range_contiguous(key) and key.start >= 0:
            return table.fast_slice(key.start, key.stop - key.start)
        else:
            pass  # treat as an iterable
    if isinstance(key, str):
        # Keep only the requested column; all rows are returned.
        return table.table.drop([column for column in table.column_names if column != key])
    if isinstance(key, Iterable):
        key = np.fromiter(key, np.int64)
        if len(key) == 0:
            # Empty selection: zero-row table with the original schema.
            return table.table.slice(0, 0)
        # don't use pyarrow.Table.take even for pyarrow >=1.0 (see https://issues.apache.org/jira/browse/ARROW-9773)
        return table.fast_gather(key % table.num_rows)

    _raise_bad_key_type(key)
def _is_array_with_nulls(pa_array: pa.Array) -> bool:
    """Return True when the arrow array contains at least one null value."""
    return pa_array.null_count != 0
class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]):
    """
    Arrow extractors are used to extract data from pyarrow tables.

    They make it possible to extract rows, columns and batches.
    Subclasses must implement all three extraction methods, choosing the
    concrete output types via the three type parameters.
    """

    def extract_row(self, pa_table: pa.Table) -> RowFormat:
        raise NotImplementedError

    def extract_column(self, pa_table: pa.Table) -> ColumnFormat:
        raise NotImplementedError

    def extract_batch(self, pa_table: pa.Table) -> BatchFormat:
        raise NotImplementedError
def _unnest(py_dict: Dict[str, List[T]]) -> Dict[str, T]:
    """Return the first element of a batch (dict of lists) as a row (dict of values)."""
    row = {}
    for column_name, values in py_dict.items():
        row[column_name] = values[0]
    return row
class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]):
    """Pass-through extractor returning pyarrow objects without any conversion."""

    def extract_row(self, pa_table: pa.Table) -> pa.Table:
        # The "row" keeps its pa.Table form; the input is presumably already
        # sliced to a single row by the querying layer — confirm with callers.
        return pa_table

    def extract_column(self, pa_table: pa.Table) -> pa.Array:
        return pa_table.column(0)

    def extract_batch(self, pa_table: pa.Table) -> pa.Table:
        return pa_table
class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]):
    """Extractor converting pyarrow data to plain Python objects (dicts / lists)."""

    def extract_row(self, pa_table: pa.Table) -> dict:
        # to_pydict() yields column -> list; _unnest keeps only the first value
        # of each column, producing a single-row mapping.
        return _unnest(pa_table.to_pydict())

    def extract_column(self, pa_table: pa.Table) -> list:
        return pa_table.column(0).to_pylist()

    def extract_batch(self, pa_table: pa.Table) -> dict:
        return pa_table.to_pydict()
class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]):
    """Extractor converting pyarrow data to numpy arrays.

    Extra keyword arguments (e.g. ``dtype``) are forwarded to ``np.array``
    when the final array is built.
    """

    def __init__(self, **np_array_kwargs):
        self.np_array_kwargs = np_array_kwargs

    def extract_row(self, pa_table: pa.Table) -> dict:
        """Return the first row as a dict of column name -> value."""
        return _unnest(self.extract_batch(pa_table))

    def extract_column(self, pa_table: pa.Table) -> np.ndarray:
        """Return the first column as a numpy array."""
        return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]])

    def extract_batch(self, pa_table: pa.Table) -> dict:
        """Return the whole table as a dict of column name -> numpy array."""
        return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names}

    def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray:
        """Convert a pyarrow (Chunked)Array to a numpy array, zero-copy when possible.

        Zero-copy conversion is only requested when the arrow dtype allows it
        and the data contains no nulls. ArrayXD extension types keep their
        fixed-size-array dtype instead of being round-tripped through Python lists.
        """
        if isinstance(pa_array, pa.ChunkedArray):
            if isinstance(pa_array.type, _ArrayXDExtensionType):
                # don't call to_pylist() to preserve dtype of the fixed-size array
                zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
                if pa_array.type.shape[0] is None:
                    # First dimension is dynamic: one numpy array per row.
                    array: List = [
                        row
                        for chunk in pa_array.chunks
                        for row in chunk.to_list_of_numpy(zero_copy_only=zero_copy_only)
                    ]
                else:
                    array: List = [
                        row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
                    ]
            else:
                # Zero-copy requires every chunk to be free of nulls.
                zero_copy_only = _is_zero_copy_only(pa_array.type) and all(
                    not _is_array_with_nulls(chunk) for chunk in pa_array.chunks
                )
                array: List = [
                    row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
                ]
        else:
            if isinstance(pa_array.type, _ArrayXDExtensionType):
                # don't call to_pylist() to preserve dtype of the fixed-size array
                zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
                if pa_array.type.shape[0] is None:
                    array: List = pa_array.to_list_of_numpy(zero_copy_only=zero_copy_only)
                else:
                    array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only)
            else:
                zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array)
                array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist()
        if len(array) > 0:
            if any(
                # Use the builtin `object` rather than the `np.object` alias: the
                # alias was deprecated in NumPy 1.20 and removed in 1.24, and it
                # was defined as exactly the builtin, so behavior is unchanged.
                (isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape))
                or (isinstance(x, float) and np.isnan(x))
                for x in array
            ):
                # Ragged or NaN-bearing data: store as a 1-D object array instead
                # of letting np.array try (and fail) to stack the elements.
                return np.array(array, copy=False, **{**self.np_array_kwargs, "dtype": object})
        return np.array(array, copy=False, **self.np_array_kwargs)
class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]):
    """Extractor converting pyarrow data to pandas objects via ``to_pandas``.

    All conversions go through ``pandas_types_mapper`` so extension dtypes are
    preserved.
    """

    def extract_row(self, pa_table: pa.Table) -> pd.DataFrame:
        # A "row" is a one-row DataFrame (slice of length 1), not a Series.
        return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper)

    def extract_column(self, pa_table: pa.Table) -> pd.Series:
        # Select the first column, convert, then pull it out of the DataFrame.
        return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]]

    def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame:
        return pa_table.to_pandas(types_mapper=pandas_types_mapper)
class PythonFeaturesDecoder:
    """Apply dataset-feature decoding to plain-Python rows, columns and batches.

    When ``features`` is falsy (e.g. ``None``), every method is a no-op
    passthrough returning its input unchanged.
    """

    def __init__(self, features):
        self.features = features

    def decode_row(self, row: dict) -> dict:
        """Decode one example dict through the features, if any are set."""
        if not self.features:
            return row
        return self.features.decode_example(row)

    def decode_column(self, column: list, column_name: str) -> list:
        """Decode one column's values through the features, if any are set."""
        if not self.features:
            return column
        return self.features.decode_column(column, column_name)

    def decode_batch(self, batch: dict) -> dict:
        """Decode a batch dict through the features, if any are set."""
        if not self.features:
            return batch
        return self.features.decode_batch(batch)
class PandasFeaturesDecoder:
    """Apply dataset-feature decoding to pandas rows, columns and batches.

    Only the columns that require decoding are transformed; when ``features``
    is falsy, the input is returned unchanged.
    """

    def __init__(self, features):
        self.features = features

    def decode_row(self, row: pd.DataFrame) -> pd.DataFrame:
        """Decode (in place) the columns of ``row`` that require decoding."""
        decoders = {}
        if self.features:
            for column_name, feature in self.features.items():
                if self.features._column_requires_decoding[column_name]:
                    # Nulls are passed through untouched by the wrapper.
                    decoders[column_name] = no_op_if_value_is_null(partial(decode_nested_example, feature))
        if decoders:
            row[list(decoders.keys())] = row.transform(decoders)
        return row

    def decode_column(self, column: pd.Series, column_name: str) -> pd.Series:
        """Decode one column's values when its feature requires decoding."""
        decoder = None
        if self.features and column_name in self.features:
            if self.features._column_requires_decoding[column_name]:
                decoder = no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name]))
        if decoder:
            column = column.transform(decoder)
        return column

    def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame:
        """A batch DataFrame is decoded exactly like a row DataFrame."""
        return self.decode_row(batch)
class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]):
    """
    A formatter is an object that extracts and formats data from pyarrow tables.

    It defines the formatting for rows, columns and batches.
    Subclasses implement format_row / format_column / format_batch; __call__
    dispatches between them based on the query type.
    """

    # Extractor classes exposed as class attributes for use by subclasses.
    simple_arrow_extractor = SimpleArrowExtractor
    python_arrow_extractor = PythonArrowExtractor
    numpy_arrow_extractor = NumpyArrowExtractor
    pandas_arrow_extractor = PandasArrowExtractor

    def __init__(self, features=None, decoded=True):
        # features: dataset Features used for decoding; decoded toggles whether
        # extracted data is passed through the feature decoders by subclasses.
        self.features = features
        self.decoded = decoded
        self.python_features_decoder = PythonFeaturesDecoder(self.features)
        self.pandas_features_decoder = PandasFeaturesDecoder(self.features)

    def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]:
        # Dispatch on query_type ('row' | 'column' | 'batch'); any other value
        # falls through and implicitly returns None.
        if query_type == "row":
            return self.format_row(pa_table)
        elif query_type == "column":
            return self.format_column(pa_table)
        elif query_type == "batch":
            return self.format_batch(pa_table)

    def format_row(self, pa_table: pa.Table) -> RowFormat:
        raise NotImplementedError

    def format_column(self, pa_table: pa.Table) -> ColumnFormat:
        raise NotImplementedError

    def format_batch(self, pa_table: pa.Table) -> BatchFormat:
        raise NotImplementedError
class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]):
    """Formatter that keeps data as pyarrow objects (no conversion, no decoding)."""

    def format_row(self, pa_table: pa.Table) -> pa.Table:
        """Extract a row as a pyarrow Table."""
        extractor = self.simple_arrow_extractor()
        return extractor.extract_row(pa_table)

    def format_column(self, pa_table: pa.Table) -> pa.Array:
        """Extract a column as a pyarrow Array."""
        extractor = self.simple_arrow_extractor()
        return extractor.extract_column(pa_table)

    def format_batch(self, pa_table: pa.Table) -> pa.Table:
        """Extract a batch as a pyarrow Table."""
        extractor = self.simple_arrow_extractor()
        return extractor.extract_batch(pa_table)
class PythonFormatter(Formatter[dict, list, dict]):
    """Formatter producing plain Python objects: dict rows/batches, list columns."""

    def format_row(self, pa_table: pa.Table) -> dict:
        """Extract one row; decode it when decoding is enabled."""
        raw = self.python_arrow_extractor().extract_row(pa_table)
        if not self.decoded:
            return raw
        return self.python_features_decoder.decode_row(raw)

    def format_column(self, pa_table: pa.Table) -> list:
        """Extract one column; decode it under its column name when enabled."""
        raw = self.python_arrow_extractor().extract_column(pa_table)
        if not self.decoded:
            return raw
        return self.python_features_decoder.decode_column(raw, pa_table.column_names[0])

    def format_batch(self, pa_table: pa.Table) -> dict:
        """Extract the table as a batch dict; decode it when enabled."""
        raw = self.python_arrow_extractor().extract_batch(pa_table)
        if not self.decoded:
            return raw
        return self.python_features_decoder.decode_batch(raw)
class NumpyFormatter(Formatter[dict, np.ndarray, dict]):
    """Formatter producing numpy-backed rows/columns/batches.

    Extra keyword arguments are forwarded to the numpy extractor;
    decoding reuses the python-level decoder.
    """

    def __init__(self, features=None, decoded=True, **np_array_kwargs):
        super().__init__(features=features, decoded=decoded)
        self.np_array_kwargs = np_array_kwargs

    def _extractor(self):
        # Single place where the extractor is built with the stored kwargs.
        return self.numpy_arrow_extractor(**self.np_array_kwargs)

    def format_row(self, pa_table: pa.Table) -> dict:
        row = self._extractor().extract_row(pa_table)
        return self.python_features_decoder.decode_row(row) if self.decoded else row

    def format_column(self, pa_table: pa.Table) -> np.ndarray:
        column = self._extractor().extract_column(pa_table)
        if self.decoded:
            column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        return column

    def format_batch(self, pa_table: pa.Table) -> dict:
        batch = self._extractor().extract_batch(pa_table)
        return self.python_features_decoder.decode_batch(batch) if self.decoded else batch
class PandasFormatter(Formatter):
    """Formatter producing pandas objects: DataFrame rows/batches, Series columns."""

    def format_row(self, pa_table: pa.Table) -> pd.DataFrame:
        frame = self.pandas_arrow_extractor().extract_row(pa_table)
        if self.decoded:
            frame = self.pandas_features_decoder.decode_row(frame)
        return frame

    def format_column(self, pa_table: pa.Table) -> pd.Series:
        series = self.pandas_arrow_extractor().extract_column(pa_table)
        if self.decoded:
            series = self.pandas_features_decoder.decode_column(series, pa_table.column_names[0])
        return series

    def format_batch(self, pa_table: pa.Table) -> pd.DataFrame:
        batch = self.pandas_arrow_extractor().extract_batch(pa_table)
        if self.decoded:
            batch = self.pandas_features_decoder.decode_batch(batch)
        return batch
class CustomFormatter(Formatter[dict, ColumnFormat, dict]):
    """
    A user-defined custom formatter function defined by a ``transform``.
    The transform must take as input a batch of data extracted for an arrow table using the python extractor,
    and return a batch.
    If the output batch is not a dict, then output_all_columns won't work.
    If the output batch has several fields, then querying a single column won't work since we don't know which field
    to return.
    """
    def __init__(self, transform: Callable[[dict], dict], features=None, decoded=True, **kwargs):
        super().__init__(features=features, decoded=decoded)
        self.transform = transform

    def format_row(self, pa_table: pa.Table) -> dict:
        # A row is produced by formatting a batch and un-nesting it.
        formatted_batch = self.format_batch(pa_table)
        try:
            return _unnest(formatted_batch)
        except Exception as exc:
            raise TypeError(
                f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
            ) from exc

    def format_column(self, pa_table: pa.Table) -> ColumnFormat:
        # A column is only well-defined when the transform returns a dict
        # with exactly one field.
        formatted_batch = self.format_batch(pa_table)
        if hasattr(formatted_batch, "keys"):
            if len(formatted_batch.keys()) > 1:
                raise TypeError(
                    "Tried to query a column but the custom formatting function returns too many columns. "
                    f"Only one column was expected but got columns {list(formatted_batch.keys())}."
                )
        else:
            raise TypeError(
                f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
            )
        try:
            return formatted_batch[pa_table.column_names[0]]
        except Exception as exc:
            raise TypeError(
                f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
            ) from exc

    def format_batch(self, pa_table: pa.Table) -> dict:
        # Extract with the python extractor, decode if enabled, then apply
        # the user transform to the decoded batch.
        batch = self.python_arrow_extractor().extract_batch(pa_table)
        if self.decoded:
            batch = self.python_features_decoder.decode_batch(batch)
        return self.transform(batch)
def _check_valid_column_key(key: str, columns: List[str]) -> None:
if key not in columns:
raise KeyError(f"Column {key} not in the dataset. Current columns in the dataset: {columns}")
def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None:
if isinstance(key, int):
if (key < 0 and key + size < 0) or (key >= size):
raise IndexError(f"Invalid key: {key} is out of bounds for size {size}")
return
elif isinstance(key, slice):
pass
elif isinstance(key, range):
if len(key) > 0:
_check_valid_index_key(max(key), size=size)
_check_valid_index_key(min(key), size=size)
elif isinstance(key, Iterable):
if len(key) > 0:
_check_valid_index_key(int(max(key)), size=size)
_check_valid_index_key(int(min(key)), size=size)
else:
_raise_bad_key_type(key)
def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str:
if isinstance(key, int):
return "row"
elif isinstance(key, str):
return "column"
elif isinstance(key, (slice, range, Iterable)):
return "batch"
_raise_bad_key_type(key)
def query_table(
    table: Table,
    key: Union[int, slice, range, str, Iterable],
    indices: Optional[Table] = None,
) -> pa.Table:
    """
    Query a Table to extract the subtable that correspond to the given key.

    Args:
        table (``datasets.table.Table``): The input Table to query from
        key (``Union[int, slice, range, str, Iterable]``): The key can be of different types:
            - an integer i: the subtable containing only the i-th row
            - a slice [i:j:k]: the subtable containing the rows that correspond to this slice
            - a range(i, j, k): the subtable containing the rows that correspond to this range
            - a string c: the subtable containing all the rows but only the column c
            - an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable
        indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows.
            The indices table must contain one column named "indices" of type uint64.
            This is used in case of shuffling or rows selection.

    Returns:
        ``pyarrow.Table``: the result of the query on the input table
    """
    # Check if key is valid
    if not isinstance(key, (int, slice, range, str, Iterable)):
        _raise_bad_key_type(key)
    if isinstance(key, str):
        _check_valid_column_key(key, table.column_names)
    else:
        # Row-oriented keys are bounded by the indices mapping when present.
        size = indices.num_rows if indices is not None else table.num_rows
        _check_valid_index_key(key, size)
    # Query the main table
    if indices is None:
        pa_subtable = _query_table(table, key)
    else:
        pa_subtable = _query_table_with_indices_mapping(table, key, indices=indices)
    return pa_subtable
def format_table(
    table: Table,
    key: Union[int, slice, range, str, Iterable],
    formatter: Formatter,
    format_columns: Optional[list] = None,
    output_all_columns=False,
):
    """
    Format a Table depending on the key that was used and a Formatter object.

    Args:
        table (``datasets.table.Table``): The input Table to format
        key (``Union[int, slice, range, str, Iterable]``): Depending on the key that was used, the formatter formats
            the table as either a row, a column or a batch.
        formatter (``datasets.formatting.formatting.Formatter``): Any subclass of a Formatter such as
            PythonFormatter, NumpyFormatter, etc.
        format_columns (Optional ``List[str]``): if not None, it defines the columns that will be formatted using the
            given formatter. Other columns are discarded (unless ``output_all_columns`` is True)
        output_all_columns (``bool``, defaults to False). If True, the formatted output is completed using the columns
            that are not in the ``format_columns`` list. For these columns, the PythonFormatter is used.

    Returns:
        A row, column or batch formatted object defined by the Formatter:
        - the PythonFormatter returns a dictionary for a row or a batch, and a list for a column.
        - the NumpyFormatter returns a dictionary for a row or a batch, and a np.array for a column.
        - the PandasFormatter returns a pd.DataFrame for a row or a batch, and a pd.Series for a column.
        - the TorchFormatter returns a dictionary for a row or a batch, and a torch.Tensor for a column.
        - the TFFormatter returns a dictionary for a row or a batch, and a tf.Tensor for a column.
    """
    # Unwrap the datasets Table to the underlying pyarrow table.
    if isinstance(table, Table):
        pa_table = table.table
    else:
        pa_table = table
    query_type = key_to_query_type(key)
    # Fallback formatter for columns outside `format_columns`.
    python_formatter = PythonFormatter(features=None)
    if format_columns is None:
        return formatter(pa_table, query_type=query_type)
    elif query_type == "column":
        if key in format_columns:
            return formatter(pa_table, query_type)
        else:
            return python_formatter(pa_table, query_type=query_type)
    else:
        # Format only the requested columns; optionally merge back the rest
        # (python-formatted) when output_all_columns is set.
        pa_table_to_format = pa_table.drop(col for col in pa_table.column_names if col not in format_columns)
        formatted_output = formatter(pa_table_to_format, query_type=query_type)
        if output_all_columns:
            if isinstance(formatted_output, MutableMapping):
                pa_table_with_remaining_columns = pa_table.drop(
                    col for col in pa_table.column_names if col in format_columns
                )
                remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type)
                formatted_output.update(remaining_columns_dict)
            else:
                raise TypeError(
                    f"Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}"
                )
        return formatted_output
| 41.794224 | 133 | 0.667574 |
dcfa4d4b533ef7b43e668b87982822a53a8f7f55 | 822 | py | Python | app/migrations/0003_auto_20200813_1551.py | codesankalp/E-commerce-Website | 9091a7e27a9e63242b9067377a697196879fc707 | [
"CC0-1.0"
] | null | null | null | app/migrations/0003_auto_20200813_1551.py | codesankalp/E-commerce-Website | 9091a7e27a9e63242b9067377a697196879fc707 | [
"CC0-1.0"
] | null | null | null | app/migrations/0003_auto_20200813_1551.py | codesankalp/E-commerce-Website | 9091a7e27a9e63242b9067377a697196879fc707 | [
"CC0-1.0"
] | null | null | null | # Generated by Django 3.0.8 on 2020-08-13 10:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: redefines the `cart` and `user` foreign keys
    # on Checkout as nullable with on_delete=SET_NULL, so deleting the related
    # Cart or User keeps the checkout row (FK set to NULL instead of cascading).

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('app', '0002_auto_20200813_1520'),
    ]

    operations = [
        migrations.AlterField(
            model_name='checkout',
            name='cart',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app.Cart'),
        ),
        migrations.AlterField(
            model_name='checkout',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 30.444444 | 134 | 0.654501 |
8d5d83690f6037c91662ff7f79cb35b47730b1c3 | 21,756 | py | Python | geomstats/_backend/pytorch/__init__.py | tfunatomi/geomstats | a5651680f98dea95c1f82a48af1a6dccf3e26bd1 | [
"MIT"
] | 2 | 2020-01-23T04:01:02.000Z | 2020-08-18T19:20:27.000Z | geomstats/_backend/pytorch/__init__.py | tfunatomi/geomstats | a5651680f98dea95c1f82a48af1a6dccf3e26bd1 | [
"MIT"
] | null | null | null | geomstats/_backend/pytorch/__init__.py | tfunatomi/geomstats | a5651680f98dea95c1f82a48af1a6dccf3e26bd1 | [
"MIT"
] | null | null | null | """Pytorch based computation backend."""
import math
from functools import wraps
import numpy as _np
import torch
from torch import ( # NOQA
acos as arccos,
arange,
argmin,
asin as arcsin,
atan2 as arctan2,
bool as t_bool,
broadcast_tensors as broadcast_arrays,
ceil,
clamp as clip,
cos,
cosh,
cross,
div as divide,
empty_like,
eq,
erf,
exp,
eye,
flatten,
float32,
float64,
floor,
fmod as mod,
ger as outer,
gt as greater,
imag,
int32,
int64,
isnan,
log,
lt as less,
matmul,
max as amax,
mean,
meshgrid,
min as amin,
nonzero,
ones,
ones_like,
polygamma,
pow as power,
real,
repeat_interleave as repeat,
reshape,
sign,
sin,
sinh,
stack,
std,
tan,
tanh,
tril,
uint8,
zeros,
zeros_like
)
from . import autograd # NOQA
from . import linalg # NOQA
from . import random # NOQA
from ..constants import pytorch_atol, pytorch_rtol
# Widening order used by convert_to_wider_dtype: a larger value means a
# wider type, so mixed-dtype operands are promoted to the largest entry.
DTYPES = {
    int32: 0,
    int64: 1,
    float32: 2,
    float64: 3}

# Backend-wide tolerances for isclose/allclose, shared via geomstats constants.
atol = pytorch_atol
rtol = pytorch_rtol
def _raise_not_implemented_error(*args, **kwargs):
    """Stand-in for backend ops that have no pytorch implementation yet."""
    raise NotImplementedError

searchsorted = _raise_not_implemented_error  # no torch implementation in this backend
def _box_scalar(function):
@wraps(function)
def wrapper(x):
if not torch.is_tensor(x):
x = torch.tensor(x)
return function(x)
return wrapper
# Wrap the elementwise torch ops so they also accept plain Python scalars.
# Note: `abs` here intentionally shadows the builtin to mirror numpy's API.
abs = _box_scalar(abs)
ceil = _box_scalar(ceil)
cos = _box_scalar(cos)
cosh = _box_scalar(cosh)
exp = _box_scalar(exp)
imag = _box_scalar(imag)
log = _box_scalar(log)
real = _box_scalar(real)
sin = _box_scalar(sin)
sinh = _box_scalar(sinh)
tan = _box_scalar(tan)
def comb(n, k):
    """Binomial coefficient "n choose k" as an exact integer."""
    fact = math.factorial
    return fact(n) // (fact(k) * fact(n - k))
def to_numpy(x):
    """Convert a torch tensor to a numpy array."""
    return x.numpy()
def one_hot(labels, num_classes):
    """One-hot encode integer labels into a (n, num_classes) uint8 tensor."""
    if not torch.is_tensor(labels):
        labels = torch.LongTensor(labels)
    return torch.nn.functional.one_hot(
        labels, num_classes).type(torch.uint8)
def argmax(a, **kwargs):
    """Index of the maximum element.

    Bool tensors are routed through numpy — presumably because
    torch.argmax rejects bool input; verify against the supported
    torch versions.
    """
    if a.dtype == torch.bool:
        return torch.as_tensor(_np.argmax(a.data.numpy(), **kwargs))
    return torch.argmax(a, **kwargs)
def convert_to_wider_dtype(tensor_list):
    """Cast every tensor in ``tensor_list`` to the widest dtype present.

    "Widest" is decided by the module-level DTYPES ranking.
    """
    widest_code = max(DTYPES[tensor.dtype] for tensor in tensor_list)
    target_dtype = list(DTYPES.keys())[widest_code]
    return [cast(tensor, dtype=target_dtype) for tensor in tensor_list]
def less_equal(x, y, **kwargs):
    """Elementwise ``x <= y``; plain Python scalars are promoted to tensors."""
    x = x if torch.is_tensor(x) else torch.tensor(x)
    y = y if torch.is_tensor(y) else torch.tensor(y)
    return torch.le(x, y, **kwargs)
def empty(shape, dtype=float64):
    """Uninitialized tensor of the given shape (numpy.empty analogue)."""
    return torch.empty(*shape, dtype=dtype)
def split(x, indices_or_sections, axis=0):
    """Split a tensor along ``axis`` (numpy.split-style interface).

    An int asks for that many sections; a sequence gives the cut
    positions, which are converted to the per-chunk lengths that
    torch.split expects.
    """
    if isinstance(indices_or_sections, int):
        section_size = x.shape[axis] // indices_or_sections
        return torch.split(x, section_size, dim=axis)
    cuts = _np.array(indices_or_sections)
    # Lengths between consecutive cut positions, plus the tail past the last cut.
    lengths = cuts[1:] - cuts[:-1]
    tail = x.shape[axis] - cuts[-1]
    if tail > 0:
        lengths = _np.append(lengths, tail)
    lengths = _np.insert(lengths, 0, cuts[0])
    return torch.split(x, tuple(lengths), dim=axis)
def logical_or(x, y):
    """Logical "or" of two values.

    Tensors are combined elementwise with ``|`` (mirroring ``logical_and``);
    plain Python values use the short-circuiting ``or``.

    Bug fix: the previous implementation always used Python ``or``, which
    raises "Boolean value of Tensor with more than one element is ambiguous"
    for multi-element tensors and never produced an elementwise result.
    """
    if torch.is_tensor(x):
        return x | y
    return x or y
def logical_and(x, y):
    """Logical "and": elementwise ``&`` for tensors, Python ``and`` otherwise."""
    return x & y if torch.is_tensor(x) else (x and y)
def any(x, axis=None):
    """Whether some element evaluates to True, over all entries or per axis.

    ``axis`` may be None, an int, or a tuple of ints; tuples are reduced
    one axis at a time after normalizing negative axes.
    """
    if not torch.is_tensor(x):
        x = torch.tensor(x)
    if axis is None:
        return torch.any(x)
    if isinstance(axis, int):
        return torch.any(x.bool(), axis)
    if len(axis) == 1:
        return torch.any(x, *axis)
    axes = [ndim(x) + one_axis if one_axis < 0 else one_axis for one_axis in axis]
    remaining = tuple(k - 1 if k >= 0 else k for k in axes[1:])
    return any(torch.any(x.bool(), axes[0]), remaining)
def cast(x, dtype):
    """Cast to ``dtype``; non-tensor input is first converted via ``array``."""
    tensor = x if torch.is_tensor(x) else array(x)
    return tensor.to(dtype=dtype)
def flip(x, axis):
    """Reverse along the given axis (int or list); None reverses every axis."""
    if axis is None:
        dims = list(range(x.ndim))
    elif isinstance(axis, int):
        dims = [axis]
    else:
        dims = axis
    return torch.flip(x, dims=dims)
def concatenate(seq, axis=0, out=None):
    """Concatenate tensors along ``axis`` after promoting to a common dtype."""
    seq = convert_to_wider_dtype(seq)
    return torch.cat(seq, dim=axis, out=out)
def hstack(seq):
    """Concatenate along axis 1.

    NOTE(review): unlike numpy.hstack this always uses axis 1, so 1-D
    inputs will fail — confirm callers only pass >= 2-D tensors.
    """
    return concatenate(seq, axis=1)
def vstack(seq):
    """Concatenate along axis 0."""
    return concatenate(seq)
def _get_largest_dtype(seq):
dtype_dict = {0: t_bool,
1: uint8,
2: int32,
3: int64,
4: float32,
5: float64}
reverse_dict = {dtype_dict[key]: key for key in dtype_dict}
dtype_code_set = {reverse_dict[t.dtype] for t in seq}
return dtype_dict[max(dtype_code_set)]
def array(val, dtype=None):
    """Coerce ``val`` (scalar, ndarray, tensor, or possibly-nested list/tuple)
    to a torch tensor.

    When ``dtype`` is None, float64 results are downcast to float32 (the
    backend's default float type); mixed lists are promoted to their
    widest member dtype.
    """
    if isinstance(val, (list, tuple)):
        if isinstance(val[0], (list, tuple)):
            # Nested sequences: convert each element recursively, then stack.
            aux_list = [array(t, dtype) for t in val]
            if dtype is None:
                local_dtype = _get_largest_dtype(aux_list)
                aux_list = [cast(t, local_dtype) for t in aux_list]
            return stack(aux_list)
        if not any([isinstance(t, torch.Tensor) for t in val]):
            # Pure-scalar sequence: go through numpy for dtype inference.
            val = _np.copy(_np.array(val))
        elif any([not isinstance(t, torch.Tensor) for t in val]):
            # Mixed tensors and scalars: unify everything to the widest
            # tensor dtype before stacking.
            tensor_members = [t for t in val if torch.is_tensor(t)]
            local_dtype = _get_largest_dtype(tensor_members)
            for index, t in enumerate(val):
                # Bug fix: the old code discarded the result of ``cast`` for
                # tensors whose dtype differed from local_dtype, leaving
                # mixed dtypes that made the following ``stack`` fail.
                if torch.is_tensor(t):
                    val[index] = cast(t, dtype=local_dtype)
                else:
                    val[index] = torch.tensor(t, dtype=local_dtype)
            val = stack(val)
        else:
            val = stack(val)
    if isinstance(val, (bool, int, float)):
        val = _np.array(val)
    if isinstance(val, _np.ndarray):
        val = torch.from_numpy(val)
    if not isinstance(val, torch.Tensor):
        val = torch.Tensor([val])
    if dtype is not None:
        if val.dtype != dtype:
            val = cast(val, dtype)
    elif val.dtype == torch.float64:
        # Default float type of this backend is float32.
        val = val.float()
    return val
def all(x, axis=None):
    """Whether every element evaluates to True, over all entries or per axis.

    ``axis`` may be None, an int, or a tuple of ints; tuples are reduced
    one axis at a time after normalizing negative axes.
    """
    if not torch.is_tensor(x):
        x = torch.tensor(x)
    if axis is None:
        return x.bool().all()
    if isinstance(axis, int):
        return torch.all(x.bool(), axis)
    if len(axis) == 1:
        return torch.all(x, *axis)
    axes = [ndim(x) + one_axis if one_axis < 0 else one_axis for one_axis in axis]
    remaining = tuple(k - 1 if k >= 0 else k for k in axes[1:])
    return all(torch.all(x.bool(), axes[0]), remaining)
def get_slice(x, indices):
    """Return the sub-array of ``x`` selected by ``indices``.

    Indices follow numpy's advanced-indexing convention: one iterable of
    positions per axis, so ``get_slice(a, ((0, 2), (8, 9)))`` picks the
    elements ``a[0, 8]`` and ``a[2, 9]``.

    Parameters
    ----------
    x : array-like, shape=[dim]
        Source array.
    indices : iterable(iterable(int))
        Positions to keep, grouped by axis, starting from 0.

    Returns
    -------
    slice : array-like
        The selected elements.

    Examples
    --------
    >>> a = torch.tensor(range(30)).reshape(3,10)
    >>> get_slice(a, ((0, 2), (8, 9)))
    tensor([8, 29])
    """
    return x[indices]
def allclose(a, b, atol=atol, rtol=rtol):
    """Scalar closeness test with a tiling-based batch broadcast.

    Both inputs are converted to float tensors of at least 1 dimension;
    when their leading lengths differ, the shorter one is tiled so that
    torch.allclose compares equal shapes.
    """
    if not isinstance(a, torch.Tensor):
        a = torch.tensor(a)
    if not isinstance(b, torch.Tensor):
        b = torch.tensor(b)
    a = to_ndarray(a.float(), to_ndim=1)
    b = to_ndarray(b.float(), to_ndim=1)
    n_a = a.shape[0]
    n_b = b.shape[0]
    nb_dim = a.dim()
    if n_a > n_b:
        # Repeat b along the leading axis (only) so the shapes match.
        reps = (int(n_a / n_b),) + (nb_dim - 1) * (1,)
        b = tile(b, reps)
    elif n_a < n_b:
        reps = (int(n_b / n_a),) + (nb_dim - 1) * (1,)
        a = tile(a, reps)
    return torch.allclose(a, b, atol=atol, rtol=rtol)
def arccosh(x):
    """Inverse hyperbolic cosine, computed as log(x) + log1p(sqrt(x*x - 1) / x).

    Algebraically equal to log(x + sqrt(x^2 - 1)).
    """
    c0 = torch.log(x)
    c1 = torch.log1p(torch.sqrt(x * x - 1) / x)
    return c0 + c1
def arcsinh(x):
    """Inverse hyperbolic sine: log(x + sqrt(x^2 + 1))."""
    return torch.log(x + torch.sqrt(x * x + 1))
def arcosh(x):
    """Inverse hyperbolic cosine: log(x + sqrt(x^2 - 1)).

    NOTE(review): duplicates ``arccosh`` above with the direct formula —
    consider consolidating.
    """
    return torch.log(x + torch.sqrt(x * x - 1))
def shape(val):
    """Shape of a tensor, as torch.Size."""
    return val.shape
def dot(a, b):
    """Inner product over the last axis, batched over any leading axes."""
    return einsum('...i,...i->...', a, b)
def maximum(a, b):
    """Elementwise maximum; both inputs are converted with ``array`` first."""
    return torch.max(array(a), array(b))
def to_ndarray(x, to_ndim, axis=0):
    """Convert to a tensor and, if it has ``to_ndim - 1`` dims, insert one at ``axis``."""
    x = array(x)
    if x.dim() == to_ndim - 1:
        x = torch.unsqueeze(x, dim=axis)
    return x
def broadcast_to(x, shape):
    """Broadcast ``x`` to ``shape`` without copying (torch ``expand``)."""
    if not torch.is_tensor(x):
        x = torch.tensor(x)
    return x.expand(shape)
def sqrt(x):
    """Elementwise square root; plain scalars are first made float tensors."""
    if not isinstance(x, torch.Tensor):
        x = torch.tensor(x).float()
    return torch.sqrt(x)
def isclose(x, y, rtol=rtol, atol=atol):
    """Elementwise torch.isclose; plain Python scalars are promoted to tensors."""
    x = x if torch.is_tensor(x) else torch.tensor(x)
    y = y if torch.is_tensor(y) else torch.tensor(y)
    return torch.isclose(x, y, atol=atol, rtol=rtol)
def sum(x, axis=None, keepdims=None, **kwargs):
    """Sum of tensor elements, over everything or along ``axis``.

    ``keepdims`` maps to torch's ``keepdim`` and is only forwarded when
    the caller supplied it.
    """
    options = dict(kwargs)
    if axis is not None:
        options['dim'] = axis
    if keepdims is not None:
        options['keepdim'] = keepdims
    return torch.sum(x, **options)
def einsum(*args, **kwargs):
    """torch.einsum wrapper with manual handling of all-ellipsis equations.

    When every input term starts with '...', the ellipses are rewritten
    into an explicit batch index 'r' (or dropped) depending on whether
    each operand actually carries a leading batch dimension, because the
    two operands' batch sizes may differ (one of them being 1).
    """
    einsum_str = args[0]
    input_tensors_list = args[1:]
    input_tensors_list = convert_to_wider_dtype(input_tensors_list)
    if len(input_tensors_list) == 1:
        return torch.einsum(einsum_str, input_tensors_list)
    einsum_list = einsum_str.split('->')
    input_str = einsum_list[0]
    if len(einsum_list) > 1:
        output_str = einsum_list[1]
    input_str_list = input_str.split(',')
    is_ellipsis = [input_str[:3] == '...' for input_str in input_str_list]
    all_ellipsis = bool(_np.prod(is_ellipsis))
    if all_ellipsis:
        # Number of explicitly named indices per operand (after the '...').
        ndims = [len(input_str[3:]) for input_str in input_str_list]
        if len(input_str_list) > 2:
            raise NotImplementedError(
                'Ellipsis support not implemented for >2 input tensors')
        tensor_a = input_tensors_list[0]
        tensor_b = input_tensors_list[1]
        initial_ndim_a = tensor_a.ndim
        initial_ndim_b = tensor_b.ndim
        # Give each operand a leading batch axis if it does not have one.
        tensor_a = to_ndarray(tensor_a, to_ndim=ndims[0] + 1)
        tensor_b = to_ndarray(tensor_b, to_ndim=ndims[1] + 1)
        n_tensor_a = tensor_a.shape[0]
        n_tensor_b = tensor_b.shape[0]
        # Both inputs were unbatched: drop the artificial batch axis again.
        cond = (
            n_tensor_a == n_tensor_b == 1
            and initial_ndim_a != tensor_a.ndim
            and initial_ndim_b != tensor_b.ndim)
        if cond:
            tensor_a = squeeze(tensor_a, axis=0)
            tensor_b = squeeze(tensor_b, axis=0)
            input_prefix_list = ['', '']
            output_prefix = ''
        elif n_tensor_a != n_tensor_b:
            # One side has batch size 1: broadcast it against the other.
            if n_tensor_a == 1:
                tensor_a = squeeze(tensor_a, axis=0)
                input_prefix_list = ['', 'r']
                output_prefix = 'r'
            elif n_tensor_b == 1:
                tensor_b = squeeze(tensor_b, axis=0)
                input_prefix_list = ['r', '']
                output_prefix = 'r'
            else:
                raise ValueError('Shape mismatch for einsum.')
        else:
            # Equal batch sizes: keep a shared batch index on both sides.
            input_prefix_list = ['r', 'r']
            output_prefix = 'r'
        # Rewrite '...' into the chosen explicit prefixes and rebuild the
        # equation (including the output term when one was given).
        input_str_list = [
            input_str.replace('...', prefix) for input_str, prefix in zip(
                input_str_list, input_prefix_list)]
        input_str = input_str_list[0] + ',' + input_str_list[1]
        einsum_str = input_str
        if len(einsum_list) > 1:
            output_str = output_str.replace('...', output_prefix)
            einsum_str = input_str + '->' + output_str
        result = torch.einsum(einsum_str, tensor_a, tensor_b, **kwargs)
        return result
    return torch.einsum(*args, **kwargs)
def T(x):
    """Matrix transpose of a 2-D tensor."""
    return x.t()
def transpose(x, axes=None):
    """Permute axes: explicit permutation via ``axes``, otherwise reverse
    all axes (1-D input is returned unchanged)."""
    if axes:
        return x.permute(axes)
    if x.dim() == 1:
        return x
    if x.dim() > 2 and axes is None:
        return x.permute(tuple(reversed(range(x.dim()))))
    return x.t()
def squeeze(x, axis=None):
    """Drop length-1 axes — all of them, or only the one at ``axis``."""
    return torch.squeeze(x) if axis is None else torch.squeeze(x, dim=axis)
def trace(x, axis1=0, axis2=1):
    """Sum of the diagonal along the plane spanned by ``axis1``/``axis2``.

    Only the axis pairs used by the library are supported; any other
    combination raises NotImplementedError.
    """
    pair = (min(axis1, axis2), max(axis1, axis2))
    patterns = {
        (1, 2): '...ii',
        (-2, -1): '...ii',
        (0, 1): 'ii...',
        (0, 2): 'i...i',
    }
    if pair not in patterns:
        raise NotImplementedError()
    return torch.einsum(patterns[pair], x)
@_box_scalar
def arctanh(x):
    """Inverse hyperbolic tangent: 0.5 * log((1 + x) / (1 - x))."""
    return 0.5 * torch.log((1 + x) / (1 - x))
def linspace(start, stop, num):
    """``num`` evenly spaced values from ``start`` to ``stop`` (inclusive)."""
    return torch.linspace(start=start, end=stop, steps=num)
def equal(a, b, **kwargs):
    """Elementwise equality via torch.eq.

    uint8 (byte) tensors are promoted to float before comparing, so they
    can be tested against float operands.

    Bug fix: the old guard compared ``a.dtype == torch.ByteTensor`` — a
    dtype against a tensor *class* — which is never True, leaving the
    intended byte-tensor promotion dead. The check now uses
    ``torch.uint8``, the dtype byte tensors actually carry.
    """
    if a.dtype == torch.uint8:
        a = a.float()
    if b.dtype == torch.uint8:
        b = b.float()
    return torch.eq(a, b, **kwargs)
def diag_indices(*args, **kwargs):
    """numpy.diag_indices, with each index array converted to a torch tensor."""
    return tuple(torch.from_numpy(arr) for arr in _np.diag_indices(*args, **kwargs))
def tril_indices(*args, **kwargs):
    """numpy.tril_indices, with each index array converted to a torch tensor."""
    return tuple(torch.from_numpy(arr) for arr in _np.tril_indices(*args, **kwargs))
def triu_indices(*args, **kwargs):
    """numpy.triu_indices, with each index array converted to a torch tensor."""
    return tuple(torch.from_numpy(arr) for arr in _np.triu_indices(*args, **kwargs))
def tile(x, y):
    """numpy.tile analogue backed by Tensor.repeat; ``y`` gives per-axis repeats."""
    if not torch.is_tensor(x):
        x = torch.tensor(x)
    return x.repeat(y)
def expand_dims(x, axis=0):
    """Insert a new axis of length 1 at position ``axis``."""
    return torch.unsqueeze(x, dim=axis)
def ndim(x):
    """Number of dimensions of a tensor."""
    return x.dim()
def hsplit(x, indices_or_section):
    """Split along axis 1; an int means that many equal sections.

    NOTE(review): unlike ``split`` above, a list argument is forwarded to
    torch.split unconverted (i.e. treated as chunk sizes, not cut
    indices) — confirm this asymmetry is intended.
    """
    if isinstance(indices_or_section, int):
        indices_or_section = x.shape[1] // indices_or_section
    return torch.split(x, indices_or_section, dim=1)
def diagonal(x, offset=0, axis1=0, axis2=1):
    """Extract a diagonal from the plane spanned by ``axis1``/``axis2``."""
    return torch.diagonal(x, offset=offset, dim1=axis1, dim2=axis2)
def set_diag(x, new_diag):
    """Return a copy of ``x`` with the diagonal of the last two axes replaced.

    Parameters
    ----------
    x : array-like, shape=[dim]
        Initial array.
    new_diag : array-like, shape=[dim[-2]]
        Values to set on the diagonal.

    Returns
    -------
    array-like
        ``x`` with its last-two-axes diagonal replaced by ``new_diag``.

    Notes
    -----
    This mimics tensorflow.linalg.set_diag(x, new_diag) when new_diag is a
    1-D array. Contrary to the previous docstring, ``x`` itself is NOT
    modified: the result is built out-of-place and returned.
    """
    arr_shape = x.shape
    # Zero out the existing diagonal, then add the new diagonal back in.
    off_diag = (1 - torch.eye(arr_shape[-1])) * x
    diag = torch.einsum(
        'ij,...i->...ij', torch.eye(new_diag.shape[-1]), new_diag)
    return diag + off_diag
def prod(x, axis=None):
    """Product of elements, over all entries or along ``axis``."""
    return torch.prod(x) if axis is None else torch.prod(x, dim=axis)
def where(condition, x=None, y=None):
    """numpy.where analogue.

    With only ``condition``, returns the indices of the True entries;
    otherwise selects from ``x``/``y`` elementwise (``y`` is cast to
    ``x``'s dtype first).
    """
    if x is None and y is None:
        return torch.where(condition)
    x = x if torch.is_tensor(x) else torch.tensor(x)
    y = y if torch.is_tensor(y) else torch.tensor(y)
    return torch.where(condition, x, cast(y, x.dtype))
def get_mask_i_float(i, n):
    """Create a 1D array of zeros with one element at one, with floating type.

    Parameters
    ----------
    i : int
        Index of the non-zero element.
    n : int
        Length of the created array.

    Returns
    -------
    mask_i_float : array-like, shape=[n,]
        1D array of zeros except at index i, where it is one.
    """
    range_n = arange(cast(array(n), int32))
    i_float = cast(array(i), int32)
    # Compare the index range against i to place the single 1 entry.
    mask_i = equal(range_n, i_float)
    mask_i_float = cast(mask_i, float32)
    return mask_i_float
def _is_boolean(x):
if isinstance(x, bool):
return True
if isinstance(x, (tuple, list)):
return _is_boolean(x[0])
if torch.is_tensor(x):
return x.dtype in [torch.bool, torch.uint8]
return False
def _is_iterable(x):
if isinstance(x, (list, tuple)):
return True
if torch.is_tensor(x):
return ndim(x) > 0
return False
def assignment(x, values, indices, axis=0):
    """Assign values at given indices of an array.

    Parameters
    ----------
    x: array-like, shape=[dim]
        Initial array.
    values: {float, list(float)}
        Value or list of values to be assigned.
    indices: {int, tuple, list(int), list(tuple)}
        Single int or tuple, or list of ints or tuples of indices where value
        is assigned.
        If the length of the tuples is shorter than ndim(x), values are
        assigned to each copy along axis.
    axis: int, optional
        Axis along which values are assigned, if vectorized.

    Returns
    -------
    x_new : array-like, shape=[dim]
        Copy of x with the values assigned at the given indices.

    Notes
    -----
    If a single value is provided, it is assigned at all the indices.
    If a list is given, it must have the same length as indices.
    """
    x_new = copy(x)
    # Vectorized mode: the index tuple addresses fewer axes than x has,
    # so the assignment is broadcast along `axis`.
    use_vectorization = hasattr(indices, '__len__') and len(indices) < ndim(x)
    if _is_boolean(indices):
        # Boolean mask: rely on torch's native masked assignment.
        x_new[indices] = values
        return x_new
    zip_indices = _is_iterable(indices) and _is_iterable(indices[0])
    len_indices = len(indices) if _is_iterable(indices) else 1
    if zip_indices:
        # Turn a list of coordinate tuples into per-axis index sequences.
        indices = tuple(zip(*indices))
    if not use_vectorization:
        if not zip_indices:
            len_indices = len(indices) if _is_iterable(indices) else 1
        len_values = len(values) if _is_iterable(values) else 1
        if len_values > 1 and len_values != len_indices:
            raise ValueError('Either one value or as many values as indices')
        x_new[indices] = values
    else:
        # Insert a full slice at `axis` so the values broadcast along it.
        indices = tuple(
            list(indices[:axis]) + [slice(None)] + list(indices[axis:]))
        x_new[indices] = values
    return x_new
def assignment_by_sum(x, values, indices, axis=0):
    """Add values at given indices of an array.

    Parameters
    ----------
    x: array-like, shape=[dim]
        Initial array.
    values: {float, list(float)}
        Value or list of values to be assigned.
    indices: {int, tuple, list(int), list(tuple)}
        Single int or tuple, or list of ints or tuples of indices where value
        is assigned.
        If the length of the tuples is shorter than ndim(x), values are
        assigned to each copy along axis.
    axis: int, optional
        Axis along which values are assigned, if vectorized.

    Returns
    -------
    x_new : array-like, shape=[dim]
        Copy of x with the values added at the given indices.

    Notes
    -----
    If a single value is provided, it is added at all the indices.
    If a list is given, it must have the same length as indices.
    """
    x_new = copy(x)
    values = array(values)
    # Same index plumbing as `assignment`, but accumulating with += .
    use_vectorization = hasattr(indices, '__len__') and len(indices) < ndim(x)
    if _is_boolean(indices):
        x_new[indices] += values
        return x_new
    zip_indices = _is_iterable(indices) and _is_iterable(indices[0])
    if zip_indices:
        indices = list(zip(*indices))
    if not use_vectorization:
        len_indices = len(indices) if _is_iterable(indices) else 1
        len_values = len(values) if _is_iterable(values) else 1
        if len_values > 1 and len_values != len_indices:
            raise ValueError('Either one value or as many values as indices')
        x_new[indices] += values
    else:
        indices = tuple(
            list(indices[:axis]) + [slice(None)] + list(indices[axis:]))
        x_new[indices] += values
    return x_new
def copy(x):
    """Copy of a tensor's data (Tensor.clone)."""
    return x.clone()
def cumsum(x, axis=None):
    """Cumulative sum; with axis=None the input is flattened first (numpy-style)."""
    if not torch.is_tensor(x):
        x = array(x)
    if axis is None:
        return x.flatten().cumsum(dim=0)
    return torch.cumsum(x, dim=axis)
def cumprod(x, axis=None):
    """Cumulative product; with axis=None the input is flattened first.

    NOTE(review): unlike ``cumsum``, non-tensor input is not converted
    here — confirm whether that asymmetry is intended.
    """
    if axis is None:
        return x.flatten().cumprod(dim=0)
    return torch.cumprod(x, dim=axis)
def array_from_sparse(indices, data, target_shape):
    """Create an array of given shape, with values at specific indices.

    The rest of the array is filled with zeros.

    Parameters
    ----------
    indices : iterable(tuple(int))
        Index of each element which will be assigned a specific value.
    data : iterable(scalar)
        Value associated at each index.
    target_shape : tuple(int)
        Shape of the output array.

    Returns
    -------
    a : array, shape=target_shape
        Array of zeros with specified values assigned to specified indices.

    Notes
    -----
    Rewritten to use ``torch.sparse_coo_tensor``: the legacy
    ``torch.sparse.FloatTensor(...)`` constructor is deprecated, and the
    values are now converted in one step instead of casting and then
    copying through ``torch.FloatTensor``.
    """
    return torch.sparse_coo_tensor(
        torch.LongTensor(indices).t(),
        torch.as_tensor(data, dtype=torch.float32),
        torch.Size(target_shape)).to_dense()
def vectorize(x, pyfunc, multiple_args=False, **kwargs):
    """Apply ``pyfunc`` along the first axis of ``x`` and stack the results.

    With ``multiple_args=True``, ``x`` is a tuple of arrays iterated in
    lockstep and ``pyfunc`` receives one element from each.
    """
    if multiple_args:
        return stack([pyfunc(*elements) for elements in zip(*x)])
    return stack([pyfunc(element) for element in x])
def triu_to_vec(x, k=0):
    """Flatten the upper-triangular part (diagonal offset ``k``) of the
    last two axes into a vector."""
    size = x.shape[-1]
    rows, cols = triu_indices(size, k=k)
    return x[..., rows, cols]
def mat_from_diag_triu_tril(diag, tri_upp, tri_low):
    """Build matrix from given components.

    Forms a matrix from diagonal, strictly upper triangular and
    strictly lower triangular parts.

    Parameters
    ----------
    diag : array_like, shape=[..., n]
    tri_upp : array_like, shape=[..., (n * (n - 1)) / 2]
    tri_low : array_like, shape=[..., (n * (n - 1)) / 2]

    Returns
    -------
    mat : array_like, shape=[..., n, n]
    """
    n = diag.shape[-1]
    i, = diag_indices(n, ndim=1)
    # Coordinates of the strictly-upper triangle; swapped for the lower one.
    j, k = triu_indices(n, k=1)
    # Allocate the [..., n, n] output, then scatter the three parts in place.
    mat = torch.zeros((diag.shape + (n, )))
    mat[..., i, i] = diag
    mat[..., j, k] = tri_upp
    mat[..., k, j] = tri_low
    return mat
| 26.243667 | 78 | 0.605166 |
8eb062f404e960f61163405d427830fcc03bc732 | 1,811 | py | Python | growth-marketing-churn-analyses/more/campaign_vis.py | DanielMabadeje/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 7adab3877fc1d3f1d5f57e6c1743dae8f76f72c5 | [
"Apache-2.0"
] | 3,266 | 2017-08-06T16:51:46.000Z | 2022-03-30T07:34:24.000Z | growth-marketing-churn-analyses/more/campaign_vis.py | nuhaltinsoy/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 6017441f2d476f9c6c568dd886da43c6c0fd89bd | [
"Apache-2.0"
] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | growth-marketing-churn-analyses/more/campaign_vis.py | nuhaltinsoy/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 6017441f2d476f9c6c568dd886da43c6c0fd89bd | [
"Apache-2.0"
] | 1,449 | 2017-08-06T17:40:59.000Z | 2022-03-31T12:03:24.000Z | import matplotlib.pyplot as plt
from sqlalchemy import *
import numpy as np
from sqlalchemy.orm import sessionmaker
from churndata import *
from pandas import DataFrame
from util import query_to_df
from util import campaign_to_num,event_to_num,transform_column,hist_and_show,vectorize
# SQLAlchemy engine + session bound to the local demo database.
db = create_engine('sqlite:///forjar.db')
metadata = MetaData(db)
Session = sessionmaker(bind=db)
session = Session()
def transform_column(df, column_name, fn):
    """Map df[column_name] through ``fn`` and coerce the result to float, in place."""
    transformed = df[column_name].apply(fn)
    df[column_name] = transformed.astype('float')
# Encodings from categorical labels to numeric codes, used to turn the
# string-valued query columns into plottable floats.
campaign_to_num = {
    'TW' : 1,
    'RE' : 2,
    'FB' : 3,
    'PI' : 4
}
event_to_num = {
    'like' : 1,
    'share' : 2,
    'nothing' : 3,
    'bought' : 4
}
meal_to_num = {
    'japanese': 1,
    'chinese' : 2,
    'french' : 3,
    'german' : 4,
    'italian' : 5,
    'mexican' : 6,
    'vietnamese' : 7
}
"""
Counts the users by campaign id
"""
user_dist = session.query(Users)
user_df = query_to_df(session,user_dist)
transform_column(user_df,'Users_Campaign_ID',campaign_to_num.get)
q = session.query(Users.Campaign_ID,Event.Type,Users.id)
d = query_to_df(session,q)
column_transforms = {
'Users_Campaign_ID' : campaign_to_num.get,
'Event_Type' : event_to_num.get
}
sub_plot_size = len(d.columns) * len(d.columns)
"""
Subplot call here
"""
for column in d.columns:
if column_transforms.has_key(column):
print 'Transforming ' + column
transform_column(d,column,column_transforms[column])
count = 1
fig = plt.figure()
for column in d.columns:
for column2 in d.columns:
x = d[column]
y = d[column2]
print (x,y)
print('Plotting ',column,column2)
fig.add_subplot(1,sub_plot_size,count)
count = count + 1
plt.scatter(x,y)
plt.show()
| 19.265957 | 86 | 0.66593 |
a08dfb07ca949b79c8f003ebd21c05ce3769992e | 8,814 | py | Python | cleanrl/experiments/a2c_gae_continuous_tanh.py | timtody/cleanrl | 1e1736bf49f98d94d71d1b51a75dd4f02f4b600f | [
"MIT"
] | 1 | 2019-12-15T14:12:32.000Z | 2019-12-15T14:12:32.000Z | cleanrl/experiments/a2c_gae_continuous_tanh.py | timtody/cleanrl | 1e1736bf49f98d94d71d1b51a75dd4f02f4b600f | [
"MIT"
] | null | null | null | cleanrl/experiments/a2c_gae_continuous_tanh.py | timtody/cleanrl | 1e1736bf49f98d94d71d1b51a75dd4f02f4b600f | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from torch.distributions.normal import Normal
from torch.utils.tensorboard import SummaryWriter
from cleanrl.common import preprocess_obs_space, preprocess_ac_space
import argparse
import numpy as np
import gym
import pybullet_envs
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='A2C agent')
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).strip(".py"),
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="InvertedPendulumBulletEnv-v0",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=7e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=0,
help='seed of the experiment')
parser.add_argument('--episode-length', type=int, default=200,
help='the maximum length of each episode')
parser.add_argument('--total-timesteps', type=int, default=4000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=bool, default=True,
help='whether to set `torch.backends.cudnn.deterministic=True`')
parser.add_argument('--cuda', type=bool, default=True,
help='whether to use CUDA whenever possible')
parser.add_argument('--prod-mode', type=bool, default=False,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
# Algorithm specific arguments
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--vf-coef', type=float, default=0.25,
help="value function's coefficient the loss function")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--ent-coef', type=float, default=0.01,
help="policy entropy's coefficient the loss function")
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
# TRY NOT TO MODIFY: setup the environment
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
env = gym.make(args.gym_id)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
env.seed(args.seed)
env.action_space.seed(args.seed)
env.observation_space.seed(args.seed)
input_shape, preprocess_obs_fn = preprocess_obs_space(env.observation_space, device)
output_shape = preprocess_ac_space(env.action_space)
# ALGO LOGIC: initialize agent here:
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.fc1 = nn.Linear(input_shape, 120)
self.fc2 = nn.Linear(120, 84)
self.fc_mean = nn.Linear(84, output_shape)
self.logstd = nn.Parameter(torch.zeros(1, output_shape))
def forward(self, x):
x = preprocess_obs_fn(x)
x = torch.tanh(self.fc1(x))
x = torch.tanh(self.fc2(x))
action_mean = self.fc_mean(x)
action_logstd = self.logstd.expand_as(action_mean)
return action_mean, action_logstd.exp()
class Value(nn.Module):
def __init__(self):
super(Value, self).__init__()
self.fc1 = nn.Linear(input_shape, 64)
self.fc2 = nn.Linear(64, 1)
def forward(self, x):
x = preprocess_obs_fn(x)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
pg = Policy().to(device)
vf = Value().to(device)
optimizer = optim.Adam(list(pg.parameters()) + list(vf.parameters()), lr=args.learning_rate)
loss_fn = nn.MSELoss()
# TRY NOT TO MODIFY: start the game
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
'\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
import wandb
wandb.init(project=args.wandb_project_name, tensorboard=True, config=vars(args), name=experiment_name)
writer = SummaryWriter(f"/tmp/{experiment_name}")
wandb.save(os.path.abspath(__file__))
global_step = 0
while global_step < args.total_timesteps:
next_obs = np.array(env.reset())
actions = np.empty((args.episode_length,), dtype=object)
rewards, dones = np.zeros((2, args.episode_length))
obs = np.empty((args.episode_length,) + env.observation_space.shape)
# ALGO LOGIC: put other storage logic here
values = torch.zeros((args.episode_length), device=device)
neglogprobs = torch.zeros((args.episode_length,), device=device)
entropys = torch.zeros((args.episode_length,), device=device)
# TRY NOT TO MODIFY: prepare the execution of the game.
for step in range(args.episode_length):
global_step += 1
obs[step] = next_obs.copy()
# ALGO LOGIC: put action logic here
logits, std = pg.forward([obs[step]])
values[step] = vf.forward([obs[step]])
# ALGO LOGIC: `env.action_space` specific logic
if isinstance(env.action_space, Discrete):
probs = Categorical(logits=logits)
action = probs.sample()
actions[step], neglogprobs[step], entropys[step] = action.tolist()[0], -probs.log_prob(action), probs.entropy()
elif isinstance(env.action_space, Box):
probs = Normal(logits, std)
action = probs.sample()
clipped_action = torch.clamp(action, torch.min(torch.Tensor(env.action_space.low)), torch.min(torch.Tensor(env.action_space.high)))
actions[step], neglogprobs[step], entropys[step] = clipped_action.tolist()[0], -probs.log_prob(action).sum(), probs.entropy().sum()
elif isinstance(env.action_space, MultiDiscrete):
logits_categories = torch.split(logits, env.action_space.nvec.tolist(), dim=1)
action = []
probs_categories = []
probs_entropies = torch.zeros((logits.shape[0]))
neglogprob = torch.zeros((logits.shape[0]))
for i in range(len(logits_categories)):
probs_categories.append(Categorical(logits=logits_categories[i]))
if len(action) != env.action_space.shape:
action.append(probs_categories[i].sample())
neglogprob -= probs_categories[i].log_prob(action[i])
probs_entropies += probs_categories[i].entropy()
action = torch.stack(action).transpose(0, 1).tolist()
actions[step], neglogprobs[step], entropys[step] = action[0], neglogprob, probs_entropies
# TRY NOT TO MODIFY: execute the game and log data.
next_obs, rewards[step], dones[step], _ = env.step(actions[step])
next_obs = np.array(next_obs)
if dones[step]:
break
# ALGO LOGIC: training.
# calculate the discounted rewards, or namely, returns
gae = 0
returns = np.zeros_like(rewards)
for t in reversed(range(rewards.shape[0]-1)):
delta = rewards[t] + args.gamma * values[t+1] * dones[t+1] - values[t]
gae = delta + args.gamma * 0.95 * dones[t+1] * gae
returns[t] = gae + values[t]
# advantages are returns - baseline, value estimates in our case
advantages = returns - values.detach().cpu().numpy()
vf_loss = loss_fn(torch.Tensor(returns).to(device), values) * args.vf_coef
pg_loss = torch.Tensor(advantages).to(device) * neglogprobs
loss = (pg_loss - entropys * args.ent_coef).mean() + vf_loss
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(list(pg.parameters()) + list(vf.parameters()), args.max_grad_norm)
optimizer.step()
# TRY NOT TO MODIFY: record rewards for plotting purposes
writer.add_scalar("charts/episode_reward", rewards.sum(), global_step)
writer.add_scalar("losses/value_loss", vf_loss.item(), global_step)
writer.add_scalar("losses/entropy", entropys.mean().item(), global_step)
writer.add_scalar("losses/policy_loss", pg_loss.mean().item(), global_step)
env.close()
writer.close()
| 44.969388 | 143 | 0.657477 |
268a0bcab861a087cb597933f8f7c328ebed4a5f | 74,424 | py | Python | django/db/models/fields/__init__.py | simone/django-gb | 5233b366935dcc81e47546921ab4eb5007c83719 | [
"BSD-3-Clause"
] | 1 | 2015-01-07T10:29:03.000Z | 2015-01-07T10:29:03.000Z | django/db/models/fields/__init__.py | pchiquet/django | 45ef4baf5cb018acba16c269e3d470ee1de146e0 | [
"BSD-3-Clause"
] | null | null | null | django/db/models/fields/__init__.py | pchiquet/django | 45ef4baf5cb018acba16c269e3d470ee1de146e0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import copy
import datetime
import decimal
import math
import warnings
from base64 import b64decode, b64encode
from itertools import tee
from django.apps import apps
from django.db import connection
from django.db.models.lookups import default_lookups, RegisterLookupMixin
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators, checks
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.functional import curry, total_ordering, Promise
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import (smart_text, force_text, force_bytes,
python_2_unicode_compatible)
from django.utils.ipv6 import clean_ipv6_address
from django.utils import six
from django.utils.itercompat import is_iterable
# Avoid "TypeError: Item in ``from list'' not a string" -- unicode_literals
# makes these strings unicode
__all__ = [str(x) for x in (
'AutoField', 'BLANK_CHOICE_DASH', 'BigIntegerField', 'BinaryField',
'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField',
'DateTimeField', 'DecimalField', 'EmailField', 'Empty', 'Field',
'FieldDoesNotExist', 'FilePathField', 'FloatField',
'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED',
'NullBooleanField', 'PositiveIntegerField', 'PositiveSmallIntegerField',
'SlugField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField',
)]
class Empty(object):
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
@total_ordering
@python_2_unicode_compatible
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
class_lookups = default_lookups.copy()
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = validators # Store for deconstruction later
self.validators = self.default_validators + validators
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
""" Return "app_label.model_label.field_name". """
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_choices())
errors.extend(self._check_db_index())
errors.extend(self._check_null_allowed_for_primary_keys())
errors.extend(self._check_backend_specific_checks(**kwargs))
return errors
def _check_field_name(self):
""" Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk". """
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
elif '__' in self.name:
return [
checks.Error(
'Field names must not contain "__".',
hint=None,
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
hint=None,
obj=self,
id='fields.E003',
)
]
else:
return []
def _check_choices(self):
if self.choices:
if (isinstance(self.choices, six.string_types) or
not is_iterable(self.choices)):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
hint=None,
obj=self,
id='fields.E004',
)
]
elif any(isinstance(choice, six.string_types) or
not is_iterable(choice) or len(choice) != 2
for choice in self.choices):
return [
checks.Error(
("'choices' must be an iterable containing "
"(actual value, human readable name) tuples."),
hint=None,
obj=self,
id='fields.E005',
)
]
else:
return []
else:
return []
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
hint=None,
obj=self,
id='fields.E006',
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (self.primary_key and self.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
'Primary keys must not have null=True.',
hint=('Set null=False on the field, or '
'remove primary_key=True argument.'),
obj=self,
id='fields.E007',
)
]
else:
return []
def _check_backend_specific_checks(self, **kwargs):
return connection.validation.check_field(self, **kwargs)
def deconstruct(self):
"""
Returns enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class has been run
* The import path of the field, including the class: django.db.models.IntegerField
This should be the most portable version, so less specific may be better.
* A list of positional arguments
* A dict of keyword arguments
Note that the positional or keyword arguments must contain values of the
following types (including inner values of collection types):
* None, bool, str, unicode, int, long, float, complex, set, frozenset, list, tuple, dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this time,
just ensure that the resulting field is the same - prefer keyword arguments
over positional ones, and omit parameters with their default values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": settings.DEFAULT_INDEX_TABLESPACE,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"choices": "_choices",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
}
equals_comparison = set(["choices", "validators", "db_tablespace"])
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
if path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
if path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
if path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (
self.name,
path,
[],
keywords,
)
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
if hasattr(self.rel, 'field') and self.rel.field is self:
obj.rel.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, we use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
return _empty, (self.__class__,), self.__dict__
if self.model._deferred:
# Deferred model will not be found from the app registry. This
# could be fixed by reconstructing the deferred model on unpickle.
raise RuntimeError("Fields of deferred models can't be reduced")
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific data_types dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return connection.creation.data_types[self.get_internal_type()] % data
except KeyError:
return None
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return
values (type, checks).
This will look at db_type(), allowing custom model fields to override it.
"""
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
type_string = self.db_type(connection)
try:
check_string = connection.creation.data_type_check_constraints[self.get_internal_type()] % data
except KeyError:
check_string = None
return {
"type": type_string,
"check": check_string,
}
def db_type_suffix(self, connection):
return connection.creation.data_types_suffix.get(self.get_internal_type())
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, virtual_only=False):
self.set_attributes_from_name(name)
self.model = cls
if virtual_only:
cls._meta.add_virtual_field(self)
else:
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
Used by the default implementations of ``get_db_prep_save``and
`get_db_prep_lookup```
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def get_prep_lookup(self, lookup_type, value):
"""
Perform preliminary non-db specific lookup checks and conversions
"""
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in {
'iexact', 'contains', 'icontains',
'startswith', 'istartswith', 'endswith', 'iendswith',
'month', 'day', 'week_day', 'hour', 'minute', 'second',
'isnull', 'search', 'regex', 'iregex',
}:
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer "
"argument")
return self.get_prep_value(value)
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Returns field's value prepared for database lookup.
"""
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
prepared = True
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabeled_clone method it means the
# value will be handled later on.
if hasattr(value, 'relabeled_clone'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute',
'second', 'search', 'regex', 'iregex'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection,
prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection,
prepared=prepared) for v in value]
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [connection.ops.prep_for_iexact_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if isinstance(self, DateTimeField):
return connection.ops.year_lookup_bounds_for_datetime_field(value)
elif isinstance(self, DateField):
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return [value] # this isn't supposed to happen
else:
return [value]
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return force_text(self.default, strings_only=True)
if (not self.empty_strings_allowed or (self.null and
not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_validator_unique_lookup_type(self):
return '%s__exact' % self.name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
blank_defined = False
for choice, _ in self.choices:
if choice in ('', None):
blank_defined = True
break
first_choice = (blank_choice if include_blank and
not blank_defined else [])
if self.choices:
return first_choice + list(self.choices)
rel_model = self.rel.to
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname),
smart_text(x))
for x in rel_model._default_manager.complex_filter(
self.get_limit_choices_to())]
else:
lst = [(x._get_pk_val(), smart_text(x))
for x in rel_model._default_manager.complex_filter(
self.get_limit_choices_to())]
return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = blank_choice if include_blank else []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_text(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if isinstance(self._choices, collections.Iterator):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
    def save_form_data(self, instance, data):
        """Store a value cleaned by a form onto the model instance."""
        setattr(instance, self.name, data)
    def formfield(self, form_class=None, choices_form_class=None, **kwargs):
        """
        Returns a django.forms.Field instance for this database Field.
        """
        defaults = {'required': not self.blank,
                    'label': capfirst(self.verbose_name),
                    'help_text': self.help_text}
        if self.has_default():
            if callable(self.default):
                # Callable defaults are passed through so the form evaluates
                # them at render time; show_hidden_initial lets the form
                # detect whether the user actually changed the value.
                defaults['initial'] = self.default
                defaults['show_hidden_initial'] = True
            else:
                defaults['initial'] = self.get_default()
        if self.choices:
            # Fields with choices get special treatment.
            include_blank = (self.blank or
                             not (self.has_default() or 'initial' in kwargs))
            defaults['choices'] = self.get_choices(include_blank=include_blank)
            defaults['coerce'] = self.to_python
            if self.null:
                defaults['empty_value'] = None
            if choices_form_class is not None:
                form_class = choices_form_class
            else:
                form_class = forms.TypedChoiceField
            # Many of the subclass-specific formfield arguments (min_value,
            # max_value) don't apply for choice fields, so be sure to only pass
            # the values that TypedChoiceField will understand.
            for k in list(kwargs):
                if k not in ('coerce', 'empty_value', 'choices', 'required',
                             'widget', 'label', 'initial', 'help_text',
                             'error_messages', 'show_hidden_initial'):
                    del kwargs[k]
        defaults.update(kwargs)
        if form_class is None:
            form_class = forms.CharField
        return form_class(**defaults)
    def value_from_object(self, obj):
        """
        Returns the value of this field in the given model instance.
        """
        return getattr(obj, self.attname)
class AutoField(Field):
    """An auto-incrementing integer primary key.

    Exactly one AutoField is permitted per model, and it must be the
    primary key (enforced by the system checks and contribute_to_class).
    """
    description = _("Integer")

    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be an integer."),
    }

    def __init__(self, *args, **kwargs):
        # The database assigns the value, so the field is always blank-able.
        kwargs['blank'] = True
        super(AutoField, self).__init__(*args, **kwargs)

    def check(self, **kwargs):
        errors = super(AutoField, self).check(**kwargs)
        errors.extend(self._check_primary_key())
        return errors

    def _check_primary_key(self):
        """System check: an AutoField must be the primary key (fields.E100)."""
        if not self.primary_key:
            return [
                checks.Error(
                    'AutoFields must set primary_key=True.',
                    hint=None,
                    obj=self,
                    id='fields.E100',
                ),
            ]
        else:
            return []

    def deconstruct(self):
        # Mirror __init__: blank is forced, so drop it; primary_key is
        # mandatory, so always serialize it.
        name, path, args, kwargs = super(AutoField, self).deconstruct()
        del kwargs['blank']
        kwargs['primary_key'] = True
        return name, path, args, kwargs

    def get_internal_type(self):
        return "AutoField"

    def to_python(self, value):
        """Coerce to int; None passes through."""
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def validate(self, value, model_instance):
        # No validation: the database generates the value.
        pass

    def get_db_prep_value(self, value, connection, prepared=False):
        if not prepared:
            value = self.get_prep_value(value)
            # Some backends reject explicit pk values; let the backend veto.
            value = connection.ops.validate_autopk_value(value)
        return value

    def get_prep_value(self, value):
        value = super(AutoField, self).get_prep_value(value)
        if value is None:
            return None
        return int(value)

    def contribute_to_class(self, cls, name):
        assert not cls._meta.has_auto_field, \
            "A model can't have more than one AutoField."
        super(AutoField, self).contribute_to_class(cls, name)
        cls._meta.has_auto_field = True
        cls._meta.auto_field = self

    def formfield(self, **kwargs):
        # Auto-generated values are never edited through forms.
        return None
class BooleanField(Field):
    """A true/false field.

    ``null`` is not allowed (use NullBooleanField for a nullable boolean);
    ``blank`` is forced to True because an unchecked checkbox submits no
    value at all.
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be either True or False."),
    }
    description = _("Boolean (Either True or False)")

    def __init__(self, *args, **kwargs):
        # An unchecked checkbox posts nothing, so the field must accept
        # blank input at the validation layer.
        kwargs['blank'] = True
        super(BooleanField, self).__init__(*args, **kwargs)

    def check(self, **kwargs):
        errors = super(BooleanField, self).check(**kwargs)
        errors.extend(self._check_null(**kwargs))
        return errors

    def _check_null(self, **kwargs):
        """System check: reject null=True on a BooleanField (fields.E110)."""
        if getattr(self, 'null', False):
            return [
                checks.Error(
                    'BooleanFields do not accept null values.',
                    hint='Use a NullBooleanField instead.',
                    obj=self,
                    id='fields.E110',
                )
            ]
        else:
            return []

    def deconstruct(self):
        name, path, args, kwargs = super(BooleanField, self).deconstruct()
        # blank=True is forced in __init__, so it need not be serialized.
        del kwargs['blank']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "BooleanField"

    def to_python(self, value):
        """Coerce ``value`` to a real bool.

        Accepts bools, 0/1, and the common string spellings — including
        lowercase 'true'/'false', which routinely arrive from querystrings
        and serialized data. Raises ValidationError for anything else.
        """
        if value in (True, False):
            # if value is 1 or 0 than it's equal to True or False, but we want
            # to return a true bool for semantic reasons.
            return bool(value)
        if value in ('t', 'True', 'true', '1'):
            return True
        if value in ('f', 'False', 'false', '0'):
            return False
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def get_prep_lookup(self, lookup_type, value):
        # Special-case handling for filters coming from a Web request (e.g. the
        # admin interface). Only works for scalar values (not lists). If you're
        # passing in a list, you might as well make things the right type when
        # constructing the list.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(BooleanField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        value = super(BooleanField, self).get_prep_value(value)
        if value is None:
            return None
        return bool(value)

    def formfield(self, **kwargs):
        # Unlike most fields, BooleanField figures out include_blank from
        # self.null instead of self.blank.
        if self.choices:
            include_blank = (self.null or
                             not (self.has_default() or 'initial' in kwargs))
            defaults = {'choices': self.get_choices(include_blank=include_blank)}
        else:
            defaults = {'form_class': forms.BooleanField}
        defaults.update(kwargs)
        return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
    """A fixed-max-length string field; requires ``max_length``."""
    description = _("String (up to %(max_length)s)")

    def __init__(self, *args, **kwargs):
        super(CharField, self).__init__(*args, **kwargs)
        self.validators.append(validators.MaxLengthValidator(self.max_length))

    def check(self, **kwargs):
        errors = super(CharField, self).check(**kwargs)
        errors.extend(self._check_max_length_attribute(**kwargs))
        return errors

    def _check_max_length_attribute(self, **kwargs):
        """System check: max_length must be a positive integer.

        E120 when max_length is missing (int(None) -> TypeError),
        E121 when it is not a positive integer.
        """
        try:
            max_length = int(self.max_length)
            if max_length <= 0:
                raise ValueError()
        except TypeError:
            return [
                checks.Error(
                    "CharFields must define a 'max_length' attribute.",
                    hint=None,
                    obj=self,
                    id='fields.E120',
                )
            ]
        except ValueError:
            return [
                checks.Error(
                    "'max_length' must be a positive integer.",
                    hint=None,
                    obj=self,
                    id='fields.E121',
                )
            ]
        else:
            return []

    def get_internal_type(self):
        return "CharField"

    def to_python(self, value):
        # Strings and None pass through untouched; everything else is
        # converted to text.
        if isinstance(value, six.string_types) or value is None:
            return value
        return smart_text(value)

    def get_prep_value(self, value):
        value = super(CharField, self).get_prep_value(value)
        return self.to_python(value)

    def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass into widget for example).
        defaults = {'max_length': self.max_length}
        defaults.update(kwargs)
        return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
    """A CharField whose contents are validated as comma-separated integers."""
    default_validators = [validators.validate_comma_separated_integer_list]
    description = _("Comma-separated integers")

    def formfield(self, **kwargs):
        field_kwargs = {
            'error_messages': {
                'invalid': _('Enter only digits separated by commas.'),
            }
        }
        field_kwargs.update(kwargs)
        return super(CommaSeparatedIntegerField, self).formfield(**field_kwargs)
class DateField(Field):
    """A date (without time of day).

    ``auto_now`` updates the value on every save; ``auto_now_add`` sets it
    once at creation. Either option makes the field non-editable.
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid date format. It must be "
                     "in YYYY-MM-DD format."),
        'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
                          "but it is an invalid date."),
    }
    description = _("Date (without time)")

    def __init__(self, verbose_name=None, name=None, auto_now=False,
                 auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        if auto_now or auto_now_add:
            # Auto-populated values can't be edited via forms.
            kwargs['editable'] = False
            kwargs['blank'] = True
        super(DateField, self).__init__(verbose_name, name, **kwargs)

    def deconstruct(self):
        # Mirror __init__: serialize the auto_* flags and drop the
        # editable/blank overrides they imply.
        name, path, args, kwargs = super(DateField, self).deconstruct()
        if self.auto_now:
            kwargs['auto_now'] = True
        if self.auto_now_add:
            kwargs['auto_now_add'] = True
        if self.auto_now or self.auto_now_add:
            del kwargs['editable']
            del kwargs['blank']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "DateField"

    def to_python(self, value):
        """Coerce to datetime.date; accepts date, datetime, or ISO string."""
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            if settings.USE_TZ and timezone.is_aware(value):
                # Convert aware datetimes to the default time zone
                # before casting them to dates (#17742).
                default_timezone = timezone.get_default_timezone()
                value = timezone.make_naive(value, default_timezone)
            return value.date()
        if isinstance(value, datetime.date):
            return value

        try:
            parsed = parse_date(value)
            if parsed is not None:
                return parsed
        except ValueError:
            # Well-formed but impossible date, e.g. 2014-02-30.
            raise exceptions.ValidationError(
                self.error_messages['invalid_date'],
                code='invalid_date',
                params={'value': value},
            )

        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def pre_save(self, model_instance, add):
        # Apply auto_now/auto_now_add just before saving.
        if self.auto_now or (self.auto_now_add and add):
            value = datetime.date.today()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateField, self).pre_save(model_instance, add)

    def contribute_to_class(self, cls, name):
        super(DateField, self).contribute_to_class(cls, name)
        # Install get_next_by_<field> / get_previous_by_<field> helpers on
        # the model (only meaningful for non-nullable fields).
        if not self.null:
            setattr(cls, 'get_next_by_%s' % self.name,
                    curry(cls._get_next_or_previous_by_FIELD, field=self,
                          is_next=True))
            setattr(cls, 'get_previous_by_%s' % self.name,
                    curry(cls._get_next_or_previous_by_FIELD, field=self,
                          is_next=False))

    def get_prep_lookup(self, lookup_type, value):
        # For dates lookups, convert the value to an int
        # so the database backend always sees a consistent type.
        if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute', 'second'):
            return int(value)
        return super(DateField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        value = super(DateField, self).get_prep_value(value)
        return self.to_python(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts dates into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_date(value)

    def value_to_string(self, obj):
        """Serialize as ISO-8601 date string ('' for None)."""
        val = self._get_val_from_obj(obj)
        return '' if val is None else val.isoformat()

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateField}
        defaults.update(kwargs)
        return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
    """A date and time.

    Inherits the auto_now/auto_now_add machinery from DateField; when time
    zone support is active (settings.USE_TZ), naive datetimes are accepted
    with a warning and interpreted in the default time zone.
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid format. It must be in "
                     "YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
        'invalid_date': _("'%(value)s' value has the correct format "
                          "(YYYY-MM-DD) but it is an invalid date."),
        'invalid_datetime': _("'%(value)s' value has the correct format "
                              "(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
                              "but it is an invalid date/time."),
    }
    description = _("Date (with time)")

    # __init__ is inherited from DateField

    def get_internal_type(self):
        return "DateTimeField"

    def to_python(self, value):
        """Coerce to datetime.datetime; accepts datetime, date, or string."""
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            # Promote a bare date to midnight of that day.
            value = datetime.datetime(value.year, value.month, value.day)
            if settings.USE_TZ:
                # For backwards compatibility, interpret naive datetimes in
                # local time. This won't work during DST change, but we can't
                # do much about it, so we let the exceptions percolate up the
                # call stack.
                warnings.warn("DateTimeField %s.%s received a naive datetime "
                              "(%s) while time zone support is active." %
                              (self.model.__name__, self.name, value),
                              RuntimeWarning)
                default_timezone = timezone.get_default_timezone()
                value = timezone.make_aware(value, default_timezone)
            return value

        # Try full datetime strings first, then bare dates.
        try:
            parsed = parse_datetime(value)
            if parsed is not None:
                return parsed
        except ValueError:
            raise exceptions.ValidationError(
                self.error_messages['invalid_datetime'],
                code='invalid_datetime',
                params={'value': value},
            )

        try:
            parsed = parse_date(value)
            if parsed is not None:
                return datetime.datetime(parsed.year, parsed.month, parsed.day)
        except ValueError:
            raise exceptions.ValidationError(
                self.error_messages['invalid_date'],
                code='invalid_date',
                params={'value': value},
            )

        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def pre_save(self, model_instance, add):
        # Apply auto_now/auto_now_add; timezone.now() is aware when USE_TZ.
        if self.auto_now or (self.auto_now_add and add):
            value = timezone.now()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateTimeField, self).pre_save(model_instance, add)

    # contribute_to_class is inherited from DateField, it registers
    # get_next_by_FOO and get_prev_by_FOO

    # get_prep_lookup is inherited from DateField

    def get_prep_value(self, value):
        value = super(DateTimeField, self).get_prep_value(value)
        value = self.to_python(value)
        if value is not None and settings.USE_TZ and timezone.is_naive(value):
            # For backwards compatibility, interpret naive datetimes in local
            # time. This won't work during DST change, but we can't do much
            # about it, so we let the exceptions percolate up the call stack.
            warnings.warn("DateTimeField %s.%s received a naive datetime (%s)"
                          " while time zone support is active." %
                          (self.model.__name__, self.name, value),
                          RuntimeWarning)
            default_timezone = timezone.get_default_timezone()
            value = timezone.make_aware(value, default_timezone)
        return value

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts datetimes into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_datetime(value)

    def value_to_string(self, obj):
        """Serialize as ISO-8601 datetime string ('' for None)."""
        val = self._get_val_from_obj(obj)
        return '' if val is None else val.isoformat()

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateTimeField}
        defaults.update(kwargs)
        return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
    """A fixed-precision decimal number.

    Requires ``max_digits`` (total significant digits) and
    ``decimal_places`` (digits after the decimal point).
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be a decimal number."),
    }
    description = _("Decimal number")

    def __init__(self, verbose_name=None, name=None, max_digits=None,
                 decimal_places=None, **kwargs):
        self.max_digits, self.decimal_places = max_digits, decimal_places
        super(DecimalField, self).__init__(verbose_name, name, **kwargs)

    def check(self, **kwargs):
        errors = super(DecimalField, self).check(**kwargs)

        # Only cross-validate max_digits vs decimal_places once each is
        # individually valid.
        digits_errors = self._check_decimal_places()
        digits_errors.extend(self._check_max_digits())
        if not digits_errors:
            errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
        else:
            errors.extend(digits_errors)
        return errors

    def _check_decimal_places(self):
        """System check: decimal_places must be a non-negative int
        (E130 missing, E131 invalid)."""
        try:
            decimal_places = int(self.decimal_places)
            if decimal_places < 0:
                raise ValueError()
        except TypeError:
            return [
                checks.Error(
                    "DecimalFields must define a 'decimal_places' attribute.",
                    hint=None,
                    obj=self,
                    id='fields.E130',
                )
            ]
        except ValueError:
            return [
                checks.Error(
                    "'decimal_places' must be a non-negative integer.",
                    hint=None,
                    obj=self,
                    id='fields.E131',
                )
            ]
        else:
            return []

    def _check_max_digits(self):
        """System check: max_digits must be a positive int
        (E132 missing, E133 invalid)."""
        try:
            max_digits = int(self.max_digits)
            if max_digits <= 0:
                raise ValueError()
        except TypeError:
            return [
                checks.Error(
                    "DecimalFields must define a 'max_digits' attribute.",
                    hint=None,
                    obj=self,
                    id='fields.E132',
                )
            ]
        except ValueError:
            return [
                checks.Error(
                    "'max_digits' must be a positive integer.",
                    hint=None,
                    obj=self,
                    id='fields.E133',
                )
            ]
        else:
            return []

    def _check_decimal_places_and_max_digits(self, **kwargs):
        """System check: decimal_places must fit inside max_digits (E134)."""
        if int(self.decimal_places) > int(self.max_digits):
            return [
                checks.Error(
                    "'max_digits' must be greater or equal to 'decimal_places'.",
                    hint=None,
                    obj=self,
                    id='fields.E134',
                )
            ]
        return []

    def deconstruct(self):
        name, path, args, kwargs = super(DecimalField, self).deconstruct()
        if self.max_digits is not None:
            kwargs['max_digits'] = self.max_digits
        if self.decimal_places is not None:
            kwargs['decimal_places'] = self.decimal_places
        return name, path, args, kwargs

    def get_internal_type(self):
        return "DecimalField"

    def to_python(self, value):
        """Coerce to decimal.Decimal; None passes through."""
        if value is None:
            return value
        try:
            return decimal.Decimal(value)
        except decimal.InvalidOperation:
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def _format(self, value):
        if isinstance(value, six.string_types) or value is None:
            return value
        else:
            return self.format_number(value)

    def format_number(self, value):
        """
        Formats a number into a string with the requisite number of digits and
        decimal places.
        """
        # Method moved to django.db.backends.utils.
        #
        # It is preserved because it is used by the oracle backend
        # (django.db.backends.oracle.query), and also for
        # backwards-compatibility with any external code which may have used
        # this method.
        from django.db.backends import utils
        return utils.format_number(value, self.max_digits, self.decimal_places)

    def get_db_prep_save(self, value, connection):
        return connection.ops.value_to_db_decimal(self.to_python(value),
                self.max_digits, self.decimal_places)

    def get_prep_value(self, value):
        value = super(DecimalField, self).get_prep_value(value)
        return self.to_python(value)

    def formfield(self, **kwargs):
        defaults = {
            'max_digits': self.max_digits,
            'decimal_places': self.decimal_places,
            'form_class': forms.DecimalField,
        }
        defaults.update(kwargs)
        return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
    """A CharField validated as an email address."""
    default_validators = [validators.validate_email]
    description = _("Email address")

    def __init__(self, *args, **kwargs):
        # NOTE: RFCs 3696/5321 permit addresses up to 254 characters; the
        # historical default of 75 is kept here for schema compatibility.
        kwargs.setdefault('max_length', 75)
        super(EmailField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        # Keep max_length in the serialized form even when it equals the
        # current default, so the default can change in the future without
        # affecting existing migrations.
        return super(EmailField, self).deconstruct()

    def formfield(self, **kwargs):
        # As with CharField, email validation ends up running twice (model
        # and form layer); that duplication is accepted.
        field_kwargs = {
            'form_class': forms.EmailField,
        }
        field_kwargs.update(kwargs)
        return super(EmailField, self).formfield(**field_kwargs)
class FilePathField(Field):
    """A CharField-like field whose choices are filesystem entries under
    ``path``, optionally filtered by the ``match`` regex and recursed into
    with ``recursive``. At least one of allow_files/allow_folders must be
    True (enforced by the E140 system check).
    """
    description = _("File path")

    def __init__(self, verbose_name=None, name=None, path='', match=None,
                 recursive=False, allow_files=True, allow_folders=False, **kwargs):
        self.path, self.match, self.recursive = path, match, recursive
        self.allow_files, self.allow_folders = allow_files, allow_folders
        kwargs['max_length'] = kwargs.get('max_length', 100)
        super(FilePathField, self).__init__(verbose_name, name, **kwargs)

    def check(self, **kwargs):
        errors = super(FilePathField, self).check(**kwargs)
        errors.extend(self._check_allowing_files_or_folders(**kwargs))
        return errors

    def _check_allowing_files_or_folders(self, **kwargs):
        """System check: at least one of allow_files/allow_folders (E140)."""
        if not self.allow_files and not self.allow_folders:
            return [
                checks.Error(
                    "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
                    hint=None,
                    obj=self,
                    id='fields.E140',
                )
            ]
        return []

    def deconstruct(self):
        # Mirror __init__: only serialize values that differ from defaults.
        name, path, args, kwargs = super(FilePathField, self).deconstruct()
        if self.path != '':
            kwargs['path'] = self.path
        if self.match is not None:
            kwargs['match'] = self.match
        if self.recursive is not False:
            kwargs['recursive'] = self.recursive
        if self.allow_files is not True:
            kwargs['allow_files'] = self.allow_files
        if self.allow_folders is not False:
            kwargs['allow_folders'] = self.allow_folders
        if kwargs.get("max_length", None) == 100:
            del kwargs["max_length"]
        return name, path, args, kwargs

    def formfield(self, **kwargs):
        defaults = {
            'path': self.path,
            'match': self.match,
            'recursive': self.recursive,
            'form_class': forms.FilePathField,
            'allow_files': self.allow_files,
            'allow_folders': self.allow_folders,
        }
        defaults.update(kwargs)
        return super(FilePathField, self).formfield(**defaults)

    def get_internal_type(self):
        return "FilePathField"
class FloatField(Field):
    """A floating point number, stored as Python float."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be a float."),
    }
    description = _("Floating point number")

    def get_internal_type(self):
        return "FloatField"

    def get_prep_value(self, value):
        value = super(FloatField, self).get_prep_value(value)
        return None if value is None else float(value)

    def to_python(self, value):
        """Coerce to float; None passes through, bad input raises."""
        if value is None:
            return value
        try:
            return float(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def formfield(self, **kwargs):
        field_kwargs = {'form_class': forms.FloatField}
        field_kwargs.update(kwargs)
        return super(FloatField, self).formfield(**field_kwargs)
class IntegerField(Field):
    """An integer field with backend-aware range validators."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be an integer."),
    }
    description = _("Integer")

    def __init__(self, *args, **kwargs):
        # Attach Min/MaxValueValidators matching the range the current
        # database backend supports for this column type. A copy of the
        # class-level list is built so the class attribute stays unmodified.
        default_validators = self.default_validators[:]
        internal_type = self.get_internal_type()
        min_value, max_value = connection.ops.integer_field_range(internal_type)
        if min_value is not None:
            default_validators.append(validators.MinValueValidator(min_value))
        if max_value is not None:
            default_validators.append(validators.MaxValueValidator(max_value))
        self.default_validators = default_validators
        super(IntegerField, self).__init__(*args, **kwargs)

    def get_prep_value(self, value):
        value = super(IntegerField, self).get_prep_value(value)
        if value is None:
            return None
        return int(value)

    def get_prep_lookup(self, lookup_type, value):
        # Round float bounds up so gte/lt comparisons against integer
        # columns behave as expected (e.g. x >= 1.5 means x >= 2).
        if ((lookup_type == 'gte' or lookup_type == 'lt')
                and isinstance(value, float)):
            value = math.ceil(value)
        return super(IntegerField, self).get_prep_lookup(lookup_type, value)

    def get_internal_type(self):
        return "IntegerField"

    def to_python(self, value):
        """Coerce to int; None passes through."""
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.IntegerField}
        defaults.update(kwargs)
        return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
    """A 64-bit (8 byte) integer field."""
    empty_strings_allowed = False
    description = _("Big (8 byte) integer")
    # Largest signed 64-bit value (2**63 - 1).
    MAX_BIGINT = 9223372036854775807

    def get_internal_type(self):
        return "BigIntegerField"

    def formfield(self, **kwargs):
        limits = {
            'min_value': -BigIntegerField.MAX_BIGINT - 1,
            'max_value': BigIntegerField.MAX_BIGINT,
        }
        limits.update(kwargs)
        return super(BigIntegerField, self).formfield(**limits)
class IPAddressField(Field):
    """Deprecated IPv4-only address field; use GenericIPAddressField."""
    empty_strings_allowed = False
    description = _("IPv4 address")

    def __init__(self, *args, **kwargs):
        warnings.warn("IPAddressField has been deprecated. Use GenericIPAddressField instead.",
                      RemovedInDjango19Warning)
        # Dotted-quad IPv4 strings are at most 15 characters.
        kwargs['max_length'] = 15
        super(IPAddressField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(IPAddressField, self).deconstruct()
        # max_length is forced in __init__, so drop it from the serialized form.
        kwargs.pop('max_length')
        return name, path, args, kwargs

    def get_internal_type(self):
        return "IPAddressField"

    def formfield(self, **kwargs):
        field_kwargs = {'form_class': forms.IPAddressField}
        field_kwargs.update(kwargs)
        return super(IPAddressField, self).formfield(**field_kwargs)
class GenericIPAddressField(Field):
    """An IPv4 and/or IPv6 address field.

    ``protocol`` is 'both', 'ipv4' or 'ipv6'; ``unpack_ipv4`` maps
    IPv4-mapped IPv6 addresses (::ffff:a.b.c.d) back to dotted-quad form.
    """
    empty_strings_allowed = True
    description = _("IP address")
    default_error_messages = {}

    def __init__(self, verbose_name=None, name=None, protocol='both',
                 unpack_ipv4=False, *args, **kwargs):
        self.unpack_ipv4 = unpack_ipv4
        self.protocol = protocol
        self.default_validators, invalid_error_message = \
            validators.ip_address_validators(protocol, unpack_ipv4)
        # NOTE(review): this assigns into the *class-level* dict (the
        # instance has no own default_error_messages yet), so the last
        # constructed field's message is shared by all instances — presumably
        # harmless since Field.__init__ snapshots error_messages per
        # instance, but worth confirming.
        self.default_error_messages['invalid'] = invalid_error_message
        # 39 chars fits the longest textual IPv6 address.
        kwargs['max_length'] = 39
        super(GenericIPAddressField, self).__init__(verbose_name, name, *args,
                                                    **kwargs)

    def check(self, **kwargs):
        errors = super(GenericIPAddressField, self).check(**kwargs)
        errors.extend(self._check_blank_and_null_values(**kwargs))
        return errors

    def _check_blank_and_null_values(self, **kwargs):
        """System check: blank=True requires null=True (fields.E150),
        because blank values are stored as NULL."""
        if not getattr(self, 'null', False) and getattr(self, 'blank', False):
            return [
                checks.Error(
                    ('GenericIPAddressFields cannot have blank=True if null=False, '
                     'as blank values are stored as nulls.'),
                    hint=None,
                    obj=self,
                    id='fields.E150',
                )
            ]
        return []

    def deconstruct(self):
        # Mirror __init__: only serialize non-default options.
        name, path, args, kwargs = super(GenericIPAddressField, self).deconstruct()
        if self.unpack_ipv4 is not False:
            kwargs['unpack_ipv4'] = self.unpack_ipv4
        if self.protocol != "both":
            kwargs['protocol'] = self.protocol
        if kwargs.get("max_length", None) == 39:
            del kwargs['max_length']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "GenericIPAddressField"

    def to_python(self, value):
        # Only IPv6 addresses (containing ':') need normalization.
        if value and ':' in value:
            return clean_ipv6_address(value,
                self.unpack_ipv4, self.error_messages['invalid'])
        return value

    def get_db_prep_value(self, value, connection, prepared=False):
        if not prepared:
            value = self.get_prep_value(value)
        # Empty strings are stored as NULL.
        return value or None

    def get_prep_value(self, value):
        value = super(GenericIPAddressField, self).get_prep_value(value)
        if value and ':' in value:
            try:
                return clean_ipv6_address(value, self.unpack_ipv4)
            except exceptions.ValidationError:
                # Leave invalid input untouched; validation reports it later.
                pass
        return value

    def formfield(self, **kwargs):
        defaults = {
            'protocol': self.protocol,
            'form_class': forms.GenericIPAddressField,
        }
        defaults.update(kwargs)
        return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
    """A boolean field that also allows NULL (i.e. unknown).

    null=True and blank=True are forced in __init__.
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be either None, True or False."),
    }
    description = _("Boolean (Either True, False or None)")

    def __init__(self, *args, **kwargs):
        kwargs['null'] = True
        kwargs['blank'] = True
        super(NullBooleanField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        # null/blank are forced in __init__, so drop them from the
        # serialized form.
        name, path, args, kwargs = super(NullBooleanField, self).deconstruct()
        del kwargs['null']
        del kwargs['blank']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "NullBooleanField"

    def to_python(self, value):
        """Coerce to True, False or None; mirrors BooleanField.to_python
        with None/'None' accepted as well."""
        if value is None:
            return None
        if value in (True, False):
            return bool(value)
        if value in ('None',):
            return None
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def get_prep_lookup(self, lookup_type, value):
        # Special-case handling for filters coming from a Web request (e.g. the
        # admin interface). Only works for scalar values (not lists). If you're
        # passing in a list, you might as well make things the right type when
        # constructing the list.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(NullBooleanField, self).get_prep_lookup(lookup_type,
                                                             value)

    def get_prep_value(self, value):
        value = super(NullBooleanField, self).get_prep_value(value)
        if value is None:
            return None
        return bool(value)

    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.NullBooleanField,
            'required': not self.blank,
            'label': capfirst(self.verbose_name),
            'help_text': self.help_text}
        defaults.update(kwargs)
        return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
    """An IntegerField restricted to values >= 0 at the form layer."""
    description = _("Positive integer")

    def get_internal_type(self):
        return "PositiveIntegerField"

    def formfield(self, **kwargs):
        field_kwargs = {'min_value': 0}
        field_kwargs.update(kwargs)
        return super(PositiveIntegerField, self).formfield(**field_kwargs)
class PositiveSmallIntegerField(IntegerField):
    """A small IntegerField restricted to values >= 0 at the form layer."""
    description = _("Positive small integer")

    def get_internal_type(self):
        return "PositiveSmallIntegerField"

    def formfield(self, **kwargs):
        field_kwargs = {'min_value': 0}
        field_kwargs.update(kwargs)
        return super(PositiveSmallIntegerField, self).formfield(**field_kwargs)
class SlugField(CharField):
    """A short label containing only letters, numbers, underscores and
    hyphens; indexed by default."""
    default_validators = [validators.validate_slug]
    description = _("Slug (up to %(max_length)s)")

    def __init__(self, *args, **kwargs):
        kwargs['max_length'] = kwargs.get('max_length', 50)
        # Set db_index=True unless it's been set manually.
        if 'db_index' not in kwargs:
            kwargs['db_index'] = True
        super(SlugField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        # Mirror __init__: drop the defaults (max_length=50, db_index=True)
        # and only serialize an explicit db_index=False.
        name, path, args, kwargs = super(SlugField, self).deconstruct()
        if kwargs.get("max_length", None) == 50:
            del kwargs['max_length']
        if self.db_index is False:
            kwargs['db_index'] = False
        else:
            del kwargs['db_index']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "SlugField"

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.SlugField}
        defaults.update(kwargs)
        return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
    """An IntegerField mapped to the backend's small-integer column type."""
    description = _("Small integer")

    def get_internal_type(self):
        return "SmallIntegerField"
class TextField(Field):
    """An unbounded text field, rendered with a Textarea widget."""
    description = _("Text")

    def get_internal_type(self):
        return "TextField"

    def get_prep_value(self, value):
        value = super(TextField, self).get_prep_value(value)
        # Strings and None pass through; anything else becomes text.
        if value is None or isinstance(value, six.string_types):
            return value
        return smart_text(value)

    def formfield(self, **kwargs):
        # Passing max_length here means length is validated twice (model and
        # form layer); accepted so the widget receives the value.
        field_kwargs = {'max_length': self.max_length, 'widget': forms.Textarea}
        field_kwargs.update(kwargs)
        return super(TextField, self).formfield(**field_kwargs)
class TimeField(Field):
    """A time of day.

    Supports auto_now/auto_now_add analogously to DateField; either option
    makes the field non-editable.
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid format. It must be in "
                     "HH:MM[:ss[.uuuuuu]] format."),
        'invalid_time': _("'%(value)s' value has the correct format "
                          "(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
    }
    description = _("Time")

    def __init__(self, verbose_name=None, name=None, auto_now=False,
                 auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        if auto_now or auto_now_add:
            # Auto-populated values can't be edited via forms.
            kwargs['editable'] = False
            kwargs['blank'] = True
        super(TimeField, self).__init__(verbose_name, name, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(TimeField, self).deconstruct()
        if self.auto_now is not False:
            kwargs["auto_now"] = self.auto_now
        if self.auto_now_add is not False:
            kwargs["auto_now_add"] = self.auto_now_add
        return name, path, args, kwargs

    def get_internal_type(self):
        return "TimeField"

    def to_python(self, value):
        """Coerce to datetime.time; accepts time, datetime, or string."""
        if value is None:
            return None
        if isinstance(value, datetime.time):
            return value
        if isinstance(value, datetime.datetime):
            # Not usually a good idea to pass in a datetime here (it loses
            # information), but this can be a side-effect of interacting with a
            # database backend (e.g. Oracle), so we'll be accommodating.
            return value.time()

        try:
            parsed = parse_time(value)
            if parsed is not None:
                return parsed
        except ValueError:
            # Well-formed but impossible time, e.g. 25:00.
            raise exceptions.ValidationError(
                self.error_messages['invalid_time'],
                code='invalid_time',
                params={'value': value},
            )

        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def pre_save(self, model_instance, add):
        # Apply auto_now/auto_now_add just before saving.
        if self.auto_now or (self.auto_now_add and add):
            value = datetime.datetime.now().time()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(TimeField, self).pre_save(model_instance, add)

    def get_prep_value(self, value):
        value = super(TimeField, self).get_prep_value(value)
        return self.to_python(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts times into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_time(value)

    def value_to_string(self, obj):
        """Serialize as ISO-8601 time string ('' for None)."""
        val = self._get_val_from_obj(obj)
        return '' if val is None else val.isoformat()

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.TimeField}
        defaults.update(kwargs)
        return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
    """A CharField validated as a URL."""
    default_validators = [validators.URLValidator()]
    description = _("URL")

    def __init__(self, verbose_name=None, name=None, **kwargs):
        kwargs.setdefault('max_length', 200)
        super(URLField, self).__init__(verbose_name, name, **kwargs)

    def deconstruct(self):
        # Drop max_length from the serialized form when it equals the default.
        name, path, args, kwargs = super(URLField, self).deconstruct()
        if kwargs.get("max_length") == 200:
            kwargs.pop('max_length')
        return name, path, args, kwargs

    def formfield(self, **kwargs):
        # As with CharField, URL validation ends up running twice (model and
        # form layer); that duplication is accepted.
        field_kwargs = {
            'form_class': forms.URLField,
        }
        field_kwargs.update(kwargs)
        return super(URLField, self).formfield(**field_kwargs)
class BinaryField(Field):
    """Model field for raw binary data; never editable through forms."""
    description = _("Raw binary data")
    # Values treated as "empty" for validation purposes.
    empty_values = [None, b'']

    def __init__(self, *args, **kwargs):
        # Binary blobs cannot be edited via forms/admin.
        kwargs['editable'] = False
        super(BinaryField, self).__init__(*args, **kwargs)
        if self.max_length is not None:
            self.validators.append(validators.MaxLengthValidator(self.max_length))

    def get_internal_type(self):
        return "BinaryField"

    def get_default(self):
        if self.has_default() and not callable(self.default):
            return self.default
        default = super(BinaryField, self).get_default()
        # The base implementation yields '' for "no default"; a binary
        # column needs bytes, not text.
        if default == '':
            return b''
        return default

    def get_db_prep_value(self, value, connection, prepared=False):
        value = super(BinaryField, self).get_db_prep_value(value, connection, prepared)
        if value is not None:
            # Wrap in the DB-API Binary type expected by the driver.
            return connection.Database.Binary(value)
        return value

    def value_to_string(self, obj):
        """Binary data is serialized as base64"""
        return b64encode(force_bytes(self._get_val_from_obj(obj))).decode('ascii')

    def to_python(self, value):
        # If it's a string, it should be base64-encoded data
        if isinstance(value, six.text_type):
            return six.memoryview(b64decode(force_bytes(value)))
        return value
| 36.953327 | 107 | 0.59419 |
6d27cc0261876b03983118f03d640a13ea9281bd | 4,782 | py | Python | qa/rpc-tests/wallet-dump.py | modong/qtum | e2d7f5e7b588443ac10ac31f7af18527e54abcb5 | [
"MIT"
] | 2 | 2017-07-31T14:18:36.000Z | 2021-07-19T21:35:56.000Z | qa/rpc-tests/wallet-dump.py | yelongbao/qtum | e2d7f5e7b588443ac10ac31f7af18527e54abcb5 | [
"MIT"
] | null | null | null | qa/rpc-tests/wallet-dump.py | yelongbao/qtum | e2d7f5e7b588443ac10ac31f7af18527e54abcb5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (start_nodes, start_node, assert_equal, bitcoind_processes)
def read_dump(file_name, addrs, hd_master_addr_old):
    """Parse a wallet dump file and tally the key records found in it.

    Counts labelled keys that match entries in *addrs* (by address and hd
    keypath), plus change and reserve keys.  Asserts that the previous HD
    master key (*hd_master_addr_old*) appears as inactive and returns the
    newly active HD master address alongside the three counters.
    """
    n_labelled = 0
    n_change = 0
    n_reserve = 0
    hd_master_addr_ret = None
    with open(file_name, encoding='utf8') as dump:
        for raw_line in dump:
            # Skip comments and anything too short to be a key record.
            if raw_line[0] == "#" or len(raw_line) <= 10:
                continue
            key_label, comment = raw_line.split("#")
            keytype = key_label.split(" ")[2]
            if len(comment) <= 1:
                continue
            addr_keypath = comment.split(" addr=")[1]
            addr = addr_keypath.split(" ")[0]
            keypath = None
            if keytype == "inactivehdmaster=1":
                # The previous master key must still be present, inactive.
                assert hd_master_addr_old == addr
            elif keytype == "hdmaster=1":
                # A fresh master key must have been generated.
                assert hd_master_addr_old != addr
                hd_master_addr_ret = addr
            else:
                keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
            # Classify this record against the expected address list.
            for addr_obj in addrs:
                if keytype == "label=" and addr_obj['address'] == addr and addr_obj['hdkeypath'] == keypath:
                    n_labelled += 1
                    break
                elif keytype == "change=1":
                    n_change += 1
                    break
                elif keytype == "reserve=1":
                    n_reserve += 1
                    break
    return n_labelled, n_change, n_reserve, hd_master_addr_ret
class WalletDumpTest(BitcoinTestFramework):
    """Functional test of the `dumpwallet` RPC, before and after wallet
    encryption, validated by parsing the dump files with read_dump()."""

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = False
        self.num_nodes = 1
        # Small keypool so the dump stays quick to generate and verify.
        self.extra_args = [["-keypool=90"]]

    def setup_network(self, split=False):
        # Use 1 minute timeout because the initial getnewaddress RPC can take
        # longer than the default 30 seconds due to an expensive
        # CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in
        # the test often takes even longer.
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args, timewait=60)

    def run_test(self):
        tmpdir = self.options.tmpdir
        # generate 20 addresses to compare against the dump
        test_addr_count = 20
        addrs = []
        for i in range(0, test_addr_count):
            addr = self.nodes[0].getnewaddress()
            vaddr = self.nodes[0].validateaddress(addr)  # required to get hd keypath
            addrs.append(vaddr)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()
        # dump unencrypted wallet
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
            read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
        assert_equal(found_addr, test_addr_count)  # all keys must be in the dump
        assert_equal(found_addr_chg, 525)  # 525 blocks where mined
        assert_equal(found_addr_rsv, 90 + 1)  # keypool size (TODO: fix off-by-one)
        # encrypt wallet, restart, unlock and dump
        self.nodes[0].encryptwallet('test')
        bitcoind_processes[0].wait()
        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
        self.nodes[0].walletpassphrase('test', 10)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
        # The new dump must reference the old master key as inactive.
        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
            read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
        assert_equal(found_addr, test_addr_count)
        assert_equal(found_addr_chg, 616)  # old reserve keys are marked as change now
        assert_equal(found_addr_rsv, 90 + 1)  # keypool size (TODO: fix off-by-one)
if __name__ == '__main__':
    # Entry point: run the framework's main loop for this test case.
    WalletDumpTest().main()
| 43.87156 | 114 | 0.592848 |
803bc62502d2725993175d1a99a7fe6c5c6e44c0 | 1,582 | py | Python | lib/csv_exporter.py | phareeser/workouts | 36be728402e5829b5f32000ff7d315b96d9f00a2 | [
"MIT"
] | null | null | null | lib/csv_exporter.py | phareeser/workouts | 36be728402e5829b5f32000ff7d315b96d9f00a2 | [
"MIT"
] | null | null | null | lib/csv_exporter.py | phareeser/workouts | 36be728402e5829b5f32000ff7d315b96d9f00a2 | [
"MIT"
] | null | null | null | # coding=utf-8
from lib.workout_exporter import WorkoutExporter
from lib.workout import Workout, Sport, SportsType, WorkoutsDatabase
import logging
import csv
logger = logging.getLogger(__name__)
class CsvExporter(WorkoutExporter):
    """Workout exporter that writes all workouts from a database to a CSV file."""

    def __init__(self, filename):
        logger.info("csv exporter initializing ...")
        # File handle is created lazily in create_session().
        self.csv = None
        self.filename = filename

    def create_session(self):
        """Open the target CSV file for writing; return True on success."""
        logger.info("csv exporter creating session ...")
        try:
            self.csv = open(self.filename, 'w', encoding='utf-8', newline='')
            return True
        except OSError:
            logger.error("csv output file could not be accessed")
        except TypeError:
            logger.error("export filename not correct")
        return False

    def close_session(self):
        """Close the CSV file if one is open; safe to call repeatedly."""
        logger.info("csv exporter closing session ...")
        if self.csv is not None:
            self.csv.close()
            self.csv = None

    def export_workouts(self, db):
        """Write a header row plus one row per workout found in *db*."""
        logger.info("exporting workouts ...")
        records = db.session.query(Workout).all()
        rows = [Workout.header()]
        rows.extend(record.as_list(db) for record in records)
        csv.writer(self.csv).writerows(rows)
        logger.info("{} workouts exported".format(len(records)))
2af6399c9ff2271d95a840cc072d8a64d4e023b7 | 7,623 | py | Python | fhir/resources/relatedartifact.py | chgl/fhir.resources | 35b22314642640c0b25960ab5b2855e7c51749ef | [
"BSD-3-Clause"
] | null | null | null | fhir/resources/relatedartifact.py | chgl/fhir.resources | 35b22314642640c0b25960ab5b2855e7c51749ef | [
"BSD-3-Clause"
] | null | null | null | fhir/resources/relatedartifact.py | chgl/fhir.resources | 35b22314642640c0b25960ab5b2855e7c51749ef | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/RelatedArtifact
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import typing
from pydantic import Field, root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import element, fhirtypes
class RelatedArtifact(element.Element):
    """Disclaimer: Any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; it is used to enable the Extensibility
    feature for FHIR Primitive Data Types.

    Related artifacts for a knowledge resource.
    Related artifacts such as additional documentation, justification, or
    bibliographic references.
    """

    # FHIR resource type discriminator; constant for this element.
    resource_type = Field("RelatedArtifact", const=True)

    citation: fhirtypes.Markdown = Field(
        None,
        alias="citation",
        title="Bibliographic citation for the artifact",
        description=(
            "A bibliographic citation for the related artifact. This text SHOULD be"
            " formatted according to an accepted citation format."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    # Companion extension field carrying extensions for the primitive above.
    citation__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_citation", title="Extension field for ``citation``."
    )

    display: fhirtypes.String = Field(
        None,
        alias="display",
        title="Brief description of the related artifact",
        description=(
            "A brief description of the document or knowledge resource being "
            "referenced, suitable for display to a consumer."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    display__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_display", title="Extension field for ``display``."
    )

    document: fhirtypes.AttachmentType = Field(
        None,
        alias="document",
        title="What document is being referenced",
        description=(
            "The document being referenced, represented as an attachment. This is "
            "exclusive with the resource element."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    label: fhirtypes.String = Field(
        None,
        alias="label",
        title="Short label",
        description=(
            "A short label that can be used to reference the citation from "
            "elsewhere in the containing artifact, such as a footnote index."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    label__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_label", title="Extension field for ``label``."
    )

    resource: fhirtypes.Canonical = Field(
        None,
        alias="resource",
        title="What resource is being referenced",
        description=(
            "The related resource, such as a library, value set, profile, or other "
            "knowledge resource."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Resource"],
    )
    resource__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_resource", title="Extension field for ``resource``."
    )

    type: fhirtypes.Code = Field(
        None,
        alias="type",
        title=(
            "documentation | justification | citation | predecessor | successor | "
            "derived-from | depends-on | composed-of"
        ),
        description="The type of relationship to the related artifact.",
        # if property is element of this resource.
        element_property=True,
        # required by the FHIR spec; enforced by the root validator below.
        element_required=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=[
            "documentation",
            "justification",
            "citation",
            "predecessor",
            "successor",
            "derived-from",
            "depends-on",
            "composed-of",
        ],
    )
    type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_type", title="Extension field for ``type``."
    )

    url: fhirtypes.Url = Field(
        None,
        alias="url",
        title="Where the artifact can be accessed",
        description=(
            "A url for the artifact that can be followed to access the actual "
            "content."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    url__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_url", title="Extension field for ``url``."
    )

    @root_validator(pre=True, allow_reuse=True)
    def validate_required_primitive_elements_1717(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """https://www.hl7.org/fhir/extensibility.html#Special-Case
        In some cases, implementers might find that they do not have appropriate data for
        an element with minimum cardinality = 1. In this case, the element must be present,
        but unless the resource or a profile on it has made the actual value of the primitive
        data type mandatory, it is possible to provide an extension that explains why
        the primitive value is not present.
        """
        required_fields = [("type", "type__ext")]
        _missing = object()  # sentinel distinguishing "absent" from None

        def _fallback():
            return ""

        errors: typing.List["ErrorWrapper"] = []
        for name, ext in required_fields:
            field = cls.__fields__[name]
            ext_field = cls.__fields__[ext]
            value = values.get(field.alias, _missing)
            if value not in (_missing, None):
                # Primitive value present — nothing further to check.
                continue
            ext_value = values.get(ext_field.alias, _missing)
            missing_ext = True
            if ext_value not in (_missing, None):
                if isinstance(ext_value, dict):
                    missing_ext = len(ext_value.get("extension", [])) == 0
                elif (
                    getattr(ext_value.__class__, "get_resource_type", _fallback)()
                    == "FHIRPrimitiveExtension"
                ):
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
                else:
                    # Coerce unknown input through the extension field's own
                    # validators before inspecting it.
                    validate_pass = True
                    for validator in ext_field.type_.__get_validators__():
                        try:
                            ext_value = validator(v=ext_value)
                        except ValidationError as exc:
                            errors.append(ErrorWrapper(exc, loc=ext_field.alias))
                            validate_pass = False
                    if not validate_pass:
                        continue
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
            if missing_ext:
                # Neither a value nor an explanatory extension was supplied.
                if value is _missing:
                    errors.append(ErrorWrapper(MissingError(), loc=field.alias))
                else:
                    errors.append(
                        ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
                    )
        if len(errors) > 0:
            raise ValidationError(errors, cls)  # type: ignore
        return values
| 37.185366 | 93 | 0.598059 |
e5dbaedc76d81166f41b40038a50db6aab01a8cc | 2,478 | py | Python | tests/test_depthcache.py | rzchangcheng/python-binance-chain | 9193517f6272fdf39f34ef630c4be94be5ce43a4 | [
"MIT"
] | 241 | 2019-03-02T06:41:39.000Z | 2022-03-31T11:40:36.000Z | tests/test_depthcache.py | rzchangcheng/python-binance-chain | 9193517f6272fdf39f34ef630c4be94be5ce43a4 | [
"MIT"
] | 47 | 2019-04-16T11:31:58.000Z | 2022-03-03T06:10:17.000Z | tests/test_depthcache.py | rzchangcheng/python-binance-chain | 9193517f6272fdf39f34ef630c4be94be5ce43a4 | [
"MIT"
] | 90 | 2019-03-31T21:11:25.000Z | 2022-03-12T09:30:21.000Z | import pytest
from binance_chain.depthcache import DepthCache, DepthCacheManager
from binance_chain.environment import BinanceEnvironment
from binance_chain.http import HttpApiClient
class TestDepthCache:
    """Unit tests for the local DepthCache order-book container."""
    # Price string that removes a level from the book when supplied as quantity.
    clear_price = "0.00000000"

    def test_init_depth_cache(self):
        # A fresh cache carries the symbol and empty bid/ask sides.
        symbol = 'BNB_ETH'
        dc = DepthCache(symbol='BNB_ETH')
        assert dc.symbol == symbol
        assert len(dc.get_asks()) == 0
        assert len(dc.get_bids()) == 0

    def test_add_bid(self):
        dc = DepthCache('BNB_ETH')
        bid = [1.0, 2.0]
        dc.add_bid(bid)
        assert dc.get_bids() == [bid]
        assert len(dc.get_asks()) == 0

    def test_remove_bid(self):
        # Re-adding a price with the clear quantity deletes the level.
        dc = DepthCache('BNB_ETH')
        bid = [1.0, 2.0]
        dc.add_bid(bid)
        bid = [1.0, self.clear_price]
        dc.add_bid(bid)
        assert len(dc.get_bids()) == 0
        assert len(dc.get_asks()) == 0

    def test_add_ask(self):
        dc = DepthCache('BNB_ETH')
        ask = [1.0, 2.0]
        dc.add_ask(ask)
        assert dc.get_asks() == [ask]
        assert len(dc.get_bids()) == 0

    def test_remove_ask(self):
        dc = DepthCache('BNB_ETH')
        ask = [1.0, 2.0]
        dc.add_ask(ask)
        ask = [1.0, self.clear_price]
        dc.add_ask(ask)
        assert len(dc.get_bids()) == 0
        assert len(dc.get_asks()) == 0

    def test_sorted_bids(self):
        # Bids are returned best (highest price) first.
        dc = DepthCache('BNB_ETH')
        bid = [1.0, 2.0]
        dc.add_bid(bid)
        bid2 = [2.0, 3.0]
        dc.add_bid(bid2)
        assert dc.get_bids() == [bid2, bid]
        assert len(dc.get_asks()) == 0

    def test_sorted_asks(self):
        # Asks are returned best (lowest price) first.
        dc = DepthCache('BNB_ETH')
        ask = [1.0, 2.0]
        dc.add_ask(ask)
        ask2 = [2.0, 3.0]
        dc.add_ask(ask2)
        assert dc.get_asks() == [ask, ask2]
        assert len(dc.get_bids()) == 0
class TestDepthCacheConnection:
    """Integration tests that create a live DepthCacheManager against the
    Binance Chain testnet."""

    @pytest.fixture()
    def env(self):
        # Testnet environment so tests never touch production.
        return BinanceEnvironment.get_testnet_env()

    @pytest.fixture
    def httpclient(self, env):
        return HttpApiClient(env=env)

    @pytest.mark.asyncio
    async def test_depthcache_create(self, event_loop, env):
        # Callback intentionally ignores updates; we only test creation.
        async def callback(_depth_cache):
            pass
        client = HttpApiClient(env=env)
        dcm1 = await DepthCacheManager.create(client, event_loop, "MITH-C76_BNB", callback, env=env)
        assert dcm1
        assert dcm1.get_depth_cache()
        await dcm1.close()
7107cb081e78a958d18df66d849f715270e9263c | 1,220 | py | Python | conversationApi.py | jithinsunnyofficial/TeleVital-MicrosoftHack | a957605456d2895dab0357ab3b31f353253f8d3c | [
"MIT"
] | 4 | 2020-05-31T13:57:10.000Z | 2020-10-29T04:59:51.000Z | conversationApi.py | jithinsunnyofficial/TeleVital-MicrosoftHack | a957605456d2895dab0357ab3b31f353253f8d3c | [
"MIT"
] | 3 | 2021-04-30T21:17:07.000Z | 2021-09-08T02:06:04.000Z | conversationApi.py | jithinsunnyofficial/TeleVital-MicrosoftHack | a957605456d2895dab0357ab3b31f353253f8d3c | [
"MIT"
] | null | null | null | import os
import dialogflow
from google.api_core.exceptions import InvalidArgument
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = 'private-key.json'
DIALOGFLOW_PROJECT_ID = 'televital-hack-4dd77'
DIALOGFLOW_LANGUAGE_CODE = 'en'
def analyze_text(input_text='Hi', session_id='random'):
    """Send *input_text* to the Dialogflow agent and return its reply.

    Parameters
    ----------
    input_text : str
        The user utterance to analyze.
    session_id : str
        Dialogflow session identifier; reuse the same id across calls to
        keep conversational context.

    Returns
    -------
    str
        The fulfillment text produced by the matched intent.

    Raises
    ------
    google.api_core.exceptions.InvalidArgument
        Propagated unchanged from the Dialogflow API on malformed requests.
    """
    session_client = dialogflow.SessionsClient()
    session = session_client.session_path(DIALOGFLOW_PROJECT_ID, session_id)
    text_input = dialogflow.types.TextInput(
        text=input_text, language_code=DIALOGFLOW_LANGUAGE_CODE)
    query_input = dialogflow.types.QueryInput(text=text_input)
    # The original wrapped this call in `try: ... except InvalidArgument:
    # raise`, which is a no-op; the exception propagates to the caller
    # exactly as before.
    response = session_client.detect_intent(session=session, query_input=query_input)
    print("Query text:", response.query_result.query_text)
    print("Detected intent:", response.query_result.intent.display_name)
    print("Detected intent confidence:", response.query_result.intent_detection_confidence)
    print("Fulfillment text:", response.query_result.fulfillment_text)
    return response.query_result.fulfillment_text
| 42.068966 | 110 | 0.770492 |
d761a496302424089adeb9fdbef5cfdb69475cf1 | 716 | py | Python | bin/rstpep2html.py | j-windsor/iRiot-WebApp | 2af059f88bd010b98e50b9ee593c89027dcc53e0 | [
"MIT"
] | null | null | null | bin/rstpep2html.py | j-windsor/iRiot-WebApp | 2af059f88bd010b98e50b9ee593c89027dcc53e0 | [
"MIT"
] | null | null | null | bin/rstpep2html.py | j-windsor/iRiot-WebApp | 2af059f88bd010b98e50b9ee593c89027dcc53e0 | [
"MIT"
] | null | null | null | #!/Users/jameswindsor/Documents/Capstone/iRiot-WebApp/bin/python
# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML from PEP
(Python Enhancement Proposal) documents.
"""
try:
    import locale
    # Enable the user's default locale so output is localized where possible.
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    # Locale support is optional; fall back silently.  Narrowed from a bare
    # `except:` so SystemExit/KeyboardInterrupt are no longer swallowed.
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates (X)HTML from reStructuredText-format PEP files. '
               + default_description)

# Run the Docutils publisher as a command-line front end.
publish_cmdline(reader_name='pep', writer_name='pep_html',
                description=description)
| 27.538462 | 76 | 0.734637 |
55c42301d946241ebfa1348d2edfbc62dda0176b | 2,902 | py | Python | bigtable/test_bigtable.py | bsnisar/dataproc-initialization-actions | 90174ed577feff278d6895a6c814c9227f42f51d | [
"Apache-2.0"
] | 1 | 2019-06-07T09:19:13.000Z | 2019-06-07T09:19:13.000Z | bigtable/test_bigtable.py | bsnisar/dataproc-initialization-actions | 90174ed577feff278d6895a6c814c9227f42f51d | [
"Apache-2.0"
] | 2 | 2019-05-08T02:51:28.000Z | 2020-01-11T18:19:50.000Z | bigtable/test_bigtable.py | bsnisar/dataproc-initialization-actions | 90174ed577feff278d6895a6c814c9227f42f51d | [
"Apache-2.0"
] | null | null | null | """
This module provides testing functionality of the BigTable Init Action.
Test logic:
1. Create test table and fill it with some data by injecting commands into hbase shell.
2. Validate from local station that BigTable has test table created with right data.
Note:
Test REQUIRES cbt tool installed which provides CLI access to BigTable instances.
See: https://cloud.google.com/bigtable/docs/cbt-overview
"""
import os
from absl.testing import absltest
from absl.testing import parameterized
from integration_tests.dataproc_test_case import DataprocTestCase
class BigTableTestCase(DataprocTestCase):
    """Creates a Dataproc cluster with the Bigtable init action, writes test
    rows via hbase shell on the cluster, and validates them with `cbt`."""
    COMPONENT = 'bigtable'
    INIT_ACTIONS = ['bigtable/bigtable.sh']
    TEST_SCRIPT_FILE_NAME = "run_hbase_commands.py"

    def __init__(self, method_name='runTest'):
        super().__init__(method_name)
        self.metadata = None
        self.db_name = None

    def setUp(self):
        super().setUp()
        # Unique Bigtable instance name per run to avoid collisions.
        self.db_name = "test-bt-{}-{}".format(self.datetime_str(),
                                              self.random_str())
        self.metadata = "bigtable-instance={},bigtable-project={}".format(
            self.db_name, self.PROJECT)
        self.assert_command(
            'gcloud bigtable instances create {}'
            ' --cluster {} --cluster-zone {}'
            ' --display-name={} --instance-type=DEVELOPMENT'.format(
                self.db_name, self.db_name, self.ZONE, self.db_name))

    def tearDown(self):
        super().tearDown()
        # Always remove the Bigtable instance created in setUp.
        self.assert_command('gcloud bigtable instances delete {}'.format(
            self.db_name))

    def _validate_bigtable(self):
        # The hbase test script inserts 4 rows; verify via cbt row count.
        _, stdout, _ = self.assert_command(
            'cbt -instance {} count test-bigtable '.format(self.db_name))
        self.assertEqual(
            int(float(stdout)), 4, "Invalid BigTable instance count")

    def verify_instance(self, name):
        # Push the hbase test script to the VM and execute it there.
        self.upload_test_file(
            os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                self.TEST_SCRIPT_FILE_NAME), name)
        self.assert_instance_command(
            name, "python {}".format(self.TEST_SCRIPT_FILE_NAME))
        self._validate_bigtable()

    """ Dataproc versions 1.0 and 1.1 are excluded from automatic testing.
    Hbase shell is not working properly on older Dataproc Clusters when
    admin commands are provided from text file.
    """
    @parameterized.parameters(
        ("SINGLE", ["m"]),
        ("STANDARD", ["m"]),
        ("HA", ["m-0"]),
    )
    def test_bigtable(self, configuration, machine_suffixes):
        self.createCluster(
            configuration, self.INIT_ACTIONS, metadata=self.metadata)
        for machine_suffix in machine_suffixes:
            self.verify_instance("{}-{}".format(self.getClusterName(),
                                                machine_suffix))
if __name__ == '__main__':
    # absltest discovers and runs the parameterized test cases above.
    absltest.main()
| 34.547619 | 87 | 0.636113 |
bf43de859bf6b4f42600223a5d4c400021877e1a | 2,452 | py | Python | audioLightSensorAnalogDemo.py | fredericaltorres/fCircuitPython | 4185799c2eae1fd2d38963ff08f442f46ca02463 | [
"MIT"
] | null | null | null | audioLightSensorAnalogDemo.py | fredericaltorres/fCircuitPython | 4185799c2eae1fd2d38963ff08f442f46ca02463 | [
"MIT"
] | null | null | null | audioLightSensorAnalogDemo.py | fredericaltorres/fCircuitPython | 4185799c2eae1fd2d38963ff08f442f46ca02463 | [
"MIT"
] | null | null | null | """
Circuit Playground Express
https://learn.adafruit.com/adafruit-circuit-playground-express/pinouts
- Audio
- LED
- Buttons
- NeoPixels
- Analog In
- Play wav file on internal speaker
"""
import time
import board
from digitalio import DigitalInOut, Direction, Pull
import neopixel
from cpex.NeoPixelStrip import NeoPixelStrip
from cpex.AudioPlayer import AudioPlayer
from cpex.Button import Button
from cpex.analogInput import AnalogInput
import microcontroller
from analogio import AnalogIn
from simpleio import map_range
# --- Hardware setup (runs once at import time) ---
analogLightSensor = AnalogIn(board.LIGHT)   # onboard light sensor
analogInA1 = AnalogInput(board.A1)          # external analog input on pad A1
audioPlayer = AudioPlayer()
# Startup chime: two short two-tone beeps.
audioPlayer.playTone([400, 500], 0.5).playTone([400, 500], 0.3)
MAX_NEO_PIXEL = 10
# External NeoPixel strip on D8, cleared at startup.
neoPixelStrip = NeoPixelStrip(MAX_NEO_PIXEL, board.D8).fill((0, 0 ,0)).show().wait(.25)
LED = DigitalInOut(board.D13)               # onboard red LED
LED.direction = Direction.OUTPUT
# Buttons Definition
buttonA = Button(board.BUTTON_A)
buttonB = Button(board.BUTTON_B)
# Common RGB color constants.
rgbRed = (255, 0, 0)
rgbGreen = (0, 180, 0)
rgbBlue = (0, 0, 255)
rgbBlack = (0, 0, 0)
class Program:
    """Main demo loop: blinks the LED, plays sounds on button presses and
    prints analog sensor readings once per second.  run() never returns."""

    def __init__(self):
        print('Initialization...')
        # Sweep the strip red then back to black as a visual self-test.
        neoPixelStrip.animate(rgbRed, .05).wait(.5)
        neoPixelStrip.animate(rgbBlack, .05).wait(.5)

    def run(self):
        counter = 0
        while True:
            #neoPixelStrip.fill(rgbBlue if counter % 2 == 0 else rgbGreen).show()
            LED.value = not LED.value
            print("Led %s, count:%s" % (LED.value, counter))
            counter += 1
            # Buttons
            if buttonA.isPressed():
                print('button A down')
                audioPlayer.playFile("laugh.wav")
            if buttonB.isPressed():
                print('button B down')
                audioPlayer.playFile("rimshot.wav")
            print("Analog Voltage: %6.2f" % analogInA1.readVoltage())
            # The temperature sensor is included in the mcu and is not accurate, try to correct the reading
            temperature = microcontroller.cpu.temperature-5
            print("Temp: %fc %ff" % (temperature, (temperature)*1.8+32))
            # light value remaped to pixel position
            analogLightSensorValue = analogLightSensor.value
            peak = map_range(analogLightSensorValue, 1000, 30000, 0, 10)
            print("Light Sensor AnalogValue:%f, Peak:%s" % (analogLightSensorValue, int(peak)))
            time.sleep(1)
# Entry point: construct the demo and loop forever (run() never returns).
Program().run()
| 29.902439 | 107 | 0.64845 |
d97f123e5884710975d7012722f6da37743439d5 | 5,576 | py | Python | PyYaMusic/radio.py | AlexRoar/YaMusic-Python | d709f8920ab4c66dafae9b5bcb8e623512d60b3a | [
"MIT"
] | 2 | 2020-07-14T08:15:34.000Z | 2020-08-25T07:13:25.000Z | PyYaMusic/radio.py | AlexRoar/YaMusic-Python | d709f8920ab4c66dafae9b5bcb8e623512d60b3a | [
"MIT"
] | null | null | null | PyYaMusic/radio.py | AlexRoar/YaMusic-Python | d709f8920ab4c66dafae9b5bcb8e623512d60b3a | [
"MIT"
] | null | null | null | # Copyright (c) 2019.
# Designed and codded with love by Aleksander Dremov
#
#
import json
from PyYaMusic.track import Track
import subprocess
import requests
import time
# Queue work not good
# TODO: make normal query
class Radio:
    """Plays a Yandex.Radio stream by scraping track queues from the
    radio.yandex.ru web API and playing the tracks one by one.

    SECURITY NOTE(review): getTracks() embeds hard-coded session cookies
    and tokens in source; they should be externalized to configuration
    and rotated, not committed.
    """

    def __init__(self, login='', raw_edit=''):
        # (track_id, album_id) tuples for the current and next track.
        self.now = ()
        self.next = ()
        self.tracks = []
        self.login = login
        # When set, fetch tracks for this raw station path instead of the
        # user's personal station.
        self.raw_edit = raw_edit

    def getTracks(self, que=''):
        """Fetch the next batch of tracks from the web API.

        Appends (track_id, album_id) tuples to self.tracks and returns the
        list.  *que* is the "already queued" hint sent back to the API.
        """
        cookies = {
            'mda': '0',
            '_ym_isad': '2',
            'Session_id': '3:1556406891.5.0.1556406891904:r0r_vA:12.1|227207001.0.2.0:3|198398.402244.BOys4fFX_wLTsjlfrtZiiYujCdA',
            'sessionid2': '3:1556406891.5.0.1556406891904:r0r_vA:12.1|227207001.0.2.0:3|198398.18991.gVUVw4ZUadiPkdSAcfQ4pVEOpbQ',
            'bltsr': '1',
        }
        if self.raw_edit == '':
            # Personal-station mode needs the full logged-in cookie set.
            cookies = {
                'yandexuid': '8534847021556406878',
                '_ym_wasSynced': '%7B%22time%22%3A1556406880705%2C%22params%22%3A%7B%22eu%22%3A0%7D%2C%22bkParams%22%3A%7B%7D%7D',
                '_ym_uid': '15564068811045866983',
                '_ym_d': '1556406881',
                'mda': '0',
                '_ym_isad': '2',
                'Session_id': '3:1556406891.5.0.1556406891904:r0r_vA:12.1|227207001.0.2.0:3|198398.402244.BOys4fFX_wLTsjlfrtZiiYujCdA',
                'sessionid2': '3:1556406891.5.0.1556406891904:r0r_vA:12.1|227207001.0.2.0:3|198398.18991.gVUVw4ZUadiPkdSAcfQ4pVEOpbQ',
                'ys': 'diskchrome.8-22-3#udn.czoxNjQ3MDUwNjp2azrQodCw0YjQsCDQlNGA0LXQvNC%2B0LI%3D',
                'L': 'fw0HcUtKAHNnX1RaV1xCe3hIRGcIQF9vBSQNAhYaXXJAe2E=.1556406891.13848.335772.c1d5d2dc58970caea216848bcffa23a6',
                'yandex_login': self.login,
                'i': 'jxREviG0SFaGqCQxJutJW+cCGxVqlrxtP1FJ3sO5LzxF3BxT7wuW6ZErMIxUNPEAHyUnzPoL6oidaJxjTCiJgAoL5uM=',
                'device_id': 'ac27f1b47303e5935c639c2e6c7c54406659058e0',
                'bltsr': '1',
                'yp': '1871766880.yrtsi.1556406880#1556636423.gpauto.55_876396799999995%3A37_5777814%3A37%3A3%3A1556463623#1871766891.udn.czoxNjQ3MDUwNjp2azrQodCw0YjQsCDQlNGA0LXQvNC%2B0LI%3D',
            }
            headers = {
                'Accept-Encoding': 'gzip, deflate, br',
                'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36',
                'X-Current-UID': '227207001',
                'Accept': 'application/json; q=1.0, text/*; q=0.8, */*; q=0.1',
                'Referer': 'https://radio.yandex.ru/user/' + self.login,
                'X-Requested-With': 'XMLHttpRequest',
                'Connection': 'keep-alive',
            }
        else:
            headers = {
                'Accept-Encoding': 'gzip, deflate, br',
                'X-Retpath-Y': 'https%3A%2F%2Fradio.yandex.ru%2Fgenre%2Fpop',
                'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36',
                'X-Current-UID': '227207001',
                'Accept': 'application/json; q=1.0, text/*; q=0.8, */*; q=0.1',
                'Referer': 'https://radio.yandex.ru/genre/pop',
                'X-Requested-With': 'XMLHttpRequest',
                'Connection': 'keep-alive',
            }
        params = (
            ('queue', que),
            ('external-domain', 'radio.yandex.ru'),
            ('overembed', 'no'),
            # Millisecond timestamp used as a cache-buster.
            ('__t', str(time.time() * 1000)[:13])
        )
        if self.raw_edit == '':
            response = requests.get('https://radio.yandex.ru/api/v2.1/handlers/radio/user/' + self.login + '/tracks',
                                    headers=headers, params=params, cookies=cookies)
        else:
            response = requests.get('https://radio.yandex.ru/api/v2.1/handlers/radio/' + self.raw_edit + '/tracks',
                                    headers=headers, params=params, cookies=cookies)
        data = json.loads(response.text)
        print('Gathering list: ')
        n = 1
        for i in data['tracks']:
            track_id = i['track']['id']
            album_id = i['track']['albums'][0]['id']
            title = i['track']['title']
            print('\t'+str(n)+'. '+title+' ('+i['track']['artists'][0]['name']+') ')
            n+=1
            self.tracks.append((track_id, album_id))
        return self.tracks

    def updateTracks(self):
        """Refill self.tracks, advertising the current/next pair as queued."""
        fl = True
        if (len(self.tracks) == 0):
            self.getTracks()
            self.now = self.tracks.pop(0)
            self.next = self.tracks.pop(0)
        if (len(self.now) == 0):
            fl = False
        if (fl):
            self.tracks = []
            que = str(self.now[0]) + ':' + str(self.now[1]) + ',' + str(self.next[0]) + ':' + str(
                self.next[1])
        else:
            que = ''
        self.getTracks(que=que)

    def start(self):
        """Play tracks back-to-back.

        NOTE(review): recurses into itself after every track, so a long
        session will eventually hit Python's recursion limit.
        """
        self.updateTracks()
        self.now = self.tracks.pop(0)
        self.next = self.tracks.pop(0)
        t = Track()
        t.playByids(self.now[0], self.now[1])
        self.start()

    def radioDaemon(self):
        # Launch the playback loop as a separate process.
        self.pr = subprocess.Popen(['python3.7', 'radioDaemon.py'])

    def stopDaemon(self):
        self.pr.kill()

    def nextTrack(self):
        # Skip forward: refresh the queue and advance the pair.
        self.updateTracks()
        self.now = self.tracks.pop(0)
        self.next = self.tracks.pop(0)
| 41.61194 | 192 | 0.54609 |
fbc1c3a6dc69cfff020e1293d598d81cd0602dcc | 5,839 | py | Python | oookbooksite/settings/base.py | janpipek/oookbook | 69524ca95206080454eae4e97f0830225ac863ae | [
"MIT"
] | 1 | 2020-06-20T13:36:52.000Z | 2020-06-20T13:36:52.000Z | oookbooksite/settings/base.py | janpipek/oookbook | 69524ca95206080454eae4e97f0830225ac863ae | [
"MIT"
] | null | null | null | oookbooksite/settings/base.py | janpipek/oookbook | 69524ca95206080454eae4e97f0830225ac863ae | [
"MIT"
] | null | null | null | # Django settings for oookbooksite project.
import os
BASE_DIR = os.path.join(os.path.dirname(__file__), "..")
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'oookbook', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'oookbook',
'PASSWORD': 'oookbook',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
LANGUAGES = (
('en', 'English'),
('cs', 'Czech'),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static/oookbook"),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'nfb*y&ufhx^%=ft$((lw(#8e=om$c$e*od5$^lns-6#3l1m3v%'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'oookbooksite.urls'
INTERNAL_IPS = ('127.0.0.1',)
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'oookbooksite.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'tastypie',
'oookbook',
'crispy_forms'
]
LOGIN_URL = '/login'
LOGIN_REDIRECT_URL = '/welcome'
AUTH_PROFILE_MODULE = "oookbook.UserProfile"
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
CRISPY_TEMPLATE_PACK = 'bootstrap3' | 32.438889 | 127 | 0.693098 |
914d7a7e2f0e65ec41e6b4a201eff1b5bc74aab5 | 5,286 | py | Python | train_code/train_crnn/crnn_recognizer.py | RuslanOm/ocr.pytorch | 21f62d358b2c6e186cc460c8a50d9ae70414a54f | [
"MIT"
] | 387 | 2019-04-30T14:20:45.000Z | 2022-03-30T02:34:57.000Z | train_code/train_crnn/crnn_recognizer.py | makao007/ocr.pytorch | 6c51bda6b49e378c904d200ae02157dbe6e0e03c | [
"MIT"
] | 57 | 2019-06-27T13:22:34.000Z | 2022-03-27T09:41:03.000Z | train_code/train_crnn/crnn_recognizer.py | makao007/ocr.pytorch | 6c51bda6b49e378c904d200ae02157dbe6e0e03c | [
"MIT"
] | 100 | 2019-05-01T14:18:08.000Z | 2022-03-29T09:18:42.000Z | import torch.nn as nn
# import torchvision.models as models
import torch, os
from PIL import Image
import cv2
import torchvision.transforms as transforms
from torch.autograd import Variable
import numpy as np
import random
from crnn import CRNN
import config
# copy from mydataset
class resizeNormalize(object):
def __init__(self, size, interpolation=Image.LANCZOS, is_test=True):
self.size = size
self.interpolation = interpolation
self.toTensor = transforms.ToTensor()
self.is_test = is_test
def __call__(self, img):
w, h = self.size
w0 = img.size[0]
h0 = img.size[1]
if w <= (w0 / h0 * h):
img = img.resize(self.size, self.interpolation)
img = self.toTensor(img)
img.sub_(0.5).div_(0.5)
else:
w_real = int(w0 / h0 * h)
img = img.resize((w_real, h), self.interpolation)
img = self.toTensor(img)
img.sub_(0.5).div_(0.5)
tmp = torch.zeros([img.shape[0], h, w])
start = random.randint(0, w - w_real - 1)
if self.is_test:
start = 0
tmp[:, :, start:start + w_real] = img
img = tmp
return img
# copy from utils
class strLabelConverter(object):
def __init__(self, alphabet, ignore_case=False):
self._ignore_case = ignore_case
if self._ignore_case:
alphabet = alphabet.lower()
self.alphabet = alphabet + '_' # for `-1` index
self.dict = {}
for i, char in enumerate(alphabet):
# NOTE: 0 is reserved for 'blank' required by wrap_ctc
self.dict[char] = i + 1
# print(self.dict)
def encode(self, text):
length = []
result = []
for item in text:
item = item.decode('utf-8', 'strict')
length.append(len(item))
for char in item:
if char not in self.dict.keys():
index = 0
else:
index = self.dict[char]
result.append(index)
text = result
return (torch.IntTensor(text), torch.IntTensor(length))
def decode(self, t, length, raw=False):
if length.numel() == 1:
length = length[0]
assert t.numel() == length, "text with length: {} does not match declared length: {}".format(t.numel(),
length)
if raw:
return ''.join([self.alphabet[i - 1] for i in t])
else:
char_list = []
for i in range(length):
if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
char_list.append(self.alphabet[t[i] - 1])
return ''.join(char_list)
else:
# batch mode
assert t.numel() == length.sum(), "texts with length: {} does not match declared length: {}".format(
t.numel(), length.sum())
texts = []
index = 0
for i in range(length.numel()):
l = length[i]
texts.append(
self.decode(
t[index:index + l], torch.IntTensor([l]), raw=raw))
index += l
return texts
# recognize api
class PytorchOcr():
def __init__(self, model_path):
alphabet_unicode = config.alphabet_v2
self.alphabet = ''.join([chr(uni) for uni in alphabet_unicode])
# print(len(self.alphabet))
self.nclass = len(self.alphabet) + 1
self.model = CRNN(config.imgH, 1, self.nclass, 256)
self.cuda = False
if torch.cuda.is_available():
self.cuda = True
self.model.cuda()
self.model.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(model_path).items()})
else:
# self.model = nn.DataParallel(self.model)
self.model.load_state_dict(torch.load(model_path, map_location='cpu'))
self.model.eval()
self.converter = strLabelConverter(self.alphabet)
def recognize(self, img):
h,w = img.shape[:2]
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
image = Image.fromarray(img)
transformer = resizeNormalize((int(w/h*32), 32))
image = transformer(image)
image = image.view(1, *image.size())
image = Variable(image)
if self.cuda:
image = image.cuda()
preds = self.model(image)
_, preds = preds.max(2)
preds = preds.transpose(1, 0).contiguous().view(-1)
preds_size = Variable(torch.IntTensor([preds.size(0)]))
txt = self.converter.decode(preds.data, preds_size.data, raw=False)
return txt
if __name__ == '__main__':
model_path = './crnn_models/CRNN-1008.pth'
recognizer = PytorchOcr(model_path)
img_name = 't1.jpg'
img = cv2.imread(img_name)
h, w = img.shape[:2]
res = recognizer.recognize(img)
print(res)
| 34.54902 | 116 | 0.518918 |
6fd31b2b3d3f85704a6c522680c8adbe4b95e262 | 610 | py | Python | setup.py | sanzaru/bleutrade_python | 412b553e5b7d64dc790a215ff69001a8ee7ffad7 | [
"MIT"
] | 1 | 2016-03-14T13:52:44.000Z | 2016-03-14T13:52:44.000Z | setup.py | sanzaru/bleutrade_python | 412b553e5b7d64dc790a215ff69001a8ee7ffad7 | [
"MIT"
] | null | null | null | setup.py | sanzaru/bleutrade_python | 412b553e5b7d64dc790a215ff69001a8ee7ffad7 | [
"MIT"
] | 1 | 2018-03-11T02:39:19.000Z | 2018-03-11T02:39:19.000Z | from distutils.core import setup
setup(
name = 'bleutradeapi',
packages = ['bleutradeapi'], # this must be the same as the name above
version = '0.3',
description = 'Python client library implementation of the Bleutrade API',
author = 'Martin Albrecht',
author_email = 'iwuerstchen@gmail.com',
url = 'https://github.com/sanzaru/bleutrade_python', # use the URL to the github repo
download_url = 'https://github.com/sanzaru/bleutrade_python/tarball/0.1', # I'll explain this in a second
keywords = ['bleutrade', 'api', 'cryptocurrencies', 'bitcoin'], # arbitrary keywords
classifiers = [],
) | 46.923077 | 107 | 0.716393 |
710d80ca52008d7a0a0a0e09bb63a07173821053 | 9,517 | py | Python | mrcnn/config.py | anton-cid-mejias/Mask_RCNN | 226b97f590c40f884cb12d88da22966732e618ac | [
"MIT"
] | null | null | null | mrcnn/config.py | anton-cid-mejias/Mask_RCNN | 226b97f590c40f884cb12d88da22966732e618ac | [
"MIT"
] | null | null | null | mrcnn/config.py | anton-cid-mejias/Mask_RCNN | 226b97f590c40f884cb12d88da22966732e618ac | [
"MIT"
] | null | null | null | """
Mask R-CNN
Base Configurations class.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import numpy as np
# Base Configuration Class
# Don't use this class directly. Instead, sub-class it and override
# the configurations you need to change.
class Config(object):
"""Base configuration class. For custom configurations, create a
sub-class that inherits from this one and override properties
that need to be changed.
"""
# Name the configurations. For example, 'COCO', 'Experiment 3', ...etc.
# Useful if your code needs to do things differently depending on which
# experiment is running.
NAME = None # Override in sub-classes
# NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1.
GPU_COUNT = 1
# Number of images to train with on each GPU. A 12GB GPU can typically
# handle 2 images of 1024x1024px.
# Adjust based on your GPU memory and image sizes. Use the highest
# number that your GPU can handle for best performance.
IMAGES_PER_GPU = 2
# Number of training steps per epoch
# This doesn't need to match the size of the training set. Tensorboard
# updates are saved at the end of each epoch, so setting this to a
# smaller number means getting more frequent TensorBoard updates.
# Validation stats are also calculated at each epoch end and they
# might take a while, so don't set this too small to avoid spending
# a lot of time on validation stats.
STEPS_PER_EPOCH = 1000
# Number of validation steps to run at the end of every training epoch.
# A bigger number improves accuracy of validation stats, but slows
# down the training.
VALIDATION_STEPS = 50
# Backbone network architecture
# Supported values are: resnet50, resnet101.
# You can also provide a callable that should have the signature
# of model.resnet_graph. If you do so, you need to supply a callable
# to COMPUTE_BACKBONE_SHAPE as well
BACKBONE = "resnet101"
# Only useful if you supply a callable to BACKBONE. Should compute
# the shape of each layer of the FPN Pyramid.
# See model.compute_backbone_shapes
COMPUTE_BACKBONE_SHAPE = None
# The strides of each layer of the FPN Pyramid. These values
# are based on a Resnet101 backbone.
BACKBONE_STRIDES = [4, 8, 16, 32, 64]
# Size of the fully-connected layers in the classification graph
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
# Size of the top-down layers used to build the feature pyramid
TOP_DOWN_PYRAMID_SIZE = 256
# Number of classification classes (including background)
NUM_CLASSES = 1 # Override in sub-classes
# Length of square anchor side in pixels
RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
# Ratios of anchors at each cell (width/height)
# A value of 1 represents a square anchor, and 0.5 is a wide anchor
RPN_ANCHOR_RATIOS = [0.5, 1, 2]
# Anchor stride
# If 1 then anchors are created for each cell in the backbone feature map.
# If 2, then anchors are created for every other cell, and so on.
RPN_ANCHOR_STRIDE = 1
# Non-max suppression threshold to filter RPN proposals.
# You can increase this during training to generate more proposals.
RPN_NMS_THRESHOLD = 0.7
# How many anchors per image to use for RPN training
RPN_TRAIN_ANCHORS_PER_IMAGE = 256
# ROIs kept after tf.nn.top_k and before non-maximum suppression
PRE_NMS_LIMIT = 6000
# ROIs kept after non-maximum suppression (training and inference)
POST_NMS_ROIS_TRAINING = 2000
POST_NMS_ROIS_INFERENCE = 1000
# If enabled, resizes instance masks to a smaller size to reduce
# memory load. Recommended when using high-resolution images.
USE_MINI_MASK = True
MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask
# Input image resizing
# Generally, use the "square" resizing mode for training and predicting
# and it should work well in most cases. In this mode, images are scaled
# up such that the small side is = IMAGE_MIN_DIM, but ensuring that the
# scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is
# padded with zeros to make it a square so multiple images can be put
# in one batch.
# Available resizing modes:
# none: No resizing or padding. Return the image unchanged.
# square: Resize and pad with zeros to get a square image
# of size [max_dim, max_dim].
# pad64: Pads width and height with zeros to make them multiples of 64.
# If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, then it scales
# up before padding. IMAGE_MAX_DIM is ignored in this mode.
# The multiple of 64 is needed to ensure smooth scaling of feature
# maps up and down the 6 levels of the FPN pyramid (2**6=64).
# crop: Picks random crops from the image. First, scales the image based
# on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, then picks a random crop of
# size IMAGE_MIN_DIM x IMAGE_MIN_DIM. Can be used in training only.
# IMAGE_MAX_DIM is not used in this mode.
IMAGE_RESIZE_MODE = "square"
IMAGE_MIN_DIM = 800
IMAGE_MAX_DIM = 1024
# Minimum scaling ratio. Checked after MIN_IMAGE_DIM and can force further
# up scaling. For example, if set to 2 then images are scaled up to double
# the width and height, or more, even if MIN_IMAGE_DIM doesn't require it.
# However, in 'square' mode, it can be overruled by IMAGE_MAX_DIM.
IMAGE_MIN_SCALE = 0
# Number of color channels per image. RGB = 3, grayscale = 1, RGB-D = 4
# Changing this requires other changes in the code. See the WIKI for more
# details: https://github.com/matterport/Mask_RCNN/wiki
IMAGE_CHANNEL_COUNT = 3
# Image mean (RGB)
MEAN_PIXEL = np.array([123.7, 116.8, 103.9])
# Number of ROIs per image to feed to classifier/mask heads
# The Mask RCNN paper uses 512 but often the RPN doesn't generate
# enough positive proposals to fill this and keep a positive:negative
# ratio of 1:3. You can increase the number of proposals by adjusting
# the RPN NMS threshold.
TRAIN_ROIS_PER_IMAGE = 200
# Percent of positive ROIs used to train classifier/mask heads
ROI_POSITIVE_RATIO = 0.33
# Pooled ROIs
POOL_SIZE = 7
MASK_POOL_SIZE = 14
# Shape of output mask
# To change this you also need to change the neural network mask branch
MASK_SHAPE = [28, 28]
# Maximum number of ground truth instances to use in one image
MAX_GT_INSTANCES = 100
# Bounding box refinement standard deviation for RPN and final detections.
RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
# Max number of final detections
DETECTION_MAX_INSTANCES = 100
# Minimum probability value to accept a detected instance
# ROIs below this threshold are skipped
DETECTION_MIN_CONFIDENCE = 0.7
# Non-maximum suppression threshold for detection
DETECTION_NMS_THRESHOLD = 0.3
# Learning rate and momentum
# The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes
# weights to explode. Likely due to differences in optimizer
# implementation.
LEARNING_RATE = 0.001
LEARNING_MOMENTUM = 0.9
# Weight decay regularization
WEIGHT_DECAY = 0.0001
# Loss weights for more precise optimization.
# Can be used for R-CNN training setup.
LOSS_WEIGHTS = {
"rpn_class_loss": 1.,
"rpn_bbox_loss": 1.,
"mrcnn_class_loss": 1.,
"mrcnn_bbox_loss": 1.,
"mrcnn_mask_loss": 1.,
"mrcnn_orientation_loss": 1.
}
# Use RPN ROIs or externally generated ROIs for training
# Keep this True for most situations. Set to False if you want to train
# the head branches on ROI generated by code rather than the ROIs from
# the RPN. For example, to debug the classifier head without having to
# train the RPN.
USE_RPN_ROIS = True
# Train or freeze batch normalization layers
# None: Train BN layers. This is the normal mode
# False: Freeze BN layers. Good when using a small batch size
# True: (don't use). Set layer in training mode even when predicting
TRAIN_BN = False # Defaulting to False since batch size is often small
# Gradient norm clipping
GRADIENT_CLIP_NORM = 5.0
# Do train and inference with the orientation module
ORIENTATION = False
def __init__(self):
"""Set values of computed attributes."""
# Effective batch size
self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT
# Input image size
if self.IMAGE_RESIZE_MODE == "crop":
self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM,
self.IMAGE_CHANNEL_COUNT])
else:
self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM,
self.IMAGE_CHANNEL_COUNT])
# Image meta data length
# See compose_image_meta() for details
self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
| 39.489627 | 80 | 0.689188 |
e901d226f1a32afe69b0570f35e332eeeeeded09 | 930 | py | Python | ejercicios_resueltos/t03/t03ejer09.py | workready/pythonbasic | 59bd82caf99244f5e711124e1f6f4dec8de22141 | [
"MIT"
] | null | null | null | ejercicios_resueltos/t03/t03ejer09.py | workready/pythonbasic | 59bd82caf99244f5e711124e1f6f4dec8de22141 | [
"MIT"
] | null | null | null | ejercicios_resueltos/t03/t03ejer09.py | workready/pythonbasic | 59bd82caf99244f5e711124e1f6f4dec8de22141 | [
"MIT"
] | null | null | null | # Estos son los módulos que necesitas
import datetime
import calendar
import locale
def get_today_as_string():
"""
Funcion que me dice el día de la semana que es hoy, en formato string.
Pequeña guia de implementación:
1. Obtén el día de la semana que es hoy, en formato entero (1, 2, 3, etc). Usa datetime.
2. Obtén en una lista, los días de la semana en español en formato string. Usa calendar y locale.
3. Usa el resultado de 1 para indexar el array de 2
@returns: "Lunes", "Martes"...
"""
locale.setlocale(locale.LC_ALL, 'es_ES.UTF-8')
# El dia de la semana en formato entero, empezando por 1
today_date = datetime.date.today()
num_weekday = today_date.isoweekday()
# Lista con los días de la semana en formato cadena
weekdays = list(calendar.day_name)
return weekdays[num_weekday - 1]
print("Hoy es {}".format(get_today_as_string())) | 31 | 105 | 0.67957 |
28ea9175de8616d7ac0d112a729548969f3855fb | 6,121 | py | Python | ecowitt2mqtt/data.py | akleber/ecowitt2mqtt | e8ffd9f8f36d9065353d67b4580d92d15a5aaba2 | [
"MIT"
] | null | null | null | ecowitt2mqtt/data.py | akleber/ecowitt2mqtt | e8ffd9f8f36d9065353d67b4580d92d15a5aaba2 | [
"MIT"
] | null | null | null | ecowitt2mqtt/data.py | akleber/ecowitt2mqtt | e8ffd9f8f36d9065353d67b4580d92d15a5aaba2 | [
"MIT"
] | null | null | null | """Define helpers to process data from an Ecowitt device."""
import argparse
from functools import partial
import inspect
from typing import Any, Callable, Dict, Optional, Union
from ecowitt2mqtt.const import (
DATA_POINT_DEWPOINT,
DATA_POINT_FEELSLIKE,
DATA_POINT_GLOB_BAROM,
DATA_POINT_GLOB_BATT,
DATA_POINT_GLOB_RAIN,
DATA_POINT_GLOB_TEMP,
DATA_POINT_GLOB_WIND,
DATA_POINT_HEATINDEX,
DATA_POINT_HUMIDITY,
DATA_POINT_LIGHTNING,
DATA_POINT_LIGHTNING_TIME,
DATA_POINT_SOLARRADIATION,
DATA_POINT_SOLARRADIATION_LUX,
DATA_POINT_SOLARRADIATION_PERCEIVED,
DATA_POINT_TEMPF,
DATA_POINT_WINDCHILL,
DATA_POINT_WINDDIR,
DATA_POINT_WINDSPEEDMPH,
UNIT_SYSTEM_METRIC,
)
from ecowitt2mqtt.device import get_device_from_raw_payload
from ecowitt2mqtt.util.battery import calculate_battery
from ecowitt2mqtt.util.distance import calculate_distance
from ecowitt2mqtt.util.meteo import (
calculate_dew_point,
calculate_feels_like,
calculate_heat_index,
calculate_illuminance_wm2_to_lux,
calculate_illuminance_wm2_to_perceived,
calculate_pressure,
calculate_rain_volume,
calculate_temperature,
calculate_wind_chill,
calculate_wind_speed,
)
from ecowitt2mqtt.util.time import calculate_epoch
DEFAULT_KEYS_TO_IGNORE = ["PASSKEY", "dateutc", "freq", "model", "stationtype"]
CALCULATOR_FUNCTION_MAP: Dict[str, Callable] = {
DATA_POINT_DEWPOINT: calculate_dew_point,
DATA_POINT_FEELSLIKE: calculate_feels_like,
DATA_POINT_GLOB_BAROM: calculate_pressure,
DATA_POINT_GLOB_BATT: calculate_battery,
DATA_POINT_GLOB_RAIN: calculate_rain_volume,
DATA_POINT_GLOB_TEMP: calculate_temperature,
DATA_POINT_GLOB_WIND: calculate_wind_speed,
DATA_POINT_HEATINDEX: calculate_heat_index,
# Lightning strike distance always gives values in metric:
DATA_POINT_LIGHTNING: lambda val: calculate_distance(
val, input_unit_system=UNIT_SYSTEM_METRIC
),
DATA_POINT_LIGHTNING_TIME: calculate_epoch,
DATA_POINT_SOLARRADIATION_LUX: calculate_illuminance_wm2_to_lux,
DATA_POINT_SOLARRADIATION_PERCEIVED: calculate_illuminance_wm2_to_perceived,
DATA_POINT_WINDCHILL: calculate_wind_chill,
# Prevent WINDDIR being converted by GLOB_WIND:
DATA_POINT_WINDDIR: lambda val: val,
}
DEW_POINT_KEYS = (DATA_POINT_TEMPF, DATA_POINT_HUMIDITY)
FEELS_LIKE_KEYS = (DATA_POINT_TEMPF, DATA_POINT_HUMIDITY, DATA_POINT_WINDSPEEDMPH)
HEAT_INDEX_KEYS = (DATA_POINT_TEMPF, DATA_POINT_HUMIDITY)
WIND_CHILL_KEYS = (DATA_POINT_TEMPF, DATA_POINT_WINDSPEEDMPH)
ILLUMINANCE_KEYS = (DATA_POINT_SOLARRADIATION,)
def de_unit_key(key: str) -> str:
"""Remove the unit from a key."""
if key.endswith("f"):
return key[:-1]
if key.endswith("in"):
return key[:-2]
if key.endswith("mph"):
return key[:-3]
return key
def get_data_type(key: str) -> Optional[str]:
"""Get the data "type" (if it exists) for a specific data key."""
if key in CALCULATOR_FUNCTION_MAP:
return key
matches = [k for k in CALCULATOR_FUNCTION_MAP if k in key]
if matches:
return matches[0]
return None
def get_typed_value(value: str) -> Union[float, int, str]:
"""Take a string and return its properly typed counterpart."""
if value.isdigit():
# Integer:
return int(value)
try:
# Float:
return float(value)
except ValueError:
# String:
return value
class DataProcessor: # pylint: disable=too-few-public-methods
"""Define an object that holds processed payload data from the device."""
def __init__(self, payload: Dict[str, Any], args: argparse.Namespace) -> None:
"""Initialize."""
self._args = args
self._input_unit_system = args.input_unit_system
self._output_unit_system = args.output_unit_system
self._payload: Dict[str, Union[float, str]] = {}
for key, value in payload.items():
self._payload[key] = get_typed_value(value)
self.device = get_device_from_raw_payload(payload)
def _get_calculator_func(
self, key: str, *args: Union[float, str], **kwargs: str
) -> Optional[Callable]:
"""Get a data calculator function for a particular data key."""
data_type = get_data_type(key)
if not data_type:
return None
func = CALCULATOR_FUNCTION_MAP[data_type]
func_params = inspect.signature(func).parameters
if "input_unit_system" in func_params:
kwargs["input_unit_system"] = self._input_unit_system
if "output_unit_system" in func_params:
kwargs["output_unit_system"] = self._output_unit_system
return partial(func, *args, **kwargs)
def generate_data(self) -> Dict[str, Union[float, str]]:
"""Generate a parsed data payload."""
data: Dict[str, Any] = {}
for target_key, value in self._payload.items():
if target_key in DEFAULT_KEYS_TO_IGNORE:
continue
calculate = self._get_calculator_func(target_key, value)
if self._args.raw_data or not calculate:
data[target_key] = value
continue
output = calculate()
target_key = de_unit_key(target_key)
data[target_key] = output
for target_key, input_keys in [
(DATA_POINT_DEWPOINT, DEW_POINT_KEYS),
(DATA_POINT_FEELSLIKE, FEELS_LIKE_KEYS),
(DATA_POINT_HEATINDEX, HEAT_INDEX_KEYS),
(DATA_POINT_SOLARRADIATION_LUX, ILLUMINANCE_KEYS),
(DATA_POINT_SOLARRADIATION_PERCEIVED, ILLUMINANCE_KEYS),
(DATA_POINT_WINDCHILL, WIND_CHILL_KEYS),
]:
if not all(k in self._payload for k in input_keys):
continue
calculate = self._get_calculator_func(
target_key, *[self._payload[k] for k in input_keys]
)
if not calculate:
continue
output = calculate()
data[target_key] = output
return data
| 32.908602 | 82 | 0.695311 |
d8a67d7685b8ac93328110f05ddb1e9409cdfc39 | 984 | py | Python | tests/test_eminq_parser.py | chargehound/pyxmex | 49349b770af64be6cbe738ef734c9dc24c9f3d3b | [
"MIT"
] | null | null | null | tests/test_eminq_parser.py | chargehound/pyxmex | 49349b770af64be6cbe738ef734c9dc24c9f3d3b | [
"MIT"
] | null | null | null | tests/test_eminq_parser.py | chargehound/pyxmex | 49349b770af64be6cbe738ef734c9dc24c9f3d3b | [
"MIT"
] | null | null | null | import os
import unittest
import datetime
from pyxmex import EMINQParser
class TestEMINQParser(unittest.TestCase):
def setUp(self, session=None):
self.parser = EMINQParser()
self.dummy_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'support/dummy_eminq_raw')
def test_process_parses_a_sample_file(self):
parsed = self.parser.process(self.dummy_file_path)
self.assertEqual(parsed[0]['SE_NO'], '9450000003')
self.assertEqual(parsed[0]['CASE_NO'], 'E1307900972')
self.assertEqual(parsed[0]['SE_REPLY_BY_DT'], datetime.datetime(2013, 4, 3))
self.assertEqual(parsed[0]['TRANS_DATE'], datetime.datetime(2013, 1, 31))
self.assertEqual(parsed[0]['TRANS_AMT'], 58.25)
self.assertEqual(parsed[0]['TRANS_CURR_CD'], 'EUR')
self.assertEqual(parsed[0]['FIRST_PRSNT_AMT'], 58.25)
self.assertEqual(parsed[1]['TRANS_AMT'], 69.99)
if __name__ == '__main__':
unittest.main()
| 37.846154 | 115 | 0.691057 |
bf8ff9d9211f8ad54ac827e2b0312a475d7b7d6b | 918 | py | Python | octodns/cmds/dump.py | nexcess/octodns | 68fcd69f228fcfff6c8c6a970dcd40170994218e | [
"MIT"
] | null | null | null | octodns/cmds/dump.py | nexcess/octodns | 68fcd69f228fcfff6c8c6a970dcd40170994218e | [
"MIT"
] | null | null | null | octodns/cmds/dump.py | nexcess/octodns | 68fcd69f228fcfff6c8c6a970dcd40170994218e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''
Octo-DNS Dumper
'''
from __future__ import absolute_import, division, print_function, \
unicode_literals
from octodns.cmds.args import ArgumentParser
from octodns.manager import Manager
def main():
parser = ArgumentParser(description=__doc__.split('\n')[1])
parser.add_argument('--config-file', required=True,
help='The Manager configuration file to use')
parser.add_argument('--output-dir', required=True,
help='The directory into which the results will be '
'written (Note: will overwrite existing files)')
parser.add_argument('zone', help='Zone to dump')
parser.add_argument('source', nargs='+',
help='Source(s) to pull data from')
args = parser.parse_args()
manager = Manager(args.config_file)
manager.dump(args.zone, args.output_dir, *args.source)
| 31.655172 | 76 | 0.651416 |
ef5f369a6a4f87a352ee3516c2cdd5f7a81df88e | 1,599 | py | Python | powerline/segments/pdb.py | PH111P/powerline | f8dfe7e7e3d021cd66bc0e19b19ea4a51949cb9a | [
"MIT"
] | 23 | 2016-12-16T09:03:18.000Z | 2022-02-25T19:19:23.000Z | powerline/segments/pdb.py | PH111P/powerline | f8dfe7e7e3d021cd66bc0e19b19ea4a51949cb9a | [
"MIT"
] | 30 | 2016-12-20T11:11:42.000Z | 2019-11-19T15:23:59.000Z | powerline/segments/pdb.py | PH111P/powerline | f8dfe7e7e3d021cd66bc0e19b19ea4a51949cb9a | [
"MIT"
] | 4 | 2016-12-11T18:29:11.000Z | 2018-04-22T07:51:28.000Z | import os
from powerline.theme import requires_segment_info
@requires_segment_info
def current_line(pl, segment_info):
'''Displays line number that is next to be run
'''
return str(segment_info['curframe'].f_lineno)
@requires_segment_info
def current_file(pl, segment_info, basename=True):
'''Displays current file name
:param bool basename:
If true only basename is displayed.
'''
filename = segment_info['curframe'].f_code.co_filename
if basename:
filename = os.path.basename(filename)
return filename
@requires_segment_info
def current_code_name(pl, segment_info):
'''Displays name of the code object of the current frame
'''
return segment_info['curframe'].f_code.co_name
@requires_segment_info
def current_context(pl, segment_info):
'''Displays currently executed context name
This is similar to :py:func:`current_code_name`, but gives more details.
Currently it only gives module file name if code_name happens to be
``<module>``.
'''
name = segment_info['curframe'].f_code.co_name
if name == '<module>':
name = os.path.basename(segment_info['curframe'].f_code.co_filename)
return name
@requires_segment_info
def stack_depth(pl, segment_info, full_stack=False):
'''Displays current stack depth
Result is relative to the stack depth at the time prompt was first run.
:param bool full_stack:
If true then absolute depth is used.
'''
return str(len(segment_info['pdb'].stack) - (
0 if full_stack else segment_info['initial_stack_length']))
| 27.101695 | 76 | 0.712946 |
80168c5757accabd8b92e1a76baf4a8585f24ec4 | 13,511 | py | Python | ansible_var_checker/scope/scope.py | KlutzyBubbles/jinja2schema | 6e04e8b2b1cdef6dbecab80f0d28129873d3f82b | [
"BSD-3-Clause"
] | null | null | null | ansible_var_checker/scope/scope.py | KlutzyBubbles/jinja2schema | 6e04e8b2b1cdef6dbecab80f0d28129873d3f82b | [
"BSD-3-Clause"
] | null | null | null | ansible_var_checker/scope/scope.py | KlutzyBubbles/jinja2schema | 6e04e8b2b1cdef6dbecab80f0d28129873d3f82b | [
"BSD-3-Clause"
] | null | null | null | from .access_type import AccessType
import pprint
# Built-in Ansible variable names ("magic" variables plus commonly gathered
# facts).  Leaf values are simple ``True`` markers; nested dicts describe
# known attribute trees.  Used to pre-seed the root Scope so references to
# these names are not flagged as undefined.
MAGIC_VARS = {
    "ansible_check_mode": True,
    "ansible_config_file": True,
    "ansible_dependent_role_names": True,
    "ansible_diff_mode": True,
    "ansible_forks": True,
    "ansible_inventory_sources": True,
    "ansible_limit": True,
    "ansible_loop": True,
    "ansible_loop_var": True,
    "ansible_index_var": True,
    "ansible_parent_role_names": True,
    "ansible_parent_role_paths": True,
    "ansible_play_batch": True,
    "ansible_play_hosts": True,
    "ansible_play_hosts_all": True,
    "ansible_play_role_names": True,
    "ansible_playbook_python": True,
    "ansible_role_names": True,
    "ansible_role_name": True,
    "ansible_collection_name": True,
    "ansible_run_tags": True,
    "ansible_search_path": True,
    "ansible_skip_tags": True,
    "ansible_verbosity": True,
    "ansible_version": True,
    "group_names": True,
    "groups": True,
    "inventory_hostname": True,
    "inventory_hostname_short": True,
    "inventory_dir": True,
    "inventory_file": True,
    "play_hosts": True,
    "ansible_play_name": True,
    "playbook_dir": True,
    "role_name": True,
    "role_names": True,
    "role_path": True,
    "ansible_local": True,
    "ansible_become_user": True,
    "ansible_connection": True,
    "ansible_host": True,
    "ansible_python_interpreter": True,
    "ansible_user": True,
    "hostvars": True,
    "omit": True,
    # Facts gathered by the setup module; nested dicts list known sub-keys.
    "ansible_facts": {
        "ansible_all_ipv4_addresses": True,
        "ansible_all_ipv6_addresses": True,
        "ansible_apparmor": {
            "status": True
        },
        "ansible_architecture": True,
        "ansible_bios_date": True,
        "ansible_bios_version": True,
        "ansible_cmdline": True,
        "ansible_date_time": {
            "date": True,
            "day": True,
            "epoch": True,
            "hour": True,
            "iso8601": True,
            "iso8601_basic": True,
            "iso8601_basic_short": True,
            "iso8601_micro": True,
            "minute": True,
            "month": True,
            "second": True,
            "time": True,
            "tz": True,
            "tz_offset": True,
            "weekday": True,
            "weekday_number": True,
            "weeknumber": True,
            "year": True
        },
        "ansible_default_ipv4": {
            "address": True,
            "alias": True,
            "broadcast": True,
            "gateway": True,
            "interface": True,
            "macaddress": True,
            "mtu": True,
            "netmask": True,
            "network": True,
            "type": True
        },
        "ansible_default_ipv6": True,
        "ansible_device_links": {
            "ids": True,
            "labels": True,
            "masters": True,
            "uuids": True
        },
        "ansible_devices": True,
        "ansible_distribution": True,
        "ansible_distribution_file_parsed": True,
        "ansible_distribution_file_path": True,
        "ansible_distribution_file_variety": True,
        "ansible_distribution_major_version": True,
        "ansible_distribution_release": True,
        "ansible_distribution_version": True,
        "ansible_dns": {
            "nameservers": True
        },
        "ansible_domain": True,
        "ansible_effective_group_id": True,
        "ansible_effective_user_id": True,
        "ansible_env": {
            "HOME": True,
            "LANG": True,
            "LESSOPEN": True,
            "LOGNAME": True,
            "MAIL": True,
            "PATH": True,
            "PWD": True,
            "SELINUX_LEVEL_REQUESTED": True,
            "SELINUX_ROLE_REQUESTED": True,
            "SELINUX_USE_CURRENT_RANGE": True,
            "SHELL": True,
            "SHLVL": True,
            "SSH_CLIENT": True,
            "SSH_CONNECTION": True,
            "USER": True,
            "XDG_RUNTIME_DIR": True,
            "XDG_SESSION_ID": True,
            "_": True
        },
        "ansible_eth0": {
            "active": True,
            "device": True,
            "ipv4": {
                "address": True,
                "broadcast": True,
                "netmask": True,
                "network": True
            },
            "ipv6": True,
            "macaddress": True,
            "module": True,
            "mtu": True,
            "pciid": True,
            "promisc": True,
            "type": True
        },
        "ansible_eth1": {
            "active": True,
            "device": True,
            "ipv4": {
                "address": True,
                "broadcast": True,
                "netmask": True,
                "network": True
            },
            "ipv6": True,
            "macaddress": True,
            "module": True,
            "mtu": True,
            "pciid": True,
            "promisc": True,
            "type": True
        },
        "ansible_fips": True,
        "ansible_form_factor": True,
        "ansible_fqdn": True,
        "ansible_hostname": True,
        "ansible_interfaces": True,
        "ansible_is_chroot": True,
        "ansible_kernel": True,
        "ansible_lo": {
            "active": True,
            "device": True,
            "ipv4": {
                "address": True,
                "broadcast": True,
                "netmask": True,
                "network": True
            },
            "ipv6": True,
            "mtu": True,
            "promisc": True,
            "type": True
        },
        "ansible_local": True,
        "ansible_lsb": {
            "codename": True,
            "description": True,
            "id": True,
            "major_release": True,
            "release": True
        },
        "ansible_machine": True,
        "ansible_machine_id": True,
        "ansible_memfree_mb": True,
        "ansible_memory_mb": {
            "nocache": {
                "free": True,
                "used": True
            },
            "real": {
                "free": True,
                "total": True,
                "used": True
            },
            "swap": {
                "cached": True,
                "free": True,
                "total": True,
                "used": True
            }
        },
        "ansible_memtotal_mb": True,
        "ansible_mounts": True,
        "ansible_nodename": True,
        "ansible_os_family": True,
        "ansible_pkg_mgr": True,
        "ansible_processor": True,
        "ansible_processor_cores": True,
        "ansible_processor_count": True,
        "ansible_processor_nproc": True,
        "ansible_processor_threads_per_core": True,
        "ansible_processor_vcpus": True,
        "ansible_product_name": True,
        "ansible_product_serial": True,
        "ansible_product_uuid": True,
        "ansible_product_version": True,
        "ansible_python": {
            "executable": True,
            "has_sslcontext": True,
            "type": True,
            "version": {
                "major": True,
                "micro": True,
                "minor": True,
                "releaselevel": True,
                "serial": True
            },
            "version_info": True
        },
        "ansible_python_version": True,
        "ansible_real_group_id": True,
        "ansible_real_user_id": True,
        "ansible_selinux": {
            "config_mode": True,
            "mode": True,
            "policyvers": True,
            "status": True,
            "type": True
        },
        "ansible_selinux_python_present": True,
        "ansible_service_mgr": True,
        "ansible_ssh_host_key_ecdsa_public": True,
        "ansible_ssh_host_key_ed25519_public": True,
        "ansible_ssh_host_key_rsa_public": True,
        "ansible_swapfree_mb": True,
        "ansible_swaptotal_mb": True,
        "ansible_system": True,
        "ansible_system_capabilities": True,
        "ansible_system_capabilities_enforced": True,
        "ansible_system_vendor": True,
        "ansible_uptime_seconds": True,
        "ansible_user_dir": True,
        "ansible_user_gecos": True,
        "ansible_user_gid": True,
        "ansible_user_id": True,
        "ansible_user_shell": True,
        "ansible_user_uid": True,
        "ansible_userspace_architecture": True,
        "ansible_userspace_bits": True,
        "ansible_virtualization_role": True,
        "ansible_virtualization_type": True,
        "gather_subset": True,
        "module_setup": True
    }
}
class Scope(object):
    """A lexical scope for tracking Ansible variable accesses.

    Scopes form a tree: ``add_variable``/``add_attribute`` delegate to the
    parent chain before recording locally, so the root scope owns all
    bookkeeping.  The root scope (no parent) is pre-seeded with Ansible's
    built-in "magic" variables so they are never reported as undefined.
    Per-variable access records are ``AccessType`` instances (see
    ``.access_type``).
    """
    def __init__(self, parent=None, host=None, magic_vars=MAGIC_VARS):
        """Create a scope.

        :param parent: enclosing Scope, or None for the root scope.
        :param host: opaque host identifier this scope belongs to.
        :param magic_vars: mapping of built-in names injected into a root scope.
        """
        self.parent = parent
        self.host = host
        self.children = []
        self.variables = {}
        if self.parent is None:
            # BUG FIX: previously always injected the module-level MAGIC_VARS
            # constant, silently ignoring a caller-supplied ``magic_vars``.
            self.inject_magic_vars(magic_vars=magic_vars)
    def inject_magic_vars(self, magic_vars=MAGIC_VARS, trail=None):
        """Recursively register every name in ``magic_vars`` as 'magic'.

        Nested dicts become attribute trails, e.g.
        ``ansible_facts.ansible_env.HOME``.
        """
        # Mutable-default-argument fix: the old default ([]) was never mutated
        # in place, but None is the safe idiom.
        if trail is None:
            trail = []
        for key, value in magic_vars.items():
            if isinstance(value, dict):
                temp = list(trail)
                temp.append(key)
                self.inject_magic_vars(value, temp)
            else:
                if len(trail) == 0:
                    self.add_variable(key, 'magic')
                else:
                    temp = list(trail)
                    temp.append(key)
                    self.add_attribute(temp.pop(0), temp, 'magic')
    def __repr__(self):
        return pprint.pformat(self.variables)
    def add_variable(self, name, action, ignore_parent=False):
        """Record an access of ``name`` with the given action (e.g. 'magic').

        Delegates to the root scope first.  The local fallback branch is kept
        for compatibility, but since add_variable always returns True it is
        effectively dead code.
        """
        if ignore_parent or self.parent is None:
            if name not in self.variables:
                self.variables[name] = AccessType()
            self.variables[name].add(action)
        elif not self.parent.add_variable(name, action, ignore_parent=ignore_parent):
            return self.add_variable(name, action, ignore_parent=True)
        return True
    def add_attribute(self, name, attribute, action, ignore_parent=False):
        """Record an access of attribute trail ``attribute`` under ``name``."""
        if ignore_parent or self.parent is None:
            if name not in self.variables:
                self.variables[name] = AccessType()
            self.variables[name].add(action)
            self.variables[name].add_attribute(attribute, action)
        elif not self.parent.add_attribute(name, attribute, action, ignore_parent=ignore_parent):
            return self.add_attribute(name, attribute, action, ignore_parent=True)
        return True
    def construct_with_attr(self, name, with_history=False):
        """Return a plain-dict view of ``name``'s recorded attribute tree."""
        if name not in self.variables:
            return {}
        return self.variables[name].construct_from_attr(with_history=with_history)
    def is_undefined(self, name, trail=None):
        """Return True if ``name`` (optionally nested under ``trail``) was
        never defined in this scope or any parent.
        """
        # BUG FIX: removed leftover debug print() statements.
        if trail is None:
            trail = []
        if name == 'undefined':
            # Special-cased literal name; behaviour preserved from the
            # original logic (returns False once a definition is found).
            if name not in self.variables:
                return True
            if self.variables[name].is_undefined():
                if self.parent is not None:
                    return self.parent.is_undefined(name, trail)
                return True
            return False
        if len(trail) > 0:
            current_vars = self.variables
            for key in trail:
                if key not in current_vars:
                    return True
                current_vars = current_vars[key]
            if name not in current_vars:
                return True
            if current_vars[name].is_undefined():
                if self.parent is not None:
                    return self.parent.is_undefined(name, trail)
                return True
        else:
            if name not in self.variables:
                return True
            if self.variables[name].is_undefined():
                if self.parent is not None:
                    return self.parent.is_undefined(name, trail)
                return True
        return False
    def is_magic(self, name):
        """True if ``name`` is a built-in variable along the scope chain."""
        if name not in self.variables:
            return False
        if self.variables[name].is_magic():
            if self.parent is not None:
                return self.parent.is_magic(name)
            return True
        return False
    def is_magic_used(self, name):
        """True if the magic variable ``name`` was actually referenced."""
        if name not in self.variables:
            return False
        if self.variables[name].is_magic_used():
            if self.parent is not None:
                return self.parent.is_magic_used(name)
            return True
        return False
    def get_undefined(self, trail=None, exclude_magic=True, with_history=False):
        """Collect all (possibly nested) undefined variables as a dict tree."""
        if trail is None:
            trail = []
        output = {}
        mod_trail = list(trail)
        if len(mod_trail) > 0:
            # Walk down the attribute trail to the AccessType of interest.
            trail_popped = mod_trail.pop(0)
            variable = self.variables[trail_popped]
            if len(mod_trail) > 0:
                for trail_key in mod_trail:
                    if trail_key in variable.attributes.keys():
                        trail_popped = trail_key
                        variable = variable.attributes[trail_key]
                    else:
                        return output
            if variable.has_attr():
                for key in variable.attributes.keys():
                    if variable.attributes[key].has_attr():
                        # Recurse one level deeper along this attribute.
                        temp = list(trail)
                        temp.append(key)
                        child_undefined = self.get_undefined(trail=temp, exclude_magic=True, with_history=with_history)
                        if len(child_undefined.keys()) != 0:
                            if key not in output.keys():
                                output[key] = {}
                            output[key] = child_undefined
                    else:
                        if variable.attributes[key].is_undefined():
                            output[key] = variable.attributes[key].construct_from_attr(with_history=with_history)
            else:
                if variable.is_undefined():
                    output[trail_popped] = variable.construct_from_attr(with_history=with_history)
        else:
            for key in self.variables.keys():
                if exclude_magic and self.is_magic(key):
                    continue
                if self.variables[key].has_attr():
                    temp = list(trail)
                    temp.append(key)
                    child_undefined = self.get_undefined(trail=temp, exclude_magic=True, with_history=with_history)
                    if len(child_undefined.keys()) != 0:
                        if key not in output.keys():
                            output[key] = {}
                        output[key] = child_undefined
                else:
                    if self.is_undefined(key):
                        output[key] = self.construct_with_attr(key, with_history=with_history)
        return output
    def get_all(self, exclude_magic=True, with_history=True):
        """Return every tracked variable (magic ones only if actually used)."""
        output = {}
        for key in self.variables.keys():
            if exclude_magic and self.is_magic(key):
                if not self.is_magic_used(key):
                    continue
            output[key] = self.construct_with_attr(key, with_history=with_history)
        return output
    def get_debug(self, exclude_magic=True):
        """Return a debug view (with access history) of tracked variables."""
        output = {}
        for key in self.variables.keys():
            if exclude_magic and self.is_magic(key):
                continue
            # BUG FIX: construct_with_history() does not exist on Scope and
            # raised AttributeError; use the history-enabled attribute view.
            output[key] = self.construct_with_attr(key, with_history=True)
        return output
    def create_child(self):
        """Create, register and return a child scope of this one."""
        self.children.append(Scope(parent=self))
        return self.children[-1]
    def has_attributes(self):
        """True if any variable has been recorded in this scope."""
        return len(self.variables.keys()) > 0
| 29.759912 | 107 | 0.6041 |
9876317f85359f023b8cfa21c030a1e62204b85a | 6,545 | py | Python | ironic/tests/unit/drivers/modules/irmc/test_bios.py | armohamm/ironic | 21093ca886ed736a7a25bf5e71e05d41e132fd2f | [
"Apache-2.0"
] | 2 | 2019-06-17T21:37:53.000Z | 2020-07-11T03:58:39.000Z | ironic/tests/unit/drivers/modules/irmc/test_bios.py | armohamm/ironic | 21093ca886ed736a7a25bf5e71e05d41e132fd2f | [
"Apache-2.0"
] | 13 | 2019-04-12T21:55:36.000Z | 2020-05-26T14:07:13.000Z | ironic/tests/unit/drivers/modules/irmc/test_bios.py | armohamm/ironic | 21093ca886ed736a7a25bf5e71e05d41e132fd2f | [
"Apache-2.0"
] | 11 | 2019-04-10T04:43:33.000Z | 2020-01-31T10:34:14.000Z | # Copyright 2018 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for IRMC BIOS configuration
"""
import mock
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules.irmc import bios as irmc_bios
from ironic.drivers.modules.irmc import common as irmc_common
from ironic import objects
from ironic.tests.unit.drivers.modules.irmc import test_common
class IRMCBIOSTestCase(test_common.BaseIRMCTest):
    """Unit tests for the iRMC BIOS interface of ironic's irmc driver."""
    def setUp(self):
        super(IRMCBIOSTestCase, self).setUp()
    @mock.patch.object(irmc_common, 'parse_driver_info',
                       autospec=True)
    def test_validate(self, parse_driver_info_mock):
        # validate() should only parse the node's driver info.
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.driver.bios.validate(task)
            parse_driver_info_mock.assert_called_once_with(task.node)
    # Decorators are applied bottom-up, so mock args arrive innermost-first:
    # get_bios_settings_mock, then set_bios_configuration_mock.
    @mock.patch.object(irmc_bios.irmc.elcm, 'set_bios_configuration',
                       autospec=True)
    @mock.patch.object(irmc_bios.irmc.elcm, 'get_bios_settings',
                       autospec=True)
    def test_apply_configuration(self, get_bios_settings_mock,
                                 set_bios_configuration_mock):
        settings = [{
            "name": "launch_csm_enabled",
            "value": True
        }, {
            "name": "hyper_threading_enabled",
            "value": True
        }, {
            "name": "cpu_vt_enabled",
            "value": True
        }]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            irmc_info = irmc_common.parse_driver_info(task.node)
            # Stub node.save so cache_bios_settings side effects don't hit the DB.
            task.node.save = mock.Mock()
            get_bios_settings_mock.return_value = settings
            task.driver.bios.apply_configuration(task, settings)
            set_bios_configuration_mock.assert_called_once_with(irmc_info,
                                                                settings)
    @mock.patch.object(irmc_bios.irmc.elcm, 'set_bios_configuration',
                       autospec=True)
    def test_apply_configuration_failed(self, set_bios_configuration_mock):
        # A SCCI-level failure must be translated into IRMCOperationError.
        settings = [{
            "name": "launch_csm_enabled",
            "value": True
        }, {
            "name": "hyper_threading_enabled",
            "value": True
        }, {
            "name": "setting",
            "value": True
        }]
        # The irmc library is mocked in these tests, so give SCCIError a
        # concrete exception type for assertRaises/except clauses to match.
        irmc_bios.irmc.scci.SCCIError = Exception
        set_bios_configuration_mock.side_effect = Exception
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.IRMCOperationError,
                              task.driver.bios.apply_configuration,
                              task, settings)
    def test_factory_reset(self):
        # Factory reset is not implemented by the irmc BIOS interface.
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.UnsupportedDriverExtension,
                              task.driver.bios.factory_reset, task)
    # Mock args arrive innermost decorator first (see note above).
    @mock.patch.object(objects.BIOSSettingList, 'sync_node_setting')
    @mock.patch.object(objects.BIOSSettingList, 'create')
    @mock.patch.object(objects.BIOSSettingList, 'save')
    @mock.patch.object(objects.BIOSSettingList, 'delete')
    @mock.patch.object(irmc_bios.irmc.elcm, 'get_bios_settings',
                       autospec=True)
    def test_cache_bios_settings(self, get_bios_settings_mock,
                                 delete_mock, save_mock, create_mock,
                                 sync_node_setting_mock):
        settings = [{
            "name": "launch_csm_enabled",
            "value": True
        }, {
            "name": "hyper_threading_enabled",
            "value": True
        }, {
            "name": "cpu_vt_enabled",
            "value": True
        }]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            irmc_info = irmc_common.parse_driver_info(task.node)
            get_bios_settings_mock.return_value = settings
            # sync_node_setting returns (create, update, delete, nochange)
            # lists; verify each list is routed to the matching DB call.
            sync_node_setting_mock.return_value = \
                (
                    [
                        {
                            "name": "launch_csm_enabled",
                            "value": True
                        }],
                    [
                        {
                            "name": "hyper_threading_enabled",
                            "value": True
                        }],
                    [
                        {
                            "name": "cpu_vt_enabled",
                            "value": True
                        }],
                    []
                )
            task.driver.bios.cache_bios_settings(task)
            get_bios_settings_mock.assert_called_once_with(irmc_info)
            sync_node_setting_mock.assert_called_once_with(task.context,
                                                           task.node.id,
                                                           settings)
            create_mock.assert_called_once_with(
                task.context, task.node.id,
                sync_node_setting_mock.return_value[0])
            save_mock.assert_called_once_with(
                task.context, task.node.id,
                sync_node_setting_mock.return_value[1])
            # delete takes only the names of the settings to remove.
            delete_names = \
                [setting['name'] for setting in
                 sync_node_setting_mock.return_value[2]]
            delete_mock.assert_called_once_with(task.context, task.node.id,
                                                delete_names)
    @mock.patch.object(irmc_bios.irmc.elcm, 'get_bios_settings',
                       autospec=True)
    def test_cache_bios_settings_failed(self, get_bios_settings_mock):
        # A SCCI-level failure must be translated into IRMCOperationError.
        irmc_bios.irmc.scci.SCCIError = Exception
        get_bios_settings_mock.side_effect = Exception
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.IRMCOperationError,
                              task.driver.bios.cache_bios_settings,
                              task)
| 41.687898 | 75 | 0.574637 |
15fffffe1e1558d84bb70de8a2e15d2124ced15b | 870 | py | Python | lab2/multi_echo_server.py | carrolji/cmput404 | fda7c3f3d8edcd900f73c0667ded4b9d6e5a113d | [
"Apache-2.0"
] | null | null | null | lab2/multi_echo_server.py | carrolji/cmput404 | fda7c3f3d8edcd900f73c0667ded4b9d6e5a113d | [
"Apache-2.0"
] | null | null | null | lab2/multi_echo_server.py | carrolji/cmput404 | fda7c3f3d8edcd900f73c0667ded4b9d6e5a113d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import socket
import time
from multiprocessing import Process
HOST = ""  # empty string binds to all available interfaces
PORT = 8001  # TCP port the echo server listens on
BUFFER_SIZE = 1024  # bytes read per recv() call
def main():
    """Accept TCP connections forever, echoing each one in a child process."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind((HOST, PORT))
        server.listen(2)
        while True:
            conn, addr = server.accept()
            # Daemon worker: dies with the parent instead of lingering.
            worker = Process(target=handle_echo, args=(addr, conn))
            worker.daemon = True
            worker.start()
            print("Started process ", worker)
def handle_echo(addr, conn):
    """Read until the peer closes its side, then echo everything back."""
    print("Connected by", addr)
    chunks = []
    while True:
        data = conn.recv(BUFFER_SIZE)
        if not data:
            break
        chunks.append(data)
    time.sleep(0.5)
    conn.sendall(b"".join(chunks))
    conn.close()
if __name__ == "__main__":
main() | 22.894737 | 64 | 0.588506 |
c6120042516b240fe72841e1a027c611ab72ea29 | 374 | py | Python | tests/test_tag_list_comparison.py | bennorth/python-xml-serdes | fb1c44e1630bdbf1fc0ae97a51fb5adac498ffc0 | [
"Apache-2.0"
] | 5 | 2017-02-04T21:17:47.000Z | 2022-03-06T03:15:39.000Z | tests/test_tag_list_comparison.py | bennorth/python-xml-serdes | fb1c44e1630bdbf1fc0ae97a51fb5adac498ffc0 | [
"Apache-2.0"
] | 3 | 2016-10-12T18:06:34.000Z | 2019-08-20T16:00:27.000Z | tests/test_tag_list_comparison.py | bennorth/python-xml-serdes | fb1c44e1630bdbf1fc0ae97a51fb5adac498ffc0 | [
"Apache-2.0"
] | null | null | null | from xmlserdes.errors import TagListComparison
def test_length():
    # Comparison length matches the longer (non-empty) tag list.
    expected_tags = [str(i) for i in range(10)]
    comparison = TagListComparison(expected_tags, [])
    assert len(comparison) == len(expected_tags)
def test_helper_lines():
    # A mismatch yields one 'missing' and one 'unexpected' entry.
    comparison = TagListComparison(['hello-world'], ['hellO-world'])
    assert len(comparison) == 2
    assert str(comparison) == '[missing: hello-world, unexpected: hellO-world]'
| 22 | 70 | 0.612299 |
3fc79a0fcb7a243588aa1f02308a9cb900553430 | 542 | py | Python | Mundo #02/ex-040-a11notas-aluno-2.0(1).py | freitasSystemOutPrint/Python3 | e5e88fbe8e7e0c5472573d2c901844270385194b | [
"MIT"
] | 1 | 2020-06-02T07:31:16.000Z | 2020-06-02T07:31:16.000Z | Mundo #02/ex-040-a11notas-aluno-2.0.py | freitasSystemOutPrint/Python3 | e5e88fbe8e7e0c5472573d2c901844270385194b | [
"MIT"
] | null | null | null | Mundo #02/ex-040-a11notas-aluno-2.0.py | freitasSystemOutPrint/Python3 | e5e88fbe8e7e0c5472573d2c901844270385194b | [
"MIT"
] | null | null | null | n = input('Digite seu nome: ')
# Read two grades, average them and report the student's standing
# (user-facing strings are Brazilian Portuguese and must stay as-is).
n1 = float(input('Digite sua primeira nota: '))  # first grade
n2 = float(input('Digite sua segunda nota: '))  # second grade
r = (n1 + n2) / 2  # simple average of the two grades
# NOTE(review): averages strictly between 6.9 and 7.0 match no branch and
# print nothing -- confirm whether the middle band should be r < 7 instead.
if r <5:
    # failed: average below 5.0
    print('{}, você está REPROVADO, com média {} !'.format(n,r))
elif r >= 5 and r <=6.9:
    # make-up exam band: 5.0 <= average <= 6.9
    print('{} você está de RECUPERAÇÃO, com média {}'.format(n,r))
elif r >=7:
    # approved: average 7.0 or higher
    print('{} você está APROVADO, com média {} !'.format(n,r))
input('DIGITE ENTER PARA SAIR')  # pause so the console window stays open
"""> Média abaixo de 5.0:
- Reprovado
> Média entre 5.0 e 6.9:
- Recuperação
> Média 7.0 ou superior:
- Aprovado """
| 24.636364 | 66 | 0.605166 |
9847ba0db6c12f29ea98225bfdbff7fa88574ca3 | 4,998 | py | Python | services/web/apps/fm/reportoutages/views.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | services/web/apps/fm/reportoutages/views.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | services/web/apps/fm/reportoutages/views.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# fm.reportoutages
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import datetime
from collections import defaultdict
# Third-party modules
from django import forms
from django.contrib.admin.widgets import AdminDateWidget
from mongoengine.queryset.visitor import Q
# NOC modules
from noc.fm.models.outage import Outage
from noc.sa.models.managedobject import ManagedObject
from noc.sa.models.useraccess import UserAccess
from noc.lib.app.simplereport import SimpleReport, TableColumn, PredefinedReport
from noc.core.translation import ugettext as _
class ReportForm(forms.Form):
    # Report parameters: either a fixed look-back duration, or an explicit
    # from/to date range (dates entered as DD.MM.YYYY; parsed in get_data).
    duration = forms.ChoiceField(
        choices=[
            (0, _("Range")),  # 0 selects the explicit from/to date range
            (86400, _("1 day")),
            (7 * 86400, _("1 week")),
            (30 * 86400, _("1 month")),
        ],
        label=_("Duration"),
    )
    from_date = forms.CharField(widget=AdminDateWidget, label=_("From Date"), required=False)
    to_date = forms.CharField(widget=AdminDateWidget, label=_("To Date"), required=False)
class ReportOutagesApplication(SimpleReport):
    """Tabular report of managed-object outages over a duration or date range."""
    title = _("Outages")
    form = ReportForm
    predefined_reports = {
        "1d": PredefinedReport(_("Outages (1 day)"), {"duration": 86400}),
        "7d": PredefinedReport(_("Outages (7 days)"), {"duration": 7 * 86400}),
        "30d": PredefinedReport(_("Outages (30 day)"), {"duration": 30 * 86400}),
    }
    def get_data(self, request, duration, from_date=None, to_date=None, **kwargs):
        """Build report rows of per-object downtime and availability.

        :param duration: look-back window in seconds; falsy selects the
            from_date/to_date range branch.
        :param from_date: range start, DD.MM.YYYY.
        :param to_date: range end, DD.MM.YYYY; defaults to now.
        """
        now = datetime.datetime.now()
        # NOTE(review): with no from_date this forces a 1-second window and
        # clobbers any caller-supplied duration (including the predefined
        # reports above) -- looks suspicious, confirm intent upstream.
        if not from_date:
            duration = 1
        if int(duration):
            # Duration branch: window is [now - duration, now].
            self.logger.info("Use duration\n")
            d = datetime.timedelta(seconds=int(duration))
            b = now - d
            # Match outages overlapping the window, or still open.
            q = Q(start__gte=b) | Q(stop__gte=b) | Q(stop__exists=False)
        else:
            # Explicit date-range branch (DD.MM.YYYY strings).
            b = datetime.datetime.strptime(from_date, "%d.%m.%Y")
            q = Q(start__gte=b) | Q(stop__gte=b) | Q(stop__exists=False)
            if to_date:
                if from_date == to_date:
                    # Same-day range: extend the end by one day so the whole
                    # day is included.
                    t1 = datetime.datetime.strptime(to_date, "%d.%m.%Y") + datetime.timedelta(1)
                else:
                    t1 = datetime.datetime.strptime(to_date, "%d.%m.%Y")
            else:
                t1 = now
            q &= Q(start__lte=t1) | Q(stop__lte=t1)
            d = datetime.timedelta(seconds=int((t1 - b).total_seconds()))
        # Accumulate outages and total outage seconds per managed object.
        outages = defaultdict(list)
        otime = defaultdict(int)
        for o in Outage.objects.filter(q):
            start = max(o.start, b)  # clamp to the window start
            stop = o.stop if o.stop else now  # open outage -> still ongoing
            outages[o.object] += [o]
            otime[o.object] += (stop - start).total_seconds()
        td = d.total_seconds()
        # Non-superusers only see objects in their administrative domains.
        if not request.user.is_superuser:
            for mo in ManagedObject.objects.exclude(
                administrative_domain__in=UserAccess.get_domains(request.user)
            ):
                if mo.id in otime:
                    otime.pop(mo.id)
        # Load managed objects
        mos = list(otime)
        chunk = 500
        mo = {}
        while mos:
            for o in ManagedObject.objects.filter(id__in=mos[:chunk]):
                mo[o.id] = o
            mos = mos[chunk:]
        r = []
        # Rows sorted by total downtime, worst first.
        for o in sorted(otime, key=lambda x: -otime[x]):
            m = mo.get(o)
            if not m:
                continue  # Hanging Outage
            dt = min(td, otime[o])  # cap downtime at the window length
            downtime = "%02d:%02d:%02d" % ((dt // 3600) % 24, (dt // 60) % 60, dt % 60)
            if dt >= 86400:
                downtime = "%dd %s" % (dt // 86400, downtime)
            if td:
                avail = float(td - dt) * 100 / td
            else:
                avail = 0
            r += [
                (
                    m.name,
                    m.address,
                    m.profile.name,
                    m.platform.name if m.platform else "",
                    _("Yes") if m.is_managed else _("No"),
                    _("Yes") if m.get_status() else _("No"),
                    downtime,
                    avail,
                    len(outages[o]),
                )
            ]
        return self.from_dataset(
            title=self.title,
            columns=[
                _("Managed Object"),
                _("Address"),
                _("Profile"),
                _("Platform"),
                TableColumn(_("Managed"), align="right"),
                TableColumn(_("Status"), align="right"),
                TableColumn(_("Downtime"), align="right"),
                TableColumn(_("Availability"), align="right", format="percent"),
                TableColumn(_("Downs"), align="right", format="integer"),
            ],
            data=r,
            enumerate=True,
        )
| 37.022222 | 96 | 0.498199 |
2026ba93f5ae4dac049ebde5117dbbb932e22cc2 | 1,017 | py | Python | autogluon/utils/tabular/ml/constants.py | jhutchings1/autogluon | 9a0eb8a8f7c88cd09b081adf5d4c6c281d113d75 | [
"Apache-2.0"
] | null | null | null | autogluon/utils/tabular/ml/constants.py | jhutchings1/autogluon | 9a0eb8a8f7c88cd09b081adf5d4c6c281d113d75 | [
"Apache-2.0"
] | null | null | null | autogluon/utils/tabular/ml/constants.py | jhutchings1/autogluon | 9a0eb8a8f7c88cd09b081adf5d4c6c281d113d75 | [
"Apache-2.0"
] | null | null | null | # Do not change these!
# Canonical problem-type identifiers used throughout AutoGluon's tabular code.
BINARY = 'binary'
MULTICLASS = 'multiclass'
REGRESSION = 'regression'
SOFTCLASS = 'softclass' # classification with soft-target (rather than classes, labels are probabilities of each class).
# Groupings of the identifiers above, for membership checks.
PROBLEM_TYPES_CLASSIFICATION = [BINARY, MULTICLASS]
PROBLEM_TYPES_REGRESSION = [REGRESSION]
PROBLEM_TYPES = PROBLEM_TYPES_CLASSIFICATION + PROBLEM_TYPES_REGRESSION + [SOFTCLASS]
REFIT_FULL_NAME = 'refit_single_full' # stack-name used for refit_single_full (aka "compressed") models
REFIT_FULL_SUFFIX = "_FULL" # suffix appended to model name for refit_single_full (aka "compressed") models
# AG_ARGS variables are key names in model hyperparameters to dictionaries of custom AutoGluon arguments.
AG_ARGS = '_ag_args' # Contains arguments to control model name, model priority, and the valid configurations which it can be used in.
AG_ARGS_FIT = '_ag_args_fit' # Contains arguments that impact model training, such as early stopping rounds, #cores, #gpus, max time limit, max memory usage # TODO
| 59.823529 | 165 | 0.794494 |
d8aca93793f0d10300410d3dc868054303e7243d | 69 | py | Python | pymessagefocus/__init__.py | TriggeredMessaging/pymessagefocus | 4f75ce508b526f96c71bd2cf09848ddff7de3c30 | [
"MIT"
] | null | null | null | pymessagefocus/__init__.py | TriggeredMessaging/pymessagefocus | 4f75ce508b526f96c71bd2cf09848ddff7de3c30 | [
"MIT"
] | null | null | null | pymessagefocus/__init__.py | TriggeredMessaging/pymessagefocus | 4f75ce508b526f96c71bd2cf09848ddff7de3c30 | [
"MIT"
] | 3 | 2016-08-11T20:11:16.000Z | 2020-06-01T15:34:34.000Z | from __future__ import absolute_import
from .pymessagefocus import *
| 23 | 38 | 0.855072 |
c232db35b1915a6fd3d7b55e5ba50e5f54eaf144 | 5,311 | py | Python | dataloader.py | rKrishna97/TRACER | 9f3b76ac40a1780755260b1b3360b5e5adb4301f | [
"Apache-2.0"
] | null | null | null | dataloader.py | rKrishna97/TRACER | 9f3b76ac40a1780755260b1b3360b5e5adb4301f | [
"Apache-2.0"
] | null | null | null | dataloader.py | rKrishna97/TRACER | 9f3b76ac40a1780755260b1b3360b5e5adb4301f | [
"Apache-2.0"
] | null | null | null | import cv2
import glob
import torch
import numpy as np
import albumentations as albu
from pathlib import Path
from albumentations.pytorch.transforms import ToTensorV2
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
class DatasetGenerate(Dataset):
    """Train/val dataset of (image, saliency mask, edge map) triples.

    The three folders are globbed and sorted so index i lines up across
    them (assumes matching filename ordering -- confirm against the data
    layout).  A deterministic 80/20 train/val split is made with ``seed``;
    'train' and 'val' instances built with the same seed see complementary
    subsets.
    """
    def __init__(self, img_folder, gt_folder, edge_folder, phase: str = 'train', transform=None, seed=None):
        self.images = sorted(glob.glob(img_folder + '/*'))
        self.gts = sorted(glob.glob(gt_folder + '/*'))
        self.edges = sorted(glob.glob(edge_folder + '/*'))
        self.transform = transform
        # The three lists are split jointly so pairing is preserved.
        train_images, val_images, train_gts, val_gts, train_edges, val_edges = train_test_split(self.images, self.gts,
                                                                                                self.edges,
                                                                                                test_size=0.2,
                                                                                                random_state=seed)
        if phase == 'train':
            self.images = train_images
            self.gts = train_gts
            self.edges = train_edges
        elif phase == 'val':
            self.images = val_images
            self.gts = val_gts
            self.edges = val_edges
        else:  # Testset
            # any other phase keeps the full, unsplit file lists
            pass
    def __getitem__(self, idx):
        # Image as RGB; mask and edge as single-channel grayscale.
        image = cv2.imread(self.images[idx])
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(self.gts[idx])
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        edge = cv2.imread(self.edges[idx])
        edge = cv2.cvtColor(edge, cv2.COLOR_BGR2GRAY)
        if self.transform is not None:
            # albumentations-style callable; both masks transformed jointly.
            augmented = self.transform(image=image, masks=[mask, edge])
            image = augmented['image']
            mask = np.expand_dims(augmented['masks'][0], axis=0) # (1, H, W)
            mask = mask / 255.0
            edge = np.expand_dims(augmented['masks'][1], axis=0) # (1, H, W)
            edge = edge / 255.0
        # NOTE(review): when transform is None the mask/edge come back raw
        # (H, W, values 0-255), inconsistent with the transformed path;
        # callers appear to always pass a transform -- confirm before relying.
        return image, mask, edge
    def __len__(self):
        return len(self.images)
class Test_DatasetGenerate(Dataset):
    """Inference-time dataset yielding (image, gt path, original size, name)."""
    def __init__(self, img_folder, gt_folder, transform=None):
        self.images = sorted(glob.glob(img_folder + '/*'))
        self.gts = sorted(glob.glob(gt_folder + '/*'))
        self.transform = transform
    def __getitem__(self, idx):
        img_path = self.images[idx]
        image_name = Path(img_path).stem
        # Load as RGB and remember the pre-resize (H, W).
        image = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
        original_size = image.shape[:2]
        if self.transform is not None:
            image = self.transform(image=image)['image']
        gt_image = None if self.gts is None else self.gts[idx]
        return image, gt_image, original_size, image_name
    def __len__(self):
        return len(self.images)
def get_loader(img_folder, gt_folder: str, edge_folder, phase: str, batch_size, shuffle,
               num_workers, transform, seed=None):
    """Build a DataLoader for the requested phase ('train'/'val'/'test')."""
    if phase == 'test':
        dataset = Test_DatasetGenerate(img_folder, gt_folder, transform)
        extra_kwargs = {}
    else:
        dataset = DatasetGenerate(img_folder, gt_folder, edge_folder, phase, transform, seed)
        # Training/validation drop the last incomplete batch.
        extra_kwargs = {'drop_last': True}
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
                             num_workers=num_workers, **extra_kwargs)
    print(f'{phase} length : {len(dataset)}')
    return data_loader
def get_train_augmentation(img_size, ver):
    """Return the training-time albumentations pipeline.

    :param img_size: target square side length for the final Resize.
    :param ver: 1 = resize + normalize only;
        2 = flips/rotations, contrast/gamma/brightness and blur/noise
        augmentation before the resize + normalize.
    :raises ValueError: if ``ver`` is not 1 or 2 (previously this fell
        through and raised UnboundLocalError on ``return transforms``).
    """
    if ver == 1:
        transforms = albu.Compose([
            albu.Resize(img_size, img_size, always_apply=True),
            albu.Normalize([0.485, 0.456, 0.406],
                           [0.229, 0.224, 0.225]),
            ToTensorV2(),
        ])
    elif ver == 2:  # BUG FIX: was a second independent `if`, with no else
        transforms = albu.Compose([
            # Geometric augmentation (one of, 50% of the time).
            albu.OneOf([
                albu.HorizontalFlip(),
                albu.VerticalFlip(),
                albu.RandomRotate90()
            ], p=0.5),
            # Photometric augmentation.
            albu.OneOf([
                albu.RandomContrast(),
                albu.RandomGamma(),
                albu.RandomBrightness(),
            ], p=0.5),
            # Blur / noise augmentation.
            albu.OneOf([
                albu.MotionBlur(blur_limit=5),
                albu.MedianBlur(blur_limit=5),
                albu.GaussianBlur(blur_limit=5),
                albu.GaussNoise(var_limit=(5.0, 20.0)),
            ], p=0.5),
            albu.Resize(img_size, img_size, always_apply=True),
            albu.Normalize([0.485, 0.456, 0.406],
                           [0.229, 0.224, 0.225]),
            ToTensorV2(),
        ])
    else:
        raise ValueError('Unsupported augmentation version: %r (expected 1 or 2)' % (ver,))
    return transforms
def get_test_augmentation(img_size):
    """Return the deterministic eval-time pipeline: resize, normalize, to-tensor."""
    return albu.Compose([
        albu.Resize(img_size, img_size, always_apply=True),
        albu.Normalize([0.485, 0.456, 0.406],
                       [0.229, 0.224, 0.225]),
        ToTensorV2(),
    ])
def gt_to_tensor(gt, device='cuda'):
    """Load a ground-truth mask image and binarize it to a (1, 1, H, W) float tensor.

    :param gt: path to the ground-truth image file.
    :param device: torch device for the result; defaults to 'cuda', matching
        the previously hard-coded behaviour (now parameterized so CPU-only
        evaluation is possible).
    """
    gt = cv2.imread(gt)
    gt = cv2.cvtColor(gt, cv2.COLOR_BGR2GRAY) / 255.0
    gt = np.where(gt > 0.5, 1.0, 0.0)  # hard-threshold to a binary mask
    gt = torch.tensor(gt, device=device, dtype=torch.float32)
    gt = gt.unsqueeze(0).unsqueeze(1)  # add batch and channel dims
    return gt
| 36.376712 | 118 | 0.562606 |
2845890d8762da152b894bbe104dd74f5059a820 | 9,637 | py | Python | system/indy-node-tests/test_libsovtoken.py | devinleighsmith/indy-test-automation | 32d27c8cf8e233d5e365672358752ae3a8b5bf00 | [
"Apache-2.0"
] | 7 | 2019-03-14T10:52:50.000Z | 2021-12-03T00:02:15.000Z | system/indy-node-tests/test_libsovtoken.py | devinleighsmith/indy-test-automation | 32d27c8cf8e233d5e365672358752ae3a8b5bf00 | [
"Apache-2.0"
] | 27 | 2018-10-24T15:28:32.000Z | 2022-03-29T21:30:35.000Z | system/indy-node-tests/test_libsovtoken.py | devinleighsmith/indy-test-automation | 32d27c8cf8e233d5e365672358752ae3a8b5bf00 | [
"Apache-2.0"
] | 22 | 2018-10-03T17:05:23.000Z | 2021-12-03T00:02:04.000Z | from system.utils import *
from indy import pool, did, payment
import pytest
import asyncio
@pytest.mark.skip(reason='ST-580')
@pytest.mark.asyncio
async def test_libsovtoken_acceptance(docker_setup_and_teardown):
await pool.set_protocol_version(2)
await payment_initializer('libsovtoken.so', 'sovtoken_init')
# await payment_initializer('libnullpay.so', 'nullpay_init')
pool_handle, _ = await pool_helper()
wallet_handle, _, _ = await wallet_helper()
libsovtoken_payment_method = 'sov'
libnullpay_payment_method = 'null'
trustee_did1, trustee_vk1 = await did.create_and_store_my_did(wallet_handle, json.dumps(
{"seed": str('000000000000000000000000Trustee1')}))
trustee_did2, trustee_vk2 = await did.create_and_store_my_did(wallet_handle, json.dumps(
{"seed": str('000000000000000000000000Trustee2')}))
trustee_did3, trustee_vk3 = await did.create_and_store_my_did(wallet_handle, json.dumps(
{"seed": str('000000000000000000000000Trustee3')}))
trustee_did4, trustee_vk4 = await did.create_and_store_my_did(wallet_handle, json.dumps(
{"seed": str('000000000000000000000000Trustee4')}))
await send_nym(pool_handle, wallet_handle, trustee_did1, trustee_did2, trustee_vk2, None, 'TRUSTEE')
await send_nym(pool_handle, wallet_handle, trustee_did1, trustee_did3, trustee_vk3, None, 'TRUSTEE')
await send_nym(pool_handle, wallet_handle, trustee_did1, trustee_did4, trustee_vk4, None, 'TRUSTEE')
fees = {'1': 1, '100': 1, '101': 1, '102': 1, '113': 1, '114': 1, '10001': 1}
req = await payment.build_set_txn_fees_req(wallet_handle, trustee_did1, libsovtoken_payment_method,
json.dumps(fees))
req = await ledger.multi_sign_request(wallet_handle, trustee_did1, req)
req = await ledger.multi_sign_request(wallet_handle, trustee_did2, req)
req = await ledger.multi_sign_request(wallet_handle, trustee_did3, req)
req = await ledger.multi_sign_request(wallet_handle, trustee_did4, req)
res2 = json.loads(await ledger.submit_request(pool_handle, req))
print(res2)
assert res2['op'] == 'REPLY'
req = await payment.build_get_txn_fees_req(wallet_handle, trustee_did1, libsovtoken_payment_method)
res3 = json.loads(await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did1, req))
assert res3['result']['fees'] == fees
address1 = await payment.create_payment_address(wallet_handle, libsovtoken_payment_method, json.dumps(
{"seed": str('0000000000000000000000000Wallet3')}))
address2 = await payment.create_payment_address(wallet_handle, libsovtoken_payment_method, json.dumps(
{"seed": str('0000000000000000000000000Wallet4')}))
address3 = await payment.create_payment_address(wallet_handle, libsovtoken_payment_method, json.dumps(
{"seed": str('0000000000000000000000000Wallet5')}))
address4 = await payment.create_payment_address(wallet_handle, libsovtoken_payment_method, json.dumps(
{"seed": str('0000000000000000000000000Wallet6')}))
address5 = await payment.create_payment_address(wallet_handle, libsovtoken_payment_method, json.dumps(
{"seed": str('0000000000000000000000000Wallet7')}))
map1 = {"recipient": address1, "amount": 5}
map2 = {"recipient": address2, "amount": 1}
map3 = {"recipient": address3, "amount": 1}
map4 = {"recipient": address4, "amount": 4}
map5 = {"recipient": address5, "amount": 1}
list1 = [map1, map2, map3, map4, map5]
req, _ = await payment.build_mint_req(wallet_handle, trustee_did1,
json.dumps(list1), None)
req = await ledger.multi_sign_request(wallet_handle, trustee_did1, req)
req = await ledger.multi_sign_request(wallet_handle, trustee_did2, req)
req = await ledger.multi_sign_request(wallet_handle, trustee_did3, req)
req = await ledger.multi_sign_request(wallet_handle, trustee_did4, req)
res0 = json.loads(await ledger.submit_request(pool_handle, req))
print('MINT RESULT: {}'.format(res0))
assert res0['op'] == 'REPLY'
req, _ = await payment.build_get_payment_sources_request(wallet_handle, trustee_did1, address1)
res1 = await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did1, req)
source1 = await payment.parse_get_payment_sources_response(libsovtoken_payment_method, res1)
source1 = json.loads(source1)[0]['source']
l1 = [source1]
req, _ = await payment.build_get_payment_sources_request(wallet_handle, trustee_did1, address2)
res2 = await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did1, req)
source2 = await payment.parse_get_payment_sources_response(libsovtoken_payment_method, res2)
source2 = json.loads(source2)[0]['source']
l2 = [source2]
req, _ = await payment.build_get_payment_sources_request(wallet_handle, trustee_did1, address3)
res3 = await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did1, req)
source3 = await payment.parse_get_payment_sources_response(libsovtoken_payment_method, res3)
source3 = json.loads(source3)[0]['source']
l3 = [source3]
req, _ = await payment.build_get_payment_sources_request(wallet_handle, trustee_did1, address4)
res4 = await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did1, req)
source4 = await payment.parse_get_payment_sources_response(libsovtoken_payment_method, res4)
source4 = json.loads(source4)[0]['source']
l4 = [source4]
req, _ = await payment.build_get_payment_sources_request(wallet_handle, trustee_did1, address5)
res5 = await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did1, req)
source5 = await payment.parse_get_payment_sources_response(libsovtoken_payment_method, res5)
source5 = json.loads(source5)[0]['source']
l5 = [source5]
# send schema, no tokens
_, res = await send_schema(pool_handle, wallet_handle, trustee_did1, random_string(5), '1.0',
json.dumps(["name", "age"]))
assert res['op'] == 'REJECT'
# send schema, enough tokens
schema_id, schema_json = \
await anoncreds.issuer_create_schema(trustee_did1, random_string(5), '1.0', json.dumps(["name", "age"]))
req = await ledger.build_schema_request(trustee_did1, schema_json)
req_with_fees_json, _ = await payment.add_request_fees(wallet_handle, trustee_did1, req, json.dumps(l2), '[]', None)
res5 = json.loads(
await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did1, req_with_fees_json))
assert res5['op'] == 'REPLY'
# get schema
await asyncio.sleep(1)
res6 = await get_schema(pool_handle, wallet_handle, trustee_did1, schema_id)
assert res6['result']['seqNo'] is not None
schema_id, schema_json = await ledger.parse_get_schema_response(json.dumps(res6))
cred_def_id, cred_def_json = \
await anoncreds.issuer_create_and_store_credential_def(wallet_handle, trustee_did1, schema_json,
random_string(5), 'CL',
json.dumps({'support_revocation': False}))
# cred_def incorrect
req = await ledger.build_cred_def_request(trustee_did1, cred_def_json)
req_with_fees_json1, _ =\
await payment.add_request_fees(wallet_handle, trustee_did1, req, json.dumps(l3), '[]', None)
res7 = json.loads(await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did1, req))
assert res7['op'] == 'REJECT'
# cred_def correct
req = await ledger.build_cred_def_request(trustee_did1, cred_def_json)
req_with_fees_json2, _ =\
await payment.add_request_fees(wallet_handle, trustee_did1, req, json.dumps(l5), '[]', None)
res8 = json.loads(
await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did1, req_with_fees_json2))
assert res8['op'] == 'REPLY'
# get cred def
res9 = await get_cred_def(pool_handle, wallet_handle, trustee_did1, cred_def_id)
assert res9['result']['seqNo'] is not None
# send nym with fees
map8 = {"recipient": address1, "amount": 3}
l8 = [map8]
req = await ledger.build_nym_request(trustee_did1, 'V4SGRU86Z58d6TV7PBU111', None, None, None)
req_with_fees_json, _ = await payment.add_request_fees(wallet_handle, trustee_did1, req, json.dumps(l4),
json.dumps(l8), None)
res10 = json.loads(
await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did1, req_with_fees_json))
assert res10['op'] == 'REPLY'
# rotate key with fees
map9 = {"recipient": address1, "amount": 4}
l9 = [map9]
res11 = await did.key_for_local_did(wallet_handle, trustee_did2)
new_key = await did.replace_keys_start(wallet_handle, trustee_did2, json.dumps({}))
req = await ledger.build_nym_request(trustee_did2, trustee_did2, new_key, None, None)
req_with_fees_json, _ = await payment.add_request_fees(wallet_handle, trustee_did2, req, json.dumps(l1),
json.dumps(l9), None)
res_ = json.loads(
await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did2, req_with_fees_json))
assert res_['op'] == 'REPLY'
res__ = await did.replace_keys_apply(wallet_handle, trustee_did2)
assert res__ is None
res12 = await did.key_for_local_did(wallet_handle, trustee_did2)
assert res12 != res11
assert res12 == new_key
| 54.755682 | 120 | 0.715887 |
dd8a9e784350033c192d37a23d28e6c4b3147123 | 958 | py | Python | Exercicios/ex107_108_109_110_111/modulos/moeda/__init__.py | GabrielMendesMelo/CursoEmVideo-Python | 74e3a4df29406a9fc6859e3f3e9b824ee8a997b2 | [
"Unlicense"
] | null | null | null | Exercicios/ex107_108_109_110_111/modulos/moeda/__init__.py | GabrielMendesMelo/CursoEmVideo-Python | 74e3a4df29406a9fc6859e3f3e9b824ee8a997b2 | [
"Unlicense"
] | null | null | null | Exercicios/ex107_108_109_110_111/modulos/moeda/__init__.py | GabrielMendesMelo/CursoEmVideo-Python | 74e3a4df29406a9fc6859e3f3e9b824ee8a997b2 | [
"Unlicense"
] | null | null | null | def moeda(n, moeda="R$"):
return f"{moeda}{n:.2f}".replace(".", ",")
def metade(n=0, formato=False):
res = n / 2
return res if not formato else moeda(res)
def dobro(n=0, formato=False):
res = n * 2
return res if not formato else moeda(res)
def aumentar(n=0, porc=0, formato=False):
res = n + (n * porc / 100)
return res if not formato else moeda(res)
def diminuir(n=0, porc=0, formato=False):
res = n - (n * porc / 100)
return res if not formato else moeda(res)
def resumo(preco=0, aum=0, dim=0, formato=True):
print("-" * 30)
print("RESUMO DO VALOR".center(30))
print("-" * 30)
print(f"Preço analisado: \t{moeda(preco)}")
print(f"Dobro do preço: \t{dobro(preco, formato)}")
print(f"Metade do preço: \t{metade(preco, formato)}")
print(f"{aum:}% de aumento: \t{aumentar(preco, aum, formato)}")
print(f"{dim}% de redução: \t{diminuir(preco, dim, formato)}")
print("_" * 30) | 33.034483 | 68 | 0.604384 |
afcd2bc55ae5cc5cbab330929622a06da091d12e | 1,681 | py | Python | wxbtool/norms/minmax.py | caiyunapp/wxbtool | 7a019e1c0b9d454d07ed5cecbbf5ed00951d3ce7 | [
"MIT"
] | 3 | 2021-03-14T03:15:40.000Z | 2021-09-30T16:32:52.000Z | wxbtool/norms/minmax.py | caiyunapp/wxbtool | 7a019e1c0b9d454d07ed5cecbbf5ed00951d3ce7 | [
"MIT"
] | null | null | null | wxbtool/norms/minmax.py | caiyunapp/wxbtool | 7a019e1c0b9d454d07ed5cecbbf5ed00951d3ce7 | [
"MIT"
] | 1 | 2021-03-14T03:15:40.000Z | 2021-03-14T03:15:40.000Z | # -*- coding: utf-8 -*-
def norm_gpt(x):
min, max = -10000, 500000
return (x - min) / (max - min)
def norm_tmp(x):
min, max = 173, 373
return (x - min) / (max - min)
def norm_shm(x):
x = x * (x > 0)
min, max = 0, 0.1
return (x - min) / (max - min)
def norm_rhm(x):
x = x * (x > 0)
min, max = 0.0, 200.0
return (x - min) / (max - min)
def norm_u(x):
min, max = -250.0, 250.0
return (x - min) / (max - min)
def norm_v(x):
min, max = -250.0, 250.0
return (x - min) / (max - min)
def norm_tisr(tisr):
return tisr / 5500000.0
def norm_tcc(tcc):
return tcc
def denorm_gpt(x):
return 510000 * x - 10000
def denorm_tmp(x):
return 173 + 200.0 * x
def denorm_shm(x):
return 0.1 * x
def denorm_rhm(x):
return 200.0 * x
def denorm_u(x):
return x * 500 - 250.0
def denorm_v(x):
return x * 500 - 250.0
def denorm_tisr(tisr):
return tisr * 5500000.0
def denorm_tcc(tcc):
return tcc
normalizors = {
'geopotential': norm_gpt,
'temperature': norm_tmp,
'specific_humidity': norm_shm,
'relative_humidity': norm_rhm,
'u_component_of_wind': norm_u,
'v_component_of_wind': norm_v,
'toa_incident_solar_radiation': norm_tisr,
'total_cloud_cover': norm_tcc,
'2m_temperature': norm_tmp,
}
denormalizors = {
'geopotential': denorm_gpt,
'temperature': denorm_tmp,
'specific_humidity': denorm_shm,
'relative_humidity': denorm_rhm,
'u_component_of_wind': denorm_u,
'v_component_of_wind': denorm_v,
'toa_incident_solar_radiation': denorm_tisr,
'total_cloud_cover': denorm_tcc,
'2m_temperature': denorm_tmp,
}
| 16.81 | 48 | 0.610351 |
d4aa5e35acfb36cc9569df24fd69c5b6a5e909ce | 5,420 | py | Python | ml_lineage_helper/utils.py | aws-samples/ml-lineage-helper | 3562fa35a5480e7f0a06c6de55a26407774a9edb | [
"Apache-2.0"
] | 7 | 2021-09-28T13:31:31.000Z | 2022-03-26T17:17:07.000Z | ml_lineage_helper/utils.py | aws-samples/ml-lineage-helper | 3562fa35a5480e7f0a06c6de55a26407774a9edb | [
"Apache-2.0"
] | null | null | null | ml_lineage_helper/utils.py | aws-samples/ml-lineage-helper | 3562fa35a5480e7f0a06c6de55a26407774a9edb | [
"Apache-2.0"
] | null | null | null | import sys
import os
import subprocess
import numpy as np
import sagemaker
import boto3
from botocore.config import Config
class StatusIndicator:
def __init__(self):
self.previous_status = None
self.need_newline = False
def update(self, status):
if self.previous_status != status:
if self.need_newline:
sys.stdout.write("\n")
sys.stdout.write(status + " ")
self.need_newline = True
self.previous_status = status
else:
sys.stdout.write(".")
self.need_newline = True
sys.stdout.flush()
def end(self):
if self.need_newline:
sys.stdout.write("\n")
class SageMakerSession:
"""Custom SageMakerSession class with sensible default properties"""
# Default constructor
def __init__(
self,
bucket_name=None,
region="us-east-1",
role_name=None,
aws_profile_name="default",
):
self.bucket_name = bucket_name
self.region = region
self.role_name = role_name
self.aws_profile_name = aws_profile_name
self.get_sagemaker_session()
def get_sagemaker_session(self):
try:
# You're using a SageMaker notebook
self.role_arn = sagemaker.get_execution_role()
self.session = sagemaker.Session()
self.session.config = Config(
connect_timeout=5, read_timeout=60, retries={"max_attempts": 20}
)
self.bucket_name = self.session.default_bucket()
self.bucket_s3_uri = f"s3://{self.bucket_name}"
self.region = self.session.boto_region_name
except ValueError:
# You're using a notebook somewhere else
print("Setting role and SageMaker session manually...")
iam = boto3.client("iam", region_name=self.region)
sagemaker_client = boto3.client(
"sagemaker",
region_name=self.region,
config=Config(
connect_timeout=5, read_timeout=60, retries={"max_attempts": 20}
),
)
self.role_arn = iam.get_role(RoleName=self.role_name)["Role"]["Arn"]
boto3.setup_default_session(
region_name=self.region, profile_name=self.aws_profile_name
)
self.session = sagemaker.Session(
sagemaker_client=sagemaker_client, default_bucket=self.bucket_name
)
self.bucket_s3_uri = f"s3://{self.bucket_name}"
def upload_df_to_s3(df, s3_uri, sagemaker_session, csv=True, header=True):
"""Save a Pandas DataFrame as CSV and upload to S3
Args:
df (pandas.DataFrame): Pandas DataFrame
s3_uri (str): S3 URI of where you want the CSV to be stored
sagemaker_session (SageMakerSession): Custom SageMakerSession
csv (bool): If false, DataFrame will be written as numpy file
header (bool): If false, the header of the dataframe will not be written
"""
data_dir = os.path.join(os.getcwd(), "data")
os.makedirs(data_dir, exist_ok=True)
s3_client = boto3.client("s3", region_name=sagemaker_session.region)
s3_uri_split = s3_uri.split("/")
file_name = s3_uri_split[-1]
bucket = s3_uri_split[2]
prefix = ("/").join(s3_uri_split[3:-1])
if csv:
df.to_csv(f"./data/{file_name}", index=False)
else:
np.save(f"./data/{file_name}", df.to_numpy())
s3_client.upload_file(
Filename=f"data/{file_name}", Bucket=bucket, Key=f"{prefix}/{file_name}"
)
print(f"Uploaded {file_name} to {s3_uri}.")
def get_repo_link(cwd: str, entry_point_script_path: str, processing_code=True):
"""Construct git url of the processing or training code
Args:
cwd (str): Current working directory (e.g. os.cwd())
entry_point_script_path (str): This is relative to your cwd (e.g. code/processing.py)
processing_code (bool): (If True, repo link will be added to processing code artifact propert, else will be added to training code artifact property)
Returns:
repo_link (str): The git url of the processing or training code
"""
result = subprocess.run(["git", "remote", "-v"], capture_output=True, text=True)
output = result.stdout
if "git@ssh" in output:
git_https = (
output.split("\n")[0]
.split("\t")[1][:-8]
.replace(":", "/")
.replace("git@ssh.", "https://")
.split(".git")[0]
)
elif "git@" in output:
git_https = (
output.split("\n")[0]
.split("\t")[1][:-8]
.replace(":", "/")
.replace("git@", "https://")
.split(".git")[0]
)
else:
git_https = output.split("\n")[0].split("\t")[1][:-8].split(".git")[0]
repo_name = git_https.split("/")[-1]
result = subprocess.run(["git", "branch"], capture_output=True, text=True)
output = result.stdout
branch = output.strip()[2:]
cwd_list = cwd.split("/")
repo_name_index = cwd_list.index(repo_name)
relative_path = "/".join(cwd_list[repo_name_index + 1 :])
repo_link = f"{git_https}/blob/{branch}/{relative_path}/{entry_point_script_path}"
if processing_code:
return ("processing_code", repo_link)
return ("training_code", repo_link)
| 33.875 | 157 | 0.602214 |
a2686e292fa363821ef57f933e461d28b76c428f | 268 | py | Python | regionalSwordFernDieOff/import/extract.py | paul-shannon/annotatedMap | f4181c970886561950f8e1242fd18e9f08b5c6ba | [
"MIT"
] | null | null | null | regionalSwordFernDieOff/import/extract.py | paul-shannon/annotatedMap | f4181c970886561950f8e1242fd18e9f08b5c6ba | [
"MIT"
] | null | null | null | regionalSwordFernDieOff/import/extract.py | paul-shannon/annotatedMap | f4181c970886561950f8e1242fd18e9f08b5c6ba | [
"MIT"
] | null | null | null | # status: abandoned for now
from xml.etree import ElementTree as etree
filename = "doc.kml"
xmlDoc = etree.parse(filename)
len(xmlDoc.findall("Folder")) # /Placemark"))
len(placeMarks)
from pykml import parser
with open(filename) as f:
doc.parser.parse(f)
| 20.615385 | 46 | 0.723881 |
10690bfbc499410d860d28dbfec50bfeb70e05cf | 4,831 | py | Python | abcpy/summaryselections.py | shoshijak/abcpy | ad12808782fa72c0428122fc659fd3ff22d3e854 | [
"BSD-3-Clause-Clear"
] | null | null | null | abcpy/summaryselections.py | shoshijak/abcpy | ad12808782fa72c0428122fc659fd3ff22d3e854 | [
"BSD-3-Clause-Clear"
] | null | null | null | abcpy/summaryselections.py | shoshijak/abcpy | ad12808782fa72c0428122fc659fd3ff22d3e854 | [
"BSD-3-Clause-Clear"
] | null | null | null | from abc import ABCMeta, abstractmethod
from abcpy.graphtools import GraphTools
from abcpy.acceptedparametersmanager import *
import numpy as np
from sklearn import linear_model
class Summaryselections(metaclass=ABCMeta):
"""This abstract base class defines a way to choose the summary statistics.
"""
@abstractmethod
def __init__(self, model, statistics_calc, backend, n_samples=1000, seed=None):
"""The constructor of a sub-class must accept a non-optional model, statistics calculator and
backend which are stored to self.model, self.statistics_calc and self.backend. Further it
accepts two optional parameters n_samples and seed defining the number of simulated dataset
used for the pilot to decide the summary statistics and the integer to initialize the random
number generator.
Parameters
----------
model: abcpy.models.Model
Model object that conforms to the Model class.
statistics_cal: abcpy.statistics.Statistics
Statistics object that conforms to the Statistics class.
backend: abcpy.backends.Backend
Backend object that conforms to the Backend class.
n_samples: int, optional
The number of (parameter, simulated data) tuple generated to learn the summary statistics in pilot step.
The default value is 1000.
n_samples_per_param: int, optional
Number of data points in each simulated data set.
seed: integer, optional
Optional initial seed for the random number generator. The default value is generated randomly.
"""
raise NotImplementedError
def __getstate__(self):
state = self.__dict__.copy()
del state['backend']
return state
@abstractmethod
def transformation(self, statistics):
raise NotImplementedError
class Semiautomatic(Summaryselections, GraphTools):
"""This class implements the semi automatic summary statistics choice described in Fearnhead and Prangle [1].
[1] Fearnhead P., Prangle D. 2012. Constructing summary statistics for approximate
Bayesian computation: semi-automatic approximate Bayesian computation. J. Roy. Stat. Soc. B 74:419–474.
"""
def __init__(self, model, statistics_calc, backend, n_samples=1000, n_samples_per_param = 1, seed=None):
self.model = model
self.statistics_calc = statistics_calc
self.backend = backend
self.rng = np.random.RandomState(seed)
self.n_samples_per_param = n_samples_per_param
# An object managing the bds objects
self.accepted_parameters_manager = AcceptedParametersManager(self.model)
self.accepted_parameters_manager.broadcast(self.backend, [])
# main algorithm
seed_arr = self.rng.randint(1, n_samples * n_samples, size=n_samples, dtype=np.int32)
rng_arr = np.array([np.random.RandomState(seed) for seed in seed_arr])
rng_pds = self.backend.parallelize(rng_arr)
sample_parameters_statistics_pds = self.backend.map(self._sample_parameter_statistics, rng_pds)
sample_parameters_and_statistics = self.backend.collect(sample_parameters_statistics_pds)
sample_parameters, sample_statistics = [list(t) for t in zip(*sample_parameters_and_statistics)]
sample_parameters = np.array(sample_parameters)
sample_statistics = np.concatenate(sample_statistics)
self.coefficients_learnt = np.zeros(shape=(sample_parameters.shape[1], sample_statistics.shape[1]))
regr = linear_model.LinearRegression(fit_intercept=True)
for ind in range(sample_parameters.shape[1]):
regr.fit(sample_statistics, sample_parameters[:, ind])
self.coefficients_learnt[ind, :] = regr.coef_
def transformation(self, statistics):
if not statistics.shape[1] == self.coefficients_learnt.shape[1]:
raise ValueError('Mismatch in dimension of summary statistics')
return np.dot(statistics, np.transpose(self.coefficients_learnt))
def _sample_parameter_statistics(self, rng=np.random.RandomState()):
"""
Samples a single model parameter and simulates from it until
distance between simulated outcome and the observation is
smaller than eplison.
Parameters
----------
seed: int
value of a seed to be used for reseeding
Returns
-------
np.array
accepted parameter
"""
self.sample_from_prior(rng=rng)
parameter = self.get_parameters()
y_sim = self.simulate(self.n_samples_per_param, rng=rng)
if y_sim is not None:
statistics = self.statistics_calc.statistics(y_sim)
return (parameter, statistics)
| 42.752212 | 117 | 0.694266 |
d59020eac2f967dc1fb3817ccd9858b0c17a29b6 | 2,274 | py | Python | examples/service_migration/server.py | fabrizio-granelli/comnetsemu | 598a4347076c423c4887d4243f49d3c94fe1d075 | [
"MIT"
] | 11 | 2020-10-29T13:13:09.000Z | 2022-01-28T10:30:43.000Z | examples/service_migration/server.py | fabrizio-granelli/comnetsemu | 598a4347076c423c4887d4243f49d3c94fe1d075 | [
"MIT"
] | 12 | 2020-11-25T11:23:18.000Z | 2022-03-28T12:36:49.000Z | examples/service_migration/server.py | fabrizio-granelli/comnetsemu | 598a4347076c423c4887d4243f49d3c94fe1d075 | [
"MIT"
] | 12 | 2020-11-09T03:27:41.000Z | 2022-01-17T11:37:47.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
About: Simple server for counting.
"""
import argparse
import signal
import socket
import time
INTERNAL_IP_H2 = "192.168.0.12"
INTERNAL_IP_H3 = "192.168.0.13"
INTERNAL_PORT = 9999
SERVICE_IP = "10.0.0.12"
SERVICE_PORT = 8888
HOST_NAME = None
def recv_state(host_name):
"""Get the latest counter state from the internal
network between h2 and h3.
"""
if host_name == "h2":
recv_ip = INTERNAL_IP_H2
else:
recv_ip = INTERNAL_IP_H3
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((recv_ip, INTERNAL_PORT))
state, _ = sock.recvfrom(1024)
state = int(state.decode("utf-8"))
return state
def run(host_name, get_state=False):
"""Run the couting service and handle sigterm signal."""
counter = 0
if get_state:
counter = recv_state(host_name)
print("Get the init counter state: {}".format(counter))
# Use closure to avoid using a global variable for state.
def term_signal_handler(signum, frame):
# Check if the server is running on the host 2.
if host_name == "h2":
dest_ip = INTERNAL_IP_H3
else:
dest_ip = INTERNAL_IP_H2
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Send duplicated packets to avoid losses.
for _ in range(6):
sock.sendto(str(counter).encode("utf-8"), (dest_ip, INTERNAL_PORT))
sock.close()
signal.signal(signal.SIGTERM, term_signal_handler)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((SERVICE_IP, SERVICE_PORT))
while True:
# Block here waiting for data input.
_, addr = sock.recvfrom(1024)
counter += 1
sock.sendto(str(counter).encode("utf-8"), addr)
time.sleep(0.5)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Simple counting server.")
parser.add_argument(
"hostname",
type=str,
help="The name of the host on which the server is deployed.",
)
parser.add_argument(
"--get_state",
action="store_true",
help="Get state from network.",
)
args = parser.parse_args()
run(args.hostname, args.get_state)
| 26.44186 | 79 | 0.641161 |
1191606d48b1a0c6add0b6b61e9a39cc1e7e4176 | 4,706 | py | Python | openpnm/metrics/_porosimetry.py | lixuekai2001/OpenPNM | 9026f0fed427d37f4caf1a79e4a7684490d52cf6 | [
"MIT"
] | 1 | 2021-12-06T03:20:55.000Z | 2021-12-06T03:20:55.000Z | openpnm/metrics/_porosimetry.py | lixuekai2001/OpenPNM | 9026f0fed427d37f4caf1a79e4a7684490d52cf6 | [
"MIT"
] | 2 | 2020-06-26T19:58:23.000Z | 2021-12-14T07:16:41.000Z | openpnm/metrics/_porosimetry.py | lixuekai2001/OpenPNM | 9026f0fed427d37f4caf1a79e4a7684490d52cf6 | [
"MIT"
] | null | null | null | import numpy as np
from openpnm.algorithms import OrdinaryPercolation
from openpnm.utils import logging, SettingsAttr, Docorator
docstr = Docorator()
logger = logging.getLogger(__name__)
__all__ = ['Porosimetry']
@docstr.dedent
class PorosimetrySettings:
r"""
%(OrdinaryPercolationSettings.parameters)s
pore_partial_filling : str
The name of the model used to determine partial pore filling as
a function of applied pressure.
throat_partial_filling : str
The name of the model used to determine partial throat filling as
a function of applied pressure.
"""
quantity = 'pore.pressure'
pore_partial_filling = ''
throat_partial_filling = ''
class Porosimetry(OrdinaryPercolation):
r"""
Simulates mercury instrustion porosimetry using ordinary percolation
Parameters
----------
network : GenericNetwork
The Network upon which this simulation should be run
name : str, optional
An identifying name for the object. If none is given then one is
generated.
Notes
-----
Mercury intrusion progresses by applying increasing pressures to the
invading mercury phase, and measuring the resultant volume of invading
fluid. This corresponds directly to an ordinary percolation process,
with access limitations enabled.
See Also
--------
OrdinaryPercolation
"""
def __init__(self, phase, settings=None, **kwargs):
self.settings = SettingsAttr(PorosimetrySettings, settings)
super().__init__(phase=phase, settings=self.settings, **kwargs)
# Use the reset method to initialize all arrays
self.reset()
self.settings['phase'] = phase.name
def set_partial_filling(self, propname):
r"""
Define which pore filling model to apply.
Parameters
----------
propname : str
Dictionary key on the physics object(s) containing the pore
filling model(s) to apply.
Notes
-----
It is assumed that these models are functions of the `quantity`
specified in the algorithms settings. This values is applied to the
corresponding phase just prior to regenerating the given pore-scale
model(s).
"""
if propname.startswith('pore'):
self.settings['pore_partial_filling'] = propname
if propname.startswith('throat'):
self.settings['throat_partial_filling'] = propname
def run(self, points=25, start=None, stop=None):
if self.settings['mode'] != 'bond':
raise Exception('Porosimetry must be run as bond percolation')
if self.settings['access_limited'] is False:
raise Exception('Porosimetry must be run as access limited')
super().run(points=points, start=start, stop=stop)
run.__doc__ = OrdinaryPercolation.run.__doc__
def results(self, Pc=None):
r"""
"""
if Pc is None:
p_inv = self['pore.invasion_pressure']
t_inv = self['throat.invasion_pressure']
results = {'pore.invasion_pressure': p_inv,
'throat.invasion_pressure': t_inv}
else:
p_inv, t_inv = super().results(Pc).values()
phase = self.project[self.settings.phase]
quantity = self.settings['quantity'].split('.')[-1]
lpf = np.array([1])
if self.settings['pore_partial_filling']:
# Set pressure on phase to current capillary pressure
phase['pore.'+quantity] = Pc
# Regenerate corresponding physics model
for phys in self.project.find_physics(phase=phase):
phys.regenerate_models(self.settings['pore_partial_filling'])
# Fetch partial filling fraction from phase object (0->1)
lpf = phase[self.settings['pore_partial_filling']]
# Calculate filled throat volumes
ltf = np.array([1])
if self.settings['throat_partial_filling']:
# Set pressure on phase to current capillary pressure
phase['throat.'+quantity] = Pc
# Regenerate corresponding physics model
for phys in self.project.find_physics(phase=phase):
phys.regenerate_models(self.settings['throat_partial_filling'])
# Fetch partial filling fraction from phase object (0->1)
ltf = phase[self.settings['throat_partial_filling']]
p_inv = p_inv*lpf
t_inv = t_inv*ltf
results = {'pore.occupancy': p_inv, 'throat.occupancy': t_inv}
return results
| 37.349206 | 83 | 0.630684 |
e9e8842d1381ccb2226a36ac113d7a65a7e0d3e2 | 8,659 | py | Python | plugins/callback.py | Pumudu-railway/MusicPlayer | cdce0966e1a599b02c7b95afd0d851cc70f03fb5 | [
"MIT"
] | null | null | null | plugins/callback.py | Pumudu-railway/MusicPlayer | cdce0966e1a599b02c7b95afd0d851cc70f03fb5 | [
"MIT"
] | null | null | null | plugins/callback.py | Pumudu-railway/MusicPlayer | cdce0966e1a599b02c7b95afd0d851cc70f03fb5 | [
"MIT"
] | null | null | null | #MIT License
#Copyright (c) 2021 SUBIN
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery
from pyrogram import Client, emoji
from utils import mp, playlist
from config import Config
HELP = """
<b>Add the bot and User account in your Group with admin rights.
Start a VoiceChat
Use /play <song name> or use /play as a reply to an audio file or youtube link.
You can also use /splay <song name> to play a song from JioSaavn or /cplay <channel username or channel id> to play music from a telegram channel.</b>
**Common Commands**:
**/play** Reply to an audio file or YouTube link to play it or use /play <song name>.
**/splay** Play music from Jio Saavn, Use /splay <song name>
**/player** Show current playing song.
**/help** Show help for commands
**/playlist** Shows the playlist.
**Admin Commands**:
**/skip** [n] ... Skip current or n where n >= 2
**/join** Join voice chat.
**/leave** Leave current voice chat
**/shuffle** Shuffle Playlist.
**/cplay** Play music from a channel's music files.
**/vc** Check which VC is joined.
**/stop** Stop playing.
**/radio** Start Radio.
**/stopradio** Stops Radio Stream.
**/clearplaylist** Clear the playlist.
**/replay** Play from the beginning.
**/clean** Remove unused RAW PCM files.
**/pause** Pause playing.
**/resume** Resume playing.
**/volume** Change volume(0-200).
**/mute** Mute in VC.
**/unmute** Unmute in VC.
**/restart** Update and restarts the Bot.
"""
def _build_keyboard(middle_cb):
    """Player control keyboard; `middle_cb` is the callback data for the
    center (play/pause toggle) button — "pause" or "resume"."""
    return InlineKeyboardMarkup(
        [
            [
                InlineKeyboardButton("🔄", callback_data="replay"),
                InlineKeyboardButton("⏯", callback_data=middle_cb),
                InlineKeyboardButton("⏩", callback_data="skip")
            ],
        ]
    )


def _format_playlist():
    """Render the shared `playlist` as Markdown, truncated to 25 entries.

    This text was previously duplicated four times inside cb_handler; the
    two truncation variants (with/without trailing newline per entry) are
    preserved exactly.
    """
    if not playlist:
        return f"{emoji.NO_ENTRY} Empty Playlist"
    if len(playlist) >= 25:
        tplaylist = playlist[:25]
        pl = f"Listing first 25 songs of total {len(playlist)} songs.\n"
        pl += f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
            f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}"
            for i, x in enumerate(tplaylist)
        ])
    else:
        pl = f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
            f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}\n"
            for i, x in enumerate(playlist)
        ])
    return pl


@Client.on_callback_query()
async def cb_handler(client: Client, query: CallbackQuery):
    """Handle inline-button presses: replay / pause / resume / skip / help.

    Non-admins of Config.CHAT may only use the "help" button; any other
    button press is answered with a joke alert and ignored.
    """
    admins = await mp.get_admins(Config.CHAT)
    if query.from_user.id not in admins and query.data != "help":
        await query.answer(
            "😒 Played Joji.mp3",
            show_alert=True
        )
        return
    else:
        await query.answer()
    if query.data == "replay":
        if not playlist:
            return
        mp.group_call.restart_playout()
        pl = _format_playlist()
        await query.edit_message_text(
            f"{pl}",
            parse_mode="Markdown",
            reply_markup=_build_keyboard("pause")
        )
    elif query.data == "pause":
        if not playlist:
            return
        mp.group_call.pause_playout()
        pl = _format_playlist()
        # Fixed: a stray "," used to be rendered inside the message text
        # (the original f-string ended with "{pl},").
        await query.edit_message_text(
            f"{emoji.PLAY_OR_PAUSE_BUTTON} Paused\n\n{pl}",
            disable_web_page_preview=True,
            reply_markup=_build_keyboard("resume")
        )
    elif query.data == "resume":
        if not playlist:
            return
        mp.group_call.resume_playout()
        pl = _format_playlist()
        await query.edit_message_text(
            f"{emoji.PLAY_OR_PAUSE_BUTTON} Resumed\n\n{pl}",
            disable_web_page_preview=True,
            reply_markup=_build_keyboard("pause")
        )
    elif query.data == "skip":
        if not playlist:
            return
        await mp.skip_current_playing()
        pl = _format_playlist()
        try:
            await query.edit_message_text(
                f"{emoji.PLAY_OR_PAUSE_BUTTON} Skipped\n\n{pl}",
                disable_web_page_preview=True,
                reply_markup=_build_keyboard("pause")
            )
        except Exception:
            # Editing can fail (e.g. MessageNotModified when the text is
            # unchanged); previously a bare `except:` swallowed everything
            # including KeyboardInterrupt.
            pass
    elif query.data == "help":
        buttons = [
            [
                InlineKeyboardButton('📲 Updates', url='https://t.me/TheKumikosetsuko'),
                InlineKeyboardButton('💬 Support Chat', url='https://t.me/KumikoSetsuko'),
            ]
        ]
        reply_markup = InlineKeyboardMarkup(buttons)
        await query.edit_message_text(
            HELP,
            reply_markup=reply_markup
        )
| 38.314159 | 150 | 0.519229 |
593150126c8da01b179bbd766ce7fb07fdacc423 | 4,303 | py | Python | test/test.py | FredHappyface/StegStash | c5e3f0d2df5ccbbb270d1e7f79c439b8be126535 | [
"MIT"
] | 1 | 2021-02-07T07:03:43.000Z | 2021-02-07T07:03:43.000Z | test/test.py | FredHappyface/StegStash | c5e3f0d2df5ccbbb270d1e7f79c439b8be126535 | [
"MIT"
] | null | null | null | test/test.py | FredHappyface/StegStash | c5e3f0d2df5ccbbb270d1e7f79c439b8be126535 | [
"MIT"
] | null | null | null | """ tests """
# Smoke-test script for the stegstash package.
# Exercises every steganography backend end to end (encode into a fixture
# carrier under this directory, decode it back, print the result for manual
# inspection).  Not a unit-test suite: it writes output files next to this
# script and relies on fixtures such as originalImage.png / originalSound.wav.
import sys
import os
from pathlib import Path
THISDIR = str(Path(__file__).resolve().parent)
# Make the repository root importable so `stegstash` resolves without install.
sys.path.insert(0, os.path.dirname(THISDIR))
from stegstash import (imagelsb, fileappend, soundlsb, homoglyphs, msoffice, odf, zerowidth)
# Simple imagelsb: hide/recover a text message in image least-significant bits.
print("\n# simple imagelsb")
imagelsb.simpleEncode(THISDIR + "/originalImage.png",
THISDIR + "/lsbSimpleEncode.png", "hello world from lsbSimpleEncode!")
print(imagelsb.simpleDecode(THISDIR + "/lsbSimpleEncode.png"))
imagelsb.visual(THISDIR + "/originalImage.png", THISDIR + "/imagelsbVog.png")
imagelsb.visual(THISDIR + "/lsbSimpleEncode.png", THISDIR + "/imagelsbVen.png")
# Hide image simplelsb: embed a whole file (an image) rather than a string.
print("\n# hide image simplelsb")
imagelsb.simpleEncode(THISDIR + "/originalImage.png",
THISDIR + "/lsbImageEncode.png", open(THISDIR + "/hideImage.png", "rb"))
imagelsb.simpleDecode(THISDIR + "/lsbImageEncode.png", False,
open(THISDIR + "/hideImageRecovered.png", "wb"))
# imagelsb with key/password ("test", "pass") arguments.
print("\n# imagelsb")
imagelsb.encode(THISDIR + "/originalImage.png", THISDIR + "/lsbEncode.png",
"hello world from lsbEncode!", "test", "pass")
print(imagelsb.decode(THISDIR + "/lsbEncode.png", "test", "pass"))
# fileappend: data appended after the carrier's end-of-file marker.
print("\n# fileappend")
fileappend.encode(THISDIR + "/originalImage.png",
THISDIR + "/appendEncode.png", "hello world from appendEncode!", "pass")
print(fileappend.decode(THISDIR + "/appendEncode.png", "pass"))
print(fileappend.detectSteg(THISDIR + "/originalImage.png"))
print(fileappend.detectSteg(THISDIR + "/appendEncode.png"))
# soundlsb: least-significant-bit embedding in WAV samples.
print("\n# soundlsb")
soundlsb.simpleEncode(THISDIR + "/originalSound.wav",
THISDIR + "/simpleEncode.wav", "hello world from soundLsbSimpleEncode!")
print(soundlsb.simpleDecode(THISDIR + "/simpleEncode.wav"))
soundlsb.encode(THISDIR + "/originalSound.wav", THISDIR + "/encode.wav",
"hello world from soundLsbEncode!", "test", "pass")
print(soundlsb.decode(THISDIR + "/encode.wav", "test", "pass"))
# homoglyphs: visually-identical Unicode substitutions in text.
print("\n# homoglyphs")
homoglyphs.simpleEncode(THISDIR + "/originalText.txt",
THISDIR + "/simpleEncode.txt", "glyph")
print(homoglyphs.simpleDecode(THISDIR + "/simpleEncode.txt"))
homoglyphs.encode(THISDIR + "/originalText.txt",
THISDIR + "/encode.txt", "glyph", "test", "pass")
print(homoglyphs.decode(THISDIR + "/encode.txt", "test", "pass"))
print(homoglyphs.detectSteg(THISDIR + "/originalText.txt"))
print(homoglyphs.detectSteg(THISDIR + "/encode.txt"))
homoglyphs.visual(THISDIR + "/originalText.txt", THISDIR + "/homoglyphsVog.png")
homoglyphs.visual(THISDIR + "/encode.txt", THISDIR + "/homoglyphsVen.png")
# MsOffice: hide data in docx comments / embedded files.
print("\n# msoffice")
msoffice.encodeComment(THISDIR + "/originalDoc.docx",
THISDIR + "/encodeComment.docx", "hello world from encodeComment!")
print(msoffice.decodeComment(THISDIR + "/encodeComment.docx"))
msoffice.encodeFile(THISDIR + "/originalDoc.docx",
THISDIR + "/encodeFile.docx", open(THISDIR + "/hideImage.png", "rb"), password="pass")
msoffice.decodeFile(THISDIR + "/encodeFile.docx", "pass",
open(THISDIR + "/docxImageRecovered.png", "wb"))
print(msoffice.detectSteg(THISDIR + "/originalDoc.docx"))
print(msoffice.detectSteg(THISDIR + "/encodeFile.docx"))
# odt: same idea for OpenDocument files.
print("\n# odt")
odf.encodeComment(THISDIR + "/originalDoc.odt",
THISDIR + "/encodeComment.odt", "hello world from encodeComment odt!")
print(odf.decodeComment(THISDIR + "/encodeComment.odt"))
odf.encodeFile(THISDIR + "/originalDoc.odt",
THISDIR + "/encodeFile.odt", open(THISDIR + "/hideImage.png", "rb"), password="pass")
odf.decodeFile(THISDIR + "/encodeFile.odt", "pass",
open(THISDIR + "/odtImageRecovered.png", "wb"))
print(odf.detectSteg(THISDIR + "/originalDoc.odt"))
print(odf.detectSteg(THISDIR + "/encodeFile.odt"))
# zerowidth: zero-width Unicode characters interleaved in text.
print("\n# zerowidth")
zerowidth.simpleEncode(THISDIR + "/originalText.txt",
THISDIR + "/simpleEncodeZW.txt", "zerowidth")
print(zerowidth.simpleDecode(THISDIR + "/simpleEncodeZW.txt"))
zerowidth.encode(THISDIR + "/originalText.txt",
THISDIR + "/encodeZW.txt", "zerowidth", "test", "pass")
print(zerowidth.decode(THISDIR + "/encodeZW.txt", "test", "pass"))
print(zerowidth.detectSteg(THISDIR + "/originalText.txt"))
print(zerowidth.detectSteg(THISDIR + "/simpleEncodeZW.txt"))
zerowidth.visual(THISDIR + "/originalText.txt", THISDIR + "/zerowidthVog.png")
zerowidth.visual(THISDIR + "/encodeZW.txt", THISDIR + "/zerowidthVen.png")
| 44.360825 | 92 | 0.736463 |
b7942be7a8f9be78e13585bde3962085a009b5c8 | 21,811 | py | Python | src/lib/datasets/dataset/jde_kd.py | wisematch/KDMOT | 03be0a148fc5d5a43c13a0427c429305b92e6838 | [
"MIT"
] | null | null | null | src/lib/datasets/dataset/jde_kd.py | wisematch/KDMOT | 03be0a148fc5d5a43c13a0427c429305b92e6838 | [
"MIT"
] | null | null | null | src/lib/datasets/dataset/jde_kd.py | wisematch/KDMOT | 03be0a148fc5d5a43c13a0427c429305b92e6838 | [
"MIT"
] | null | null | null | import glob
import math
import os
import os.path as osp
import random
import time
from collections import OrderedDict
import cv2
import json
import numpy as np
import torch
import copy
from torch.utils.data import Dataset
from torchvision.transforms import transforms as T
from cython_bbox import bbox_overlaps as bbox_ious
from opts import opts
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from utils.utils import xyxy2xywh, generate_anchors, xywh2xyxy, encode_delta
class LoadImages:  # for inference
    """Iterate or index over image files, yielding
    (path, letterboxed CHW RGB float32 image in [0, 1], original BGR image).

    Accepts either a directory (filtered to common image extensions) or a
    single image file path.
    """

    def __init__(self, path, img_size=(1088, 608)):
        if os.path.isdir(path):
            image_format = ['.jpg', '.jpeg', '.png', '.tif']
            self.files = sorted(glob.glob('%s/*.*' % path))
            self.files = list(filter(lambda x: os.path.splitext(x)[1].lower() in image_format, self.files))
        elif os.path.isfile(path):
            self.files = [path]
        self.nF = len(self.files) # number of image files
        self.width = img_size[0]
        self.height = img_size[1]
        self.count = 0
        assert self.nF > 0, 'No images found in ' + path

    def _load(self, img_path):
        """Shared loader: read, letterbox, BGR->RGB, HWC->CHW, scale to [0,1].

        This logic was previously duplicated verbatim in __next__ and
        __getitem__.
        """
        img0 = cv2.imread(img_path)  # BGR
        assert img0 is not None, 'Failed to load ' + img_path
        # Padded resize to the network input size
        img, _, _, _ = letterbox(img0, height=self.height, width=self.width)
        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        img /= 255.0
        return img, img0

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if self.count == self.nF:
            raise StopIteration
        img_path = self.files[self.count]
        img, img0 = self._load(img_path)
        return img_path, img, img0

    def __getitem__(self, idx):
        # Wraps around so any integer index is valid.
        idx = idx % self.nF
        img_path = self.files[idx]
        img, img0 = self._load(img_path)
        return img_path, img, img0

    def __len__(self):
        return self.nF # number of files
class LoadVideo: # for inference
    """Iterate over frames of a video file, yielding
    (frame index, letterboxed CHW RGB float32 image in [0, 1], resized BGR frame)."""
    def __init__(self, path, img_size=(1088, 608)):
        self.cap = cv2.VideoCapture(path)
        self.frame_rate = int(round(self.cap.get(cv2.CAP_PROP_FPS)))
        # Native video width/height/frame-count as reported by OpenCV.
        self.vw = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.vh = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.vn = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        self.width = img_size[0]
        self.height = img_size[1]
        self.count = 0
        # NOTE(review): every frame is force-resized to a hardcoded 1920x1080
        # before letterboxing, regardless of the native resolution — confirm
        # this is intentional for non-FHD inputs.
        self.w, self.h = 1920, 1080
        print('Lenth of the video: {:d} frames'.format(self.vn))
    def get_size(self, vw, vh, dw, dh):
        # Largest (w, h) that fits (dw, dh) while preserving aspect ratio.
        # NOTE(review): not called anywhere in this class.
        wa, ha = float(dw) / vw, float(dh) / vh
        a = min(wa, ha)
        return int(vw * a), int(vh * a)
    def __iter__(self):
        self.count = -1
        return self
    def __next__(self):
        self.count += 1
        if self.count == len(self):
            raise StopIteration
        # Read image
        res, img0 = self.cap.read() # BGR
        assert img0 is not None, 'Failed to load frame {:d}'.format(self.count)
        img0 = cv2.resize(img0, (self.w, self.h))
        # Padded resize
        img, _, _, _ = letterbox(img0, height=self.height, width=self.width)
        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        img /= 255.0
        # cv2.imwrite(img_path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
        return self.count, img, img0
    def __len__(self):
        return self.vn # number of files
class LoadImagesAndLabels: # for training
    """Base training dataset: reads an image-list file and derives label paths
    by substitution (images/ -> labels_with_ids/, .png/.jpg -> .txt).
    Labels are 6-column rows: class, track id, normalized cx, cy, w, h.
    """
    def __init__(self, path, img_size=(1088, 608), augment=False, transforms=None):
        with open(path, 'r') as file:
            self.img_files = file.readlines()
            self.img_files = [x.replace('\n', '') for x in self.img_files]
            self.img_files = list(filter(lambda x: len(x) > 0, self.img_files))
        self.label_files = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
                            for x in self.img_files]
        self.nF = len(self.img_files) # number of image files
        self.width = img_size[0]
        self.height = img_size[1]
        self.augment = augment
        self.transforms = transforms
    def __getitem__(self, files_index):
        img_path = self.img_files[files_index]
        label_path = self.label_files[files_index]
        return self.get_data(img_path, label_path)
    def get_data(self, img_path, label_path):
        """Load one sample: image (letterboxed, optionally augmented) and its
        labels converted to normalized xywh in the letterboxed frame.

        Returns (img, labels, img_path, (orig_h, orig_w)).
        """
        height = self.height
        width = self.width
        img = cv2.imread(img_path) # BGR
        if img is None:
            raise ValueError('File corrupt {}'.format(img_path))
        augment_hsv = True
        if self.augment and augment_hsv:
            # SV augmentation by 50%
            fraction = 0.50
            img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            S = img_hsv[:, :, 1].astype(np.float32)
            V = img_hsv[:, :, 2].astype(np.float32)
            a = (random.random() * 2 - 1) * fraction + 1
            S *= a
            if a > 1:
                np.clip(S, a_min=0, a_max=255, out=S)
            a = (random.random() * 2 - 1) * fraction + 1
            V *= a
            if a > 1:
                np.clip(V, a_min=0, a_max=255, out=V)
            img_hsv[:, :, 1] = S.astype(np.uint8)
            img_hsv[:, :, 2] = V.astype(np.uint8)
            cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
        h, w, _ = img.shape
        img, ratio, padw, padh = letterbox(img, height=height, width=width)
        # Load labels
        if os.path.isfile(label_path):
            labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
            # Normalized xywh to pixel xyxy format
            labels = labels0.copy()
            labels[:, 2] = ratio * w * (labels0[:, 2] - labels0[:, 4] / 2) + padw
            labels[:, 3] = ratio * h * (labels0[:, 3] - labels0[:, 5] / 2) + padh
            labels[:, 4] = ratio * w * (labels0[:, 2] + labels0[:, 4] / 2) + padw
            labels[:, 5] = ratio * h * (labels0[:, 3] + labels0[:, 5] / 2) + padh
        else:
            labels = np.array([])
        # Augment image and labels
        if self.augment:
            img, labels, M = random_affine(img, labels, degrees=(-5, 5), translate=(0.10, 0.10), scale=(0.50, 1.20))
        # Debug-only visualization path, disabled by default.
        plotFlag = False
        if plotFlag:
            import matplotlib
            matplotlib.use('Agg')
            import matplotlib.pyplot as plt
            plt.figure(figsize=(50, 50))
            plt.imshow(img[:, :, ::-1])
            plt.plot(labels[:, [1, 3, 3, 1, 1]].T, labels[:, [2, 2, 4, 4, 2]].T, '.-')
            plt.axis('off')
            plt.savefig('test.jpg')
            time.sleep(10)
        nL = len(labels)
        if nL > 0:
            # convert xyxy to xywh
            labels[:, 2:6] = xyxy2xywh(labels[:, 2:6].copy()) # / height
            labels[:, 2] /= width
            labels[:, 3] /= height
            labels[:, 4] /= width
            labels[:, 5] /= height
        if self.augment:
            # random left-right flip
            lr_flip = True
            # NOTE(review): `&` is bitwise; works here because the operands are
            # bool, but `and` would be the conventional spelling.
            if lr_flip & (random.random() > 0.5):
                img = np.fliplr(img)
                if nL > 0:
                    labels[:, 2] = 1 - labels[:, 2]
        img = np.ascontiguousarray(img[:, :, ::-1]) # BGR to RGB
        if self.transforms is not None:
            img = self.transforms(img)
        return img, labels, img_path, (h, w)
    def __len__(self):
        return self.nF # number of batches
def letterbox(img, height=608, width=1088,
              color=(127.5, 127.5, 127.5)):
    """Resize `img` to fit inside (width, height) preserving aspect ratio,
    padding the remainder with `color`.

    Returns (padded image, scale ratio, horizontal pad, vertical pad) —
    pads are per-side (half the total padding) and may be fractional.
    """
    orig_h, orig_w = img.shape[:2]
    ratio = min(float(height) / orig_h, float(width) / orig_w)
    resized_w = round(orig_w * ratio)
    resized_h = round(orig_h * ratio)
    dw = (width - resized_w) / 2  # width padding (per side)
    dh = (height - resized_h) / 2  # height padding (per side)
    # The +/-0.1 nudges make the rounded top/bottom (left/right) pads sum to
    # the exact total when the padding is an odd number of pixels.
    top, bottom = round(dh - 0.1), round(dh + 0.1)
    left, right = round(dw - 0.1), round(dw + 0.1)
    img = cv2.resize(img, (resized_w, resized_h), interpolation=cv2.INTER_AREA)
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
    return img, ratio, dw, dh
def random_affine(img, targets=None, degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-2, 2),
                  borderValue=(127.5, 127.5, 127.5)):
    """Apply a random rotation/scale/translation/shear to `img` and warp the
    box columns (2:6, pixel xyxy) of `targets` to match.

    NOTE(review): asymmetric return — (warped img, filtered targets, M) when
    `targets` is not None, but just the warped img otherwise; callers must
    know which form they get.
    """
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
    border = 0 # width of added border (optional)
    height = img.shape[0]
    width = img.shape[1]
    # Rotation and Scale
    R = np.eye(3)
    a = random.random() * (degrees[1] - degrees[0]) + degrees[0]
    # a += random.choice([-180, -90, 0, 90]) # 90deg rotations added to small rotations
    s = random.random() * (scale[1] - scale[0]) + scale[0]
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
    # Translation
    T = np.eye(3)
    T[0, 2] = (random.random() * 2 - 1) * translate[0] * img.shape[0] + border # x translation (pixels)
    T[1, 2] = (random.random() * 2 - 1) * translate[1] * img.shape[1] + border # y translation (pixels)
    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # x shear (deg)
    S[1, 0] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # y shear (deg)
    M = S @ T @ R # Combined rotation matrix. ORDER IS IMPORTANT HERE!!
    imw = cv2.warpPerspective(img, M, dsize=(width, height), flags=cv2.INTER_LINEAR,
                              borderValue=borderValue) # BGR order borderValue
    # Return warped points also
    if targets is not None:
        if len(targets) > 0:
            n = targets.shape[0]
            points = targets[:, 2:6].copy()
            area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])
            # warp points: all four corners of every box through M.
            xy = np.ones((n * 4, 3))
            xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
            xy = (xy @ M.T)[:, :2].reshape(n, 8)
            # create new boxes: axis-aligned hull of the warped corners.
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
            # apply angle-based reduction: shrink the hull to counteract the
            # box inflation caused by rotating an axis-aligned rectangle.
            radians = a * math.pi / 180
            reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
            x = (xy[:, 2] + xy[:, 0]) / 2
            y = (xy[:, 3] + xy[:, 1]) / 2
            w = (xy[:, 2] - xy[:, 0]) * reduction
            h = (xy[:, 3] - xy[:, 1]) * reduction
            xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
            # reject warped points outside of image
            #np.clip(xy[:, 0], 0, width, out=xy[:, 0])
            #np.clip(xy[:, 2], 0, width, out=xy[:, 2])
            #np.clip(xy[:, 1], 0, height, out=xy[:, 1])
            #np.clip(xy[:, 3], 0, height, out=xy[:, 3])
            # Keep boxes that stay reasonably sized: >4px sides, >=10% of the
            # original area, aspect ratio < 10.
            w = xy[:, 2] - xy[:, 0]
            h = xy[:, 3] - xy[:, 1]
            area = w * h
            ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
            i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)
            targets = targets[i]
            targets[:, 2:6] = xy[i]
        return imw, targets, M
    else:
        return imw
def collate_fn(batch):
    """Collate (img, labels, path, size) samples into a batch.

    Labels are zero-padded along dim 1 to the largest per-sample box count;
    the true per-sample counts are returned as a (B, 1) tensor.
    """
    imgs, labels, paths, sizes = zip(*batch)
    stacked_imgs = torch.stack(imgs, 0)
    label_tensors = [torch.from_numpy(lb) for lb in labels]
    max_boxes = max(lb.shape[0] for lb in label_tensors)
    padded = torch.zeros(len(label_tensors), max_boxes, 6)
    counts = torch.zeros(len(label_tensors))
    for idx, lb in enumerate(label_tensors):
        n_boxes = lb.shape[0]
        if n_boxes > 0:
            padded[idx, :n_boxes, :] = lb
        counts[idx] = n_boxes
    return stacked_imgs, padded, paths, sizes, counts.unsqueeze(1)
class JointDataset(LoadImagesAndLabels): # for training
    """Multi-dataset training set producing CenterNet-style targets
    (heatmap, wh, reg, ids) plus per-object image crops for distillation.

    `paths` maps dataset name -> list file of image paths; track ids from
    different sub-datasets are offset so they never collide.
    """
    default_resolution = [1088, 608]
    mean = None
    std = None
    num_classes = 1
    def __init__(self, opt, root, paths, img_size=(1088, 608), augment=False, transforms=None):
        self.opt = opt
        dataset_names = paths.keys()
        self.img_files = OrderedDict()
        self.label_files = OrderedDict()
        self.tid_num = OrderedDict()
        self.tid_start_index = OrderedDict()
        self.num_classes = 1
        for ds, path in paths.items():
            with open(path, 'r') as file:
                self.img_files[ds] = file.readlines()
                self.img_files[ds] = [osp.join(root, x.strip()) for x in self.img_files[ds]]
                self.img_files[ds] = list(filter(lambda x: len(x) > 0, self.img_files[ds]))
            self.label_files[ds] = [
                x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
                for x in self.img_files[ds]]
        # Scan every label file to find the max track id per dataset.
        for ds, label_paths in self.label_files.items():
            max_index = -1
            for lp in label_paths:
                lb = np.loadtxt(lp)
                if len(lb) < 1:
                    continue
                if len(lb.shape) < 2:
                    # Single-row file: loadtxt returns a 1-D array.
                    img_max = lb[1]
                else:
                    img_max = np.max(lb[:, 1])
                if img_max > max_index:
                    max_index = img_max
            self.tid_num[ds] = max_index + 1
        # Assign each dataset a disjoint global track-id range.
        last_index = 0
        for i, (k, v) in enumerate(self.tid_num.items()):
            self.tid_start_index[k] = last_index
            last_index += v
        self.nID = int(last_index + 1)
        self.nds = [len(x) for x in self.img_files.values()]
        # cds[i] = flat start index of dataset i.
        self.cds = [sum(self.nds[:i]) for i in range(len(self.nds))]
        self.nF = sum(self.nds)
        self.width = img_size[0]
        self.height = img_size[1]
        self.max_objs = opt.K
        self.augment = augment
        self.transforms = transforms
        print('=' * 80)
        print('dataset summary')
        print(self.tid_num)
        print('total # identities:', self.nID)
        print('start index')
        print(self.tid_start_index)
        print('=' * 80)
    def __getitem__(self, files_index):
        """Build training targets for the sample at flat index `files_index`."""
        # Pick the sub-dataset whose start offset the index falls after
        # (no break: the loop keeps the last matching dataset).
        for i, c in enumerate(self.cds):
            if files_index >= c:
                ds = list(self.label_files.keys())[i]
                start_index = c
        img_path = self.img_files[ds][files_index - start_index]
        label_path = self.label_files[ds][files_index - start_index]
        imgs, labels, img_path, (input_h, input_w) = self.get_data(img_path, label_path)
        # Shift track ids into this dataset's global range; -1 stays -1.
        for i, _ in enumerate(labels):
            if labels[i, 1] > -1:
                labels[i, 1] += self.tid_start_index[ds]
        output_h = imgs.shape[1] // self.opt.down_ratio
        output_w = imgs.shape[2] // self.opt.down_ratio
        num_classes = self.num_classes
        num_objs = labels.shape[0]
        # Per-sample target buffers, fixed-size at opt.K objects.
        hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
        if self.opt.ltrb:
            wh = np.zeros((self.max_objs, 4), dtype=np.float32)
        else:
            wh = np.zeros((self.max_objs, 2), dtype=np.float32)
        reg = np.zeros((self.max_objs, 2), dtype=np.float32)
        ind = np.zeros((self.max_objs, ), dtype=np.int64)
        reg_mask = np.zeros((self.max_objs, ), dtype=np.uint8)
        ids = np.zeros((self.max_objs, ), dtype=np.int64)
        bbox_xys = np.zeros((self.max_objs, 4), dtype=np.float32)
        crop_h = self.opt.crop_h
        crop_w = self.opt.crop_w
        cropped_imgs = np.zeros((self.max_objs, crop_h, crop_w), dtype=np.float32)
        draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else draw_umich_gaussian
        for k in range(num_objs):
            label = labels[k]
            bbox = label[2:]
            cls_id = int(label[0])
            # Scale normalized cx, cy, w, h to output-feature-map pixels.
            bbox[[0, 2]] = bbox[[0, 2]] * output_w
            bbox[[1, 3]] = bbox[[1, 3]] * output_h
            # Amodal (unclipped) xyxy box, used for ltrb wh targets.
            bbox_amodal = copy.deepcopy(bbox)
            bbox_amodal[0] = bbox_amodal[0] - bbox_amodal[2] / 2.
            bbox_amodal[1] = bbox_amodal[1] - bbox_amodal[3] / 2.
            bbox_amodal[2] = bbox_amodal[0] + bbox_amodal[2]
            bbox_amodal[3] = bbox_amodal[1] + bbox_amodal[3]
            # Clamp the center into the feature map.
            bbox[0] = np.clip(bbox[0], 0, output_w - 1)
            bbox[1] = np.clip(bbox[1], 0, output_h - 1)
            h = bbox[3]
            w = bbox[2]
            bbox_xy = copy.deepcopy(bbox)
            bbox_xy[0] = bbox_xy[0] - bbox_xy[2] / 2
            bbox_xy[1] = bbox_xy[1] - bbox_xy[3] / 2
            bbox_xy[2] = bbox_xy[0] + bbox_xy[2]
            bbox_xy[3] = bbox_xy[1] + bbox_xy[3]
            if h > 0 and w > 0:
                radius = gaussian_radius((math.ceil(h), math.ceil(w)))
                radius = max(0, int(radius))
                radius = 6 if self.opt.mse_loss else radius
                #radius = max(1, int(radius)) if self.opt.mse_loss else radius
                ct = np.array(
                    [bbox[0], bbox[1]], dtype=np.float32)
                ct_int = ct.astype(np.int32)
                draw_gaussian(hm[cls_id], ct_int, radius)
                if self.opt.ltrb:
                    wh[k] = ct[0] - bbox_amodal[0], ct[1] - bbox_amodal[1], \
                            bbox_amodal[2] - ct[0], bbox_amodal[3] - ct[1]
                else:
                    wh[k] = 1. * w, 1. * h
                # Flat index of the center cell, used to gather predictions.
                ind[k] = ct_int[1] * output_w + ct_int[0]
                reg[k] = ct - ct_int
                reg_mask[k] = 1
                ids[k] = label[1]
                bbox_xys[k] = bbox_xy
                # NOTE(review): imgs is indexed channel-first here but
                # cv2.resize expects HxW(xC) and cropped_imgs is single-channel
                # (max_objs, crop_h, crop_w) — confirm imgs' layout at this
                # point and that the resize output matches the buffer shape.
                crop_im = imgs[:, int(bbox_xy[1]):int(np.ceil(bbox_xy[3])), int(bbox_xy[0]):int(np.ceil(bbox_xy[2]))]
                crop_im = cv2.resize(crop_im, (crop_w, crop_h))
                cropped_imgs[k] = crop_im
                # Debug-only crop dump, disabled by default.
                save_crop_im = False
                if save_crop_im:
                    cv2.imwrite('/export/wei.zhang/PycharmProjects/FairMOT/vis_debug/'+str(k)+'.jpg', crop_im)
        ret = {'input': imgs, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh, 'reg': reg, 'ids': ids, 'bbox': bbox_xys,
               'cropped_imgs': cropped_imgs}
        return ret
class DetDataset(LoadImagesAndLabels):  # for training
    """Detection-oriented multi-dataset training set.

    `paths` maps a dataset name to a text file listing image paths relative
    to `root`; label paths are derived by substitution
    (images/ -> labels_with_ids/, .png/.jpg -> .txt).  Track ids from
    different sub-datasets are offset into disjoint global ranges.
    """
    def __init__(self, root, paths, img_size=(1088, 608), augment=False, transforms=None):
        dataset_names = paths.keys()
        self.img_files = OrderedDict()
        self.label_files = OrderedDict()
        self.tid_num = OrderedDict()
        self.tid_start_index = OrderedDict()
        for ds, path in paths.items():
            with open(path, 'r') as file:
                self.img_files[ds] = file.readlines()
                self.img_files[ds] = [osp.join(root, x.strip()) for x in self.img_files[ds]]
                self.img_files[ds] = list(filter(lambda x: len(x) > 0, self.img_files[ds]))
            self.label_files[ds] = [
                x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
                for x in self.img_files[ds]]
        # Scan every label file to find the max track id per dataset.
        for ds, label_paths in self.label_files.items():
            max_index = -1
            for lp in label_paths:
                lb = np.loadtxt(lp)
                if len(lb) < 1:
                    continue
                if len(lb.shape) < 2:
                    # Single-row file: loadtxt returns a 1-D array.
                    img_max = lb[1]
                else:
                    img_max = np.max(lb[:, 1])
                if img_max > max_index:
                    max_index = img_max
            self.tid_num[ds] = max_index + 1
        # Assign each dataset a disjoint global track-id range.
        last_index = 0
        for i, (k, v) in enumerate(self.tid_num.items()):
            self.tid_start_index[k] = last_index
            last_index += v
        self.nID = int(last_index + 1)
        self.nds = [len(x) for x in self.img_files.values()]
        # cds[i] = flat start index of dataset i.
        self.cds = [sum(self.nds[:i]) for i in range(len(self.nds))]
        self.nF = sum(self.nds)
        self.width = img_size[0]
        self.height = img_size[1]
        self.augment = augment
        self.transforms = transforms
        print('=' * 80)
        print('dataset summary')
        print(self.tid_num)
        print('total # identities:', self.nID)
        print('start index')
        print(self.tid_start_index)
        print('=' * 80)

    def __getitem__(self, files_index):
        """Return (augmented image, raw normalized labels, path, (h, w))."""
        # Pick the sub-dataset whose start offset the index falls after
        # (no break: the loop keeps the last matching dataset).
        for i, c in enumerate(self.cds):
            if files_index >= c:
                ds = list(self.label_files.keys())[i]
                start_index = c
        img_path = self.img_files[ds][files_index - start_index]
        label_path = self.label_files[ds][files_index - start_index]
        # BUG FIX: labels0 was only assigned when the label file existed, so
        # a missing file raised NameError at the return below.  Default to an
        # empty (0, 6) array instead.
        if os.path.isfile(label_path):
            labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
        else:
            labels0 = np.zeros((0, 6), dtype=np.float32)
        imgs, labels, img_path, (h, w) = self.get_data(img_path, label_path)
        # Shift track ids into this dataset's global range; -1 stays -1.
        for i, _ in enumerate(labels):
            if labels[i, 1] > -1:
                labels[i, 1] += self.tid_start_index[ds]
        return imgs, labels0, img_path, (h, w)
| 37.866319 | 125 | 0.538627 |
0b4a4419b81f0695c9bb59a4382257a548ede0c7 | 1,956 | py | Python | collectors/pfizer/parser.py | almeidaah/collectors | f03096855b8d702969d22af0b20a4d6a0d820bd0 | [
"MIT"
] | 17 | 2016-06-28T21:20:21.000Z | 2022-03-02T16:31:25.000Z | collectors/pfizer/parser.py | almeidaah/collectors | f03096855b8d702969d22af0b20a4d6a0d820bd0 | [
"MIT"
] | 41 | 2016-04-04T10:36:45.000Z | 2017-04-24T10:04:57.000Z | collectors/pfizer/parser.py | kenferrara/collectors | e6c1f45df3a1ffd5d60dada1816484812eb51417 | [
"MIT"
] | 25 | 2016-05-18T09:27:42.000Z | 2021-03-21T14:44:31.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .record import Record
# Module API
def parse_record(res):
    """Parse a Pfizer clinical-trial page response into a Record.

    `res` is a Scrapy-style response (supports .css/.xpath selectors and
    .url).  Extraction was previously ten near-identical key/path/extract
    blocks; it is now table-driven with three tiny extraction strategies:
      - first:    first matching CSS text node (or None)
      - join:     all matching CSS text nodes concatenated
      - xpath:    all matching XPath text nodes concatenated and stripped
    """
    def _first(path):
        # First matching CSS text node, or None when absent (same as before).
        return res.css(path).extract_first()

    def _join(path):
        # Concatenation of every matching CSS text node.
        return ''.join(res.css(path).extract())

    def _xpath(path):
        # Concatenation of every matching XPath text node, whitespace-stripped.
        return ''.join(res.xpath(path).extract()).strip()

    data = {
        # Description
        'study_type': _first('.field-name-field-study-type .field-item::text'),
        'organization_id': _first('.field-name-field-organization-id .field-item::text'),
        'nct_id': _first('.field-name-field-clinical-trial-id .field-item::text'),
        'status': _xpath('//label[text() = "Status"]/../text()'),
        'study_start_date': _first('.field-name-field-study-start-date .field-item span::text'),
        'study_end_date': _first('.field-name-field-study-end-date .field-item span::text'),
        # Eligibility
        'eligibility_criteria': _join('.field-name-field-criteria .field-item *::text'),
        'gender': _first('.field-name-field-gender .field-item::text'),
        'age_range': _xpath('//label[text() = "Age Range:"]/../text()'),
        'healthy_volunteers_allowed': _first('.field-name-field-healthy-volunteers-allowed .field-item::text'),
    }

    # Create record
    return Record.create(res.url, data)
| 26.08 | 75 | 0.639059 |
e3a02a053528e3efaf8b742b455f4761e4465adf | 5,949 | py | Python | app.py | shuvanon/Memory-Board | 0615b20c36c42290ee0c3d64e1a2c45fddfdd98b | [
"MIT"
] | 2 | 2017-01-23T20:01:54.000Z | 2022-03-15T15:38:27.000Z | app.py | shuvanon/Memory-Board | 0615b20c36c42290ee0c3d64e1a2c45fddfdd98b | [
"MIT"
] | null | null | null | app.py | shuvanon/Memory-Board | 0615b20c36c42290ee0c3d64e1a2c45fddfdd98b | [
"MIT"
] | null | null | null | from flask import (Flask, g,render_template, flash, redirect, url_for, abort,send_from_directory,request)
from flask.ext.bcrypt import check_password_hash
from flask.ext.login import LoginManager, login_user,logout_user,login_required,current_user
from werkzeug import secure_filename
import os
# Server settings (DEBUG/PORT/HOST are read by the __main__ runner below).
DEBUG =True
PORT=5000
HOST = '127.0.0.1'
app = Flask(__name__)
# forms/models import the app's config, so they are imported after `app`
# exists to avoid a circular-import problem.
import forms
import models
# SECURITY NOTE(review): the session secret is hardcoded and committed —
# it should come from the environment/config in any real deployment.
app.secret_key = 'abcd.1234.xyz'
# Destination directory for photos saved by the /new_post handler.
app.config['UPLOAD_FOLDER'] = 'uploads/'
login_manager =LoginManager()
login_manager.init_app(app)
# Endpoint @login_required redirects anonymous users to.
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(userid):
    """Flask-Login hook: resolve a stored session id to a User, or None."""
    try:
        return models.User.get(models.User.id==userid)
    except models.DoesNotExist:
        # Unknown/stale id: returning None logs the session out.
        return None
@app.before_request
def before_request():
    """Connect to the database before each request."""
    g.db = models.DATABASE
    g.db.connect()
    # Expose the logged-in user (or anonymous) to views/templates via g.
    g.user = current_user
@app.after_request
def after_request(response):
    """Close the database connection after each request."""
    g.db.close()
    # Must return the response unchanged so Flask can send it.
    return response
@app.route('/register', methods=('GET', 'POST'))
def register():
    """Create a new user account from the registration form.

    Redirects to the index on success; otherwise re-renders the form
    (with validation errors on POST).
    """
    form = forms.RegisterForm()
    if form.validate_on_submit():
        models.User.create_user(
            user_name=form.username.data,
            email=form.email.data,
            password=form.password.data
        )
        # Flash only after create_user succeeded; previously the success
        # message was queued before the account was actually created, so a
        # failure still showed "Yay, you registered".
        flash("Yay, you registered", "success")
        return redirect(url_for('index'))
    return render_template('register.html', form=form)
@app.route('/login', methods=('GET','POST'))
def login():
    """Authenticate a user by email and password."""
    form = forms.LoginForm()
    if form.validate_on_submit():
        # Look the account up by email; peewee raises DoesNotExist on a miss.
        try:
            account = models.User.get(models.User.email == form.email.data)
        except models.DoesNotExist:
            account = None
        if account is None:
            flash("Your email or password does not match!***", "error")
        elif check_password_hash(account.password, form.password.data):
            login_user(account)
            flash("You've been logged in!", "success")
            return redirect(url_for('index'))
        else:
            flash("Your email or password does not match!", "error")
    return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
    """End the current session and redirect to the index page."""
    logout_user()
    flash ("You've been logged out! Come back soon!", "success")
    return redirect(url_for('index'))
@app.route('/new_post', methods=['GET', 'POST'])
@login_required
def post():
    """Create a new post with an uploaded photo.

    The photo is stored under UPLOAD_FOLDER and the post records the
    URL-style path served by the /uploads/<filename> route.
    """
    form = forms.PostForm()
    if form.validate_on_submit():
        # secure_filename strips path separators so user-supplied names
        # cannot escape the upload directory.
        filename = secure_filename(form.photo.data.filename)
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        # Removed leftover debug print() calls that dumped the filename and
        # path to stdout on every upload.
        form.photo.data.save(file_path)
        models.Post.create(user=g.user._get_current_object(),
                           content=form.content.data.strip(),
                           path=("/uploads/" + filename))
        flash("Message posted: Thanks!", "success")
        return redirect(url_for('index'))
    return render_template('post.html', form=form)
@app.route('/index')
@login_required
def index():
    """Render the most recent posts (capped at 100)."""
    recent_posts = models.Post.select().limit(100)
    return render_template('stream.html', stream=recent_posts)
@app.route('/stream')
@app.route('/stream/<username>')
def stream(username=None):
    """Show a named user's posts, or the current user's feed when no
    username is given (or it matches the current user)."""
    template = 'stream.html'
    viewing_other = username and username != current_user.username
    if viewing_other:
        # ** is peewee's case-insensitive LIKE comparison.
        try:
            user = models.User.select().where(models.User.username ** username).get()
        except models.DoesNotExist:
            abort(404)
        stream = user.posts.limit(100)
    else:
        user = current_user
        stream = current_user.get_stream().limit(100)
    if username:
        # Even one's own /stream/<name> URL gets the per-user template.
        template = 'user_stream.html'
    return render_template(template, stream=stream, user=user)
@app.route('/post/<int:post_id>')
def view_post(post_id):
    """Display a single post, or 404 when no post has that id."""
    matching = models.Post.select().where(models.Post.id == post_id)
    if not matching.count():
        abort(404)
    return render_template('stream.html', stream=matching)
@app.route('/follow/<username>')
@login_required
def follow(username):
    """Create a follow relationship from the current user to <username>."""
    try:
        # ** is peewee's case-insensitive LIKE operator.
        to_user = models.User.get(models.User.username ** username)
    except models.DoesNotExist:
        abort(404)
    else:
        try:
            models.Relationship.create(
                from_user=g.user._get_current_object(),
                to_user=to_user,
            )
        except models.IntegrityError:
            # Already following: treat the duplicate as a no-op.
            pass
        else:
            flash("You are now following {}!".format(to_user.username), "success")
    return redirect(url_for('stream', username=to_user.username))
@app.route('/unfollow/<username>')
@login_required
def unfollow(username):
    """Remove the current user's follow relationship to <username>.

    BUG FIX: ``Relationship.get`` raises ``DoesNotExist`` (not
    ``IntegrityError``) when no such relationship exists, so unfollowing a
    user you never followed escaped as a 500 error; catch it and treat
    "not following" as a no-op.
    """
    try:
        to_user = models.User.get(models.User.username ** username)
    except models.DoesNotExist:
        abort(404)
    else:
        try:
            models.Relationship.get(
                from_user=g.user._get_current_object(),
                to_user=to_user
            ).delete_instance()
        except (models.IntegrityError, models.DoesNotExist):
            pass
        else:
            flash("you have unfollowed {}!".format(to_user.username), "success")
    return redirect(url_for('stream', username=to_user.username))
@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve a previously uploaded file out of UPLOAD_FOLDER."""
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
@app.errorhandler(404)
def not_found(error):
    """Render the custom 404 page for missing resources."""
    return render_template('404.html'), 404
@app.route('/')
def hello_world():
    """Plain-text landing page."""
    return 'Hello World!'
if __name__ == '__main__':
    models.initialize()
    # Seed an initial admin account; create_user raises ValueError when the
    # account already exists, which we treat as "nothing to do".
    try:
        models.User.create_user(
            user_name='shuvanon',
            email='razik666@gmail.com',
            password='123456',
            admin=True,
        )
    except ValueError:
        pass
    app.run(debug=DEBUG, host=HOST, port=PORT)
| 28.600962 | 147 | 0.632375 |
21d69f9ac9ea6cb4310fbd52982cce40aa1bc7e1 | 5,471 | py | Python | warpseq/api/support.py | simian-terminal/warpseq | f61e68d1e6a9ad15a5e0c899237be784bcff7093 | [
"Apache-2.0"
] | 3 | 2021-01-22T01:20:20.000Z | 2022-03-10T20:58:42.000Z | warpseq/api/support.py | simianterminal/warpseq | f61e68d1e6a9ad15a5e0c899237be784bcff7093 | [
"Apache-2.0"
] | 1 | 2020-08-13T00:28:28.000Z | 2020-08-13T00:28:28.000Z | warpseq/api/support.py | simianterminal/warpseq | f61e68d1e6a9ad15a5e0c899237be784bcff7093 | [
"Apache-2.0"
] | null | null | null | # ------------------------------------------------------------------
# Warp Sequencer
# (C) 2020 Michael DeHaan <michael@michaeldehaan.net> & contributors
# Apache2 Licensed
# ------------------------------------------------------------------
# code supporting public API boilerplate reduction - a bit too much meta-programming but ah well
from .exceptions import *
class BaseApi(object):
    """Shared plumbing for the public API collection wrappers.

    Subclasses point this base at one of the song's object collections via
    class attributes (``song_collection``, ``object_class``, ``add_method``,
    ``remove_method``, ``add_required``, ``edit_required``,
    ``nullable_edits``, ``public_fields``) and inherit generic
    lookup/list/add/edit/remove behavior.
    """

    def __init__(self, public_api, song):
        # public_api: the owning API facade; song: the model object that
        # holds the collections and the add/remove callables.
        self.api = public_api
        self.song = song
        if self.__class__.add_method:
            self.fn_add = getattr(self.song, self.__class__.add_method)
            self.add_required = self.__class__.add_required
        self.edit_required = self.__class__.edit_required
        if self.__class__.remove_method:
            self.fn_remove = getattr(self.song, self.__class__.remove_method)
        self.public_fields = self.__class__.public_fields

    def lookup(self, name, require=False):
        """
        See if an object with the given name is in the collection. If
        'require' is True, raise NotFound instead of returning None.
        """
        coll = self._get_collection()
        # The collection may be a dict (keyed by id) or a plain list.
        if type(coll) == dict:
            for (k, v) in coll.items():
                if v.name == name:
                    return v
        else:
            for k in coll:
                if k.name == name:
                    return k
        if require:
            raise NotFound("\"%s\" not found" % name)
        return None

    def list(self):
        """
        Return summary information for every object in the collection —
        usually just the names. Clip overrides _short_details because it is
        keyed by (scene, track) rather than by name.
        """
        coll = self._get_collection()
        data = []
        if type(coll) == dict:
            for (k, v) in coll.items():
                data.append(self._short_details(v))
        else:
            return [self._short_details(x) for x in coll]
        return data

    def _short_details(self, obj):
        """
        What list() reports per object: the name, except for Clip.
        Use .details() for specifics.
        """
        return obj.name

    def details(self, name):
        """
        Return all public field values of the named object, suitable for a
        web interface. Object references (anything carrying an ``obj_id``)
        are flattened to their names; internal state is omitted.
        Raises NotFound when the object does not exist.
        """
        obj = self.lookup(name)
        if obj is None:
            raise NotFound("\"%s\" not found" % name)
        data = obj.to_dict()
        new_data = dict()
        for (k, v) in data.items():
            if k in self.public_fields:
                value = getattr(obj, k)
                if type(value) == list:
                    if len(value) > 0 and hasattr(value[0], 'obj_id'):
                        value = [x.name for x in value]
                elif hasattr(value, 'obj_id'):
                    value = value.name
                new_data[k] = value
        self._update_details(new_data, obj)
        return new_data

    def _update_details(self, details, obj):
        """
        Hook allowing a subclass to add or remove items before .details()
        returns them to the caller.
        """
        pass

    def _get_collection(self):
        """
        Pull this wrapper's collection out of the song object. The result
        may be a list or a dict.
        """
        return getattr(self.song, self.__class__.song_collection)

    def _require_input(self, what, params):
        """
        Verify that all parameters named in *what* are present (non-None)
        in *params*; raise RequiredInput otherwise.
        """
        for k in what:
            if params[k] is None:
                raise RequiredInput("%s is required" % k)

    def _ok(self):
        """
        Uniform 'success' return value for mutating calls. Not really
        meaningful at this point.
        """
        return True

    def _generic_add(self, name, params):
        """
        Support code for adding new objects to a collection.
        Raises AlreadyExists when the name is taken.
        """
        obj = self.lookup(name)
        del params['self']
        del params['name']
        self._require_input(self.add_required, params)
        if not obj:
            obj = self.__class__.object_class(name=name, **params)
            self.fn_add([obj])
            return self._ok()
        else:
            raise AlreadyExists()

    def _generic_edit(self, name, params):
        """
        Support code for editing existing objects in a collection. Only
        fields the caller actually supplied are written back.
        """
        obj = self.lookup(name)
        if not obj:
            raise NotFound("%s not found" % name)
        del params["name"]
        del params["self"]
        self._require_input(self.edit_required, params)
        if "new_name" in params:
            value = params["new_name"]
            if value:
                obj.name = value
            del params["new_name"]
        for (k, v) in params.items():
            # BUG FIX: previously every parameter was written back even when
            # it was None and not declared nullable, wiping fields the
            # caller never passed (the original guard assigned the same
            # value on both paths). Only assign provided values, or Nones
            # for fields explicitly listed in nullable_edits.
            if v is not None or k in self.__class__.nullable_edits:
                setattr(obj, k, v)
        return self._ok()

    def _generic_remove(self, name):
        """
        Support code for removing objects from a collection.
        Raises NotFound when the object does not exist.
        """
        obj = self.lookup(name, require=True)
        self.fn_remove(obj)
        return self._ok()
| 33.564417 | 105 | 0.550174 |
708d7c088a8aed037f3dee9e24e215a3d70f542b | 5,499 | py | Python | .ycm_extra_conf.py | oledahle/mongo-cxx-driver | 60509b2d430b94d8e52d05502fb60e7b91c63cba | [
"Apache-2.0"
] | 934 | 2015-01-02T14:49:43.000Z | 2022-03-16T01:21:42.000Z | .ycm_extra_conf.py | oledahle/mongo-cxx-driver | 60509b2d430b94d8e52d05502fb60e7b91c63cba | [
"Apache-2.0"
] | 533 | 2015-01-07T18:45:30.000Z | 2022-03-23T00:44:26.000Z | .ycm_extra_conf.py | oledahle/mongo-cxx-driver | 60509b2d430b94d8e52d05502fb60e7b91c63cba | [
"Apache-2.0"
] | 570 | 2015-01-04T05:47:04.000Z | 2022-03-28T11:12:38.000Z | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
    '-Wall',
    '-Wextra',
    '-Werror',
    '-Wno-long-long',
    '-Wno-variadic-macros',
    '-std=c++11',
    '-x', 'c++',
    '-Isrc',
    '-Ibuild/src',
    '-Ibuild/src/bsoncxx/third_party/EP_mnmlstc_core-prefix/src/EP_mnmlstc_core/include',
    '-Isrc/third_party/catch/include',
    '-I/usr/local/include/libmongoc-1.0',
    '-I/usr/local/include/libbson-1.0',
    # BUG FIX: a missing trailing comma made Python concatenate the next two
    # string literals into the bogus flag '-I/usr/include-I/usr/local/include'.
    '-I/usr/include',
    '-I/usr/local/include',
    '-isystem/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/c++/v1',
]

# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''

if os.path.exists(compilation_database_folder):
    database = ycm_core.CompilationDatabase(compilation_database_folder)
else:
    database = None

SOURCE_EXTENSIONS = ['.cpp', '.cxx', '.cc', '.c', '.m', '.mm']
def DirectoryOfThisScript():
    """Absolute path of the directory containing this configuration file."""
    return os.path.abspath(os.path.dirname(__file__))
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
    """Return a copy of *flags* with relative include/sysroot paths rewritten
    as absolute paths rooted at *working_directory*.

    Handles both the split form ('-I', 'path') and the joined form
    ('-Ipath'). With no working directory the flags are returned unchanged.
    Empty flags are dropped.
    """
    if not working_directory:
        return list(flags)

    path_prefixes = ['-isystem', '-I', '-iquote', '--sysroot=']
    absolute_flags = []
    expecting_path = False
    for flag in flags:
        rewritten = flag
        if expecting_path:
            expecting_path = False
            if not flag.startswith('/'):
                rewritten = os.path.join(working_directory, flag)
        for prefix in path_prefixes:
            if flag == prefix:
                # The path comes as the *next* argument.
                expecting_path = True
                break
            if flag.startswith(prefix):
                rewritten = prefix + os.path.join(
                    working_directory, flag[len(prefix):])
                break
        if rewritten:
            absolute_flags.append(rewritten)
    return absolute_flags
def IsHeaderFile(filename):
    """True when *filename* has a C/C++ header extension."""
    return os.path.splitext(filename)[1] in ('.h', '.hxx', '.hpp', '.hh')
def GetCompilationInfoForFile(filename):
    """Look up compile flags for *filename* in the compilation database.

    CMake's compile_commands.json has no entries for header files, so for a
    header we query the database for a sibling source file with the same
    basename and borrow its flags, if any exist.
    """
    if IsHeaderFile(filename):
        basename = os.path.splitext(filename)[0]
        for extension in SOURCE_EXTENSIONS:
            candidate = basename + extension
            if not os.path.exists(candidate):
                continue
            compilation_info = database.GetCompilationInfoForFile(candidate)
            if compilation_info.compiler_flags_:
                return compilation_info
        return None
    return database.GetCompilationInfoForFile(filename)
def FlagsForFile(filename, **kwargs):
    """YouCompleteMe entry point: return the compile flags for *filename*."""
    if database:
        # Bear in mind that compilation_info.compiler_flags_ does NOT return
        # a python list, but a "list-like" StringVec object.
        compilation_info = GetCompilationInfoForFile(filename)
        if not compilation_info:
            return None
        final_flags = MakeRelativePathsInFlagsAbsolute(
            compilation_info.compiler_flags_,
            compilation_info.compiler_working_dir_)
        try:
            final_flags.remove('-stdlib=libc++')
        except ValueError:
            # Flag was not present; nothing to strip.
            pass
    else:
        final_flags = MakeRelativePathsInFlagsAbsolute(
            flags, DirectoryOfThisScript())
    return {
        'flags': final_flags,
        'do_cache': True
    }
| 33.944444 | 115 | 0.684488 |
0bd9b662ae298bc2a82e4cabf4bc14c19965c668 | 2,012 | py | Python | precon/re_reference.py | ONSBigData/precon | 4e441a780f72c2e399478abdbc5c0059003cbd17 | [
"MIT"
] | 2 | 2020-10-15T08:48:03.000Z | 2021-03-10T15:39:30.000Z | precon/re_reference.py | ONSBigData/precon | 4e441a780f72c2e399478abdbc5c0059003cbd17 | [
"MIT"
] | 19 | 2020-10-30T15:08:41.000Z | 2021-03-30T11:01:30.000Z | precon/re_reference.py | ONSBigData/precon | 4e441a780f72c2e399478abdbc5c0059003cbd17 | [
"MIT"
] | 1 | 2021-03-17T19:44:48.000Z | 2021-03-17T19:44:48.000Z | # -*- coding: utf-8 -*-
import pandas as pd
def set_reference_period(df, period):
    """Re-reference an index series so its mean over *period* equals 100."""
    reference_mean = df[period].mean()
    rebased = df.div(reference_mean) * 100
    # Division by zero or missing data yields NaN; present those as zeros.
    return rebased.fillna(0)
def set_index_range(df, start=None, end=None):
    """Slice *df* between *start* and *end* and rebase the first row to 100.

    *start*, when given, must be one of the index's years as a string.
    """
    if start and start not in df.index.year.unique().astype(str).to_numpy():
        raise Exception(
            "start needs to be a year in the index, as a string.")
    subset = df.loc[start:end]
    if isinstance(df, pd.Series):
        subset.iloc[0] = 100
    else:
        subset.iloc[0, :] = 100
    return subset
def full_index_to_in_year_indices(full_index):
    """Break *full_index* into Jan -> Jan(+1) segments, each rebased to 100.

    Returns a dict mapping year (int) to that year's in-year index.
    """
    # One entry per calendar year present in the index.
    year_starts = full_index.resample('A').first().index.to_timestamp()
    # NOTE(review): set_index_range validates *start* against year strings,
    # while a Timestamp is passed here — confirm the two agree in practice.
    in_year = {}
    for year_start in year_starts:
        segment_end = year_start + pd.DateOffset(years=1)
        in_year[year_start.year] = set_index_range(
            full_index, start=year_start, end=segment_end)
    return in_year
def in_year_indices_to_full_index(in_year_indices):
    """Stitch a dict of in-year indices back into one unchained index.

    Each segment starts at Jan=100; after the first year those rows merely
    duplicate the previous segment's endpoint, so they are dropped.
    """
    combined = pd.concat(in_year_indices).fillna(0).droplevel(0)
    is_jan = combined.index.month == 1
    after_first_year = combined.index.year != combined.index[0].year
    is_base_value = combined == 100
    if isinstance(combined, pd.DataFrame):
        is_base_value = is_base_value.any(axis=1)
    duplicate_rows = is_jan & after_first_year & is_base_value
    return combined[~duplicate_rows]
b803d5266df2366f44f1d8cdf40281bab285e332 | 3,462 | py | Python | run_video_output.py | vamos23467/tf-pose-estimation | 4b951e19be6e9365d72645da292d6e3a559db55a | [
"Apache-2.0"
] | null | null | null | run_video_output.py | vamos23467/tf-pose-estimation | 4b951e19be6e9365d72645da292d6e3a559db55a | [
"Apache-2.0"
] | null | null | null | run_video_output.py | vamos23467/tf-pose-estimation | 4b951e19be6e9365d72645da292d6e3a559db55a | [
"Apache-2.0"
] | null | null | null | import argparse
import logging
import time
import cv2
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
# Module logger: everything at DEBUG level, streamed to the console with a
# timestamp/name/level prefix.
logger = logging.getLogger('TfPoseEstimator-Video')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)

# Wall-clock time of the previously processed frame (for FPS bookkeeping).
fps_time = 0
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='tf-pose-estimation Video')
    parser.add_argument('--video', type=str, default='')
    parser.add_argument('--resize', type=str, default='0x0',
                        help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
    parser.add_argument('--resize-out-ratio', type=float, default=4.0,
                        help='if provided, resize heatmaps before they are post-processed. default=1.0')
    parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin')
    parser.add_argument('--show-process', type=bool, default=False,
                        help='for debug purpose, if enabled, speed for inference is dropped.')
    parser.add_argument('--showBG', type=bool, default=True, help='False to show skeleton only.')
    args = parser.parse_args()

    logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))
    w, h = model_wh(args.resize)
    if w > 0 and h > 0:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
    else:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(1280, 720))

    # BUG FIX: the --video argument was parsed but ignored (the input path
    # was hard-coded); honor it and fall back to the old hard-coded path
    # only when it is not supplied.
    input_path = args.video or './../drive/MyDrive/MAH04796.MP4'
    cap = cv2.VideoCapture(input_path)
    fourcc = cv2.VideoWriter_fourcc('M', 'P', '4', 'V')
    # NOTE(review): the writer frame size is (w, h) from --resize; with the
    # default 0x0 this is (0, 0) and OpenCV will silently write nothing —
    # confirm callers always pass --resize.
    video = cv2.VideoWriter("./../drive/MyDrive/MAH04796_convert.MP4", fourcc, 30, (w, h))

    with open('./../drive/MyDrive/humans.txt', mode='w') as f:
        if cap.isOpened() is False:
            print("Error opening video stream or file")
        while cap.isOpened():
            ret_val, image = cap.read()
            if not ret_val:
                break
            humans = e.inference(image, resize_to_default=(w > 0 and h > 0),
                                 upsample_size=args.resize_out_ratio)
            # One line per frame; humans within a frame are ';'-separated.
            f.write(";".join(str(human) for human in humans))
            f.write("\n")
            if not args.showBG:
                image = np.zeros(image.shape)
            logger.debug('postprocess+')
            image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
            video.write(image)
            fps_time = time.time()
            # ESC aborts early (only relevant when a window has focus).
            if cv2.waitKey(1) == 27:
                break

    cv2.destroyAllWindows()
    video.release()
    logger.debug('finished+')
fcddaf69a2d770a1b78534ceec28808b09d034c0 | 654 | py | Python | parser/fase2/team10/InstruccionesPL/IndicesPL/IndicePLUsing.py | Gabriel-15/tytus | fb00718bf3fcc5211a3604fba1a551f44bdc6deb | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/fase2/team10/InstruccionesPL/IndicesPL/IndicePLUsing.py | Gabriel-15/tytus | fb00718bf3fcc5211a3604fba1a551f44bdc6deb | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/fase2/team10/InstruccionesPL/IndicesPL/IndicePLUsing.py | Gabriel-15/tytus | fb00718bf3fcc5211a3604fba1a551f44bdc6deb | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | from InstruccionesPL.TablaSimbolosPL.InstruccionPL import InstruccionPL
class IndicePLUsing(InstruccionPL):
    """AST node for a CREATE INDEX ... USING statement (PL phase).

    Stores the index name, the target table, the USING operation and the
    indexed column; executing the node registers it on the tree's index
    list.
    """
    def __init__(self, nombreIndice, nombreTabla , opracion, nombreCampo,tipo, strGram, linea, columna):
        InstruccionPL.__init__(self, tipo, linea, columna, strGram)
        self.nombreIndice= nombreIndice  # index name
        self.nombreTabla = nombreTabla  # table the index is created on
        self.opracion = opracion  # USING operation (attribute name kept as-is for callers)
        self.nombreCampo = nombreCampo  # indexed column name

    def ejecutar(self, tabla, arbol):
        """Run the base-class bookkeeping, then register this index node."""
        super().ejecutar(tabla,arbol)
        arbol.setListaIndice(self)
        # execution of a function

    def traducir(self, tabla, arbol):
        """Translation phase placeholder; only prints a marker string."""
        print('trduccion')
ddc37367cab899b84173f75457eb4162a876ef25 | 2,660 | py | Python | preprocessing/text_embedding.py | affjljoo3581/Job-Recommend-Competition | bb41cdd02161d97813f03eb9d23ff8c186b7da83 | [
"Apache-2.0"
] | 24 | 2022-02-15T09:04:33.000Z | 2022-03-30T05:45:50.000Z | preprocessing/text_embedding.py | affjljoo3581/Job-Recommend-Competition | bb41cdd02161d97813f03eb9d23ff8c186b7da83 | [
"Apache-2.0"
] | 2 | 2022-03-13T13:12:55.000Z | 2022-03-15T03:01:29.000Z | preprocessing/text_embedding.py | affjljoo3581/Job-Recommend-Competition | bb41cdd02161d97813f03eb9d23ff8c186b7da83 | [
"Apache-2.0"
] | 3 | 2022-02-15T14:36:02.000Z | 2022-03-21T16:24:36.000Z | import sys
import numpy as np
import torch
import tqdm
from torch.utils.data import DataLoader, TensorDataset
from transformers import AutoModel, AutoTokenizer
@torch.no_grad()
def get_text_embeddings_from_simcse(
    package_dir: str, model_ckpt: str, texts: list[str], batch_size: int = 1024
) -> np.ndarray:
    """Calculate SimCSE text embeddings.

    Args:
        package_dir: The directory of simcse project.
        model_ckpt: The model checkpoint file of the simcse model.
        texts: The list of texts to be embedded.
        batch_size: The number of batch. Default is `1024`.

    Returns:
        A collection of embedded vectors.
    """
    # The simcse project is not an installed package, so make its modules
    # importable by prepending its directory to sys.path (idempotent).
    if package_dir not in sys.path:
        sys.path.insert(0, package_dir)
    from data.dataloader import convert_to_tensor, example_model_setting

    model, transform, device = example_model_setting(model_ckpt)

    # Tokenize all texts at once; keys are sorted so the tensor order stays
    # deterministic when zipping back into a per-batch dict below.
    input_tensors = convert_to_tensor(texts, transform)
    input_tensor_keys = sorted(input_tensors.keys())
    input_tensor_values = (input_tensors[name] for name in input_tensor_keys)
    dataloader = DataLoader(TensorDataset(*input_tensor_values), batch_size)

    batch_text_embeddings = []
    for batch_input_tensors in tqdm.tqdm(dataloader):
        batch_input_tensors = dict(zip(input_tensor_keys, batch_input_tensors))
        batch_text_embeddings.append(model.encode(batch_input_tensors, device))
    text_embeddings = torch.cat(batch_text_embeddings, dim=0)
    return text_embeddings.cpu().numpy()
@torch.no_grad()
def get_text_embeddings_from_input_embeddings(
    model_name: str, texts: list[str], batch_size: int = 1024
) -> np.ndarray:
    """Calculate Transformer input embeddings.

    Each text is represented by the mean of its token input-embedding
    vectors, with padding positions excluded from the average.

    Args:
        model_name: The transformer model name.
        texts: The list of texts to be embedded.
        batch_size: The number of batch. Default is `1024`.

    Returns:
        A collection of embedded vectors.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    embeddings = AutoModel.from_pretrained(model_name).get_input_embeddings()

    averaged_batches = []
    for start in tqdm.trange(0, len(texts), batch_size):
        encodings = tokenizer(
            texts[start : start + batch_size],
            padding=True,
            add_special_tokens=False,
            return_tensors="pt",
        )
        # Mask padding out of the mean: sum only real tokens, then divide
        # by the number of real tokens per text.
        token_mask = encodings["attention_mask"][:, :, None]
        token_embeds = embeddings(encodings["input_ids"])
        batch_mean = (token_embeds * token_mask).sum(1) / token_mask.sum(1)
        averaged_batches.append(batch_mean.numpy())
    return np.concatenate(averaged_batches, axis=0)
| 34.102564 | 80 | 0.707519 |
62347165b1d720ec51bb3e2c84d6a96f72bee666 | 5,826 | py | Python | forte/process_manager.py | jzpang/forte | 489fb9cafba6faf5739bda935836b61b5e3d02b6 | [
"Apache-2.0"
] | 163 | 2019-11-01T19:25:40.000Z | 2022-03-30T22:49:45.000Z | forte/process_manager.py | jzpang/forte | 489fb9cafba6faf5739bda935836b61b5e3d02b6 | [
"Apache-2.0"
] | 633 | 2019-11-01T20:07:08.000Z | 2022-03-31T23:11:20.000Z | forte/process_manager.py | jzpang/forte | 489fb9cafba6faf5739bda935836b61b5e3d02b6 | [
"Apache-2.0"
] | 62 | 2019-11-01T19:41:33.000Z | 2022-03-24T11:14:21.000Z | # Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
from typing import List, Deque
from forte.process_job import ProcessJob, ProcessJobStatus
class ProcessManager:
    r"""A pipeline level manager that manages global processing information,
    such as the current running components. This is an internal class and
    should only be initialized by the system.

    One job queue is kept per pipeline component. For each queue,
    ``_unprocessed_queue_indices`` holds the index of the first UNPROCESSED
    job (everything from there to the end of that queue is unprocessed) and
    ``_processed_queue_indices`` holds the index of the last PROCESSED job
    (everything up to and including it is processed). A current queue index
    of ``-1`` means "read the data from the reader".

    Args:
        pipeline_length (int): The length of the current pipeline being
            executed.
    """

    def __init__(self, pipeline_length):
        self._pipeline_length: int = pipeline_length
        self._queues: List[Deque[ProcessJob]] = []
        self._current_queue_index: int = -1
        self._current_processor_index: int = 0
        self._unprocessed_queue_indices: List[int] = []
        self._processed_queue_indices: List[int] = []
        self.reset()

    def reset(self):
        """Restore the initial state: empty queues, reader as the source."""
        self._queues = [deque() for _ in range(self._pipeline_length)]
        self._current_queue_index = -1
        self._current_processor_index = 0
        self._unprocessed_queue_indices = [0] * self._pipeline_length
        self._processed_queue_indices = [-1] * self._pipeline_length

    @property
    def current_processor_index(self) -> int:
        return self._current_processor_index

    @current_processor_index.setter
    def current_processor_index(self, processor_index: int):
        # Valid processors are 0 .. pipeline_length - 1.
        if processor_index >= len(self._queues):
            raise ValueError(
                f"{processor_index} exceeds the pipeline "
                f"range [0, {self.pipeline_length - 1}]"
            )
        self._current_processor_index = processor_index

    @property
    def current_queue_index(self) -> int:
        return self._current_queue_index

    @current_queue_index.setter
    def current_queue_index(self, queue_index: int):
        if queue_index >= len(self._queues):
            raise ValueError(
                f"{queue_index} exceeds the pipeline range "
                f"[0, {self.pipeline_length - 1}]"
            )
        self._current_queue_index = queue_index

    @property
    def unprocessed_queue_indices(self) -> List[int]:
        return self._unprocessed_queue_indices

    @property
    def processed_queue_indices(self) -> List[int]:
        return self._processed_queue_indices

    @property
    def current_queue(self) -> Deque[ProcessJob]:
        return self._queues[self.current_queue_index]

    @property
    def pipeline_length(self) -> int:
        return self._pipeline_length

    def add_to_queue(self, queue_index: int, job: ProcessJob):
        """
        Add a job to a particular queue.

        Args:
            queue_index: The queue that the job is to be added.
            job: The job to be added.

        Raises:
            ValueError: if queue_index is out of range.
        """
        # BUG FIX: the bound check used '>' so queue_index == len(queues)
        # slipped past the ValueError and crashed later with an IndexError;
        # use '>=' to match the property setters above.
        if queue_index >= len(self._queues):
            raise ValueError(
                f"Queue number {queue_index} exceeds queue "
                f"size {len(self._queues)}"
            )
        else:
            # When a job is added to a queue, it will be
            # considered as unprocessed.
            job.set_status(ProcessJobStatus.UNPROCESSED)
            self._queues[queue_index].append(job)

    def exhausted(self) -> bool:
        r"""Returns True only if the last element remaining in the last queue
        is a poison pack."""
        return (
            len(self._queues[self.pipeline_length - 1]) == 1
            and self._queues[self.pipeline_length - 1][0].is_poison
        )
b3bec88a0854761084641e926089b24ad33b6040 | 1,274 | py | Python | Betsy/Betsy/modules/extract_platform_annotations.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
] | 9 | 2017-01-13T02:38:41.000Z | 2021-04-08T00:44:39.000Z | Betsy/Betsy/modules/extract_platform_annotations.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
] | null | null | null | Betsy/Betsy/modules/extract_platform_annotations.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
] | 4 | 2017-01-05T16:25:25.000Z | 2019-12-12T20:07:38.000Z | from Module import AbstractModule
class Module(AbstractModule):
    """Betsy module: extract the platform annotation table from a GEO
    SOFT-format file into a tab-delimited text file."""

    def __init__(self):
        AbstractModule.__init__(self)

    def run(
        self, network, in_data, out_attributes, user_options, num_cores,
        outfile):
        from genomicode import filelib
        # Stream the platform table out of the SOFT file, then make sure the
        # output is non-empty.
        out = open(outfile, 'w')
        extract_sf_platform(in_data.identifier, out)
        out.close()
        filelib.assert_exists_nz(outfile)

    def name_outfile(self, antecedents, user_options):
        return "platform.txt"
def extract_sf_platform(filename, outhandle):
    """Copy the platform annotation table of a GEO SOFT file to outhandle.

    filename   path to the SOFT file (filelib.openfh also handles
               compressed files).
    outhandle  open file-like object that receives the table lines.

    NOTE: this file uses Python 2 syntax (print statement, raise E, msg).
    """
    from genomicode import filelib
    handle = filelib.openfh(filename)
    # Scan forward to the platform section header; EOF without one is fatal.
    while 1:
        line = handle.readline()
        if not line:
            raise AssertionError, "I could not find platform"
        #if line.startswith("^PLATFORM") and line.find(platform) >= 0:
        #    break
        # Assuming only one platform per file.
        if line.startswith("^PLATFORM"):
            break
    # Copy everything strictly between the table begin/end markers; the
    # marker lines themselves are not written.
    in_platform_table = 0
    for line in handle:
        if line.startswith("!platform_table_begin"):
            in_platform_table = 1
        elif line.startswith("!platform_table_end"):
            break
        elif in_platform_table:
            print >>outhandle, line,
    handle.close()
45902193c6022f1899f4179e29d2d99abb83a452 | 2,534 | py | Python | credentials.py | prettyflyforabeeguy/Arbibot | 7c11184bcbd105c42890ae6afcd281986a667e9b | [
"MIT"
] | null | null | null | credentials.py | prettyflyforabeeguy/Arbibot | 7c11184bcbd105c42890ae6afcd281986a667e9b | [
"MIT"
] | null | null | null | credentials.py | prettyflyforabeeguy/Arbibot | 7c11184bcbd105c42890ae6afcd281986a667e9b | [
"MIT"
] | null | null | null | # Retrieve the credentials and needed URIs from the creds.json
from datetime import datetime, timezone
import hashlib
import hmac
import time
import json
import api_client as _api
class Creds:
    """Loads API credentials and endpoint URIs from config/creds.json and
    builds signed request parameters for the C-PateX API."""

    def __init__(self):
        self.creds_file = "./config/creds.json"
        self.creds_dict = {}
        self.read_credsjson()
        self.headers = {}
        self.header2 = {}

    def read_credsjson(self):
        """Populate creds_dict from the JSON file; on any failure the dict
        stays empty and a message is printed."""
        try:
            with open(self.creds_file) as data_file:
                self.creds_dict = json.load(data_file)
        except Exception:
            print(f"There was an error reading credentials from: {self.creds_file}")

    def get_key(self) -> str:
        return self.creds_dict["Cpatexapi_key"]

    def get_secret(self) -> str:
        return self.creds_dict["Cpatexapi_secret"]

    def get_endpoint(self) -> str:
        return self.creds_dict["Cpatexendpoint"]

    def get_cmckey(self) -> str:
        return self.creds_dict["CMCKey"]

    def get_tgramkey(self) -> str:
        return self.creds_dict["tgram_API_KEY"]

    def get_timestamp(self) -> str:
        # Local wall clock in milliseconds, as a string.
        return str(int(time.time() * 1000))

    def build_authentication(self, method, uri, api_key, api_secret, query1, query2) -> str:
        """Return (signature, tonce) for a signed C-PateX request; also
        resets self.headers to the form-encoded content type.

        api_secret must be bytes (it is fed to hmac.new as the key).
        """
        self.headers = {}
        # Take the timestamp from cpatex itself rather than the local clock,
        # so clock drift cannot invalidate the signature.
        tonce = _api.APIClient().timestamp("GET") * 1000
        if uri == "/api/v2/orders":
            pre_sign = (method + "|" + uri + "|" + "access_key=" + api_key
                        + query1 + "&tonce=" + str(tonce) + query2)
        else:
            pre_sign = (method + "|" + uri + "|" + "access_key=" + api_key
                        + query1 + "&tonce=" + str(tonce))
        api_signature = hmac.new(api_secret, pre_sign.encode(), hashlib.sha256).hexdigest()
        # It appears that C-PateX doesn't require headers currently?
        self.headers = {
            'Content-Type' : 'application/x-www-form-urlencoded'
        }
        return api_signature, str(tonce)
if __name__ == '__main__':
    # Smoke test: constructing Creds loads (or reports a problem with)
    # ./config/creds.json.
    app = Creds()
    #_api = APIClient()
    #apikey = Creds().get_key()
    #apisecret = Creds().get_secret()
    #apisecret = bytes(apisecret, encoding='utf-8')
    #endpoint = Creds().get_endpoint()
417b66f772a14438b2162b59ecc66d30f0b8bc91 | 4,504 | py | Python | test/algorithms/test_warm_start_qaoa.py | MartinBeseda/qiskit-optimization | 11a924bd7b7f8508149d051827fe82591a12caba | [
"Apache-2.0"
] | null | null | null | test/algorithms/test_warm_start_qaoa.py | MartinBeseda/qiskit-optimization | 11a924bd7b7f8508149d051827fe82591a12caba | [
"Apache-2.0"
] | null | null | null | test/algorithms/test_warm_start_qaoa.py | MartinBeseda/qiskit-optimization | 11a924bd7b7f8508149d051827fe82591a12caba | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test warm start QAOA optimizer. """
from test import QiskitOptimizationTestCase, requires_extra_library
import numpy as np
from docplex.mp.model import Model
from qiskit import BasicAer
from qiskit.algorithms import QAOA
from qiskit_optimization import QuadraticProgram
from qiskit_optimization.algorithms import SlsqpOptimizer
from qiskit_optimization.algorithms.goemans_williamson_optimizer import (
GoemansWilliamsonOptimizer,
)
from qiskit_optimization.algorithms.warm_start_qaoa_optimizer import (
MeanAggregator,
WarmStartQAOAOptimizer,
)
from qiskit_optimization.applications.max_cut import Maxcut
from qiskit_optimization.translators import from_docplex_mp
class TestWarmStartQAOAOptimizer(QiskitOptimizationTestCase):
    """Tests for the warm start QAOA optimizer."""

    @requires_extra_library
    def test_max_cut(self):
        """Basic test on the max cut problem."""
        # Symmetric weighted adjacency matrix of a 4-node graph.
        graph = np.array(
            [
                [0.0, 1.0, 2.0, 0.0],
                [1.0, 0.0, 1.0, 0.0],
                [2.0, 1.0, 0.0, 1.0],
                [0.0, 0.0, 1.0, 0.0],
            ]
        )
        # The Goemans-Williamson presolver supplies 10 candidate cuts to
        # warm-start QAOA (no continuous relaxation needed for max cut).
        presolver = GoemansWilliamsonOptimizer(num_cuts=10)
        problem = Maxcut(graph).to_quadratic_program()
        backend = BasicAer.get_backend("statevector_simulator")
        qaoa = QAOA(quantum_instance=backend, reps=1)
        aggregator = MeanAggregator()
        optimizer = WarmStartQAOAOptimizer(
            pre_solver=presolver,
            relax_for_pre_solver=False,
            qaoa=qaoa,
            epsilon=0.25,
            num_initial_solutions=10,
            aggregator=aggregator,
        )
        result_warm = optimizer.solve(problem)
        self.assertIsNotNone(result_warm)
        self.assertIsNotNone(result_warm.x)
        # Known optimum: node 2 alone on one side, cut value 4.
        np.testing.assert_almost_equal([0, 0, 1, 0], result_warm.x, 3)
        self.assertIsNotNone(result_warm.fval)
        np.testing.assert_almost_equal(4, result_warm.fval, 3)

    def test_constrained_binary(self):
        """Constrained binary optimization problem."""
        model = Model()
        v = model.binary_var(name="v")
        w = model.binary_var(name="w")
        # pylint:disable=invalid-name
        t = model.binary_var(name="t")
        model.minimize(v + w + t)
        model.add_constraint(2 * v + 10 * w + t <= 3, "cons1")
        model.add_constraint(v + w + t >= 2, "cons2")
        problem = from_docplex_mp(model)
        backend = BasicAer.get_backend("statevector_simulator")
        qaoa = QAOA(quantum_instance=backend, reps=1)
        aggregator = MeanAggregator()
        # SLSQP solves the continuous relaxation to warm-start QAOA.
        optimizer = WarmStartQAOAOptimizer(
            pre_solver=SlsqpOptimizer(),
            relax_for_pre_solver=True,
            qaoa=qaoa,
            epsilon=0.25,
            aggregator=aggregator,
        )
        result_warm = optimizer.solve(problem)
        self.assertIsNotNone(result_warm)
        self.assertIsNotNone(result_warm.x)
        # Feasible optimum: v=1, w=0, t=1 with objective value 2.
        np.testing.assert_almost_equal([1, 0, 1], result_warm.x, 3)
        self.assertIsNotNone(result_warm.fval)
        np.testing.assert_almost_equal(2, result_warm.fval, 3)

    def test_simple_qubo(self):
        """Test on a simple QUBO problem."""
        model = Model()
        # pylint:disable=invalid-name
        u = model.binary_var(name="u")
        v = model.binary_var(name="v")
        # (u - v + 2)^2 is minimized at u=0, v=1, giving value 1.
        model.minimize((u - v + 2) ** 2)
        problem = from_docplex_mp(model)
        backend = BasicAer.get_backend("statevector_simulator")
        qaoa = QAOA(quantum_instance=backend, reps=1)
        optimizer = WarmStartQAOAOptimizer(
            pre_solver=SlsqpOptimizer(),
            relax_for_pre_solver=True,
            qaoa=qaoa,
            epsilon=0.25,
        )
        result_warm = optimizer.solve(problem)
        self.assertIsNotNone(result_warm)
        self.assertIsNotNone(result_warm.x)
        np.testing.assert_almost_equal([0, 1], result_warm.x, 3)
        self.assertIsNotNone(result_warm.fval)
        np.testing.assert_almost_equal(1, result_warm.fval, 3)
| 34.914729 | 77 | 0.657194 |
22619f220fa726cec2795343e028e7b06a184f94 | 1,985 | py | Python | GeneratorInterface/Pythia8Interface/test/Py8JetGun_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | GeneratorInterface/Pythia8Interface/test/Py8JetGun_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | GeneratorInterface/Pythia8Interface/test/Py8JetGun_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
# CMSSW configuration: generate 10 events with the Pythia8 "jet gun",
# firing a small bundle of hadrons inside a kinematic window.
process = cms.Process("TEST")

# Abort (rather than continue) on otherwise-fatal framework exceptions,
# and load the particle data table used by the generator.
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")

# No input data needed: events are produced by the particle gun below.
process.source = cms.Source("EmptySource")

process.generator = cms.EDFilter("Pythia8JetGun",
    maxEventsToPrint = cms.untracked.int32(1),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    pythiaHepMCVerbosity = cms.untracked.bool(True),
    PGunParameters = cms.PSet(
        # Jet constituents by PDG ID: pi+, pi-, two pi0s and a K0_L.
        ParticleID = cms.vint32(211,-211,111,111,130),
        # this defines "absolute" energy spread of particles in the jet
        MinE = cms.double(0.5),
        MaxE = cms.double(2.0),
        # the following params define the boost
        MinP = cms.double(20.0),
        MaxP = cms.double(20.0),
        MinPhi = cms.double(-3.14159265359),
        MaxPhi = cms.double(3.14159265359),
        MinEta = cms.double(-2.4),
        MaxEta = cms.double(2.4)
    ),
    # no detailed pythia6 settings necessary
    PythiaParameters = cms.PSet(
        parameterSets = cms.vstring()
    )
)

# Quiet cerr; cout limited to 2 messages per category.
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger = cms.Service("MessageLogger",
    cerr = cms.untracked.PSet(
        enable = cms.untracked.bool(False)
    ),
    cout = cms.untracked.PSet(
        default = cms.untracked.PSet(
            limit = cms.untracked.int32(2)
        ),
        enable = cms.untracked.bool(True)
    )
)

# Fixed seed so reruns are reproducible.
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
    generator = cms.PSet(
        initialSeed = cms.untracked.uint32(123456789),
        engineName = cms.untracked.string('HepJamesRandom')
    )
)

process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(10)
)

# Write generated events to Py8JetGun.root.
process.GEN = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('Py8JetGun.root')
)

process.p = cms.Path(process.generator)
process.outpath = cms.EndPath(process.GEN)
process.schedule = cms.Schedule(process.p, process.outpath)
b9dff35f62ddf4feb76c22e777e0cf76411193f3 | 16,632 | py | Python | sopel/modules/meetbot.py | RickardBremer/sopomania | bc336860e53475aea786ca5f2a067f2bf0197294 | [
"EFL-2.0"
] | null | null | null | sopel/modules/meetbot.py | RickardBremer/sopomania | bc336860e53475aea786ca5f2a067f2bf0197294 | [
"EFL-2.0"
] | null | null | null | sopel/modules/meetbot.py | RickardBremer/sopomania | bc336860e53475aea786ca5f2a067f2bf0197294 | [
"EFL-2.0"
] | null | null | null | # coding=utf-8
"""
meetbot.py - Sopel meeting logger module
Copyright © 2012, Elad Alfassa, <elad@fedoraproject.org>
Licensed under the Eiffel Forum License 2.
This module is an attempt to implement at least some of the functionality of Debian's meetbot
"""
import time
import os
from sopel.config.types import (
StaticSection, FilenameAttribute, ValidatedAttribute
)
from sopel.web import quote
from sopel.modules.url import find_title
from sopel.module import example, commands, rule, priority
from sopel.tools import Ddict, Identifier
import codecs
class MeetbotSection(StaticSection):
    """Configuration section ([meetbot]) for the meeting-logger module."""

    meeting_log_path = FilenameAttribute('meeting_log_path', directory=True,
                                         default='~/www/meetings')
    """Path to meeting logs storage directory

    This should be an absolute path, accessible on a webserver."""
    meeting_log_baseurl = ValidatedAttribute(
        'meeting_log_baseurl',
        default='http://localhost/~sopel/meetings'
    )
    """Base URL for the meeting logs directory"""
def configure(config):
    """Interactive configuration hook: prompt for the meetbot settings.

    Called by Sopel's configuration wizard; prompts for the log directory
    and the public base URL in that order.
    """
    config.define_section('meetbot', MeetbotSection)
    config.meetbot.configure_setting(
        'meeting_log_path',
        'Enter the directory to store logs in.'
    )
    config.meetbot.configure_setting(
        'meeting_log_baseurl',
        'Enter the base URL for the meeting logs.',
    )
def setup(bot):
    """Module setup hook: register the [meetbot] config section with the bot."""
    bot.config.define_section('meetbot', MeetbotSection)
# Module-level mutable state shared by all the command handlers below.
meetings_dict = Ddict(dict)  # Saves metadata about currently running meetings
"""
meetings_dict is a 2D dict.
Each meeting should have:
channel
time of start
head (can stop the meeting, plus all abilities of chairs)
chairs (can add infolines to the logs)
title
current subject
comments (what people who aren't voiced want to add)
Using channel as the meeting ID as there can't be more than one meeting in a channel at the same time.
"""
meeting_log_path = ''  # To be defined on meeting start as part of sanity checks, used by logging functions so we don't have to pass them bot
meeting_log_baseurl = ''  # To be defined on meeting start as part of sanity checks, used by logging functions so we don't have to pass them bot
meeting_actions = {}  # A dict of channels to the actions that have been created in them. This way we can have .listactions spit them back out later on.
#Get the logfile name for the meeting in the requested channel
#Used by all logging functions
def figure_logfile_name(channel):
    """Return the base log-file name (no extension) for the meeting in *channel*.

    The name is '<UTC start time>_<sluggified title>'; an untitled meeting
    gets the slug 'untitled'.
    """
    # Bug fix: the original used "is 'Untitled meeting'", an identity check
    # that only works by accident of CPython string interning; use equality.
    if meetings_dict[channel]['title'] == 'Untitled meeting':
        name = 'untitled'
    else:
        name = meetings_dict[channel]['title']
    # Real simple sluggifying. This bunch of characters isn't exhaustive, but
    # whatever. It's close enough for most situations, I think.
    for c in ' ./\\:*?"<>|&*`':
        name = name.replace(c, '-')
    timestring = time.strftime('%Y-%m-%d-%H:%M', time.gmtime(meetings_dict[channel]['start']))
    filename = timestring + '_' + name
    return filename
#Start HTML log
def logHTML_start(channel):
    """Write the HTML header and opening list for *channel*'s meeting log."""
    started = time.gmtime(meetings_dict[channel]['start'])
    title = '%s at %s, %s' % (
        meetings_dict[channel]['title'],
        channel,
        time.strftime('%Y-%m-%d %H:%M', started),
    )
    path = meeting_log_path + channel + '/' + figure_logfile_name(channel) + '.html'
    header = '<!doctype html>\n<html>\n<head>\n<meta charset="utf-8">\n<title>%TITLE%</title>\n</head>\n<body>\n<h1>%TITLE%</h1>\n'.replace('%TITLE%', title)
    with codecs.open(path, 'a', encoding='utf-8') as logfile:
        logfile.write(header)
        logfile.write('<h4>Meeting started by %s</h4><ul>\n' % meetings_dict[channel]['head'])
#Write a list item in the HTML log
def logHTML_listitem(item, channel):
    """Append *item* as an HTML <li> entry to the channel's HTML meeting log."""
    path = meeting_log_path + channel + '/' + figure_logfile_name(channel) + '.html'
    with codecs.open(path, 'a', encoding='utf-8') as logfile:
        logfile.write('<li>' + item + '</li>\n')
#End the HTML log
def logHTML_end(channel):
    """Close the channel's HTML meeting log: footer, full-log link, and end tags."""
    path = meeting_log_path + channel + '/' + figure_logfile_name(channel) + '.html'
    current_time = time.strftime('%H:%M:%S', time.gmtime())
    plainlog_url = meeting_log_baseurl + quote(channel + '/' + figure_logfile_name(channel) + '.log')
    with codecs.open(path, 'a', encoding='utf-8') as logfile:
        logfile.write('</ul>\n<h4>Meeting ended at %s UTC</h4>\n' % current_time)
        logfile.write('<a href="%s">Full log</a>' % plainlog_url)
        logfile.write('\n</body>\n</html>')
#Write a string to the plain text log
def logplain(item, channel):
    """Append a '[HH:MM:SS] item' line to the channel's plain-text meeting log."""
    stamp = time.strftime('%H:%M:%S', time.gmtime())
    path = meeting_log_path + channel + '/' + figure_logfile_name(channel) + '.log'
    with codecs.open(path, 'a', encoding='utf-8') as logfile:
        logfile.write('[' + stamp + '] ' + item + '\r\n')
#Check if a meeting is currently running
def ismeetingrunning(channel):
    """Return True if a meeting is currently running in *channel*."""
    try:
        # 'running' is only set (to True) while a meeting is active.  Catch
        # KeyError specifically: the original bare 'except:' also swallowed
        # KeyboardInterrupt/SystemExit.
        return bool(meetings_dict[channel]['running'])
    except KeyError:
        return False
#Check if nick is a chair or head of the meeting
def ischair(nick, channel):
    """Return True if *nick* is the head or one of the chairs in *channel*.

    Falls back to False when the meeting has no head/chairs recorded.
    """
    try:
        # Narrowed from a bare 'except:' (which also caught
        # KeyboardInterrupt/SystemExit) to the KeyError actually expected
        # when 'head' or 'chairs' has not been set for this channel.
        return (nick.lower() == meetings_dict[channel]['head'] or
                nick.lower() in meetings_dict[channel]['chairs'])
    except KeyError:
        return False
#Start meeting (also performs all required sanity checks)
@commands('startmeeting')
@example('.startmeeting title or .startmeeting')
def startmeeting(bot, trigger):
    """
    Start a meeting.

    Records the meeting metadata (start time, title, head), creates the
    per-channel log directory if needed, and opens both log files.

    https://github.com/sopel-irc/sopel/wiki/Using-the-meetbot-module
    """
    if ismeetingrunning(trigger.sender):
        bot.say('Can\'t do that, there is already a meeting in progress here!')
        return
    if trigger.is_privmsg:
        bot.say('Can only start meetings in channels')
        return
    #Start the meeting
    meetings_dict[trigger.sender]['start'] = time.time()
    if not trigger.group(2):
        meetings_dict[trigger.sender]['title'] = 'Untitled meeting'
    else:
        meetings_dict[trigger.sender]['title'] = trigger.group(2)
    meetings_dict[trigger.sender]['head'] = trigger.nick.lower()
    meetings_dict[trigger.sender]['running'] = True
    meetings_dict[trigger.sender]['comments'] = []
    # Cache the configured paths in module globals so the logging helpers
    # don't need access to 'bot'; normalize to a trailing slash.
    global meeting_log_path
    meeting_log_path = bot.config.meetbot.meeting_log_path
    if not meeting_log_path.endswith('/'):
        meeting_log_path = meeting_log_path + '/'
    global meeting_log_baseurl
    meeting_log_baseurl = bot.config.meetbot.meeting_log_baseurl
    if not meeting_log_baseurl.endswith('/'):
        meeting_log_baseurl = meeting_log_baseurl + '/'
    if not os.path.isdir(meeting_log_path + trigger.sender):
        try:
            os.makedirs(meeting_log_path + trigger.sender)
        except Exception:
            # Roll back the meeting state before propagating the error.
            bot.say("Can't create log directory for this channel, meeting not started!")
            meetings_dict[trigger.sender] = Ddict(dict)
            raise
            # NOTE(review): this 'return' is unreachable after 'raise'.
            return
    #Okay, meeting started!
    logplain('Meeting started by ' + trigger.nick.lower(), trigger.sender)
    logHTML_start(trigger.sender)
    meeting_actions[trigger.sender] = []
    bot.say('Meeting started! use .action, .agreed, .info, .chairs, .subject and .comments to control the meeting. to end the meeting, type .endmeeting')
    bot.say('Users without speaking permission can use .comment ' +
            trigger.sender + ' followed by their comment in a PM with me to '
            'vocalize themselves.')
#Change the current subject (will appear as <h3> in the HTML log)
@commands('subject')
@example('.subject roll call')
def meetingsubject(bot, trigger):
    """
    Change the meeting subject.

    Chairs/head only. The subject becomes an <h3> section heading in the
    HTML minutes and is noted in the plain-text log.

    https://github.com/sopel-irc/sopel/wiki/Using-the-meetbot-module
    """
    if not ismeetingrunning(trigger.sender):
        bot.say('Can\'t do that, start meeting first')
        return
    if not trigger.group(2):
        bot.say('what is the subject?')
        return
    if not ischair(trigger.nick, trigger.sender):
        bot.say('Only meeting head or chairs can do that')
        return
    meetings_dict[trigger.sender]['current_subject'] = trigger.group(2)
    # Close the current <ul>, emit the heading, and reopen a list for the
    # items logged under the new subject.
    logfile = codecs.open(meeting_log_path + trigger.sender + '/' + figure_logfile_name(trigger.sender) + '.html', 'a', encoding='utf-8')
    logfile.write('</ul><h3>' + trigger.group(2) + '</h3><ul>')
    logfile.close()
    logplain('Current subject: ' + trigger.group(2) + ', (set by ' + trigger.nick + ')', trigger.sender)
    bot.say('Current subject: ' + trigger.group(2))
#End the meeting
@commands('endmeeting')
@example('.endmeeting')
def endmeeting(bot, trigger):
    """
    End a meeting.

    Chairs/head only. Finalizes the HTML minutes, announces their URL, and
    clears the per-channel meeting state.

    https://github.com/sopel-irc/sopel/wiki/Using-the-meetbot-module
    """
    if not ismeetingrunning(trigger.sender):
        bot.say('Can\'t do that, start meeting first')
        return
    if not ischair(trigger.nick, trigger.sender):
        bot.say('Only meeting head or chairs can do that')
        return
    meeting_length = time.time() - meetings_dict[trigger.sender]['start']
    #TODO: Humanize time output
    bot.say("Meeting ended! total meeting length %d seconds" % meeting_length)
    logHTML_end(trigger.sender)
    htmllog_url = meeting_log_baseurl + quote(trigger.sender + '/' + figure_logfile_name(trigger.sender) + '.html')
    logplain('Meeting ended by %s, total meeting length %d seconds' % (trigger.nick, meeting_length), trigger.sender)
    bot.say('Meeting minutes: ' + htmllog_url)
    # Reset channel state so a new meeting can start later.
    meetings_dict[trigger.sender] = Ddict(dict)
    del meeting_actions[trigger.sender]
#Set meeting chairs (people who can control the meeting)
@commands('chairs')
@example('.chairs Tyrope Jason elad')
def chairs(bot, trigger):
    """
    Set the meeting chairs.

    Only the meeting head may set chairs. Nicks are space-separated and
    stored lowercased.

    https://github.com/sopel-irc/sopel/wiki/Using-the-meetbot-module
    """
    if not ismeetingrunning(trigger.sender):
        bot.say('Can\'t do that, start meeting first')
        return
    if not trigger.group(2):
        bot.say('Who are the chairs?')
        return
    if trigger.nick.lower() == meetings_dict[trigger.sender]['head']:
        meetings_dict[trigger.sender]['chairs'] = trigger.group(2).lower().split(' ')
        chairs_readable = trigger.group(2).lower().replace(' ', ', ')
        logplain('Meeting chairs are: ' + chairs_readable, trigger.sender)
        logHTML_listitem('<span style="font-weight: bold">Meeting chairs are: </span>' + chairs_readable, trigger.sender)
        bot.say('Meeting chairs are: ' + chairs_readable)
    else:
        bot.say("Only meeting head can set chairs")
#Log action item in the HTML log
@commands('action')
@example('.action elad will develop a meetbot')
def meetingaction(bot, trigger):
    """
    Log an action in the meeting log

    Chairs/head only. Actions are also kept in memory so .listactions can
    repeat them later in the meeting.

    https://github.com/sopel-irc/sopel/wiki/Using-the-meetbot-module
    """
    if not ismeetingrunning(trigger.sender):
        bot.say('Can\'t do that, start meeting first')
        return
    if not trigger.group(2):
        bot.say('try .action someone will do something')
        return
    if not ischair(trigger.nick, trigger.sender):
        bot.say('Only meeting head or chairs can do that')
        return
    logplain('ACTION: ' + trigger.group(2), trigger.sender)
    logHTML_listitem('<span style="font-weight: bold">Action: </span>' + trigger.group(2), trigger.sender)
    meeting_actions[trigger.sender].append(trigger.group(2))
    bot.say('ACTION: ' + trigger.group(2))
@commands('listactions')
@example('.listactions')
def listactions(bot, trigger):
    """Repeat every action item recorded so far in the current meeting."""
    if not ismeetingrunning(trigger.sender):
        bot.say('Can\'t do that, start meeting first')
        return
    for action in meeting_actions[trigger.sender]:
        bot.say('ACTION: ' + action)
#Log agreed item in the HTML log
@commands('agreed')
@example('.agreed Bowties are cool')
def meetingagreed(bot, trigger):
    """
    Log an agreement in the meeting log.

    Chairs/head only.

    https://github.com/sopel-irc/sopel/wiki/Using-the-meetbot-module
    """
    if not ismeetingrunning(trigger.sender):
        bot.say('Can\'t do that, start meeting first')
        return
    if not trigger.group(2):
        # Bug fix: the usage hint was copy/pasted from .action; describe
        # the .agreed command instead.
        bot.say('try .agreed we agree on something')
        return
    if not ischair(trigger.nick, trigger.sender):
        bot.say('Only meeting head or chairs can do that')
        return
    logplain('AGREED: ' + trigger.group(2), trigger.sender)
    logHTML_listitem('<span style="font-weight: bold">Agreed: </span>' + trigger.group(2), trigger.sender)
    bot.say('AGREED: ' + trigger.group(2))
#Log link item in the HTML log
@commands('link')
@example('.link http://example.com')
def meetinglink(bot, trigger):
    """
    Log a link in the meeting log.

    Chairs/head only. The page title is fetched on a best-effort basis and
    included in the HTML minutes.

    https://github.com/sopel-irc/sopel/wiki/Using-the-meetbot-module
    """
    if not ismeetingrunning(trigger.sender):
        bot.say('Can\'t do that, start meeting first')
        return
    if not trigger.group(2):
        # Bug fix: the usage hint was copy/pasted from .action; describe
        # the .link command instead.
        bot.say('try .link http://example.com')
        return
    if not ischair(trigger.nick, trigger.sender):
        bot.say('Only meeting head or chairs can do that')
        return
    link = trigger.group(2)
    if not link.startswith("http"):
        link = "http://" + link
    try:
        title = find_title(link)
    except Exception:
        # Title lookup is best-effort; never let a fetch failure kill the
        # command.  Narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit.
        title = ''
    logplain('LINK: %s [%s]' % (link, title), trigger.sender)
    logHTML_listitem('<a href="%s">%s</a>' % (link, title), trigger.sender)
    bot.say('LINK: ' + link)
#Log informational item in the HTML log
@commands('info')
@example('.info all board members present')
def meetinginfo(bot, trigger):
    """
    Log an informational item in the meeting log

    Chairs/head only.

    https://github.com/sopel-irc/sopel/wiki/Using-the-meetbot-module
    """
    if not ismeetingrunning(trigger.sender):
        bot.say('Can\'t do that, start meeting first')
        return
    if not trigger.group(2):
        bot.say('try .info some informative thing')
        return
    if not ischair(trigger.nick, trigger.sender):
        bot.say('Only meeting head or chairs can do that')
        return
    logplain('INFO: ' + trigger.group(2), trigger.sender)
    logHTML_listitem(trigger.group(2), trigger.sender)
    bot.say('INFO: ' + trigger.group(2))
#called for every single message
#Will log to plain text only
#called for every single message
#Will log to plain text only
@rule('(.*)')
@priority('low')
def log_meeting(bot, trigger):
    """Log every channel message to the plain-text log while a meeting runs.

    Meetbot's own commands are skipped here; their handlers log them with
    proper formatting.
    """
    if not ismeetingrunning(trigger.sender):
        return
    # str.startswith accepts a tuple of prefixes; this replaces eight
    # chained startswith() calls with a single test.
    if trigger.startswith(('.endmeeting', '.chairs', '.action', '.info',
                           '.startmeeting', '.agreed', '.link', '.subject')):
        return
    logplain('<' + trigger.nick + '> ' + trigger, trigger.sender)
@commands('comment')
def take_comment(bot, trigger):
    """
    Log a comment, to be shown with other comments when a chair uses .comments.
    Intended to allow commentary from those outside the primary group of people
    in the meeting.

    Used in private message only, as `.comment <#channel> <comment to add>`

    https://github.com/sopel-irc/sopel/wiki/Using-the-meetbot-module
    """
    if not trigger.sender.is_nick():
        # Only usable in a PM; ignore channel usage silently.
        return
    if not trigger.group(4):  # fewer than 2 arguments were given
        bot.say('Usage: .comment <#channel> <comment to add>')
        return
    target, message = trigger.group(2).split(None, 1)
    target = Identifier(target)
    if not ismeetingrunning(target):
        bot.say("There's not currently a meeting in that channel.")
    else:
        meetings_dict[trigger.group(3)]['comments'].append((trigger.nick, message))
        bot.say("Your comment has been recorded. It will be shown when the"
                " chairs tell me to show the comments.")
        # Ping the meeting head so the comment isn't missed.
        bot.msg(meetings_dict[trigger.group(3)]['head'], "A new comment has been recorded.")
@commands('comments')
def show_comments(bot, trigger):
    """
    Show the comments that have been logged for this meeting with .comment.

    Chairs/head only. Echoes each queued (nick, message) pair to the channel
    and the plain-text log, then clears the queue.

    https://github.com/sopel-irc/sopel/wiki/Using-the-meetbot-module
    """
    if not ismeetingrunning(trigger.sender):
        return
    if not ischair(trigger.nick, trigger.sender):
        bot.say('Only meeting head or chairs can do that')
        return
    comments = meetings_dict[trigger.sender]['comments']
    if comments:
        msg = 'The following comments were made:'
        bot.say(msg)
        logplain('<%s> %s' % (bot.nick, msg), trigger.sender)
        for comment in comments:
            msg = '<%s> %s' % comment
            bot.say(msg)
            logplain('<%s> %s' % (bot.nick, msg), trigger.sender)
        # Comments are shown once, then discarded.
        meetings_dict[trigger.sender]['comments'] = []
    else:
        bot.say('No comments have been logged.')
| 38.059497 | 275 | 0.666186 |
17c11596d76e181c7c1e809f0d3c52709de8b448 | 3,185 | py | Python | Vol1B/GMRES/gmres.py | joshualy/numerical_computing | 9f474e36fe85ae663bd20e2f2d06265d1f095173 | [
"CC-BY-3.0"
] | null | null | null | Vol1B/GMRES/gmres.py | joshualy/numerical_computing | 9f474e36fe85ae663bd20e2f2d06265d1f095173 | [
"CC-BY-3.0"
] | null | null | null | Vol1B/GMRES/gmres.py | joshualy/numerical_computing | 9f474e36fe85ae663bd20e2f2d06265d1f095173 | [
"CC-BY-3.0"
] | 1 | 2019-11-05T14:45:03.000Z | 2019-11-05T14:45:03.000Z | """
Vol I Lab __: GMRES
Name:
Date:
"""
#Problem 1: Implement the following function
def gmres(A, b, x0, k=100, tol=1e-8):
    '''Calculate approximate solution of Ax=b using GMRES algorithm.

    INPUTS:
    A    - Callable function that calculates Ax for any input vector x.
    b    - A NumPy array of length m.
    x0   - An arbitrary initial guess.
    k    - Maximum number of iterations of the GMRES algorithm. Defaults to 100.
    tol  - Stop iterating if the residual is less than 'tol'. Defaults to 1e-8.

    RETURN:
    Return (y, res) where 'y' is an approximate solution to Ax=b and 'res'
    is the residual.
    '''
    # Implemented: Arnoldi iteration builds an orthonormal Krylov basis;
    # each step solves the small Hessenberg least-squares problem.
    import numpy as np

    b = np.asarray(b, dtype=float)
    x0 = np.asarray(x0, dtype=float)
    # Residual of the initial guess seeds the Krylov subspace.
    r0 = b - np.asarray(A(x0), dtype=float)
    beta = np.linalg.norm(r0)
    if beta < tol or k < 1:
        return x0, beta
    m = b.size
    Q = np.zeros((m, k + 1))    # Orthonormal basis of the Krylov subspace.
    H = np.zeros((k + 1, k))    # Upper Hessenberg projection of A.
    Q[:, 0] = r0 / beta
    # Right-hand side of the least-squares problem: beta * e1.
    e1 = np.zeros(k + 1)
    e1[0] = beta
    for j in range(k):
        # Arnoldi step: expand with A*q_j, then orthogonalize against all
        # previous basis vectors (modified Gram-Schmidt).
        q = np.array(A(Q[:, j]), dtype=float)
        for i in range(j + 1):
            H[i, j] = np.dot(Q[:, i], q)
            q = q - H[i, j] * Q[:, i]
        H[j + 1, j] = np.linalg.norm(q)
        # Solve min ||H_j y - beta*e1|| for the current subspace size.
        Hj = H[:j + 2, :j + 1]
        y = np.linalg.lstsq(Hj, e1[:j + 2], rcond=None)[0]
        res = np.linalg.norm(Hj.dot(y) - e1[:j + 2])
        # Stop on convergence or "happy breakdown" (subspace exhausted,
        # which means the current least-squares solution is exact).
        if res < tol or H[j + 1, j] < 1e-14:
            return x0 + Q[:, :j + 1].dot(y), res
        Q[:, j + 1] = q / H[j + 1, j]
    return x0 + Q[:, :k].dot(y), res
#Problem 2: Implement the following two functions
def plot_gmres(A, b, x0, tol=1e-8):
    '''Use the GMRES algorithm to approximate the solution to Ax=b. Plot the
    eigenvalues of A and the convergence of the algorithm.

    INPUTS:
    A   - A 2-D NumPy array of shape mxm.
    b   - A 1-D NumPy array of length m.
    x0  - An arbitrary initial guess.
    tol - Stop iterating and create the desired plots when the residual is
          less than 'tol'. Defaults to 1e-8.

    OUTPUT:
    Follow the GMRES algorithm until the residual is less than tol, for a
    maximum of m iterations. Then create the two following plots (subplots
    of a single figure):
      1. Plot the eigenvalues of A in the complex plane.
      2. Plot the convergence of the GMRES algorithm by plotting the
         iteration number on the x-axis and the residual on the y-axis.
         Use a log scale on the y-axis.
    '''
    # Not yet implemented: will require matplotlib for the eigenvalue and
    # convergence subplots described above.
    raise NotImplementedError("Problem 2 incomplete.")
def make_plots(m=200):
    '''Create the matrix An defined in problem 2 in the manual
    for n = -4, -2, -0, 2, 4. Call plot_gmres on each, with b
    a vector of ones, and an initial guess x0 a vector of zeros.
    Print a statement explaining how the convergence relates to
    the eigenvalues.
    '''
    # Not yet implemented: depends on plot_gmres (and thus matplotlib).
    raise NotImplementedError("make_plots not yet implemented.")
#Problem 3: Implement the following two functions
def gmres_k(Amul, b, x0, k=5, tol=1E-8, restarts=50):
    '''Use the GMRES(k) algorithm to approximate the solution to Ax=b.

    INPUTS:
    Amul     - A callable function that calculates Ax for any vector x.
    b        - A NumPy array.
    x0       - An arbitrary initial guess.
    k        - Maximum number of iterations of the GMRES algorithm before
               restarting. Defaults to 5.
    tol      - Stop iterating if the residual is less than 'tol'. Defaults
               to 1E-8.
    restarts - Maximum number of restarts. Defaults to 50.

    OUTPUT:
    Return (y, res) where 'y' is an approximate solution to Ax=b and 'res'
    is the residual.
    '''
    # One initial run of plain GMRES, then up to 'restarts' restarts, each
    # discarding the old Krylov basis and starting from the current iterate.
    x, res = gmres(Amul, b, x0, k=k, tol=tol)
    for _ in range(restarts):
        if res < tol:
            break
        x, res = gmres(Amul, b, x, k=k, tol=tol)
    return x, res
def time_gmres(m=200):
    '''Time the gmres and gmres_k functions on each of the matrices
    from problem 2. Let x0 be a vector of zeros or anything you like.
    The results might be more dramatic with an x0 of larger magnitude.
    Print your results. What do you observe?
    '''
    # Not yet implemented: benchmarking exercise for the lab.
    raise NotImplementedError("time_gmres not yet implemented.")
| 35.786517 | 80 | 0.669702 |
39e0b45eaefc01e65453322b327208514a6de46a | 3,404 | py | Python | test/models/test_rbm.py | pascal-vecsei/netket | 3985355234183e3f5a5a97d542a6240d490dcda2 | [
"Apache-2.0"
] | 1 | 2022-03-06T03:41:57.000Z | 2022-03-06T03:41:57.000Z | test/models/test_rbm.py | balakrish181/netket | fc96a828e1ac71a7ecd16b4b178ca689e1871958 | [
"Apache-2.0"
] | 4 | 2022-02-21T01:35:07.000Z | 2022-03-28T01:32:58.000Z | test/models/test_rbm.py | tobiaswiener/netket | ab1fc406f7765e7191fc9ebbb01f766fa4c627a2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netket as nk
import numpy as np
import jax
import jax.numpy as jnp
from jax.nn.initializers import uniform
from .test_nn import _setup_symm
import pytest
@pytest.mark.parametrize("use_hidden_bias", [True, False])
@pytest.mark.parametrize("use_visible_bias", [True, False])
@pytest.mark.parametrize("symmetries", ["trans", "autom"])
def test_RBMSymm(use_hidden_bias, use_visible_bias, symmetries):
    """RBMSymm output must be invariant under the symmetry group, and the
    model must be usable in a VMC optimization step."""
    g, hi, perms = _setup_symm(symmetries, N=8)
    ma = nk.models.RBMSymm(
        symmetries=perms,
        alpha=4,
        use_visible_bias=use_visible_bias,
        use_hidden_bias=use_hidden_bias,
        hidden_bias_init=uniform(),
        visible_bias_init=uniform(),
    )
    pars = ma.init(nk.jax.PRNGKey(), hi.random_state(nk.jax.PRNGKey(), 1))
    print(pars)
    # Applying any permutation of the group to the inputs must leave the
    # model output unchanged.
    v = hi.random_state(jax.random.PRNGKey(1), 3)
    vals = [ma.apply(pars, v[..., p]) for p in np.asarray(perms)]
    for val in vals:
        assert jnp.allclose(val, vals[0])
    # One VMC step as an end-to-end smoke test.
    vmc = nk.VMC(
        nk.operator.Ising(hi, g, h=1.0),
        nk.optimizer.Sgd(0.1),
        nk.sampler.MetropolisLocal(hi),
        ma,
    )
    vmc.advance(1)
def test_RBMSymm_creation():
    """RBMSymm construction: valid symmetry arguments succeed, invalid
    shapes and too-small alpha raise ValueError."""
    hi = nk.hilbert.Spin(1 / 2, N=8)

    def check_init(creator):
        # Helper: build the model and run parameter init on a small batch.
        ma = creator()
        _ = ma.init(nk.jax.PRNGKey(0), hi.numbers_to_states(np.arange(3)))

    perms = [[0, 1, 2, 3, 4, 5, 6, 7]]

    # Test different permutation argument types
    check_init(lambda: nk.models.RBMSymm(symmetries=perms))
    check_init(lambda: nk.models.RBMSymm(symmetries=jnp.array(perms)))

    # wrong shape
    with pytest.raises(ValueError):
        check_init(lambda: nk.models.RBMSymm(symmetries=perms[0]))

    # init with PermutationGroup
    check_init(
        lambda: nk.models.RBMSymm(
            symmetries=nk.graph.Chain(8).translation_group(), alpha=2
        )
    )

    # alpha too small
    with pytest.raises(ValueError):
        check_init(
            lambda: nk.models.RBMSymm(
                symmetries=nk.graph.Hypercube(8, 2).automorphisms(), alpha=1
            )
        )
@pytest.mark.parametrize("use_hidden_bias", [True, False])
@pytest.mark.parametrize("use_visible_bias", [True, False])
def test_RBMMultiVal(use_hidden_bias, use_visible_bias):
    """RBMMultiVal on a Fock space must initialize and run one VMC step."""
    N = 8
    M = 3
    # Fock space with up to M bosons per site; hence M + 1 local values.
    hi = nk.hilbert.Fock(M, N)
    g = nk.graph.Chain(N)

    ma = nk.models.RBMMultiVal(
        alpha=2,
        n_classes=M + 1,
        use_visible_bias=use_visible_bias,
        use_hidden_bias=use_hidden_bias,
        hidden_bias_init=uniform(),
        visible_bias_init=uniform(),
    )
    _ = ma.init(nk.jax.PRNGKey(), hi.random_state(nk.jax.PRNGKey(), 1))

    # One VMC step as an end-to-end smoke test.
    vmc = nk.VMC(
        nk.operator.BoseHubbard(hi, g, U=1.0),
        nk.optimizer.Sgd(0.1),
        nk.sampler.MetropolisLocal(hi),
        ma,
    )
    vmc.advance(1)
| 29.094017 | 76 | 0.658049 |
62f31c94abc4d349173982411e04fe55b17f315f | 300 | py | Python | abstract.py | vasmedvedev/automation | 98cdf5c577fc4dd57b2d9b7ccfd96e849394cc95 | [
"MIT"
] | null | null | null | abstract.py | vasmedvedev/automation | 98cdf5c577fc4dd57b2d9b7ccfd96e849394cc95 | [
"MIT"
] | null | null | null | abstract.py | vasmedvedev/automation | 98cdf5c577fc4dd57b2d9b7ccfd96e849394cc95 | [
"MIT"
] | null | null | null | class Proxy(object):
"""
Wrapper around xmlrpclib.ServerProxy with platform API methods
"""
    def __init__(self, server_proxy):
        """
        :param server_proxy: XMLRPC server proxy
        :type server_proxy: xmlrpclib.ServerProxy
        """
        # Keep a handle on the underlying XML-RPC proxy; API calls are
        # expected to be dispatched through self.proxy.
        self.proxy = server_proxy
13615efa3c0777524b45382ac9b6791566f230de | 8,956 | py | Python | old_envs/yahtzee_singleV5.py | tomarbeiter/yahtzee-rl | 51b09ca059740e798061fdeb31cda2b56255ca07 | [
"Apache-2.0"
] | null | null | null | old_envs/yahtzee_singleV5.py | tomarbeiter/yahtzee-rl | 51b09ca059740e798061fdeb31cda2b56255ca07 | [
"Apache-2.0"
] | null | null | null | old_envs/yahtzee_singleV5.py | tomarbeiter/yahtzee-rl | 51b09ca059740e798061fdeb31cda2b56255ca07 | [
"Apache-2.0"
] | null | null | null | from yahtzee_api.game import Game
import ast
import copy
import numpy as np
class YahtzeeSinglePlayerV5:
"""Built to work with compressed state space of agentV6."""
    def __init__(self):
        """Create a fresh single-player Yahtzee game environment."""
        self.ytz = Game(1)  # Initialize 1-player game of Yahtzee
    def step(self, action, action_space, state_space):
        """Executes a single timestep in the environment.

        Args:
            action (int): index into ``action_space`` selecting the action.
                Actions 0-12 score the corresponding index of the player's
                scorecard.  Actions 13-75 re-roll, keeping ("freezing") the
                dice whose face values appear in ``action_space[action]``,
                so the agent can pursue a particular score while re-rolling
                the rest.  Action 76 re-rolls all five dice.
                (NOTE(review): an earlier docstring described a 13-25 range;
                the code below clearly handles 13-75 plus 76.)
                See the Yahtzee API docs at
                https://yahtzee-api.tomarbeiter.com for more info
                about how the scorecard, dice rolling, etc. works.
            action_space (list): reference list of the possible actions.
            state_space: mapping from state id (str) to state data
                (presumably a dict; indexed by the ids from _id_state).

        Returns:
            Tuple: State (str), possible actions (list), reward (int), done flag (bool), debug (str)
        """
        # Roll dice to pursue certain score
        if action > 12 and action < 76:
            # Reward is based on state before dice are rolled
            state = self._id_state(state_space)
            reward = self._make_reward(action_space[action], state_space[state], action_space)
            dice_to_roll = self._translate_action(action_space[action])
            self.ytz.c_player.roll(dice_to_roll)
            new_state = self._id_state(state_space)
            actions = self._id_actions(state_space[new_state], action_space)
            return (new_state, actions, reward, False, "Dice were rolled.")
        if action == 76:
            # Roll all dice: no reward is assigned to an unconditional re-roll.
            self.ytz.c_player.roll([0, 0, 0, 0, 0])
            state = self._id_state(state_space)
            actions = self._id_actions(state_space[state], action_space)
            return (state, actions, 0, False, "Dice were rolled.")
        # Score
        else:
            state = self._id_state(state_space)
            reward = self._make_reward(action_space[action], state_space[state], action_space)
            self.ytz.c_player.end_turn(action)
            new_state = self._id_state(state_space)
            actions = self._id_actions(state_space[new_state], action_space)
            # One player game, so always advance global turn
            self.ytz.next_player()
            # If the game is over, set Done flag
            return (
                new_state,
                actions,
                reward,
                len(self.ytz.winner) != 0,
                str(action) + " was scored."
            )
    def reset(self):
        """Starts a new game of Yahtzee in the same class instance.

        Replaces self.ytz with a fresh one-player Game; the previous Game
        object (and all of its state) is discarded.
        """
        self.ytz = Game(1)
def _translate_action(self, action):
"""Translates the chosen action in step() into dice indices for the API to roll."""
indices = [0, 0, 0, 0, 0]
for i in range(5):
if self.ytz.c_player.dice[i] in action:
indices[i] == 1
return indices
    def _id_actions(self, state, action_space):
        """Identify feasible actions (actions that are actually possible) from the given state.

        Args:
            state: stringified state (as produced by _id_state); parsed back
                with ast.literal_eval.
            action_space: reference list of all actions; entries 13..75 are
                lists of die face values for the rolling actions.

        Returns:
            list: one entry per action -- the scoring index / action list when
            feasible, -1 when infeasible.
        """
        st = ast.literal_eval(state)
        actions = []
        # First get scoring possibilities based on what has already been scored:
        # Little bit a of a hack since the state score list is compressed, I'm going to pick from the actual scorecard
        # ~technically~ it's the same thing
        for i in range(1, 14):
            if self.ytz.c_player.scorecard[i - 1][2] == 0:
                actions.append(i)
            else:
                actions.append(-1)
        # Add any rolling actions that hold dice that have actually been rolled
        dice_count = st[0]
        # NOTE(review): `dice_count` is only used below for its length (6), and
        # the inner loop requires every face 1..6 to appear in action_space[i]
        # (it iterates j over range(6), never consulting the rolled counts).
        # That looks inverted relative to the comments here -- confirm the
        # intended feasibility rule against the Yahtzee API docs.
        for i in range(13, 76):
            check = True
            for j in range(len(dice_count)):
                try:
                    # For each action, see if "x's" are present
                    action_space[i].index(j + 1)
                except ValueError:
                    actions.append(-1)
                    check = False
                    break
            # If we never hit a value error, then all of the dice to keep (given by action)
            # are present in the dice that were rolled.
            if check:
                actions.append(action_space[i])
        return actions
def _id_state(self, state_space):
"""Determines the current state of the game.
Compressing 3/4k, Straights, and removing chance.
This will make it much harder to chooose between them
when scoring for the agent, but it reduces the state space
from over 8 million to under 1 million. Tradeoffs.
"""
state = []
# Count how many of each die are currently rolled.
# 1's -> 6's
dice_count = copy.deepcopy([0, 0, 0, 0, 0, 0])
for i in range(5):
dice_count[self.ytz.c_player.dice[i] - 1] += 1
state.append(dice_count)
# Determine which scores have already been scored. "True" if it is
# "available to score", "False" if already scored.
# 3/4k will be True unless both are scored. Same for Straights.
scored = []
for i in range(13):
if self.ytz.c_player.scorecard[i][2] != 0:
scored.append(False)
else:
scored.append(True)
# Combine 3/4k and straights
flag1 = True
flag2 = True
if not scored[6] and not scored[7]:
flag1 = False
if not scored[9] and not scored[10]:
flag2 = False
scored.pop(6)
scored.pop(6)
scored.pop(7)
scored.pop(7)
scored.insert(6, flag1)
scored.insert(8, flag2)
scored.pop()
state.append(scored)
# Rolls remaining boolean
if self.ytz.c_player.rolls_left == 0:
state.append(False)
else:
state.append(True)
state = str(tuple(state))
return state_space.index(state)
def _make_reward(self, action, state, action_space):
"""Generates rewards.
Note that the values chosen for the top half scoring are meant
to make it relaevant in comparison to the bottom half scores.
3 or more in each top half entry gets the bonus, so that is the line
in the sand of high reward. Still want some reward for less than 3 of that type,
because it is better than 0. Just don't over incentivize scoring 2 1's rather than going
for a full house.
"""
state = ast.literal_eval(state)
# Reward for scoring the top half.
# If they score it with more than 3 of the type, get 30.
# If only 1 or 2, get 15 and 10 respectively.
if type(action) is int and action < 7:
if state[0][action - 1] >= 3:
return 30
elif state[0][action - 1] == 2:
return 15
elif state[0][action - 1] == 1:
return 10
else:
return 1
# Reward for scoring the bottom half
if type(action) is int and action >= 7 and action <= 13:
return self.ytz.c_player.t_scorecard[action - 1][0] if \
self.ytz.c_player.t_scorecard[action - 1][0] != 0 else 1
# Rolling rewards
# These are pretty specific on a case by case basis
# Trying to group rolling actions together;
# i.e. choosing to keep a single type, 2 types, 3 types, etc.
# Keeping 1 type:
if len(action) == 1:
# If the score card of the type of dice kept is unscored, or if Yahtzee is available, or a 3/4k, give reward.
return 30 if state[1][action[0] - 1] == True or state[1][9] == True or state[1][6] == True else 1
elif len(action) == 2:
# if full house is available
return 30 if state[1][7] else 1
# Only real reason to keep 3 types of dice is to go for chance
# but, our state compression got rid of chance. So, I'll just give
# a small reward to not totally disincentivize it. Not sure how this will behave.
elif len(action) == 3:
return 20
elif len(action) == 4:
# Straights
return 30 if state[1][8] else 1
elif len(action) > 4:
# Keeping one of each that we have, so clearly pursuing a straight
return 30 if state[1][8] else 1 | 43.057692 | 121 | 0.571572 |
63d7871485577b7d796c3cfeb13b6a03aa0e0e0d | 1,275 | py | Python | sdb/commands/tail.py | alan-maguire/sdb | f8ce7f8571785dc7b9e3a80200df46b80a483f58 | [
"Apache-2.0"
] | null | null | null | sdb/commands/tail.py | alan-maguire/sdb | f8ce7f8571785dc7b9e3a80200df46b80a483f58 | [
"Apache-2.0"
] | null | null | null | sdb/commands/tail.py | alan-maguire/sdb | f8ce7f8571785dc7b9e3a80200df46b80a483f58 | [
"Apache-2.0"
] | 1 | 2020-08-18T07:57:43.000Z | 2020-08-18T07:57:43.000Z | #
# Copyright 2019 Delphix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=missing-docstring
import argparse
from collections import deque
from typing import Deque, Iterable
import drgn
import sdb
class Tail(sdb.Command):
    # pylint: disable=too-few-public-methods
    names = ["tail"]

    @classmethod
    def _init_parser(cls, name: str) -> argparse.ArgumentParser:
        """Extend the base parser with an optional positional count (default 10)."""
        parser = super()._init_parser(name)
        parser.add_argument("count", nargs="?", default=10, type=int)
        return parser

    def call(self, objs: Iterable[drgn.Object]) -> Iterable[drgn.Object]:
        """Yield only the final `count` objects of the input stream."""
        # a bounded deque retains just the trailing window of the iterable
        last_objs: Deque[drgn.Object] = deque(objs, maxlen=self.args.count)
        yield from last_objs
| 28.977273 | 74 | 0.702745 |
91bcb9a378056ab268122cd158264fd9fe155808 | 4,200 | py | Python | paragen/utils/data.py | godweiyang/ParaGen | 9665d1244ea38a41fc06b4e0a7f6411985e2221f | [
"Apache-2.0"
] | 50 | 2022-01-18T07:25:46.000Z | 2022-03-14T13:06:18.000Z | paragen/utils/data.py | JiangtaoFeng/ParaGen | 509334bf16e3674e009bb9dc37ecc38ae3b5c977 | [
"Apache-2.0"
] | 2 | 2022-01-19T09:36:42.000Z | 2022-02-23T07:16:02.000Z | paragen/utils/data.py | JiangtaoFeng/ParaGen | 509334bf16e3674e009bb9dc37ecc38ae3b5c977 | [
"Apache-2.0"
] | 6 | 2022-01-19T09:28:53.000Z | 2022-03-10T10:20:08.000Z | from typing import Dict, List
import json
import random
import numpy as np
from paragen.utils.io import SPACE_NORMALIZER
def split_tgt_sequence(tgt, bos, eos):
    """
    Split gold target into previous tokens and prediction target.
    For examples in text, `[hello, world, !] -> [hello, world, !, <eos>], [<bos>, hello, world, !]`
    Args:
        tgt: batch of target sequences (list of token lists)
        bos: begin-of-sequence index
        eos: end-of-sequence index
    Returns:
        - prediction target (each sequence ending with `eos`)
        - previous tokens (each sequence starting with `bos`)
        (note: the original docstring listed these in the wrong order)
    """
    if not tgt:
        # empty batch: the tgt[0] probe below would raise IndexError
        return [], []
    if len(tgt[0]) > 0 and tgt[0][0] == bos and tgt[0][-1] == eos:
        # sequences already wrapped with bos/eos: strip one end from each
        prev_tokens = [v[:-1] for v in tgt]
        tgt = [v[1:] for v in tgt]
    else:
        # bare sequences: add bos in front / eos at the end
        prev_tokens = [[bos] + v for v in tgt]
        tgt = [v + [eos] for v in tgt]
    return tgt, prev_tokens
def reorganize(samples: List[Dict]):
    """
    Transpose a list of per-sample dicts into a single dict of per-key lists,
    using the first sample's keys as the grouping keys.
    Args:
        - samples: a list of samples
    """
    grouped = dict()
    for key in samples[0]:
        grouped[key] = []
    for sample in samples:
        for key, value in sample.items():
            grouped[key].append(value)
    return grouped
def count_sample_token(sample):
    """
    Recursively count tokens in a sample.
    Strings are split on whitespace via SPACE_NORMALIZER; lists and dicts
    are counted element-wise; any other scalar counts as one token.
    Args:
        sample: a piece of samples
    Returns:
        - total token numbers
    """
    if isinstance(sample, str):
        return len(SPACE_NORMALIZER.split(sample))
    if isinstance(sample, list):
        return sum(count_sample_token(item) for item in sample)
    if isinstance(sample, Dict):
        return sum(count_sample_token(item) for item in sample.values())
    return 1
def transform_data(key, data):
    """
    Wrap `data` under `key`; if the entries are themselves dicts, flatten
    the table through transform_table instead.
    Args:
        key: field name to group the data under
        data: list of values or list of key->values dicts
    Returns:
        - a dict keyed by `key`, or the flattened key/value table
    """
    first = data[0]
    if isinstance(first, Dict):
        return transform_table(data)
    return {key: data}
def transform_table(table):
    """
    Flatten a list of key->values dicts, repeating each key once per value
    so that keys stay aligned with values.
    Args:
        table: table defining key-value pairs
    Returns:
        - unsqueezed key-value dict
    """
    all_keys, all_values = [], []
    for sample in table:
        row_keys, row_values = [], []
        for key, vals in sample.items():
            for val in vals:
                row_keys.append(key)
                row_values.append(val)
        all_keys.append(row_keys)
        all_values.append(row_values)
    return {'key': all_keys, 'value': all_values}
def mask_seq(seq: List, p: float, mask='<mask>'):
    """
    Independently replace each token with `mask` with probability `p`.
    Args:
        seq: original sequence
        p: mask probability
        mask: mask token
    Returns:
        - a new sequence with tokens masked
    """
    return [mask if random.random() < p else token for token in seq]
def delete_token(seq: List, p: float):
    """
    Independently drop each token with probability `p`.
    Args:
        seq: original sequence
        p: drop rate
    Returns:
        - a new sequence with tokens randomly removed
    """
    kept = [token for token in seq if random.random() > p]
    return kept
def infill_text(seq: List, lam, mask='<mask>'):
    """
    Replace one random contiguous span with a single `mask` token.
    The span length is Poisson(lam)-distributed, capped at len(seq).
    Args:
        seq: original sequence
        lam: possion lambda
        mask: mask token
    Returns:
        - a new sequence with the span collapsed into `mask`
    """
    span = min(np.random.poisson(lam), len(seq))
    begin = random.randint(0, len(seq) - span)
    return seq[:begin] + [mask] + seq[begin + span:]
def permute(seq: List):
    """
    Permute a sequence uniformly at random.

    NOTE: shuffles `seq` IN PLACE (random.shuffle) and returns the same
    list object, not a copy -- callers holding a reference to `seq` will
    see it mutated.

    Args:
        seq: sequence to be shuffled (must be mutable)
    Returns:
        - the same list, shuffled in place
    """
    random.shuffle(seq)
    return seq
def rotate(seq: List):
    """
    Rotate a sequence by a random offset.
    Args:
        seq: a sequence
    Returns:
        - rotated sequence (a new list; empty input is returned as-is)
    """
    if not seq:
        # random.randint(0, -1) would raise ValueError on an empty sequence
        return seq
    pivot = random.randint(0, len(seq) - 1)
    return seq[pivot:] + seq[:pivot]
def possible_load_json(sample):
    """
    Callback for json data: parse `sample` as JSON if possible, otherwise
    return it unchanged.
    Args:
        sample: data in raw format
    Returns:
        sample: the decoded object when `sample` is valid JSON text,
            otherwise the original input
    """
    try:
        return json.loads(sample)
    except (ValueError, TypeError):
        # not JSON text (json.JSONDecodeError is a ValueError) or not text
        # at all; narrowed from a bare `except:` so KeyboardInterrupt /
        # SystemExit still propagate
        return sample
def possible_eval(x):
    """
    Eval a value if possible, returning the input unchanged on failure.

    SECURITY NOTE: eval() executes arbitrary Python -- never feed this
    function untrusted input (ast.literal_eval would be the safe choice,
    but it rejects expressions that callers may rely on, so the eval is
    kept and only flagged here).
    """
    try:
        return eval(x)
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
        # still propagate
        return x
| 19.905213 | 99 | 0.568095 |
6f73b52c0a0e5b8112d50f2a6f8d35820cc1243c | 9,083 | py | Python | process/TatFeedback.py | donasaur/hiv-model | 97dc3f1461b08067331178e1038a2a21463156bb | [
"MIT"
] | null | null | null | process/TatFeedback.py | donasaur/hiv-model | 97dc3f1461b08067331178e1038a2a21463156bb | [
"MIT"
] | null | null | null | process/TatFeedback.py | donasaur/hiv-model | 97dc3f1461b08067331178e1038a2a21463156bb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This is a process class whose evolve_state method is called
at each timestep.
An instance of TatFeedback is initialized once per Simulation
with the State as input. The states 'host_factors', 'proteins',
and 'reaction_rates' are modified in this process.
At each timestep, evolve_state reads in the amount of Tat in the
nucleus, the amount of pTEFb in the nucleus, the amount of
deacetylated Tat-pTEFb complex, and the amount of acetylated
Tat-pTEFb complex.
The amount of pTEFb in the nucleus during this timestep is given
by the following equation:
pTEFb = pTEFb(from prev. timestep) + pTefb0*exp(R*(t+1))
- pTefb0*exp(R*t)
where pTefb0 is the initial pTefb amount and R is the pTEFb
doubling rate.
Tat and pTEFb moves between its complex form and its free form
in this timestep(composed of 60 second steps), and thus the
quantities of interest above vary during this timestep.
At the end of the timestep, the new amounts of Tat in the
nucleus + pTEFb in the nucleus + deacetylated Tat-pTEFb complex
+ acetylated Tat-pTEFb complex are written back to the State class.
The derived Tat description rate is also changed during this time step
and its new value is written back as well.
Summary of the biology:
This process takes into account the effect of Tat and pTEFb in
the nucleus on the transcription rate of mRNA.
"""
import numpy as np
from scipy.integrate import odeint
from mainaux.Process import Process
from mainaux.InitParamValues import *
#References:
#1. Weinberger, L.S., Burnett, J.C., Toettcher, J.E., Arkin, A.P., Schaffer, D.V. (2005). Stochastic Gene Expression in a Lentiviral Positive-Feedback Loop: HIV-1 Tat Fluctuations Drive Phenotypic Diversity. Cell 122: 169-182.
#2. Kim, H., Yin, J. (2005) In silico mutagenesis of RNA Splicing in HIV-1. Biotechnology and bioengineering 91: 877-893.
# This is a Process Class: models Tat positive feedback on HIV transcription
# (Tat/pTEFb binding and acetylation driving the Tat-derived transcription rate).
class TatFeedback(Process):
    def __init__(self, state, param_dict=None):
        """Cache the shared State object and the kinetic rate constants used by the ODE."""
        self.state = state
        if param_dict==None:
            param_dict = generate_param_dict();
        #Constant parameters
        self.pTEFb_DOUBLING_RATE = param_dict['pTEFb_DOUBLING_RATE']
        self.pTEFb_NUC_INIT = param_dict['pTEFb_NUC_INIT']
        self.RATE_TAT_pTEFb_BIND = param_dict['RATE_TAT_pTEFb_BIND'] #1/(molecules*sec) #Weinberger 2005
        self.RATE_TAT_pTEFb_UNBIND = param_dict['RATE_TAT_pTEFb_UNBIND'] #1/sec #Weinberger 2005
        self.RATE_TAT_pTEFb_ACETYL = param_dict['RATE_TAT_pTEFb_ACETYL'] #1/(molecules*sec) #Weinberger 2005
        self.RATE_TAT_pTEFb_DEACETYL = param_dict['RATE_TAT_pTEFb_DEACETYL'] #1/sec #Weinberger 2005
        self.RATE_TAT_ACT_TRANSCRIPTION = param_dict['RATE_TAT_ACT_TRANSCRIPTION'] #/sec #Weinberger 2005
    # solve the system dy/dt = f(y, t)
    def TatODE(self, y, t):
        """Right-hand side of the Tat/pTEFb ODE system for scipy.integrate.odeint.

        y: [free Tat (nuc), free pTEFb (nuc), deacetylated complex,
            acetylated complex, mRNA produced so far].  `t` is unused
        (autonomous system) but required by odeint's callback signature.

        NOTE(review): RATE_TAT_pTEFb_UNBIND and RATE_TAT_pTEFb_DEACETYL are
        loaded in __init__ but never appear in these equations -- confirm
        whether the unbinding/deacetylation reactions were meant to be modeled.
        """
        Tat_nuc = y[0]
        pTEFb_nuc = y[1]
        Tat_pTEFb_deacetyl = y[2]
        Tat_pTEFb_acetyl = y[3]
        #"mRNA" = y[4]
        # the model equations
        f0 = self.RATE_TAT_ACT_TRANSCRIPTION*Tat_pTEFb_acetyl - self.RATE_TAT_pTEFb_BIND*Tat_nuc*pTEFb_nuc
        f1 = self.RATE_TAT_ACT_TRANSCRIPTION*Tat_pTEFb_acetyl - self.RATE_TAT_pTEFb_BIND*Tat_nuc*pTEFb_nuc
        f2 = self.RATE_TAT_pTEFb_BIND*Tat_nuc*pTEFb_nuc - self.RATE_TAT_pTEFb_ACETYL*Tat_pTEFb_deacetyl
        f3 = self.RATE_TAT_pTEFb_ACETYL*Tat_pTEFb_deacetyl - self.RATE_TAT_ACT_TRANSCRIPTION*Tat_pTEFb_acetyl
        f4 = self.RATE_TAT_ACT_TRANSCRIPTION*Tat_pTEFb_acetyl
        return [f0, f1, f2, f3, f4]
    def ODE_discretizer(self, soln, free_Tat, pTEFb_nuc, Tat_pTEFb_deacetyl, Tat_pTEFb_acetyl):
        """Round the final ODE state to integer molecule counts while conserving Tat and pTEFb totals."""
        #This function discretizes, mass balances, and ensures positive values of ODE solutions for integration with the rest of the system
        #soln: ODE solution
        #prev_mRNA_abundances: abundances of mRNAs before applying the ODE (last timestep)
        #prev_protein_abundances: abundance of free Rev before applying the ODE (last timestep)
        # force exact .5 values up to 1 before np.around (which rounds half to even)
        soln[-1,:][soln[-1,:] == .5] = 1
        soln_round = np.around(soln[-1,:]) #discretize
        soln_round[soln_round<0]=0 #don't allow negatives
        # Tat conservation: free Tat + Tat in both complex forms (bins 0, 2, 3)
        Tat_before = free_Tat + Tat_pTEFb_deacetyl + Tat_pTEFb_acetyl
        Tat_after = np.sum(soln_round[np.array([0,2,3])])
        temp_counter = 0
        while Tat_after != Tat_before: #mass balance (Tat)
            temp_counter +=1
            discrepancy = Tat_after-Tat_before # positive if Tat_after > Tat_before (Tat was created, so need to remove Tat from system)
            array_of_indices_of_interest = [0,2,3]
            temp_index = array_of_indices_of_interest[np.random.randint(0,3)] #randomly pick bins to adjust the discrepancy
            soln_round[temp_index]=soln_round[temp_index]-discrepancy
            soln_round[soln_round<0]=0
            Tat_after = np.sum(soln_round[np.array([0,2,3])])
            # clamping at zero can re-open a discrepancy, so this may loop;
            # the (very large) counter bound is a last-resort escape hatch
            if temp_counter > 9999999999999:
                print('ERROR: Error in Tat mass balance.')
                break
        # pTEFb conservation: free pTEFb + both complex forms (bins 1, 2, 3)
        pTEFb_after = soln_round[1]+soln_round[2]+soln_round[3]
        pTEFb_before = Tat_pTEFb_deacetyl + Tat_pTEFb_acetyl + pTEFb_nuc # Keep in mind pTEFb_nuc has already been incremented; pTEFb_nuc does not represent amount from previous timestep
        if pTEFb_after != pTEFb_before: #mass balance (pTEFb)...care less about this than Tat because shoudl be in abundance from Host
            discrepancy = pTEFb_after - pTEFb_before
            soln_round[1] = soln_round[1]-discrepancy
            if soln_round[1]<0:
                soln_round[1] = 0
                print('ERROR: Error in pTEFb mass balance. Amt of pTEFb in nucleus went below zero')
        free_Tat=soln_round[0]
        pTEFb_nuc=soln_round[1]
        Tat_pTEFb_deacetyl=soln_round[2]
        Tat_pTEFb_acetyl=soln_round[3]
        return [free_Tat, pTEFb_nuc, Tat_pTEFb_deacetyl, Tat_pTEFb_acetyl]
    def evolve_state(self, timestep):
        """Advance the Tat feedback loop by one timestep (60 one-second ODE steps) and write results back to State."""
        # print('The timestep is:')
        # print(timestep)
        #get variables
        host_factor_state = self.state.get_state('host_factors')
        pTEFb_nuc = host_factor_state.pTEFb_nuc
        Tat_pTEFb_deacetyl = host_factor_state.Tat_pTEFb_deacetyl
        Tat_pTEFb_acetyl = host_factor_state.Tat_pTEFb_acetyl
        protein_state = self.state.get_state('proteins')
        proteins_nuc = protein_state.proteins_nuc
        reaction_rate_state = self.state.get_state('reaction_rates')
        #Tat_derived_transcription_rate = reaction_rate_state.Tat_derived_transcription_rate
        #evolve state
        #replenish pTFEb -- exponential doubling as Tcell grows
        # discrete increment between exp(R*t) and exp(R*(t+1)) of the initial pool
        pTEFb_nuc = pTEFb_nuc + np.around(self.pTEFb_NUC_INIT*np.exp(self.pTEFb_DOUBLING_RATE*(timestep+1))) - np.around(self.pTEFb_NUC_INIT*np.exp(self.pTEFb_DOUBLING_RATE*(timestep)))
        #determine the effect of Tat feedback...dependent on the abundance of Tat in the nucleus
        # proteins_nuc[2] is the free-Tat slot (see the write-back below)
        y0 = [proteins_nuc[2], pTEFb_nuc, Tat_pTEFb_deacetyl, Tat_pTEFb_acetyl, 0] # initial condition vector
        # solve the ODEs
        t_seg_Tat = np.linspace(0, 59, 60) # time grid for Tat feedback integration
        soln = odeint(self.TatODE, y0, t_seg_Tat) #use if you have scipy otherwise runge kutta
        #soln = matplotlib.mlab.rk4(TatODE, y0, tsegTat)
        #Accounting and discretizing and mass balance
        [free_Tat, pTEFb_nuc, Tat_pTEFb_deacetyl, Tat_pTEFb_acetyl] = self.ODE_discretizer(soln, proteins_nuc[2], pTEFb_nuc, Tat_pTEFb_deacetyl, Tat_pTEFb_acetyl)
        proteins_nuc[2] = free_Tat
        Tat_derived_transcription_rate = np.max([0, soln[-1,4]]) #allow no negatives
        ##NOTE here, the ODE moves items into bin 4 = "mRNA" to indicate the number of mRNA made in the given minute
        #However, in the Transcription Class, mRNA cannot be made beyond a threshold MAX_TAT_ENHANCEMENT*BASAL_TRANSCRIPTION_RATE
        #When the ODE moves mass out of the Tat_pTEFb_acetyl bin into the mRNA bin, it recycles back Tat and pTEFb to their free forms
        #They must now re-work their ways up to the acetyl state
        #This is a source of error, however, once Tat_feedback gets past this threshold, it usually stays ON, and therefore, this error is likely not associated
        #with a big impact on system dynamics.
        #write back parameters to state object
        # NOTE(review): this reads `protein_state.proteins_nuc` above but writes
        # `protein_state.protein_Nuc` here -- likely a typo'd attribute name
        # (the in-place proteins_nuc[2] update may be what actually persists);
        # confirm against the proteins State class.
        protein_state.protein_Nuc = proteins_nuc # Update the free Tat value
        host_factor_state.pTEFb_nuc = pTEFb_nuc # Update pTEFb value in the nucleus
        host_factor_state.Tat_pTEFb_deacetyl = Tat_pTEFb_deacetyl # Update the pTEFb deacetyl value
        host_factor_state.Tat_pTEFb_acetyl = Tat_pTEFb_acetyl # Update the pTEFb acetyl value
        reaction_rate_state.Tat_derived_transcription_rate = Tat_derived_transcription_rate
        #update state to new values
        #self.state.set_state('proteins', protein_state)
        #self.state.set_state('host_factors', host_factor_state)
| 54.389222 | 227 | 0.703182 |
c551541385c78b134194ab0afae77eb677143ef1 | 467 | py | Python | plotly/validators/scatter3d/error_z/_visible.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/scatter3d/error_z/_visible.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/scatter3d/error_z/_visible.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the `scatter3d.error_z.visible` property."""

    def __init__(
        self, plotly_name='visible', parent_name='scatter3d.error_z', **kwargs
    ):
        # consume validator options from kwargs, falling back to defaults
        edit_type = kwargs.pop('edit_type', 'calc')
        role = kwargs.pop('role', 'info')
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| 29.1875 | 78 | 0.638116 |
c43b6b0c6f01ec48221671029bd2b87c3534ed7f | 3,423 | py | Python | tests/BlazingSQLTest/EndToEndTests/oldScripts/mainE2ELegacyTests.py | mario21ic/blazingsql | 92ed45f5af438fedc8cad82e4ef8ed3f3fb7eed6 | [
"Apache-2.0"
] | null | null | null | tests/BlazingSQLTest/EndToEndTests/oldScripts/mainE2ELegacyTests.py | mario21ic/blazingsql | 92ed45f5af438fedc8cad82e4ef8ed3f3fb7eed6 | [
"Apache-2.0"
] | null | null | null | tests/BlazingSQLTest/EndToEndTests/oldScripts/mainE2ELegacyTests.py | mario21ic/blazingsql | 92ed45f5af438fedc8cad82e4ef8ed3f3fb7eed6 | [
"Apache-2.0"
] | null | null | null | from Configuration import Settings
from Configuration import ExecutionMode
from EndToEndTests.oldScripts import hiveFileTest
from EndToEndTests.oldScripts import unsignedTypeTest
from EndToEndTests.oldScripts import columnBasisTest
from EndToEndTests.oldScripts import dateTest
from EndToEndTests.oldScripts import fileSystemHdfsTest
from EndToEndTests.oldScripts import fileSystemS3Test
from EndToEndTests.oldScripts import fileSystemGSTest
from EndToEndTests.oldScripts import loggingTest
from EndToEndTests.oldScripts import smilesTest
from EndToEndTests.oldScripts import configOptionsTest
from EndToEndTests.oldScripts import tablesFromSQL
from EndToEndTests.oldScripts import concurrentTest
def runLegacyTest(bc, dask_client, drill, spark):
    """Run the legacy end-to-end test suites selected by RunSettings.

    An empty `targetTestGroups` means "run everything"; otherwise only the
    named groups run.  Several suites are additionally gated on the
    nulls-mode and execution-mode settings, and configOptionsTest must
    always run last (see the WARNING comment below).
    """
    targetTestGroups = Settings.data["RunSettings"]["targetTestGroups"]
    nRals = Settings.data["RunSettings"]["nRals"]
    dir_data_file = Settings.data["TestSettings"]["dataDirectory"]
    testsWithNulls = Settings.data["RunSettings"]["testsWithNulls"]
    runAllTests = (
        len(targetTestGroups) == 0
    ) # if targetTestGroups was empty the user wants to run all the tests
    if runAllTests or ("hiveFileTest" in targetTestGroups):
        hiveFileTest.main(dask_client, spark, dir_data_file, bc, nRals)
    if runAllTests or ("unsignedTypeTest" in targetTestGroups):
        unsignedTypeTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("columnBasisTest" in targetTestGroups):
        columnBasisTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("dateTest" in targetTestGroups):
        dateTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    # HDFS is not working yet
    # fileSystemHdfsTest.main(dask_client, drill, dir_data_file, bc)
    # HDFS is not working yet
    # mixedFileSystemTest.main(dask_client, drill, dir_data_file, bc)
    if testsWithNulls != "true":
        if Settings.execution_mode != ExecutionMode.GPUCI:
            if runAllTests or ("fileSystemS3Test" in targetTestGroups):
                fileSystemS3Test.main(dask_client, drill, dir_data_file, bc, nRals)
            if runAllTests or ("fileSystemGSTest" in targetTestGroups):
                fileSystemGSTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("loggingTest" in targetTestGroups):
        loggingTest.main(dask_client, dir_data_file, bc, nRals)
    # TODO re enable this test once we have the new version of dask
    # https://github.com/dask/distributed/issues/4645
    # https://github.com/rapidsai/cudf/issues/7773
    # if runAllTests or ("smilesTest" in targetTestGroups):
    # smilesTest.main(dask_client, spark, dir_data_file, bc, nRals)
    if testsWithNulls == "true":
        if Settings.execution_mode != ExecutionMode.GPUCI:
            if runAllTests or ("tablesFromSQL" in targetTestGroups):
                tablesFromSQL.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    # if testsWithNulls != "true":
    # if runAllTests or ("concurrentTest" in targetTestGroups):
    # concurrentTest.main(dask_client, drill, dir_data_file, bc, nRals)
    # WARNING!!! This Test must be the last one to test ----------------------------------------------------------------
    if runAllTests or ("configOptionsTest" in targetTestGroups):
        configOptionsTest.main(dask_client, drill, spark, dir_data_file, bc, nRals) | 46.256757 | 120 | 0.730646 |
c5fcc439fea34210f0fe52cf80f9e6fa2f32e6a5 | 1,263 | py | Python | backend/venv/src/api/user/admin.py | AkashSDas/camps_for_champs | 1bf7e51905b5b3efc47f94ffcfde7167dace4475 | [
"MIT"
] | null | null | null | backend/venv/src/api/user/admin.py | AkashSDas/camps_for_champs | 1bf7e51905b5b3efc47f94ffcfde7167dace4475 | [
"MIT"
] | null | null | null | backend/venv/src/api/user/admin.py | AkashSDas/camps_for_champs | 1bf7e51905b5b3efc47f94ffcfde7167dace4475 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from .forms import UserAdminChangeForm, UserAdminCreationForm
# The project's active user model (honors a custom AUTH_USER_MODEL if set).
User = get_user_model()
# =========================================================
# Customizing the interface of User model in the Admin Page
# =========================================================
class UserAdmin(BaseUserAdmin):
    # Admin configuration for the custom user model.
    # Form used when editing an existing user.
    form = UserAdminChangeForm
    # Form used when creating a new user through the admin.
    add_form = UserAdminCreationForm
    # Columns shown on the user change-list page.
    list_display = ['email', 'username', 'is_active', 'is_staff', 'is_admin']
    # Sidebar filters available on the change-list page.
    list_filter = ['username']
    # Field layout for the change (edit) page.
    fieldsets = [
        [None, {'fields': ['email', 'password', ]}],
        ['Personal Info', {'fields': ['username', ]}],
        ['Permissions', {'fields': ['is_active', 'is_staff', 'is_admin']}],
    ]
    # Field layout for the add (create) page.
    add_fieldsets = [
        [
            None,
            {
                'classes': ['wide', ],
                'fields': ['username', 'email', 'password', 'confirm_password'],
            },
        ],
    ]
    # Fields searched by the admin search box.
    search_fields = ['username', 'email', ]
    # Default ordering of the change-list.
    ordering = ['username', ]
    # No many-to-many widgets exposed on this admin.
    filter_horizontal = []
# Register the User model with its customized admin interface.
admin.site.register(User, UserAdmin)
| 28.066667 | 80 | 0.561362 |
08d15caf63683ffe576dc9467b0013e2c29fc7ef | 44,352 | py | Python | scripts/automation/trex_perf.py | alialnu/trex-core | ae4ab05a6215fd0a859adde40dac6afa8bf0f950 | [
"Apache-2.0"
] | null | null | null | scripts/automation/trex_perf.py | alialnu/trex-core | ae4ab05a6215fd0a859adde40dac6afa8bf0f950 | [
"Apache-2.0"
] | null | null | null | scripts/automation/trex_perf.py | alialnu/trex-core | ae4ab05a6215fd0a859adde40dac6afa8bf0f950 | [
"Apache-2.0"
] | null | null | null | #!/router/bin/python-2.7.4
import h_avc
import ConfigParser
import threading
import time,signal
import argparse
import sys
import os
sys.path.append(os.path.join('trex_control_plane', 'stf', 'trex_stf_lib'))
from trex_client import CTRexClient
import subprocess
from time import sleep
import signal
import textwrap
import getpass
import random
import datetime
from datetime import timedelta
import traceback
import math
import re
import termios
import errno
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
from email.mime.image import MIMEImage
from distutils.version import StrictVersion
class TrexRunException(Exception):
    """Raised when a TRex run fails; renders a multi-section failure report."""

    def __init__ (self, reason, cmd = None, std_log = None, err_log = None):
        self.reason = reason
        self.std_log = std_log
        self.err_log = err_log
        # assemble the error report section by section
        sections = ["\nSummary of error:\n\n %s\n" % (reason)]
        if std_log:
            sections.append("\nConsole Log:\n\n %s\n" % (self.std_log))
        if err_log:
            sections.append("\nStd Error Log:\n\n %s\n" % (self.err_log))
        self.message = "".join(sections)

    def __str__(self):
        return self.message
############################# utility functions start #################################
def verify_glibc_version ():
    """Raise unless the system GNU ldd is at least version 2.5 (needed for graph plotting).

    NOTE(review): Python-2-era code -- on Python 3, check_output() returns
    bytes, which re.match() with a str pattern would reject; confirm the
    target interpreter before reusing this.
    """
    # `ldd --version` prints e.g. "ldd (GNU libc) 2.17" on its first line
    x = subprocess.check_output("/usr/bin/ldd --version", shell=True)
    m = re.match("^ldd \([^\)]+\) (.*)", x)
    if not m:
        raise Exception("Cannot determine LDD version")
    current_version = m.group(1)
    # StrictVersion compares numerically, not lexicographically
    if StrictVersion(current_version) < StrictVersion("2.5"):
        raise Exception("GNU ldd version required for graph plotting is at least 2.5, system is %s - please run simple 'find'" % current_version)
def get_median(numericValues):
    """Return the median of `numericValues` (which must be non-empty).

    Uses floor division (`//`) for the index arithmetic: identical to the
    original `/` under Python 2's integer division, and also valid on
    Python 3 (where `/` yields a float and breaks list indexing).

    Returns the middle value for an odd count, or the mean of the two
    middle values (as a float) for an even count.
    """
    theValues = sorted(numericValues)
    if len(theValues) % 2 == 1:
        # odd count: single middle element
        return theValues[(len(theValues)+1)//2-1]
    else:
        # even count: average of the two middle elements
        lower = theValues[len(theValues)//2-1]
        upper = theValues[len(theValues)//2]
        return (float(lower + upper)) / 2
def list_to_clusters(l, n):
    """Yield successive chunks of `l` of size `n` (the last may be shorter).

    Uses range() instead of the Python-2-only xrange(): slicing behavior is
    identical under Python 2 and the generator also runs on Python 3.
    """
    for i in range(0, len(l), n):
        yield l[i:i+n]
def cpu_histo_to_str (cpu_histo):
    """Format a list of CPU-usage samples as rows of 10 samples per 10-second period.

    Missing samples in a partial final row are padded with blanks so the
    closing bracket stays aligned.
    """
    s = "\nCPU Samplings:\n\n"
    period = 0
    clusters = list(list_to_clusters(cpu_histo, 10))
    for cluster in clusters:
        period += 10
        line = "%3s Seconds: [" % period
        # pad a short final cluster with None placeholders up to 10 entries
        cluster += (10 - len(cluster)) * [None]
        for x in cluster:
            if (x != None):
                line += "%5.1f%%, " % x
            else:
                line += "         "
        line = line[:-2] # trim the comma and space
        line += "  " # return the space
        line += "]\n"
        s += line
    return s
# Terminal Manager Class: context manager that disables terminal echo on
# stdin for the duration of the `with` block and restores settings on exit.
class TermMng:
    def __enter__(self):
        self.fd = sys.stdin.fileno()
        # remember the current settings so __exit__ can restore them
        self.old = termios.tcgetattr(self.fd)
        # copy new and remove echo
        new = self.old[:]
        new[3] &= ~termios.ECHO
        self.tcsetattr_flags = termios.TCSAFLUSH
        if hasattr(termios, 'TCSASOFT'):
            self.tcsetattr_flags |= termios.TCSASOFT
        termios.tcsetattr(self.fd, self.tcsetattr_flags, new)
    def __exit__ (self ,type, value, traceback):
        # restore the original terminal attributes unconditionally
        termios.tcsetattr(self.fd, self.tcsetattr_flags, self.old)
############################# utility functions stop #################################
def send_mail(send_from, send_to, subject, html_text, txt_attachments=None, images=None, server="localhost"):
    """Send an HTML report mail via SMTP.

    `images` are embedded inline (referenced by Content-ID) and appended to
    the HTML body; `txt_attachments` are attached as regular files.
    """
    # default to fresh lists here instead of mutable default arguments
    # (backward compatible: callers passing nothing see the same behavior)
    txt_attachments = [] if txt_attachments is None else txt_attachments
    images = [] if images is None else images
    assert isinstance(send_to, list)
    assert isinstance(txt_attachments, list)
    assert isinstance(images, list)
    # create a multi part message
    msg = MIMEMultipart()
    msg['From'] = send_from
    msg['To'] = COMMASPACE.join(send_to)
    msg['Date'] = formatdate(localtime=True)
    msg['Subject'] = subject
    msg['Cc'] = "imarom@cisco.com"  # NOTE(review): hard-coded Cc recipient - confirm intended
    # add all images to the text as embedded images
    for image in images:
        html_text += '<br><img src="cid:{0}"><br>'.format(image)
        with open(image, 'rb') as fp:
            image_object = MIMEImage(fp.read())
        image_object.add_header('Content-ID', image)
        msg.attach(image_object)
    # attach the main report as embedded HTML
    msg.attach( MIMEText(html_text, 'html') )
    # attach regular txt files
    for f in txt_attachments:
        part = MIMEBase('application', "octet-stream")
        # read via a context manager (the original open(f).read() leaked the handle)
        with open(f, "rb") as fh:
            part.set_payload(fh.read())
        Encoders.encode_base64(part)
        part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
        msg.attach(part)
    smtp = smtplib.SMTP(server)
    try:
        smtp.sendmail(send_from, send_to, msg.as_string())
    finally:
        # close the connection even if sendmail raises
        smtp.close()
# convert HTML to image - returns the image FILENAME (not the file contents)
def html2image (html_filename, image_filename):
    """Rasterize an HTML file to an image using the bundled phantomjs script."""
    # NOTE(review): shell=True with interpolated filenames is shell-injection
    # prone if these names ever come from untrusted input - confirm callers.
    cmd = "./phantom/phantomjs ./phantom/rasterize.js {0} {1}".format(html_filename, image_filename)
    subprocess.call(cmd, shell=True)
    # phantomjs's exit status is not checked; verify the output file instead
    assert os.path.exists(image_filename)
    return (image_filename)
# convert results of run to a string
def run_results_to_str (results, cond_type):
    """Render one run's results dict as an aligned, human-readable summary block.

    Expects keys: m, tx, total-pps, cpu_util, maximum-latency,
    average-latency, total-pkt-drop; pass/fail is computed via
    check_condition()/cond_type_to_str() (defined elsewhere in this file).
    """
    output = ""
    output += "M:               {0:<12.6f}\n".format(results['m'])
    output += "BW:              {0:<12,.2f} [Mbps]\n".format(results['tx'])
    output += "PPS:             {0:<12,} [pkts]\n".format(int(results['total-pps']))
    output += "CPU:             {0:.4f} %\n".format(results['cpu_util'])
    output += "Maximum Latency: {0:<12,} [usec]\n".format(int(results['maximum-latency']))
    output += "Average Latency: {0:<12,} [usec]\n".format(int(results['average-latency']))
    output += "Pkt Drop:        {0:<12,} [pkts]\n".format(int(results['total-pkt-drop']))
    output += "Condition:       {0:<12} ({1})\n".format("Passed" if check_condition(cond_type, results) else "Failed", cond_type_to_str(cond_type))
    return (output)
############################# classes #################################
class ErrorHandler(object):
    """Logs a categorized error report, then signals the process to shut down.

    Instantiating this class is the side effect: it logs, sets the global
    kill cause, and raises SIGUSR1 against the current process so the
    registered signal handler performs the actual teardown.
    """
    def __init__ (self, exception, traceback):
        if isinstance(exception, TrexRunException):
            logger.log("\n*** Script Terminated Due To Trex Failure")
            logger.log("\n********************** TRex Error - Report **************************\n")
            logger.log(str(exception))
            logger.flush()
        elif isinstance(exception, IOError):
            logger.log("\n*** Script Terminated Due To IO Error")
            logger.log("\nEither Router address or the Trex config is bad or some file is missing - check traceback below")
            logger.log("\n********************** IO Error - Report **************************\n")
            logger.log(str(exception))
            logger.log(str(traceback))
            logger.flush()
        else:
            logger.log("\n*** Script Terminated Due To Fatal Error")
            logger.log("\n********************** Internal Error - Report **************************\n")
            logger.log(str(exception) + "\n")
            logger.log(str(traceback))
            logger.flush()
        # call the handler
        # BUG FIX: without the `global` declaration the assignment below
        # created a dead local variable and the module-level kill cause was
        # never updated for the signal handler.
        global g_kill_cause
        g_kill_cause = "error"
        os.kill(os.getpid(), signal.SIGUSR1)
# simple HTML table
class HTMLTable:
    """Minimal two-column (Parameter/Results) HTML table builder."""

    def __init__ (self):
        self.table_rows = []

    def add_row (self, param, value):
        """Queue one parameter/value pair as a future table row."""
        self.table_rows.append([param, value])

    def generate_table(self):
        """Render the accumulated rows as a single HTML table string."""
        parts = ['<table class="myWideTable" style="width:50%">',
                 "<tr><th>Parameter</th><th>Results</th></tr>"]
        for param, value in self.table_rows:
            parts.append("<tr><td>{0}</td><td>{1}</td></tr>".format(param, value))
        parts.append("</table>")
        return "".join(parts)
# process results and dispatch it
class JobReporter:
    """Formats a finished job's results and dispatches them (console + e-mail).

    Expects a job_summary dict populated by prepare_for_run()/launch(); the
    'find_results' / 'plot_results' sections are optional and only reported
    when present.
    """
    def __init__ (self, job_summary):
        self.job_summary = job_summary
        pass

    def __plot_results_to_str (self, plot_results):
        # one formatted line per sampled plot point
        output = "\nPlotted Points: \n\n"
        for p in plot_results:
            output += "BW : {0:8.2f}, ".format(p['tx'])
            output += "PPS : {0:8,} ".format(int(p['total-pps']))
            output += "CPU : {0:8.2f} %, ".format(p['cpu_util'])
            output += "Max Latency : {0:10,}, ".format(int(p['maximum-latency']))
            output += "Avg Latency : {0:10,}, ".format(int(p['average-latency']))
            output += "Pkt Drop : {0:12,}, \n".format(int(p['total-pkt-drop']))
        return (output + "\n")

    def __summary_to_string (self):
        # build the full textual report; optional sections appended last
        output = ""
        output += "\n-== Job Completed Successfully ==-\n\n"
        output += "Job Report:\n\n"
        output += "Job Name: {0}\n".format(self.job_summary['job_name'])
        output += "YAML file: {0}\n".format(self.job_summary['yaml'])
        output += "Job Type: {0}\n".format(self.job_summary['job_type_str'])
        output += "Condition: {0}\n".format(self.job_summary['cond_name'])
        output += "Job Dir: {0}\n".format(self.job_summary['job_dir'])
        output += "Job Log: {0}\n".format(self.job_summary['log_filename'])
        output += "Email Report: {0}\n".format(self.job_summary['email'])
        output += "Job Total Time: {0}\n\n".format(self.job_summary['total_run_time'])
        if (self.job_summary.get('find_results') != None):
            find_results = self.job_summary['find_results']
            output += ("Maximum BW Point Details:\n\n")
            output += run_results_to_str(find_results, self.job_summary['cond_type'])
        if (self.job_summary.get('plot_results') != None):
            plot_results = self.job_summary['plot_results']
            output += self.__plot_results_to_str(plot_results)
        return output

    # simple print to screen of the job summary
    def print_summary (self):
        summary = self.__summary_to_string()
        logger.log(summary)

    def __generate_graph_report (self, plot_results):
        """Fill graph_template.html with plot data; returns the written HTML filename."""
        # python repr of the nested lists; outer brackets stripped below so the
        # template receives a bare comma-separated row list
        graph_data = str( [ [x['tx'], x['cpu_util']/100, x['maximum-latency'], x['average-latency']] for x in plot_results ] )
        table_data = str( [ [x['tx'], x['total-pps'], x['cpu_util']/100, x['norm_cpu'], x['maximum-latency'], x['average-latency'], x['total-pkt-drop']] for x in plot_results ] )
        # NOTE(review): template is read from the current working directory — confirm CWD assumptions
        with open ("graph_template.html", "r") as myfile:
            data = myfile.read()
        data = data.replace("!@#$template_fill_head!@#$", self.job_summary['yaml'])
        data = data.replace("!@#$template_fill_graph!@#$", graph_data[1:(len(graph_data) - 1)])
        data = data.replace("!@#$template_fill_table!@#$", table_data[1:(len(table_data) - 1)])
        # generate HTML report
        graph_filename = self.job_summary['graph_filename']
        text_file = open(graph_filename, "w")
        text_file.write(str(data))
        text_file.close()
        return graph_filename

    def __generate_body_report (self):
        """Build the e-mail body from report_template.html plus two HTML tables."""
        job_setup_table = HTMLTable()
        job_setup_table.add_row("User Name", self.job_summary['user'])
        job_setup_table.add_row("Job Name", self.job_summary['job_name'])
        job_setup_table.add_row("Job Type", self.job_summary['job_type_str'])
        job_setup_table.add_row("Test Condition", self.job_summary['cond_name'])
        job_setup_table.add_row("YAML File", self.job_summary['yaml'])
        job_setup_table.add_row("Job Total Time", "{0}".format(self.job_summary['total_run_time']))
        job_summary_table = HTMLTable()
        find_results = self.job_summary['find_results']
        if find_results != None:
            job_summary_table.add_row("Maximum Bandwidth", "{0:,.2f} [Mbps]".format(find_results['tx']))
            job_summary_table.add_row("Maximum PPS", "{0:,} [pkts]".format(int(find_results['total-pps'])))
            job_summary_table.add_row("CPU Util.", "{0:.2f}%".format(find_results['cpu_util']))
            job_summary_table.add_row("Maximum Latency", "{0:,} [usec]".format(int(find_results['maximum-latency'])))
            job_summary_table.add_row("Average Latency", "{0:,} [usec]".format(int(find_results['average-latency'])))
            job_summary_table.add_row("Total Pkt Drop", "{0:,} [pkts]".format(int(find_results['total-pkt-drop'])))
        with open ("report_template.html", "r") as myfile:
            data = myfile.read()
        data = data.replace("!@#$template_fill_job_setup_table!@#$", job_setup_table.generate_table())
        data = data.replace("!@#$template_fill_job_summary_table!@#$", job_summary_table.generate_table())
        return data

    # create an email report and send to the user
    def send_email_report (self):
        """Generate body + optional graph image, then e-mail everything with attachments."""
        images = []
        logger.log("\nCreating E-Mail Report...\n")
        # generate main report
        report_str = self.__generate_body_report()
        # generate graph report (if exists)
        plot_results = self.job_summary['plot_results']
        if plot_results:
            logger.log("Generating Plot Results HTML ...\n")
            graph_filename = self.__generate_graph_report(plot_results)
            logger.log("Converting HTML to image ...\n")
            images.append(html2image(graph_filename, graph_filename + ".png"))
        else:
            graph_filename = None
        # create email
        from_addr = 'TrexReporter@cisco.com'
        to_addr = []
        to_addr.append(self.job_summary['email'])
        to_addr.append('imarom@cisco.com')
        attachments = []
        attachments.append(self.job_summary['log_filename'])
        logger.log("Attaching log {0}...".format(self.job_summary['log_filename']))
        if graph_filename:
            attachments.append(graph_filename)
            logger.log("Attaching plotting report {0}...".format(graph_filename))
        logger.flush()
        send_mail(from_addr, to_addr, "TRex Performance Report", report_str, attachments, images)
        logger.log("\nE-mail sent successfully to: " + self.job_summary['email'])
# dummy logger in case logger creation failed
class DummyLogger(object):
    """Fallback logger used before/if MyLogger creation fails: writes stdout only."""

    def __init__(self):
        pass

    def log(self, text, force = False, newline = True):
        # mirrors MyLogger.log semantics, minus the log-file sink
        suffix = "\n" if newline else ""
        sys.stdout.write(text + suffix)

    def console(self, text, force = False, newline = True):
        # console output is identical to log output for the dummy logger
        self.log(text, force, newline)

    def flush (self):
        pass
# logger object
class MyLogger(object):
    """Dual-sink logger: writes to both stdout and a log file.

    Keeps a dup of the original stdout fd so output can be 'silenced'
    (stdout redirected to /dev/null) and later restored, while forced
    messages still reach the terminal via the saved fd.
    """
    def __init__(self, log_filename):
        # Store the original stdout and stderr
        sys.stdout.flush()
        sys.stderr.flush()
        self.stdout_fd = os.dup(sys.stdout.fileno())
        self.devnull = os.open('/dev/null', os.O_WRONLY)
        self.log_file = open(log_filename, 'w')
        self.silenced = False
        # log-file writes are flushed in batches of 10 (see log())
        self.pending_log_file_prints = 0
        self.active = True

    def shutdown (self):
        # after shutdown, log()/console() become no-ops
        self.active = False

    def reactive (self):
        self.active = True

    # silence all prints from stdout
    def silence(self):
        os.dup2(self.devnull, sys.stdout.fileno())
        self.silenced = True

    # restore stdout status
    def restore(self):
        sys.stdout.flush()
        sys.stderr.flush()
        # Restore normal stdout
        os.dup2(self.stdout_fd, sys.stdout.fileno())
        self.silenced = False

    #print a message to the log (both stdout / log file)
    def log(self, text, force = False, newline = True):
        if not self.active:
            return
        self.log_file.write((text + "\n") if newline else text)
        self.pending_log_file_prints += 1
        # batch flushes to reduce I/O
        if (self.pending_log_file_prints >= 10):
             self.log_file.flush()
             self.pending_log_file_prints = 0
        self.console(text, force, newline)

    # print a message to the console alone
    def console(self, text, force = False, newline = True):
        if not self.active:
            return
        _text = (text + "\n") if newline else text
        # if we are silenced and not forced - go home
        if self.silenced and not force:
            return
        if self.silenced:
            # forced output while silenced: bypass the redirected stdout and
            # write straight to the saved terminal fd.
            # NOTE(review): os.write expects bytes on Python 3 — this path
            # looks Python 2 era; confirm before running under py3.
            os.write(self.stdout_fd, _text)
        else:
            sys.stdout.write(_text)
            sys.stdout.flush()

    # flush
    def flush(self):
        sys.stdout.flush()
        self.log_file.flush()

    def __del__(self):
        os.close(self.devnull)
        if self.log_file:
            self.log_file.flush()
            self.log_file.close()
# simple progress bar
class ProgressBar(threading.Thread):
    """Background thread rendering a console progress bar for a timed run.

    Also shows the router's last sampled CPU utilization next to the bar.
    """
    def __init__(self, time_sec, router):
        super(ProgressBar, self).__init__()
        self.active = True
        # pad the expected runtime a bit so the bar outlives the actual run
        self.time_sec = time_sec + 15
        self.router = router

    def run (self):
        global g_stop
        col = 40  # bar width in characters
        delta_for_sec = float(col) / self.time_sec
        accu = 0.0
        for i in range(self.time_sec):
            if (self.active == False):
                # print 100% - done
                bar = "\r[" + ('#' * col) + "] {0:.2f} %".format(100)
                logger.console(bar, force = True, newline = False)
                break
            if (g_stop == True):
                break
            sleep(1)
            accu += delta_for_sec
            bar = "\r[" + ('#' * int(accu)) + (' ' * (col - int(accu))) + "] {0:.2f} %".format( (accu/col) * 100 )
            bar += " / Router CPU: {0:.2f} %".format(self.router.get_last_cpu_util())
            logger.console(bar, force = True, newline = False)
        logger.console("\r\n", force = True, newline = False)
        logger.flush()

    def stop (self):
        # signal the loop to finish, then wait for the thread to exit
        self.active = False
        self.join()
# global vars
g_stop = False          # set to True to ask all worker threads to finish
logger = DummyLogger()  # replaced by a MyLogger once the job dir/log file exist
# cleanup list is a list of callables to be run when cntrl+c is caught
cleanup_list = []
################ threads ########################
# sampler
class Sample_Thread (threading.Thread):
    """Polls the router's statistics once per second until stopped."""
    def __init__(self, threadID, router):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.router = router
        self.stop = False  # local stop flag (checked alongside global g_stop)

    def run(self):
        self.router.clear_sampling_stats()
        try:
            # sample until asked to stop locally (do_stop) or globally (g_stop)
            while (self.stop==False) and (g_stop==False):
                self.router.sample_stats()
                time.sleep(1);
        except Exception as e:
            ErrorHandler(e, traceback.format_exc())

    def do_stop(self):
        self.stop = True
def general_cleanup_on_error ():
    """Best-effort global cleanup used by the fatal signal handlers.

    Stops worker threads, flushes the logger, runs all registered cleanup
    callables, waits briefly, then terminates the process.
    """
    global g_stop
    global cleanup_list
    # mark all the threads to finish
    g_stop = True;
    # shutdown and flush the logger
    logger.shutdown()
    if logger:
        logger.flush()
    # execute the registered callables
    for c in cleanup_list:
        c()
    # dummy wait for threads to finish (TODO: make this more smart)
    time.sleep(2)
    exit(-1)
# just a dummy for preventing chain calls
def signal_handler_dummy (sig_id, frame):
    """No-op handler installed to prevent re-entrant signal handling."""
    pass
def error_signal_handler (sig_id, frame):
    """SIGUSR1 handler raised by ErrorHandler; runs the global cleanup once."""
    # make sure no chain of calls
    signal.signal(signal.SIGUSR1, signal_handler_dummy)
    signal.signal(signal.SIGINT, signal_handler_dummy)
    general_cleanup_on_error()
def int_signal_handler(sig_id, frame):
    """Ctrl+C (SIGINT) handler; logs and runs the global cleanup once."""
    # make sure no chain of calls
    signal.signal(signal.SIGINT, signal_handler_dummy)
    signal.signal(signal.SIGUSR1, signal_handler_dummy)
    logger.log("\n\nCaught Cntrl+C... Cleaning up!\n\n")
    general_cleanup_on_error()
# Trex with sampling
class CTRexWithRouter:
    """Couples a TRex traffic run with parallel router statistics sampling."""
    def __init__(self, trex, trex_params):
        self.trex = trex;
        self.trex_params = trex_params
        # pick the router driver by configured type (h_avc module)
        if self.trex_params['router_type'] == "ASR":
            self.router = h_avc.ASR1k(self.trex_params['router_interface'], self.trex_params['router_port'], self.trex_params['router_password'])
        elif self.trex_params['router_type'] == "ISR":
            self.router = h_avc.ISR(self.trex_params['router_interface'], self.trex_params['router_port'], self.trex_params['router_password'])
        else:
            raise Exception("unknown router type in config file")
        self.router.connect()

    def get_router (self):
        return self.router

    def run(self, m, duration):
        """Run TRex at multiplier m for `duration` seconds with router sampling.

        Returns a dict with 'status', 'trex_results' and 'avc_results'.
        """
        self.sample_thread = Sample_Thread(1, self.router)
        self.sample_thread.start();
        # launch trex
        try:
            # trex_res = self.trex.run(m, duration);
            self.trex.start_trex(c = self.trex_params['trex_cores'],
                                 m = m,
                                 d = duration,
                                 f = self.trex_params['trex_yaml_file'],
                                 nc = True,
                                 l = self.trex_params['trex_latency'],
                                 limit_ports = self.trex_params['trex_limit_ports'])
            self.trex.sample_to_run_finish(20) # collect trex-sample every 20 seconds.
        except Exception:
            # on failure, make sure the sampler thread is stopped before re-raising
            self.sample_thread.do_stop() # signal to stop
            self.sample_thread.join() # wait for it to realy stop
            raise
        self.sample_thread.do_stop() # signal to stop
        self.sample_thread.join() # wait for it to realy stop
        self.res = self.trex.get_result_obj()
        results = {}
        results['status'] = True
        results['trex_results'] = self.res
        results['avc_results'] = self.router.get_stats()
        return (results)
        #return(trex_res.get_status() == STATUS_OK);
# sanity checks to see run really went well
def sanity_test_run (trex_r, avc_r):
    """Placeholder sanity check hook for a finished run (currently a no-op)."""
    pass
    #if (sum(avc_r['cpu_histo']) == 0):
        #raise h_trex.TrexRunException("CPU utilization from router is zero, check connectivity")
def _trex_run (job_summary, m, duration):
    """Run TRex once at multiplier m and reduce the results to a flat dict.

    Displays a progress bar during the run; raises if the run fails.
    Returned dict keys: m, tx (Mbps), cpu_util, norm_cpu, maximum-latency,
    average-latency, total-pkt-drop, expected-bps, total-pps, m_total_pkt,
    latency_condition.
    """
    trex_thread = job_summary['trex_thread']
    p = ProgressBar(duration, trex_thread.get_router())
    p.start()
    try:
        results = trex_thread.run(m, duration)
    except Exception as e:
        # stop the progress bar thread before propagating the failure
        p.stop()
        raise
    p.stop()
    if (results == None):
        raise Exception("Failed to run Trex")
    # fetch values
    trex_r = results['trex_results']
    avc_r = results['avc_results']
    sanity_test_run(trex_r, avc_r)
    res_dict = {}
    res_dict['m'] = m
    total_tx_bps = trex_r.get_last_value("trex-global.data.m_tx_bps")
    res_dict['tx'] = total_tx_bps / (1000 * 1000)  # EVENTUALLY CONTAINS IN MBPS (EXTRACTED IN BPS)
    res_dict['cpu_util'] = avc_r['cpu_util']
    # guard against divide-by-zero when the router reports 0% CPU
    if int(res_dict['cpu_util']) == 0:
        res_dict['norm_cpu']=1;
    else:
        res_dict['norm_cpu'] = (res_dict['tx'] / res_dict['cpu_util']) * 100
    res_dict['maximum-latency'] = max ( trex_r.get_max_latency().values() ) #trex_r.res['maximum-latency']
    res_dict['average-latency'] = trex_r.get_avg_latency()['all'] #trex_r.res['average-latency']
    logger.log(cpu_histo_to_str(avc_r['cpu_histo']))
    res_dict['total-pkt-drop'] = trex_r.get_total_drops()
    res_dict['expected-bps'] = trex_r.get_expected_tx_rate()['m_tx_expected_bps']
    res_dict['total-pps'] = get_median( trex_r.get_value_list("trex-global.data.m_tx_pps") )#trex_r.res['total-pps']
    res_dict['m_total_pkt'] = trex_r.get_last_value("trex-global.data.m_total_tx_pkts")
    res_dict['latency_condition'] = job_summary['trex_params']['trex_latency_condition']
    return res_dict
def trex_run (job_summary, m, duration):
    """Public wrapper around _trex_run (kept as a stable call-site API)."""
    return _trex_run(job_summary, m, duration)
def m_to_mbps (job_summary, m):
    """Convert an abstract M multiplier to Mbps via the calibrated base unit."""
    return job_summary['base_m_unit'] * m
# find the correct range of M
def find_m_range (job_summary):
    """Calibrate the M multiplier range for this setup.

    Runs a short 30-second probe at M=1 to learn the expected bps per unit M,
    then maps the target bandwidth window (either the user-supplied m_range in
    Mbps, or a guess based on machine type) into M units.

    Returns a dict with 'm_range' ([low, high] in M units) and 'base_m_unit'
    (Mbps per unit of M).
    """
    trex = job_summary['trex']
    trex_config = job_summary['trex_params']
    # if not provided - guess the correct range of bandwidth
    if not job_summary['m_range']:
        m_range = [0.0, 0.0]
        # 1 Mbps -> 1 Gbps
        LOW_TX = 1.0 * 1000 * 1000
        MAX_TX = 1.0 * 1000 * 1000 * 1000
        # for 10g go to 10g
        if trex_config['trex_machine_type'] == "10G":
            MAX_TX *= 10
        # dual injection can potentially reach X2 speed
        if trex_config['trex_is_dual'] == True:
            MAX_TX *= 2
    else:
        m_range = job_summary['m_range']
        LOW_TX = m_range[0] * 1000 * 1000
        MAX_TX = m_range[1] * 1000 * 1000
    logger.log("\nSystem Settings - Min: {0:,} Mbps / Max: {1:,} Mbps".format(LOW_TX / (1000 * 1000), MAX_TX / (1000 * 1000)))
    logger.log("\nTrying to get system minimum M and maximum M...")
    # short calibration run at M = 1
    res_dict = trex_run(job_summary, 1, 30)
    # figure out low / high M
    m_range[0] = (LOW_TX / res_dict['expected-bps']) * 1
    m_range[1] = (MAX_TX / res_dict['expected-bps']) * 1
    # return both the m_range and the base m unit for future calculation
    results = {}
    results['m_range'] = m_range
    results['base_m_unit'] = res_dict['expected-bps'] /(1000 * 1000)
    return (results)
# calculate points between m_range[0] and m_range[1]
def calculate_plot_points (job_summary, m_range, plot_count):
    """Run TRex at `plot_count` evenly-spaced M values and collect result dicts.

    Each point is a 180-second run; the final iteration is snapped to
    m_range[1] so the graph always ends at the top of the range.
    """
    cond_type = job_summary['cond_type']
    delta_m = (m_range[1] - m_range[0]) / plot_count
    m_current = m_range[0]
    m_end = m_range[1]
    logger.log("\nStarting Plot Graph Task ...\n")
    logger.log("Plotting Range Is From: {0:.2f} [Mbps] To: {1:.2f} [Mbps] Over {2} Points".format(m_to_mbps(job_summary, m_range[0]),
                                                                                                 m_to_mbps(job_summary, m_range[1]),
                                                                                                 plot_count))
    logger.log("Delta Between Points is {0:.2f} [Mbps]".format(m_to_mbps(job_summary, delta_m)))
    plot_points = []
    duration = 180
    # NOTE(review): 'iter' shadows the builtin; kept as-is
    iter = 1
    trex = job_summary['trex']
    while (iter <= plot_count):
        logger.log("\nPlotting Point [{0}/{1}]:\n".format(iter, plot_count))
        logger.log("Estimated BW ~= {0:,.2f} [Mbps]\n".format(m_to_mbps(job_summary, m_current)))
        logger.log("M = {0:.6f}".format(m_current))
        logger.log("Duration = {0} seconds\n".format(duration))
        res_dict = trex_run(job_summary, m_current, duration)
        print_trex_results(res_dict, cond_type)
        plot_points.append(dict(res_dict))
        m_current += delta_m
        iter = iter + 1
        # last point - make sure its the maximum point
        if (iter == plot_count):
            m_current = m_range[1]
        #print "waiting for system to stabilize ..."
        #time.sleep(30);
    return plot_points
def cond_type_to_str (cond_type):
    """Human-readable name for a failure-condition type."""
    if cond_type == 'latency':
        return "Max Latency"
    return "Pkt Drop"
# success condition (latency or drop)
def check_condition (cond_type, res_dict):
    """Decide whether a run passed its failure condition.

    'latency': pass when the observed maximum latency stays below the
    configured latency_condition threshold.
    'drop': fail only when BOTH symptoms are present — maximum latency
    exceeds the threshold by more than 2000 usec AND more than 0.2% of
    transmitted packets were dropped.

    Raises:
        ValueError: on an unknown cond_type. (Was a bare ``assert(0)``,
        which is silently stripped under ``python -O``.)
    """
    if cond_type == 'latency':
        return res_dict['maximum-latency'] < res_dict['latency_condition']
    if cond_type == 'drop':
        # drop condition is a bit more complex - it should create high latency in addition to 0.2% drop
        failed = ((res_dict['maximum-latency'] > (res_dict['latency_condition'] + 2000))
                  and (res_dict['total-pkt-drop'] > (0.002 * res_dict['m_total_pkt'])))
        return not failed
    raise ValueError("unknown condition type: {0}".format(cond_type))
def print_trex_results (res_dict, cond_type):
    """Log a formatted summary of one run's results."""
    logger.log("\nRun Results:\n")
    logger.log(run_results_to_str(res_dict, cond_type))
######################## describe a find job ########################
class FindJob:
    """Binary search for the maximum M that still passes the failure condition.

    Converges when the relative distance between min and max drops below 2%
    AND the run at the midpoint passes; successful runs are also collected
    for later plotting.
    """
    # init a job object with min / max
    def __init__ (self, min, max, job_summary):
        self.min = float(min)
        self.max = float(max)
        self.job_summary = job_summary
        self.cond_type = job_summary['cond_type']
        self.success_points = []
        self.iter_num = 1
        self.found = False
        self.iter_duration = 300  # seconds per search iteration

    def _distance (self):
        # relative gap between the search bounds
        return ( (self.max - self.min) / min(self.max, self.min) )

    def time_to_end (self):
        """Rough remaining wall time as a timedelta."""
        time_in_sec = (self.iters_to_end() * self.iter_duration)
        return timedelta(seconds = time_in_sec)

    def iters_to_end (self):
        """Estimate remaining iterations by simulating a random bisection to 2%."""
        # find 2% point
        ma = self.max
        mi = self.min
        iter = 0
        while True:
            dist = (ma - mi) / min(ma , mi)
            if dist < 0.02:
                break
            # direction of each real iteration is unknown, so pick randomly;
            # the iteration COUNT is the same either way
            if random.choice(["up", "down"]) == "down":
                ma = (ma + mi) / 2
            else:
                mi = (ma + mi) / 2
            iter += 1
        return (iter)

    def _cur (self):
        # midpoint of the current search interval
        return ( (self.min + self.max) / 2 )

    def _add_success_point (self, res_dict):
        self.success_points.append(res_dict.copy())

    def _is_found (self):
        return (self.found)

    def _next_iter_duration (self):
        return (self.iter_duration)

    # execute iteration
    def _execute (self):
        """Run one bisection step; updates bounds and the 'found' flag."""
        # reset the found var before running
        self.found = False
        # run and print results
        res_dict = trex_run(self.job_summary, self._cur(), self.iter_duration)
        self.iter_num += 1
        cur = self._cur()
        if (self._distance() < 0.02):
            if (check_condition(self.cond_type, res_dict)):
                # distance < 2% and success - we are done
                self.found = True
            else:
                # lower to 90% of current and retry
                self.min = cur * 0.9
                self.max = cur
        else:
            # success
            if (check_condition(self.cond_type, res_dict)):
                self.min = cur
            else:
                self.max = cur
        if (check_condition(self.cond_type, res_dict)):
            self._add_success_point(res_dict)
        return res_dict

    # find the max M before
    def find_max_m (self):
        """Iterate until convergence; returns the final run's results plus 'max_m'."""
        res_dict = {}
        while not self._is_found():
            logger.log("\n-> Starting Find Iteration #{0}\n".format(self.iter_num))
            logger.log("Estimated BW ~= {0:,.2f} [Mbps]".format(m_to_mbps(self.job_summary, self._cur())))
            logger.log("M = {0:.6f}".format(self._cur()))
            logger.log("Duration = {0} seconds".format(self._next_iter_duration()))
            logger.log("Current BW Range = {0:,.2f} [Mbps] / {1:,.2f} [Mbps]".format(m_to_mbps(self.job_summary, self.min), m_to_mbps(self.job_summary, self.max)))
            logger.log("Est. Iterations Left = {0} Iterations".format(self.iters_to_end()))
            logger.log("Est. Time Left = {0}\n".format(self.time_to_end()))
            res_dict = self._execute()
            print_trex_results(res_dict, self.cond_type)
        find_results = res_dict.copy()
        find_results['max_m'] = self._cur()
        return (find_results)
######################## describe a plot job ########################
class PlotJob:
    """Exposes a completed FindJob's successful runs as ordered plot data."""

    def __init__(self, findjob):
        self.job_summary = findjob.job_summary
        # own copy of the successful runs, ordered by transmitted bandwidth
        self.plot_points = sorted(findjob.success_points, key=lambda point: point['tx'])

    def plot (self, duration = 300):
        """Return the plot points; `duration` is accepted for API compatibility."""
        return self.plot_points
def generate_job_id ():
    """Return a random 32-bit numeric job id as a decimal string."""
    return str(random.getrandbits(32))
def print_header ():
    """Log the tool's startup banner."""
    logger.log("--== TRex Performance Tool v1.0 (2014) ==--")
# print startup summary
def log_startup_summary (job_summary):
    """Log the setup and job details at the start of a run."""
    trex = job_summary['trex']
    trex_config = job_summary['trex_params']
    logger.log("\nWork Request Details:\n")
    logger.log("Setup Details:\n")
    logger.log("TRex Config File: {0}".format(job_summary['config_file']))
    logger.log("Machine Name: {0}".format(trex_config['trex_name']))
    logger.log("TRex Type: {0}".format(trex_config['trex_machine_type']))
    logger.log("TRex Dual Int. Tx: {0}".format(trex_config['trex_is_dual']))
    logger.log("Router Interface: {0}".format(trex_config['router_interface']))
    logger.log("\nJob Details:\n")
    logger.log("Job Name: {0}".format(job_summary['job_name']))
    logger.log("YAML file: {0}".format(job_summary['yaml']))
    logger.log("Job Type: {0}".format(job_summary['job_type_str']))
    logger.log("Condition Type: {0}".format(job_summary['cond_name']))
    logger.log("Job Log: {0}".format(job_summary['log_filename']))
    logger.log("Email Report: {0}".format(job_summary['email']))
    # logger.log("\nTrex Command Used:\n{0}".format(trex.build_cmd(1, 10)))
def load_trex_config_params (filename, yaml_file):
    """Parse the [trex]/[router] INI config file into a flat params dict.

    Raises TrexRunException on any parse/lookup failure.
    NOTE(review): relies on the Python 2 module name ``ConfigParser``
    (``configparser`` on Python 3) — confirm the target interpreter.
    """
    config = {}
    parser = ConfigParser.ConfigParser()
    try:
        parser.read(filename)
        config['trex_name'] = parser.get("trex", "machine_name")
        config['trex_port'] = parser.get("trex", "machine_port")
        config['trex_hisory_size'] = parser.getint("trex", "history_size")
        config['trex_latency_condition'] = parser.getint("trex", "latency_condition")
        config['trex_yaml_file'] = yaml_file
        # support legacy data
        config['trex_latency'] = parser.getint("trex", "latency")
        config['trex_limit_ports'] = parser.getint("trex", "limit_ports")
        config['trex_cores'] = parser.getint("trex", "cores")
        config['trex_machine_type'] = parser.get("trex", "machine_type")
        config['trex_is_dual'] = parser.getboolean("trex", "is_dual")
        # optional Trex parameters
        if parser.has_option("trex", "config_file"):
            config['trex_config_file'] = parser.get("trex", "config_file")
        else:
            config['trex_config_file'] = None
        if parser.has_option("trex", "misc_params"):
            config['trex_misc_params'] = parser.get("trex", "misc_params")
        else:
            config['trex_misc_params'] = None
        # router section
        if parser.has_option("router", "port"):
            config['router_port'] = parser.get("router", "port")
        else:
            # simple telnet port
            config['router_port'] = 23
        config['router_interface'] = parser.get("router", "interface")
        config['router_password'] = parser.get("router", "password")
        config['router_type'] = parser.get("router", "type")
    except Exception as inst:
        raise TrexRunException("\nBad configuration file: '{0}'\n\n{1}".format(filename, inst))
    return config
def prepare_for_run (job_summary):
    """Set up everything a job needs before launch.

    Generates a unique job id/dir, resolves the user's e-mail, creates the
    job directory (falling back to /tmp on permission errors), installs the
    real file-backed logger, builds the TRex client + router thread, registers
    remote cleanup, and installs the fatal signal handlers.
    """
    global logger
    # generate unique id
    job_summary['job_id'] = generate_job_id()
    job_summary['job_dir'] = "trex_job_{0}".format(job_summary['job_id'])
    job_summary['start_time'] = datetime.datetime.now()
    if not job_summary['email']:
        job_summary['user'] = getpass.getuser()
        job_summary['email'] = "{0}@cisco.com".format(job_summary['user'])
    # create dir for reports
    try:
        job_summary['job_dir'] = os.path.abspath( os.path.join(os.getcwd(), 'logs', job_summary['job_dir']) )
        print(job_summary['job_dir'])
        os.makedirs( job_summary['job_dir'] )
    except OSError as err:
        if err.errno == errno.EACCES:
            # fall back. try creating the dir name at /tmp path
            job_summary['job_dir'] = os.path.join("/tmp/", "trex_job_{0}".format(job_summary['job_id']) )
            os.makedirs(job_summary['job_dir'])
    job_summary['log_filename'] = os.path.join(job_summary['job_dir'], "trex_log_{0}.txt".format(job_summary['job_id']))
    job_summary['graph_filename'] = os.path.join(job_summary['job_dir'], "trex_graph_{0}.html".format(job_summary['job_id']))
    # init logger (replaces the module-level DummyLogger)
    logger = MyLogger(job_summary['log_filename'])
    # mark those as not populated yet
    job_summary['find_results'] = None
    job_summary['plot_results'] = None
    # create trex client instance
    trex_params = load_trex_config_params(job_summary['config_file'],job_summary['yaml'])
    trex = CTRexClient(trex_host = trex_params['trex_name'],
                       max_history_size = trex_params['trex_hisory_size'],
                       trex_daemon_port = trex_params['trex_port'])
    job_summary['trex'] = trex
    job_summary['trex_params'] = trex_params
    # create trex task thread
    job_summary['trex_thread'] = CTRexWithRouter(trex, trex_params);
    # in case of an error we need to call the remote cleanup
    cleanup_list.append(trex.stop_trex)
    # signal handler
    signal.signal(signal.SIGINT, int_signal_handler)
    signal.signal(signal.SIGUSR1, error_signal_handler)
def after_run (job_summary):
    """Finalize a job: record total runtime, then print and e-mail the report."""
    job_summary['total_run_time'] = datetime.datetime.now() - job_summary['start_time']
    reporter = JobReporter(job_summary)
    reporter.print_summary()
    reporter.send_email_report()
def launch (job_summary):
    """Top-level job flow: prepare, calibrate M range, search, optionally plot, report."""
    prepare_for_run(job_summary)
    print_header()
    log_startup_summary(job_summary)
    # find the correct M range if not provided
    range_results = find_m_range(job_summary)
    job_summary['base_m_unit'] = range_results['base_m_unit']
    # a user-supplied m_range takes precedence over the calibrated one
    if job_summary['m_range']:
        m_range = job_summary['m_range']
    else:
        m_range = range_results['m_range']
    logger.log("\nJob Bandwidth Working Range:\n")
    logger.log("Min M = {0:.6f} / {1:,.2f} [Mbps] \nMax M = {2:.6f} / {3:,.2f} [Mbps]".format(m_range[0], m_to_mbps(job_summary, m_range[0]), m_range[1], m_to_mbps(job_summary, m_range[1])))
    # job time
    findjob = FindJob(m_range[0], m_range[1], job_summary)
    job_summary['find_results'] = findjob.find_max_m()
    if job_summary['job_type'] == "all":
        # plot points to graph
        plotjob = PlotJob(findjob)
        job_summary['plot_results'] = plotjob.plot()
    after_run(job_summary)
# populate the fields for run
def populate_fields (job_summary, args):
    """Copy parsed CLI arguments into the job_summary dict, applying defaults."""
    job_summary['config_file'] = args.config_file
    job_summary['job_type'] = args.job
    job_summary['cond_type'] = args.cond_type
    job_summary['yaml'] = args.yaml

    # optional arguments fall back to defaults
    job_summary['job_name'] = args.n if args.n else "Nameless"
    # did the user provided an email
    job_summary['email'] = args.e if args.e else None
    # did the user provide a range ?
    job_summary['m_range'] = args.m if args.m else None

    # some pretty shows
    job_summary['cond_name'] = 'Drop Pkt' if (args.cond_type == 'drop') else 'High Latency'
    display_names = {"find": "Find Max BW", "plot": "Plot Graph"}
    job_summary['job_type_str'] = display_names.get(args.job, "Find Max BW & Plot Graph")

    # plotting paths need the HTML-to-image step, which requires a modern glibc
    if args.job != "find":
        verify_glibc_version()
# verify file exists for argparse
def is_valid_file (parser, err_msg, filename):
    """argparse 'type' helper: abort via parser.error() unless filename exists."""
    if os.path.exists(filename):
        return (filename) # return an open file handle
    parser.error("{0}: '{1}'".format(err_msg, filename))
def entry ():
    """CLI entry point: parse arguments, run the job, handle fatal errors.

    Any exception during the run is routed through ErrorHandler (which logs
    and triggers the SIGUSR1 cleanup path).
    """
    # BUGFIX: without this declaration, the 'g_stop = True' at the end of the
    # function bound a function-local instead of the module-level stop flag,
    # so background threads were never told to finish.
    global g_stop
    job_summary = {}
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", help="Job Name",
                        type = str)
    parser.add_argument("-m", help="M Range [default: auto calcuation]",
                        nargs = 2,
                        type = float)
    parser.add_argument("-e", help="E-Mail for report [default: whoami@cisco.com]",
                        type = str)
    parser.add_argument("-c", "--cfg", dest = "config_file", required = True,
                        help = "Configuration File For Trex/Router Pair",
                        type = lambda x: is_valid_file(parser, "config file does not exists",x))
    parser.add_argument("job", help = "Job type",
                        type = str,
                        choices = ['find', 'plot', 'all'])
    parser.add_argument("cond_type", help="type of failure condition",
                        type = str,
                        choices = ['latency','drop'])
    parser.add_argument("-f", "--yaml", dest = "yaml", required = True,
                        help="YAML file to use", type = str)
    args = parser.parse_args()
    with TermMng():
        try:
            populate_fields(job_summary, args)
            launch(job_summary)
        except Exception as e:
            ErrorHandler(e, traceback.format_exc())
    logger.log("\nReport bugs to imarom@cisco.com\n")
    g_stop = True
def dummy_test ():
    """Developer smoke test: runs the reporting pipeline on canned results.

    Still performs real side effects (job dir/log creation, e-mail send via
    after_run) — not suitable for CI.
    """
    job_summary = {}
    find_results = {}
    job_summary['config_file'] = 'config/trex01-1g.cfg'
    job_summary['yaml'] = 'dummy.yaml'
    job_summary['email'] = 'imarom@cisco.com'
    job_summary['job_name'] = 'test'
    job_summary['job_type_str'] = 'test'
    prepare_for_run(job_summary)
    time.sleep(2)
    job_summary['yaml'] = 'dummy.yaml'
    job_summary['job_type'] = 'find'
    job_summary['cond_name'] = 'Drop'
    job_summary['cond_type'] = 'drop'
    job_summary['job_id']= 94817231
    # canned 'maximum BW point' results
    find_results['tx'] = 210.23
    find_results['m'] = 1.292812
    find_results['total-pps'] = 1000
    find_results['cpu_util'] = 74.0
    find_results['maximum-latency'] = 4892
    find_results['average-latency'] = 201
    find_results['total-pkt-drop'] = 0
    findjob = FindJob(1,1,job_summary)
    plotjob = PlotJob(findjob)
    job_summary['plot_results'] = plotjob.plot()
    job_summary['find_results'] = find_results
    # canned plot points (overrides the empty plotjob output above)
    job_summary['plot_results'] = [{'cpu_util': 2.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 999980.0, 'average-latency': 85.0, 'tx': 0.00207*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 221.0},
                                   {'cpu_util': 8.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 48500000.0, 'average-latency': 87.0, 'tx': 0.05005*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 279.0},
                                   {'cpu_util': 14.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 95990000.0, 'average-latency': 92.0, 'tx': 0.09806*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 273.0},
                                   {'cpu_util': 20.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 143490000.0, 'average-latency': 95.0, 'tx': 0.14613*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 271.0},
                                   {'cpu_util': 25.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 190980000.0, 'average-latency': 97.0, 'tx': 0.1933*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 302.0},
                                   {'cpu_util': 31.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 238480000.0, 'average-latency': 98.0, 'tx': 0.24213*1000, 'total-pkt-drop': 1.0, 'maximum-latency': 292.0},
                                   {'cpu_util': 37.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 285970000.0, 'average-latency': 99.0, 'tx': 0.29011*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 344.0},
                                   {'cpu_util': 43.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 333470000.0, 'average-latency': 100.0, 'tx': 0.3382*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 351.0},
                                   {'cpu_util': 48.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 380970000.0, 'average-latency': 100.0, 'tx': 0.38595*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 342.0},
                                   {'cpu_util': 54.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 428460000.0, 'average-latency': 19852.0, 'tx': 0.43438*1000, 'total-pkt-drop': 1826229.0, 'maximum-latency': 25344.0}]
    after_run(job_summary)
# CLI invocation
if __name__ == "__main__":
    entry ()
| 35.005525 | 223 | 0.59605 |
aead9dc73ed063e1c5865040eaa2652b26aa3ad3 | 5,571 | py | Python | basicsr/data/transforms.py | RuijiaoSun/BasicSR | b60162e9a0f17c63b87fce36092d08ab81304ab3 | [
"Apache-2.0",
"MIT"
] | 2 | 2021-08-07T02:15:31.000Z | 2021-09-09T02:52:15.000Z | basicsr/data/transforms.py | kuijiang0802/BasicSR | 5c757162b348a09d236e00c2cc04463c0a8bba45 | [
"Apache-2.0",
"MIT"
] | null | null | null | basicsr/data/transforms.py | kuijiang0802/BasicSR | 5c757162b348a09d236e00c2cc04463c0a8bba45 | [
"Apache-2.0",
"MIT"
] | null | null | null | import cv2
import random
def mod_crop(img, scale):
    """Mod crop images, used during testing.

    Crops *img* so that its height and width are divisible by *scale*.
    Accepts HxW or HxWxC arrays.

    Args:
        img (ndarray): Input image.
        scale (int): Scale factor.

    Returns:
        ndarray: Result image (a copy).
    """
    if img.ndim not in (2, 3):
        raise ValueError(f'Wrong img ndim: {img.ndim}.')
    out = img.copy()
    h, w = out.shape[0], out.shape[1]
    # drop the trailing rows/cols that don't fit a whole scale multiple
    return out[:h - (h % scale), :w - (w % scale), ...]
def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path):
    """Paired random crop.

    Crops lists of LQ and GT images at corresponding locations so each GT
    patch covers exactly the ``scale``-times upscaled region of its LQ patch.

    Args:
        img_gts (list[ndarray] | ndarray): GT images. All images should have
            the same shape. A bare ndarray is treated as a one-element list.
        img_lqs (list[ndarray] | ndarray): LQ images. All images should have
            the same shape. A bare ndarray is treated as a one-element list.
        gt_patch_size (int): GT patch size.
        scale (int): Scale factor between GT and LQ sizes.
        gt_path (str): Path to ground-truth; used only in error messages.

    Returns:
        list[ndarray] | ndarray: Cropped GT images and LQ images. If a result
        list has a single element, the bare ndarray is returned instead.

    Raises:
        ValueError: If GT size is not ``scale`` times LQ size, or the LQ
            image is smaller than the requested patch.
    """
    if not isinstance(img_gts, list):
        img_gts = [img_gts]
    if not isinstance(img_lqs, list):
        img_lqs = [img_lqs]

    h_lq, w_lq, _ = img_lqs[0].shape
    h_gt, w_gt, _ = img_gts[0].shape
    lq_patch_size = gt_patch_size // scale

    if h_gt != h_lq * scale or w_gt != w_lq * scale:
        # Bug fix: the original passed two separate f-strings to ValueError
        # (a stray comma instead of implicit string concatenation), so the
        # exception carried a tuple of fragments rather than one message.
        raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x '
                         f'multiplication of LQ ({h_lq}, {w_lq}).')
    if h_lq < lq_patch_size or w_lq < lq_patch_size:
        raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '
                         f'({lq_patch_size}, {lq_patch_size}). '
                         f'Please remove {gt_path}.')

    # Randomly choose top and left coordinates for the LQ patch.
    top = random.randint(0, h_lq - lq_patch_size)
    left = random.randint(0, w_lq - lq_patch_size)

    # Crop the LQ patch.
    img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs]

    # Crop the corresponding GT patch at the scaled location.
    top_gt, left_gt = int(top * scale), int(left * scale)
    img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts]

    if len(img_gts) == 1:
        img_gts = img_gts[0]
    if len(img_lqs) == 1:
        img_lqs = img_lqs[0]
    return img_gts, img_lqs
def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False):
    """Apply one shared random flip / 90-degree-rotation augmentation.

    Rotation is implemented as a vertical flip plus a transpose. All images
    (and flows, if provided) share the same random draws, so paired data
    stays aligned. Flips are performed in place via ``cv2.flip``.

    Args:
        imgs (list[ndarray] | ndarray): Images to be augmented. A bare
            ndarray is treated as a one-element list.
        hflip (bool): Allow horizontal flip. Default: True.
        rotation (bool): Allow vertical flip and 90-degree rotation.
            Default: True.
        flows (list[ndarray], optional): Flows of dimension (h, w, 2) to be
            augmented consistently with the images. Default: None.
        return_status (bool): Also return the sampled (hflip, vflip, rot90)
            status when no flows are given. Default: False.

    Returns:
        list[ndarray] | ndarray: Augmented images (and flows, or status).
        Single-element results are unwrapped to a bare ndarray.
    """
    # Each transform fires with probability 0.5; short-circuiting keeps the
    # random stream untouched when a transform class is disabled.
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rotation and random.random() < 0.5
    do_rot90 = rotation and random.random() < 0.5

    def _transform_image(image):
        if do_hflip:  # horizontal, in place
            cv2.flip(image, 1, image)
        if do_vflip:  # vertical, in place
            cv2.flip(image, 0, image)
        if do_rot90:
            image = image.transpose(1, 0, 2)
        return image

    def _transform_flow(flow):
        if do_hflip:  # horizontal: also negate the x component
            cv2.flip(flow, 1, flow)
            flow[:, :, 0] *= -1
        if do_vflip:  # vertical: also negate the y component
            cv2.flip(flow, 0, flow)
            flow[:, :, 1] *= -1
        if do_rot90:
            flow = flow.transpose(1, 0, 2)
            flow = flow[:, :, [1, 0]]
        return flow

    img_list = imgs if isinstance(imgs, list) else [imgs]
    img_list = [_transform_image(image) for image in img_list]
    outputs = img_list[0] if len(img_list) == 1 else img_list

    if flows is not None:
        flow_list = flows if isinstance(flows, list) else [flows]
        flow_list = [_transform_flow(flow) for flow in flow_list]
        flow_out = flow_list[0] if len(flow_list) == 1 else flow_list
        return outputs, flow_out
    if return_status:
        return outputs, (do_hflip, do_vflip, do_rot90)
    return outputs
def img_rotate(img, angle, center=None, scale=1.0):
    """Rotate an image around ``center`` by ``angle`` degrees.

    Args:
        img (ndarray): Image to be rotated.
        angle (float): Rotation angle in degrees; positive values mean
            counter-clockwise rotation.
        center (tuple[int], optional): Rotation center. Defaults to the
            image center when None.
        scale (float): Isotropic scale factor. Default: 1.0.

    Returns:
        ndarray: Rotated image with the same spatial size as the input.
    """
    height, width = img.shape[:2]
    pivot = (width // 2, height // 2) if center is None else center
    rotation = cv2.getRotationMatrix2D(pivot, angle, scale)
    return cv2.warpAffine(img, rotation, (width, height))
| 33.560241 | 103 | 0.591635 |
66bb4fb986f69b2d491f15868e0fe6d89ebddf80 | 845 | py | Python | bitmovin_api_sdk/analytics/insights/organizations/organizations_api.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 11 | 2019-07-03T10:41:16.000Z | 2022-02-25T21:48:06.000Z | bitmovin_api_sdk/analytics/insights/organizations/organizations_api.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 8 | 2019-11-23T00:01:25.000Z | 2021-04-29T12:30:31.000Z | bitmovin_api_sdk/analytics/insights/organizations/organizations_api.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 13 | 2020-01-02T14:58:18.000Z | 2022-03-26T12:10:30.000Z | # coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.analytics.insights.organizations.settings.settings_api import SettingsApi
class OrganizationsApi(BaseApi):
    """Analytics-insights API group for organizations.

    Exposes the nested ``settings`` sub-resource configured with the same
    client parameters as this API.
    """

    @poscheck_except(2)
    def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
        # type: (str, str, str, BitmovinApiLoggerBase) -> None
        client_kwargs = dict(
            api_key=api_key,
            tenant_org_id=tenant_org_id,
            base_url=base_url,
            logger=logger
        )
        super(OrganizationsApi, self).__init__(**client_kwargs)
        # Nested sub-API sharing the same client configuration.
        self.settings = SettingsApi(**client_kwargs)
| 30.178571 | 95 | 0.688757 |
19de75d35fef9d4e5727b85a571e6a8c67bf9022 | 3,196 | py | Python | desihigh/prep/prep_data.py | AlbertoRosado1/desihigh | a9f9d78c0c7605a704ae8008633f7aa9f641f7e4 | [
"BSD-3-Clause"
] | 22 | 2021-01-29T05:34:05.000Z | 2022-03-19T19:02:07.000Z | desihigh/prep/prep_data.py | AlbertoRosado1/desihigh | a9f9d78c0c7605a704ae8008633f7aa9f641f7e4 | [
"BSD-3-Clause"
] | 34 | 2021-01-18T02:22:52.000Z | 2022-02-18T15:17:42.000Z | desihigh/prep/prep_data.py | AlbertoRosado1/desihigh | a9f9d78c0c7605a704ae8008633f7aa9f641f7e4 | [
"BSD-3-Clause"
] | 28 | 2021-02-01T02:02:25.000Z | 2022-02-25T18:15:00.000Z | import astropy.io.fits as fits
import numpy as np
import pylab as pl
from astropy.table import Table, join, vstack
from scipy.ndimage.filters import gaussian_filter
from desitarget.cmx import cmx_targetmask

# Build a small "student" subset of the DESI andes reduction: for each tracer
# class, keep a handful of well-measured targets and write cut-down zbest and
# coadd files under ../student_andes/.
# NOTE(review): the `band` loop variable below is never used.

# Coma on petal 0 of 70510.
tiles = {'mws': 66003, 'bgs': 66003, 'elg': 67230, 'lrg': 68002, 'qso': 68002}

allids = []

for tracer, band in zip(['mws', 'bgs', 'elg', 'lrg', 'qso'], ['B', 'B', 'Z', 'Z', 'Z']):
    # Redshift fits and coadded spectra for the tile hosting this tracer.
    zbest = Table.read('../../../andes/zbest-0-{}-20200315.fits'.format(tiles[tracer]))
    coadd = fits.open('../../../andes/coadd-0-{}-20200315.fits'.format(tiles[tracer]))
    # The two files must be row-aligned on TARGETID.
    assert np.all(coadd['FIBERMAP'].data['TARGETID'] == zbest['TARGETID'])
    # Pull photometry / targeting columns from the fibermap into the table.
    tinfo = Table(coadd['FIBERMAP'].data)['TARGETID', 'FLUX_G', 'FLUX_R', 'FLUX_Z', 'CMX_TARGET', 'TARGET_RA', 'TARGET_DEC']
    zbest = join(zbest, tinfo, join_type='left', keys='TARGETID')
    # Keep only targets of this tracer class with the expected spectral type.
    if tracer == 'mws':
        zbest = zbest[(zbest['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_MWS')) != 0]
        zbest = zbest[(zbest['SPECTYPE'] == 'STAR')]
    elif tracer == 'bgs':
        zbest = zbest[(zbest['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0]
        zbest = zbest[(zbest['SPECTYPE'] == 'GALAXY')]
    elif tracer in ['elg', 'lrg']:
        zbest = zbest[(zbest['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_{}'.format(tracer.upper()))) != 0]
        zbest = zbest[(zbest['SPECTYPE'] == 'GALAXY')]
    else:
        zbest = zbest[(zbest['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_QSO')) != 0]
        zbest = zbest[(zbest['SPECTYPE'] == 'QSO')]
    # Keep targets whose DELTACHI2 lies strictly between the 60th and 95th
    # percentiles of this selection.
    dChs = zbest['DELTACHI2']
    rank = np.argsort(dChs)
    lcut = np.percentile(dChs[rank], 60)
    hcut = np.percentile(dChs[rank], 95)
    cut = (dChs[rank] > lcut) & (dChs[rank] < hcut)
    zs = zbest[rank][cut]
    zs.sort('TARGETID')
    # Drop per-fit columns not needed downstream.
    for x in ['NPIXELS', 'NUMEXP', 'NUMTILE', 'NCOEFF']:
        del zs[x]
    assert np.all(zs['ZWARN'] == 0)
    # Limit number of rows.
    zs = zs[:5]
    print('\n\n')
    print(zs)
    zs.write('../student_andes/zbest-{}-{}-20200315.fits'.format(tracer, tiles[tracer]), format='fits', overwrite=True)
    tids = zs['TARGETID']
    allids += tids.tolist()
    # Mask selecting the coadd rows of the retained targets (order preserved).
    isin = np.isin(coadd['FIBERMAP'].data['TARGETID'], tids)
    assert np.all(coadd['FIBERMAP'].data['TARGETID'][isin] == tids)
    # Concatenate the three spectrograph arms onto one wavelength-sorted grid.
    wave = []
    flux = []
    for arm in ['B', 'R', 'Z']:
        wave += coadd['{:s}_WAVELENGTH'.format(arm)].data.tolist()
        flux += coadd['{:s}_FLUX'.format(arm)].data[isin].T.tolist()
    wave = np.array(wave)
    rank = np.argsort(wave)
    wave = wave[rank]
    flux = np.array(flux)
    flux = flux[rank, :]
    # Smooth each target's spectrum with a 3-pixel Gaussian kernel.
    # NOTE(review): scipy.ndimage.filters is a deprecated import path;
    # scipy.ndimage.gaussian_filter is the modern location.
    for i, x in enumerate(flux.T):
        flux[:,i] = gaussian_filter(x, 3)
    # pl.clf()
    # pl.plot(wave, flux, lw=1.0)
    # pl.show()
    result = Table()
    result['WAVELENGTH'] = wave
    for i, x in enumerate(flux.T):
        result['TARGET{:d}'.format(tids[i])] = flux[:,i]
    # print(result)
    result.write('../student_andes/coadd-{}-{}-20200315.fits'.format(tracer, tiles[tracer]), format='fits', overwrite=True)

# Record every retained TARGETID across all tracers.
allids = np.array(allids)
np.savetxt('../student_andes/all_ids.txt', allids, fmt='%d')
print('\n\nDone.\n\n')
| 29.592593 | 123 | 0.592929 |
9247de22b6ef0b72ec5b1f6be429473622f2f09a | 570 | py | Python | website/archive/binaries/mac/src/globplot/biopython-1.50/Bio/Writer.py | homiak/jabaws | f9c48c1311a07bffd99866f3d70a82e889f10493 | [
"Apache-2.0"
] | 2 | 2016-05-09T04:20:06.000Z | 2017-03-07T10:25:53.000Z | website/archive/binaries/mac/src/globplot/biopython-1.50/Bio/Writer.py | homiak/jabaws | f9c48c1311a07bffd99866f3d70a82e889f10493 | [
"Apache-2.0"
] | null | null | null | website/archive/binaries/mac/src/globplot/biopython-1.50/Bio/Writer.py | homiak/jabaws | f9c48c1311a07bffd99866f3d70a82e889f10493 | [
"Apache-2.0"
] | 1 | 2019-08-19T22:05:14.000Z | 2019-08-19T22:05:14.000Z | """Part of an old unused and undocumented sequence writing framework (DEPRECATED)."""
import warnings
# Emit a deprecation warning at import time: this module is slated for
# removal from Biopython.
warnings.warn("Bio.Writer and Bio.writer.* are deprecated. If you do use"\
              +" these modules, please get in touch via the mailing list or"\
              +" bugzilla to avoid their permanent removal from Biopython.", \
              DeprecationWarning)
class Writer:
    """Base class of the deprecated sequence-writing framework.

    Subclasses override the hooks below; the base implementations do
    nothing.
    """

    def __init__(self, outfile):
        # Destination file-like object that records are written to.
        self.outfile = outfile

    def writeHeader(self):
        """Hook invoked once before any record is written."""
        pass

    def write(self, record):
        """Hook invoked once per record."""
        pass

    def writeFooter(self):
        """Hook invoked once after all records are written."""
        pass
| 31.666667 | 85 | 0.64386 |
bbc1533dfe80d66256486b5b58e09dd921ab83f8 | 2,047 | py | Python | generated/nifake/setup.py | imranadamjee/nimi-python | 1931d17326e13556c46b1cdeb58b7bd36c57b1f3 | [
"MIT"
] | null | null | null | generated/nifake/setup.py | imranadamjee/nimi-python | 1931d17326e13556c46b1cdeb58b7bd36c57b1f3 | [
"MIT"
] | null | null | null | generated/nifake/setup.py | imranadamjee/nimi-python | 1931d17326e13556c46b1cdeb58b7bd36c57b1f3 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file was generated
from setuptools.command.test import test as test_command
from setuptools import setup
class PyTest(test_command):
    # Custom `python setup.py test` command that delegates to pytest.
    # NOTE(review): setuptools.command.test is deprecated and removed in
    # recent setuptools; modern projects invoke pytest directly.

    def finalize_options(self):
        # Collect the whole test suite with no extra pytest CLI arguments.
        test_command.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Import locally so pytest is only required when tests actually run.
        import pytest
        pytest.main(self.test_args)
pypi_name = 'nifake'


def read_contents(file_to_read):
    """Return the full text of ``file_to_read``.

    The file is decoded as UTF-8 explicitly so the long description renders
    identically regardless of the platform's default locale encoding.

    Args:
        file_to_read (str): Path of the file to read.

    Returns:
        str: The file's contents.
    """
    with open(file_to_read, 'r', encoding='utf-8') as f:
        return f.read()
setup(
    name=pypi_name,
    zip_safe=True,
    version='1.3.4.dev0',
    description='NI-FAKE Python API',
    # Long description comes straight from the README shipped with the sdist.
    long_description=read_contents('README.rst'),
    long_description_content_type='text/x-rst',
    author='NI',
    author_email="opensource@ni.com",
    url="https://github.com/ni/nimi-python",
    maintainer="NI",
    maintainer_email="opensource@ni.com",
    keywords=['nifake'],
    license='MIT',
    include_package_data=True,
    packages=['nifake'],
    install_requires=[
        # Backport dependencies are needed only on Python < 3.4.
        'enum34;python_version<"3.4"',
        'singledispatch;python_version<"3.4"',
        'hightime>=0.2.0',
        'nitclk',
    ],
    setup_requires=['pytest-runner', ],
    tests_require=['pytest'],
    test_suite='tests',
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Manufacturing",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: System :: Hardware :: Hardware Drivers"
    ],
    # Route `python setup.py test` through the pytest-based PyTest command.
    cmdclass={'test': PyTest},
    # Ship the VERSION data file inside the package.
    package_data={pypi_name: ['VERSION']},
)
| 28.041096 | 70 | 0.621397 |
fefbe072dae79cd5bb4ab22b788fa841fa67986a | 11,603 | py | Python | v1.0.0.test/toontown/uberdog/TTGameServicesManagerUD.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | [
"MIT"
] | 4 | 2019-07-01T15:46:43.000Z | 2021-07-23T16:26:48.000Z | v1.0.0.test/toontown/uberdog/TTGameServicesManagerUD.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | [
"MIT"
] | 1 | 2019-06-29T03:40:05.000Z | 2021-06-13T01:15:16.000Z | v1.0.0.test/toontown/uberdog/TTGameServicesManagerUD.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | [
"MIT"
] | 4 | 2019-07-28T21:18:46.000Z | 2021-02-25T06:37:25.000Z | from otp.uberdog.GameServicesManagerUD import *
from toontown.makeatoon.NameGenerator import NameGenerator
from toontown.toon.ToonDNA import ToonDNA
from toontown.toonbase import TTLocalizer
def judgeName(name):
    # Stub approval check: every requested name is accepted unconditionally.
    # NOTE(review): `name` is ignored -- presumably a placeholder for a real
    # name-filtering service.
    return True
class SetNamePatternOperation(AvatarOperation):
    # Names an avatar from a pattern of name-dictionary picks (the
    # pick-a-name flow). Runs as an asynchronous operation: states are driven
    # by self.demand() and database callbacks.
    notify = DirectNotifyGlobal.directNotify.newCategory('SetNamePatternOperation')
    # State to enter once the account record has been retrieved.
    postAccountState = 'RetrieveAvatar'

    def __init__(self, gameServicesManager, target):
        AvatarOperation.__init__(self, gameServicesManager, target)
        self.avId = None      # avatar being named
        self.pattern = None   # list of (dictionary index, capitalize flag)
        return

    def enterStart(self, avId, pattern):
        # Entry point: remember the avatar and requested pattern, then fetch
        # the account so ownership can be verified.
        self.avId = avId
        self.pattern = pattern
        self.demand('RetrieveAccount')

    def enterRetrieveAvatar(self):
        # The avatar must appear in this account's avatar list.
        if self.avId and self.avId not in self.avList:
            self.demand('Kill', 'Tried to name an avatar not in the account!')
            return
        self.gameServicesManager.air.dbInterface.queryObject(self.gameServicesManager.air.dbId, self.avId, self.__handleAvatar)

    def __handleAvatar(self, dclass, fields):
        # Database callback: validate the avatar object and its name state.
        if dclass != self.gameServicesManager.air.dclassesByName['DistributedToonUD']:
            self.demand('Kill', "One of the account's avatars is invalid!")
            return
        if fields['WishNameState'][0] != 'OPEN':
            self.demand('Kill', 'Avatar is not in a nameable state!')
            # NOTE(review): no `return` here -- unlike the branch above, this
            # falls through and still demands SetName after Kill. Possibly a
            # latent bug; confirm intended FSM semantics of demand('Kill').
        self.demand('SetName')

    def enterSetName(self):
        # Assemble the final name: each (index, flag) pair selects a word
        # from the name dictionary; the flag controls capitalization.
        parts = []
        for p, f in self.pattern:
            part = self.gameServicesManager.nameGenerator.nameDictionary.get(p, ('', ''))[1]
            if f:
                part = part[:1].upper() + part[1:]
            else:
                part = part.lower()
            parts.append(part)
        # The third and fourth words are fused into a single last name.
        parts[2] += parts.pop(3)
        while '' in parts:
            parts.remove('')
        name = (' ').join(parts)
        # Persist the name, lock further naming, and notify the client of
        # success (status 1).
        self.gameServicesManager.air.dbInterface.updateObject(self.gameServicesManager.air.dbId, self.avId, self.gameServicesManager.air.dclassesByName['DistributedToonUD'], {'WishNameState': ('LOCKED', ), 'WishName': ('', ),
           'setName': (name,)})
        self.gameServicesManager.air.writeServerEvent('avatar-named', self.avId, name)
        self.gameServicesManager.sendUpdateToAccountId(self.target, 'namePatternResponse', [self.avId, 1])
        self.demand('Off')
class SetNameTypedOperation(AvatarOperation):
    # Handles a free-typed avatar name. When avId is 0/None the name is only
    # judged (pre-check); otherwise the avatar record is updated as well.
    notify = DirectNotifyGlobal.directNotify.newCategory('SetNameTypedOperation')
    # State to enter once the account record has been retrieved.
    postAccountState = 'RetrieveAvatar'

    def __init__(self, gameServicesManager, target):
        AvatarOperation.__init__(self, gameServicesManager, target)
        self.avId = None   # avatar being named; falsy for a pure name check
        self.name = None   # the typed name under review
        return

    def enterStart(self, avId, name):
        self.avId = avId
        self.name = name
        if self.avId:
            # Naming a concrete avatar: verify account ownership first.
            self.demand('RetrieveAccount')
            return
        # No avatar given: judge the name only.
        self.demand('JudgeName')

    def enterRetrieveAvatar(self):
        # The avatar must appear in this account's avatar list.
        if self.avId and self.avId not in self.avList:
            self.demand('Kill', 'Tried to name an avatar not in the account!')
            return
        self.gameServicesManager.air.dbInterface.queryObject(self.gameServicesManager.air.dbId, self.avId, self.__handleAvatar)

    def __handleAvatar(self, dclass, fields):
        # Database callback: validate the avatar object and its name state.
        if dclass != self.gameServicesManager.air.dclassesByName['DistributedToonUD']:
            self.demand('Kill', "One of the account's avatars is invalid!")
            return
        if fields['WishNameState'][0] != 'OPEN':
            self.demand('Kill', 'Avatar is not in a nameable state!')
            # NOTE(review): no `return` here -- still demands JudgeName after
            # Kill; same fall-through pattern as SetNamePatternOperation.
        self.demand('JudgeName')

    def enterJudgeName(self):
        # judgeName() is currently a stub that approves everything; with
        # tempAutoAcceptBool the name is applied immediately (LOCKED) rather
        # than queued for moderation (PENDING).
        status = judgeName(self.name)
        tempAutoAcceptBool = True
        if self.avId and status:
            if tempAutoAcceptBool:
                self.gameServicesManager.air.dbInterface.updateObject(self.gameServicesManager.air.dbId, self.avId, self.gameServicesManager.air.dclassesByName['DistributedToonUD'], {'WishNameState': ('LOCKED', ), 'WishName': ('', ),
                   'setName': (self.name,)})
            else:
                self.gameServicesManager.air.dbInterface.updateObject(self.gameServicesManager.air.dbId, self.avId, self.gameServicesManager.air.dclassesByName['DistributedToonUD'], {'WishNameState': ('PENDING', ), 'WishName': (self.name,)})
        if self.avId:
            if tempAutoAcceptBool:
                self.gameServicesManager.air.writeServerEvent('avatar-name-accepted', self.avId, self.name)
            else:
                self.gameServicesManager.air.writeServerEvent('avatar-wish-name', self.avId, self.name)
        # Status 2 signals immediate acceptance to the client.
        if status and tempAutoAcceptBool:
            status = 2
        self.gameServicesManager.sendUpdateToAccountId(self.target, 'nameTypedResponse', [self.avId, status])
        self.demand('Off')
class CreateAvatarOperation(GameOperation):
    # Creates a new toon in one of the account's six avatar slots:
    # validate input -> fetch account -> create toon object -> store its id
    # back on the account record.
    notify = DirectNotifyGlobal.directNotify.newCategory('CreateAvatarOperation')

    def __init__(self, gameServicesManager, target):
        GameOperation.__init__(self, gameServicesManager, target)
        self.index = None   # avatar slot index (0-5)
        self.dna = None     # ToonDNA net string
        self.name = None    # requested name ('' means name later)
        return

    def enterStart(self, dna, name, index):
        # Validate the slot index and the DNA string before touching the DB.
        if index >= 6:
            self.demand('Kill', 'Invalid index specified!')
            return
        if not ToonDNA().isValidNetString(dna):
            self.demand('Kill', 'Invalid DNA specified!')
            return
        self.index = index
        self.dna = dna
        self.name = name
        self.demand('RetrieveAccount')

    def enterRetrieveAccount(self):
        self.gameServicesManager.air.dbInterface.queryObject(self.gameServicesManager.air.dbId, self.target, self.__handleRetrieve)

    def __handleRetrieve(self, dclass, fields):
        # Database callback: check the account object and that the requested
        # slot is free. The list is normalized to exactly six entries.
        if dclass != self.gameServicesManager.air.dclassesByName['AccountUD']:
            self.demand('Kill', 'Your account object (%s) was not found in the database!' % dclass)
            return
        self.account = fields
        self.avList = self.account['ACCOUNT_AV_SET']
        self.avList = self.avList[:6]
        self.avList += [0] * (6 - len(self.avList))
        if self.avList[self.index]:
            self.demand('Kill', 'This avatar slot is already taken by another avatar!')
            return
        self.demand('CreateAvatar')

    def enterCreateAvatar(self):
        # Build a placeholder name ("<Color> <Species>") from the DNA. If the
        # player typed a name it is locked in immediately; otherwise the toon
        # keeps the placeholder with naming left OPEN.
        dna = ToonDNA()
        dna.makeFromNetString(self.dna)
        colorString = TTLocalizer.NumToColor[dna.headColor]
        animalType = TTLocalizer.AnimalToSpecies[dna.getAnimal()]
        name = (' ').join((colorString, animalType))
        if self.name != '':
            toonFields = {'setName': (self.name,), 'WishNameState': ('LOCKED', ), 'WishName': ('', ),
               'setDNAString': (self.dna,),
               'setDISLid': (self.target,),
               'setNametagStyle': (100, )}
        else:
            toonFields = {'setName': (name,), 'WishNameState': ('OPEN', ),
               'WishName': ('', ),
               'setDNAString': (self.dna,),
               'setDISLid': (self.target,),
               'setNametagStyle': (100, )}
        self.gameServicesManager.air.dbInterface.createObject(self.gameServicesManager.air.dbId, self.gameServicesManager.air.dclassesByName['DistributedToonUD'], toonFields, self.__handleCreate)

    def __handleCreate(self, avId):
        # Database callback with the freshly allocated avatar doId.
        if not avId:
            self.demand('Kill', 'Database failed to create the new avatar object!')
            return
        self.avId = avId
        self.demand('StoreAvatar')

    def enterStoreAvatar(self):
        # Write the new avatar id into the chosen slot; the old ACCOUNT_AV_SET
        # is passed as the expected previous value for the guarded update.
        self.avList[self.index] = self.avId
        self.gameServicesManager.air.dbInterface.updateObject(self.gameServicesManager.air.dbId, self.target, self.gameServicesManager.air.dclassesByName['AccountUD'], {'ACCOUNT_AV_SET': self.avList}, {'ACCOUNT_AV_SET': self.account['ACCOUNT_AV_SET']}, self.__handleStoreAvatar)

    def __handleStoreAvatar(self, fields):
        # Database callback: a non-empty `fields` means the guarded update
        # failed (the account record changed underneath us).
        if fields:
            self.demand('Kill', 'Database failed to associate the new avatar to your account!')
            return
        self.gameServicesManager.air.writeServerEvent('avatar-created', self.avId, self.target, self.dna.encode('hex'), self.index)
        self.gameServicesManager.sendUpdateToAccountId(self.target, 'createAvatarResponse', [self.avId])
        self.demand('Off')
class AcknowledgeNameOperation(AvatarOperation):
    # Resolves a moderated name decision on an avatar: APPROVED names become
    # the real name (LOCKED), REJECTED names reopen naming (OPEN).
    notify = DirectNotifyGlobal.directNotify.newCategory('AcknowledgeNameFSM')
    # State to enter once the account record has been retrieved.
    postAccountState = 'GetTargetAvatar'

    def __init__(self, gameServicesManager, target):
        AvatarOperation.__init__(self, gameServicesManager, target)
        self.avId = None  # avatar whose name decision is acknowledged
        return

    def enterStart(self, avId):
        self.avId = avId
        self.demand('RetrieveAccount')

    def enterGetTargetAvatar(self):
        # The avatar must appear in this account's avatar list.
        if self.avId not in self.avList:
            self.demand('Kill', 'Tried to acknowledge name on an avatar not in the account!')
            return
        self.gameServicesManager.air.dbInterface.queryObject(self.gameServicesManager.air.dbId, self.avId, self.__handleAvatar)

    def __handleAvatar(self, dclass, fields):
        # Database callback: apply the pending name decision.
        if dclass != self.gameServicesManager.air.dclassesByName['DistributedToonUD']:
            self.demand('Kill', "One of the account's avatars is invalid!")
            return
        wishNameState = fields['WishNameState'][0]
        wishName = fields['WishName'][0]
        name = fields['setName'][0]
        if wishNameState == 'APPROVED':
            # Promote the wish name to the real name and lock naming.
            wishNameState = 'LOCKED'
            name = wishName
            wishName = ''
        elif wishNameState == 'REJECTED':
            # Clear the wish name and let the player try again.
            wishNameState = 'OPEN'
            wishName = ''
        else:
            self.demand('Kill', 'Tried to acknowledge name on an avatar in invalid state (%s) !' % wishNameState)
            return
        # Guarded update: the previous field values are passed as expected
        # old values.
        self.gameServicesManager.air.dbInterface.updateObject(self.gameServicesManager.air.dbId, self.avId, self.gameServicesManager.air.dclassesByName['DistributedToonUD'], {'WishNameState': (wishNameState,), 'WishName': (wishName,),
           'setName': (name,)}, {'WishNameState': fields['WishNameState'], 'WishName': fields['WishName'],
           'setName': fields['setName']})
        self.gameServicesManager.sendUpdateToAccountId(self.target, 'acknowledgeAvatarNameResponse', [])
        self.demand('Off')
class TTGameServicesManagerUD(GameServicesManagerUD):
    """Toontown-specific game services: avatar creation and naming.

    Each client request below is dispatched to an asynchronous operation
    class via ``runOperation``.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('TTGameServicesManagerUD')
    avatarDclass = 'DistributedToonUD'

    def __init__(self, air):
        GameServicesManagerUD.__init__(self, air)
        # Created lazily in announceGenerate().
        self.nameGenerator = None

    def announceGenerate(self):
        GameServicesManagerUD.announceGenerate(self)
        self.nameGenerator = NameGenerator()

    def setNamePattern(self, avId, p1, f1, p2, f2, p3, f3, p4, f4):
        """Name an avatar from four (dictionary index, capitalize flag) pairs."""
        pattern = [(p1, f1), (p2, f2), (p3, f3), (p4, f4)]
        self.runOperation(SetNamePatternOperation, avId, pattern)

    def setNameTyped(self, avId, name):
        """Submit a typed name for an avatar (name-check only when avId is 0)."""
        self.runOperation(SetNameTypedOperation, avId, name)

    def createAvatar(self, dna, name, index):
        """Create a new avatar in the given account slot."""
        self.runOperation(CreateAvatarOperation, dna, name, index)

    def acknowledgeAvatarName(self, avId):
        """Resolve a pending APPROVED/REJECTED name decision on the avatar."""
        self.runOperation(AcknowledgeNameOperation, avId)
31642be85a92db77a7c7e0f5e138b323ce3d5838 | 51 | py | Python | salesforce_oauth2/__init__.py | RealtyShares/salesforce-oauth2 | 2e5f9a6deaa6b8acf556ca8bd968359f36a9cdbc | [
"Apache-2.0"
] | 26 | 2015-01-08T13:39:23.000Z | 2020-09-03T19:28:41.000Z | salesforce_oauth2/__init__.py | RealtyShares/salesforce-oauth2 | 2e5f9a6deaa6b8acf556ca8bd968359f36a9cdbc | [
"Apache-2.0"
] | null | null | null | salesforce_oauth2/__init__.py | RealtyShares/salesforce-oauth2 | 2e5f9a6deaa6b8acf556ca8bd968359f36a9cdbc | [
"Apache-2.0"
] | 14 | 2015-03-14T17:19:03.000Z | 2021-06-06T15:13:07.000Z | from salesforce_oauth2.auth import SalesforceOAuth2 | 51 | 51 | 0.921569 |
dd5de8146eb76e0b1ffc904f960c1faec7baf533 | 1,889 | py | Python | core/GraphDataBlock.py | giuseppefutia/GraphTSNE | 88d0ad97fe0d79b34ae9af3867f82a0d96f58bd1 | [
"MIT"
] | 120 | 2019-04-17T03:43:55.000Z | 2022-02-20T03:44:58.000Z | core/GraphDataBlock.py | giuseppefutia/GraphTSNE | 88d0ad97fe0d79b34ae9af3867f82a0d96f58bd1 | [
"MIT"
] | 1 | 2020-04-20T21:21:51.000Z | 2022-01-27T12:49:39.000Z | core/GraphDataBlock.py | giuseppefutia/GraphTSNE | 88d0ad97fe0d79b34ae9af3867f82a0d96f58bd1 | [
"MIT"
] | 16 | 2019-04-21T02:45:37.000Z | 2022-03-18T03:05:31.000Z | import numpy as np
import scipy.sparse as sp
from sklearn import manifold
import torch
class GraphDataBlock(object):
    """A graph data block holding features, labels and connectivity.

    Attributes:
        edge_to_starting_vertex (scipy coo matrix): E x V incidence matrix mapping each edge to its start vertex
        edge_to_ending_vertex (scipy coo matrix): E x V incidence matrix mapping each edge to its end vertex
        inputs (torch.FloatTensor): data feature matrix of size n x f
        labels (np.array): data class label matrix of size n x 1
        adj_matrix (scipy coo matrix): adjacency matrix of size n x n
        original_indices (np.array): indices of data points within the original dataset
        precomputed_path_matrix (scipy csr matrix)
    """

    def __init__(self, X, labels, W=None):
        # Densify sparse features, then move numpy features into torch.
        if sp.issparse(X):
            X = X.toarray()
        if type(X) is np.ndarray:
            X = torch.from_numpy(X).type(torch.FloatTensor)

        # Build an affinity matrix with spectral embedding when the caller
        # did not supply one, then normalize to sparse COO format.
        if W is None:
            embedder = manifold.SpectralEmbedding(n_components=2)
            W = embedder._get_affinity_matrix(X)
        W = sp.coo_matrix(W)

        # One-hot incidence matrices: row e has a single 1 in the column of
        # edge e's start (resp. end) vertex.
        num_edges, num_vertices = W.nnz, W.shape[0]
        edge_ids = np.arange(num_edges)
        ones = np.ones(num_edges)
        self.edge_to_starting_vertex = sp.coo_matrix((ones, (edge_ids, W.row)),
                                                     shape=(num_edges, num_vertices))
        self.edge_to_ending_vertex = sp.coo_matrix((ones, (edge_ids, W.col)),
                                                   shape=(num_edges, num_vertices))

        # Save as attributes.
        self.inputs = X           # feature matrix
        self.labels = labels      # class labels
        self.adj_matrix = W       # affinity matrix

        # Placeholders filled in elsewhere.
        self.original_indices = None
        self.precomputed_path_matrix = None
2cd63c9d794496c2722049f8dd6e01b83b01bff5 | 1,166 | py | Python | pyps/gwinstek.py | LevyHe/PyUds | 5d9297578790996d2d83c8fd13eb2b335cc38e4b | [
"MIT"
] | 2 | 2021-03-30T02:02:39.000Z | 2021-04-09T11:00:30.000Z | pyps/gwinstek.py | xinkunlong/PyUds | 5d9297578790996d2d83c8fd13eb2b335cc38e4b | [
"MIT"
] | 1 | 2020-12-03T14:23:11.000Z | 2020-12-03T14:23:11.000Z | pyps/gwinstek.py | xinkunlong/PyUds | 5d9297578790996d2d83c8fd13eb2b335cc38e4b | [
"MIT"
] | 4 | 2021-03-30T02:02:41.000Z | 2021-12-24T03:00:07.000Z | '''
Created on 16 jun 2014
@author: Anders Gertz
'''
import serial
import time
import pyps
class PSP2010(pyps.base.Base):
    '''
    API for the PSP-2010 programmable power supply and equivalents.

    Talks to the instrument over a serial port. Consecutive commands are
    spaced at least ``sleeptime`` seconds apart via the rate limiting in
    ``tx``.
    '''

    def __init__(self, port, baud=2400):
        # Minimum delay (seconds) between two consecutive commands.
        self.sleeptime = 0.5
        self.ser = None
        self.ser = serial.Serial(port, baud, timeout=1, xonxoff=False)
        # Start "in the past" so the first command goes out immediately.
        # Bug fix: time.clock() was removed in Python 3.8; time.monotonic()
        # is the correct clock for measuring intervals.
        self.lastsent = time.monotonic() - self.sleeptime

    def __del__(self):
        # Best-effort close of the serial port on garbage collection.
        if self.ser is not None and self.ser.isOpen():
            self.ser.close()

    def setv(self, volts):
        """Set the output voltage, clamped to the 0.0-19.9 V range."""
        volts = max(0.0, min(19.9, volts))
        self.tx("SV %05.2f" % volts)

    def seti(self, amps):
        """Set the current limit, clamped to the 0.0-9.99 A range."""
        amps = max(0.0, min(9.99, amps))
        self.tx("SI %.2f" % amps)

    def setonoff(self, on):
        """Enable (``KOE``) or disable (``KOD``) the output."""
        self.tx("KOE" if on else "KOD")

    def tx(self, command):
        """Send ``command`` to the instrument, rate-limited by ``sleeptime``."""
        wait = self.lastsent + self.sleeptime - time.monotonic()
        if wait > 0:
            time.sleep(wait)
        # Bug fix: pySerial on Python 3 requires a bytes-like object, so the
        # command (plus carriage-return terminator) is encoded explicitly.
        self.ser.write((command + '\r').encode('ascii'))
        self.lastsent = time.monotonic()
if __name__ == '__main__':
    # Manual smoke test: open the supply on COM1, toggle the output off and
    # step the voltage.
    p = PSP2010('COM1')
    p.setonoff(False)
    p.setv(13)
    p.setv(12)
p.setonoff(True)
| 23.32 | 71 | 0.538593 |
30cb6f3dd66229cbe84a8ea977fe7b95c36e9d40 | 36,032 | py | Python | torchdrug/data/molecule.py | jspisak/torchdrug | e02cd7d16bd1a3e5ee1e60efd35e2937b6b39cf7 | [
"Apache-2.0"
] | 2 | 2022-01-02T10:54:15.000Z | 2022-01-13T11:09:27.000Z | torchdrug/data/molecule.py | jspisak/torchdrug | e02cd7d16bd1a3e5ee1e60efd35e2937b6b39cf7 | [
"Apache-2.0"
] | null | null | null | torchdrug/data/molecule.py | jspisak/torchdrug | e02cd7d16bd1a3e5ee1e60efd35e2937b6b39cf7 | [
"Apache-2.0"
] | 1 | 2022-01-02T10:54:19.000Z | 2022-01-02T10:54:19.000Z | import math
import warnings
from matplotlib import pyplot as plt
from rdkit import Chem, RDLogger
from rdkit.Chem.Scaffolds import MurckoScaffold
import torch
from torch_scatter import scatter_add, scatter_min, scatter_max
from torchdrug import utils
from torchdrug.data import constant, Graph, PackedGraph
from torchdrug.core import Registry as R
from torchdrug.data.rdkit import draw
plt.switch_backend("agg")
class Molecule(Graph):
    """
    Molecule graph with chemical features.

    Parameters:
        edge_list (array_like, optional): list of edges of shape :math:`(|E|, 3)`.
            Each tuple is (node_in, node_out, bond_type).
        atom_type (array_like, optional): atom types of shape :math:`(|V|,)`
        bond_type (array_like, optional): bond types of shape :math:`(|E|,)`
        formal_charge (array_like, optional): formal charges of shape :math:`(|V|,)`
        explicit_hs (array_like, optional): number of explicit hydrogens of shape :math:`(|V|,)`
        chiral_tag (array_like, optional): chirality tags of shape :math:`(|V|,)`
        radical_electrons (array_like, optional): number of radical electrons of shape :math:`(|V|,)`
        atom_map (array_like, optional): atom mappings of shape :math:`(|V|,)`
        bond_stereo (array_like, optional): bond stereochem of shape :math:`(|E|,)`
        stereo_atoms (array_like, optional): ids of stereo atoms of shape :math:`(|E|,)`
    """

    # Mapping from RDKit bond type names to integer relation ids.
    bond2id = {"SINGLE": 0, "DOUBLE": 1, "TRIPLE": 2, "AROMATIC": 3}
    # Valence per atomic number (H, B, C, N, O, F, Si, P, S, Cl, Br, I).
    atom2valence = {1: 1, 5: 3, 6: 4, 7: 3, 8: 2, 9: 1, 14: 4, 15: 5, 16: 6, 17: 1, 35: 1, 53: 7}
    # Valence contributed by each bond type, indexed by bond id
    # (aromatic counts as 1.5).
    bond2valence = [1, 2, 3, 1.5]
    # Inverse of ``bond2id``.
    id2bond = {v: k for k, v in bond2id.items()}

    # Shared RDKit molecules/atoms used as defaults and placeholders.
    empty_mol = Chem.MolFromSmiles("")
    dummy_mol = Chem.MolFromSmiles("CC")
    dummy_atom = dummy_mol.GetAtomWithIdx(0)
    dummy_bond = dummy_mol.GetBondWithIdx(0)
    def __init__(self, edge_list=None, atom_type=None, bond_type=None, formal_charge=None, explicit_hs=None,
                 chiral_tag=None, radical_electrons=None, atom_map=None, bond_stereo=None, stereo_atoms=None,
                 **kwargs):
        # Default the relation count to the number of known bond types unless
        # the caller supplies an explicit `num_relation`.
        if "num_relation" not in kwargs:
            kwargs["num_relation"] = len(self.bond2id)
        super(Molecule, self).__init__(edge_list=edge_list, **kwargs)
        # Atom and bond types are mandatory; all other chemical attributes
        # default to zero tensors of the appropriate size.
        atom_type, bond_type = self._standarize_atom_bond(atom_type, bond_type)
        formal_charge = self._standarize_attribute(formal_charge, self.num_node)
        explicit_hs = self._standarize_attribute(explicit_hs, self.num_node)
        chiral_tag = self._standarize_attribute(chiral_tag, self.num_node)
        radical_electrons = self._standarize_attribute(radical_electrons, self.num_node)
        atom_map = self._standarize_attribute(atom_map, self.num_node)
        bond_stereo = self._standarize_attribute(bond_stereo, self.num_edge)
        stereo_atoms = self._standarize_attribute(stereo_atoms, (self.num_edge, 2))
        # Register per-node attributes inside the node() context and per-edge
        # attributes inside the edge() context (contexts provided by Graph --
        # presumably so attributes track nodes/edges through graph operations;
        # TODO confirm against Graph's implementation).
        with self.node():
            self.atom_type = atom_type
            self.formal_charge = formal_charge
            self.explicit_hs = explicit_hs
            self.chiral_tag = chiral_tag
            self.radical_electrons = radical_electrons
            self.atom_map = atom_map
        with self.edge():
            self.bond_type = bond_type
            self.bond_stereo = bond_stereo
            self.stereo_atoms = stereo_atoms
def _standarize_atom_bond(self, atom_type, bond_type):
if atom_type is None:
raise ValueError("`atom_type` should be provided")
if bond_type is None:
raise ValueError("`bond_type` should be provided")
atom_type = torch.as_tensor(atom_type, dtype=torch.long, device=self.device)
bond_type = torch.as_tensor(bond_type, dtype=torch.long, device=self.device)
return atom_type, bond_type
def _standarize_attribute(self, attribute, size):
if attribute is not None:
attribute = torch.as_tensor(attribute, dtype=torch.long, device=self.device)
else:
if isinstance(size, torch.Tensor):
size = size.tolist()
attribute = torch.zeros(size, dtype=torch.long, device=self.device)
return attribute
@classmethod
def _standarize_option(cls, option):
if option is None:
option = []
elif isinstance(option, str):
option = [option]
return option
def _check_no_stereo(self):
if (self.bond_stereo > 0).any():
warnings.warn("Try to apply masks on molecules with stereo bonds. This may produce invalid molecules. "
"To discard stereo information, call `mol.bond_stereo[:] = 0` before applying masks.")
def _maybe_num_node(self, edge_list):
if len(edge_list):
return edge_list[:, :2].max().item() + 1
else:
return 0
@classmethod
def from_smiles(cls, smiles, node_feature="default", edge_feature="default", graph_feature=None,
with_hydrogen=False, kekulize=False):
"""
Create a molecule from a SMILES string.
Parameters:
smiles (str): SMILES string
node_feature (str or list of str, optional): node features to extract
edge_feature (str or list of str, optional): edge features to extract
graph_feature (str or list of str, optional): graph features to extract
with_hydrogen (bool, optional): store hydrogens in the molecule graph.
By default, hydrogens are dropped
kekulize (bool, optional): convert aromatic bonds to single/double bonds.
Note this only affects the relation in ``edge_list``.
For ``bond_type``, aromatic bonds are always stored explicitly.
By default, aromatic bonds are stored.
"""
mol = Chem.MolFromSmiles(smiles)
if mol is None:
raise ValueError("Invalid SMILES `%s`" % smiles)
return cls.from_molecule(mol, node_feature, edge_feature, graph_feature, with_hydrogen, kekulize)
    @classmethod
    def from_molecule(cls, mol, node_feature="default", edge_feature="default", graph_feature=None,
                      with_hydrogen=False, kekulize=False):
        """
        Create a molecule from a RDKit object.
        Parameters:
            mol (rdchem.Mol): molecule
            node_feature (str or list of str, optional): node features to extract
            edge_feature (str or list of str, optional): edge features to extract
            graph_feature (str or list of str, optional): graph features to extract
            with_hydrogen (bool, optional): store hydrogens in the molecule graph.
                By default, hydrogens are dropped
            kekulize (bool, optional): convert aromatic bonds to single/double bonds.
                Note this only affects the relation in ``edge_list``.
                For ``bond_type``, aromatic bonds are always stored explicitly.
                By default, aromatic bonds are stored.
        """
        if mol is None:
            mol = cls.empty_mol
        if with_hydrogen:
            mol = Chem.AddHs(mol)
        if kekulize:
            Chem.Kekulize(mol)
        node_feature = cls._standarize_option(node_feature)
        edge_feature = cls._standarize_option(edge_feature)
        graph_feature = cls._standarize_option(graph_feature)
        atom_type = []
        formal_charge = []
        explicit_hs = []
        chiral_tag = []
        radical_electrons = []
        atom_map = []
        _node_feature = []
        # A dummy atom is appended so the lists are never empty; torch.tensor
        # would otherwise fail (or produce the wrong dtype) on an empty list.
        # The dummy entry is stripped with [:-1] after conversion.
        atoms = [mol.GetAtomWithIdx(i) for i in range(mol.GetNumAtoms())] + [cls.dummy_atom]
        for atom in atoms:
            atom_type.append(atom.GetAtomicNum())
            formal_charge.append(atom.GetFormalCharge())
            explicit_hs.append(atom.GetNumExplicitHs())
            chiral_tag.append(atom.GetChiralTag())
            radical_electrons.append(atom.GetNumRadicalElectrons())
            atom_map.append(atom.GetAtomMapNum())
            feature = []
            for name in node_feature:
                # feature extractors are looked up in the global registry by name
                func = R.get("features.atom.%s" % name)
                feature += func(atom)
            _node_feature.append(feature)
        # Strip the dummy atom from every per-atom array.
        atom_type = torch.tensor(atom_type)[:-1]
        atom_map = torch.tensor(atom_map)[:-1]
        formal_charge = torch.tensor(formal_charge)[:-1]
        explicit_hs = torch.tensor(explicit_hs)[:-1]
        chiral_tag = torch.tensor(chiral_tag)[:-1]
        radical_electrons = torch.tensor(radical_electrons)[:-1]
        if len(node_feature) > 0:
            _node_feature = torch.tensor(_node_feature)[:-1]
        else:
            _node_feature = None
        edge_list = []
        bond_type = []
        bond_stereo = []
        stereo_atoms = []
        _edge_feature = []
        # Same dummy trick for bonds; each bond yields two directed edges,
        # hence the [:-2] slices below.
        bonds = [mol.GetBondWithIdx(i) for i in range(mol.GetNumBonds())] + [cls.dummy_bond]
        for bond in bonds:
            # NOTE: `type` shadows the builtin here (kept as-is; doc-only edit)
            type = str(bond.GetBondType())
            stereo = bond.GetStereo()
            if stereo:
                _atoms = [a for a in bond.GetStereoAtoms()]
            else:
                _atoms = [0, 0]
            # Bonds whose type is not registered (e.g. AROMATIC when kekulized
            # out of bond2id) are skipped entirely.
            if type not in cls.bond2id:
                continue
            type = cls.bond2id[type]
            h, t = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
            edge_list += [[h, t, type], [t, h, type]]
            # always explicitly store aromatic bonds, no matter kekulize or not
            if bond.GetIsAromatic():
                type = cls.bond2id["AROMATIC"]
            bond_type += [type, type]
            bond_stereo += [stereo, stereo]
            stereo_atoms += [_atoms, _atoms]
            feature = []
            for name in edge_feature:
                func = R.get("features.bond.%s" % name)
                feature += func(bond)
            _edge_feature += [feature, feature]
        # Strip the two directed edges contributed by the dummy bond.
        edge_list = edge_list[:-2]
        bond_type = torch.tensor(bond_type)[:-2]
        bond_stereo = torch.tensor(bond_stereo)[:-2]
        stereo_atoms = torch.tensor(stereo_atoms)[:-2]
        if len(edge_feature) > 0:
            _edge_feature = torch.tensor(_edge_feature)[:-2]
        else:
            _edge_feature = None
        _graph_feature = []
        for name in graph_feature:
            func = R.get("features.molecule.%s" % name)
            _graph_feature += func(mol)
        if len(graph_feature) > 0:
            _graph_feature = torch.tensor(_graph_feature)
        else:
            _graph_feature = None
        # When kekulized, AROMATIC is removed from the relation vocabulary.
        num_relation = len(cls.bond2id) - 1 if kekulize else len(cls.bond2id)
        return cls(edge_list, atom_type, bond_type,
                   formal_charge=formal_charge, explicit_hs=explicit_hs,
                   chiral_tag=chiral_tag, radical_electrons=radical_electrons, atom_map=atom_map,
                   bond_stereo=bond_stereo, stereo_atoms=stereo_atoms,
                   node_feature=_node_feature, edge_feature=_edge_feature, graph_feature=_graph_feature,
                   num_node=mol.GetNumAtoms(), num_relation=num_relation)
def to_smiles(self, isomeric=True, atom_map=True, canonical=False):
"""
Return a SMILES string of this molecule.
Parameters:
isomeric (bool, optional): keep isomeric information or not
atom_map (bool, optional): keep atom mapping or not
canonical (bool, optional): if true, return the canonical form of smiles
Returns:
str
"""
mol = self.to_molecule()
if not atom_map:
for atom in mol.GetAtoms():
atom.SetAtomMapNum(0)
smiles = Chem.MolToSmiles(mol, isomericSmiles=isomeric)
if canonical:
smiles_set = set()
while smiles not in smiles_set:
smiles_set.add(smiles)
mol = Chem.MolFromSmiles(smiles)
smiles = Chem.MolToSmiles(mol, isomericSmiles=isomeric)
return smiles
    def to_molecule(self, ignore_error=False):
        """
        Return a RDKit object of this molecule.
        Parameters:
            ignore_error (bool, optional): if true, return ``None`` for illegal molecules.
                Otherwise, raise an exception.
        Returns:
            rdchem.Mol
        """
        mol = Chem.RWMol()
        # Pull all per-atom / per-bond tensors down to Python lists once.
        atom_type = self.atom_type.tolist()
        bond_type = self.bond_type.tolist()
        formal_charge = self.formal_charge.tolist()
        explicit_hs = self.explicit_hs.tolist()
        chiral_tag = self.chiral_tag.tolist()
        radical_electrons = self.radical_electrons.tolist()
        atom_map = self.atom_map.tolist()
        bond_stereo = self.bond_stereo.tolist()
        stereo_atoms = self.stereo_atoms.tolist()
        for i in range(self.num_node):
            atom = Chem.Atom(atom_type[i])
            atom.SetFormalCharge(formal_charge[i])
            atom.SetNumExplicitHs(explicit_hs[i])
            atom.SetChiralTag(Chem.ChiralType(chiral_tag[i]))
            atom.SetNumRadicalElectrons(radical_electrons[i])
            # Suppress implicit hydrogens when explicit Hs / radicals are stored.
            atom.SetNoImplicit(explicit_hs[i] > 0 or radical_electrons[i] > 0)
            atom.SetAtomMapNum(atom_map[i])
            mol.AddAtom(atom)
        edge_list = self.edge_list.tolist()
        # First pass: add each undirected bond once (edges are stored in both
        # directions; only the h < t direction is materialized).
        for i in range(self.num_edge):
            h, t, type = edge_list[i]
            if h < t:
                j = mol.AddBond(h, t, Chem.BondType.names[self.id2bond[type]])
                bond = mol.GetBondWithIdx(j - 1)
                bond.SetIsAromatic(bond_type[i] == self.bond2id["AROMATIC"])
                bond.SetStereo(Chem.BondStereo(bond_stereo[i]))
        # Second pass: stereo atoms can only be set after all bonds exist.
        # `j` tracks the RDKit bond index in the same h < t order as above.
        j = 0
        for i in range(self.num_edge):
            h, t, type = edge_list[i]
            if h < t:
                if bond_stereo[i]:
                    bond = mol.GetBondWithIdx(j)
                    bond.SetStereoAtoms(*stereo_atoms[i])
                j += 1
        if ignore_error:
            try:
                with utils.no_rdkit_log():
                    mol.UpdatePropertyCache()
                    Chem.AssignStereochemistry(mol)
            except:
                mol = None
        else:
            mol.UpdatePropertyCache()
            Chem.AssignStereochemistry(mol)
        return mol
def ion_to_molecule(self):
"""
Convert ions to molecules by adjusting hydrogens and electrons.
Note [N+] will not be converted.
"""
data_dict = self.data_dict
formal_charge = data_dict.pop("formal_charge")
explicit_hs = data_dict.pop("explicit_hs")
radical_electrons = data_dict.pop("radical_electrons")
pos_nitrogen = (self.atom_type == 7) & (self.explicit_valence > 3)
formal_charge = pos_nitrogen.long()
explicit_hs = torch.zeros_like(explicit_hs)
radical_electrons = torch.zeros_like(radical_electrons)
return type(self)(self.edge_list, edge_weight=self.edge_weight,
num_node=self.num_node, num_relation=self.num_relation,
formal_charge=formal_charge, explicit_hs=explicit_hs, radical_electrons=radical_electrons,
meta_dict=self.meta_dict, **data_dict)
def to_scaffold(self, chirality=False):
"""
Return a scaffold SMILES string of this molecule.
Parameters:
chirality (bool, optional): consider chirality in the scaffold or not
Returns:
str
"""
smiles = self.to_smiles()
scaffold = MurckoScaffold.MurckoScaffoldSmiles(smiles, includeChirality=chirality)
return scaffold
    def node_mask(self, index, compact=False):
        """Mask out nodes; warns first because masking may invalidate stereo bonds."""
        self._check_no_stereo()
        return super(Molecule, self).node_mask(index, compact)
    def edge_mask(self, index):
        """Mask out edges; warns first because masking may invalidate stereo bonds."""
        self._check_no_stereo()
        return super(Molecule, self).edge_mask(index)
    def undirected(self, add_inverse=False):
        """Return the undirected form; `add_inverse` is rejected since bonds
        already store both directions."""
        if add_inverse:
            raise ValueError("Bonds are undirected relations, but `add_inverse` is specified")
        return super(Molecule, self).undirected(add_inverse)
    @property
    def num_atom(self):
        """Number of atoms. Alias of ``num_node``."""
        return self.num_node
    @property
    def num_bond(self):
        """Number of (directed) bonds. Alias of ``num_edge``."""
        return self.num_edge
    @utils.cached_property
    def explicit_valence(self):
        """Per-atom explicit valence: sum of bond valences over each atom's edges."""
        # edge_list[:, 2] is the bond type; bond2valence maps type -> valence
        # contribution (rounding handles fractional aromatic valences).
        bond2valence = torch.tensor(self.bond2valence, device=self.device)
        explicit_valence = scatter_add(bond2valence[self.edge_list[:, 2]], self.edge_list[:, 0], dim_size=self.num_node)
        return explicit_valence.round().long()
@utils.cached_property
def is_valid(self):
"""A coarse implementation of valence check."""
# TODO: cross-check by any domain expert
atom2valence = torch.tensor(float["nan"]).repeat(constant.NUM_ATOM)
for k, v in self.atom2valence:
atom2valence[k] = v
atom2valence = torch.as_tensor(atom2valence, device=self.device)
max_atom_valence = atom2valence[self.atom_type]
# special case for nitrogen
pos_nitrogen = (self.atom_type == 7) & (self.formal_charge == 1)
max_atom_valence[pos_nitrogen] = 4
if torch.isnan(max_atom_valence).any():
index = torch.isnan(max_atom_valence).nonzero()[0]
raise ValueError("Fail to check valence. Unknown atom type %d" % self.atom_type[index])
is_valid = (self.explicit_valence <= max_atom_valence).all()
return is_valid
@utils.cached_property
def is_valid_rdkit(self):
try:
with utils.no_rdkit_log():
mol = self.to_molecule()
Chem.SanitizeMol(mol, sanitizeOps=Chem.SanitizeFlags.SANITIZE_PROPERTIES)
is_valid = torch.ones(1, dtype=torch.bool, device=self.device)
except ValueError:
is_valid = torch.zeros(1, dtype=torch.bool, device=self.device)
return is_valid
    def visualize(self, title=None, save_file=None, figure_size=(3, 3), ax=None, atom_map=False):
        """
        Visualize this molecule with matplotlib.
        Parameters:
            title (str, optional): title for this molecule
            save_file (str, optional): ``png`` or ``pdf`` file to save visualization.
                If not provided, show the figure in window.
            figure_size (tuple of int, optional): width and height of the figure
            ax (matplotlib.axes.Axes, optional): axis to plot the figure
            atom_map (bool, optional): visualize atom mapping or not
        """
        # When no axis is supplied we own the figure (and saving/showing it).
        is_root = ax is None
        if ax is None:
            fig = plt.figure(figsize=figure_size)
            if title is not None:
                # leave room for the title
                ax = plt.gca()
            else:
                # no title: use the full canvas
                ax = fig.add_axes([0, 0, 1, 1])
        if title is not None:
            ax.set_title(title)
        mol = self.to_molecule()
        if not atom_map:
            # hide atom-map numbers in the drawing
            for atom in mol.GetAtoms():
                atom.SetAtomMapNum(0)
        draw.MolToMPL(mol, ax=ax)
        ax.set_frame_on(False)
        if is_root:
            if save_file:
                fig.savefig(save_file)
            else:
                fig.show()
def __eq__(self, other):
smiles = self.to_smiles(isomeric=False, atom_map=False, canonical=True)
other_smiles = other.to_smiles(isomeric=False, atom_map=False, canonical=True)
return smiles == other_smiles
class PackedMolecule(PackedGraph, Molecule):
    """
    Container for molecules with variadic sizes.
    Parameters:
        edge_list (array_like, optional): list of edges of shape :math:`(|E|, 3)`.
            Each tuple is (node_in, node_out, bond_type).
        atom_type (array_like, optional): atom types of shape :math:`(|V|,)`
        bond_type (array_like, optional): bond types of shape :math:`(|E|,)`
        num_nodes (array_like, optional): number of nodes in each graph
            By default, it will be inferred from the largest id in `edge_list`
        num_edges (array_like, optional): number of edges in each graph
        num_relation (int, optional): number of relations
        offsets (array_like, optional): node id offsets of shape :math:`(|E|,)`.
            If not provided, nodes in `edge_list` should be relative index, i.e., the index in each graph.
            If provided, nodes in `edge_list` should be absolute index, i.e., the index in the packed graph.
    """
    # Class produced when unpacking this container back into single graphs.
    unpacked_type = Molecule
    def __init__(self, edge_list=None, atom_type=None, bond_type=None, num_nodes=None, num_edges=None, offsets=None,
                 **kwargs):
        # Default the relation vocabulary to the full bond-type mapping.
        if "num_relation" not in kwargs:
            kwargs["num_relation"] = len(self.bond2id)
        super(PackedMolecule, self).__init__(edge_list=edge_list, num_nodes=num_nodes, num_edges=num_edges,
                                             offsets=offsets, atom_type=atom_type, bond_type=bond_type, **kwargs)
def ion_to_molecule(self):
"""
Convert ions to molecules by adjusting hydrogens and electrons.
Note [N+] will not be converted.
"""
data_dict = self.data_dict
formal_charge = data_dict.pop("formal_charge")
explicit_hs = data_dict.pop("explicit_hs")
radical_electrons = data_dict.pop("radical_electrons")
pos_nitrogen = (self.atom_type == 7) & (self.explicit_valence > 3)
formal_charge = pos_nitrogen.long()
explicit_hs = torch.zeros_like(explicit_hs)
radical_electrons = torch.zeros_like(radical_electrons)
return type(self)(self.edge_list, edge_weight=self.edge_weight,
num_nodes=self.num_nodes, num_edges=self.num_edges, num_relation=self.num_relation,
offsets=self._offsets, formal_charge=formal_charge, explicit_hs=explicit_hs,
radical_electrons=radical_electrons, meta_dict=self.meta_dict, **data_dict)
    @utils.cached_property
    def is_valid(self):
        """A coarse implementation of valence check."""
        # TODO: cross-check by any domain expert
        # NOTE(review): the sibling Molecule.is_valid sizes this table with
        # `constant.NUM_ATOM` instead of the literal 118 — confirm they agree.
        atom2valence = torch.tensor(float("nan")).repeat(118)
        for k, v in self.atom2valence.items():
            atom2valence[k] = v
        atom2valence = torch.as_tensor(atom2valence, device=self.device)
        # Maximum allowed valence per atom; NaN marks unknown atom types.
        max_atom_valence = atom2valence[self.atom_type]
        # special case for nitrogen
        pos_nitrogen = (self.atom_type == 7) & (self.formal_charge == 1)
        max_atom_valence[pos_nitrogen] = 4
        if torch.isnan(max_atom_valence).any():
            index = torch.isnan(max_atom_valence).nonzero()[0]
            raise ValueError("Fail to check valence. Unknown atom type %d" % self.atom_type[index])
        is_valid = self.explicit_valence <= max_atom_valence
        # Reduce per-atom validity to per-molecule validity (min over each graph).
        is_valid = scatter_min(is_valid.long(), self.node2graph, dim_size=self.batch_size)[0].bool()
        return is_valid
@utils.cached_property
def is_valid_rdkit(self):
return torch.cat([mol.is_valid_rdkit for mol in self])
@classmethod
def from_smiles(cls, smiles_list, node_feature="default", edge_feature="default", graph_feature=None,
with_hydrogen=False, kekulize=False):
"""
Create a packed molecule from a list of SMILES strings.
Parameters:
smiles_list (str): list of SMILES strings
node_feature (str or list of str, optional): node features to extract
edge_feature (str or list of str, optional): edge features to extract
graph_feature (str or list of str, optional): graph features to extract
with_hydrogen (bool, optional): store hydrogens in the molecule graph.
By default, hydrogens are dropped
kekulize (bool, optional): convert aromatic bonds to single/double bonds.
Note this only affects the relation in ``edge_list``.
For ``bond_type``, aromatic bonds are always stored explicitly.
By default, aromatic bonds are stored.
"""
mols = []
for smiles in smiles_list:
mol = Chem.MolFromSmiles(smiles)
if mol is None:
raise ValueError("Invalid SMILES `%s`" % smiles)
mols.append(mol)
return cls.from_molecule(mols, node_feature, edge_feature, graph_feature, with_hydrogen, kekulize)
    @classmethod
    def from_molecule(cls, mols, node_feature="default", edge_feature="default", graph_feature=None,
                      with_hydrogen=False, kekulize=False):
        """
        Create a packed molecule from a list of RDKit objects.
        Parameters:
            mols (list of rdchem.Mol): molecules
            node_feature (str or list of str, optional): node features to extract
            edge_feature (str or list of str, optional): edge features to extract
            graph_feature (str or list of str, optional): graph features to extract
            with_hydrogen (bool, optional): store hydrogens in the molecule graph.
                By default, hydrogens are dropped
            kekulize (bool, optional): convert aromatic bonds to single/double bonds.
                Note this only affects the relation in ``edge_list``.
                For ``bond_type``, aromatic bonds are always stored explicitly.
                By default, aromatic bonds are stored.
        """
        node_feature = cls._standarize_option(node_feature)
        edge_feature = cls._standarize_option(edge_feature)
        graph_feature = cls._standarize_option(graph_feature)
        atom_type = []
        formal_charge = []
        explicit_hs = []
        chiral_tag = []
        radical_electrons = []
        atom_map = []
        edge_list = []
        bond_type = []
        bond_stereo = []
        stereo_atoms = []
        _node_feature = []
        _edge_feature = []
        _graph_feature = []
        num_nodes = []
        num_edges = []
        # A dummy molecule is appended so none of the accumulator lists end up
        # empty (torch.tensor would fail on an empty list). Judging by the
        # slices below, it contributes 2 atoms ([:-2] on atom arrays), 1 bond
        # (= 2 directed edges, [:-2] on edge arrays) and 1 graph ([:-1]).
        mols = mols + [cls.dummy_mol]
        for mol in mols:
            if mol is None:
                mol = cls.empty_mol
            if with_hydrogen:
                mol = Chem.AddHs(mol)
            if kekulize:
                Chem.Kekulize(mol)
            for atom in mol.GetAtoms():
                atom_type.append(atom.GetAtomicNum())
                formal_charge.append(atom.GetFormalCharge())
                explicit_hs.append(atom.GetNumExplicitHs())
                chiral_tag.append(atom.GetChiralTag())
                radical_electrons.append(atom.GetNumRadicalElectrons())
                atom_map.append(atom.GetAtomMapNum())
                feature = []
                for name in node_feature:
                    # feature extractors are looked up in the global registry
                    func = R.get("features.atom.%s" % name)
                    feature += func(atom)
                _node_feature.append(feature)
            for bond in mol.GetBonds():
                # NOTE: `type` shadows the builtin here (kept as-is; doc-only edit)
                type = str(bond.GetBondType())
                stereo = bond.GetStereo()
                if stereo:
                    _atoms = list(bond.GetStereoAtoms())
                else:
                    _atoms = [0, 0]
                # bonds whose type is not registered are skipped entirely
                if type not in cls.bond2id:
                    continue
                type = cls.bond2id[type]
                h, t = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
                feature = []
                for name in edge_feature:
                    func = R.get("features.bond.%s" % name)
                    feature += func(bond)
                # every bond is stored as two directed edges
                edge_list += [[h, t, type], [t, h, type]]
                # always explicitly store aromatic bonds
                if bond.GetIsAromatic():
                    type = cls.bond2id["AROMATIC"]
                bond_type += [type, type]
                bond_stereo += [stereo, stereo]
                stereo_atoms += [_atoms, _atoms]
                _edge_feature += [feature, feature]
            for name in graph_feature:
                func = R.get("features.molecule.%s" % name)
                _graph_feature += func(mol)
            num_nodes.append(mol.GetNumAtoms())
            num_edges.append(mol.GetNumBonds() * 2)
        # Strip the dummy molecule's contribution from every accumulator.
        atom_type = torch.tensor(atom_type)[:-2]
        atom_map = torch.tensor(atom_map)[:-2]
        formal_charge = torch.tensor(formal_charge)[:-2]
        explicit_hs = torch.tensor(explicit_hs)[:-2]
        chiral_tag = torch.tensor(chiral_tag)[:-2]
        radical_electrons = torch.tensor(radical_electrons)[:-2]
        if len(node_feature) > 0:
            _node_feature = torch.tensor(_node_feature)[:-2]
        else:
            _node_feature = None
        num_nodes = num_nodes[:-1]
        num_edges = num_edges[:-1]
        edge_list = torch.tensor(edge_list)[:-2]
        bond_type = torch.tensor(bond_type)[:-2]
        bond_stereo = torch.tensor(bond_stereo)[:-2]
        stereo_atoms = torch.tensor(stereo_atoms)[:-2]
        if len(edge_feature) > 0:
            _edge_feature = torch.tensor(_edge_feature)[:-2]
        else:
            _edge_feature = None
        if len(graph_feature) > 0:
            _graph_feature = torch.tensor(_graph_feature)[:-1]
        else:
            _graph_feature = None
        # When kekulized, AROMATIC is removed from the relation vocabulary.
        num_relation = len(cls.bond2id) - 1 if kekulize else len(cls.bond2id)
        return cls(edge_list, atom_type, bond_type,
                   formal_charge=formal_charge, explicit_hs=explicit_hs,
                   chiral_tag=chiral_tag, radical_electrons=radical_electrons, atom_map=atom_map,
                   bond_stereo=bond_stereo, stereo_atoms=stereo_atoms,
                   node_feature=_node_feature, edge_feature=_edge_feature, graph_feature=_graph_feature,
                   num_nodes=num_nodes, num_edges=num_edges, num_relation=num_relation)
def to_smiles(self, isomeric=True, atom_map=True, canonical=False):
"""
Return a list of SMILES strings.
Parameters:
isomeric (bool, optional): keep isomeric information or not
atom_map (bool, optional): keep atom mapping or not
canonical (bool, optional): if true, return the canonical form of smiles
Returns:
list of str
"""
mols = self.to_molecule()
smiles_list = []
for mol in mols:
if not atom_map:
for atom in mol.GetAtoms():
atom.SetAtomMapNum(0)
smiles = Chem.MolToSmiles(mol, isomericSmiles=isomeric)
if canonical:
smiles_set = set()
while smiles not in smiles_set:
smiles_set.add(smiles)
mol = Chem.MolFromSmiles(smiles)
smiles = Chem.MolToSmiles(mol, isomericSmiles=isomeric)
smiles_list.append(smiles)
return smiles_list
    def to_molecule(self, ignore_error=False):
        """
        Return a list of RDKit objects.
        Parameters:
            ignore_error (bool, optional): if true, return ``None`` for illegal molecules.
                Otherwise, raise an exception.
        Returns:
            list of rdchem.Mol
        """
        # Pull all per-atom / per-bond tensors down to Python lists once.
        atom_type = self.atom_type.tolist()
        bond_type = self.bond_type.tolist()
        formal_charge = self.formal_charge.tolist()
        explicit_hs = self.explicit_hs.tolist()
        chiral_tag = self.chiral_tag.tolist()
        radical_electrons = self.radical_electrons.tolist()
        atom_map = self.atom_map.tolist()
        bond_stereo = self.bond_stereo.tolist()
        stereo_atoms = self.stereo_atoms.tolist()
        # Prefix sums delimiting each molecule's atoms/edges in the flat arrays.
        num_cum_nodes = [0] + self.num_cum_nodes.tolist()
        num_cum_edges = [0] + self.num_cum_edges.tolist()
        edge_list = self.edge_list.clone()
        # Convert absolute (packed) node ids back to per-molecule relative ids.
        edge_list[:, :2] -= self._offsets.unsqueeze(-1)
        edge_list = edge_list.tolist()
        mols = []
        for i in range(self.batch_size):
            mol = Chem.RWMol()
            for j in range(num_cum_nodes[i], num_cum_nodes[i + 1]):
                atom = Chem.Atom(atom_type[j])
                atom.SetFormalCharge(formal_charge[j])
                atom.SetNumExplicitHs(explicit_hs[j])
                atom.SetChiralTag(Chem.ChiralType(chiral_tag[j]))
                atom.SetNumRadicalElectrons(radical_electrons[j])
                # suppress implicit Hs when explicit Hs / radicals are stored
                atom.SetNoImplicit(explicit_hs[j] > 0 or radical_electrons[j] > 0)
                atom.SetAtomMapNum(atom_map[j])
                mol.AddAtom(atom)
            # First pass: add each undirected bond once (h < t direction only).
            for j in range(num_cum_edges[i], num_cum_edges[i + 1]):
                h, t, type = edge_list[j]
                if h < t:
                    k = mol.AddBond(h, t, Chem.BondType.names[self.id2bond[type]])
                    bond = mol.GetBondWithIdx(k - 1)
                    bond.SetIsAromatic(bond_type[j] == self.bond2id["AROMATIC"])
                    bond.SetStereo(Chem.BondStereo(bond_stereo[j]))
            # Second pass: stereo atoms can only be set after all bonds exist.
            # `k` tracks the RDKit bond index in the same h < t order as above.
            k = 0
            for j in range(num_cum_edges[i], num_cum_edges[i + 1]):
                h, t, type = edge_list[j]
                if h < t:
                    if bond_stereo[j]:
                        bond = mol.GetBondWithIdx(k)
                        # These do not necessarily need to be the highest 'ranking' atoms like CIP stereo requires.
                        # They can be any arbitrary atoms neighboring the begin and end atoms of this bond respectively.
                        # STEREOCIS or STEREOTRANS is then set relative to only these atoms.
                        bond.SetStereoAtoms(*stereo_atoms[j])
                    k += 1
            if ignore_error:
                try:
                    with utils.no_rdkit_log():
                        mol.UpdatePropertyCache()
                        Chem.AssignStereochemistry(mol)
                except:
                    mol = None
            else:
                mol.UpdatePropertyCache()
                Chem.AssignStereochemistry(mol)
            mols.append(mol)
        return mols
    def node_mask(self, index, compact=False):
        """Mask out nodes; warns first because masking may invalidate stereo bonds."""
        self._check_no_stereo()
        return super(PackedMolecule, self).node_mask(index, compact)
    def edge_mask(self, index):
        """Mask out edges; warns first because masking may invalidate stereo bonds."""
        self._check_no_stereo()
        return super(PackedMolecule, self).edge_mask(index)
    def undirected(self, add_inverse=False):
        """Return the undirected form; `add_inverse` is rejected since bonds
        already store both directions."""
        if add_inverse:
            raise ValueError("Bonds are undirected relations, but `add_inverse` is specified")
        return super(PackedMolecule, self).undirected(add_inverse)
    def visualize(self, titles=None, save_file=None, figure_size=(3, 3), num_row=None, num_col=None, atom_map=False):
        """
        Visualize the packed molecules with matplotlib.
        Parameters:
            titles (list of str, optional): title for each molecule. Default is the ID of each molecule.
            save_file (str, optional): ``png`` or ``pdf`` file to save visualization.
                If not provided, show the figure in window.
            figure_size (tuple of int, optional): width and height of the figure
            num_row (int, optional): number of rows in the figure
            num_col (int, optional): number of columns in the figure
            atom_map (bool, optional): visualize atom mapping or not
        """
        if titles is None:
            graph = self.get_item(0)
            titles = ["%s %d" % (type(graph).__name__, i) for i in range(self.batch_size)]
        # Derive a near-square grid when rows/columns are not fully specified.
        if num_col is None:
            if num_row is None:
                num_col = math.ceil(self.batch_size ** 0.5)
            else:
                num_col = math.ceil(self.batch_size / num_row)
        if num_row is None:
            num_row = math.ceil(self.batch_size / num_col)
        # `figure_size` is per subplot; scale up to the whole grid.
        figure_size = (num_col * figure_size[0], num_row * figure_size[1])
        fig = plt.figure(figsize=figure_size)
        for i in range(self.batch_size):
            graph = self.get_item(i)
            ax = fig.add_subplot(num_row, num_col, i + 1)
            graph.visualize(title=titles[i], ax=ax, atom_map=atom_map)
        # remove the space of axis labels
        fig.tight_layout()
        if save_file:
            fig.savefig(save_file)
        else:
            fig.show()
# Register the packed (batched) container so Molecule instances can be packed.
Molecule.packed_type = PackedMolecule
068a8cc8ac6499bd4df641c7df3b3e890fb262a3 | 5,548 | py | Python | imageio/core/findlib.py | gerlero/imageio | 4b3550ddedf99ecdfa71768d20a4a6251adcbabf | [
"BSD-2-Clause"
] | null | null | null | imageio/core/findlib.py | gerlero/imageio | 4b3550ddedf99ecdfa71768d20a4a6251adcbabf | [
"BSD-2-Clause"
] | null | null | null | imageio/core/findlib.py | gerlero/imageio | 4b3550ddedf99ecdfa71768d20a4a6251adcbabf | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015-1018, imageio contributors
# Copyright (C) 2013, Zach Pincus, Almar Klein and others
""" This module contains generic code to find and load a dynamic library.
"""
import os
import sys
import ctypes
# Absolute path of the directory containing this module.
LOCALDIR = os.path.abspath(os.path.dirname(__file__))
# Flag that can be patched / set to True to disable loading non-system libs
SYSTEM_LIBS_ONLY = False
def looks_lib(fname):
    """Return True when *fname* has a dynamic-library extension for the
    current platform.

    The check is case-insensitive; on Linux/other POSIX it also accepts
    versioned names such as ``libfoo.so.1``.
    """
    lowered = fname.lower()
    if sys.platform.startswith("win"):
        return lowered.endswith(".dll")
    if sys.platform.startswith("darwin"):
        return lowered.endswith(".dylib")
    return lowered.endswith(".so") or ".so." in lowered
def generate_candidate_libs(lib_names, lib_dirs=None):
    """Generate a list of candidate filenames of what might be the dynamic
    library corresponding with the given list of names.
    Returns (lib_dirs, lib_paths)
    """
    lib_dirs = lib_dirs or []
    # Get system dirs to search
    sys_lib_dirs = [
        "/lib",
        "/usr/lib",
        "/usr/lib/x86_64-linux-gnu",
        "/usr/lib/aarch64-linux-gnu",
        "/usr/local/lib",
        "/opt/local/lib",
    ]
    # Get Python dirs to search (shared if for Pyzo)
    py_sub_dirs = ["bin", "lib", "DLLs", "Library/bin", "shared"]
    py_lib_dirs = [os.path.join(sys.prefix, d) for d in py_sub_dirs]
    # base_prefix differs from prefix inside a virtualenv
    if hasattr(sys, "base_prefix"):
        py_lib_dirs += [os.path.join(sys.base_prefix, d) for d in py_sub_dirs]
    # Get user dirs to search (i.e. HOME)
    home_dir = os.path.expanduser("~")
    user_lib_dirs = [os.path.join(home_dir, d) for d in ["lib"]]
    # Select only the dirs for which a directory exists, and remove duplicates
    # (order matters: caller-supplied dirs are searched first)
    potential_lib_dirs = lib_dirs + sys_lib_dirs + py_lib_dirs + user_lib_dirs
    lib_dirs = []
    for ld in potential_lib_dirs:
        if os.path.isdir(ld) and ld not in lib_dirs:
            lib_dirs.append(ld)
    # Now attempt to find libraries of that name in the given directory
    # (case-insensitive)
    lib_paths = []
    for lib_dir in lib_dirs:
        # Get files, prefer short names, last version
        # (reverse-alphabetical puts higher version numbers first; the
        # length sort is stable, so among equal-length names that order
        # is preserved while shorter names win overall)
        files = os.listdir(lib_dir)
        files = reversed(sorted(files))
        files = sorted(files, key=len)
        for lib_name in lib_names:
            # Test all filenames for name and ext
            for fname in files:
                if fname.lower().startswith(lib_name) and looks_lib(fname):
                    lib_paths.append(os.path.join(lib_dir, fname))
    # Return (only the items which are files)
    lib_paths = [lp for lp in lib_paths if os.path.isfile(lp)]
    return lib_dirs, lib_paths
def load_lib(exact_lib_names, lib_names, lib_dirs=None):
    """load_lib(exact_lib_names, lib_names, lib_dirs=None)
    Load a dynamic library.
    This function first tries to load the library from the given exact
    names. When that fails, it tries to find the library in common
    locations. It searches for files that start with one of the names
    given in lib_names (case insensitive). The search is performed in
    the given lib_dirs and a set of common library dirs.
    Returns ``(ctypes_library, library_path)``

    Raises:
        ValueError: if neither name list contains a usable name
        OSError: if no candidate library could be loaded (or found)
    """
    # Checks
    assert isinstance(exact_lib_names, list)
    assert isinstance(lib_names, list)
    if lib_dirs is not None:
        assert isinstance(lib_dirs, list)
    # Drop empty/None entries
    exact_lib_names = [n for n in exact_lib_names if n]
    lib_names = [n for n in lib_names if n]
    # Get reference name (for better messages)
    if lib_names:
        the_lib_name = lib_names[0]
    elif exact_lib_names:
        the_lib_name = exact_lib_names[0]
    else:
        raise ValueError("No library name given.")
    # Collect filenames of potential libraries
    # First try a few bare library names that ctypes might be able to find
    # in the default locations for each platform.
    if SYSTEM_LIBS_ONLY:
        lib_dirs, lib_paths = [], []
    else:
        lib_dirs, lib_paths = generate_candidate_libs(lib_names, lib_dirs)
    # Exact names go first so they take priority over discovered paths.
    lib_paths = exact_lib_names + lib_paths
    # Select loader
    if sys.platform.startswith("win"):
        loader = ctypes.windll
    else:
        loader = ctypes.cdll
    # Try to load until success
    the_lib = None
    errors = []
    for fname in lib_paths:
        try:
            the_lib = loader.LoadLibrary(fname)
            break
        except Exception as err:
            # Don't record errors when it couldn't load the library from an
            # exact name -- this fails often, and doesn't provide any useful
            # debugging information anyway, beyond "couldn't find library..."
            if fname not in exact_lib_names:
                errors.append((fname, err))
    # No success ...
    if the_lib is None:
        if errors:
            # No library loaded, and load-errors reported for some
            # candidate libs
            err_txt = ["%s:\n%s" % (l, str(e)) for l, e in errors]
            msg = (
                "One or more %s libraries were found, but "
                + "could not be loaded due to the following errors:\n%s"
            )
            raise OSError(msg % (the_lib_name, "\n\n".join(err_txt)))
        else:
            # No errors, because no potential libraries found at all!
            msg = "Could not find a %s library in any of:\n%s"
            raise OSError(msg % (the_lib_name, "\n".join(lib_dirs)))
    # Done
    # (`fname` is still bound to the path that loaded successfully)
    return the_lib, fname
5c5d42d0a91bf12adac458a20b9a350084dcceda | 1,467 | py | Python | pkgbuild/archlinux/python3/dirname.py | GameMaker2k/Neo-Hockey-Test | 5737bfedf0d83f69964e85ac1dbf7e6a93c13f44 | [
"BSD-3-Clause"
] | 1 | 2020-04-04T10:25:42.000Z | 2020-04-04T10:25:42.000Z | pkgbuild/archlinux/python3/dirname.py | GameMaker2k/Neo-Hockey-Test | 5737bfedf0d83f69964e85ac1dbf7e6a93c13f44 | [
"BSD-3-Clause"
] | null | null | null | pkgbuild/archlinux/python3/dirname.py | GameMaker2k/Neo-Hockey-Test | 5737bfedf0d83f69964e85ac1dbf7e6a93c13f44 | [
"BSD-3-Clause"
] | 3 | 2021-09-07T08:44:33.000Z | 2021-12-07T23:49:39.000Z | #!/usr/bin/env python3
'''
This program is free software; you can redistribute it and/or modify
it under the terms of the Revised BSD License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Revised BSD License for more details.
Copyright 2011-2016 Game Maker 2k - https://github.com/GameMaker2k
Copyright 2011-2016 Kazuki Przyborowski - https://github.com/KazukiPrzyborowski
$FileInfo: dirname.py - Last Update: 2/15/2016 Ver. 0.0.5 RC 3 - Author: cooldude2k $
'''
from __future__ import absolute_import, division, print_function, unicode_literals;
import os, sys, argparse;
__version_info__ = (0, 0, 5, "rc3");
if(__version_info__[3]!=None):
__version__ = str(__version_info__[0])+"."+str(__version_info__[1])+"."+str(__version_info__[2])+"+"+str(__version_info__[3]);
if(__version_info__[3]==None):
__version__ = str(__version_info__[0])+"."+str(__version_info__[1])+"."+str(__version_info__[2]);
proname = "dirname";
prover = __version__;
profullname = proname+" "+prover;
parser = argparse.ArgumentParser(conflict_handler = "resolve", add_help = True);
parser.add_argument("-v", "--version", action = "version", version = profullname);
parser.add_argument("filepath", help = "enter a file name/path");
getargs = parser.parse_args();
print(os.path.dirname(getargs.filepath));
| 40.75 | 127 | 0.732106 |
28759764eabc28d1f4b8a315ff2b7d54597bdae1 | 1,919 | py | Python | localized_fields/lookups.py | GabLeRoux/django-localized-fields | f0ac0f7f2503317fde5d75ba8481e34db83512bd | [
"MIT"
] | null | null | null | localized_fields/lookups.py | GabLeRoux/django-localized-fields | f0ac0f7f2503317fde5d75ba8481e34db83512bd | [
"MIT"
] | null | null | null | localized_fields/lookups.py | GabLeRoux/django-localized-fields | f0ac0f7f2503317fde5d75ba8481e34db83512bd | [
"MIT"
] | null | null | null | from django.conf import settings
from django.contrib.postgres.fields.hstore import KeyTransform
from django.contrib.postgres.lookups import (SearchLookup, TrigramSimilar,
Unaccent)
from django.db.models.expressions import Col
from django.db.models.lookups import (Contains, EndsWith, Exact, IContains,
IEndsWith, IExact, In, IRegex, IsNull,
IStartsWith, Regex, StartsWith)
from django.utils import translation
class LocalizedLookupMixin:
    """Mixin that points a lookup's left-hand side at the hstore key for the
    currently active language before delegating to the base lookup."""

    def process_lhs(self, qn, connection):
        # Only plain column references are rewritten; other expressions pass through.
        if isinstance(self.lhs, Col):
            active_language = translation.get_language() or settings.LANGUAGE_CODE
            self.lhs = KeyTransform(active_language, self.lhs)
        return super().process_lhs(qn, connection)

    def get_prep_lookup(self):
        # hstore stores text, so compare against the string form of the value.
        return str(self.rhs)
# Concrete localized lookups: each pairs the language-aware mixin with a
# standard Django / Postgres lookup so that e.g. `field__icontains` matches
# only the active language's value.
# NOTE(review): "TrigramSimilair" and the "...With" suffixes look like typos,
# but the names are part of the public lookup API -- confirm before renaming.
class LocalizedSearchLookup(LocalizedLookupMixin, SearchLookup):
    pass
class LocalizedUnaccent(LocalizedLookupMixin, Unaccent):
    pass
class LocalizedTrigramSimilair(LocalizedLookupMixin, TrigramSimilar):
    pass
class LocalizedExact(LocalizedLookupMixin, Exact):
    pass
class LocalizedIExact(LocalizedLookupMixin, IExact):
    pass
class LocalizedIn(LocalizedLookupMixin, In):
    pass
class LocalizedContains(LocalizedLookupMixin, Contains):
    pass
class LocalizedIContains(LocalizedLookupMixin, IContains):
    pass
class LocalizedStartsWith(LocalizedLookupMixin, StartsWith):
    pass
class LocalizedIStartsWith(LocalizedLookupMixin, IStartsWith):
    pass
class LocalizedEndsWith(LocalizedLookupMixin, EndsWith):
    pass
class LocalizedIEndsWith(LocalizedLookupMixin, IEndsWith):
    pass
class LocalizedIsNullWith(LocalizedLookupMixin, IsNull):
    pass
class LocalizedRegexWith(LocalizedLookupMixin, Regex):
    pass
class LocalizedIRegexWith(LocalizedLookupMixin, IRegex):
    pass
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.