edited_code stringlengths 17 978k | original_code stringlengths 17 978k |
|---|---|
import requests, json
#Buscar estudiantes por ID
url = 'http://school.labs.com.es/api/students/'
#Ingresar el ID con input
id_ = input("Digame el ID: ")
data = None
headers = { 'Content-Type' : 'application/json' }
response = requests.get(url + str(id_))
if (response.status_code == 200):
data = response.json()
for key in data.keys():
if(key == 'id'):
continue
print(f"{key}: {data[key]}")
print("")
else:
print(f'Error: ', response.reason)
nombre = input(f"Nombre ({data["firstName"]}): ")
if (nombre != ''):
data['firstName'] = nombre
apellidos = input(f"Apellidos ({data["lastName"]}): ")
if (apellidos != ''):
data['lastName'] = apellidos
classID = input(f"Clase ({data["classId"]}): ")
if (classID != ''):
data['classId'] = int(classID)
url = 'http://school.labs.com.es/api/students/' + str(data['id'])
response = requests.put(url, data=json.dumps(data), headers=headers )
if (response.status_code == 204):
print(f"Registro modificado correctamente.")
print("")
else:
print('Error: ', response.reason) | import requests, json
#Buscar estudiantes por ID
url = 'http://school.labs.com.es/api/students/'
#Ingresar el ID con input
id_ = input("Digame el ID: ")
data = None
headers = { 'Content-Type' : 'application/json' }
response = requests.get(url + str(id_))
if (response.status_code == 200):
data = response.json()
for key in data.keys():
if(key == 'id'):
continue
print(f"{key}: {data[key]}")
print("")
else:
print(f'Error: ', response.reason)
nombre = input(f"Nombre ({data['firstName']}): ")
if (nombre != ''):
data['firstName'] = nombre
apellidos = input(f"Apellidos ({data['lastName']}): ")
if (apellidos != ''):
data['lastName'] = apellidos
classID = input(f"Clase ({data['classId']}): ")
if (classID != ''):
data['classId'] = int(classID)
url = 'http://school.labs.com.es/api/students/' + str(data['id'])
response = requests.put(url, data=json.dumps(data), headers=headers )
if (response.status_code == 204):
print(f"Registro modificado correctamente.")
print("")
else:
print('Error: ', response.reason) |
import os
from collections import Counter
from pathlib import Path
from typing import List, Optional, Tuple
import numpy as np
from tqdm import tqdm
from .filter import get_agents_slice_from_frames, get_frames_slice_from_scenes, get_tl_faces_slice_from_frames
from .zarr_dataset import ChunkedDataset
GIGABYTE = 1 * 1024 * 1024 * 1024
def _compute_path_size(path: str) -> int:
"""
Compute the total size of the folder, considering also nested elements.
Can be run to get zarr total size
Args:
path (str): base path
Returns:
(int): total size in bytes
"""
root_directory = Path(path)
return sum(f.stat().st_size for f in root_directory.glob("**/*") if f.is_file())
def _get_num_els_in_scene_range(zarr_dataset: ChunkedDataset, scene_index_start: int, scene_index_end: int) -> dict:
"""
Get numbers of scenes, frames, agents, tl_lights in a set of scene in a zarr
Args:
zarr_dataset (ChunkedDataset): zarr dataset to use for computing number of elements
scene_index_start (int): start from this scene (included)
scene_index_end (int): end before this scene (excluded!!)
Returns:
dict: a dict with keys equal to zarr initialise args
"""
assert scene_index_end > scene_index_start
scene_start = zarr_dataset.scenes[scene_index_start]
scene_end = zarr_dataset.scenes[scene_index_end - 1]
frame_start = zarr_dataset.frames[scene_start["frame_index_interval"][0]]
frame_end = zarr_dataset.frames[scene_end["frame_index_interval"][1] - 1]
return {
"num_scenes": scene_index_end - scene_index_start,
"num_frames": scene_end["frame_index_interval"][1] - scene_start["frame_index_interval"][0],
"num_agents": frame_end["agent_index_interval"][1] - frame_start["agent_index_interval"][0],
"num_tl_faces": frame_end["traffic_light_faces_index_interval"][1]
- frame_start["traffic_light_faces_index_interval"][0],
}
def _append_zarr_subset(
input_zarr: ChunkedDataset,
output_zarr: ChunkedDataset,
scene_index_start: int,
scene_index_end: int,
output_zarr_num_els: Optional[dict] = None,
) -> None:
"""
Append a subset of input_zarr into output_zarr. To avoid appending (slow), output_zarr must be opened in write mode
and with pre-allocated shape already. End indices of output_zarr are read from output_zarr_num_els, or 0 is assumed
otherwise
Args:
input_zarr (ChunkedDataset): origin zarr in read mode
output_zarr (ChunkedDataset): zarr already opened in write mode and with pre-allocated arrays
scene_index_start (int): index of the first scene to copy
scene_index_end (int): index of the last scene (excluded)
output_zarr_num_els (Optional[dict]): if None, write starting from 0 index in the output zarr
Returns:
"""
# indices to assign in the destination array
if output_zarr_num_els is None:
idx_output_scene, idx_output_frame, idx_output_agent, idx_output_tl_face = 0, 0, 0, 0
else:
idx_output_scene = output_zarr_num_els["num_scenes"]
idx_output_frame = output_zarr_num_els["num_frames"]
idx_output_agent = output_zarr_num_els["num_agents"]
idx_output_tl_face = output_zarr_num_els["num_tl_faces"]
# relative indices to subtract before copying to erase input history
idx_start_frame = input_zarr.scenes[scene_index_start]["frame_index_interval"][0]
idx_start_agent = input_zarr.frames[idx_start_frame]["agent_index_interval"][0]
idx_start_tl_face = input_zarr.frames[idx_start_frame]["traffic_light_faces_index_interval"][0]
# if output_zarr_num_els is not zero we also need to add output_history
idx_start_frame = idx_output_frame - idx_start_frame
idx_start_agent = idx_output_agent - idx_start_agent
idx_start_tl_face = idx_output_tl_face - idx_start_tl_face
for idx_scene in range(scene_index_start, scene_index_end):
# get slices from input zarr
scenes = input_zarr.scenes[idx_scene : idx_scene + 1]
frames = input_zarr.frames[get_frames_slice_from_scenes(*scenes)]
agents = input_zarr.agents[get_agents_slice_from_frames(*frames[[0, -1]])]
tl_faces = input_zarr.tl_faces[get_tl_faces_slice_from_frames(*frames[[0, -1]])]
# fix indices
scenes["frame_index_interval"] += idx_start_frame
frames["agent_index_interval"] += idx_start_agent
frames["traffic_light_faces_index_interval"] += idx_start_tl_face
# copy from input_zarr to output_zarr
output_zarr.scenes[idx_output_scene : idx_output_scene + len(scenes)] = scenes
output_zarr.frames[idx_output_frame : idx_output_frame + len(frames)] = frames
output_zarr.agents[idx_output_agent : idx_output_agent + len(agents)] = agents
output_zarr.tl_faces[idx_output_tl_face : idx_output_tl_face + len(tl_faces)] = tl_faces
# update output indices
idx_output_scene += len(scenes)
idx_output_frame += len(frames)
idx_output_agent += len(agents)
idx_output_tl_face += len(tl_faces)
def zarr_concat(input_zarrs: List[str], output_zarr: str) -> None:
"""
Concat many zarr into a single one. Takes care of updating indices for frames and agents.
Args:
input_zarrs (List[str]): a list of paths to input zarrs
output_zarr (str): the path to the output zarr
Returns:
"""
assert not os.path.exists(output_zarr), "we need to pre-allocate zarr, can't append fast"
output_dataset = ChunkedDataset(output_zarr)
# we need to estimate how much to allocate by reading all input zarrs lens
# we also store them for later use
num_els_valid_zarrs = []
valid_zarrs = []
tqdm_bar = tqdm(input_zarrs, desc="computing total size to allocate")
for input_zarr in tqdm_bar:
try:
input_dataset = ChunkedDataset(input_zarr)
input_dataset.open()
except (ValueError, KeyError):
print(f"{input_zarr} is not valid! skipping")
continue
num_els_valid_zarrs.append(_get_num_els_in_scene_range(input_dataset, 0, len(input_dataset.scenes)))
valid_zarrs.append(input_zarr)
# we can now pre-allocate the output dataset
total_num_els: Counter = Counter()
for num_el in num_els_valid_zarrs:
total_num_els += Counter(num_el)
output_dataset.initialize(**total_num_els)
cur_num_els = Counter({"num_scenes": 0, "num_frames": 0, "num_agents": 0, "num_tl_faces": 0})
tqdm_bar = tqdm(valid_zarrs)
for idx, input_zarr in enumerate(tqdm_bar):
tqdm_bar.set_description(f"working on {input_zarr}")
input_dataset = ChunkedDataset(input_zarr)
input_dataset.open()
_append_zarr_subset(input_dataset, output_dataset, 0, len(input_dataset.scenes), cur_num_els)
cur_num_els += Counter(num_els_valid_zarrs[idx])
def zarr_split(input_zarr: str, output_path: str, split_infos: List[dict]) -> List[Tuple[int, int]]:
"""
Split the input zarr into many zarrs. Names and sizes can be passed using the split_infos arg.
Args:
input_zarr (str): path of the original zarr
output_path (str): base destination path
split_infos (List[dict]): list of dict. Each element should have `name` (final path is output_path+name)
and `split_size_GB` with the size of the split. Last element must have `split_size_GB` set to -1 to collect
the last part of the input_zarrr.
Returns:
List[Tuple[int, int]]: indices of scenes where a split occurred
"""
input_dataset = ChunkedDataset(input_zarr)
input_dataset.open()
assert len(split_infos) > 0
assert split_infos[-1]["split_size_GB"] == -1, "last split element should have split_size_GB equal to -1"
# compute the size of the input_dataset in GB
size_input_gb = _compute_path_size(input_zarr) / GIGABYTE
num_scenes_input = len(input_dataset.scenes)
# ensure the defined splits don't overspill the input dataset
num_scenes_output = [
int(num_scenes_input * split_info["split_size_GB"] / size_input_gb) for split_info in split_infos[:-1]
]
assert sum(num_scenes_output) < num_scenes_input, "size exceed"
num_scenes_output.append(num_scenes_input - sum(num_scenes_output))
cur_scene = 0
cuts_track = [] # keep track of start-end of the cuts
tqdm_bar = tqdm(zip(num_scenes_output, split_infos))
for num_scenes, split_info in tqdm_bar:
start_cut = cur_scene
end_cut = cur_scene + num_scenes
tqdm_bar.set_description(f"cutting scenes {start_cut}-{end_cut} into {split_info["name"]}")
num_els_output = _get_num_els_in_scene_range(input_dataset, start_cut, end_cut)
output_dataset = ChunkedDataset(str(Path(output_path) / split_info["name"]))
output_dataset.initialize(**num_els_output)
_append_zarr_subset(input_dataset, output_dataset, start_cut, end_cut)
cuts_track.append((start_cut, end_cut))
cur_scene = end_cut
return cuts_track
def zarr_scenes_chop(input_zarr: str, output_zarr: str, num_frames_to_copy: int) -> None:
"""
Copy `num_frames_to_keep` from each scene in input_zarr and paste them into output_zarr
Args:
input_zarr (str): path to the input zarr
output_zarr (str): path to the output zarr
num_frames_to_copy (int): how many frames to copy from the start of each scene
Returns:
"""
input_dataset = ChunkedDataset(input_zarr)
input_dataset.open()
# check we can actually copy the frames we want from each scene
assert np.all(np.diff(input_dataset.scenes["frame_index_interval"], 1) > num_frames_to_copy), "not enough frames"
output_dataset = ChunkedDataset(output_zarr)
output_dataset.initialize()
# current indices where to copy in the output_dataset
cur_scene_idx, cur_frame_idx, cur_agent_idx, cur_tl_face_idx = 0, 0, 0, 0
for idx in tqdm(range(len(input_dataset.scenes)), desc="copying"):
# get data and immediately chop frames, agents and traffic lights
scene = input_dataset.scenes[idx]
first_frame_idx = scene["frame_index_interval"][0]
frames = input_dataset.frames[first_frame_idx : first_frame_idx + num_frames_to_copy]
agents = input_dataset.agents[get_agents_slice_from_frames(*frames[[0, -1]])]
tl_faces = input_dataset.tl_faces[get_tl_faces_slice_from_frames(*frames[[0, -1]])]
# reset interval relative to our output (subtract current history and add output history)
scene["frame_index_interval"][0] = cur_frame_idx
scene["frame_index_interval"][1] = cur_frame_idx + num_frames_to_copy # address for less frames
frames["agent_index_interval"] += cur_agent_idx - frames[0]["agent_index_interval"][0]
frames["traffic_light_faces_index_interval"] += (
cur_tl_face_idx - frames[0]["traffic_light_faces_index_interval"][0]
)
# write in dest using append (slow)
output_dataset.scenes.append(scene[None, ...]) # need 2D array to concatenate
output_dataset.frames.append(frames)
output_dataset.agents.append(agents)
output_dataset.tl_faces.append(tl_faces)
# increase indices in output
cur_scene_idx += len(scene)
cur_frame_idx += len(frames)
cur_agent_idx += len(agents)
cur_tl_face_idx += len(tl_faces)
| import os
from collections import Counter
from pathlib import Path
from typing import List, Optional, Tuple
import numpy as np
from tqdm import tqdm
from .filter import get_agents_slice_from_frames, get_frames_slice_from_scenes, get_tl_faces_slice_from_frames
from .zarr_dataset import ChunkedDataset
GIGABYTE = 1 * 1024 * 1024 * 1024
def _compute_path_size(path: str) -> int:
"""
Compute the total size of the folder, considering also nested elements.
Can be run to get zarr total size
Args:
path (str): base path
Returns:
(int): total size in bytes
"""
root_directory = Path(path)
return sum(f.stat().st_size for f in root_directory.glob("**/*") if f.is_file())
def _get_num_els_in_scene_range(zarr_dataset: ChunkedDataset, scene_index_start: int, scene_index_end: int) -> dict:
"""
Get numbers of scenes, frames, agents, tl_lights in a set of scene in a zarr
Args:
zarr_dataset (ChunkedDataset): zarr dataset to use for computing number of elements
scene_index_start (int): start from this scene (included)
scene_index_end (int): end before this scene (excluded!!)
Returns:
dict: a dict with keys equal to zarr initialise args
"""
assert scene_index_end > scene_index_start
scene_start = zarr_dataset.scenes[scene_index_start]
scene_end = zarr_dataset.scenes[scene_index_end - 1]
frame_start = zarr_dataset.frames[scene_start["frame_index_interval"][0]]
frame_end = zarr_dataset.frames[scene_end["frame_index_interval"][1] - 1]
return {
"num_scenes": scene_index_end - scene_index_start,
"num_frames": scene_end["frame_index_interval"][1] - scene_start["frame_index_interval"][0],
"num_agents": frame_end["agent_index_interval"][1] - frame_start["agent_index_interval"][0],
"num_tl_faces": frame_end["traffic_light_faces_index_interval"][1]
- frame_start["traffic_light_faces_index_interval"][0],
}
def _append_zarr_subset(
input_zarr: ChunkedDataset,
output_zarr: ChunkedDataset,
scene_index_start: int,
scene_index_end: int,
output_zarr_num_els: Optional[dict] = None,
) -> None:
"""
Append a subset of input_zarr into output_zarr. To avoid appending (slow), output_zarr must be opened in write mode
and with pre-allocated shape already. End indices of output_zarr are read from output_zarr_num_els, or 0 is assumed
otherwise
Args:
input_zarr (ChunkedDataset): origin zarr in read mode
output_zarr (ChunkedDataset): zarr already opened in write mode and with pre-allocated arrays
scene_index_start (int): index of the first scene to copy
scene_index_end (int): index of the last scene (excluded)
output_zarr_num_els (Optional[dict]): if None, write starting from 0 index in the output zarr
Returns:
"""
# indices to assign in the destination array
if output_zarr_num_els is None:
idx_output_scene, idx_output_frame, idx_output_agent, idx_output_tl_face = 0, 0, 0, 0
else:
idx_output_scene = output_zarr_num_els["num_scenes"]
idx_output_frame = output_zarr_num_els["num_frames"]
idx_output_agent = output_zarr_num_els["num_agents"]
idx_output_tl_face = output_zarr_num_els["num_tl_faces"]
# relative indices to subtract before copying to erase input history
idx_start_frame = input_zarr.scenes[scene_index_start]["frame_index_interval"][0]
idx_start_agent = input_zarr.frames[idx_start_frame]["agent_index_interval"][0]
idx_start_tl_face = input_zarr.frames[idx_start_frame]["traffic_light_faces_index_interval"][0]
# if output_zarr_num_els is not zero we also need to add output_history
idx_start_frame = idx_output_frame - idx_start_frame
idx_start_agent = idx_output_agent - idx_start_agent
idx_start_tl_face = idx_output_tl_face - idx_start_tl_face
for idx_scene in range(scene_index_start, scene_index_end):
# get slices from input zarr
scenes = input_zarr.scenes[idx_scene : idx_scene + 1]
frames = input_zarr.frames[get_frames_slice_from_scenes(*scenes)]
agents = input_zarr.agents[get_agents_slice_from_frames(*frames[[0, -1]])]
tl_faces = input_zarr.tl_faces[get_tl_faces_slice_from_frames(*frames[[0, -1]])]
# fix indices
scenes["frame_index_interval"] += idx_start_frame
frames["agent_index_interval"] += idx_start_agent
frames["traffic_light_faces_index_interval"] += idx_start_tl_face
# copy from input_zarr to output_zarr
output_zarr.scenes[idx_output_scene : idx_output_scene + len(scenes)] = scenes
output_zarr.frames[idx_output_frame : idx_output_frame + len(frames)] = frames
output_zarr.agents[idx_output_agent : idx_output_agent + len(agents)] = agents
output_zarr.tl_faces[idx_output_tl_face : idx_output_tl_face + len(tl_faces)] = tl_faces
# update output indices
idx_output_scene += len(scenes)
idx_output_frame += len(frames)
idx_output_agent += len(agents)
idx_output_tl_face += len(tl_faces)
def zarr_concat(input_zarrs: List[str], output_zarr: str) -> None:
"""
Concat many zarr into a single one. Takes care of updating indices for frames and agents.
Args:
input_zarrs (List[str]): a list of paths to input zarrs
output_zarr (str): the path to the output zarr
Returns:
"""
assert not os.path.exists(output_zarr), "we need to pre-allocate zarr, can't append fast"
output_dataset = ChunkedDataset(output_zarr)
# we need to estimate how much to allocate by reading all input zarrs lens
# we also store them for later use
num_els_valid_zarrs = []
valid_zarrs = []
tqdm_bar = tqdm(input_zarrs, desc="computing total size to allocate")
for input_zarr in tqdm_bar:
try:
input_dataset = ChunkedDataset(input_zarr)
input_dataset.open()
except (ValueError, KeyError):
print(f"{input_zarr} is not valid! skipping")
continue
num_els_valid_zarrs.append(_get_num_els_in_scene_range(input_dataset, 0, len(input_dataset.scenes)))
valid_zarrs.append(input_zarr)
# we can now pre-allocate the output dataset
total_num_els: Counter = Counter()
for num_el in num_els_valid_zarrs:
total_num_els += Counter(num_el)
output_dataset.initialize(**total_num_els)
cur_num_els = Counter({"num_scenes": 0, "num_frames": 0, "num_agents": 0, "num_tl_faces": 0})
tqdm_bar = tqdm(valid_zarrs)
for idx, input_zarr in enumerate(tqdm_bar):
tqdm_bar.set_description(f"working on {input_zarr}")
input_dataset = ChunkedDataset(input_zarr)
input_dataset.open()
_append_zarr_subset(input_dataset, output_dataset, 0, len(input_dataset.scenes), cur_num_els)
cur_num_els += Counter(num_els_valid_zarrs[idx])
def zarr_split(input_zarr: str, output_path: str, split_infos: List[dict]) -> List[Tuple[int, int]]:
"""
Split the input zarr into many zarrs. Names and sizes can be passed using the split_infos arg.
Args:
input_zarr (str): path of the original zarr
output_path (str): base destination path
split_infos (List[dict]): list of dict. Each element should have `name` (final path is output_path+name)
and `split_size_GB` with the size of the split. Last element must have `split_size_GB` set to -1 to collect
the last part of the input_zarrr.
Returns:
List[Tuple[int, int]]: indices of scenes where a split occurred
"""
input_dataset = ChunkedDataset(input_zarr)
input_dataset.open()
assert len(split_infos) > 0
assert split_infos[-1]["split_size_GB"] == -1, "last split element should have split_size_GB equal to -1"
# compute the size of the input_dataset in GB
size_input_gb = _compute_path_size(input_zarr) / GIGABYTE
num_scenes_input = len(input_dataset.scenes)
# ensure the defined splits don't overspill the input dataset
num_scenes_output = [
int(num_scenes_input * split_info["split_size_GB"] / size_input_gb) for split_info in split_infos[:-1]
]
assert sum(num_scenes_output) < num_scenes_input, "size exceed"
num_scenes_output.append(num_scenes_input - sum(num_scenes_output))
cur_scene = 0
cuts_track = [] # keep track of start-end of the cuts
tqdm_bar = tqdm(zip(num_scenes_output, split_infos))
for num_scenes, split_info in tqdm_bar:
start_cut = cur_scene
end_cut = cur_scene + num_scenes
tqdm_bar.set_description(f"cutting scenes {start_cut}-{end_cut} into {split_info['name']}")
num_els_output = _get_num_els_in_scene_range(input_dataset, start_cut, end_cut)
output_dataset = ChunkedDataset(str(Path(output_path) / split_info["name"]))
output_dataset.initialize(**num_els_output)
_append_zarr_subset(input_dataset, output_dataset, start_cut, end_cut)
cuts_track.append((start_cut, end_cut))
cur_scene = end_cut
return cuts_track
def zarr_scenes_chop(input_zarr: str, output_zarr: str, num_frames_to_copy: int) -> None:
"""
Copy `num_frames_to_keep` from each scene in input_zarr and paste them into output_zarr
Args:
input_zarr (str): path to the input zarr
output_zarr (str): path to the output zarr
num_frames_to_copy (int): how many frames to copy from the start of each scene
Returns:
"""
input_dataset = ChunkedDataset(input_zarr)
input_dataset.open()
# check we can actually copy the frames we want from each scene
assert np.all(np.diff(input_dataset.scenes["frame_index_interval"], 1) > num_frames_to_copy), "not enough frames"
output_dataset = ChunkedDataset(output_zarr)
output_dataset.initialize()
# current indices where to copy in the output_dataset
cur_scene_idx, cur_frame_idx, cur_agent_idx, cur_tl_face_idx = 0, 0, 0, 0
for idx in tqdm(range(len(input_dataset.scenes)), desc="copying"):
# get data and immediately chop frames, agents and traffic lights
scene = input_dataset.scenes[idx]
first_frame_idx = scene["frame_index_interval"][0]
frames = input_dataset.frames[first_frame_idx : first_frame_idx + num_frames_to_copy]
agents = input_dataset.agents[get_agents_slice_from_frames(*frames[[0, -1]])]
tl_faces = input_dataset.tl_faces[get_tl_faces_slice_from_frames(*frames[[0, -1]])]
# reset interval relative to our output (subtract current history and add output history)
scene["frame_index_interval"][0] = cur_frame_idx
scene["frame_index_interval"][1] = cur_frame_idx + num_frames_to_copy # address for less frames
frames["agent_index_interval"] += cur_agent_idx - frames[0]["agent_index_interval"][0]
frames["traffic_light_faces_index_interval"] += (
cur_tl_face_idx - frames[0]["traffic_light_faces_index_interval"][0]
)
# write in dest using append (slow)
output_dataset.scenes.append(scene[None, ...]) # need 2D array to concatenate
output_dataset.frames.append(frames)
output_dataset.agents.append(agents)
output_dataset.tl_faces.append(tl_faces)
# increase indices in output
cur_scene_idx += len(scene)
cur_frame_idx += len(frames)
cur_agent_idx += len(agents)
cur_tl_face_idx += len(tl_faces)
|
"""
Functions and data to use for formatting Pathoscope and NuVs analysis document. Formatted documents are destined for
API responses or CSV/Excel formatted file downloads.
"""
import asyncio
import csv
import io
import json
import statistics
from collections import defaultdict
import aiofiles
import openpyxl.styles
import virtool.analyses.db
import virtool.analyses.utils
import virtool.db.core
import virtool.db.utils
import virtool.history.db
import virtool.otus.db
import virtool.otus.utils
CSV_HEADERS = (
"OTU",
"Isolate",
"Sequence",
"Length",
"Weight",
"Median Depth",
"Coverage"
)
def calculate_median_depths(document: dict) -> dict:
"""
Calculate the median depth for all hits (sequences) in a Pathoscope result document.
:param document: the pathoscope analysis document to calculate depths for
:return: a dict of median depths keyed by hit (sequence) ids
"""
depths = dict()
for hit in document["results"]:
depths[hit["id"]] = statistics.median(hit["align"])
return depths
async def create_pathoscope_coverage_cache(db, document):
cache = defaultdict(lambda: defaultdict(lambda: dict()))
for hit in document["results"]:
for isolate in hit["isolates"]:
for sequence in isolate["sequences"]:
otu_id = hit["id"]
isolate_id = isolate["id"]
sequence_id = sequence["id"]
if sequence.get("align"):
cache[otu_id][isolate_id][sequence_id] = virtool.analyses.utils.transform_coverage_to_coordinates(sequence["align"])
document = {
"analysis": {
"id": document["_id"]
},
"cache": cache
}
await db.coverage.insert_one(document)
return document
async def ensure_pathoscope_coverage_cache(db, document):
cache = await db.coverage.find_one({"analysis.id": document["_id"]})
if cache is None:
cache = await create_pathoscope_coverage_cache(db, document)
for hit in document["results"]:
for isolate in hit["isolates"]:
for sequence in isolate["sequences"]:
otu_id = hit["id"]
isolate_id = isolate["id"]
sequence_id = sequence["id"]
if sequence.get("align"):
sequence["align"] = cache["cache"][otu_id][isolate_id][sequence_id]
async def load_results(settings: dict, document: dict) -> dict:
"""
Load the analysis results. Hide the alternative loading from a `results.json` file. These files are only
generated if the analysis data would have exceeded the MongoDB size limit (16mb).
The document is returned unmodified if loading from file is not required.
:param settings: the application settings
:param document: the document to load results for
:return: a complete analysis document
"""
if document["results"] == "file":
path = virtool.analyses.utils.join_analysis_json_path(
settings["data_path"],
document["_id"],
document["sample"]["id"]
)
async with aiofiles.open(path, "r") as f:
data = json.loads(await f.read())
return {
**document,
"results": data
}
return document
async def format_aodp(app, document):
patched_otus = await gather_patched_otus(app, document["results"])
hits = defaultdict(list)
for hit in document["results"]:
hits[hit["sequence_id"]].append(hit)
for otu in patched_otus.values():
otu["id"] = otu.pop("_id")
for isolate in otu["isolates"]:
for sequence in isolate["sequences"]:
sequence["hits"] = hits[sequence["_id"]]
sequence["id"] = sequence.pop("_id")
return {
**document,
"results": list(patched_otus.values())
}
async def format_pathoscope(app, document):
document = await load_results(
app["settings"],
document
)
patched_otus = await gather_patched_otus(app, document["results"])
formatted = dict()
for hit in document["results"]:
otu_id = hit["otu"]["id"]
otu_document = patched_otus[otu_id]
max_ref_length = 0
for isolate in otu_document["isolates"]:
max_ref_length = max(max_ref_length, max([len(s["sequence"]) for s in isolate["sequences"]]))
otu = {
"id": otu_id,
"name": otu_document["name"],
"version": otu_document["version"],
"abbreviation": otu_document["abbreviation"],
"isolates": otu_document["isolates"],
"length": max_ref_length
}
formatted[otu_id] = otu
for isolate in otu["isolates"]:
for sequence in isolate["sequences"]:
if sequence["_id"] == hit["id"]:
sequence.update(hit)
sequence["length"] = len(sequence["sequence"])
del sequence["otu"]
del sequence["otu_id"]
del sequence["isolate_id"]
document["results"] = [formatted[otu_id] for otu_id in formatted]
for otu in document["results"]:
for isolate in list(otu["isolates"]):
if not any((key in sequence for sequence in isolate["sequences"]) for key in ("pi", "final")):
otu["isolates"].remove(isolate)
continue
for sequence in isolate["sequences"]:
if "final" in sequence:
sequence.update(sequence.pop("final"))
del sequence["initial"]
if "pi" not in sequence:
sequence.update({
"pi": 0,
"reads": 0,
"coverage": 0,
"best": 0,
"length": len(sequence["sequence"])
})
sequence["id"] = sequence.pop("_id")
del sequence["sequence"]
await ensure_pathoscope_coverage_cache(app["db"], document)
return document
async def format_nuvs(app, document):
document = await load_results(
app["settings"],
document
)
hit_ids = list({h["hit"] for s in document["results"] for o in s["orfs"] for h in o["hits"]})
cursor = app["db"].hmm.find({"_id": {"$in": hit_ids}}, ["cluster", "families", "names"])
hmms = {d.pop("_id"): d async for d in cursor}
for sequence in document["results"]:
for orf in sequence["orfs"]:
for hit in orf["hits"]:
hit.update(hmms[hit["hit"]])
return document
async def format_analysis_to_excel(app, document):
depths = calculate_median_depths(document)
formatted = await format_analysis(app, document)
output = io.BytesIO()
wb = openpyxl.Workbook()
ws = wb.active
ws.title = f"Pathoscope for {document["sample"]["id"]}"
header_font = openpyxl.styles.Font(name="Calibri", bold=True)
for index, header in enumerate(CSV_HEADERS):
col = index + 1
cell = ws.cell(column=col, row=1, value=header)
cell.font = header_font
rows = list()
for otu in formatted["results"]:
for isolate in otu["isolates"]:
for sequence in isolate["sequences"]:
row = [
otu["name"],
virtool.otus.utils.format_isolate_name(isolate),
sequence["accession"],
sequence["length"],
sequence["pi"],
depths.get(sequence["id"], 0),
sequence["coverage"]
]
assert len(row) == len(CSV_HEADERS)
rows.append(row)
for row_index, row in enumerate(rows):
row_number = row_index + 2
for col_index, value in enumerate(row):
ws.cell(column=col_index + 1, row=row_number, value=value)
wb.save(output)
return output.getvalue()
async def format_analysis_to_csv(app, document):
depths = calculate_median_depths(document)
formatted = await format_analysis(app, document)
output = io.StringIO()
writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(CSV_HEADERS)
for otu in formatted["results"]:
for isolate in otu["isolates"]:
for sequence in isolate["sequences"]:
row = [
otu["name"],
virtool.otus.utils.format_isolate_name(isolate),
sequence["accession"],
sequence["length"],
sequence["pi"],
depths.get(sequence["id"], 0),
sequence["coverage"]
]
writer.writerow(row)
return output.getvalue()
async def format_analysis(app, document: dict) -> dict:
"""
Format an analysis document to be returned by the API.
:param app: the application object
:param document: the analysis document to format
:return: a formatted document
"""
workflow = document.get("workflow")
if workflow:
if workflow == "nuvs":
return await format_nuvs(app, document)
if "pathoscope" in workflow:
return await format_pathoscope(app, document)
if workflow == "aodp":
return await format_aodp(app, document)
raise ValueError("Could not determine analysis workflow")
async def gather_patched_otus(app, results):
# Use set to only id-version combinations once.
otu_specifiers = {(hit["otu"]["id"], hit["otu"]["version"]) for hit in results}
patched_otus = await asyncio.gather(*[
virtool.history.db.patch_to_version(
app,
otu_id,
version
) for otu_id, version in otu_specifiers
])
return {patched["_id"]: patched for _, patched, _ in patched_otus}
| """
Functions and data to use for formatting Pathoscope and NuVs analysis document. Formatted documents are destined for
API responses or CSV/Excel formatted file downloads.
"""
import asyncio
import csv
import io
import json
import statistics
from collections import defaultdict
import aiofiles
import openpyxl.styles
import virtool.analyses.db
import virtool.analyses.utils
import virtool.db.core
import virtool.db.utils
import virtool.history.db
import virtool.otus.db
import virtool.otus.utils
CSV_HEADERS = (
"OTU",
"Isolate",
"Sequence",
"Length",
"Weight",
"Median Depth",
"Coverage"
)
def calculate_median_depths(document: dict) -> dict:
"""
Calculate the median depth for all hits (sequences) in a Pathoscope result document.
:param document: the pathoscope analysis document to calculate depths for
:return: a dict of median depths keyed by hit (sequence) ids
"""
depths = dict()
for hit in document["results"]:
depths[hit["id"]] = statistics.median(hit["align"])
return depths
async def create_pathoscope_coverage_cache(db, document):
cache = defaultdict(lambda: defaultdict(lambda: dict()))
for hit in document["results"]:
for isolate in hit["isolates"]:
for sequence in isolate["sequences"]:
otu_id = hit["id"]
isolate_id = isolate["id"]
sequence_id = sequence["id"]
if sequence.get("align"):
cache[otu_id][isolate_id][sequence_id] = virtool.analyses.utils.transform_coverage_to_coordinates(sequence["align"])
document = {
"analysis": {
"id": document["_id"]
},
"cache": cache
}
await db.coverage.insert_one(document)
return document
async def ensure_pathoscope_coverage_cache(db, document):
    """
    Replace per-sequence ``align`` data in *document* with cached coverage
    coordinates, creating the cache for the analysis if it does not exist.

    :param db: the application database object
    :param document: the pathoscope analysis document to update in place
    """
    cache = await db.coverage.find_one({"analysis.id": document["_id"]})
    if cache is None:
        cache = await create_pathoscope_coverage_cache(db, document)
    for hit in document["results"]:
        for isolate in hit["isolates"]:
            for sequence in isolate["sequences"]:
                otu_id = hit["id"]
                isolate_id = isolate["id"]
                sequence_id = sequence["id"]
                if sequence.get("align"):
                    # Swap raw align data for the cached coordinate form.
                    sequence["align"] = cache["cache"][otu_id][isolate_id][sequence_id]
async def load_results(settings: dict, document: dict) -> dict:
    """
    Load the results for an analysis document, reading them from the
    ``results.json`` file on disk when the stored value is the sentinel
    string "file" (used when the data would exceed the MongoDB 16mb limit).

    The document is returned unmodified if loading from file is not required.

    :param settings: the application settings
    :param document: the document to load results for
    :return: a complete analysis document
    """
    if document["results"] != "file":
        # Results are stored inline; nothing to load.
        return document
    path = virtool.analyses.utils.join_analysis_json_path(
        settings["data_path"],
        document["_id"],
        document["sample"]["id"]
    )
    async with aiofiles.open(path, "r") as f:
        loaded = json.loads(await f.read())
    return {
        **document,
        "results": loaded
    }
async def format_aodp(app, document):
    """
    Format an AODP analysis document, replacing the flat hit list in
    ``results`` with the patched OTUs the hits refer to.

    :param app: the application object
    :param document: the analysis document to format
    :return: the formatted document
    """
    patched_otus = await gather_patched_otus(app, document["results"])
    # Group hits by the sequence they belong to.
    hits = defaultdict(list)
    for hit in document["results"]:
        hits[hit["sequence_id"]].append(hit)
    for otu in patched_otus.values():
        # Convert Mongo-style "_id" keys to API-style "id" keys.
        otu["id"] = otu.pop("_id")
        for isolate in otu["isolates"]:
            for sequence in isolate["sequences"]:
                sequence["hits"] = hits[sequence["_id"]]
                sequence["id"] = sequence.pop("_id")
    return {
        **document,
        "results": list(patched_otus.values())
    }
async def format_pathoscope(app, document):
    """
    Format a Pathoscope analysis document for API responses.

    Merges hit data into patched OTU sequences, drops isolates that have no
    hit data at all, fills zeroed defaults for unhit sequences, and ensures
    the coverage cache exists.

    :param app: the application object
    :param document: the analysis document to format
    :return: the formatted document
    """
    document = await load_results(
        app["settings"],
        document
    )
    patched_otus = await gather_patched_otus(app, document["results"])
    formatted = dict()
    for hit in document["results"]:
        otu_id = hit["otu"]["id"]
        otu_document = patched_otus[otu_id]
        # Longest reference sequence across all isolates of the OTU.
        max_ref_length = 0
        for isolate in otu_document["isolates"]:
            max_ref_length = max(max_ref_length, max(len(s["sequence"]) for s in isolate["sequences"]))
        otu = {
            "id": otu_id,
            "name": otu_document["name"],
            "version": otu_document["version"],
            "abbreviation": otu_document["abbreviation"],
            "isolates": otu_document["isolates"],
            "length": max_ref_length
        }
        formatted[otu_id] = otu
        for isolate in otu["isolates"]:
            for sequence in isolate["sequences"]:
                if sequence["_id"] == hit["id"]:
                    sequence.update(hit)
                    sequence["length"] = len(sequence["sequence"])
                    del sequence["otu"]
                    del sequence["otu_id"]
                    del sequence["isolate_id"]
    document["results"] = [formatted[otu_id] for otu_id in formatted]
    for otu in document["results"]:
        # Iterate over a copy so isolates can be removed while looping.
        for isolate in list(otu["isolates"]):
            # BUG FIX: the original passed generator *objects* (always truthy)
            # to any(), so isolates without hit data were never removed.
            # Flatten both loops so any() tests the membership checks.
            if not any(
                key in sequence
                for sequence in isolate["sequences"]
                for key in ("pi", "final")
            ):
                otu["isolates"].remove(isolate)
                continue
            for sequence in isolate["sequences"]:
                if "final" in sequence:
                    sequence.update(sequence.pop("final"))
                    del sequence["initial"]
                if "pi" not in sequence:
                    # Sequence had no hits; record explicit zeroes.
                    sequence.update({
                        "pi": 0,
                        "reads": 0,
                        "coverage": 0,
                        "best": 0,
                        "length": len(sequence["sequence"])
                    })
                sequence["id"] = sequence.pop("_id")
                del sequence["sequence"]
    await ensure_pathoscope_coverage_cache(app["db"], document)
    return document
async def format_nuvs(app, document):
    """
    Format a NuVs analysis document by annotating each ORF hit with the
    cluster, family, and name data of its matching HMM.

    :param app: the application object
    :param document: the analysis document to format
    :return: the formatted document
    """
    document = await load_results(app["settings"], document)
    # Collect the distinct HMM ids referenced anywhere in the results.
    referenced = set()
    for sequence in document["results"]:
        for orf in sequence["orfs"]:
            for hit in orf["hits"]:
                referenced.add(hit["hit"])
    cursor = app["db"].hmm.find(
        {"_id": {"$in": list(referenced)}},
        ["cluster", "families", "names"]
    )
    hmms = {annotation.pop("_id"): annotation async for annotation in cursor}
    # Merge the HMM annotation into every hit in place.
    for sequence in document["results"]:
        for orf in sequence["orfs"]:
            for hit in orf["hits"]:
                hit.update(hmms[hit["hit"]])
    return document
async def format_analysis_to_excel(app, document):
    """
    Render a formatted pathoscope analysis as an Excel workbook.

    :param app: the application object
    :param document: the analysis document to export
    :return: the saved workbook as bytes
    """
    depths = calculate_median_depths(document)
    formatted = await format_analysis(app, document)
    output = io.BytesIO()
    wb = openpyxl.Workbook()
    ws = wb.active
    ws.title = f"Pathoscope for {document['sample']['id']}"
    header_font = openpyxl.styles.Font(name="Calibri", bold=True)
    # openpyxl rows/columns are 1-indexed; row 1 holds the headers.
    for index, header in enumerate(CSV_HEADERS):
        col = index + 1
        cell = ws.cell(column=col, row=1, value=header)
        cell.font = header_font
    rows = list()
    for otu in formatted["results"]:
        for isolate in otu["isolates"]:
            for sequence in isolate["sequences"]:
                row = [
                    otu["name"],
                    virtool.otus.utils.format_isolate_name(isolate),
                    sequence["accession"],
                    sequence["length"],
                    sequence["pi"],
                    depths.get(sequence["id"], 0),
                    sequence["coverage"]
                ]
                # Keep the row shape in lockstep with CSV_HEADERS.
                assert len(row) == len(CSV_HEADERS)
                rows.append(row)
    # Data rows start at row 2, below the header row.
    for row_index, row in enumerate(rows):
        row_number = row_index + 2
        for col_index, value in enumerate(row):
            ws.cell(column=col_index + 1, row=row_number, value=value)
    wb.save(output)
    return output.getvalue()
async def format_analysis_to_csv(app, document):
    """
    Render a formatted pathoscope analysis as CSV text with the standard
    headers.

    :param app: the application object
    :param document: the analysis document to export
    :return: the CSV content as a string
    """
    depths = calculate_median_depths(document)
    formatted = await format_analysis(app, document)
    output = io.StringIO()
    writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)
    writer.writerow(CSV_HEADERS)
    writer.writerows(
        [
            otu["name"],
            virtool.otus.utils.format_isolate_name(isolate),
            sequence["accession"],
            sequence["length"],
            sequence["pi"],
            depths.get(sequence["id"], 0),
            sequence["coverage"]
        ]
        for otu in formatted["results"]
        for isolate in otu["isolates"]
        for sequence in isolate["sequences"]
    )
    return output.getvalue()
async def format_analysis(app, document: dict) -> dict:
    """
    Format an analysis document to be returned by the API, dispatching on
    the document's workflow.

    :param app: the application object
    :param document: the analysis document to format
    :return: a formatted document
    :raises ValueError: when the workflow is missing or unrecognized
    """
    workflow = document.get("workflow")
    if not workflow:
        raise ValueError("Could not determine analysis workflow")
    if workflow == "nuvs":
        return await format_nuvs(app, document)
    if "pathoscope" in workflow:
        return await format_pathoscope(app, document)
    if workflow == "aodp":
        return await format_aodp(app, document)
    raise ValueError("Could not determine analysis workflow")
async def gather_patched_otus(app, results):
    """
    Concurrently fetch the historical version of every OTU referenced in
    *results*.

    :param app: the application object
    :param results: analysis hits carrying ``otu.id`` and ``otu.version``
    :return: a dict of patched OTU documents keyed by OTU id
    """
    # Deduplicate with a set so each (id, version) pair is patched once.
    specifiers = {(hit["otu"]["id"], hit["otu"]["version"]) for hit in results}
    tasks = [
        virtool.history.db.patch_to_version(app, otu_id, version)
        for otu_id, version in specifiers
    ]
    gathered = await asyncio.gather(*tasks)
    # patch_to_version returns a 3-tuple; the patched OTU is the middle item.
    return {patched["_id"]: patched for _, patched, _ in gathered}
|
""" Retreives configuration from the config file
"""
from json import loads
from requests import get
from configparser import ConfigParser, ParsingError
from modules.exceptions import ConfigError
## DiscordIds
discord_ids = {
"lobby" : 0,
"register" : 0,
"matches" : list(),
"results" : 0,
"rules" : 0,
"rules_msg" : 0,
"admin_role" : 0,
"info_role" : 0,
"registered_role" : 0,
"notify_role" : 0
}
## General
general = {
"token" : "",
"api_key" : "",
"command_prefix" : "",
"lobby_size" : 0
}
AFK_TIME = 20 # minutes
ROUND_LENGHT = 10 # minutes
VERSION = "0"
factions = {
1 : "VS",
2 : "NC",
3 : "TR"
}
# Lazy way to get factions from user input:
i_factions = {
"VS" : 1,
"NC" : 2,
"TR" : 3
}
# http://census.daybreakgames.com/get/ps2:v2/zone?c:limit=100
zones = {
2 : "Indar",
4 : "Hossin",
6 : "Amerish",
8 : "Esamir"
}
# http://census.daybreakgames.com/get/ps2:v2/facility_type?c:limit=100
facilitiy_suffix = {
2 : "Amp Station",
3 : "Bio Lab",
4 : "Tech Plant"
}
PIL_MAPS_IDS = [3430,302030,239000,305010,230,307010] #peris, can, pale, ghanan, xeno, chac
## Database
_collections = {
"users" : "",
"sBases" : ""
}
database = {
"url" : "",
"cluster" : "",
"accounts" : "",
"collections" : _collections
}
## Methods
def getConfig(file):
    """
    Load and validate the bot configuration from *file*, filling the
    module-level `general`, `discord_ids` and `database` dicts and setting
    `VERSION` from ../CHANGELOG.md.

    :param file: path of the .ini configuration file
    :raises ConfigError: on parse failure, missing/invalid fields, or a bad
        api key
    """
    config = ConfigParser()
    try:
        config.read(file)
    except ParsingError as e:
        # Chain the parser error so the original cause stays visible.
        raise ConfigError(f"Parsing Error in '{file}'") from e
    # General section
    _checkSection(config, "General", file)
    for key in general:
        try:
            # The default value's type decides how the raw string is coerced.
            if isinstance(general[key], int):
                general[key] = int(config['General'][key])
            else:
                general[key] = config['General'][key]
        except KeyError:
            _errorMissing(key, 'General', file)
        except ValueError:
            _errorIncorrect(key, 'General', file)
    # Testing api key.
    # BUG FIX: the f-string placeholders reused the enclosing double quotes
    # (f"...{general["api_key"]}...") which is a SyntaxError before
    # Python 3.12; use single quotes inside instead.
    url = f"http://census.daybreakgames.com/s:{general['api_key']}/get/ps2:v2/faction"
    jdata = loads(get(url).content)
    if 'error' in jdata:
        raise ConfigError(f"Incorrect api key: {general['api_key']} in '{file}'")
    # Discord_Ids section
    _checkSection(config, "Discord_Ids", file)
    for key in discord_ids:
        try:
            if key == "matches":
                # Comma-separated list of channel ids.
                tmp = config['Discord_Ids'][key].split(',')
                discord_ids[key].clear()
                for m in tmp:
                    discord_ids[key].append(int(m))
            else:
                discord_ids[key] = int(config['Discord_Ids'][key])
        except KeyError:
            _errorMissing(key, 'Discord_Ids', file)
        except ValueError:
            _errorIncorrect(key, 'Discord_Ids', file)
    # Database section
    _checkSection(config, "Database", file)
    for key in database:
        if key != "collections":
            try:
                database[key] = config['Database'][key]
            except KeyError:
                _errorMissing(key, 'Database', file)
    # Collections section
    _checkSection(config, "Collections", file)
    for key in database["collections"]:
        try:
            database["collections"][key] = config['Collections'][key]
        except KeyError:
            _errorMissing(key, 'Collections', file)
    # Version
    with open('../CHANGELOG.md', 'r', encoding='utf-8') as txt:
        txt_str = txt.readline()
    global VERSION
    VERSION = txt_str[3:-2]  # Extracts "X.X.X" from string "# vX.X.X:" in a lazy way
def _checkSection(config, section, file):
    """Raise ConfigError if *section* is absent from the parsed config."""
    if section not in config:
        raise ConfigError(f"Missing section '{section}' in '{file}'")
def _errorMissing(field, section, file):
    """Raise ConfigError reporting a missing *field* in *section* of *file*."""
    raise ConfigError(f"Missing field '{field}' in '{section}' in '{file}'")
def _errorIncorrect(field, section, file):
raise ConfigError(f"Incorrect field '{field}' in '{section}' in '{file}'") | """ Retreives configuration from the config file
"""
from json import loads
from requests import get
from configparser import ConfigParser, ParsingError
from modules.exceptions import ConfigError
## DiscordIds
discord_ids = {
"lobby" : 0,
"register" : 0,
"matches" : list(),
"results" : 0,
"rules" : 0,
"rules_msg" : 0,
"admin_role" : 0,
"info_role" : 0,
"registered_role" : 0,
"notify_role" : 0
}
## General
general = {
"token" : "",
"api_key" : "",
"command_prefix" : "",
"lobby_size" : 0
}
AFK_TIME = 20 # minutes
ROUND_LENGHT = 10 # minutes
VERSION = "0"
factions = {
1 : "VS",
2 : "NC",
3 : "TR"
}
# Lazy way to get factions from user input:
i_factions = {
"VS" : 1,
"NC" : 2,
"TR" : 3
}
# http://census.daybreakgames.com/get/ps2:v2/zone?c:limit=100
zones = {
2 : "Indar",
4 : "Hossin",
6 : "Amerish",
8 : "Esamir"
}
# http://census.daybreakgames.com/get/ps2:v2/facility_type?c:limit=100
facilitiy_suffix = {
2 : "Amp Station",
3 : "Bio Lab",
4 : "Tech Plant"
}
PIL_MAPS_IDS = [3430,302030,239000,305010,230,307010] #peris, can, pale, ghanan, xeno, chac
## Database
_collections = {
"users" : "",
"sBases" : ""
}
database = {
"url" : "",
"cluster" : "",
"accounts" : "",
"collections" : _collections
}
## Methods
def getConfig(file):
    """
    Load and validate the bot configuration from *file*, filling the
    module-level `general`, `discord_ids` and `database` dicts and setting
    `VERSION` from ../CHANGELOG.md.

    :param file: path of the .ini configuration file
    :raises ConfigError: on parse failure, missing/invalid fields, or a bad
        api key
    """
    config = ConfigParser()
    try:
        config.read(file)
    except ParsingError as e:
        raise ConfigError(f"Parsing Error in '{file}'")
    # General section
    _checkSection(config, "General", file)
    for key in general:
        try:
            # The default value's type decides how the raw string is coerced.
            if isinstance(general[key],int):
                general[key]=int(config['General'][key])
            else:
                general[key]=config['General'][key]
        except KeyError:
            _errorMissing(key, 'General', file)
        except ValueError:
            _errorIncorrect(key, 'General', file)
    # Testing api key with a cheap census request.
    url = f"http://census.daybreakgames.com/s:{general['api_key']}/get/ps2:v2/faction"
    jdata=loads(get(url).content)
    if 'error' in jdata:
        raise ConfigError(f"Incorrect api key: {general['api_key']} in '{file}'")
    # Discord_Ids section
    _checkSection(config, "Discord_Ids", file)
    for key in discord_ids:
        try:
            if key == "matches":
                # Comma-separated list of channel ids.
                tmp = config['Discord_Ids'][key].split(',')
                discord_ids[key].clear()
                for m in tmp:
                    discord_ids[key].append(int(m))
            else:
                discord_ids[key] = int(config['Discord_Ids'][key])
        except KeyError:
            _errorMissing(key, 'Discord_Ids', file)
        except ValueError:
            _errorIncorrect(key, 'Discord_Ids', file)
    # Database section
    _checkSection(config, "Database", file)
    for key in database:
        if key != "collections":
            try:
                database[key]=config['Database'][key]
            except KeyError:
                _errorMissing(key, 'Database', file)
    # Collections section
    _checkSection(config, "Collections", file)
    for key in database["collections"]:
        try:
            database["collections"][key] = config['Collections'][key]
        except KeyError:
            _errorMissing(key, 'Collections', file)
    # Version
    with open('../CHANGELOG.md', 'r', encoding='utf-8') as txt:
        txt_str=txt.readline()
    global VERSION
    VERSION = txt_str[3:-2] # Extracts "X.X.X" from string "# vX.X.X:" in a lazy way
def _checkSection(config, section, file):
    """Raise ConfigError if *section* is absent from the parsed config."""
    if section not in config:
        raise ConfigError(f"Missing section '{section}' in '{file}'")
def _errorMissing(field, section, file):
    """Raise ConfigError reporting a missing *field* in *section* of *file*."""
    raise ConfigError(f"Missing field '{field}' in '{section}' in '{file}'")
def _errorIncorrect(field, section, file):
    """Raise ConfigError reporting an unparsable *field* in *section* of *file*."""
    raise ConfigError(f"Incorrect field '{field}' in '{section}' in '{file}'")
from consts import *
class Time:
    """A calendar date paired with a time of day, rendered as "date time"."""

    def __init__(self, date, time_in_day):
        self.date = date
        self.time_in_day = time_in_day

    def __str__(self):
        return " ".join((str(self.date), str(self.time_in_day)))
class Traveler:
    """A passenger on the trip; `is_child` selects the child fare."""
    def __init__(self, name, is_child):
        self.name = name
        self.is_child = is_child
class FlightInfo:
    """A single flight leg: airports, airline/flight code, and times."""
    def __init__(self, airport_depart, airport_destination,
                 airline, flight_code, time_departure, time_destination):
        self.airport_depart = airport_depart
        self.airport_destination = airport_destination
        self.flight_code = flight_code
        self.airline = airline
        self.time_departure = time_departure
        self.time_destination = time_destination
    def get_flight_desc(self, lang):
        """
        Three-line leg description: airline (code), route, and times.
        NOTE(review): `lang` is currently unused here — confirm whether
        localized output was intended.
        """
        return f"  {AIRLINES[self.airline]}-({self.airline}{self.flight_code})\n" \
               f"  {AIRPORTS[self.airport_depart]}>{AIRPORTS[self.airport_destination]}\n" \
               f"  {self.time_departure} - {self.time_destination}"
class PriceMessage:
    """Assembles the full localized price-quote message for a trip."""

    def __init__(self, flights, costumers, price_adult, price_child, restrictions,
                 baggage, meal, compartment, language='en'):
        self.travelers = costumers
        self.flights = flights
        self.price_adult = price_adult
        # Fall back to the adult fare when no child fare is given.
        self.price_child = price_child if price_child else price_adult
        self.restrictions = restrictions
        self.baggage = baggage
        self.meal = meal
        self.compartment = compartment
        self.lang = language

    def deal_summary(self):
        """A short text describing the trip by airports"""
        if len(self.flights) == 1:
            dest_str = " " + ONE_WAY[self.lang]
        else:
            dest_code = self.flights[-1].airport_destination
            dest_str = f">{AIRPORTS[dest_code]} "
        trip_str = [AIRPORTS[x.airport_depart] for x in self.flights]
        # BUG FIX: the join delimiters reused the enclosing double quotes
        # inside the f-string, a SyntaxError before Python 3.12; use single
        # quotes inside.
        msg = f"{FLIGHT_DESC[self.lang]} {ROUND_TRIP[self.lang]}\n" \
              f"{'>'.join(trip_str)}{dest_str}\n"
        if len(self.travelers) > 1:
            msg += f"{TOGETHER[self.lang]} {','.join([x.name for x in self.travelers[1:]])}"
        return msg

    def flights_summary(self):
        """Itinerary section listing every leg, marking the return flight."""
        print_retour = True
        msg = f"*{ITINERARY[self.lang]}*\n"
        msg += f"{FLIGHT[self.lang]} {ALLER[self.lang]}\n"
        for i, flight in enumerate(self.flights):
            if i > 0:
                # First date change between legs marks the return flight.
                if print_retour and flight.time_departure.date != self.flights[i-1].time_destination.date:
                    msg += f"{FLIGHT[self.lang]} {RETOUR[self.lang]}\n"
                    print_retour = False
            msg += f"{flight.get_flight_desc(self.lang)}\n"
        return msg

    def pricing_summary(self):
        """Price breakdown (adults/children/total) plus restrictions."""
        msg = "*Prices*:\n"
        num_adults = len([x for x in self.travelers if not x.is_child])
        num_childs = len([x for x in self.travelers if x.is_child])
        msg += f"  {num_adults} $ x {self.price_adult} adults\n"
        msg += f"  {num_childs} $ x {self.price_child} children\n"
        msg += f"  total: {float(num_adults) * float(self.price_adult) + float(num_childs) * float(self.price_child)} $\n\n"
        msg += "*Restrictions*:"
        for k, v in self.restrictions.items():
            msg += f"\n  {k}: {v}"
        return msg

    def details_summary(self):
        """Compartment, baggage and meal details."""
        msg = f"*Details*:\n"
        msg += f"  -Compartment: {self.compartment}\n"
        msg += f"  -Baggage: {self.baggage}\n"
        msg += f"  -Meal: {self.meal}\n"
        return msg

    def construct_msg(self):
        """
        Build the complete message; returns None when there are no flights.
        """
        if self.flights:
            msg = f"{self.travelers[0].name}, Shalom!\n\n"
            msg += f"{self.deal_summary()}\n\n"
            msg += f"{PLEASE_PAY_MSG_EN[self.lang]}\n\n"
            msg += f"{self.flights_summary()}\n\n"
            msg += f"{self.pricing_summary()}\n\n"
            msg += f"{self.details_summary()}\n\n"
            msg += f"{PRICE_MAY_CHANGE[self.lang]}\n\n"
            msg += f"{PLEASE_PAY_AGAIN_MSG_EN[self.lang]}\n\n"
            msg += f"{FAREWELL[self.lang]}\n\n"
            return msg
| from consts import *
class Time:
    """A calendar date paired with a time of day, rendered as "date time"."""

    def __init__(self, date, time_in_day):
        self.date = date
        self.time_in_day = time_in_day

    def __str__(self):
        return " ".join((str(self.date), str(self.time_in_day)))
class Traveler:
    """A passenger on the trip; `is_child` selects the child fare."""
    def __init__(self, name, is_child):
        self.name = name
        self.is_child = is_child
class FlightInfo:
    """A single flight leg: airports, airline/flight code, and times."""
    def __init__(self, airport_depart, airport_destination,
                 airline, flight_code, time_departure, time_destination):
        self.airport_depart = airport_depart
        self.airport_destination = airport_destination
        self.flight_code = flight_code
        self.airline = airline
        self.time_departure = time_departure
        self.time_destination = time_destination
    def get_flight_desc(self, lang):
        """
        Three-line leg description: airline (code), route, and times.
        NOTE(review): `lang` is currently unused here — confirm whether
        localized output was intended.
        """
        return f"  {AIRLINES[self.airline]}-({self.airline}{self.flight_code})\n" \
               f"  {AIRPORTS[self.airport_depart]}>{AIRPORTS[self.airport_destination]}\n" \
               f"  {self.time_departure} - {self.time_destination}"
class PriceMessage:
    """Assembles the full localized price-quote message for a trip."""
    def __init__(self, flights, costumers, price_adult, price_child, restrictions,
                 baggage, meal, compartment, language='en'):
        self.travelers = costumers
        self.flights = flights
        self.price_adult = price_adult
        # Fall back to the adult fare when no child fare is given.
        self.price_child = price_child if price_child else price_adult
        self.restrictions = restrictions
        self.baggage = baggage
        self.meal = meal
        self.compartment = compartment
        self.lang = language
    def deal_summary(self):
        """A short text describing the trip by airports"""
        if len(self.flights) == 1:
            dest_str =" " + ONE_WAY[self.lang]
        else:
            dest_code = self.flights[-1].airport_destination
            dest_str = f">{AIRPORTS[dest_code]} "
        trip_str = [AIRPORTS[x.airport_depart] for x in self.flights]
        msg = f"{FLIGHT_DESC[self.lang]} {ROUND_TRIP[self.lang]}\n" \
              f"{'>'.join(trip_str)}{dest_str}\n"
        if len(self.travelers) > 1:
            msg += f"{TOGETHER[self.lang]} {','.join([x.name for x in self.travelers[1:]])}"
        return msg
    def flights_summary(self):
        """Itinerary section listing every leg, marking the return flight."""
        print_retour = True
        msg = f"*{ITINERARY[self.lang]}*\n"
        msg += f"{FLIGHT[self.lang]} {ALLER[self.lang]}\n"
        for i, flight in enumerate(self.flights):
            if i > 0:
                # First date change between legs marks the return flight.
                if print_retour and flight.time_departure.date != self.flights[i-1].time_destination.date:
                    msg += f"{FLIGHT[self.lang]} {RETOUR[self.lang]}\n"
                    print_retour = False
            msg += f"{flight.get_flight_desc(self.lang)}\n"
        return msg
    def pricing_summary(self):
        """
        Price breakdown (adults/children/total) plus restrictions.
        NOTE(review): the lines read "<count> $ x <price>" — the count and
        price placement looks swapped relative to the "$"; confirm intent.
        """
        msg = "*Prices*:\n"
        num_adults = len([x for x in self.travelers if not x.is_child])
        num_childs = len([x for x in self.travelers if x.is_child])
        msg += f"  {num_adults} $ x {self.price_adult} adults\n"
        msg += f"  {num_childs} $ x {self.price_child} children\n"
        msg += f"  total: {float(num_adults) * float(self.price_adult) + float(num_childs) * float(self.price_child)} $\n\n"
        msg += "*Restrictions*:"
        for k, v in self.restrictions.items():
            msg += f"\n  {k}: {v}"
        return msg
    def details_summary(self):
        """Compartment, baggage and meal details."""
        msg = f"*Details*:\n"
        msg += f"  -Compartment: {self.compartment}\n"
        msg += f"  -Baggage: {self.baggage}\n"
        msg += f"  -Meal: {self.meal}\n"
        return msg
    def construct_msg(self):
        # Returns None (implicitly) when there are no flights.
        if self.flights:
            msg = f"{self.travelers[0].name}, Shalom!\n\n"
            msg += f"{self.deal_summary()}\n\n"
            msg += f"{PLEASE_PAY_MSG_EN[self.lang]}\n\n"
            msg += f"{self.flights_summary()}\n\n"
            msg += f"{self.pricing_summary()}\n\n"
            msg += f"{self.details_summary()}\n\n"
            msg += f"{PRICE_MAY_CHANGE[self.lang]}\n\n"
            msg += f"{PLEASE_PAY_AGAIN_MSG_EN[self.lang]}\n\n"
            msg += f"{FAREWELL[self.lang]}\n\n"
            return msg
|
import glob
import random
import itertools
import numpy as np
from collections import Counter
import xml.etree.ElementTree as ET
from matplotlib import pyplot as plt
def get_path(dataset: str = 'Restaurant', language: str = 'English', mode: str = 'Train'):
    """
    Build the glob pattern for the raw data files of a dataset, language and
    split.

    :return: a pattern like './data/Train/raw/Restaurant*_English.xml'
    """
    ext_by_mode = {'Test': 'B', 'Train': 'xml'}
    lang_code = {
        'Restaurant': {
            'Dutch': 'Dutch', 'English': 'English', 'French': 'French',
            'Russian': 'Russian', 'Spanish': 'Spanish', 'Turkish': 'Turkish',
            'All': '*',
        },
        'camera': {
            'German': 'de', 'English': 'en', 'Spanish': 'es',
            'French': 'fr', 'Italian': 'it', 'All': '*',
        },
    }
    code = lang_code[dataset][language]
    return f'./data/{mode}/raw/{dataset}*_{code}.{ext_by_mode[mode]}'
def get_data(dataset: str = 'Restaurant', language: str = 'English', mode: str = 'Train') -> list:
    """
    Parse the SemEval-style XML files for a dataset/language/split into
    (text, labels) pairs, where labels is a flat "LABELS: ..." string.

    :return: a list of (sentence text, label string) tuples
    """
    folder_path = get_path(dataset, language, mode)
    data = []
    skipped = 0
    multi_token = 0
    single_token = 0
    token_lengths = []
    for path in glob.glob(folder_path):
        # ET.parse opens the file itself; the original redundantly opened the
        # file a second time with open() and never used the handle.
        tree = ET.parse(path)
        reviews = tree.getroot()
        for review in reviews:
            for sentence in review[0]:
                opinions = []
                for i, item in enumerate(sentence):
                    if item.tag.lower() == 'text':
                        if item.text != None:
                            text = item.text
                        elif 'text' in item.attrib.keys():
                            text = item.attrib['text']
                        else:
                            skipped += 1
                        if len(sentence) > 1:
                            item = sentence[i + 1]
                            for opinion in item:
                                opinions.append(opinion.attrib)
                        else:
                            opinions.append({"category": None, "target": None})
                        # NOTE(review): `opinion` here is the stale loop
                        # variable (or unbound when no <Opinion> element was
                        # ever seen) — confirm the intended statistics.
                        if len(opinion.attrib['target'].split()) > 1:
                            multi_token += 1
                        elif len(opinion.attrib['target'].split()) == 1:
                            single_token += 1
                        token_lengths.append(len(opinion.attrib['target'].split()))
                if len(opinions) != 0:
                    if 'camera' in path.split("/")[-1]:
                        main_cat = 'camera'
                    else:
                        main_cat = 'restaurant'
                    # BUG FIX: the subscripts reused the enclosing single
                    # quotes inside the f-strings (f'...{opinion['category']}'),
                    # a SyntaxError before Python 3.12; use double quotes
                    # inside instead.
                    labels = 'LABELS: ' + ' '.join([' '.join([f'CATEGORY1: {main_cat}',
                                                              f'CATEGORY2: {opinion["category"]}',
                                                              f'TARGET: {opinion["target"]}'])
                                                    for opinion in opinions])
                    data.append((text, labels))
    return data
def show_plot(data: list, dataset: str = 'Restaurant', language='English', mode='Train'):
    """
    Count label tokens in *data* and save a horizontal bar chart to
    Figures/<dataset>_<language>_<mode>_Labels.png.

    :param data: list of (text, labels) pairs as produced by get_data
    """
    # Every second token after "LABELS:" holds a label value.
    labels = [datum[1].split()[2::2] for datum in data]
    labels = [label for label in list(itertools.chain(*labels)) if label.isupper()]
    labels = Counter(labels)
    if dataset == 'camera':
        labels = {key:value for key, value in labels.items() if key in camera_labels}
    elif dataset == 'Restaurant':
        labels = {key:value for key, value in labels.items() if key in restaurant_labels}
    plt.rcdefaults()
    fig, ax = plt.subplots(figsize=(12, 8))
    labels = {k:v for k, v in sorted(labels.items())}
    # NOTE(review): `values` is immediately reassigned on the next line, so
    # the double assignment here is dead; only `keys` is actually used.
    keys = values = ['#\n'.join(key.split('#')) for key in list(labels.keys())]
    values = list(labels.values())
    y_pos = np.arange(len(keys))
    ax.barh(y_pos, values, align='center')
    ax.set_yticks(y_pos)
    ax.set_yticklabels(keys)
    ax.invert_yaxis()
    ax.set_xlabel('Count')
    # NOTE(review): for barh the count (v) is the x coordinate and the bar
    # index (i) the y coordinate — these look swapped; confirm placement.
    for i, v in enumerate(labels.values()):
        ax.text(x=i-.35, y=v+1, s=str(v), color='black', fontsize=12)
    title = f'{dataset} {mode} {language}'
    ax.set_title(title)
    plt.tight_layout()
    plt.savefig(f'Figures/{dataset}_{language}_{mode}_Labels.png', format='png')
def plot_label_distribution(dataset: str = 'Restaurant', language: str = 'English', mode: str = 'Train'):
    """Load the requested split and save a bar chart of its label counts."""
    show_plot(get_data(dataset, language, mode), dataset, language, mode)
    print('Done!')
# Label whitelists used by show_plot to filter the counted categories.
camera_labels = ['IMAGING', 'GENERAL', 'OTHER', 'PRICE', 'DISPLAY', 'EXPOSURE', 'DIMENSION', 'LENS', 'BATTERY', 'VIDEO', 'ZOOM', 'PERFORMANCE', 'MEMORY']
restaurant_labels = ['RESTAURANT#GENERAL', 'NULL', 'SERVICE#GENERAL', 'AMBIENCE#GENERAL', 'FOOD#QUALITY', 'FOOD#PRICES', 'RESTAURANT#PRICES', 'FOOD#STYLE_OPTIONS', 'DRINKS#QUALITY', 'DRINKS#STYLE_OPTIONS', 'LOCATION#GENERAL', 'DRINKS#PRICES', 'RESTAURANT#MISCELLANEOUS']
# Executed at import time: renders the Dutch Restaurant training distribution.
plot_label_distribution(dataset='Restaurant', language='Dutch', mode='Train')
| import glob
import random
import itertools
import numpy as np
from collections import Counter
import xml.etree.ElementTree as ET
from matplotlib import pyplot as plt
def get_path(dataset: str = 'Restaurant', language: str = 'English', mode: str = 'Train'):
    """
    Build the glob pattern for the raw data files of a dataset, language and
    split, e.g. './data/Train/raw/Restaurant*_English.xml'.
    """
    # Test files use the ".B" extension, training files plain ".xml".
    file_type = {'Test': 'B',
                 'Train':'xml'}
    # Per-dataset mapping from language name to the filename language code.
    languages = {'Restaurant':{'Dutch':'Dutch',
                               'English':'English',
                               'French':'French',
                               'Russian':'Russian',
                               'Spanish':'Spanish',
                               'Turkish':'Turkish',
                               'All':'*'},
                 'camera':{'German':'de',
                           'English':'en',
                           'Spanish':'es',
                           'French':'fr',
                           'Italian':'it',
                           'All': '*'}
                 }
    return f'./data/{mode}/raw/{dataset}*_{languages[dataset][language]}.{file_type[mode]}'
def get_data(dataset: str = 'Restaurant', language: str = 'English', mode: str = 'Train') -> list:
    """
    Parse the SemEval-style XML files for a dataset/language/split into
    (text, labels) pairs, where labels is a flat "LABELS: ..." string.

    :return: a list of (sentence text, label string) tuples
    """
    folder_path = get_path(dataset, language, mode)
    data = []
    skipped = 0
    multi_token = 0
    single_token = 0
    token_lengths = []
    for path in glob.glob(folder_path):
        # NOTE(review): `file` is never used — ET.parse reopens the path.
        with open(path, 'r', encoding='utf-8') as file:
            tree = ET.parse(path)
            reviews = tree.getroot()
            for review in reviews:
                datum = []
                for sentence in review[0]:
                    opinions = []
                    for i, item in enumerate(sentence):
                        if item.tag.lower() == 'text':
                            if item.text != None:
                                text = item.text
                            elif 'text' in item.attrib.keys():
                                text = item.attrib['text']
                            else:
                                skipped += 1
                            if len(sentence) > 1:
                                # Opinions follow the <text> element.
                                item = sentence[i + 1]
                                for opinion in item:
                                    opinions.append(opinion.attrib)
                            else:
                                opinions.append({"category":None, "target":None})
                            # NOTE(review): `opinion` here is the stale loop
                            # variable (or unbound when no <Opinion> element
                            # was ever seen) — confirm the intended stats.
                            if len(opinion.attrib['target'].split()) > 1:
                                multi_token += 1
                            elif len(opinion.attrib['target'].split()) == 1:
                                single_token += 1
                            token_lengths.append(len(opinion.attrib['target'].split()))
                    if len(opinions) != 0:
                        if 'camera' in path.split("/")[-1]:
                            main_cat = 'camera'
                        else:
                            main_cat = 'restaurant'
                        labels = 'LABELS: ' + ' '.join([' '.join([f'CATEGORY1: {main_cat}',
                                                                  f'CATEGORY2: {opinion["category"]}',
                                                                  f'TARGET: {opinion["target"]}'])
                                                        for opinion in opinions])
                        data.append((text, labels))
    return data
def show_plot(data: list, dataset: str = 'Restaurant', language='English', mode='Train'):
    """
    Count label tokens in *data* and save a horizontal bar chart to
    Figures/<dataset>_<language>_<mode>_Labels.png.

    :param data: list of (text, labels) pairs as produced by get_data
    """
    # Every second token after "LABELS:" holds a label value.
    labels = [datum[1].split()[2::2] for datum in data]
    labels = [label for label in list(itertools.chain(*labels)) if label.isupper()]
    labels = Counter(labels)
    if dataset == 'camera':
        labels = {key:value for key, value in labels.items() if key in camera_labels}
    elif dataset == 'Restaurant':
        labels = {key:value for key, value in labels.items() if key in restaurant_labels}
    plt.rcdefaults()
    fig, ax = plt.subplots(figsize=(12, 8))
    labels = {k:v for k, v in sorted(labels.items())}
    # NOTE(review): `values` is immediately reassigned on the next line, so
    # the double assignment here is dead; only `keys` is actually used.
    keys = values = ['#\n'.join(key.split('#')) for key in list(labels.keys())]
    values = list(labels.values())
    y_pos = np.arange(len(keys))
    ax.barh(y_pos, values, align='center')
    ax.set_yticks(y_pos)
    ax.set_yticklabels(keys)
    ax.invert_yaxis()
    ax.set_xlabel('Count')
    # NOTE(review): for barh the count (v) is the x coordinate and the bar
    # index (i) the y coordinate — these look swapped; confirm placement.
    for i, v in enumerate(labels.values()):
        ax.text(x=i-.35, y=v+1, s=str(v), color='black', fontsize=12)
    title = f'{dataset} {mode} {language}'
    ax.set_title(title)
    plt.tight_layout()
    plt.savefig(f'Figures/{dataset}_{language}_{mode}_Labels.png', format='png')
def plot_label_distribution(dataset: str = 'Restaurant', language: str = 'English', mode: str = 'Train'):
    """Load the requested split and save a bar chart of its label counts."""
    data = get_data(dataset, language, mode)
    show_plot(data, dataset, language, mode)
    print('Done!')
# Label whitelists used by show_plot to filter the counted categories.
camera_labels = ['IMAGING', 'GENERAL', 'OTHER', 'PRICE', 'DISPLAY', 'EXPOSURE', 'DIMENSION', 'LENS', 'BATTERY', 'VIDEO', 'ZOOM', 'PERFORMANCE', 'MEMORY']
restaurant_labels = ['RESTAURANT#GENERAL', 'NULL', 'SERVICE#GENERAL', 'AMBIENCE#GENERAL', 'FOOD#QUALITY', 'FOOD#PRICES', 'RESTAURANT#PRICES', 'FOOD#STYLE_OPTIONS', 'DRINKS#QUALITY', 'DRINKS#STYLE_OPTIONS', 'LOCATION#GENERAL', 'DRINKS#PRICES', 'RESTAURANT#MISCELLANEOUS']
# Executed at import time: renders the Dutch Restaurant training distribution.
plot_label_distribution(dataset='Restaurant', language='Dutch', mode='Train')
|
class Switch(object):
    def __init__(self, session):
        # The shared API session used to issue all switch endpoint requests.
        super(Switch, self).__init__()
        self._session = session
def getDeviceSwitchPorts(self, serial: str):
"""
**List the switch ports for a switch**
https://developer.cisco.com/meraki/api-v1/#!get-device-switch-ports
- serial (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'ports'],
'operation': 'getDeviceSwitchPorts'
}
resource = f'/devices/{serial}/switch/ports'
return self._session.get(metadata, resource)
    def cycleDeviceSwitchPorts(self, serial: str, ports: list):
        """
        **Cycle a set of switch ports**
        https://developer.cisco.com/meraki/api-v1/#!cycle-device-switch-ports

        - serial (string): (required)
        - ports (array): List of switch ports. Example: [1, 2-5, 1_MA-MOD-8X10G_1, 1_MA-MOD-8X10G_2-1_MA-MOD-8X10G_8]
        """
        # Snapshot the call arguments; only names in body_params are sent.
        kwargs = locals()
        metadata = {
            'tags': ['switch', 'liveTools', 'ports'],
            'operation': 'cycleDeviceSwitchPorts'
        }
        resource = f'/devices/{serial}/switch/ports/cycle'
        body_params = ['ports', ]
        payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
        return self._session.post(metadata, resource, payload)
    def getDeviceSwitchPortsStatuses(self, serial: str, **kwargs):
        """
        **Return the status for all the ports of a switch**
        https://developer.cisco.com/meraki/api-v1/#!get-device-switch-ports-statuses

        - serial (string): (required)
        - t0 (string): The beginning of the timespan for the data. The maximum lookback period is 31 days from today.
        - timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameter t0. The value must be in seconds and be less than or equal to 31 days. The default is 1 day.
        """
        # Merge positional args into kwargs; only query_params are forwarded.
        kwargs.update(locals())
        metadata = {
            'tags': ['switch', 'monitor', 'ports', 'statuses'],
            'operation': 'getDeviceSwitchPortsStatuses'
        }
        resource = f'/devices/{serial}/switch/ports/statuses'
        query_params = ['t0', 'timespan', ]
        params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
        return self._session.get(metadata, resource, params)
    def getDeviceSwitchPortsStatusesPackets(self, serial: str, **kwargs):
        """
        **Return the packet counters for all the ports of a switch**
        https://developer.cisco.com/meraki/api-v1/#!get-device-switch-ports-statuses-packets

        - serial (string): (required)
        - t0 (string): The beginning of the timespan for the data. The maximum lookback period is 1 day from today.
        - timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameter t0. The value must be in seconds and be less than or equal to 1 day. The default is 1 day.
        """
        # Merge positional args into kwargs; only query_params are forwarded.
        kwargs.update(locals())
        metadata = {
            'tags': ['switch', 'monitor', 'ports', 'statuses', 'packets'],
            'operation': 'getDeviceSwitchPortsStatusesPackets'
        }
        resource = f'/devices/{serial}/switch/ports/statuses/packets'
        query_params = ['t0', 'timespan', ]
        params = {k.strip(): v for k, v in kwargs.items() if k.strip() in query_params}
        return self._session.get(metadata, resource, params)
def getDeviceSwitchPort(self, serial: str, portId: str):
"""
**Return a switch port**
https://developer.cisco.com/meraki/api-v1/#!get-device-switch-port
- serial (string): (required)
- portId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'ports'],
'operation': 'getDeviceSwitchPort'
}
resource = f'/devices/{serial}/switch/ports/{portId}'
return self._session.get(metadata, resource)
def updateDeviceSwitchPort(self, serial: str, portId: str, **kwargs):
"""
**Update a switch port**
https://developer.cisco.com/meraki/api-v1/#!update-device-switch-port
- serial (string): (required)
- portId (string): (required)
- name (string): The name of the switch port
- tags (array): The list of tags of the switch port
- enabled (boolean): The status of the switch port
- type (string): The type of the switch port ('trunk' or 'access')
- vlan (integer): The VLAN of the switch port. A null value will clear the value set for trunk ports.
- voiceVlan (integer): The voice VLAN of the switch port. Only applicable to access ports.
- allowedVlans (string): The VLANs allowed on the switch port. Only applicable to trunk ports.
- poeEnabled (boolean): The PoE status of the switch port
- isolationEnabled (boolean): The isolation status of the switch port
- rstpEnabled (boolean): The rapid spanning tree protocol status
- stpGuard (string): The state of the STP guard ('disabled', 'root guard', 'bpdu guard' or 'loop guard')
- linkNegotiation (string): The link speed for the switch port
- portScheduleId (string): The ID of the port schedule. A value of null will clear the port schedule.
- udld (string): The action to take when Unidirectional Link is detected (Alert only, Enforce). Default configuration is Alert only.
- accessPolicyType (string): The type of the access policy of the switch port. Only applicable to access ports. Can be one of 'Open', 'Custom access policy', 'MAC allow list' or 'Sticky MAC allow list'
- accessPolicyNumber (integer): The number of a custom access policy to configure on the switch port. Only applicable when 'accessPolicyType' is 'Custom access policy'
- macAllowList (array): Only devices with MAC addresses specified in this list will have access to this port. Up to 20 MAC addresses can be defined. Only applicable when 'accessPolicyType' is 'MAC allow list'
- stickyMacAllowList (array): The initial list of MAC addresses for sticky Mac allow list. Only applicable when 'accessPolicyType' is 'Sticky MAC allow list'
- stickyMacAllowListLimit (integer): The maximum number of MAC addresses for sticky MAC allow list. Only applicable when 'accessPolicyType' is 'Sticky MAC allow list'
- stormControlEnabled (boolean): The storm control status of the switch port
- adaptivePolicyGroupId (string): The adaptive policy group ID that will be used to tag traffic through this switch port. This ID must pre-exist during the configuration, else needs to be created using adaptivePolicy/groups API. Cannot be applied to a port on a switch bound to profile.
- peerSgtCapable (boolean): If true, Peer SGT is enabled for traffic through this switch port. Applicable to trunk port only, not access port.
Cannot be applied to a port on a switch bound to profile.
- flexibleStackingEnabled (boolean): For supported switches (e.g. MS420/MS425), whether or not the port has flexible stacking enabled.
"""
kwargs.update(locals())
if 'type' in kwargs:
options = ['trunk', 'access']
assert kwargs['type'] in options, f'''"type" cannot be "{kwargs['type']}", & must be set to one of: {options}'''
if 'stpGuard' in kwargs:
options = ['disabled', 'root guard', 'bpdu guard', 'loop guard']
assert kwargs['stpGuard'] in options, f'''"stpGuard" cannot be "{kwargs['stpGuard']}", & must be set to one of: {options}'''
if 'udld' in kwargs:
options = ['Alert only', 'Enforce']
assert kwargs['udld'] in options, f'''"udld" cannot be "{kwargs['udld']}", & must be set to one of: {options}'''
if 'accessPolicyType' in kwargs:
options = ['Open', 'Custom access policy', 'MAC allow list', 'Sticky MAC allow list']
assert kwargs['accessPolicyType'] in options, f'''"accessPolicyType" cannot be "{kwargs['accessPolicyType']}", & must be set to one of: {options}'''
metadata = {
'tags': ['switch', 'configure', 'ports'],
'operation': 'updateDeviceSwitchPort'
}
resource = f'/devices/{serial}/switch/ports/{portId}'
body_params = ['name', 'tags', 'enabled', 'type', 'vlan', 'voiceVlan', 'allowedVlans', 'poeEnabled', 'isolationEnabled', 'rstpEnabled', 'stpGuard', 'linkNegotiation', 'portScheduleId', 'udld', 'accessPolicyType', 'accessPolicyNumber', 'macAllowList', 'stickyMacAllowList', 'stickyMacAllowListLimit', 'stormControlEnabled', 'adaptivePolicyGroupId', 'peerSgtCapable', 'flexibleStackingEnabled', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getDeviceSwitchRoutingInterfaces(self, serial: str):
"""
**List layer 3 interfaces for a switch**
https://developer.cisco.com/meraki/api-v1/#!get-device-switch-routing-interfaces
- serial (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'routing', 'interfaces'],
'operation': 'getDeviceSwitchRoutingInterfaces'
}
resource = f'/devices/{serial}/switch/routing/interfaces'
return self._session.get(metadata, resource)
def createDeviceSwitchRoutingInterface(self, serial: str, name: str, vlanId: int, **kwargs):
"""
**Create a layer 3 interface for a switch**
https://developer.cisco.com/meraki/api-v1/#!create-device-switch-routing-interface
- serial (string): (required)
- name (string): A friendly name or description for the interface or VLAN.
- vlanId (integer): The VLAN this routed interface is on. VLAN must be between 1 and 4094.
- subnet (string): The network that this routed interface is on, in CIDR notation (ex. 10.1.1.0/24).
- interfaceIp (string): The IP address this switch will use for layer 3 routing on this VLAN or subnet. This cannot be the same as the switch's management IP.
- multicastRouting (string): Enable multicast support if, multicast routing between VLANs is required. Options are, 'disabled', 'enabled' or 'IGMP snooping querier'. Default is 'disabled'.
- defaultGateway (string): The next hop for any traffic that isn't going to a directly connected subnet or over a static route. This IP address must exist in a subnet with a routed interface.
- ospfSettings (object): The OSPF routing settings of the interface.
- ipv6 (object): The IPv6 settings of the interface.
"""
kwargs.update(locals())
if 'multicastRouting' in kwargs:
options = ['disabled', 'enabled', 'IGMP snooping querier']
assert kwargs['multicastRouting'] in options, f'''"multicastRouting" cannot be "{kwargs['multicastRouting']}", & must be set to one of: {options}'''
metadata = {
'tags': ['switch', 'configure', 'routing', 'interfaces'],
'operation': 'createDeviceSwitchRoutingInterface'
}
resource = f'/devices/{serial}/switch/routing/interfaces'
body_params = ['name', 'subnet', 'interfaceIp', 'multicastRouting', 'vlanId', 'defaultGateway', 'ospfSettings', 'ipv6', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getDeviceSwitchRoutingInterface(self, serial: str, interfaceId: str):
"""
**Return a layer 3 interface for a switch**
https://developer.cisco.com/meraki/api-v1/#!get-device-switch-routing-interface
- serial (string): (required)
- interfaceId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'routing', 'interfaces'],
'operation': 'getDeviceSwitchRoutingInterface'
}
resource = f'/devices/{serial}/switch/routing/interfaces/{interfaceId}'
return self._session.get(metadata, resource)
def updateDeviceSwitchRoutingInterface(self, serial: str, interfaceId: str, **kwargs):
"""
**Update a layer 3 interface for a switch**
https://developer.cisco.com/meraki/api-v1/#!update-device-switch-routing-interface
- serial (string): (required)
- interfaceId (string): (required)
- name (string): A friendly name or description for the interface or VLAN.
- subnet (string): The network that this routed interface is on, in CIDR notation (ex. 10.1.1.0/24).
- interfaceIp (string): The IP address this switch will use for layer 3 routing on this VLAN or subnet. This cannot be the same as the switch's management IP.
- multicastRouting (string): Enable multicast support if, multicast routing between VLANs is required. Options are, 'disabled', 'enabled' or 'IGMP snooping querier'.
- vlanId (integer): The VLAN this routed interface is on. VLAN must be between 1 and 4094.
- defaultGateway (string): The next hop for any traffic that isn't going to a directly connected subnet or over a static route. This IP address must exist in a subnet with a routed interface.
- ospfSettings (object): The OSPF routing settings of the interface.
- ipv6 (object): The IPv6 settings of the interface.
"""
kwargs.update(locals())
if 'multicastRouting' in kwargs:
options = ['disabled', 'enabled', 'IGMP snooping querier']
assert kwargs['multicastRouting'] in options, f'''"multicastRouting" cannot be "{kwargs['multicastRouting']}", & must be set to one of: {options}'''
metadata = {
'tags': ['switch', 'configure', 'routing', 'interfaces'],
'operation': 'updateDeviceSwitchRoutingInterface'
}
resource = f'/devices/{serial}/switch/routing/interfaces/{interfaceId}'
body_params = ['name', 'subnet', 'interfaceIp', 'multicastRouting', 'vlanId', 'defaultGateway', 'ospfSettings', 'ipv6', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteDeviceSwitchRoutingInterface(self, serial: str, interfaceId: str):
"""
**Delete a layer 3 interface from the switch**
https://developer.cisco.com/meraki/api-v1/#!delete-device-switch-routing-interface
- serial (string): (required)
- interfaceId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'routing', 'interfaces'],
'operation': 'deleteDeviceSwitchRoutingInterface'
}
resource = f'/devices/{serial}/switch/routing/interfaces/{interfaceId}'
return self._session.delete(metadata, resource)
def getDeviceSwitchRoutingInterfaceDhcp(self, serial: str, interfaceId: str):
"""
**Return a layer 3 interface DHCP configuration for a switch**
https://developer.cisco.com/meraki/api-v1/#!get-device-switch-routing-interface-dhcp
- serial (string): (required)
- interfaceId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'routing', 'interfaces', 'dhcp'],
'operation': 'getDeviceSwitchRoutingInterfaceDhcp'
}
resource = f'/devices/{serial}/switch/routing/interfaces/{interfaceId}/dhcp'
return self._session.get(metadata, resource)
def updateDeviceSwitchRoutingInterfaceDhcp(self, serial: str, interfaceId: str, **kwargs):
"""
**Update a layer 3 interface DHCP configuration for a switch**
https://developer.cisco.com/meraki/api-v1/#!update-device-switch-routing-interface-dhcp
- serial (string): (required)
- interfaceId (string): (required)
- dhcpMode (string): The DHCP mode options for the switch interface ('dhcpDisabled', 'dhcpRelay' or 'dhcpServer')
- dhcpRelayServerIps (array): The DHCP relay server IPs to which DHCP packets would get relayed for the switch interface
- dhcpLeaseTime (string): The DHCP lease time config for the dhcp server running on switch interface ('30 minutes', '1 hour', '4 hours', '12 hours', '1 day' or '1 week')
- dnsNameserversOption (string): The DHCP name server option for the dhcp server running on the switch interface ('googlePublicDns', 'openDns' or 'custom')
- dnsCustomNameservers (array): The DHCP name server IPs when DHCP name server option is 'custom'
- bootOptionsEnabled (boolean): Enable DHCP boot options to provide PXE boot options configs for the dhcp server running on the switch interface
- bootNextServer (string): The PXE boot server IP for the DHCP server running on the switch interface
- bootFileName (string): The PXE boot server filename for the DHCP server running on the switch interface
- dhcpOptions (array): Array of DHCP options consisting of code, type and value for the DHCP server running on the switch interface
- reservedIpRanges (array): Array of DHCP reserved IP assignments for the DHCP server running on the switch interface
- fixedIpAssignments (array): Array of DHCP fixed IP assignments for the DHCP server running on the switch interface
"""
kwargs.update(locals())
if 'dhcpMode' in kwargs:
options = ['dhcpDisabled', 'dhcpRelay', 'dhcpServer']
assert kwargs['dhcpMode'] in options, f'''"dhcpMode" cannot be "{kwargs['dhcpMode']}", & must be set to one of: {options}'''
if 'dhcpLeaseTime' in kwargs:
options = ['30 minutes', '1 hour', '4 hours', '12 hours', '1 day', '1 week']
assert kwargs['dhcpLeaseTime'] in options, f'''"dhcpLeaseTime" cannot be "{kwargs['dhcpLeaseTime']}", & must be set to one of: {options}'''
if 'dnsNameserversOption' in kwargs:
options = ['googlePublicDns', 'openDns', 'custom']
assert kwargs['dnsNameserversOption'] in options, f'''"dnsNameserversOption" cannot be "{kwargs['dnsNameserversOption']}", & must be set to one of: {options}'''
metadata = {
'tags': ['switch', 'configure', 'routing', 'interfaces', 'dhcp'],
'operation': 'updateDeviceSwitchRoutingInterfaceDhcp'
}
resource = f'/devices/{serial}/switch/routing/interfaces/{interfaceId}/dhcp'
body_params = ['dhcpMode', 'dhcpRelayServerIps', 'dhcpLeaseTime', 'dnsNameserversOption', 'dnsCustomNameservers', 'bootOptionsEnabled', 'bootNextServer', 'bootFileName', 'dhcpOptions', 'reservedIpRanges', 'fixedIpAssignments', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getDeviceSwitchRoutingStaticRoutes(self, serial: str):
"""
**List layer 3 static routes for a switch**
https://developer.cisco.com/meraki/api-v1/#!get-device-switch-routing-static-routes
- serial (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'routing', 'staticRoutes'],
'operation': 'getDeviceSwitchRoutingStaticRoutes'
}
resource = f'/devices/{serial}/switch/routing/staticRoutes'
return self._session.get(metadata, resource)
def createDeviceSwitchRoutingStaticRoute(self, serial: str, subnet: str, nextHopIp: str, **kwargs):
"""
**Create a layer 3 static route for a switch**
https://developer.cisco.com/meraki/api-v1/#!create-device-switch-routing-static-route
- serial (string): (required)
- subnet (string): The subnet which is routed via this static route and should be specified in CIDR notation (ex. 1.2.3.0/24)
- nextHopIp (string): IP address of the next hop device to which the device sends its traffic for the subnet
- name (string): Name or description for layer 3 static route
- advertiseViaOspfEnabled (boolean): Option to advertise static route via OSPF
- preferOverOspfRoutesEnabled (boolean): Option to prefer static route over OSPF routes
"""
kwargs.update(locals())
metadata = {
'tags': ['switch', 'configure', 'routing', 'staticRoutes'],
'operation': 'createDeviceSwitchRoutingStaticRoute'
}
resource = f'/devices/{serial}/switch/routing/staticRoutes'
body_params = ['name', 'subnet', 'nextHopIp', 'advertiseViaOspfEnabled', 'preferOverOspfRoutesEnabled', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getDeviceSwitchRoutingStaticRoute(self, serial: str, staticRouteId: str):
"""
**Return a layer 3 static route for a switch**
https://developer.cisco.com/meraki/api-v1/#!get-device-switch-routing-static-route
- serial (string): (required)
- staticRouteId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'routing', 'staticRoutes'],
'operation': 'getDeviceSwitchRoutingStaticRoute'
}
resource = f'/devices/{serial}/switch/routing/staticRoutes/{staticRouteId}'
return self._session.get(metadata, resource)
def updateDeviceSwitchRoutingStaticRoute(self, serial: str, staticRouteId: str, **kwargs):
"""
**Update a layer 3 static route for a switch**
https://developer.cisco.com/meraki/api-v1/#!update-device-switch-routing-static-route
- serial (string): (required)
- staticRouteId (string): (required)
- name (string): Name or description for layer 3 static route
- subnet (string): The subnet which is routed via this static route and should be specified in CIDR notation (ex. 1.2.3.0/24)
- nextHopIp (string): IP address of the next hop device to which the device sends its traffic for the subnet
- advertiseViaOspfEnabled (boolean): Option to advertise static route via OSPF
- preferOverOspfRoutesEnabled (boolean): Option to prefer static route over OSPF routes
"""
kwargs.update(locals())
metadata = {
'tags': ['switch', 'configure', 'routing', 'staticRoutes'],
'operation': 'updateDeviceSwitchRoutingStaticRoute'
}
resource = f'/devices/{serial}/switch/routing/staticRoutes/{staticRouteId}'
body_params = ['name', 'subnet', 'nextHopIp', 'advertiseViaOspfEnabled', 'preferOverOspfRoutesEnabled', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteDeviceSwitchRoutingStaticRoute(self, serial: str, staticRouteId: str):
"""
**Delete a layer 3 static route for a switch**
https://developer.cisco.com/meraki/api-v1/#!delete-device-switch-routing-static-route
- serial (string): (required)
- staticRouteId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'routing', 'staticRoutes'],
'operation': 'deleteDeviceSwitchRoutingStaticRoute'
}
resource = f'/devices/{serial}/switch/routing/staticRoutes/{staticRouteId}'
return self._session.delete(metadata, resource)
def getDeviceSwitchWarmSpare(self, serial: str):
"""
**Return warm spare configuration for a switch**
https://developer.cisco.com/meraki/api-v1/#!get-device-switch-warm-spare
- serial (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'warmSpare'],
'operation': 'getDeviceSwitchWarmSpare'
}
resource = f'/devices/{serial}/switch/warmSpare'
return self._session.get(metadata, resource)
def updateDeviceSwitchWarmSpare(self, serial: str, enabled: bool, **kwargs):
"""
**Update warm spare configuration for a switch**
https://developer.cisco.com/meraki/api-v1/#!update-device-switch-warm-spare
- serial (string): (required)
- enabled (boolean): Enable or disable warm spare for a switch
- spareSerial (string): Serial number of the warm spare switch
"""
kwargs.update(locals())
metadata = {
'tags': ['switch', 'configure', 'warmSpare'],
'operation': 'updateDeviceSwitchWarmSpare'
}
resource = f'/devices/{serial}/switch/warmSpare'
body_params = ['enabled', 'spareSerial', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkSwitchAccessControlLists(self, networkId: str):
"""
**Return the access control lists for a MS network**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-access-control-lists
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'accessControlLists'],
'operation': 'getNetworkSwitchAccessControlLists'
}
resource = f'/networks/{networkId}/switch/accessControlLists'
return self._session.get(metadata, resource)
def updateNetworkSwitchAccessControlLists(self, networkId: str, rules: list):
"""
**Update the access control lists for a MS network**
https://developer.cisco.com/meraki/api-v1/#!update-network-switch-access-control-lists
- networkId (string): (required)
- rules (array): An ordered array of the access control list rules (not including the default rule). An empty array will clear the rules.
"""
kwargs = locals()
metadata = {
'tags': ['switch', 'configure', 'accessControlLists'],
'operation': 'updateNetworkSwitchAccessControlLists'
}
resource = f'/networks/{networkId}/switch/accessControlLists'
body_params = ['rules', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkSwitchAccessPolicies(self, networkId: str):
"""
**List the access policies for a switch network**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-access-policies
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'accessPolicies'],
'operation': 'getNetworkSwitchAccessPolicies'
}
resource = f'/networks/{networkId}/switch/accessPolicies'
return self._session.get(metadata, resource)
def createNetworkSwitchAccessPolicy(self, networkId: str, name: str, radiusServers: list, radiusTestingEnabled: bool, radiusCoaSupportEnabled: bool, radiusAccountingEnabled: bool, hostMode: str, urlRedirectWalledGardenEnabled: bool, **kwargs):
"""
**Create an access policy for a switch network**
https://developer.cisco.com/meraki/api-v1/#!create-network-switch-access-policy
- networkId (string): (required)
- name (string): Name of the access policy
- radiusServers (array): List of RADIUS servers to require connecting devices to authenticate against before granting network access
- radiusTestingEnabled (boolean): If enabled, Meraki devices will periodically send access-request messages to these RADIUS servers
- radiusCoaSupportEnabled (boolean): Change of authentication for RADIUS re-authentication and disconnection
- radiusAccountingEnabled (boolean): Enable to send start, interim-update and stop messages to a configured RADIUS accounting server for tracking connected clients
- hostMode (string): Choose the Host Mode for the access policy.
- urlRedirectWalledGardenEnabled (boolean): Enable to restrict access for clients to a specific set of IP addresses or hostnames prior to authentication
- radius (object): Object for RADIUS Settings
- radiusAccountingServers (array): List of RADIUS accounting servers to require connecting devices to authenticate against before granting network access
- radiusGroupAttribute (string): Acceptable values are `""` for None, or `"11"` for Group Policies ACL
- accessPolicyType (string): Access Type of the policy. Automatically 'Hybrid authentication' when hostMode is 'Multi-Domain'.
- increaseAccessSpeed (boolean): Enabling this option will make switches execute 802.1X and MAC-bypass authentication simultaneously so that clients authenticate faster. Only required when accessPolicyType is 'Hybrid Authentication.
- guestVlanId (integer): ID for the guest VLAN allow unauthorized devices access to limited network resources
- voiceVlanClients (boolean): CDP/LLDP capable voice clients will be able to use this VLAN. Automatically true when hostMode is 'Multi-Domain'.
- urlRedirectWalledGardenRanges (array): IP address ranges, in CIDR notation, to restrict access for clients to a specific set of IP addresses or hostnames prior to authentication
"""
kwargs.update(locals())
if 'hostMode' in kwargs:
options = ['Single-Host', 'Multi-Domain', 'Multi-Host', 'Multi-Auth']
assert kwargs['hostMode'] in options, f'''"hostMode" cannot be "{kwargs['hostMode']}", & must be set to one of: {options}'''
if 'accessPolicyType' in kwargs:
options = ['802.1x', 'MAC authentication bypass', 'Hybrid authentication']
assert kwargs['accessPolicyType'] in options, f'''"accessPolicyType" cannot be "{kwargs['accessPolicyType']}", & must be set to one of: {options}'''
metadata = {
'tags': ['switch', 'configure', 'accessPolicies'],
'operation': 'createNetworkSwitchAccessPolicy'
}
resource = f'/networks/{networkId}/switch/accessPolicies'
body_params = ['name', 'radiusServers', 'radius', 'radiusTestingEnabled', 'radiusCoaSupportEnabled', 'radiusAccountingEnabled', 'radiusAccountingServers', 'radiusGroupAttribute', 'hostMode', 'accessPolicyType', 'increaseAccessSpeed', 'guestVlanId', 'voiceVlanClients', 'urlRedirectWalledGardenEnabled', 'urlRedirectWalledGardenRanges', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getNetworkSwitchAccessPolicy(self, networkId: str, accessPolicyNumber: str):
"""
**Return a specific access policy for a switch network**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-access-policy
- networkId (string): (required)
- accessPolicyNumber (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'accessPolicies'],
'operation': 'getNetworkSwitchAccessPolicy'
}
resource = f'/networks/{networkId}/switch/accessPolicies/{accessPolicyNumber}'
return self._session.get(metadata, resource)
def updateNetworkSwitchAccessPolicy(self, networkId: str, accessPolicyNumber: str, **kwargs):
"""
**Update an access policy for a switch network**
https://developer.cisco.com/meraki/api-v1/#!update-network-switch-access-policy
- networkId (string): (required)
- accessPolicyNumber (string): (required)
- name (string): Name of the access policy
- radiusServers (array): List of RADIUS servers to require connecting devices to authenticate against before granting network access
- radius (object): Object for RADIUS Settings
- radiusTestingEnabled (boolean): If enabled, Meraki devices will periodically send access-request messages to these RADIUS servers
- radiusCoaSupportEnabled (boolean): Change of authentication for RADIUS re-authentication and disconnection
- radiusAccountingEnabled (boolean): Enable to send start, interim-update and stop messages to a configured RADIUS accounting server for tracking connected clients
- radiusAccountingServers (array): List of RADIUS accounting servers to require connecting devices to authenticate against before granting network access
- radiusGroupAttribute (string): Can be either `""`, which means `None` on Dashboard, or `"11"`, which means `Filter-Id` on Dashboard and will use Group Policy ACLs when supported (firmware 14+)
- hostMode (string): Choose the Host Mode for the access policy.
- accessPolicyType (string): Access Type of the policy. Automatically 'Hybrid authentication' when hostMode is 'Multi-Domain'.
- increaseAccessSpeed (boolean): Enabling this option will make switches execute 802.1X and MAC-bypass authentication simultaneously so that clients authenticate faster. Only required when accessPolicyType is 'Hybrid Authentication.
- guestVlanId (integer): ID for the guest VLAN allow unauthorized devices access to limited network resources
- voiceVlanClients (boolean): CDP/LLDP capable voice clients will be able to use this VLAN. Automatically true when hostMode is 'Multi-Domain'.
- urlRedirectWalledGardenEnabled (boolean): Enable to restrict access for clients to a specific set of IP addresses or hostnames prior to authentication
- urlRedirectWalledGardenRanges (array): IP address ranges, in CIDR notation, to restrict access for clients to a specific set of IP addresses or hostnames prior to authentication
"""
kwargs.update(locals())
if 'hostMode' in kwargs:
options = ['Single-Host', 'Multi-Domain', 'Multi-Host', 'Multi-Auth']
assert kwargs['hostMode'] in options, f'''"hostMode" cannot be "{kwargs['hostMode']}", & must be set to one of: {options}'''
if 'accessPolicyType' in kwargs:
options = ['802.1x', 'MAC authentication bypass', 'Hybrid authentication']
assert kwargs['accessPolicyType'] in options, f'''"accessPolicyType" cannot be "{kwargs['accessPolicyType']}", & must be set to one of: {options}'''
metadata = {
'tags': ['switch', 'configure', 'accessPolicies'],
'operation': 'updateNetworkSwitchAccessPolicy'
}
resource = f'/networks/{networkId}/switch/accessPolicies/{accessPolicyNumber}'
body_params = ['name', 'radiusServers', 'radius', 'radiusTestingEnabled', 'radiusCoaSupportEnabled', 'radiusAccountingEnabled', 'radiusAccountingServers', 'radiusGroupAttribute', 'hostMode', 'accessPolicyType', 'increaseAccessSpeed', 'guestVlanId', 'voiceVlanClients', 'urlRedirectWalledGardenEnabled', 'urlRedirectWalledGardenRanges', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteNetworkSwitchAccessPolicy(self, networkId: str, accessPolicyNumber: str):
"""
**Delete an access policy for a switch network**
https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-access-policy
- networkId (string): (required)
- accessPolicyNumber (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'accessPolicies'],
'operation': 'deleteNetworkSwitchAccessPolicy'
}
resource = f'/networks/{networkId}/switch/accessPolicies/{accessPolicyNumber}'
return self._session.delete(metadata, resource)
def getNetworkSwitchAlternateManagementInterface(self, networkId: str):
"""
**Return the switch alternate management interface for the network**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-alternate-management-interface
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'alternateManagementInterface'],
'operation': 'getNetworkSwitchAlternateManagementInterface'
}
resource = f'/networks/{networkId}/switch/alternateManagementInterface'
return self._session.get(metadata, resource)
def updateNetworkSwitchAlternateManagementInterface(self, networkId: str, **kwargs):
"""
**Update the switch alternate management interface for the network**
https://developer.cisco.com/meraki/api-v1/#!update-network-switch-alternate-management-interface
- networkId (string): (required)
- enabled (boolean): Boolean value to enable or disable AMI configuration. If enabled, VLAN and protocols must be set
- vlanId (integer): Alternate management VLAN, must be between 1 and 4094
- protocols (array): Can be one or more of the following values: 'radius', 'snmp' or 'syslog'
- switches (array): Array of switch serial number and IP assignment. If parameter is present, it cannot have empty body. Note: switches parameter is not applicable for template networks, in other words, do not put 'switches' in the body when updating template networks. Also, an empty 'switches' array will remove all previous assignments
"""
kwargs.update(locals())
metadata = {
'tags': ['switch', 'configure', 'alternateManagementInterface'],
'operation': 'updateNetworkSwitchAlternateManagementInterface'
}
resource = f'/networks/{networkId}/switch/alternateManagementInterface'
body_params = ['enabled', 'vlanId', 'protocols', 'switches', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkSwitchDhcpServerPolicy(self, networkId: str):
"""
**Return the DHCP server policy**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-dhcp-server-policy
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'dhcpServerPolicy'],
'operation': 'getNetworkSwitchDhcpServerPolicy'
}
resource = f'/networks/{networkId}/switch/dhcpServerPolicy'
return self._session.get(metadata, resource)
def updateNetworkSwitchDhcpServerPolicy(self, networkId: str, **kwargs):
"""
**Update the DHCP server policy**
https://developer.cisco.com/meraki/api-v1/#!update-network-switch-dhcp-server-policy
- networkId (string): (required)
- defaultPolicy (string): 'allow' or 'block' new DHCP servers. Default value is 'allow'.
- allowedServers (array): List the MAC addresses of DHCP servers to permit on the network. Applicable only if defaultPolicy is set to block. An empty array will clear the entries.
- blockedServers (array): List the MAC addresses of DHCP servers to block on the network. Applicable only if defaultPolicy is set to allow. An empty array will clear the entries.
"""
kwargs.update(locals())
if 'defaultPolicy' in kwargs:
options = ['allow', 'block']
assert kwargs['defaultPolicy'] in options, f'''"defaultPolicy" cannot be "{kwargs['defaultPolicy']}", & must be set to one of: {options}'''
metadata = {
'tags': ['switch', 'configure', 'dhcpServerPolicy'],
'operation': 'updateNetworkSwitchDhcpServerPolicy'
}
resource = f'/networks/{networkId}/switch/dhcpServerPolicy'
body_params = ['defaultPolicy', 'allowedServers', 'blockedServers', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkSwitchDscpToCosMappings(self, networkId: str):
"""
**Return the DSCP to CoS mappings**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-dscp-to-cos-mappings
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'dscpToCosMappings'],
'operation': 'getNetworkSwitchDscpToCosMappings'
}
resource = f'/networks/{networkId}/switch/dscpToCosMappings'
return self._session.get(metadata, resource)
def updateNetworkSwitchDscpToCosMappings(self, networkId: str, mappings: list):
    """
    **Update the DSCP to CoS mappings**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-dscp-to-cos-mappings

    - networkId (string): (required)
    - mappings (array): An array of DSCP to CoS mappings. An empty array will reset the mappings to default.
    """
    params = locals()
    endpoint = f'/networks/{networkId}/switch/dscpToCosMappings'
    descriptor = {
        'tags': ['switch', 'configure', 'dscpToCosMappings'],
        'operation': 'updateNetworkSwitchDscpToCosMappings',
    }
    accepted = ('mappings',)
    payload = {key.strip(): value for key, value in params.items() if key.strip() in accepted}
    return self._session.put(descriptor, endpoint, payload)
def getNetworkSwitchLinkAggregations(self, networkId: str):
    """
    **List link aggregation groups**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-link-aggregations

    - networkId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/linkAggregations'
    descriptor = {
        'tags': ['switch', 'configure', 'linkAggregations'],
        'operation': 'getNetworkSwitchLinkAggregations',
    }
    return self._session.get(descriptor, endpoint)
def createNetworkSwitchLinkAggregation(self, networkId: str, **kwargs):
    """
    **Create a link aggregation group**
    https://developer.cisco.com/meraki/api-v1/#!create-network-switch-link-aggregation

    - networkId (string): (required)
    - switchPorts (array): Array of switch or stack ports for creating aggregation group. Minimum 2 and maximum 8 ports are supported.
    - switchProfilePorts (array): Array of switch profile ports for creating aggregation group. Minimum 2 and maximum 8 ports are supported.
    """
    kwargs.update(locals())
    endpoint = f'/networks/{networkId}/switch/linkAggregations'
    descriptor = {
        'tags': ['switch', 'configure', 'linkAggregations'],
        'operation': 'createNetworkSwitchLinkAggregation',
    }
    accepted = ('switchPorts', 'switchProfilePorts')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in accepted}
    return self._session.post(descriptor, endpoint, payload)
def updateNetworkSwitchLinkAggregation(self, networkId: str, linkAggregationId: str, **kwargs):
    """
    **Update a link aggregation group**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-link-aggregation

    - networkId (string): (required)
    - linkAggregationId (string): (required)
    - switchPorts (array): Array of switch or stack ports for updating aggregation group. Minimum 2 and maximum 8 ports are supported.
    - switchProfilePorts (array): Array of switch profile ports for updating aggregation group. Minimum 2 and maximum 8 ports are supported.
    """
    kwargs.update(locals())
    endpoint = f'/networks/{networkId}/switch/linkAggregations/{linkAggregationId}'
    descriptor = {
        'tags': ['switch', 'configure', 'linkAggregations'],
        'operation': 'updateNetworkSwitchLinkAggregation',
    }
    accepted = ('switchPorts', 'switchProfilePorts')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in accepted}
    return self._session.put(descriptor, endpoint, payload)
def deleteNetworkSwitchLinkAggregation(self, networkId: str, linkAggregationId: str):
    """
    **Split a link aggregation group into separate ports**
    https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-link-aggregation

    - networkId (string): (required)
    - linkAggregationId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/linkAggregations/{linkAggregationId}'
    descriptor = {
        'tags': ['switch', 'configure', 'linkAggregations'],
        'operation': 'deleteNetworkSwitchLinkAggregation',
    }
    return self._session.delete(descriptor, endpoint)
def getNetworkSwitchMtu(self, networkId: str):
    """
    **Return the MTU configuration**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-mtu

    - networkId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/mtu'
    descriptor = {
        'tags': ['switch', 'configure', 'mtu'],
        'operation': 'getNetworkSwitchMtu',
    }
    return self._session.get(descriptor, endpoint)
def updateNetworkSwitchMtu(self, networkId: str, **kwargs):
    """
    **Update the MTU configuration**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-mtu

    - networkId (string): (required)
    - defaultMtuSize (integer): MTU size for the entire network. Default value is 9578.
    - overrides (array): Override MTU size for individual switches or switch profiles. An empty array will clear overrides.
    """
    kwargs.update(locals())
    endpoint = f'/networks/{networkId}/switch/mtu'
    descriptor = {
        'tags': ['switch', 'configure', 'mtu'],
        'operation': 'updateNetworkSwitchMtu',
    }
    accepted = ('defaultMtuSize', 'overrides')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in accepted}
    return self._session.put(descriptor, endpoint, payload)
def getNetworkSwitchPortSchedules(self, networkId: str):
    """
    **List switch port schedules**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-port-schedules

    - networkId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/portSchedules'
    descriptor = {
        'tags': ['switch', 'configure', 'portSchedules'],
        'operation': 'getNetworkSwitchPortSchedules',
    }
    return self._session.get(descriptor, endpoint)
def createNetworkSwitchPortSchedule(self, networkId: str, name: str, **kwargs):
    """
    **Add a switch port schedule**
    https://developer.cisco.com/meraki/api-v1/#!create-network-switch-port-schedule

    - networkId (string): (required)
    - name (string): The name for your port schedule. Required
    - portSchedule (object): The schedule for switch port scheduling. Schedules are applied to days of the week.
    When it's empty, default schedule with all days of a week are configured.
    Any unspecified day in the schedule is added as a default schedule configuration of the day.
    """
    kwargs.update(locals())
    endpoint = f'/networks/{networkId}/switch/portSchedules'
    descriptor = {
        'tags': ['switch', 'configure', 'portSchedules'],
        'operation': 'createNetworkSwitchPortSchedule',
    }
    accepted = ('name', 'portSchedule')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in accepted}
    return self._session.post(descriptor, endpoint, payload)
def deleteNetworkSwitchPortSchedule(self, networkId: str, portScheduleId: str):
    """
    **Delete a switch port schedule**
    https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-port-schedule

    - networkId (string): (required)
    - portScheduleId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/portSchedules/{portScheduleId}'
    descriptor = {
        'tags': ['switch', 'configure', 'portSchedules'],
        'operation': 'deleteNetworkSwitchPortSchedule',
    }
    return self._session.delete(descriptor, endpoint)
def updateNetworkSwitchPortSchedule(self, networkId: str, portScheduleId: str, **kwargs):
    """
    **Update a switch port schedule**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-port-schedule

    - networkId (string): (required)
    - portScheduleId (string): (required)
    - name (string): The name for your port schedule.
    - portSchedule (object): The schedule for switch port scheduling. Schedules are applied to days of the week.
    When it's empty, default schedule with all days of a week are configured.
    Any unspecified day in the schedule is added as a default schedule configuration of the day.
    """
    kwargs.update(locals())
    endpoint = f'/networks/{networkId}/switch/portSchedules/{portScheduleId}'
    descriptor = {
        'tags': ['switch', 'configure', 'portSchedules'],
        'operation': 'updateNetworkSwitchPortSchedule',
    }
    accepted = ('name', 'portSchedule')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in accepted}
    return self._session.put(descriptor, endpoint, payload)
def getNetworkSwitchQosRules(self, networkId: str):
    """
    **List quality of service rules**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-qos-rules

    - networkId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/qosRules'
    descriptor = {
        'tags': ['switch', 'configure', 'qosRules'],
        'operation': 'getNetworkSwitchQosRules',
    }
    return self._session.get(descriptor, endpoint)
def createNetworkSwitchQosRule(self, networkId: str, vlan: int, **kwargs):
    """
    **Add a quality of service rule**
    https://developer.cisco.com/meraki/api-v1/#!create-network-switch-qos-rule

    - networkId (string): (required)
    - vlan (integer): The VLAN of the incoming packet. A null value will match any VLAN.
    - protocol (string): The protocol of the incoming packet. Can be one of "ANY", "TCP" or "UDP". Default value is "ANY"
    - srcPort (integer): The source port of the incoming packet. Applicable only if protocol is TCP or UDP.
    - srcPortRange (string): The source port range of the incoming packet. Applicable only if protocol is set to TCP or UDP. Example: 70-80
    - dstPort (integer): The destination port of the incoming packet. Applicable only if protocol is TCP or UDP.
    - dstPortRange (string): The destination port range of the incoming packet. Applicable only if protocol is set to TCP or UDP. Example: 70-80
    - dscp (integer): DSCP tag. Set this to -1 to trust incoming DSCP. Default value is 0
    """
    kwargs.update(locals())
    # Guard the enumerated parameter before issuing the request.
    if 'protocol' in kwargs:
        options = ['ANY', 'TCP', 'UDP']
        assert kwargs['protocol'] in options, f'''"protocol" cannot be "{kwargs['protocol']}", & must be set to one of: {options}'''
    endpoint = f'/networks/{networkId}/switch/qosRules'
    descriptor = {
        'tags': ['switch', 'configure', 'qosRules'],
        'operation': 'createNetworkSwitchQosRule',
    }
    accepted = ('vlan', 'protocol', 'srcPort', 'srcPortRange', 'dstPort', 'dstPortRange', 'dscp')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in accepted}
    return self._session.post(descriptor, endpoint, payload)
def getNetworkSwitchQosRulesOrder(self, networkId: str):
    """
    **Return the quality of service rule IDs by order in which they will be processed by the switch**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-qos-rules-order

    - networkId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/qosRules/order'
    descriptor = {
        'tags': ['switch', 'configure', 'qosRules', 'order'],
        'operation': 'getNetworkSwitchQosRulesOrder',
    }
    return self._session.get(descriptor, endpoint)
def updateNetworkSwitchQosRulesOrder(self, networkId: str, ruleIds: list):
    """
    **Update the order in which the rules should be processed by the switch**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-qos-rules-order

    - networkId (string): (required)
    - ruleIds (array): A list of quality of service rule IDs arranged in order in which they should be processed by the switch.
    """
    params = locals()
    endpoint = f'/networks/{networkId}/switch/qosRules/order'
    descriptor = {
        'tags': ['switch', 'configure', 'qosRules', 'order'],
        'operation': 'updateNetworkSwitchQosRulesOrder',
    }
    accepted = ('ruleIds',)
    payload = {key.strip(): value for key, value in params.items() if key.strip() in accepted}
    return self._session.put(descriptor, endpoint, payload)
def getNetworkSwitchQosRule(self, networkId: str, qosRuleId: str):
    """
    **Return a quality of service rule**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-qos-rule

    - networkId (string): (required)
    - qosRuleId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/qosRules/{qosRuleId}'
    descriptor = {
        'tags': ['switch', 'configure', 'qosRules'],
        'operation': 'getNetworkSwitchQosRule',
    }
    return self._session.get(descriptor, endpoint)
def deleteNetworkSwitchQosRule(self, networkId: str, qosRuleId: str):
    """
    **Delete a quality of service rule**
    https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-qos-rule

    - networkId (string): (required)
    - qosRuleId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/qosRules/{qosRuleId}'
    descriptor = {
        'tags': ['switch', 'configure', 'qosRules'],
        'operation': 'deleteNetworkSwitchQosRule',
    }
    return self._session.delete(descriptor, endpoint)
def updateNetworkSwitchQosRule(self, networkId: str, qosRuleId: str, **kwargs):
    """
    **Update a quality of service rule**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-qos-rule

    - networkId (string): (required)
    - qosRuleId (string): (required)
    - vlan (integer): The VLAN of the incoming packet. A null value will match any VLAN.
    - protocol (string): The protocol of the incoming packet. Can be one of "ANY", "TCP" or "UDP". Default value is "ANY".
    - srcPort (integer): The source port of the incoming packet. Applicable only if protocol is TCP or UDP.
    - srcPortRange (string): The source port range of the incoming packet. Applicable only if protocol is set to TCP or UDP. Example: 70-80
    - dstPort (integer): The destination port of the incoming packet. Applicable only if protocol is TCP or UDP.
    - dstPortRange (string): The destination port range of the incoming packet. Applicable only if protocol is set to TCP or UDP. Example: 70-80
    - dscp (integer): DSCP tag that should be assigned to incoming packet. Set this to -1 to trust incoming DSCP. Default value is 0.
    """
    kwargs.update(locals())
    # Guard the enumerated parameter before issuing the request.
    if 'protocol' in kwargs:
        options = ['ANY', 'TCP', 'UDP']
        assert kwargs['protocol'] in options, f'''"protocol" cannot be "{kwargs['protocol']}", & must be set to one of: {options}'''
    endpoint = f'/networks/{networkId}/switch/qosRules/{qosRuleId}'
    descriptor = {
        'tags': ['switch', 'configure', 'qosRules'],
        'operation': 'updateNetworkSwitchQosRule',
    }
    accepted = ('vlan', 'protocol', 'srcPort', 'srcPortRange', 'dstPort', 'dstPortRange', 'dscp')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in accepted}
    return self._session.put(descriptor, endpoint, payload)
def getNetworkSwitchRoutingMulticast(self, networkId: str):
    """
    **Return multicast settings for a network**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-routing-multicast

    - networkId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/routing/multicast'
    descriptor = {
        'tags': ['switch', 'configure', 'routing', 'multicast'],
        'operation': 'getNetworkSwitchRoutingMulticast',
    }
    return self._session.get(descriptor, endpoint)
def updateNetworkSwitchRoutingMulticast(self, networkId: str, **kwargs):
    """
    **Update multicast settings for a network**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-routing-multicast

    - networkId (string): (required)
    - defaultSettings (object): Default multicast setting for entire network. IGMP snooping and Flood unknown multicast traffic settings are enabled by default.
    - overrides (array): Array of paired switches/stacks/profiles and corresponding multicast settings. An empty array will clear the multicast settings.
    """
    kwargs.update(locals())
    endpoint = f'/networks/{networkId}/switch/routing/multicast'
    descriptor = {
        'tags': ['switch', 'configure', 'routing', 'multicast'],
        'operation': 'updateNetworkSwitchRoutingMulticast',
    }
    accepted = ('defaultSettings', 'overrides')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in accepted}
    return self._session.put(descriptor, endpoint, payload)
def getNetworkSwitchRoutingMulticastRendezvousPoints(self, networkId: str):
    """
    **List multicast rendezvous points**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-routing-multicast-rendezvous-points

    - networkId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/routing/multicast/rendezvousPoints'
    descriptor = {
        'tags': ['switch', 'configure', 'routing', 'multicast', 'rendezvousPoints'],
        'operation': 'getNetworkSwitchRoutingMulticastRendezvousPoints',
    }
    return self._session.get(descriptor, endpoint)
def createNetworkSwitchRoutingMulticastRendezvousPoint(self, networkId: str, interfaceIp: str, multicastGroup: str):
    """
    **Create a multicast rendezvous point**
    https://developer.cisco.com/meraki/api-v1/#!create-network-switch-routing-multicast-rendezvous-point

    - networkId (string): (required)
    - interfaceIp (string): The IP address of the interface where the RP needs to be created.
    - multicastGroup (string): 'Any', or the IP address of a multicast group
    """
    params = locals()
    endpoint = f'/networks/{networkId}/switch/routing/multicast/rendezvousPoints'
    descriptor = {
        'tags': ['switch', 'configure', 'routing', 'multicast', 'rendezvousPoints'],
        'operation': 'createNetworkSwitchRoutingMulticastRendezvousPoint',
    }
    accepted = ('interfaceIp', 'multicastGroup')
    payload = {key.strip(): value for key, value in params.items() if key.strip() in accepted}
    return self._session.post(descriptor, endpoint, payload)
def getNetworkSwitchRoutingMulticastRendezvousPoint(self, networkId: str, rendezvousPointId: str):
    """
    **Return a multicast rendezvous point**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-routing-multicast-rendezvous-point

    - networkId (string): (required)
    - rendezvousPointId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/routing/multicast/rendezvousPoints/{rendezvousPointId}'
    descriptor = {
        'tags': ['switch', 'configure', 'routing', 'multicast', 'rendezvousPoints'],
        'operation': 'getNetworkSwitchRoutingMulticastRendezvousPoint',
    }
    return self._session.get(descriptor, endpoint)
def deleteNetworkSwitchRoutingMulticastRendezvousPoint(self, networkId: str, rendezvousPointId: str):
    """
    **Delete a multicast rendezvous point**
    https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-routing-multicast-rendezvous-point

    - networkId (string): (required)
    - rendezvousPointId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/routing/multicast/rendezvousPoints/{rendezvousPointId}'
    descriptor = {
        'tags': ['switch', 'configure', 'routing', 'multicast', 'rendezvousPoints'],
        'operation': 'deleteNetworkSwitchRoutingMulticastRendezvousPoint',
    }
    return self._session.delete(descriptor, endpoint)
def updateNetworkSwitchRoutingMulticastRendezvousPoint(self, networkId: str, rendezvousPointId: str, interfaceIp: str, multicastGroup: str):
    """
    **Update a multicast rendezvous point**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-routing-multicast-rendezvous-point

    - networkId (string): (required)
    - rendezvousPointId (string): (required)
    - interfaceIp (string): The IP address of the interface to use
    - multicastGroup (string): 'Any', or the IP address of a multicast group
    """
    params = locals()
    endpoint = f'/networks/{networkId}/switch/routing/multicast/rendezvousPoints/{rendezvousPointId}'
    descriptor = {
        'tags': ['switch', 'configure', 'routing', 'multicast', 'rendezvousPoints'],
        'operation': 'updateNetworkSwitchRoutingMulticastRendezvousPoint',
    }
    accepted = ('interfaceIp', 'multicastGroup')
    payload = {key.strip(): value for key, value in params.items() if key.strip() in accepted}
    return self._session.put(descriptor, endpoint, payload)
def getNetworkSwitchRoutingOspf(self, networkId: str):
    """
    **Return layer 3 OSPF routing configuration**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-routing-ospf

    - networkId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/routing/ospf'
    descriptor = {
        'tags': ['switch', 'configure', 'routing', 'ospf'],
        'operation': 'getNetworkSwitchRoutingOspf',
    }
    return self._session.get(descriptor, endpoint)
def updateNetworkSwitchRoutingOspf(self, networkId: str, **kwargs):
    """
    **Update layer 3 OSPF routing configuration**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-routing-ospf

    - networkId (string): (required)
    - enabled (boolean): Boolean value to enable or disable OSPF routing. OSPF routing is disabled by default.
    - helloTimerInSeconds (integer): Time interval in seconds at which hello packet will be sent to OSPF neighbors to maintain connectivity. Value must be between 1 and 255. Default is 10 seconds.
    - deadTimerInSeconds (integer): Time interval to determine when the peer will be declared inactive/dead. Value must be between 1 and 65535
    - areas (array): OSPF areas
    - v3 (object): OSPF v3 configuration
    - md5AuthenticationEnabled (boolean): Boolean value to enable or disable MD5 authentication. MD5 authentication is disabled by default.
    - md5AuthenticationKey (object): MD5 authentication credentials. This param is only relevant if md5AuthenticationEnabled is true
    """
    kwargs.update(locals())
    endpoint = f'/networks/{networkId}/switch/routing/ospf'
    descriptor = {
        'tags': ['switch', 'configure', 'routing', 'ospf'],
        'operation': 'updateNetworkSwitchRoutingOspf',
    }
    accepted = ('enabled', 'helloTimerInSeconds', 'deadTimerInSeconds', 'areas', 'v3',
                'md5AuthenticationEnabled', 'md5AuthenticationKey')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in accepted}
    return self._session.put(descriptor, endpoint, payload)
def getNetworkSwitchSettings(self, networkId: str):
    """
    **Returns the switch network settings**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-settings

    - networkId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/settings'
    descriptor = {
        'tags': ['switch', 'configure', 'settings'],
        'operation': 'getNetworkSwitchSettings',
    }
    return self._session.get(descriptor, endpoint)
def updateNetworkSwitchSettings(self, networkId: str, **kwargs):
    """
    **Update switch network settings**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-settings

    - networkId (string): (required)
    - vlan (integer): Management VLAN
    - useCombinedPower (boolean): The use Combined Power as the default behavior of secondary power supplies on supported devices.
    - powerExceptions (array): Exceptions on a per switch basis to "useCombinedPower"
    """
    kwargs.update(locals())
    endpoint = f'/networks/{networkId}/switch/settings'
    descriptor = {
        'tags': ['switch', 'configure', 'settings'],
        'operation': 'updateNetworkSwitchSettings',
    }
    accepted = ('vlan', 'useCombinedPower', 'powerExceptions')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in accepted}
    return self._session.put(descriptor, endpoint, payload)
def getNetworkSwitchStacks(self, networkId: str):
    """
    **List the switch stacks in a network**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stacks

    - networkId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/stacks'
    descriptor = {
        'tags': ['switch', 'configure', 'stacks'],
        'operation': 'getNetworkSwitchStacks',
    }
    return self._session.get(descriptor, endpoint)
def createNetworkSwitchStack(self, networkId: str, name: str, serials: list):
    """
    **Create a stack**
    https://developer.cisco.com/meraki/api-v1/#!create-network-switch-stack

    - networkId (string): (required)
    - name (string): The name of the new stack
    - serials (array): An array of switch serials to be added into the new stack
    """
    params = locals()
    endpoint = f'/networks/{networkId}/switch/stacks'
    descriptor = {
        'tags': ['switch', 'configure', 'stacks'],
        'operation': 'createNetworkSwitchStack',
    }
    accepted = ('name', 'serials')
    payload = {key.strip(): value for key, value in params.items() if key.strip() in accepted}
    return self._session.post(descriptor, endpoint, payload)
def getNetworkSwitchStack(self, networkId: str, switchStackId: str):
    """
    **Show a switch stack**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stack

    - networkId (string): (required)
    - switchStackId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/stacks/{switchStackId}'
    descriptor = {
        'tags': ['switch', 'configure', 'stacks'],
        'operation': 'getNetworkSwitchStack',
    }
    return self._session.get(descriptor, endpoint)
def deleteNetworkSwitchStack(self, networkId: str, switchStackId: str):
    """
    **Delete a stack**
    https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-stack

    - networkId (string): (required)
    - switchStackId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/stacks/{switchStackId}'
    descriptor = {
        'tags': ['switch', 'configure', 'stacks'],
        'operation': 'deleteNetworkSwitchStack',
    }
    return self._session.delete(descriptor, endpoint)
def addNetworkSwitchStack(self, networkId: str, switchStackId: str, serial: str):
    """
    **Add a switch to a stack**
    https://developer.cisco.com/meraki/api-v1/#!add-network-switch-stack

    - networkId (string): (required)
    - switchStackId (string): (required)
    - serial (string): The serial of the switch to be added
    """
    params = locals()
    endpoint = f'/networks/{networkId}/switch/stacks/{switchStackId}/add'
    descriptor = {
        'tags': ['switch', 'configure', 'stacks'],
        'operation': 'addNetworkSwitchStack',
    }
    accepted = ('serial',)
    payload = {key.strip(): value for key, value in params.items() if key.strip() in accepted}
    return self._session.post(descriptor, endpoint, payload)
def removeNetworkSwitchStack(self, networkId: str, switchStackId: str, serial: str):
    """
    **Remove a switch from a stack**
    https://developer.cisco.com/meraki/api-v1/#!remove-network-switch-stack

    - networkId (string): (required)
    - switchStackId (string): (required)
    - serial (string): The serial of the switch to be removed
    """
    params = locals()
    endpoint = f'/networks/{networkId}/switch/stacks/{switchStackId}/remove'
    descriptor = {
        'tags': ['switch', 'configure', 'stacks'],
        'operation': 'removeNetworkSwitchStack',
    }
    accepted = ('serial',)
    payload = {key.strip(): value for key, value in params.items() if key.strip() in accepted}
    return self._session.post(descriptor, endpoint, payload)
def getNetworkSwitchStackRoutingInterfaces(self, networkId: str, switchStackId: str):
    """
    **List layer 3 interfaces for a switch stack**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stack-routing-interfaces

    - networkId (string): (required)
    - switchStackId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/interfaces'
    descriptor = {
        'tags': ['switch', 'configure', 'stacks', 'routing', 'interfaces'],
        'operation': 'getNetworkSwitchStackRoutingInterfaces',
    }
    return self._session.get(descriptor, endpoint)
def createNetworkSwitchStackRoutingInterface(self, networkId: str, switchStackId: str, name: str, vlanId: int, **kwargs):
    """
    **Create a layer 3 interface for a switch stack**
    https://developer.cisco.com/meraki/api-v1/#!create-network-switch-stack-routing-interface

    - networkId (string): (required)
    - switchStackId (string): (required)
    - name (string): A friendly name or description for the interface or VLAN.
    - vlanId (integer): The VLAN this routed interface is on. VLAN must be between 1 and 4094.
    - subnet (string): The network that this routed interface is on, in CIDR notation (ex. 10.1.1.0/24).
    - interfaceIp (string): The IP address this switch stack will use for layer 3 routing on this VLAN or subnet. This cannot be the same as the switch's management IP.
    - multicastRouting (string): Enable multicast support if, multicast routing between VLANs is required. Options are, 'disabled', 'enabled' or 'IGMP snooping querier'. Default is 'disabled'.
    - defaultGateway (string): The next hop for any traffic that isn't going to a directly connected subnet or over a static route. This IP address must exist in a subnet with a routed interface.
    - ospfSettings (object): The OSPF routing settings of the interface.
    - ipv6 (object): The IPv6 settings of the interface.
    """
    kwargs.update(locals())
    # Guard the enumerated parameter before issuing the request.
    if 'multicastRouting' in kwargs:
        options = ['disabled', 'enabled', 'IGMP snooping querier']
        assert kwargs['multicastRouting'] in options, f'''"multicastRouting" cannot be "{kwargs['multicastRouting']}", & must be set to one of: {options}'''
    endpoint = f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/interfaces'
    descriptor = {
        'tags': ['switch', 'configure', 'stacks', 'routing', 'interfaces'],
        'operation': 'createNetworkSwitchStackRoutingInterface',
    }
    accepted = ('name', 'subnet', 'interfaceIp', 'multicastRouting', 'vlanId',
                'defaultGateway', 'ospfSettings', 'ipv6')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in accepted}
    return self._session.post(descriptor, endpoint, payload)
def getNetworkSwitchStackRoutingInterface(self, networkId: str, switchStackId: str, interfaceId: str):
    """
    **Return a layer 3 interface from a switch stack**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stack-routing-interface

    - networkId (string): (required)
    - switchStackId (string): (required)
    - interfaceId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/interfaces/{interfaceId}'
    descriptor = {
        'tags': ['switch', 'configure', 'stacks', 'routing', 'interfaces'],
        'operation': 'getNetworkSwitchStackRoutingInterface',
    }
    return self._session.get(descriptor, endpoint)
def updateNetworkSwitchStackRoutingInterface(self, networkId: str, switchStackId: str, interfaceId: str, **kwargs):
    """
    **Update a layer 3 interface for a switch stack**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-stack-routing-interface

    - networkId (string): (required)
    - switchStackId (string): (required)
    - interfaceId (string): (required)
    - name (string): A friendly name or description for the interface or VLAN.
    - subnet (string): The network that this routed interface is on, in CIDR notation (ex. 10.1.1.0/24).
    - interfaceIp (string): The IP address this switch stack will use for layer 3 routing on this VLAN or subnet. This cannot be the same as the switch's management IP.
    - multicastRouting (string): Enable multicast support if, multicast routing between VLANs is required. Options are, 'disabled', 'enabled' or 'IGMP snooping querier'.
    - vlanId (integer): The VLAN this routed interface is on. VLAN must be between 1 and 4094.
    - defaultGateway (string): The next hop for any traffic that isn't going to a directly connected subnet or over a static route. This IP address must exist in a subnet with a routed interface.
    - ospfSettings (object): The OSPF routing settings of the interface.
    - ipv6 (object): The IPv6 settings of the interface.
    """
    kwargs.update(locals())
    # Guard the enumerated parameter before issuing the request.
    if 'multicastRouting' in kwargs:
        options = ['disabled', 'enabled', 'IGMP snooping querier']
        assert kwargs['multicastRouting'] in options, f'''"multicastRouting" cannot be "{kwargs['multicastRouting']}", & must be set to one of: {options}'''
    endpoint = f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/interfaces/{interfaceId}'
    descriptor = {
        'tags': ['switch', 'configure', 'stacks', 'routing', 'interfaces'],
        'operation': 'updateNetworkSwitchStackRoutingInterface',
    }
    accepted = ('name', 'subnet', 'interfaceIp', 'multicastRouting', 'vlanId',
                'defaultGateway', 'ospfSettings', 'ipv6')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in accepted}
    return self._session.put(descriptor, endpoint, payload)
def deleteNetworkSwitchStackRoutingInterface(self, networkId: str, switchStackId: str, interfaceId: str):
    """
    **Delete a layer 3 interface from a switch stack**
    https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-stack-routing-interface

    - networkId (string): (required)
    - switchStackId (string): (required)
    - interfaceId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/interfaces/{interfaceId}'
    descriptor = {
        'tags': ['switch', 'configure', 'stacks', 'routing', 'interfaces'],
        'operation': 'deleteNetworkSwitchStackRoutingInterface',
    }
    return self._session.delete(descriptor, endpoint)
def getNetworkSwitchStackRoutingInterfaceDhcp(self, networkId: str, switchStackId: str, interfaceId: str):
    """
    **Return a layer 3 interface DHCP configuration for a switch stack**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stack-routing-interface-dhcp

    - networkId (string): (required)
    - switchStackId (string): (required)
    - interfaceId (string): (required)
    """
    endpoint = f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/interfaces/{interfaceId}/dhcp'
    descriptor = {
        'tags': ['switch', 'configure', 'stacks', 'routing', 'interfaces', 'dhcp'],
        'operation': 'getNetworkSwitchStackRoutingInterfaceDhcp',
    }
    return self._session.get(descriptor, endpoint)
def updateNetworkSwitchStackRoutingInterfaceDhcp(self, networkId: str, switchStackId: str, interfaceId: str, **kwargs):
    """
    **Update a layer 3 interface DHCP configuration for a switch stack**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-stack-routing-interface-dhcp

    - networkId (string): (required)
    - switchStackId (string): (required)
    - interfaceId (string): (required)
    - dhcpMode (string): The DHCP mode options for the switch stack interface ('dhcpDisabled', 'dhcpRelay' or 'dhcpServer')
    - dhcpRelayServerIps (array): The DHCP relay server IPs to which DHCP packets would get relayed for the switch stack interface
    - dhcpLeaseTime (string): The DHCP lease time config for the dhcp server running on switch stack interface ('30 minutes', '1 hour', '4 hours', '12 hours', '1 day' or '1 week')
    - dnsNameserversOption (string): The DHCP name server option for the dhcp server running on the switch stack interface ('googlePublicDns', 'openDns' or 'custom')
    - dnsCustomNameservers (array): The DHCP name server IPs when DHCP name server option is 'custom'
    - bootOptionsEnabled (boolean): Enable DHCP boot options to provide PXE boot options configs for the dhcp server running on the switch stack interface
    - bootNextServer (string): The PXE boot server IP for the DHCP server running on the switch stack interface
    - bootFileName (string): The PXE boot server file name for the DHCP server running on the switch stack interface
    - dhcpOptions (array): Array of DHCP options consisting of code, type and value for the DHCP server running on the switch stack interface
    - reservedIpRanges (array): Array of DHCP reserved IP assignments for the DHCP server running on the switch stack interface
    - fixedIpAssignments (array): Array of DHCP fixed IP assignments for the DHCP server running on the switch stack interface
    """
    # Fold the positional arguments into kwargs so they pass the same filter.
    kwargs.update(locals())
    # Validate enumerated parameters before sending anything to the API.
    if 'dhcpMode' in kwargs:
        options = ['dhcpDisabled', 'dhcpRelay', 'dhcpServer']
        assert kwargs['dhcpMode'] in options, f'''"dhcpMode" cannot be "{kwargs['dhcpMode']}", & must be set to one of: {options}'''
    if 'dhcpLeaseTime' in kwargs:
        options = ['30 minutes', '1 hour', '4 hours', '12 hours', '1 day', '1 week']
        assert kwargs['dhcpLeaseTime'] in options, f'''"dhcpLeaseTime" cannot be "{kwargs['dhcpLeaseTime']}", & must be set to one of: {options}'''
    if 'dnsNameserversOption' in kwargs:
        options = ['googlePublicDns', 'openDns', 'custom']
        assert kwargs['dnsNameserversOption'] in options, f'''"dnsNameserversOption" cannot be "{kwargs['dnsNameserversOption']}", & must be set to one of: {options}'''
    # Keep only the documented body parameters (keys are stripped of stray whitespace).
    allowed = ['dhcpMode', 'dhcpRelayServerIps', 'dhcpLeaseTime', 'dnsNameserversOption', 'dnsCustomNameservers', 'bootOptionsEnabled', 'bootNextServer', 'bootFileName', 'dhcpOptions', 'reservedIpRanges', 'fixedIpAssignments', ]
    payload = {}
    for raw_key, value in kwargs.items():
        key = raw_key.strip()
        if key in allowed:
            payload[key] = value
    return self._session.put(
        {
            'tags': ['switch', 'configure', 'stacks', 'routing', 'interfaces', 'dhcp'],
            'operation': 'updateNetworkSwitchStackRoutingInterfaceDhcp'
        },
        f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/interfaces/{interfaceId}/dhcp',
        payload,
    )
def getNetworkSwitchStackRoutingStaticRoutes(self, networkId: str, switchStackId: str):
    """
    **List layer 3 static routes for a switch stack**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stack-routing-static-routes

    - networkId (string): (required)
    - switchStackId (string): (required)
    """
    # Read-only listing; no query parameters are supported.
    return self._session.get(
        {
            'tags': ['switch', 'configure', 'stacks', 'routing', 'staticRoutes'],
            'operation': 'getNetworkSwitchStackRoutingStaticRoutes'
        },
        f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/staticRoutes',
    )
def createNetworkSwitchStackRoutingStaticRoute(self, networkId: str, switchStackId: str, subnet: str, nextHopIp: str, **kwargs):
    """
    **Create a layer 3 static route for a switch stack**
    https://developer.cisco.com/meraki/api-v1/#!create-network-switch-stack-routing-static-route

    - networkId (string): (required)
    - switchStackId (string): (required)
    - subnet (string): The subnet which is routed via this static route and should be specified in CIDR notation (ex. 1.2.3.0/24)
    - nextHopIp (string): IP address of the next hop device to which the device sends its traffic for the subnet
    - name (string): Name or description for layer 3 static route
    - advertiseViaOspfEnabled (boolean): Option to advertise static route via OSPF
    - preferOverOspfRoutesEnabled (boolean): Option to prefer static route over OSPF routes
    """
    # Merge required positional arguments into kwargs so the filter below picks them up.
    kwargs.update(locals())
    allowed = ['name', 'subnet', 'nextHopIp', 'advertiseViaOspfEnabled', 'preferOverOspfRoutesEnabled', ]
    payload = {}
    for raw_key, value in kwargs.items():
        key = raw_key.strip()
        if key in allowed:
            payload[key] = value
    return self._session.post(
        {
            'tags': ['switch', 'configure', 'stacks', 'routing', 'staticRoutes'],
            'operation': 'createNetworkSwitchStackRoutingStaticRoute'
        },
        f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/staticRoutes',
        payload,
    )
def getNetworkSwitchStackRoutingStaticRoute(self, networkId: str, switchStackId: str, staticRouteId: str):
    """
    **Return a layer 3 static route for a switch stack**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stack-routing-static-route

    - networkId (string): (required)
    - switchStackId (string): (required)
    - staticRouteId (string): (required)
    """
    # Fetch a single static route by its identifier.
    return self._session.get(
        {
            'tags': ['switch', 'configure', 'stacks', 'routing', 'staticRoutes'],
            'operation': 'getNetworkSwitchStackRoutingStaticRoute'
        },
        f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/staticRoutes/{staticRouteId}',
    )
def updateNetworkSwitchStackRoutingStaticRoute(self, networkId: str, switchStackId: str, staticRouteId: str, **kwargs):
    """
    **Update a layer 3 static route for a switch stack**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-stack-routing-static-route

    - networkId (string): (required)
    - switchStackId (string): (required)
    - staticRouteId (string): (required)
    - name (string): Name or description for layer 3 static route
    - subnet (string): The subnet which is routed via this static route and should be specified in CIDR notation (ex. 1.2.3.0/24)
    - nextHopIp (string): IP address of the next hop device to which the device sends its traffic for the subnet
    - advertiseViaOspfEnabled (boolean): Option to advertise static route via OSPF
    - preferOverOspfRoutesEnabled (boolean): Option to prefer static route over OSPF routes
    """
    # Fold positionals into kwargs (path parameters are filtered out below).
    kwargs.update(locals())
    allowed = ['name', 'subnet', 'nextHopIp', 'advertiseViaOspfEnabled', 'preferOverOspfRoutesEnabled', ]
    payload = {}
    for raw_key, value in kwargs.items():
        key = raw_key.strip()
        if key in allowed:
            payload[key] = value
    return self._session.put(
        {
            'tags': ['switch', 'configure', 'stacks', 'routing', 'staticRoutes'],
            'operation': 'updateNetworkSwitchStackRoutingStaticRoute'
        },
        f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/staticRoutes/{staticRouteId}',
        payload,
    )
def deleteNetworkSwitchStackRoutingStaticRoute(self, networkId: str, switchStackId: str, staticRouteId: str):
    """
    **Delete a layer 3 static route for a switch stack**
    https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-stack-routing-static-route

    - networkId (string): (required)
    - switchStackId (string): (required)
    - staticRouteId (string): (required)
    """
    # Issue a DELETE against the specific static route.
    return self._session.delete(
        {
            'tags': ['switch', 'configure', 'stacks', 'routing', 'staticRoutes'],
            'operation': 'deleteNetworkSwitchStackRoutingStaticRoute'
        },
        f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/staticRoutes/{staticRouteId}',
    )
def getNetworkSwitchStormControl(self, networkId: str):
    """
    **Return the storm control configuration for a switch network**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-storm-control

    - networkId (string): (required)
    """
    # Network-wide storm control settings; read-only.
    return self._session.get(
        {'tags': ['switch', 'configure', 'stormControl'], 'operation': 'getNetworkSwitchStormControl'},
        f'/networks/{networkId}/switch/stormControl',
    )
def updateNetworkSwitchStormControl(self, networkId: str, **kwargs):
    """
    **Update the storm control configuration for a switch network**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-storm-control

    - networkId (string): (required)
    - broadcastThreshold (integer): Percentage (1 to 99) of total available port bandwidth for broadcast traffic type. Default value 100 percent rate is to clear the configuration.
    - multicastThreshold (integer): Percentage (1 to 99) of total available port bandwidth for multicast traffic type. Default value 100 percent rate is to clear the configuration.
    - unknownUnicastThreshold (integer): Percentage (1 to 99) of total available port bandwidth for unknown unicast (dlf-destination lookup failure) traffic type. Default value 100 percent rate is to clear the configuration.
    """
    kwargs.update(locals())
    # Only the three threshold parameters are valid body fields.
    allowed = ['broadcastThreshold', 'multicastThreshold', 'unknownUnicastThreshold', ]
    payload = {}
    for raw_key, value in kwargs.items():
        key = raw_key.strip()
        if key in allowed:
            payload[key] = value
    return self._session.put(
        {'tags': ['switch', 'configure', 'stormControl'], 'operation': 'updateNetworkSwitchStormControl'},
        f'/networks/{networkId}/switch/stormControl',
        payload,
    )
def getNetworkSwitchStp(self, networkId: str):
    """
    **Returns STP settings**
    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stp

    - networkId (string): (required)
    """
    # Spanning-tree configuration for the whole network.
    return self._session.get(
        {'tags': ['switch', 'configure', 'stp'], 'operation': 'getNetworkSwitchStp'},
        f'/networks/{networkId}/switch/stp',
    )
def updateNetworkSwitchStp(self, networkId: str, **kwargs):
    """
    **Updates STP settings**
    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-stp

    - networkId (string): (required)
    - rstpEnabled (boolean): The spanning tree protocol status in network
    - stpBridgePriority (array): STP bridge priority for switches/stacks or switch profiles. An empty array will clear the STP bridge priority settings.
    """
    kwargs.update(locals())
    allowed = ['rstpEnabled', 'stpBridgePriority', ]
    payload = {}
    for raw_key, value in kwargs.items():
        key = raw_key.strip()
        if key in allowed:
            payload[key] = value
    return self._session.put(
        {'tags': ['switch', 'configure', 'stp'], 'operation': 'updateNetworkSwitchStp'},
        f'/networks/{networkId}/switch/stp',
        payload,
    )
def getOrganizationConfigTemplateSwitchProfiles(self, organizationId: str, configTemplateId: str):
    """
    **List the switch profiles for your switch template configuration**
    https://developer.cisco.com/meraki/api-v1/#!get-organization-config-template-switch-profiles

    - organizationId (string): (required)
    - configTemplateId (string): (required)
    """
    # Enumerate switch profiles attached to the given config template.
    return self._session.get(
        {
            'tags': ['switch', 'configure', 'configTemplates', 'profiles'],
            'operation': 'getOrganizationConfigTemplateSwitchProfiles'
        },
        f'/organizations/{organizationId}/configTemplates/{configTemplateId}/switch/profiles',
    )
def getOrganizationConfigTemplateSwitchProfilePorts(self, organizationId: str, configTemplateId: str, profileId: str):
    """
    **Return all the ports of a switch profile**
    https://developer.cisco.com/meraki/api-v1/#!get-organization-config-template-switch-profile-ports

    - organizationId (string): (required)
    - configTemplateId (string): (required)
    - profileId (string): (required)
    """
    # List every port defined on the profile.
    return self._session.get(
        {
            'tags': ['switch', 'configure', 'configTemplates', 'profiles', 'ports'],
            'operation': 'getOrganizationConfigTemplateSwitchProfilePorts'
        },
        f'/organizations/{organizationId}/configTemplates/{configTemplateId}/switch/profiles/{profileId}/ports',
    )
def getOrganizationConfigTemplateSwitchProfilePort(self, organizationId: str, configTemplateId: str, profileId: str, portId: str):
    """
    **Return a switch profile port**
    https://developer.cisco.com/meraki/api-v1/#!get-organization-config-template-switch-profile-port

    - organizationId (string): (required)
    - configTemplateId (string): (required)
    - profileId (string): (required)
    - portId (string): (required)
    """
    # Fetch a single port's configuration from the profile.
    return self._session.get(
        {
            'tags': ['switch', 'configure', 'configTemplates', 'profiles', 'ports'],
            'operation': 'getOrganizationConfigTemplateSwitchProfilePort'
        },
        f'/organizations/{organizationId}/configTemplates/{configTemplateId}/switch/profiles/{profileId}/ports/{portId}',
    )
def updateOrganizationConfigTemplateSwitchProfilePort(self, organizationId: str, configTemplateId: str, profileId: str, portId: str, **kwargs):
    """
    **Update a switch profile port**
    https://developer.cisco.com/meraki/api-v1/#!update-organization-config-template-switch-profile-port

    - organizationId (string): (required)
    - configTemplateId (string): (required)
    - profileId (string): (required)
    - portId (string): (required)
    - name (string): The name of the switch profile port
    - tags (array): The list of tags of the switch profile port
    - enabled (boolean): The status of the switch profile port
    - type (string): The type of the switch profile port ('trunk' or 'access')
    - vlan (integer): The VLAN of the switch profile port. A null value will clear the value set for trunk ports.
    - voiceVlan (integer): The voice VLAN of the switch profile port. Only applicable to access ports
    - allowedVlans (string): The VLANs allowed on the switch profile port. Only applicable to trunk ports
    - poeEnabled (boolean): The PoE status of the switch profile port
    - isolationEnabled (boolean): The isolation status of the switch profile port
    - rstpEnabled (boolean): The rapid spanning tree protocol status
    - stpGuard (string): The state of the STP guard ('disabled', 'root guard', 'bpdu guard' or 'loop guard')
    - linkNegotiation (string): The link speed for the switch profile port
    - portScheduleId (string): The ID of the port schedule. A value of null will clear the port schedule.
    - udld (string): The action to take when Unidirectional Link is detected (Alert only, Enforce). Default configuration is Alert only.
    - accessPolicyType (string): The type of the access policy of the switch profile port. Only applicable to access ports. Can be one of 'Open', 'Custom access policy', 'MAC allow list' or 'Sticky MAC allow list'
    - accessPolicyNumber (integer): The number of a custom access policy to configure on the switch profile port. Only applicable when 'accessPolicyType' is 'Custom access policy'
    - macAllowList (array): Only devices with MAC addresses specified in this list will have access to this port. Up to 20 MAC addresses can be defined. Only applicable when 'accessPolicyType' is 'MAC allow list'
    - stickyMacAllowList (array): The initial list of MAC addresses for sticky Mac allow list. Only applicable when 'accessPolicyType' is 'Sticky MAC allow list'
    - stickyMacAllowListLimit (integer): The maximum number of MAC addresses for sticky MAC allow list. Only applicable when 'accessPolicyType' is 'Sticky MAC allow list'
    - stormControlEnabled (boolean): The storm control status of the switch profile port
    - flexibleStackingEnabled (boolean): For supported switches (e.g. MS420/MS425), whether or not the port has flexible stacking enabled.
    """
    # Fold positional args into kwargs; path params are excluded by the filter below.
    kwargs.update(locals())
    # Validate each enumerated option before building the request.
    if 'type' in kwargs:
        options = ['trunk', 'access']
        assert kwargs['type'] in options, f'''"type" cannot be "{kwargs['type']}", & must be set to one of: {options}'''
    if 'stpGuard' in kwargs:
        options = ['disabled', 'root guard', 'bpdu guard', 'loop guard']
        assert kwargs['stpGuard'] in options, f'''"stpGuard" cannot be "{kwargs['stpGuard']}", & must be set to one of: {options}'''
    if 'udld' in kwargs:
        options = ['Alert only', 'Enforce']
        assert kwargs['udld'] in options, f'''"udld" cannot be "{kwargs['udld']}", & must be set to one of: {options}'''
    if 'accessPolicyType' in kwargs:
        options = ['Open', 'Custom access policy', 'MAC allow list', 'Sticky MAC allow list']
        assert kwargs['accessPolicyType'] in options, f'''"accessPolicyType" cannot be "{kwargs['accessPolicyType']}", & must be set to one of: {options}'''
    allowed = ['name', 'tags', 'enabled', 'type', 'vlan', 'voiceVlan', 'allowedVlans', 'poeEnabled', 'isolationEnabled', 'rstpEnabled', 'stpGuard', 'linkNegotiation', 'portScheduleId', 'udld', 'accessPolicyType', 'accessPolicyNumber', 'macAllowList', 'stickyMacAllowList', 'stickyMacAllowListLimit', 'stormControlEnabled', 'flexibleStackingEnabled', ]
    payload = {}
    for raw_key, value in kwargs.items():
        key = raw_key.strip()
        if key in allowed:
            payload[key] = value
    return self._session.put(
        {
            'tags': ['switch', 'configure', 'configTemplates', 'profiles', 'ports'],
            'operation': 'updateOrganizationConfigTemplateSwitchProfilePort'
        },
        f'/organizations/{organizationId}/configTemplates/{configTemplateId}/switch/profiles/{profileId}/ports/{portId}',
        payload,
    )
def cloneOrganizationSwitchDevices(self, organizationId: str, sourceSerial: str, targetSerials: list):
    """
    **Clone port-level and some switch-level configuration settings from a source switch to one or more target switches**
    https://developer.cisco.com/meraki/api-v1/#!clone-organization-switch-devices

    - organizationId (string): (required)
    - sourceSerial (string): Serial number of the source switch (must be on a network not bound to a template)
    - targetSerials (array): Array of serial numbers of one or more target switches (must be on a network not bound to a template)
    """
    # Snapshot the call arguments; only the two body parameters survive the filter.
    kwargs = locals()
    allowed = ['sourceSerial', 'targetSerials', ]
    payload = {}
    for raw_key, value in kwargs.items():
        key = raw_key.strip()
        if key in allowed:
            payload[key] = value
    return self._session.post(
        {'tags': ['switch', 'configure', 'devices'], 'operation': 'cloneOrganizationSwitchDevices'},
        f'/organizations/{organizationId}/switch/devices/clone',
        payload,
    )
class Switch(object):
    """Generated wrapper class exposing the Meraki Dashboard API switch endpoints.

    Each method delegates to the shared REST session object supplied at
    construction time.
    """

    def __init__(self, session):
        # NOTE(review): line previously began with a stray "| " (extraction
        # artifact) that made the class statement a syntax error; removed.
        super(Switch, self).__init__()
        # Shared REST session used by every endpoint method below.
        self._session = session
def getDeviceSwitchPorts(self, serial: str):
    """
    **List the switch ports for a switch**
    https://developer.cisco.com/meraki/api-v1/#!get-device-switch-ports

    - serial (string): (required)
    """
    # Enumerate all ports of the given device.
    return self._session.get(
        {'tags': ['switch', 'configure', 'ports'], 'operation': 'getDeviceSwitchPorts'},
        f'/devices/{serial}/switch/ports',
    )
def cycleDeviceSwitchPorts(self, serial: str, ports: list):
    """
    **Cycle a set of switch ports**
    https://developer.cisco.com/meraki/api-v1/#!cycle-device-switch-ports

    - serial (string): (required)
    - ports (array): List of switch ports. Example: [1, 2-5, 1_MA-MOD-8X10G_1, 1_MA-MOD-8X10G_2-1_MA-MOD-8X10G_8]
    """
    # Snapshot arguments; only 'ports' is a valid body parameter.
    kwargs = locals()
    allowed = ['ports', ]
    payload = {}
    for raw_key, value in kwargs.items():
        key = raw_key.strip()
        if key in allowed:
            payload[key] = value
    return self._session.post(
        {'tags': ['switch', 'liveTools', 'ports'], 'operation': 'cycleDeviceSwitchPorts'},
        f'/devices/{serial}/switch/ports/cycle',
        payload,
    )
def getDeviceSwitchPortsStatuses(self, serial: str, **kwargs):
    """
    **Return the status for all the ports of a switch**
    https://developer.cisco.com/meraki/api-v1/#!get-device-switch-ports-statuses

    - serial (string): (required)
    - t0 (string): The beginning of the timespan for the data. The maximum lookback period is 31 days from today.
    - timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameter t0. The value must be in seconds and be less than or equal to 31 days. The default is 1 day.
    """
    kwargs.update(locals())
    # Only t0/timespan are forwarded as query-string parameters.
    allowed = ['t0', 'timespan', ]
    params = {}
    for raw_key, value in kwargs.items():
        key = raw_key.strip()
        if key in allowed:
            params[key] = value
    return self._session.get(
        {'tags': ['switch', 'monitor', 'ports', 'statuses'], 'operation': 'getDeviceSwitchPortsStatuses'},
        f'/devices/{serial}/switch/ports/statuses',
        params,
    )
def getDeviceSwitchPortsStatusesPackets(self, serial: str, **kwargs):
    """
    **Return the packet counters for all the ports of a switch**
    https://developer.cisco.com/meraki/api-v1/#!get-device-switch-ports-statuses-packets

    - serial (string): (required)
    - t0 (string): The beginning of the timespan for the data. The maximum lookback period is 1 day from today.
    - timespan (number): The timespan for which the information will be fetched. If specifying timespan, do not specify parameter t0. The value must be in seconds and be less than or equal to 1 day. The default is 1 day.
    """
    kwargs.update(locals())
    allowed = ['t0', 'timespan', ]
    params = {}
    for raw_key, value in kwargs.items():
        key = raw_key.strip()
        if key in allowed:
            params[key] = value
    return self._session.get(
        {'tags': ['switch', 'monitor', 'ports', 'statuses', 'packets'], 'operation': 'getDeviceSwitchPortsStatusesPackets'},
        f'/devices/{serial}/switch/ports/statuses/packets',
        params,
    )
def getDeviceSwitchPort(self, serial: str, portId: str):
    """
    **Return a switch port**
    https://developer.cisco.com/meraki/api-v1/#!get-device-switch-port

    - serial (string): (required)
    - portId (string): (required)
    """
    # Fetch one port's configuration by its identifier.
    return self._session.get(
        {'tags': ['switch', 'configure', 'ports'], 'operation': 'getDeviceSwitchPort'},
        f'/devices/{serial}/switch/ports/{portId}',
    )
def updateDeviceSwitchPort(self, serial: str, portId: str, **kwargs):
    """
    **Update a switch port**
    https://developer.cisco.com/meraki/api-v1/#!update-device-switch-port

    - serial (string): (required)
    - portId (string): (required)
    - name (string): The name of the switch port
    - tags (array): The list of tags of the switch port
    - enabled (boolean): The status of the switch port
    - type (string): The type of the switch port ('trunk' or 'access')
    - vlan (integer): The VLAN of the switch port. A null value will clear the value set for trunk ports.
    - voiceVlan (integer): The voice VLAN of the switch port. Only applicable to access ports.
    - allowedVlans (string): The VLANs allowed on the switch port. Only applicable to trunk ports.
    - poeEnabled (boolean): The PoE status of the switch port
    - isolationEnabled (boolean): The isolation status of the switch port
    - rstpEnabled (boolean): The rapid spanning tree protocol status
    - stpGuard (string): The state of the STP guard ('disabled', 'root guard', 'bpdu guard' or 'loop guard')
    - linkNegotiation (string): The link speed for the switch port
    - portScheduleId (string): The ID of the port schedule. A value of null will clear the port schedule.
    - udld (string): The action to take when Unidirectional Link is detected (Alert only, Enforce). Default configuration is Alert only.
    - accessPolicyType (string): The type of the access policy of the switch port. Only applicable to access ports. Can be one of 'Open', 'Custom access policy', 'MAC allow list' or 'Sticky MAC allow list'
    - accessPolicyNumber (integer): The number of a custom access policy to configure on the switch port. Only applicable when 'accessPolicyType' is 'Custom access policy'
    - macAllowList (array): Only devices with MAC addresses specified in this list will have access to this port. Up to 20 MAC addresses can be defined. Only applicable when 'accessPolicyType' is 'MAC allow list'
    - stickyMacAllowList (array): The initial list of MAC addresses for sticky Mac allow list. Only applicable when 'accessPolicyType' is 'Sticky MAC allow list'
    - stickyMacAllowListLimit (integer): The maximum number of MAC addresses for sticky MAC allow list. Only applicable when 'accessPolicyType' is 'Sticky MAC allow list'
    - stormControlEnabled (boolean): The storm control status of the switch port
    - adaptivePolicyGroupId (string): The adaptive policy group ID that will be used to tag traffic through this switch port. This ID must pre-exist during the configuration, else needs to be created using adaptivePolicy/groups API. Cannot be applied to a port on a switch bound to profile.
    - peerSgtCapable (boolean): If true, Peer SGT is enabled for traffic through this switch port. Applicable to trunk port only, not access port. Cannot be applied to a port on a switch bound to profile.
    - flexibleStackingEnabled (boolean): For supported switches (e.g. MS420/MS425), whether or not the port has flexible stacking enabled.
    """
    # Fold positional args into kwargs; path params are excluded by the filter below.
    kwargs.update(locals())
    # Validate each enumerated option before building the request.
    if 'type' in kwargs:
        options = ['trunk', 'access']
        assert kwargs['type'] in options, f'''"type" cannot be "{kwargs['type']}", & must be set to one of: {options}'''
    if 'stpGuard' in kwargs:
        options = ['disabled', 'root guard', 'bpdu guard', 'loop guard']
        assert kwargs['stpGuard'] in options, f'''"stpGuard" cannot be "{kwargs['stpGuard']}", & must be set to one of: {options}'''
    if 'udld' in kwargs:
        options = ['Alert only', 'Enforce']
        assert kwargs['udld'] in options, f'''"udld" cannot be "{kwargs['udld']}", & must be set to one of: {options}'''
    if 'accessPolicyType' in kwargs:
        options = ['Open', 'Custom access policy', 'MAC allow list', 'Sticky MAC allow list']
        assert kwargs['accessPolicyType'] in options, f'''"accessPolicyType" cannot be "{kwargs['accessPolicyType']}", & must be set to one of: {options}'''
    allowed = ['name', 'tags', 'enabled', 'type', 'vlan', 'voiceVlan', 'allowedVlans', 'poeEnabled', 'isolationEnabled', 'rstpEnabled', 'stpGuard', 'linkNegotiation', 'portScheduleId', 'udld', 'accessPolicyType', 'accessPolicyNumber', 'macAllowList', 'stickyMacAllowList', 'stickyMacAllowListLimit', 'stormControlEnabled', 'adaptivePolicyGroupId', 'peerSgtCapable', 'flexibleStackingEnabled', ]
    payload = {}
    for raw_key, value in kwargs.items():
        key = raw_key.strip()
        if key in allowed:
            payload[key] = value
    return self._session.put(
        {'tags': ['switch', 'configure', 'ports'], 'operation': 'updateDeviceSwitchPort'},
        f'/devices/{serial}/switch/ports/{portId}',
        payload,
    )
def getDeviceSwitchRoutingInterfaces(self, serial: str):
    """
    **List layer 3 interfaces for a switch**
    https://developer.cisco.com/meraki/api-v1/#!get-device-switch-routing-interfaces

    - serial (string): (required)
    """
    # Enumerate every routed (layer 3) interface on the device.
    return self._session.get(
        {'tags': ['switch', 'configure', 'routing', 'interfaces'], 'operation': 'getDeviceSwitchRoutingInterfaces'},
        f'/devices/{serial}/switch/routing/interfaces',
    )
def createDeviceSwitchRoutingInterface(self, serial: str, name: str, vlanId: int, **kwargs):
    """
    **Create a layer 3 interface for a switch**
    https://developer.cisco.com/meraki/api-v1/#!create-device-switch-routing-interface

    - serial (string): (required)
    - name (string): A friendly name or description for the interface or VLAN.
    - vlanId (integer): The VLAN this routed interface is on. VLAN must be between 1 and 4094.
    - subnet (string): The network that this routed interface is on, in CIDR notation (ex. 10.1.1.0/24).
    - interfaceIp (string): The IP address this switch will use for layer 3 routing on this VLAN or subnet. This cannot be the same as the switch's management IP.
    - multicastRouting (string): Enable multicast support if, multicast routing between VLANs is required. Options are, 'disabled', 'enabled' or 'IGMP snooping querier'. Default is 'disabled'.
    - defaultGateway (string): The next hop for any traffic that isn't going to a directly connected subnet or over a static route. This IP address must exist in a subnet with a routed interface.
    - ospfSettings (object): The OSPF routing settings of the interface.
    - ipv6 (object): The IPv6 settings of the interface.
    """
    # Fold required positionals (name, vlanId) into kwargs for the filter below.
    kwargs.update(locals())
    if 'multicastRouting' in kwargs:
        options = ['disabled', 'enabled', 'IGMP snooping querier']
        assert kwargs['multicastRouting'] in options, f'''"multicastRouting" cannot be "{kwargs['multicastRouting']}", & must be set to one of: {options}'''
    allowed = ['name', 'subnet', 'interfaceIp', 'multicastRouting', 'vlanId', 'defaultGateway', 'ospfSettings', 'ipv6', ]
    payload = {}
    for raw_key, value in kwargs.items():
        key = raw_key.strip()
        if key in allowed:
            payload[key] = value
    return self._session.post(
        {'tags': ['switch', 'configure', 'routing', 'interfaces'], 'operation': 'createDeviceSwitchRoutingInterface'},
        f'/devices/{serial}/switch/routing/interfaces',
        payload,
    )
def getDeviceSwitchRoutingInterface(self, serial: str, interfaceId: str):
    """
    **Return a layer 3 interface for a switch**
    https://developer.cisco.com/meraki/api-v1/#!get-device-switch-routing-interface

    - serial (string): (required)
    - interfaceId (string): (required)
    """
    # Fetch a single routed interface by its identifier.
    return self._session.get(
        {'tags': ['switch', 'configure', 'routing', 'interfaces'], 'operation': 'getDeviceSwitchRoutingInterface'},
        f'/devices/{serial}/switch/routing/interfaces/{interfaceId}',
    )
def updateDeviceSwitchRoutingInterface(self, serial: str, interfaceId: str, **kwargs):
    """
    **Update a layer 3 interface for a switch**
    https://developer.cisco.com/meraki/api-v1/#!update-device-switch-routing-interface

    - serial (string): (required)
    - interfaceId (string): (required)
    - name (string): A friendly name or description for the interface or VLAN.
    - subnet (string): The network that this routed interface is on, in CIDR notation (ex. 10.1.1.0/24).
    - interfaceIp (string): The IP address this switch will use for layer 3 routing on this VLAN or subnet. This cannot be the same as the switch's management IP.
    - multicastRouting (string): Enable multicast support if, multicast routing between VLANs is required. Options are, 'disabled', 'enabled' or 'IGMP snooping querier'.
    - vlanId (integer): The VLAN this routed interface is on. VLAN must be between 1 and 4094.
    - defaultGateway (string): The next hop for any traffic that isn't going to a directly connected subnet or over a static route. This IP address must exist in a subnet with a routed interface.
    - ospfSettings (object): The OSPF routing settings of the interface.
    - ipv6 (object): The IPv6 settings of the interface.
    """
    kwargs.update(locals())
    if 'multicastRouting' in kwargs:
        options = ['disabled', 'enabled', 'IGMP snooping querier']
        assert kwargs['multicastRouting'] in options, f'''"multicastRouting" cannot be "{kwargs['multicastRouting']}", & must be set to one of: {options}'''
    allowed = ['name', 'subnet', 'interfaceIp', 'multicastRouting', 'vlanId', 'defaultGateway', 'ospfSettings', 'ipv6', ]
    payload = {}
    for raw_key, value in kwargs.items():
        key = raw_key.strip()
        if key in allowed:
            payload[key] = value
    return self._session.put(
        {'tags': ['switch', 'configure', 'routing', 'interfaces'], 'operation': 'updateDeviceSwitchRoutingInterface'},
        f'/devices/{serial}/switch/routing/interfaces/{interfaceId}',
        payload,
    )
def deleteDeviceSwitchRoutingInterface(self, serial: str, interfaceId: str):
    """
    **Delete a layer 3 interface from the switch**
    https://developer.cisco.com/meraki/api-v1/#!delete-device-switch-routing-interface

    - serial (string): (required)
    - interfaceId (string): (required)
    """
    # Issue a DELETE against the specific routed interface.
    return self._session.delete(
        {'tags': ['switch', 'configure', 'routing', 'interfaces'], 'operation': 'deleteDeviceSwitchRoutingInterface'},
        f'/devices/{serial}/switch/routing/interfaces/{interfaceId}',
    )
def getDeviceSwitchRoutingInterfaceDhcp(self, serial: str, interfaceId: str):
    """
    **Return a layer 3 interface DHCP configuration for a switch**
    https://developer.cisco.com/meraki/api-v1/#!get-device-switch-routing-interface-dhcp

    - serial (string): (required)
    - interfaceId (string): (required)
    """
    # Read-only fetch of the interface's DHCP settings.
    return self._session.get(
        {
            'tags': ['switch', 'configure', 'routing', 'interfaces', 'dhcp'],
            'operation': 'getDeviceSwitchRoutingInterfaceDhcp'
        },
        f'/devices/{serial}/switch/routing/interfaces/{interfaceId}/dhcp',
    )
def updateDeviceSwitchRoutingInterfaceDhcp(self, serial: str, interfaceId: str, **kwargs):
"""
**Update a layer 3 interface DHCP configuration for a switch**
https://developer.cisco.com/meraki/api-v1/#!update-device-switch-routing-interface-dhcp
- serial (string): (required)
- interfaceId (string): (required)
- dhcpMode (string): The DHCP mode options for the switch interface ('dhcpDisabled', 'dhcpRelay' or 'dhcpServer')
- dhcpRelayServerIps (array): The DHCP relay server IPs to which DHCP packets would get relayed for the switch interface
- dhcpLeaseTime (string): The DHCP lease time config for the dhcp server running on switch interface ('30 minutes', '1 hour', '4 hours', '12 hours', '1 day' or '1 week')
- dnsNameserversOption (string): The DHCP name server option for the dhcp server running on the switch interface ('googlePublicDns', 'openDns' or 'custom')
- dnsCustomNameservers (array): The DHCP name server IPs when DHCP name server option is 'custom'
- bootOptionsEnabled (boolean): Enable DHCP boot options to provide PXE boot options configs for the dhcp server running on the switch interface
- bootNextServer (string): The PXE boot server IP for the DHCP server running on the switch interface
- bootFileName (string): The PXE boot server filename for the DHCP server running on the switch interface
- dhcpOptions (array): Array of DHCP options consisting of code, type and value for the DHCP server running on the switch interface
- reservedIpRanges (array): Array of DHCP reserved IP assignments for the DHCP server running on the switch interface
- fixedIpAssignments (array): Array of DHCP fixed IP assignments for the DHCP server running on the switch interface
"""
kwargs.update(locals())
if 'dhcpMode' in kwargs:
options = ['dhcpDisabled', 'dhcpRelay', 'dhcpServer']
assert kwargs['dhcpMode'] in options, f'''"dhcpMode" cannot be "{kwargs['dhcpMode']}", & must be set to one of: {options}'''
if 'dhcpLeaseTime' in kwargs:
options = ['30 minutes', '1 hour', '4 hours', '12 hours', '1 day', '1 week']
assert kwargs['dhcpLeaseTime'] in options, f'''"dhcpLeaseTime" cannot be "{kwargs['dhcpLeaseTime']}", & must be set to one of: {options}'''
if 'dnsNameserversOption' in kwargs:
options = ['googlePublicDns', 'openDns', 'custom']
assert kwargs['dnsNameserversOption'] in options, f'''"dnsNameserversOption" cannot be "{kwargs['dnsNameserversOption']}", & must be set to one of: {options}'''
metadata = {
'tags': ['switch', 'configure', 'routing', 'interfaces', 'dhcp'],
'operation': 'updateDeviceSwitchRoutingInterfaceDhcp'
}
resource = f'/devices/{serial}/switch/routing/interfaces/{interfaceId}/dhcp'
body_params = ['dhcpMode', 'dhcpRelayServerIps', 'dhcpLeaseTime', 'dnsNameserversOption', 'dnsCustomNameservers', 'bootOptionsEnabled', 'bootNextServer', 'bootFileName', 'dhcpOptions', 'reservedIpRanges', 'fixedIpAssignments', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getDeviceSwitchRoutingStaticRoutes(self, serial: str):
"""
**List layer 3 static routes for a switch**
https://developer.cisco.com/meraki/api-v1/#!get-device-switch-routing-static-routes
- serial (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'routing', 'staticRoutes'],
'operation': 'getDeviceSwitchRoutingStaticRoutes'
}
resource = f'/devices/{serial}/switch/routing/staticRoutes'
return self._session.get(metadata, resource)
def createDeviceSwitchRoutingStaticRoute(self, serial: str, subnet: str, nextHopIp: str, **kwargs):
"""
**Create a layer 3 static route for a switch**
https://developer.cisco.com/meraki/api-v1/#!create-device-switch-routing-static-route
- serial (string): (required)
- subnet (string): The subnet which is routed via this static route and should be specified in CIDR notation (ex. 1.2.3.0/24)
- nextHopIp (string): IP address of the next hop device to which the device sends its traffic for the subnet
- name (string): Name or description for layer 3 static route
- advertiseViaOspfEnabled (boolean): Option to advertise static route via OSPF
- preferOverOspfRoutesEnabled (boolean): Option to prefer static route over OSPF routes
"""
kwargs.update(locals())
metadata = {
'tags': ['switch', 'configure', 'routing', 'staticRoutes'],
'operation': 'createDeviceSwitchRoutingStaticRoute'
}
resource = f'/devices/{serial}/switch/routing/staticRoutes'
body_params = ['name', 'subnet', 'nextHopIp', 'advertiseViaOspfEnabled', 'preferOverOspfRoutesEnabled', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getDeviceSwitchRoutingStaticRoute(self, serial: str, staticRouteId: str):
"""
**Return a layer 3 static route for a switch**
https://developer.cisco.com/meraki/api-v1/#!get-device-switch-routing-static-route
- serial (string): (required)
- staticRouteId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'routing', 'staticRoutes'],
'operation': 'getDeviceSwitchRoutingStaticRoute'
}
resource = f'/devices/{serial}/switch/routing/staticRoutes/{staticRouteId}'
return self._session.get(metadata, resource)
def updateDeviceSwitchRoutingStaticRoute(self, serial: str, staticRouteId: str, **kwargs):
"""
**Update a layer 3 static route for a switch**
https://developer.cisco.com/meraki/api-v1/#!update-device-switch-routing-static-route
- serial (string): (required)
- staticRouteId (string): (required)
- name (string): Name or description for layer 3 static route
- subnet (string): The subnet which is routed via this static route and should be specified in CIDR notation (ex. 1.2.3.0/24)
- nextHopIp (string): IP address of the next hop device to which the device sends its traffic for the subnet
- advertiseViaOspfEnabled (boolean): Option to advertise static route via OSPF
- preferOverOspfRoutesEnabled (boolean): Option to prefer static route over OSPF routes
"""
kwargs.update(locals())
metadata = {
'tags': ['switch', 'configure', 'routing', 'staticRoutes'],
'operation': 'updateDeviceSwitchRoutingStaticRoute'
}
resource = f'/devices/{serial}/switch/routing/staticRoutes/{staticRouteId}'
body_params = ['name', 'subnet', 'nextHopIp', 'advertiseViaOspfEnabled', 'preferOverOspfRoutesEnabled', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteDeviceSwitchRoutingStaticRoute(self, serial: str, staticRouteId: str):
"""
**Delete a layer 3 static route for a switch**
https://developer.cisco.com/meraki/api-v1/#!delete-device-switch-routing-static-route
- serial (string): (required)
- staticRouteId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'routing', 'staticRoutes'],
'operation': 'deleteDeviceSwitchRoutingStaticRoute'
}
resource = f'/devices/{serial}/switch/routing/staticRoutes/{staticRouteId}'
return self._session.delete(metadata, resource)
def getDeviceSwitchWarmSpare(self, serial: str):
"""
**Return warm spare configuration for a switch**
https://developer.cisco.com/meraki/api-v1/#!get-device-switch-warm-spare
- serial (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'warmSpare'],
'operation': 'getDeviceSwitchWarmSpare'
}
resource = f'/devices/{serial}/switch/warmSpare'
return self._session.get(metadata, resource)
def updateDeviceSwitchWarmSpare(self, serial: str, enabled: bool, **kwargs):
"""
**Update warm spare configuration for a switch**
https://developer.cisco.com/meraki/api-v1/#!update-device-switch-warm-spare
- serial (string): (required)
- enabled (boolean): Enable or disable warm spare for a switch
- spareSerial (string): Serial number of the warm spare switch
"""
kwargs.update(locals())
metadata = {
'tags': ['switch', 'configure', 'warmSpare'],
'operation': 'updateDeviceSwitchWarmSpare'
}
resource = f'/devices/{serial}/switch/warmSpare'
body_params = ['enabled', 'spareSerial', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkSwitchAccessControlLists(self, networkId: str):
"""
**Return the access control lists for a MS network**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-access-control-lists
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'accessControlLists'],
'operation': 'getNetworkSwitchAccessControlLists'
}
resource = f'/networks/{networkId}/switch/accessControlLists'
return self._session.get(metadata, resource)
def updateNetworkSwitchAccessControlLists(self, networkId: str, rules: list):
"""
**Update the access control lists for a MS network**
https://developer.cisco.com/meraki/api-v1/#!update-network-switch-access-control-lists
- networkId (string): (required)
- rules (array): An ordered array of the access control list rules (not including the default rule). An empty array will clear the rules.
"""
kwargs = locals()
metadata = {
'tags': ['switch', 'configure', 'accessControlLists'],
'operation': 'updateNetworkSwitchAccessControlLists'
}
resource = f'/networks/{networkId}/switch/accessControlLists'
body_params = ['rules', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkSwitchAccessPolicies(self, networkId: str):
"""
**List the access policies for a switch network**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-access-policies
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'accessPolicies'],
'operation': 'getNetworkSwitchAccessPolicies'
}
resource = f'/networks/{networkId}/switch/accessPolicies'
return self._session.get(metadata, resource)
def createNetworkSwitchAccessPolicy(self, networkId: str, name: str, radiusServers: list, radiusTestingEnabled: bool, radiusCoaSupportEnabled: bool, radiusAccountingEnabled: bool, hostMode: str, urlRedirectWalledGardenEnabled: bool, **kwargs):
"""
**Create an access policy for a switch network**
https://developer.cisco.com/meraki/api-v1/#!create-network-switch-access-policy
- networkId (string): (required)
- name (string): Name of the access policy
- radiusServers (array): List of RADIUS servers to require connecting devices to authenticate against before granting network access
- radiusTestingEnabled (boolean): If enabled, Meraki devices will periodically send access-request messages to these RADIUS servers
- radiusCoaSupportEnabled (boolean): Change of authentication for RADIUS re-authentication and disconnection
- radiusAccountingEnabled (boolean): Enable to send start, interim-update and stop messages to a configured RADIUS accounting server for tracking connected clients
- hostMode (string): Choose the Host Mode for the access policy.
- urlRedirectWalledGardenEnabled (boolean): Enable to restrict access for clients to a specific set of IP addresses or hostnames prior to authentication
- radius (object): Object for RADIUS Settings
- radiusAccountingServers (array): List of RADIUS accounting servers to require connecting devices to authenticate against before granting network access
- radiusGroupAttribute (string): Acceptable values are `""` for None, or `"11"` for Group Policies ACL
- accessPolicyType (string): Access Type of the policy. Automatically 'Hybrid authentication' when hostMode is 'Multi-Domain'.
- increaseAccessSpeed (boolean): Enabling this option will make switches execute 802.1X and MAC-bypass authentication simultaneously so that clients authenticate faster. Only required when accessPolicyType is 'Hybrid Authentication.
- guestVlanId (integer): ID for the guest VLAN allow unauthorized devices access to limited network resources
- voiceVlanClients (boolean): CDP/LLDP capable voice clients will be able to use this VLAN. Automatically true when hostMode is 'Multi-Domain'.
- urlRedirectWalledGardenRanges (array): IP address ranges, in CIDR notation, to restrict access for clients to a specific set of IP addresses or hostnames prior to authentication
"""
kwargs.update(locals())
if 'hostMode' in kwargs:
options = ['Single-Host', 'Multi-Domain', 'Multi-Host', 'Multi-Auth']
assert kwargs['hostMode'] in options, f'''"hostMode" cannot be "{kwargs['hostMode']}", & must be set to one of: {options}'''
if 'accessPolicyType' in kwargs:
options = ['802.1x', 'MAC authentication bypass', 'Hybrid authentication']
assert kwargs['accessPolicyType'] in options, f'''"accessPolicyType" cannot be "{kwargs['accessPolicyType']}", & must be set to one of: {options}'''
metadata = {
'tags': ['switch', 'configure', 'accessPolicies'],
'operation': 'createNetworkSwitchAccessPolicy'
}
resource = f'/networks/{networkId}/switch/accessPolicies'
body_params = ['name', 'radiusServers', 'radius', 'radiusTestingEnabled', 'radiusCoaSupportEnabled', 'radiusAccountingEnabled', 'radiusAccountingServers', 'radiusGroupAttribute', 'hostMode', 'accessPolicyType', 'increaseAccessSpeed', 'guestVlanId', 'voiceVlanClients', 'urlRedirectWalledGardenEnabled', 'urlRedirectWalledGardenRanges', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getNetworkSwitchAccessPolicy(self, networkId: str, accessPolicyNumber: str):
"""
**Return a specific access policy for a switch network**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-access-policy
- networkId (string): (required)
- accessPolicyNumber (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'accessPolicies'],
'operation': 'getNetworkSwitchAccessPolicy'
}
resource = f'/networks/{networkId}/switch/accessPolicies/{accessPolicyNumber}'
return self._session.get(metadata, resource)
def updateNetworkSwitchAccessPolicy(self, networkId: str, accessPolicyNumber: str, **kwargs):
"""
**Update an access policy for a switch network**
https://developer.cisco.com/meraki/api-v1/#!update-network-switch-access-policy
- networkId (string): (required)
- accessPolicyNumber (string): (required)
- name (string): Name of the access policy
- radiusServers (array): List of RADIUS servers to require connecting devices to authenticate against before granting network access
- radius (object): Object for RADIUS Settings
- radiusTestingEnabled (boolean): If enabled, Meraki devices will periodically send access-request messages to these RADIUS servers
- radiusCoaSupportEnabled (boolean): Change of authentication for RADIUS re-authentication and disconnection
- radiusAccountingEnabled (boolean): Enable to send start, interim-update and stop messages to a configured RADIUS accounting server for tracking connected clients
- radiusAccountingServers (array): List of RADIUS accounting servers to require connecting devices to authenticate against before granting network access
- radiusGroupAttribute (string): Can be either `""`, which means `None` on Dashboard, or `"11"`, which means `Filter-Id` on Dashboard and will use Group Policy ACLs when supported (firmware 14+)
- hostMode (string): Choose the Host Mode for the access policy.
- accessPolicyType (string): Access Type of the policy. Automatically 'Hybrid authentication' when hostMode is 'Multi-Domain'.
- increaseAccessSpeed (boolean): Enabling this option will make switches execute 802.1X and MAC-bypass authentication simultaneously so that clients authenticate faster. Only required when accessPolicyType is 'Hybrid Authentication.
- guestVlanId (integer): ID for the guest VLAN allow unauthorized devices access to limited network resources
- voiceVlanClients (boolean): CDP/LLDP capable voice clients will be able to use this VLAN. Automatically true when hostMode is 'Multi-Domain'.
- urlRedirectWalledGardenEnabled (boolean): Enable to restrict access for clients to a specific set of IP addresses or hostnames prior to authentication
- urlRedirectWalledGardenRanges (array): IP address ranges, in CIDR notation, to restrict access for clients to a specific set of IP addresses or hostnames prior to authentication
"""
kwargs.update(locals())
if 'hostMode' in kwargs:
options = ['Single-Host', 'Multi-Domain', 'Multi-Host', 'Multi-Auth']
assert kwargs['hostMode'] in options, f'''"hostMode" cannot be "{kwargs['hostMode']}", & must be set to one of: {options}'''
if 'accessPolicyType' in kwargs:
options = ['802.1x', 'MAC authentication bypass', 'Hybrid authentication']
assert kwargs['accessPolicyType'] in options, f'''"accessPolicyType" cannot be "{kwargs['accessPolicyType']}", & must be set to one of: {options}'''
metadata = {
'tags': ['switch', 'configure', 'accessPolicies'],
'operation': 'updateNetworkSwitchAccessPolicy'
}
resource = f'/networks/{networkId}/switch/accessPolicies/{accessPolicyNumber}'
body_params = ['name', 'radiusServers', 'radius', 'radiusTestingEnabled', 'radiusCoaSupportEnabled', 'radiusAccountingEnabled', 'radiusAccountingServers', 'radiusGroupAttribute', 'hostMode', 'accessPolicyType', 'increaseAccessSpeed', 'guestVlanId', 'voiceVlanClients', 'urlRedirectWalledGardenEnabled', 'urlRedirectWalledGardenRanges', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteNetworkSwitchAccessPolicy(self, networkId: str, accessPolicyNumber: str):
"""
**Delete an access policy for a switch network**
https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-access-policy
- networkId (string): (required)
- accessPolicyNumber (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'accessPolicies'],
'operation': 'deleteNetworkSwitchAccessPolicy'
}
resource = f'/networks/{networkId}/switch/accessPolicies/{accessPolicyNumber}'
return self._session.delete(metadata, resource)
def getNetworkSwitchAlternateManagementInterface(self, networkId: str):
"""
**Return the switch alternate management interface for the network**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-alternate-management-interface
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'alternateManagementInterface'],
'operation': 'getNetworkSwitchAlternateManagementInterface'
}
resource = f'/networks/{networkId}/switch/alternateManagementInterface'
return self._session.get(metadata, resource)
def updateNetworkSwitchAlternateManagementInterface(self, networkId: str, **kwargs):
"""
**Update the switch alternate management interface for the network**
https://developer.cisco.com/meraki/api-v1/#!update-network-switch-alternate-management-interface
- networkId (string): (required)
- enabled (boolean): Boolean value to enable or disable AMI configuration. If enabled, VLAN and protocols must be set
- vlanId (integer): Alternate management VLAN, must be between 1 and 4094
- protocols (array): Can be one or more of the following values: 'radius', 'snmp' or 'syslog'
- switches (array): Array of switch serial number and IP assignment. If parameter is present, it cannot have empty body. Note: switches parameter is not applicable for template networks, in other words, do not put 'switches' in the body when updating template networks. Also, an empty 'switches' array will remove all previous assignments
"""
kwargs.update(locals())
metadata = {
'tags': ['switch', 'configure', 'alternateManagementInterface'],
'operation': 'updateNetworkSwitchAlternateManagementInterface'
}
resource = f'/networks/{networkId}/switch/alternateManagementInterface'
body_params = ['enabled', 'vlanId', 'protocols', 'switches', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkSwitchDhcpServerPolicy(self, networkId: str):
"""
**Return the DHCP server policy**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-dhcp-server-policy
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'dhcpServerPolicy'],
'operation': 'getNetworkSwitchDhcpServerPolicy'
}
resource = f'/networks/{networkId}/switch/dhcpServerPolicy'
return self._session.get(metadata, resource)
def updateNetworkSwitchDhcpServerPolicy(self, networkId: str, **kwargs):
"""
**Update the DHCP server policy**
https://developer.cisco.com/meraki/api-v1/#!update-network-switch-dhcp-server-policy
- networkId (string): (required)
- defaultPolicy (string): 'allow' or 'block' new DHCP servers. Default value is 'allow'.
- allowedServers (array): List the MAC addresses of DHCP servers to permit on the network. Applicable only if defaultPolicy is set to block. An empty array will clear the entries.
- blockedServers (array): List the MAC addresses of DHCP servers to block on the network. Applicable only if defaultPolicy is set to allow. An empty array will clear the entries.
"""
kwargs.update(locals())
if 'defaultPolicy' in kwargs:
options = ['allow', 'block']
assert kwargs['defaultPolicy'] in options, f'''"defaultPolicy" cannot be "{kwargs['defaultPolicy']}", & must be set to one of: {options}'''
metadata = {
'tags': ['switch', 'configure', 'dhcpServerPolicy'],
'operation': 'updateNetworkSwitchDhcpServerPolicy'
}
resource = f'/networks/{networkId}/switch/dhcpServerPolicy'
body_params = ['defaultPolicy', 'allowedServers', 'blockedServers', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkSwitchDscpToCosMappings(self, networkId: str):
"""
**Return the DSCP to CoS mappings**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-dscp-to-cos-mappings
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'dscpToCosMappings'],
'operation': 'getNetworkSwitchDscpToCosMappings'
}
resource = f'/networks/{networkId}/switch/dscpToCosMappings'
return self._session.get(metadata, resource)
def updateNetworkSwitchDscpToCosMappings(self, networkId: str, mappings: list):
"""
**Update the DSCP to CoS mappings**
https://developer.cisco.com/meraki/api-v1/#!update-network-switch-dscp-to-cos-mappings
- networkId (string): (required)
- mappings (array): An array of DSCP to CoS mappings. An empty array will reset the mappings to default.
"""
kwargs = locals()
metadata = {
'tags': ['switch', 'configure', 'dscpToCosMappings'],
'operation': 'updateNetworkSwitchDscpToCosMappings'
}
resource = f'/networks/{networkId}/switch/dscpToCosMappings'
body_params = ['mappings', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkSwitchLinkAggregations(self, networkId: str):
"""
**List link aggregation groups**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-link-aggregations
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'linkAggregations'],
'operation': 'getNetworkSwitchLinkAggregations'
}
resource = f'/networks/{networkId}/switch/linkAggregations'
return self._session.get(metadata, resource)
def createNetworkSwitchLinkAggregation(self, networkId: str, **kwargs):
"""
**Create a link aggregation group**
https://developer.cisco.com/meraki/api-v1/#!create-network-switch-link-aggregation
- networkId (string): (required)
- switchPorts (array): Array of switch or stack ports for creating aggregation group. Minimum 2 and maximum 8 ports are supported.
- switchProfilePorts (array): Array of switch profile ports for creating aggregation group. Minimum 2 and maximum 8 ports are supported.
"""
kwargs.update(locals())
metadata = {
'tags': ['switch', 'configure', 'linkAggregations'],
'operation': 'createNetworkSwitchLinkAggregation'
}
resource = f'/networks/{networkId}/switch/linkAggregations'
body_params = ['switchPorts', 'switchProfilePorts', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def updateNetworkSwitchLinkAggregation(self, networkId: str, linkAggregationId: str, **kwargs):
"""
**Update a link aggregation group**
https://developer.cisco.com/meraki/api-v1/#!update-network-switch-link-aggregation
- networkId (string): (required)
- linkAggregationId (string): (required)
- switchPorts (array): Array of switch or stack ports for updating aggregation group. Minimum 2 and maximum 8 ports are supported.
- switchProfilePorts (array): Array of switch profile ports for updating aggregation group. Minimum 2 and maximum 8 ports are supported.
"""
kwargs.update(locals())
metadata = {
'tags': ['switch', 'configure', 'linkAggregations'],
'operation': 'updateNetworkSwitchLinkAggregation'
}
resource = f'/networks/{networkId}/switch/linkAggregations/{linkAggregationId}'
body_params = ['switchPorts', 'switchProfilePorts', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def deleteNetworkSwitchLinkAggregation(self, networkId: str, linkAggregationId: str):
"""
**Split a link aggregation group into separate ports**
https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-link-aggregation
- networkId (string): (required)
- linkAggregationId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'linkAggregations'],
'operation': 'deleteNetworkSwitchLinkAggregation'
}
resource = f'/networks/{networkId}/switch/linkAggregations/{linkAggregationId}'
return self._session.delete(metadata, resource)
def getNetworkSwitchMtu(self, networkId: str):
"""
**Return the MTU configuration**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-mtu
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'mtu'],
'operation': 'getNetworkSwitchMtu'
}
resource = f'/networks/{networkId}/switch/mtu'
return self._session.get(metadata, resource)
def updateNetworkSwitchMtu(self, networkId: str, **kwargs):
"""
**Update the MTU configuration**
https://developer.cisco.com/meraki/api-v1/#!update-network-switch-mtu
- networkId (string): (required)
- defaultMtuSize (integer): MTU size for the entire network. Default value is 9578.
- overrides (array): Override MTU size for individual switches or switch profiles. An empty array will clear overrides.
"""
kwargs.update(locals())
metadata = {
'tags': ['switch', 'configure', 'mtu'],
'operation': 'updateNetworkSwitchMtu'
}
resource = f'/networks/{networkId}/switch/mtu'
body_params = ['defaultMtuSize', 'overrides', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkSwitchPortSchedules(self, networkId: str):
"""
**List switch port schedules**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-port-schedules
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'portSchedules'],
'operation': 'getNetworkSwitchPortSchedules'
}
resource = f'/networks/{networkId}/switch/portSchedules'
return self._session.get(metadata, resource)
def createNetworkSwitchPortSchedule(self, networkId: str, name: str, **kwargs):
"""
**Add a switch port schedule**
https://developer.cisco.com/meraki/api-v1/#!create-network-switch-port-schedule
- networkId (string): (required)
- name (string): The name for your port schedule. Required
- portSchedule (object): The schedule for switch port scheduling. Schedules are applied to days of the week.
When it's empty, default schedule with all days of a week are configured.
Any unspecified day in the schedule is added as a default schedule configuration of the day.
"""
kwargs.update(locals())
metadata = {
'tags': ['switch', 'configure', 'portSchedules'],
'operation': 'createNetworkSwitchPortSchedule'
}
resource = f'/networks/{networkId}/switch/portSchedules'
body_params = ['name', 'portSchedule', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def deleteNetworkSwitchPortSchedule(self, networkId: str, portScheduleId: str):
"""
**Delete a switch port schedule**
https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-port-schedule
- networkId (string): (required)
- portScheduleId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'portSchedules'],
'operation': 'deleteNetworkSwitchPortSchedule'
}
resource = f'/networks/{networkId}/switch/portSchedules/{portScheduleId}'
return self._session.delete(metadata, resource)
def updateNetworkSwitchPortSchedule(self, networkId: str, portScheduleId: str, **kwargs):
"""
**Update a switch port schedule**
https://developer.cisco.com/meraki/api-v1/#!update-network-switch-port-schedule
- networkId (string): (required)
- portScheduleId (string): (required)
- name (string): The name for your port schedule.
- portSchedule (object): The schedule for switch port scheduling. Schedules are applied to days of the week.
When it's empty, default schedule with all days of a week are configured.
Any unspecified day in the schedule is added as a default schedule configuration of the day.
"""
kwargs.update(locals())
metadata = {
'tags': ['switch', 'configure', 'portSchedules'],
'operation': 'updateNetworkSwitchPortSchedule'
}
resource = f'/networks/{networkId}/switch/portSchedules/{portScheduleId}'
body_params = ['name', 'portSchedule', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.put(metadata, resource, payload)
def getNetworkSwitchQosRules(self, networkId: str):
"""
**List quality of service rules**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-qos-rules
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'qosRules'],
'operation': 'getNetworkSwitchQosRules'
}
resource = f'/networks/{networkId}/switch/qosRules'
return self._session.get(metadata, resource)
def createNetworkSwitchQosRule(self, networkId: str, vlan: int, **kwargs):
"""
**Add a quality of service rule**
https://developer.cisco.com/meraki/api-v1/#!create-network-switch-qos-rule
- networkId (string): (required)
- vlan (integer): The VLAN of the incoming packet. A null value will match any VLAN.
- protocol (string): The protocol of the incoming packet. Can be one of "ANY", "TCP" or "UDP". Default value is "ANY"
- srcPort (integer): The source port of the incoming packet. Applicable only if protocol is TCP or UDP.
- srcPortRange (string): The source port range of the incoming packet. Applicable only if protocol is set to TCP or UDP. Example: 70-80
- dstPort (integer): The destination port of the incoming packet. Applicable only if protocol is TCP or UDP.
- dstPortRange (string): The destination port range of the incoming packet. Applicable only if protocol is set to TCP or UDP. Example: 70-80
- dscp (integer): DSCP tag. Set this to -1 to trust incoming DSCP. Default value is 0
"""
kwargs.update(locals())
if 'protocol' in kwargs:
options = ['ANY', 'TCP', 'UDP']
assert kwargs['protocol'] in options, f'''"protocol" cannot be "{kwargs['protocol']}", & must be set to one of: {options}'''
metadata = {
'tags': ['switch', 'configure', 'qosRules'],
'operation': 'createNetworkSwitchQosRule'
}
resource = f'/networks/{networkId}/switch/qosRules'
body_params = ['vlan', 'protocol', 'srcPort', 'srcPortRange', 'dstPort', 'dstPortRange', 'dscp', ]
payload = {k.strip(): v for k, v in kwargs.items() if k.strip() in body_params}
return self._session.post(metadata, resource, payload)
def getNetworkSwitchQosRulesOrder(self, networkId: str):
"""
**Return the quality of service rule IDs by order in which they will be processed by the switch**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-qos-rules-order
- networkId (string): (required)
"""
metadata = {
'tags': ['switch', 'configure', 'qosRules', 'order'],
'operation': 'getNetworkSwitchQosRulesOrder'
}
resource = f'/networks/{networkId}/switch/qosRules/order'
return self._session.get(metadata, resource)
def updateNetworkSwitchQosRulesOrder(self, networkId: str, ruleIds: list):
    """**Update the order in which the rules should be processed by the switch**

    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-qos-rules-order

    - networkId (string): (required)
    - ruleIds (array): Quality of service rule IDs, arranged in the order the switch should process them.
    """
    metadata = {
        'tags': ['switch', 'configure', 'qosRules', 'order'],
        'operation': 'updateNetworkSwitchQosRulesOrder',
    }
    # ruleIds is the only body parameter for this endpoint.
    return self._session.put(
        metadata,
        f'/networks/{networkId}/switch/qosRules/order',
        {'ruleIds': ruleIds},
    )
def getNetworkSwitchQosRule(self, networkId: str, qosRuleId: str):
    """**Return a quality of service rule**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-qos-rule

    - networkId (string): (required)
    - qosRuleId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'qosRules'],
         'operation': 'getNetworkSwitchQosRule'},
        f'/networks/{networkId}/switch/qosRules/{qosRuleId}',
    )
def deleteNetworkSwitchQosRule(self, networkId: str, qosRuleId: str):
    """**Delete a quality of service rule**

    https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-qos-rule

    - networkId (string): (required)
    - qosRuleId (string): (required)
    """
    return self._session.delete(
        {'tags': ['switch', 'configure', 'qosRules'],
         'operation': 'deleteNetworkSwitchQosRule'},
        f'/networks/{networkId}/switch/qosRules/{qosRuleId}',
    )
def updateNetworkSwitchQosRule(self, networkId: str, qosRuleId: str, **kwargs):
    """**Update a quality of service rule**

    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-qos-rule

    - networkId (string): (required)
    - qosRuleId (string): (required)
    - vlan (integer): VLAN of the incoming packet; null matches any VLAN.
    - protocol (string): Protocol of the incoming packet: "ANY", "TCP" or "UDP". Default "ANY".
    - srcPort (integer): Source port of the incoming packet (TCP/UDP only).
    - srcPortRange (string): Source port range (TCP/UDP only), e.g. 70-80.
    - dstPort (integer): Destination port of the incoming packet (TCP/UDP only).
    - dstPortRange (string): Destination port range (TCP/UDP only), e.g. 70-80.
    - dscp (integer): DSCP tag to assign; -1 trusts incoming DSCP. Default 0.
    """
    # Fold required positional args in with any optional keyword args.
    kwargs.update(locals())
    if 'protocol' in kwargs:
        valid = ['ANY', 'TCP', 'UDP']
        assert kwargs['protocol'] in valid, f'''"protocol" cannot be "{kwargs['protocol']}", & must be set to one of: {valid}'''
    allowed = ('vlan', 'protocol', 'srcPort', 'srcPortRange', 'dstPort', 'dstPortRange', 'dscp')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in allowed}
    metadata = {'tags': ['switch', 'configure', 'qosRules'],
                'operation': 'updateNetworkSwitchQosRule'}
    return self._session.put(metadata, f'/networks/{networkId}/switch/qosRules/{qosRuleId}', payload)
def getNetworkSwitchRoutingMulticast(self, networkId: str):
    """**Return multicast settings for a network**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-routing-multicast

    - networkId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'routing', 'multicast'],
         'operation': 'getNetworkSwitchRoutingMulticast'},
        f'/networks/{networkId}/switch/routing/multicast',
    )
def updateNetworkSwitchRoutingMulticast(self, networkId: str, **kwargs):
    """**Update multicast settings for a network**

    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-routing-multicast

    - networkId (string): (required)
    - defaultSettings (object): Network-wide default multicast settings; IGMP snooping and flood-unknown-multicast are on by default.
    - overrides (array): Per-switch/stack/profile multicast settings; an empty array clears them.
    """
    kwargs.update(locals())
    allowed = ('defaultSettings', 'overrides')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in allowed}
    metadata = {'tags': ['switch', 'configure', 'routing', 'multicast'],
                'operation': 'updateNetworkSwitchRoutingMulticast'}
    return self._session.put(metadata, f'/networks/{networkId}/switch/routing/multicast', payload)
def getNetworkSwitchRoutingMulticastRendezvousPoints(self, networkId: str):
    """**List multicast rendezvous points**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-routing-multicast-rendezvous-points

    - networkId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'routing', 'multicast', 'rendezvousPoints'],
         'operation': 'getNetworkSwitchRoutingMulticastRendezvousPoints'},
        f'/networks/{networkId}/switch/routing/multicast/rendezvousPoints',
    )
def createNetworkSwitchRoutingMulticastRendezvousPoint(self, networkId: str, interfaceIp: str, multicastGroup: str):
    """**Create a multicast rendezvous point**

    https://developer.cisco.com/meraki/api-v1/#!create-network-switch-routing-multicast-rendezvous-point

    - networkId (string): (required)
    - interfaceIp (string): IP address of the interface on which to create the RP.
    - multicastGroup (string): 'Any', or the IP address of a multicast group.
    """
    metadata = {
        'tags': ['switch', 'configure', 'routing', 'multicast', 'rendezvousPoints'],
        'operation': 'createNetworkSwitchRoutingMulticastRendezvousPoint',
    }
    return self._session.post(
        metadata,
        f'/networks/{networkId}/switch/routing/multicast/rendezvousPoints',
        {'interfaceIp': interfaceIp, 'multicastGroup': multicastGroup},
    )
def getNetworkSwitchRoutingMulticastRendezvousPoint(self, networkId: str, rendezvousPointId: str):
    """**Return a multicast rendezvous point**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-routing-multicast-rendezvous-point

    - networkId (string): (required)
    - rendezvousPointId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'routing', 'multicast', 'rendezvousPoints'],
         'operation': 'getNetworkSwitchRoutingMulticastRendezvousPoint'},
        f'/networks/{networkId}/switch/routing/multicast/rendezvousPoints/{rendezvousPointId}',
    )
def deleteNetworkSwitchRoutingMulticastRendezvousPoint(self, networkId: str, rendezvousPointId: str):
    """**Delete a multicast rendezvous point**

    https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-routing-multicast-rendezvous-point

    - networkId (string): (required)
    - rendezvousPointId (string): (required)
    """
    return self._session.delete(
        {'tags': ['switch', 'configure', 'routing', 'multicast', 'rendezvousPoints'],
         'operation': 'deleteNetworkSwitchRoutingMulticastRendezvousPoint'},
        f'/networks/{networkId}/switch/routing/multicast/rendezvousPoints/{rendezvousPointId}',
    )
def updateNetworkSwitchRoutingMulticastRendezvousPoint(self, networkId: str, rendezvousPointId: str, interfaceIp: str, multicastGroup: str):
    """**Update a multicast rendezvous point**

    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-routing-multicast-rendezvous-point

    - networkId (string): (required)
    - rendezvousPointId (string): (required)
    - interfaceIp (string): IP address of the interface to use.
    - multicastGroup (string): 'Any', or the IP address of a multicast group.
    """
    metadata = {
        'tags': ['switch', 'configure', 'routing', 'multicast', 'rendezvousPoints'],
        'operation': 'updateNetworkSwitchRoutingMulticastRendezvousPoint',
    }
    return self._session.put(
        metadata,
        f'/networks/{networkId}/switch/routing/multicast/rendezvousPoints/{rendezvousPointId}',
        {'interfaceIp': interfaceIp, 'multicastGroup': multicastGroup},
    )
def getNetworkSwitchRoutingOspf(self, networkId: str):
    """**Return layer 3 OSPF routing configuration**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-routing-ospf

    - networkId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'routing', 'ospf'],
         'operation': 'getNetworkSwitchRoutingOspf'},
        f'/networks/{networkId}/switch/routing/ospf',
    )
def updateNetworkSwitchRoutingOspf(self, networkId: str, **kwargs):
    """**Update layer 3 OSPF routing configuration**

    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-routing-ospf

    - networkId (string): (required)
    - enabled (boolean): Enable/disable OSPF routing (disabled by default).
    - helloTimerInSeconds (integer): Hello packet interval to OSPF neighbors, 1-255. Default 10.
    - deadTimerInSeconds (integer): Interval after which a peer is declared inactive/dead, 1-65535.
    - areas (array): OSPF areas.
    - v3 (object): OSPF v3 configuration.
    - md5AuthenticationEnabled (boolean): Enable/disable MD5 authentication (disabled by default).
    - md5AuthenticationKey (object): MD5 credentials; only relevant when md5AuthenticationEnabled is true.
    """
    kwargs.update(locals())
    allowed = ('enabled', 'helloTimerInSeconds', 'deadTimerInSeconds', 'areas',
               'v3', 'md5AuthenticationEnabled', 'md5AuthenticationKey')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in allowed}
    metadata = {'tags': ['switch', 'configure', 'routing', 'ospf'],
                'operation': 'updateNetworkSwitchRoutingOspf'}
    return self._session.put(metadata, f'/networks/{networkId}/switch/routing/ospf', payload)
def getNetworkSwitchSettings(self, networkId: str):
    """**Returns the switch network settings**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-settings

    - networkId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'settings'],
         'operation': 'getNetworkSwitchSettings'},
        f'/networks/{networkId}/switch/settings',
    )
def updateNetworkSwitchSettings(self, networkId: str, **kwargs):
    """**Update switch network settings**

    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-settings

    - networkId (string): (required)
    - vlan (integer): Management VLAN.
    - useCombinedPower (boolean): Use combined power as the default behavior of secondary power supplies on supported devices.
    - powerExceptions (array): Per-switch exceptions to "useCombinedPower".
    """
    kwargs.update(locals())
    allowed = ('vlan', 'useCombinedPower', 'powerExceptions')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in allowed}
    metadata = {'tags': ['switch', 'configure', 'settings'],
                'operation': 'updateNetworkSwitchSettings'}
    return self._session.put(metadata, f'/networks/{networkId}/switch/settings', payload)
def getNetworkSwitchStacks(self, networkId: str):
    """**List the switch stacks in a network**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stacks

    - networkId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'stacks'],
         'operation': 'getNetworkSwitchStacks'},
        f'/networks/{networkId}/switch/stacks',
    )
def createNetworkSwitchStack(self, networkId: str, name: str, serials: list):
    """**Create a stack**

    https://developer.cisco.com/meraki/api-v1/#!create-network-switch-stack

    - networkId (string): (required)
    - name (string): Name for the new stack.
    - serials (array): Switch serials to include in the new stack.
    """
    metadata = {
        'tags': ['switch', 'configure', 'stacks'],
        'operation': 'createNetworkSwitchStack',
    }
    return self._session.post(
        metadata,
        f'/networks/{networkId}/switch/stacks',
        {'name': name, 'serials': serials},
    )
def getNetworkSwitchStack(self, networkId: str, switchStackId: str):
    """**Show a switch stack**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stack

    - networkId (string): (required)
    - switchStackId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'stacks'],
         'operation': 'getNetworkSwitchStack'},
        f'/networks/{networkId}/switch/stacks/{switchStackId}',
    )
def deleteNetworkSwitchStack(self, networkId: str, switchStackId: str):
    """**Delete a stack**

    https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-stack

    - networkId (string): (required)
    - switchStackId (string): (required)
    """
    return self._session.delete(
        {'tags': ['switch', 'configure', 'stacks'],
         'operation': 'deleteNetworkSwitchStack'},
        f'/networks/{networkId}/switch/stacks/{switchStackId}',
    )
def addNetworkSwitchStack(self, networkId: str, switchStackId: str, serial: str):
    """**Add a switch to a stack**

    https://developer.cisco.com/meraki/api-v1/#!add-network-switch-stack

    - networkId (string): (required)
    - switchStackId (string): (required)
    - serial (string): Serial of the switch to add.
    """
    metadata = {
        'tags': ['switch', 'configure', 'stacks'],
        'operation': 'addNetworkSwitchStack',
    }
    return self._session.post(
        metadata,
        f'/networks/{networkId}/switch/stacks/{switchStackId}/add',
        {'serial': serial},
    )
def removeNetworkSwitchStack(self, networkId: str, switchStackId: str, serial: str):
    """**Remove a switch from a stack**

    https://developer.cisco.com/meraki/api-v1/#!remove-network-switch-stack

    - networkId (string): (required)
    - switchStackId (string): (required)
    - serial (string): Serial of the switch to remove.
    """
    metadata = {
        'tags': ['switch', 'configure', 'stacks'],
        'operation': 'removeNetworkSwitchStack',
    }
    return self._session.post(
        metadata,
        f'/networks/{networkId}/switch/stacks/{switchStackId}/remove',
        {'serial': serial},
    )
def getNetworkSwitchStackRoutingInterfaces(self, networkId: str, switchStackId: str):
    """**List layer 3 interfaces for a switch stack**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stack-routing-interfaces

    - networkId (string): (required)
    - switchStackId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'stacks', 'routing', 'interfaces'],
         'operation': 'getNetworkSwitchStackRoutingInterfaces'},
        f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/interfaces',
    )
def createNetworkSwitchStackRoutingInterface(self, networkId: str, switchStackId: str, name: str, vlanId: int, **kwargs):
    """**Create a layer 3 interface for a switch stack**

    https://developer.cisco.com/meraki/api-v1/#!create-network-switch-stack-routing-interface

    - networkId (string): (required)
    - switchStackId (string): (required)
    - name (string): Friendly name or description for the interface or VLAN.
    - vlanId (integer): VLAN this routed interface is on (1-4094).
    - subnet (string): Network this routed interface is on, CIDR notation (ex. 10.1.1.0/24).
    - interfaceIp (string): IP this stack uses for layer 3 routing on this VLAN/subnet; must differ from the switch's management IP.
    - multicastRouting (string): 'disabled', 'enabled' or 'IGMP snooping querier'. Default 'disabled'.
    - defaultGateway (string): Next hop for traffic not covered by a connected subnet or static route; must exist in a subnet with a routed interface.
    - ospfSettings (object): OSPF routing settings of the interface.
    - ipv6 (object): IPv6 settings of the interface.
    """
    # Merge the required positional args (name, vlanId) into kwargs.
    kwargs.update(locals())
    if 'multicastRouting' in kwargs:
        valid = ['disabled', 'enabled', 'IGMP snooping querier']
        assert kwargs['multicastRouting'] in valid, f'''"multicastRouting" cannot be "{kwargs['multicastRouting']}", & must be set to one of: {valid}'''
    allowed = ('name', 'subnet', 'interfaceIp', 'multicastRouting', 'vlanId',
               'defaultGateway', 'ospfSettings', 'ipv6')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in allowed}
    metadata = {'tags': ['switch', 'configure', 'stacks', 'routing', 'interfaces'],
                'operation': 'createNetworkSwitchStackRoutingInterface'}
    return self._session.post(metadata, f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/interfaces', payload)
def getNetworkSwitchStackRoutingInterface(self, networkId: str, switchStackId: str, interfaceId: str):
    """**Return a layer 3 interface from a switch stack**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stack-routing-interface

    - networkId (string): (required)
    - switchStackId (string): (required)
    - interfaceId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'stacks', 'routing', 'interfaces'],
         'operation': 'getNetworkSwitchStackRoutingInterface'},
        f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/interfaces/{interfaceId}',
    )
def updateNetworkSwitchStackRoutingInterface(self, networkId: str, switchStackId: str, interfaceId: str, **kwargs):
    """**Update a layer 3 interface for a switch stack**

    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-stack-routing-interface

    - networkId (string): (required)
    - switchStackId (string): (required)
    - interfaceId (string): (required)
    - name (string): Friendly name or description for the interface or VLAN.
    - subnet (string): Network this routed interface is on, CIDR notation (ex. 10.1.1.0/24).
    - interfaceIp (string): IP this stack uses for layer 3 routing on this VLAN/subnet; must differ from the switch's management IP.
    - multicastRouting (string): 'disabled', 'enabled' or 'IGMP snooping querier'.
    - vlanId (integer): VLAN this routed interface is on (1-4094).
    - defaultGateway (string): Next hop for traffic not covered by a connected subnet or static route; must exist in a subnet with a routed interface.
    - ospfSettings (object): OSPF routing settings of the interface.
    - ipv6 (object): IPv6 settings of the interface.
    """
    kwargs.update(locals())
    if 'multicastRouting' in kwargs:
        valid = ['disabled', 'enabled', 'IGMP snooping querier']
        assert kwargs['multicastRouting'] in valid, f'''"multicastRouting" cannot be "{kwargs['multicastRouting']}", & must be set to one of: {valid}'''
    allowed = ('name', 'subnet', 'interfaceIp', 'multicastRouting', 'vlanId',
               'defaultGateway', 'ospfSettings', 'ipv6')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in allowed}
    metadata = {'tags': ['switch', 'configure', 'stacks', 'routing', 'interfaces'],
                'operation': 'updateNetworkSwitchStackRoutingInterface'}
    return self._session.put(metadata, f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/interfaces/{interfaceId}', payload)
def deleteNetworkSwitchStackRoutingInterface(self, networkId: str, switchStackId: str, interfaceId: str):
    """**Delete a layer 3 interface from a switch stack**

    https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-stack-routing-interface

    - networkId (string): (required)
    - switchStackId (string): (required)
    - interfaceId (string): (required)
    """
    return self._session.delete(
        {'tags': ['switch', 'configure', 'stacks', 'routing', 'interfaces'],
         'operation': 'deleteNetworkSwitchStackRoutingInterface'},
        f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/interfaces/{interfaceId}',
    )
def getNetworkSwitchStackRoutingInterfaceDhcp(self, networkId: str, switchStackId: str, interfaceId: str):
    """**Return a layer 3 interface DHCP configuration for a switch stack**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stack-routing-interface-dhcp

    - networkId (string): (required)
    - switchStackId (string): (required)
    - interfaceId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'stacks', 'routing', 'interfaces', 'dhcp'],
         'operation': 'getNetworkSwitchStackRoutingInterfaceDhcp'},
        f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/interfaces/{interfaceId}/dhcp',
    )
def updateNetworkSwitchStackRoutingInterfaceDhcp(self, networkId: str, switchStackId: str, interfaceId: str, **kwargs):
    """**Update a layer 3 interface DHCP configuration for a switch stack**

    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-stack-routing-interface-dhcp

    - networkId (string): (required)
    - switchStackId (string): (required)
    - interfaceId (string): (required)
    - dhcpMode (string): 'dhcpDisabled', 'dhcpRelay' or 'dhcpServer'.
    - dhcpRelayServerIps (array): DHCP relay server IPs for the interface.
    - dhcpLeaseTime (string): '30 minutes', '1 hour', '4 hours', '12 hours', '1 day' or '1 week'.
    - dnsNameserversOption (string): 'googlePublicDns', 'openDns' or 'custom'.
    - dnsCustomNameservers (array): DNS server IPs when dnsNameserversOption is 'custom'.
    - bootOptionsEnabled (boolean): Enable DHCP/PXE boot options for the DHCP server on this interface.
    - bootNextServer (string): PXE boot server IP for the DHCP server on this interface.
    - bootFileName (string): PXE boot server file name for the DHCP server on this interface.
    - dhcpOptions (array): DHCP options (code, type, value) for the DHCP server on this interface.
    - reservedIpRanges (array): DHCP reserved IP assignments for the DHCP server on this interface.
    - fixedIpAssignments (array): DHCP fixed IP assignments for the DHCP server on this interface.
    """
    kwargs.update(locals())
    # Each constrained enum parameter is validated before the request is built.
    if 'dhcpMode' in kwargs:
        valid = ['dhcpDisabled', 'dhcpRelay', 'dhcpServer']
        assert kwargs['dhcpMode'] in valid, f'''"dhcpMode" cannot be "{kwargs['dhcpMode']}", & must be set to one of: {valid}'''
    if 'dhcpLeaseTime' in kwargs:
        valid = ['30 minutes', '1 hour', '4 hours', '12 hours', '1 day', '1 week']
        assert kwargs['dhcpLeaseTime'] in valid, f'''"dhcpLeaseTime" cannot be "{kwargs['dhcpLeaseTime']}", & must be set to one of: {valid}'''
    if 'dnsNameserversOption' in kwargs:
        valid = ['googlePublicDns', 'openDns', 'custom']
        assert kwargs['dnsNameserversOption'] in valid, f'''"dnsNameserversOption" cannot be "{kwargs['dnsNameserversOption']}", & must be set to one of: {valid}'''
    allowed = ('dhcpMode', 'dhcpRelayServerIps', 'dhcpLeaseTime', 'dnsNameserversOption',
               'dnsCustomNameservers', 'bootOptionsEnabled', 'bootNextServer', 'bootFileName',
               'dhcpOptions', 'reservedIpRanges', 'fixedIpAssignments')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in allowed}
    metadata = {'tags': ['switch', 'configure', 'stacks', 'routing', 'interfaces', 'dhcp'],
                'operation': 'updateNetworkSwitchStackRoutingInterfaceDhcp'}
    return self._session.put(metadata, f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/interfaces/{interfaceId}/dhcp', payload)
def getNetworkSwitchStackRoutingStaticRoutes(self, networkId: str, switchStackId: str):
    """**List layer 3 static routes for a switch stack**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stack-routing-static-routes

    - networkId (string): (required)
    - switchStackId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'stacks', 'routing', 'staticRoutes'],
         'operation': 'getNetworkSwitchStackRoutingStaticRoutes'},
        f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/staticRoutes',
    )
def createNetworkSwitchStackRoutingStaticRoute(self, networkId: str, switchStackId: str, subnet: str, nextHopIp: str, **kwargs):
    """**Create a layer 3 static route for a switch stack**

    https://developer.cisco.com/meraki/api-v1/#!create-network-switch-stack-routing-static-route

    - networkId (string): (required)
    - switchStackId (string): (required)
    - subnet (string): Subnet routed via this static route, CIDR notation (ex. 1.2.3.0/24).
    - nextHopIp (string): IP of the next hop device for traffic to the subnet.
    - name (string): Name or description for the layer 3 static route.
    - advertiseViaOspfEnabled (boolean): Advertise this static route via OSPF.
    - preferOverOspfRoutesEnabled (boolean): Prefer this static route over OSPF routes.
    """
    # Merge the required positional args (subnet, nextHopIp) into kwargs.
    kwargs.update(locals())
    allowed = ('name', 'subnet', 'nextHopIp', 'advertiseViaOspfEnabled', 'preferOverOspfRoutesEnabled')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in allowed}
    metadata = {'tags': ['switch', 'configure', 'stacks', 'routing', 'staticRoutes'],
                'operation': 'createNetworkSwitchStackRoutingStaticRoute'}
    return self._session.post(metadata, f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/staticRoutes', payload)
def getNetworkSwitchStackRoutingStaticRoute(self, networkId: str, switchStackId: str, staticRouteId: str):
    """**Return a layer 3 static route for a switch stack**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stack-routing-static-route

    - networkId (string): (required)
    - switchStackId (string): (required)
    - staticRouteId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'stacks', 'routing', 'staticRoutes'],
         'operation': 'getNetworkSwitchStackRoutingStaticRoute'},
        f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/staticRoutes/{staticRouteId}',
    )
def updateNetworkSwitchStackRoutingStaticRoute(self, networkId: str, switchStackId: str, staticRouteId: str, **kwargs):
    """**Update a layer 3 static route for a switch stack**

    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-stack-routing-static-route

    - networkId (string): (required)
    - switchStackId (string): (required)
    - staticRouteId (string): (required)
    - name (string): Name or description for the layer 3 static route.
    - subnet (string): Subnet routed via this static route, CIDR notation (ex. 1.2.3.0/24).
    - nextHopIp (string): IP of the next hop device for traffic to the subnet.
    - advertiseViaOspfEnabled (boolean): Advertise this static route via OSPF.
    - preferOverOspfRoutesEnabled (boolean): Prefer this static route over OSPF routes.
    """
    kwargs.update(locals())
    allowed = ('name', 'subnet', 'nextHopIp', 'advertiseViaOspfEnabled', 'preferOverOspfRoutesEnabled')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in allowed}
    metadata = {'tags': ['switch', 'configure', 'stacks', 'routing', 'staticRoutes'],
                'operation': 'updateNetworkSwitchStackRoutingStaticRoute'}
    return self._session.put(metadata, f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/staticRoutes/{staticRouteId}', payload)
def deleteNetworkSwitchStackRoutingStaticRoute(self, networkId: str, switchStackId: str, staticRouteId: str):
    """**Delete a layer 3 static route for a switch stack**

    https://developer.cisco.com/meraki/api-v1/#!delete-network-switch-stack-routing-static-route

    - networkId (string): (required)
    - switchStackId (string): (required)
    - staticRouteId (string): (required)
    """
    return self._session.delete(
        {'tags': ['switch', 'configure', 'stacks', 'routing', 'staticRoutes'],
         'operation': 'deleteNetworkSwitchStackRoutingStaticRoute'},
        f'/networks/{networkId}/switch/stacks/{switchStackId}/routing/staticRoutes/{staticRouteId}',
    )
def getNetworkSwitchStormControl(self, networkId: str):
    """**Return the storm control configuration for a switch network**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-storm-control

    - networkId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'stormControl'],
         'operation': 'getNetworkSwitchStormControl'},
        f'/networks/{networkId}/switch/stormControl',
    )
def updateNetworkSwitchStormControl(self, networkId: str, **kwargs):
    """**Update the storm control configuration for a switch network**

    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-storm-control

    - networkId (string): (required)
    - broadcastThreshold (integer): Percent (1-99) of available port bandwidth for broadcast traffic; 100 clears the configuration.
    - multicastThreshold (integer): Percent (1-99) of available port bandwidth for multicast traffic; 100 clears the configuration.
    - unknownUnicastThreshold (integer): Percent (1-99) of available port bandwidth for unknown unicast (dlf - destination lookup failure) traffic; 100 clears the configuration.
    """
    kwargs.update(locals())
    allowed = ('broadcastThreshold', 'multicastThreshold', 'unknownUnicastThreshold')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in allowed}
    metadata = {'tags': ['switch', 'configure', 'stormControl'],
                'operation': 'updateNetworkSwitchStormControl'}
    return self._session.put(metadata, f'/networks/{networkId}/switch/stormControl', payload)
def getNetworkSwitchStp(self, networkId: str):
    """**Returns STP settings**

    https://developer.cisco.com/meraki/api-v1/#!get-network-switch-stp

    - networkId (string): (required)
    """
    return self._session.get(
        {'tags': ['switch', 'configure', 'stp'],
         'operation': 'getNetworkSwitchStp'},
        f'/networks/{networkId}/switch/stp',
    )
def updateNetworkSwitchStp(self, networkId: str, **kwargs):
    """**Updates STP settings**

    https://developer.cisco.com/meraki/api-v1/#!update-network-switch-stp

    - networkId (string): (required)
    - rstpEnabled (boolean): Spanning tree protocol status in the network.
    - stpBridgePriority (array): STP bridge priority for switches/stacks or switch profiles; an empty array clears the settings.
    """
    kwargs.update(locals())
    allowed = ('rstpEnabled', 'stpBridgePriority')
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in allowed}
    metadata = {'tags': ['switch', 'configure', 'stp'],
                'operation': 'updateNetworkSwitchStp'}
    return self._session.put(metadata, f'/networks/{networkId}/switch/stp', payload)
def getOrganizationConfigTemplateSwitchProfiles(self, organizationId: str, configTemplateId: str):
    """List the switch profiles of a switch configuration template.

    **List the switch profiles for your switch template configuration**
    https://developer.cisco.com/meraki/api-v1/#!get-organization-config-template-switch-profiles

    - organizationId (string): (required)
    - configTemplateId (string): (required)
    """
    endpoint = (
        f'/organizations/{organizationId}'
        f'/configTemplates/{configTemplateId}/switch/profiles'
    )
    meta = {
        'tags': ['switch', 'configure', 'configTemplates', 'profiles'],
        'operation': 'getOrganizationConfigTemplateSwitchProfiles',
    }
    return self._session.get(meta, endpoint)
def getOrganizationConfigTemplateSwitchProfilePorts(self, organizationId: str, configTemplateId: str, profileId: str):
    """Return every port of one switch profile in a configuration template.

    **Return all the ports of a switch profile**
    https://developer.cisco.com/meraki/api-v1/#!get-organization-config-template-switch-profile-ports

    - organizationId (string): (required)
    - configTemplateId (string): (required)
    - profileId (string): (required)
    """
    endpoint = (
        f'/organizations/{organizationId}'
        f'/configTemplates/{configTemplateId}'
        f'/switch/profiles/{profileId}/ports'
    )
    meta = {
        'tags': ['switch', 'configure', 'configTemplates', 'profiles', 'ports'],
        'operation': 'getOrganizationConfigTemplateSwitchProfilePorts',
    }
    return self._session.get(meta, endpoint)
def getOrganizationConfigTemplateSwitchProfilePort(self, organizationId: str, configTemplateId: str, profileId: str, portId: str):
    """Return a single port of a switch profile in a configuration template.

    **Return a switch profile port**
    https://developer.cisco.com/meraki/api-v1/#!get-organization-config-template-switch-profile-port

    - organizationId (string): (required)
    - configTemplateId (string): (required)
    - profileId (string): (required)
    - portId (string): (required)
    """
    endpoint = (
        f'/organizations/{organizationId}'
        f'/configTemplates/{configTemplateId}'
        f'/switch/profiles/{profileId}/ports/{portId}'
    )
    meta = {
        'tags': ['switch', 'configure', 'configTemplates', 'profiles', 'ports'],
        'operation': 'getOrganizationConfigTemplateSwitchProfilePort',
    }
    return self._session.get(meta, endpoint)
def updateOrganizationConfigTemplateSwitchProfilePort(self, organizationId: str, configTemplateId: str, profileId: str, portId: str, **kwargs):
    """Update a single port of a switch profile in a configuration template.

    **Update a switch profile port**
    https://developer.cisco.com/meraki/api-v1/#!update-organization-config-template-switch-profile-port

    - organizationId (string): (required)
    - configTemplateId (string): (required)
    - profileId (string): (required)
    - portId (string): (required)
    - name (string): The name of the switch profile port
    - tags (array): The list of tags of the switch profile port
    - enabled (boolean): The status of the switch profile port
    - type (string): The type of the switch profile port ('trunk' or 'access')
    - vlan (integer): The VLAN of the switch profile port. A null value will clear the value set for trunk ports.
    - voiceVlan (integer): The voice VLAN of the switch profile port. Only applicable to access ports
    - allowedVlans (string): The VLANs allowed on the switch profile port. Only applicable to trunk ports
    - poeEnabled (boolean): The PoE status of the switch profile port
    - isolationEnabled (boolean): The isolation status of the switch profile port
    - rstpEnabled (boolean): The rapid spanning tree protocol status
    - stpGuard (string): The state of the STP guard ('disabled', 'root guard', 'bpdu guard' or 'loop guard')
    - linkNegotiation (string): The link speed for the switch profile port
    - portScheduleId (string): The ID of the port schedule. A value of null will clear the port schedule.
    - udld (string): The action to take when Unidirectional Link is detected (Alert only, Enforce). Default configuration is Alert only.
    - accessPolicyType (string): The type of the access policy of the switch profile port. Only applicable to access ports. Can be one of 'Open', 'Custom access policy', 'MAC allow list' or 'Sticky MAC allow list'
    - accessPolicyNumber (integer): The number of a custom access policy to configure on the switch profile port. Only applicable when 'accessPolicyType' is 'Custom access policy'
    - macAllowList (array): Only devices with MAC addresses specified in this list will have access to this port. Up to 20 MAC addresses can be defined. Only applicable when 'accessPolicyType' is 'MAC allow list'
    - stickyMacAllowList (array): The initial list of MAC addresses for sticky Mac allow list. Only applicable when 'accessPolicyType' is 'Sticky MAC allow list'
    - stickyMacAllowListLimit (integer): The maximum number of MAC addresses for sticky MAC allow list. Only applicable when 'accessPolicyType' is 'Sticky MAC allow list'
    - stormControlEnabled (boolean): The storm control status of the switch profile port
    - flexibleStackingEnabled (boolean): For supported switches (e.g. MS420/MS425), whether or not the port has flexible stacking enabled.
    """
    # Fold positional locals into kwargs first (SDK-wide convention).
    kwargs.update(locals())
    # Table-driven validation of enumerated parameters; assertion text is
    # identical to the per-parameter checks it replaces.
    enum_options = {
        'type': ['trunk', 'access'],
        'stpGuard': ['disabled', 'root guard', 'bpdu guard', 'loop guard'],
        'udld': ['Alert only', 'Enforce'],
        'accessPolicyType': ['Open', 'Custom access policy', 'MAC allow list', 'Sticky MAC allow list'],
    }
    for param, options in enum_options.items():
        if param in kwargs:
            assert kwargs[param] in options, f'''"{param}" cannot be "{kwargs[param]}", & must be set to one of: {options}'''
    meta = {
        'tags': ['switch', 'configure', 'configTemplates', 'profiles', 'ports'],
        'operation': 'updateOrganizationConfigTemplateSwitchProfilePort',
    }
    endpoint = (
        f'/organizations/{organizationId}'
        f'/configTemplates/{configTemplateId}'
        f'/switch/profiles/{profileId}/ports/{portId}'
    )
    allowed = {
        'name', 'tags', 'enabled', 'type', 'vlan', 'voiceVlan', 'allowedVlans',
        'poeEnabled', 'isolationEnabled', 'rstpEnabled', 'stpGuard',
        'linkNegotiation', 'portScheduleId', 'udld', 'accessPolicyType',
        'accessPolicyNumber', 'macAllowList', 'stickyMacAllowList',
        'stickyMacAllowListLimit', 'stormControlEnabled', 'flexibleStackingEnabled',
    }
    # Only whitelisted body parameters are forwarded to the API.
    payload = {key.strip(): value for key, value in kwargs.items() if key.strip() in allowed}
    return self._session.put(meta, endpoint, payload)
def cloneOrganizationSwitchDevices(self, organizationId: str, sourceSerial: str, targetSerials: list):
    """Clone port-level and some switch-level settings between switches.

    **Clone port-level and some switch-level configuration settings from a source switch to one or more target switches**
    https://developer.cisco.com/meraki/api-v1/#!clone-organization-switch-devices

    - organizationId (string): (required)
    - sourceSerial (string): Serial number of the source switch (must be on a network not bound to a template)
    - targetSerials (array): Array of serial numbers of one or more target switches (must be on a network not bound to a template)
    """
    meta = {
        'tags': ['switch', 'configure', 'devices'],
        'operation': 'cloneOrganizationSwitchDevices',
    }
    endpoint = f'/organizations/{organizationId}/switch/devices/clone'
    # Both parameters are required, so the body can be built directly.
    payload = {
        'sourceSerial': sourceSerial,
        'targetSerials': targetSerials,
    }
    return self._session.post(meta, endpoint, payload)
|
from unittest import TestCase
from tests import abspath
from pytezos.repl.interpreter import Interpreter
from pytezos.michelson.converter import michelson_to_micheline
from pytezos.repl.parser import parse_expression
class OpcodeTestmap_map_112(TestCase):
    """Opcode regression test for the ``map_map.tz`` contract.

    Runs the contract with parameter 15 against an initial storage map and
    checks that every stored value is incremented by the parameter
    (5 -> 20, 1 -> 16).
    """

    def setUp(self):
        # maxDiff=None: unittest shows full diffs on assertion failure.
        self.maxDiff = None
        self.i = Interpreter(debug=True)

    def test_opcode_map_map_112(self):
        # Use double quotes inside the replacement field: reusing the outer
        # single quote (f'...{abspath('...')}...') is a SyntaxError before
        # Python 3.12 (PEP 701), so this keeps the test portable.
        res = self.i.execute(f'INCLUDE "{abspath("opcodes/contracts/map_map.tz")}"')
        self.assertTrue(res['success'])
        res = self.i.execute('RUN 15 { Elt "bar" 5 ; Elt "foo" 1 }')
        self.assertTrue(res['success'])
        # Parse the expected storage with the type of the actual result,
        # then compare raw Michelson values.
        exp_val_expr = michelson_to_micheline('{ Elt "bar" 20 ; Elt "foo" 16 }')
        exp_val = parse_expression(exp_val_expr, res['result']['storage'].type_expr)
        self.assertEqual(exp_val, res['result']['storage']._val)
| from unittest import TestCase
from tests import abspath
from pytezos.repl.interpreter import Interpreter
from pytezos.michelson.converter import michelson_to_micheline
from pytezos.repl.parser import parse_expression
class OpcodeTestmap_map_112(TestCase):
    """Opcode regression test for the ``map_map.tz`` contract."""

    def setUp(self):
        # maxDiff=None: unittest shows full diffs on assertion failure.
        self.maxDiff = None
        # debug=True presumably makes the REPL interpreter more verbose —
        # TODO confirm against pytezos.repl.interpreter.
        self.i = Interpreter(debug=True)

    def test_opcode_map_map_112(self):
        # Load the contract source into the REPL.
        res = self.i.execute(f'INCLUDE "{abspath("opcodes/contracts/map_map.tz")}"')
        self.assertTrue(res['success'])
        # Run with parameter 15 and an initial two-entry storage map.
        res = self.i.execute('RUN 15 { Elt "bar" 5 ; Elt "foo" 1 }')
        self.assertTrue(res['success'])
        # Expected storage: each value incremented by the parameter
        # (5 -> 20, 1 -> 16).  Parse it with the type of the actual result,
        # then compare raw Michelson values.
        exp_val_expr = michelson_to_micheline('{ Elt "bar" 20 ; Elt "foo" 16 }')
        exp_val = parse_expression(exp_val_expr, res['result']['storage'].type_expr)
        self.assertEqual(exp_val, res['result']['storage']._val)
|
"""Common stuff for AVM Fritz!Box tests."""
from homeassistant.components import ssdp
from homeassistant.components.fritz.const import DOMAIN
from homeassistant.components.ssdp import ATTR_UPNP_FRIENDLY_NAME, ATTR_UPNP_UDN
from homeassistant.const import (
CONF_DEVICES,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
)
# Keys used when describing a discovered/configured device (see
# MOCK_DEVICE_INFO below).
ATTR_HOST = "host"
ATTR_NEW_SERIAL_NUMBER = "NewSerialNumber"

# Minimal integration configuration with a single configured box.
MOCK_CONFIG = {
    DOMAIN: {
        CONF_DEVICES: [
            {
                CONF_HOST: "fake_host",
                CONF_PORT: "1234",
                CONF_PASSWORD: "fake_pass",
                CONF_USERNAME: "fake_user",
            }
        ]
    }
}
MOCK_HOST = "fake_host"
# Hostname -> IPv4 address of the devices the mocked box reports.
MOCK_IPS = {"fritz.box": "192.168.178.1", "printer": "192.168.178.2"}
MOCK_MODELNAME = "FRITZ!Box 7530 AX"
MOCK_FIRMWARE = "256.07.29"
MOCK_SERIAL_NUMBER = "fake_serial_number"
MOCK_FIRMWARE_INFO = [True, "1.1.1"]
# Mesh topology fixtures: one master box and one repeater (slave), each
# with a LAN MAC and a Wi-Fi interface MAC.
MOCK_MESH_SSID = "TestSSID"
MOCK_MESH_MASTER_MAC = "1C:ED:6F:12:34:11"
MOCK_MESH_MASTER_WIFI1_MAC = "1C:ED:6F:12:34:12"
MOCK_MESH_SLAVE_MAC = "1C:ED:6F:12:34:21"
MOCK_MESH_SLAVE_WIFI1_MAC = "1C:ED:6F:12:34:22"
MOCK_FB_SERVICES: dict[str, dict] = {
"DeviceInfo1": {
"GetInfo": {
"NewSerialNumber": MOCK_MESH_MASTER_MAC,
"NewName": "TheName",
"NewModelName": MOCK_MODELNAME,
"NewSoftwareVersion": MOCK_FIRMWARE,
"NewUpTime": 2518179,
},
},
"Hosts1": {
"GetGenericHostEntry": [
{
"NewIPAddress": MOCK_IPS["fritz.box"],
"NewAddressSource": "Static",
"NewLeaseTimeRemaining": 0,
"NewMACAddress": MOCK_MESH_MASTER_MAC,
"NewInterfaceType": "",
"NewActive": True,
"NewHostName": "fritz.box",
},
{
"NewIPAddress": MOCK_IPS["printer"],
"NewAddressSource": "DHCP",
"NewLeaseTimeRemaining": 0,
"NewMACAddress": "AA:BB:CC:00:11:22",
"NewInterfaceType": "Ethernet",
"NewActive": True,
"NewHostName": "printer",
},
],
"X_AVM-DE_GetMeshListPath": {},
},
"LANEthernetInterfaceConfig1": {
"GetStatistics": {
"NewBytesSent": 23004321,
"NewBytesReceived": 12045,
},
},
"Layer3Forwarding1": {
"GetDefaultConnectionService": {
"NewDefaultConnectionService": "1.WANPPPConnection.1"
}
},
"UserInterface1": {
"GetInfo": {},
},
"WANCommonIFC1": {
"GetCommonLinkProperties": {
"NewLayer1DownstreamMaxBitRate": 10087000,
"NewLayer1UpstreamMaxBitRate": 2105000,
"NewPhysicalLinkStatus": "Up",
},
"GetAddonInfos": {
"NewByteSendRate": 3438,
"NewByteReceiveRate": 67649,
"NewTotalBytesSent": 1712232562,
"NewTotalBytesReceived": 5221019883,
"NewX_AVM_DE_TotalBytesSent64": 1712232562,
"NeWX_AVM_DE_TotalBytesReceived64": 5221019883,
},
"GetTotalBytesSent": {"NewTotalBytesSent": 1712232562},
"GetTotalBytesReceived": {"NewTotalBytesReceived": 5221019883},
},
"WANCommonInterfaceConfig1": {
"GetCommonLinkProperties": {
"NewWANAccessType": "DSL",
"NewLayer1UpstreamMaxBitRate": 51805000,
"NewLayer1DownstreamMaxBitRate": 318557000,
"NewPhysicalLinkStatus": "Up",
}
},
"WANDSLInterfaceConfig1": {
"GetInfo": {
"NewEnable": True,
"NewStatus": "Up",
"NewDataPath": "Interleaved",
"NewUpstreamCurrRate": 46720,
"NewDownstreamCurrRate": 292030,
"NewUpstreamMaxRate": 51348,
"NewDownstreamMaxRate": 315978,
"NewUpstreamNoiseMargin": 90,
"NewDownstreamNoiseMargin": 80,
"NewUpstreamAttenuation": 70,
"NewDownstreamAttenuation": 120,
"NewATURVendor": "41564d00",
"NewATURCountry": "0400",
"NewUpstreamPower": 500,
"NewDownstreamPower": 500,
}
},
"WANIPConn1": {
"GetStatusInfo": {
"NewConnectionStatus": "Connected",
"NewUptime": 35307,
},
"GetExternalIPAddress": {"NewExternalIPAddress": "1.2.3.4"},
},
"WANPPPConnection1": {
"GetInfo": {
"NewEnable": True,
"NewConnectionStatus": "Connected",
"NewUptime": 57199,
"NewUpstreamMaxBitRate": 46531924,
"NewDownstreamMaxBitRate": 43430530,
"NewExternalIPAddress": "1.2.3.4",
},
"GetPortMappingNumberOfEntries": {},
},
"X_AVM-DE_Homeauto1": {
"GetGenericDeviceInfos": [
{
"NewSwitchIsValid": "VALID",
"NewMultimeterIsValid": "VALID",
"NewTemperatureIsValid": "VALID",
"NewDeviceId": 16,
"NewAIN": "08761 0114116",
"NewDeviceName": "FRITZ!DECT 200 #1",
"NewTemperatureOffset": "0",
"NewSwitchLock": "0",
"NewProductName": "FRITZ!DECT 200",
"NewPresent": "CONNECTED",
"NewMultimeterPower": 1673,
"NewHkrComfortTemperature": "0",
"NewSwitchMode": "AUTO",
"NewManufacturer": "AVM",
"NewMultimeterIsEnabled": "ENABLED",
"NewHkrIsTemperature": "0",
"NewFunctionBitMask": 2944,
"NewTemperatureIsEnabled": "ENABLED",
"NewSwitchState": "ON",
"NewSwitchIsEnabled": "ENABLED",
"NewFirmwareVersion": "03.87",
"NewHkrSetVentilStatus": "CLOSED",
"NewMultimeterEnergy": 5182,
"NewHkrComfortVentilStatus": "CLOSED",
"NewHkrReduceTemperature": "0",
"NewHkrReduceVentilStatus": "CLOSED",
"NewHkrIsEnabled": "DISABLED",
"NewHkrSetTemperature": "0",
"NewTemperatureCelsius": "225",
"NewHkrIsValid": "INVALID",
},
{},
],
},
"X_AVM-DE_HostFilter1": {
"GetWANAccessByIP": {
MOCK_IPS["printer"]: {"NewDisallow": False, "NewWANAccess": "granted"}
}
},
}
MOCK_MESH_DATA = {
"schema_version": "1.9",
"nodes": [
{
"uid": "n-1",
"device_name": "fritz.box",
"device_model": "FRITZ!Box 7530 AX",
"device_manufacturer": "AVM",
"device_firmware_version": "256.07.29",
"device_mac_address": MOCK_MESH_MASTER_MAC,
"is_meshed": True,
"mesh_role": "master",
"meshd_version": "3.13",
"node_interfaces": [
{
"uid": "ni-5",
"name": "LANBridge",
"type": "LAN",
"mac_address": MOCK_MESH_MASTER_MAC,
"blocking_state": "NOT_BLOCKED",
"node_links": [],
},
{
"uid": "ni-30",
"name": "LAN:2",
"type": "LAN",
"mac_address": MOCK_MESH_MASTER_MAC,
"blocking_state": "NOT_BLOCKED",
"node_links": [],
},
{
"uid": "ni-32",
"name": "LAN:3",
"type": "LAN",
"mac_address": MOCK_MESH_MASTER_MAC,
"blocking_state": "NOT_BLOCKED",
"node_links": [],
},
{
"uid": "ni-31",
"name": "LAN:1",
"type": "LAN",
"mac_address": MOCK_MESH_MASTER_MAC,
"blocking_state": "NOT_BLOCKED",
"node_links": [
{
"uid": "nl-78",
"type": "LAN",
"state": "CONNECTED",
"last_connected": 1642872967,
"node_1_uid": "n-1",
"node_2_uid": "n-76",
"node_interface_1_uid": "ni-31",
"node_interface_2_uid": "ni-77",
"max_data_rate_rx": 1000000,
"max_data_rate_tx": 1000000,
"cur_data_rate_rx": 0,
"cur_data_rate_tx": 0,
"cur_availability_rx": 99,
"cur_availability_tx": 99,
}
],
},
{
"uid": "ni-33",
"name": "LAN:4",
"type": "LAN",
"mac_address": MOCK_MESH_MASTER_MAC,
"blocking_state": "NOT_BLOCKED",
"node_links": [],
},
{
"uid": "ni-230",
"name": "AP:2G:0",
"type": "WLAN",
"mac_address": MOCK_MESH_MASTER_WIFI1_MAC,
"blocking_state": "UNKNOWN",
"node_links": [
{
"uid": "nl-219",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618820,
"node_1_uid": "n-1",
"node_2_uid": "n-89",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-90",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 54000,
"cur_data_rate_tx": 65000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 51,
"tx_rsni": 255,
"rx_rcpi": -38,
"tx_rcpi": 255,
},
{
"uid": "nl-168",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1645162418,
"node_1_uid": "n-1",
"node_2_uid": "n-118",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-119",
"max_data_rate_rx": 144400,
"max_data_rate_tx": 144400,
"cur_data_rate_rx": 144400,
"cur_data_rate_tx": 130000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 37,
"tx_rsni": 255,
"rx_rcpi": -52,
"tx_rcpi": 255,
},
{
"uid": "nl-185",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1645273363,
"node_1_uid": "n-1",
"node_2_uid": "n-100",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-99",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 1000,
"cur_data_rate_tx": 1000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 35,
"tx_rsni": 255,
"rx_rcpi": -54,
"tx_rcpi": 255,
},
{
"uid": "nl-166",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618912,
"node_1_uid": "n-1",
"node_2_uid": "n-16",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-15",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 54000,
"cur_data_rate_tx": 65000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 41,
"tx_rsni": 255,
"rx_rcpi": -48,
"tx_rcpi": 255,
},
{
"uid": "nl-239",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618828,
"node_1_uid": "n-1",
"node_2_uid": "n-59",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-58",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 54000,
"cur_data_rate_tx": 65000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 43,
"tx_rsni": 255,
"rx_rcpi": -46,
"tx_rcpi": 255,
},
{
"uid": "nl-173",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1645331764,
"node_1_uid": "n-1",
"node_2_uid": "n-137",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-138",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 72200,
"cur_data_rate_tx": 65000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 38,
"tx_rsni": 255,
"rx_rcpi": -51,
"tx_rcpi": 255,
},
{
"uid": "nl-217",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618833,
"node_1_uid": "n-1",
"node_2_uid": "n-128",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-127",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 54000,
"cur_data_rate_tx": 72200,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 41,
"tx_rsni": 255,
"rx_rcpi": -48,
"tx_rcpi": 255,
},
{
"uid": "nl-198",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618820,
"node_1_uid": "n-1",
"node_2_uid": "n-105",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-106",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 48000,
"cur_data_rate_tx": 58500,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 28,
"tx_rsni": 255,
"rx_rcpi": -61,
"tx_rcpi": 255,
},
{
"uid": "nl-213",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618820,
"node_1_uid": "n-1",
"node_2_uid": "n-111",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-112",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 48000,
"cur_data_rate_tx": 1000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 44,
"tx_rsni": 255,
"rx_rcpi": -45,
"tx_rcpi": 255,
},
{
"uid": "nl-224",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618831,
"node_1_uid": "n-1",
"node_2_uid": "n-197",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-196",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 48000,
"cur_data_rate_tx": 1000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 51,
"tx_rsni": 255,
"rx_rcpi": -38,
"tx_rcpi": 255,
},
{
"uid": "nl-182",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618822,
"node_1_uid": "n-1",
"node_2_uid": "n-56",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-55",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 54000,
"cur_data_rate_tx": 72200,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 34,
"tx_rsni": 255,
"rx_rcpi": -55,
"tx_rcpi": 255,
},
{
"uid": "nl-205",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618820,
"node_1_uid": "n-1",
"node_2_uid": "n-109",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-108",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 54000,
"cur_data_rate_tx": 1000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 43,
"tx_rsni": 255,
"rx_rcpi": -46,
"tx_rcpi": 255,
},
{
"uid": "nl-240",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618827,
"node_1_uid": "n-1",
"node_2_uid": "n-95",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-96",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 48000,
"cur_data_rate_tx": 58500,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 25,
"tx_rsni": 255,
"rx_rcpi": -64,
"tx_rcpi": 255,
},
{
"uid": "nl-146",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1642872967,
"node_1_uid": "n-1",
"node_2_uid": "n-167",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-134",
"max_data_rate_rx": 144400,
"max_data_rate_tx": 144400,
"cur_data_rate_rx": 144400,
"cur_data_rate_tx": 130000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 48,
"tx_rsni": 255,
"rx_rcpi": -41,
"tx_rcpi": 255,
},
{
"uid": "nl-232",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618829,
"node_1_uid": "n-1",
"node_2_uid": "n-18",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-17",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 48000,
"cur_data_rate_tx": 21700,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 22,
"tx_rsni": 255,
"rx_rcpi": -67,
"tx_rcpi": 255,
},
],
"ssid": MOCK_MESH_SSID,
"opmode": "AP",
"security": "WPA2_WPA3_MIXED",
"supported_streams_tx": [
["20 MHz", 2],
["40 MHz", 0],
["80 MHz", 0],
["160 MHz", 0],
["80+80 MHz", 0],
],
"supported_streams_rx": [
["20 MHz", 2],
["40 MHz", 0],
["80 MHz", 0],
["160 MHz", 0],
["80+80 MHz", 0],
],
"current_channel": 13,
"phymodes": ["g", "n", "ax"],
"channel_utilization": 0,
"anpi": -91,
"steering_enabled": True,
"11k_friendly": True,
"11v_friendly": True,
"legacy_friendly": True,
"rrm_compliant": False,
"channel_list": [
{"channel": 1},
{"channel": 2},
{"channel": 3},
{"channel": 4},
{"channel": 5},
{"channel": 6},
{"channel": 7},
{"channel": 8},
{"channel": 9},
{"channel": 10},
{"channel": 11},
{"channel": 12},
{"channel": 13},
],
},
],
},
{
"uid": "n-76",
"device_name": "printer",
"device_model": "",
"device_manufacturer": "",
"device_firmware_version": "",
"device_mac_address": "AA:BB:CC:00:11:22",
"is_meshed": False,
"mesh_role": "unknown",
"meshd_version": "0.0",
"node_interfaces": [
{
"uid": "ni-77",
"name": "eth0",
"type": "LAN",
"mac_address": "AA:BB:CC:00:11:22",
"blocking_state": "UNKNOWN",
"node_links": [
{
"uid": "nl-78",
"type": "LAN",
"state": "CONNECTED",
"last_connected": 1642872967,
"node_1_uid": "n-1",
"node_2_uid": "n-76",
"node_interface_1_uid": "ni-31",
"node_interface_2_uid": "ni-77",
"max_data_rate_rx": 1000000,
"max_data_rate_tx": 1000000,
"cur_data_rate_rx": 0,
"cur_data_rate_tx": 0,
"cur_availability_rx": 99,
"cur_availability_tx": 99,
}
],
}
],
},
{
"uid": "n-167",
"device_name": "fritz-repeater",
"device_model": "FRITZ!Box 7490",
"device_manufacturer": "AVM",
"device_firmware_version": "113.07.29",
"device_mac_address": MOCK_MESH_SLAVE_MAC,
"is_meshed": True,
"mesh_role": "slave",
"meshd_version": "3.13",
"node_interfaces": [
{
"uid": "ni-140",
"name": "LAN:3",
"type": "LAN",
"mac_address": MOCK_MESH_SLAVE_MAC,
"blocking_state": "UNKNOWN",
"node_links": [],
},
{
"uid": "ni-139",
"name": "LAN:4",
"type": "LAN",
"mac_address": MOCK_MESH_SLAVE_MAC,
"blocking_state": "UNKNOWN",
"node_links": [],
},
{
"uid": "ni-141",
"name": "LAN:2",
"type": "LAN",
"mac_address": MOCK_MESH_SLAVE_MAC,
"blocking_state": "UNKNOWN",
"node_links": [],
},
{
"uid": "ni-134",
"name": "UPLINK:2G:0",
"type": "WLAN",
"mac_address": MOCK_MESH_SLAVE_WIFI1_MAC,
"blocking_state": "UNKNOWN",
"node_links": [
{
"uid": "nl-146",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1642872967,
"node_1_uid": "n-1",
"node_2_uid": "n-167",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-134",
"max_data_rate_rx": 144400,
"max_data_rate_tx": 144400,
"cur_data_rate_rx": 144400,
"cur_data_rate_tx": 130000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 48,
"tx_rsni": 255,
"rx_rcpi": -41,
"tx_rcpi": 255,
}
],
"ssid": "",
"opmode": "WDS_REPEATER",
"security": "WPA3PSK",
"supported_streams_tx": [
["20 MHz", 3],
["40 MHz", 3],
["80 MHz", 0],
["160 MHz", 0],
["80+80 MHz", 0],
],
"supported_streams_rx": [
["20 MHz", 3],
["40 MHz", 3],
["80 MHz", 0],
["160 MHz", 0],
["80+80 MHz", 0],
],
"current_channel": 13,
"phymodes": ["b", "g", "n"],
"channel_utilization": 0,
"anpi": 255,
"steering_enabled": True,
"11k_friendly": False,
"11v_friendly": True,
"legacy_friendly": True,
"rrm_compliant": False,
"channel_list": [
{"channel": 1},
{"channel": 2},
{"channel": 3},
{"channel": 4},
{"channel": 5},
{"channel": 6},
{"channel": 7},
{"channel": 8},
{"channel": 9},
{"channel": 10},
{"channel": 11},
{"channel": 12},
{"channel": 13},
],
"client_position": "unknown",
},
{
"uid": "ni-143",
"name": "LANBridge",
"type": "LAN",
"mac_address": MOCK_MESH_SLAVE_MAC,
"blocking_state": "UNKNOWN",
"node_links": [],
},
{
"uid": "ni-142",
"name": "LAN:1",
"type": "LAN",
"mac_address": MOCK_MESH_SLAVE_MAC,
"blocking_state": "UNKNOWN",
"node_links": [],
},
],
},
],
}
# First configured device from MOCK_CONFIG, as entered by the user.
MOCK_USER_DATA = MOCK_CONFIG[DOMAIN][CONF_DEVICES][0]
MOCK_DEVICE_INFO = {
    ATTR_HOST: MOCK_HOST,
    ATTR_NEW_SERIAL_NUMBER: MOCK_SERIAL_NUMBER,
}
# SSDP discovery payload for the mocked box.  Single quotes inside the
# f-string replacement field keep this module importable on Python < 3.12:
# reusing the outer double quote (f"...{MOCK_IPS["fritz.box"]}...") is a
# SyntaxError before PEP 701 (Python 3.12).
MOCK_SSDP_DATA = ssdp.SsdpServiceInfo(
    ssdp_usn="mock_usn",
    ssdp_st="mock_st",
    ssdp_location=f"https://{MOCK_IPS['fritz.box']}:12345/test",
    upnp={
        ATTR_UPNP_FRIENDLY_NAME: "fake_name",
        ATTR_UPNP_UDN: "uuid:only-a-test",
    },
)
# Canned XML login/session response returned by the mocked box.
MOCK_REQUEST = b'<?xml version="1.0" encoding="utf-8"?><SessionInfo><SID>xxxxxxxxxxxxxxxx</SID><Challenge>xxxxxxxx</Challenge><BlockTime>0</BlockTime><Rights><Name>Dial</Name><Access>2</Access><Name>App</Name><Access>2</Access><Name>HomeAuto</Name><Access>2</Access><Name>BoxAdmin</Name><Access>2</Access><Name>Phone</Name><Access>2</Access><Name>NAS</Name><Access>2</Access></Rights><Users><User last="1">FakeFritzUser</User></Users></SessionInfo>\n'
| """Common stuff for AVM Fritz!Box tests."""
from homeassistant.components import ssdp
from homeassistant.components.fritz.const import DOMAIN
from homeassistant.components.ssdp import ATTR_UPNP_FRIENDLY_NAME, ATTR_UPNP_UDN
from homeassistant.const import (
CONF_DEVICES,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
)
ATTR_HOST = "host"
ATTR_NEW_SERIAL_NUMBER = "NewSerialNumber"
MOCK_CONFIG = {
DOMAIN: {
CONF_DEVICES: [
{
CONF_HOST: "fake_host",
CONF_PORT: "1234",
CONF_PASSWORD: "fake_pass",
CONF_USERNAME: "fake_user",
}
]
}
}
MOCK_HOST = "fake_host"
MOCK_IPS = {"fritz.box": "192.168.178.1", "printer": "192.168.178.2"}
MOCK_MODELNAME = "FRITZ!Box 7530 AX"
MOCK_FIRMWARE = "256.07.29"
MOCK_SERIAL_NUMBER = "fake_serial_number"
MOCK_FIRMWARE_INFO = [True, "1.1.1"]
MOCK_MESH_SSID = "TestSSID"
MOCK_MESH_MASTER_MAC = "1C:ED:6F:12:34:11"
MOCK_MESH_MASTER_WIFI1_MAC = "1C:ED:6F:12:34:12"
MOCK_MESH_SLAVE_MAC = "1C:ED:6F:12:34:21"
MOCK_MESH_SLAVE_WIFI1_MAC = "1C:ED:6F:12:34:22"
MOCK_FB_SERVICES: dict[str, dict] = {
"DeviceInfo1": {
"GetInfo": {
"NewSerialNumber": MOCK_MESH_MASTER_MAC,
"NewName": "TheName",
"NewModelName": MOCK_MODELNAME,
"NewSoftwareVersion": MOCK_FIRMWARE,
"NewUpTime": 2518179,
},
},
"Hosts1": {
"GetGenericHostEntry": [
{
"NewIPAddress": MOCK_IPS["fritz.box"],
"NewAddressSource": "Static",
"NewLeaseTimeRemaining": 0,
"NewMACAddress": MOCK_MESH_MASTER_MAC,
"NewInterfaceType": "",
"NewActive": True,
"NewHostName": "fritz.box",
},
{
"NewIPAddress": MOCK_IPS["printer"],
"NewAddressSource": "DHCP",
"NewLeaseTimeRemaining": 0,
"NewMACAddress": "AA:BB:CC:00:11:22",
"NewInterfaceType": "Ethernet",
"NewActive": True,
"NewHostName": "printer",
},
],
"X_AVM-DE_GetMeshListPath": {},
},
"LANEthernetInterfaceConfig1": {
"GetStatistics": {
"NewBytesSent": 23004321,
"NewBytesReceived": 12045,
},
},
"Layer3Forwarding1": {
"GetDefaultConnectionService": {
"NewDefaultConnectionService": "1.WANPPPConnection.1"
}
},
"UserInterface1": {
"GetInfo": {},
},
"WANCommonIFC1": {
"GetCommonLinkProperties": {
"NewLayer1DownstreamMaxBitRate": 10087000,
"NewLayer1UpstreamMaxBitRate": 2105000,
"NewPhysicalLinkStatus": "Up",
},
"GetAddonInfos": {
"NewByteSendRate": 3438,
"NewByteReceiveRate": 67649,
"NewTotalBytesSent": 1712232562,
"NewTotalBytesReceived": 5221019883,
"NewX_AVM_DE_TotalBytesSent64": 1712232562,
"NeWX_AVM_DE_TotalBytesReceived64": 5221019883,
},
"GetTotalBytesSent": {"NewTotalBytesSent": 1712232562},
"GetTotalBytesReceived": {"NewTotalBytesReceived": 5221019883},
},
"WANCommonInterfaceConfig1": {
"GetCommonLinkProperties": {
"NewWANAccessType": "DSL",
"NewLayer1UpstreamMaxBitRate": 51805000,
"NewLayer1DownstreamMaxBitRate": 318557000,
"NewPhysicalLinkStatus": "Up",
}
},
"WANDSLInterfaceConfig1": {
"GetInfo": {
"NewEnable": True,
"NewStatus": "Up",
"NewDataPath": "Interleaved",
"NewUpstreamCurrRate": 46720,
"NewDownstreamCurrRate": 292030,
"NewUpstreamMaxRate": 51348,
"NewDownstreamMaxRate": 315978,
"NewUpstreamNoiseMargin": 90,
"NewDownstreamNoiseMargin": 80,
"NewUpstreamAttenuation": 70,
"NewDownstreamAttenuation": 120,
"NewATURVendor": "41564d00",
"NewATURCountry": "0400",
"NewUpstreamPower": 500,
"NewDownstreamPower": 500,
}
},
"WANIPConn1": {
"GetStatusInfo": {
"NewConnectionStatus": "Connected",
"NewUptime": 35307,
},
"GetExternalIPAddress": {"NewExternalIPAddress": "1.2.3.4"},
},
"WANPPPConnection1": {
"GetInfo": {
"NewEnable": True,
"NewConnectionStatus": "Connected",
"NewUptime": 57199,
"NewUpstreamMaxBitRate": 46531924,
"NewDownstreamMaxBitRate": 43430530,
"NewExternalIPAddress": "1.2.3.4",
},
"GetPortMappingNumberOfEntries": {},
},
"X_AVM-DE_Homeauto1": {
"GetGenericDeviceInfos": [
{
"NewSwitchIsValid": "VALID",
"NewMultimeterIsValid": "VALID",
"NewTemperatureIsValid": "VALID",
"NewDeviceId": 16,
"NewAIN": "08761 0114116",
"NewDeviceName": "FRITZ!DECT 200 #1",
"NewTemperatureOffset": "0",
"NewSwitchLock": "0",
"NewProductName": "FRITZ!DECT 200",
"NewPresent": "CONNECTED",
"NewMultimeterPower": 1673,
"NewHkrComfortTemperature": "0",
"NewSwitchMode": "AUTO",
"NewManufacturer": "AVM",
"NewMultimeterIsEnabled": "ENABLED",
"NewHkrIsTemperature": "0",
"NewFunctionBitMask": 2944,
"NewTemperatureIsEnabled": "ENABLED",
"NewSwitchState": "ON",
"NewSwitchIsEnabled": "ENABLED",
"NewFirmwareVersion": "03.87",
"NewHkrSetVentilStatus": "CLOSED",
"NewMultimeterEnergy": 5182,
"NewHkrComfortVentilStatus": "CLOSED",
"NewHkrReduceTemperature": "0",
"NewHkrReduceVentilStatus": "CLOSED",
"NewHkrIsEnabled": "DISABLED",
"NewHkrSetTemperature": "0",
"NewTemperatureCelsius": "225",
"NewHkrIsValid": "INVALID",
},
{},
],
},
"X_AVM-DE_HostFilter1": {
"GetWANAccessByIP": {
MOCK_IPS["printer"]: {"NewDisallow": False, "NewWANAccess": "granted"}
}
},
}
MOCK_MESH_DATA = {
"schema_version": "1.9",
"nodes": [
{
"uid": "n-1",
"device_name": "fritz.box",
"device_model": "FRITZ!Box 7530 AX",
"device_manufacturer": "AVM",
"device_firmware_version": "256.07.29",
"device_mac_address": MOCK_MESH_MASTER_MAC,
"is_meshed": True,
"mesh_role": "master",
"meshd_version": "3.13",
"node_interfaces": [
{
"uid": "ni-5",
"name": "LANBridge",
"type": "LAN",
"mac_address": MOCK_MESH_MASTER_MAC,
"blocking_state": "NOT_BLOCKED",
"node_links": [],
},
{
"uid": "ni-30",
"name": "LAN:2",
"type": "LAN",
"mac_address": MOCK_MESH_MASTER_MAC,
"blocking_state": "NOT_BLOCKED",
"node_links": [],
},
{
"uid": "ni-32",
"name": "LAN:3",
"type": "LAN",
"mac_address": MOCK_MESH_MASTER_MAC,
"blocking_state": "NOT_BLOCKED",
"node_links": [],
},
{
"uid": "ni-31",
"name": "LAN:1",
"type": "LAN",
"mac_address": MOCK_MESH_MASTER_MAC,
"blocking_state": "NOT_BLOCKED",
"node_links": [
{
"uid": "nl-78",
"type": "LAN",
"state": "CONNECTED",
"last_connected": 1642872967,
"node_1_uid": "n-1",
"node_2_uid": "n-76",
"node_interface_1_uid": "ni-31",
"node_interface_2_uid": "ni-77",
"max_data_rate_rx": 1000000,
"max_data_rate_tx": 1000000,
"cur_data_rate_rx": 0,
"cur_data_rate_tx": 0,
"cur_availability_rx": 99,
"cur_availability_tx": 99,
}
],
},
{
"uid": "ni-33",
"name": "LAN:4",
"type": "LAN",
"mac_address": MOCK_MESH_MASTER_MAC,
"blocking_state": "NOT_BLOCKED",
"node_links": [],
},
{
"uid": "ni-230",
"name": "AP:2G:0",
"type": "WLAN",
"mac_address": MOCK_MESH_MASTER_WIFI1_MAC,
"blocking_state": "UNKNOWN",
"node_links": [
{
"uid": "nl-219",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618820,
"node_1_uid": "n-1",
"node_2_uid": "n-89",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-90",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 54000,
"cur_data_rate_tx": 65000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 51,
"tx_rsni": 255,
"rx_rcpi": -38,
"tx_rcpi": 255,
},
{
"uid": "nl-168",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1645162418,
"node_1_uid": "n-1",
"node_2_uid": "n-118",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-119",
"max_data_rate_rx": 144400,
"max_data_rate_tx": 144400,
"cur_data_rate_rx": 144400,
"cur_data_rate_tx": 130000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 37,
"tx_rsni": 255,
"rx_rcpi": -52,
"tx_rcpi": 255,
},
{
"uid": "nl-185",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1645273363,
"node_1_uid": "n-1",
"node_2_uid": "n-100",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-99",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 1000,
"cur_data_rate_tx": 1000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 35,
"tx_rsni": 255,
"rx_rcpi": -54,
"tx_rcpi": 255,
},
{
"uid": "nl-166",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618912,
"node_1_uid": "n-1",
"node_2_uid": "n-16",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-15",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 54000,
"cur_data_rate_tx": 65000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 41,
"tx_rsni": 255,
"rx_rcpi": -48,
"tx_rcpi": 255,
},
{
"uid": "nl-239",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618828,
"node_1_uid": "n-1",
"node_2_uid": "n-59",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-58",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 54000,
"cur_data_rate_tx": 65000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 43,
"tx_rsni": 255,
"rx_rcpi": -46,
"tx_rcpi": 255,
},
{
"uid": "nl-173",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1645331764,
"node_1_uid": "n-1",
"node_2_uid": "n-137",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-138",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 72200,
"cur_data_rate_tx": 65000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 38,
"tx_rsni": 255,
"rx_rcpi": -51,
"tx_rcpi": 255,
},
{
"uid": "nl-217",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618833,
"node_1_uid": "n-1",
"node_2_uid": "n-128",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-127",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 54000,
"cur_data_rate_tx": 72200,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 41,
"tx_rsni": 255,
"rx_rcpi": -48,
"tx_rcpi": 255,
},
{
"uid": "nl-198",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618820,
"node_1_uid": "n-1",
"node_2_uid": "n-105",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-106",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 48000,
"cur_data_rate_tx": 58500,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 28,
"tx_rsni": 255,
"rx_rcpi": -61,
"tx_rcpi": 255,
},
{
"uid": "nl-213",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618820,
"node_1_uid": "n-1",
"node_2_uid": "n-111",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-112",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 48000,
"cur_data_rate_tx": 1000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 44,
"tx_rsni": 255,
"rx_rcpi": -45,
"tx_rcpi": 255,
},
{
"uid": "nl-224",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618831,
"node_1_uid": "n-1",
"node_2_uid": "n-197",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-196",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 48000,
"cur_data_rate_tx": 1000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 51,
"tx_rsni": 255,
"rx_rcpi": -38,
"tx_rcpi": 255,
},
{
"uid": "nl-182",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618822,
"node_1_uid": "n-1",
"node_2_uid": "n-56",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-55",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 54000,
"cur_data_rate_tx": 72200,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 34,
"tx_rsni": 255,
"rx_rcpi": -55,
"tx_rcpi": 255,
},
{
"uid": "nl-205",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618820,
"node_1_uid": "n-1",
"node_2_uid": "n-109",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-108",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 54000,
"cur_data_rate_tx": 1000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 43,
"tx_rsni": 255,
"rx_rcpi": -46,
"tx_rcpi": 255,
},
{
"uid": "nl-240",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618827,
"node_1_uid": "n-1",
"node_2_uid": "n-95",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-96",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 48000,
"cur_data_rate_tx": 58500,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 25,
"tx_rsni": 255,
"rx_rcpi": -64,
"tx_rcpi": 255,
},
{
"uid": "nl-146",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1642872967,
"node_1_uid": "n-1",
"node_2_uid": "n-167",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-134",
"max_data_rate_rx": 144400,
"max_data_rate_tx": 144400,
"cur_data_rate_rx": 144400,
"cur_data_rate_tx": 130000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 48,
"tx_rsni": 255,
"rx_rcpi": -41,
"tx_rcpi": 255,
},
{
"uid": "nl-232",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1644618829,
"node_1_uid": "n-1",
"node_2_uid": "n-18",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-17",
"max_data_rate_rx": 72200,
"max_data_rate_tx": 72200,
"cur_data_rate_rx": 48000,
"cur_data_rate_tx": 21700,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 22,
"tx_rsni": 255,
"rx_rcpi": -67,
"tx_rcpi": 255,
},
],
"ssid": MOCK_MESH_SSID,
"opmode": "AP",
"security": "WPA2_WPA3_MIXED",
"supported_streams_tx": [
["20 MHz", 2],
["40 MHz", 0],
["80 MHz", 0],
["160 MHz", 0],
["80+80 MHz", 0],
],
"supported_streams_rx": [
["20 MHz", 2],
["40 MHz", 0],
["80 MHz", 0],
["160 MHz", 0],
["80+80 MHz", 0],
],
"current_channel": 13,
"phymodes": ["g", "n", "ax"],
"channel_utilization": 0,
"anpi": -91,
"steering_enabled": True,
"11k_friendly": True,
"11v_friendly": True,
"legacy_friendly": True,
"rrm_compliant": False,
"channel_list": [
{"channel": 1},
{"channel": 2},
{"channel": 3},
{"channel": 4},
{"channel": 5},
{"channel": 6},
{"channel": 7},
{"channel": 8},
{"channel": 9},
{"channel": 10},
{"channel": 11},
{"channel": 12},
{"channel": 13},
],
},
],
},
{
"uid": "n-76",
"device_name": "printer",
"device_model": "",
"device_manufacturer": "",
"device_firmware_version": "",
"device_mac_address": "AA:BB:CC:00:11:22",
"is_meshed": False,
"mesh_role": "unknown",
"meshd_version": "0.0",
"node_interfaces": [
{
"uid": "ni-77",
"name": "eth0",
"type": "LAN",
"mac_address": "AA:BB:CC:00:11:22",
"blocking_state": "UNKNOWN",
"node_links": [
{
"uid": "nl-78",
"type": "LAN",
"state": "CONNECTED",
"last_connected": 1642872967,
"node_1_uid": "n-1",
"node_2_uid": "n-76",
"node_interface_1_uid": "ni-31",
"node_interface_2_uid": "ni-77",
"max_data_rate_rx": 1000000,
"max_data_rate_tx": 1000000,
"cur_data_rate_rx": 0,
"cur_data_rate_tx": 0,
"cur_availability_rx": 99,
"cur_availability_tx": 99,
}
],
}
],
},
{
"uid": "n-167",
"device_name": "fritz-repeater",
"device_model": "FRITZ!Box 7490",
"device_manufacturer": "AVM",
"device_firmware_version": "113.07.29",
"device_mac_address": MOCK_MESH_SLAVE_MAC,
"is_meshed": True,
"mesh_role": "slave",
"meshd_version": "3.13",
"node_interfaces": [
{
"uid": "ni-140",
"name": "LAN:3",
"type": "LAN",
"mac_address": MOCK_MESH_SLAVE_MAC,
"blocking_state": "UNKNOWN",
"node_links": [],
},
{
"uid": "ni-139",
"name": "LAN:4",
"type": "LAN",
"mac_address": MOCK_MESH_SLAVE_MAC,
"blocking_state": "UNKNOWN",
"node_links": [],
},
{
"uid": "ni-141",
"name": "LAN:2",
"type": "LAN",
"mac_address": MOCK_MESH_SLAVE_MAC,
"blocking_state": "UNKNOWN",
"node_links": [],
},
{
"uid": "ni-134",
"name": "UPLINK:2G:0",
"type": "WLAN",
"mac_address": MOCK_MESH_SLAVE_WIFI1_MAC,
"blocking_state": "UNKNOWN",
"node_links": [
{
"uid": "nl-146",
"type": "WLAN",
"state": "CONNECTED",
"last_connected": 1642872967,
"node_1_uid": "n-1",
"node_2_uid": "n-167",
"node_interface_1_uid": "ni-230",
"node_interface_2_uid": "ni-134",
"max_data_rate_rx": 144400,
"max_data_rate_tx": 144400,
"cur_data_rate_rx": 144400,
"cur_data_rate_tx": 130000,
"cur_availability_rx": 100,
"cur_availability_tx": 100,
"rx_rsni": 48,
"tx_rsni": 255,
"rx_rcpi": -41,
"tx_rcpi": 255,
}
],
"ssid": "",
"opmode": "WDS_REPEATER",
"security": "WPA3PSK",
"supported_streams_tx": [
["20 MHz", 3],
["40 MHz", 3],
["80 MHz", 0],
["160 MHz", 0],
["80+80 MHz", 0],
],
"supported_streams_rx": [
["20 MHz", 3],
["40 MHz", 3],
["80 MHz", 0],
["160 MHz", 0],
["80+80 MHz", 0],
],
"current_channel": 13,
"phymodes": ["b", "g", "n"],
"channel_utilization": 0,
"anpi": 255,
"steering_enabled": True,
"11k_friendly": False,
"11v_friendly": True,
"legacy_friendly": True,
"rrm_compliant": False,
"channel_list": [
{"channel": 1},
{"channel": 2},
{"channel": 3},
{"channel": 4},
{"channel": 5},
{"channel": 6},
{"channel": 7},
{"channel": 8},
{"channel": 9},
{"channel": 10},
{"channel": 11},
{"channel": 12},
{"channel": 13},
],
"client_position": "unknown",
},
{
"uid": "ni-143",
"name": "LANBridge",
"type": "LAN",
"mac_address": MOCK_MESH_SLAVE_MAC,
"blocking_state": "UNKNOWN",
"node_links": [],
},
{
"uid": "ni-142",
"name": "LAN:1",
"type": "LAN",
"mac_address": MOCK_MESH_SLAVE_MAC,
"blocking_state": "UNKNOWN",
"node_links": [],
},
],
},
],
}
# Convenience alias: the single device entry from the mock config above.
MOCK_USER_DATA = MOCK_CONFIG[DOMAIN][CONF_DEVICES][0]
# Minimal host/serial payload — presumably what discovery hands to the config
# flow; verify against the consuming tests.
MOCK_DEVICE_INFO = {
    ATTR_HOST: MOCK_HOST,
    ATTR_NEW_SERIAL_NUMBER: MOCK_SERIAL_NUMBER,
}
# Fake SSDP discovery result pointing at the mock router's IP.
MOCK_SSDP_DATA = ssdp.SsdpServiceInfo(
    ssdp_usn="mock_usn",
    ssdp_st="mock_st",
    ssdp_location=f"https://{MOCK_IPS['fritz.box']}:12345/test",
    upnp={
        ATTR_UPNP_FRIENDLY_NAME: "fake_name",
        ATTR_UPNP_UDN: "uuid:only-a-test",
    },
)
# Canned XML login-session response (SID/Challenge/Rights). Byte string must
# stay exactly as the real device would emit it, including the trailing newline.
MOCK_REQUEST = b'<?xml version="1.0" encoding="utf-8"?><SessionInfo><SID>xxxxxxxxxxxxxxxx</SID><Challenge>xxxxxxxx</Challenge><BlockTime>0</BlockTime><Rights><Name>Dial</Name><Access>2</Access><Name>App</Name><Access>2</Access><Name>HomeAuto</Name><Access>2</Access><Name>BoxAdmin</Name><Access>2</Access><Name>Phone</Name><Access>2</Access><Name>NAS</Name><Access>2</Access></Rights><Users><User last="1">FakeFritzUser</User></Users></SessionInfo>\n'
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum_dsv as electrum
from electrum_dsv import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum_dsv.bitcoin import COIN, is_address
from electrum_dsv.plugin import run_hook, BasePlugin
from electrum_dsv.i18n import _
from electrum_dsv.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME)
from electrum_dsv.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum_dsv.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum_dsv.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum_dsv.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx)
from electrum_dsv.version import ELECTRUM_VERSION
from electrum_dsv.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum_dsv.exchange_rate import FxThread
from electrum_dsv.simple_config import SimpleConfig
from electrum_dsv.logging import Logger
from electrum_dsv.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum_dsv.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
    from . import ElectrumGui
# Number of attempts for a single Lightning payment (per the name; the
# consuming code is not visible in this chunk — confirm at the use site).
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
    """Compact icon-only button used in the window's status bar.

    Wraps a zero-argument callback so that both a mouse click (which Qt
    delivers with an extra ``checked`` flag) and a Return/Enter key press
    invoke the same function.
    note: this class has a custom stylesheet applied in stylesheet_patcher.py
    """

    def __init__(self, icon, tooltip, func):
        QToolButton.__init__(self)
        self.func = func
        self.setText('')
        self.setIcon(icon)
        self.setIconSize(QSize(25, 25))
        self.setToolTip(tooltip)
        self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        self.setAutoRaise(True)
        self.setMaximumWidth(25)
        self.setCursor(QCursor(Qt.PointingHandCursor))
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        # Activate on Return/Enter exactly like a click.
        if e.key() in (Qt.Key_Return, Qt.Key_Enter):
            self.func()
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # Keep prompting until the entered password verifies, or the
        # user cancels, or the wallet turns out to be unencrypted.
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
            except Exception as e:
                self.show_error(str(e), parent=parent)
            else:
                break
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
    """Main Qt window for one open wallet."""
    # Qt signals used to marshal events from background threads into the
    # GUI thread (Qt queues cross-thread signal emissions).
    payment_request_ok_signal = pyqtSignal()
    payment_request_error_signal = pyqtSignal()
    network_signal = pyqtSignal(str, object)
    #ln_payment_attempt_signal = pyqtSignal(str)
    alias_received_signal = pyqtSignal()
    computing_privkeys_signal = pyqtSignal()
    show_privkeys_signal = pyqtSignal()
    show_error_signal = pyqtSignal(str)
    # BIP70 payment request currently being processed, if any.
    payment_request: Optional[paymentrequest.PaymentRequest]
    def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
        """Build the window for *wallet*: tabs, menus, status bar, keyboard
        shortcuts, network callbacks and the optional update checker.
        Construction order matters: widgets must exist before load_wallet()
        and the network callbacks reference them."""
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        self.config = config = gui_object.config # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        assert wallet, "no wallet"
        self.wallet = wallet
        if wallet.has_lightning():
            self.wallet.config.set_key('show_channels_tab', True)
        self.setup_exception_hook()
        # Handles shared with the daemon / application.
        self.network = gui_object.daemon.network # type: Network
        self.fx = gui_object.daemon.fx # type: FxThread
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        # Per-window state.
        self.cleaned_up = False
        self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
        self.payto_URI = None
        self.checking_accounts = False
        self.qr_window = None
        self.pluginsdialog = None
        self.showing_cert_mismatch_error = False
        self.tl_windows = []
        self.pending_invoice = None
        Logger.__init__(self)
        # Incoming txs queued here are later surfaced as desktop notifications.
        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0
        self.create_status_bar()
        self.need_update = threading.Event()
        self.completions = QStringListModel()
        coincontrol_sb = self.create_coincontrol_statusbar()
        # History/Send/Receive tabs are always present; the rest are optional.
        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        self.channels_tab = self.create_channels_tab()
        tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
        def add_optional_tab(tabs, tab, icon, description, name):
            # Optional tabs remember position/name so toggle_tab() can
            # re-insert them later in a stable order.
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))
        add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
        add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        central_widget = QWidget()
        vbox = QVBoxLayout(central_widget)
        vbox.setContentsMargins(0, 0, 0, 0)
        vbox.addWidget(tabs)
        vbox.addWidget(coincontrol_sb)
        self.setCentralWidget(central_widget)
        if self.config.get("is_maximized"):
            self.showMaximized()
        self.setWindowIcon(read_QIcon("electrum-dsv.png"))
        self.init_menubar()
        # Weak proxy so the shortcut lambdas below don't keep the tab widget
        # (and hence this window) alive after close.
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
        for i in range(wrtabs.count()):
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.show_error_signal.connect(self.show_error)
        self.history_list.setFocus(True)
        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status',
                         'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
                         'on_history', 'channel', 'channels_updated',
                         'payment_failed', 'payment_succeeded',
                         'invoice_status', 'request_status', 'ln_gossip_sync_progress',
                         'cert_mismatch', 'gossip_db_loaded']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            util.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
            # update fee slider in case we missed the callback
            #self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()
        # If the option hasn't been set yet
        if config.get('check_updates') is None:
            choice = self.question(title="Electrum-DSV - " + _("Enable update check"),
                                   msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                   _("Would you like to be notified when there is a newer version of Electrum available?"))
            config.set_key('check_updates', bool(choice), save=True)
        if config.get('check_updates', False):
            # The references to both the thread and the window need to be stored somewhere
            # to prevent GC from getting in our way.
            def on_version_received(v):
                if UpdateCheck.is_newer(v):
                    self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
                    self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                    self.update_check_button.show()
            self._update_check_thread = UpdateCheckThread()
            self._update_check_thread.checked.connect(on_version_received)
            self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
    def on_fx_history(self):
        # Exchange-rate history changed: refresh fiat values in the history
        # model and the address list.
        self.history_model.refresh('fx_history')
        self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
    def push_top_level_window(self, window):
        '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
        parented. This used to be done by explicitly providing the parent
        window, but that isn't something hardware wallet prompts know.'''
        # LIFO stack; the most recent entry wins in top_level_window().
        self.tl_windows.append(window)
    def pop_top_level_window(self, window):
        # Remove *window* from the stack built by push_top_level_window();
        # raises ValueError if it was never pushed.
        self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
    def diagnostic_name(self):
        #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
        # Logger hook: tag this window's log lines with the wallet's name.
        return self.wallet.diagnostic_name()
    def is_hidden(self):
        # A minimized window counts as hidden for tray-toggle purposes.
        return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
    def bring_to_top(self):
        # Unhide and raise above sibling windows.
        self.show()
        self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
    def on_network(self, event, *args):
        # Called from the network thread; re-emit as a Qt signal so the real
        # handling (on_network_qt) runs in the GUI thread.
        self.network_signal.emit(event, args)
    def on_network_qt(self, event, args=None):
        """Dispatch a network event in the GUI thread.

        Every open window receives events for every wallet, so wallet-scoped
        events compare args[0] against self.wallet before acting. The args
        tuple layout differs per event (see each branch).
        """
        # Handle a network message in the GUI thread
        # note: all windows get events from all wallets!
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                self.tx_notification_queue.put(tx)
        elif event == 'on_quotes':
            self.on_fx_quotes()
        elif event == 'on_history':
            self.on_fx_history()
        elif event == 'gossip_db_loaded':
            self.channels_list.gossip_db_loaded.emit(*args)
        elif event == 'channels_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.channels_list.update_rows.emit(*args)
        elif event == 'channel':
            wallet = args[0]
            if wallet == self.wallet:
                self.channels_list.update_single_row.emit(*args)
                self.update_status()
        elif event == 'request_status':
            self.on_request_status(*args)
        elif event == 'invoice_status':
            self.on_invoice_status(*args)
        elif event == 'payment_succeeded':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_succeeded(*args)
        elif event == 'payment_failed':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_failed(*args)
        elif event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
        elif event == 'fee':
            pass
        elif event == 'fee_histogram':
            self.history_model.on_fee_histogram()
        elif event == 'ln_gossip_sync_progress':
            self.update_lightning_icon()
        elif event == 'cert_mismatch':
            self.show_cert_mismatch_error()
        else:
            self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
    @profiler
    def load_wallet(self, wallet: Abstract_Wallet):
        """Attach *wallet* to this window: start its task thread, refresh all
        widgets, restore geometry, show/hide per config, and run plugin hooks.
        Called from __init__ after all widgets exist."""
        wallet.thread = TaskThread(self, self.on_error)
        self.update_recently_visited(wallet.storage.path)
        if wallet.has_lightning():
            util.trigger_callback('channels_updated', wallet)
        self.need_update.set()
        # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
        # update menus
        self.seed_menu.setEnabled(self.wallet.has_seed())
        self.update_lock_icon()
        self.update_buttons_on_seed()
        self.update_console()
        self.clear_receive_tab()
        self.request_list.update()
        self.channels_list.update()
        self.tabs.show()
        self.init_geometry()
        if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
            self.hide()
        else:
            self.show()
        self.watching_only_changed()
        run_hook('load_wallet', wallet, self)
        try:
            # Detect wallet-file corruption early and surface it to the user
            # and the crash reporter.
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-DSV Testnet" if constants.net.TESTNET else "Electrum-DSV"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend doriancoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request doriancoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Doriancoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
    def backup_wallet(self):
        """Ask for a backup directory (persisted in the config) and copy the
        wallet file there via ``wallet.save_backup()``."""
        d = WindowModalDialog(self, _("File Backup"))
        vbox = QVBoxLayout(d)
        grid = QGridLayout()
        backup_help = ""
        backup_dir = self.config.get('backup_dir')
        backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
        msg = _('Please select a backup directory')
        if self.wallet.has_lightning() and self.wallet.lnworker.channels:
            # open channels cannot be copied as-is; warn that the backup
            # only contains static channel backups
            msg += '\n\n' + ' '.join([
                _("Note that lightning channels will be converted to channel backups."),
                _("You cannot use channel backups to perform lightning payments."),
                _("Channel backups can only be used to request your channels to be closed.")
            ])
        self.backup_dir_e = QPushButton(backup_dir)
        self.backup_dir_e.clicked.connect(self.select_backup_dir)
        grid.addWidget(backup_dir_label, 1, 0)
        grid.addWidget(self.backup_dir_e, 1, 1)
        vbox.addLayout(grid)
        vbox.addWidget(WWLabel(msg))
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return  # user cancelled the dialog
        try:
            new_path = self.wallet.save_backup()
        except BaseException as reason:
            self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
            return
        if new_path:
            msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
            self.show_message(msg, title=_("Wallet backup created"))
        else:
            # save_backup() returns a falsy path when no backup_dir is configured
            self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
    def init_menubar(self):
        """Build the main-window menu bar (File / Wallet / View / Tools /
        Help) and keep references to the entries that get toggled later."""
        menubar = QMenuBar()
        # --- File menu -----------------------------------------------------
        file_menu = menubar.addMenu(_("&File"))
        self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
        file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
        file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
        file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
        file_menu.addAction(_("Delete"), self.remove_wallet)
        file_menu.addSeparator()
        file_menu.addAction(_("&Quit"), self.close)
        # --- Wallet menu ---------------------------------------------------
        wallet_menu = menubar.addMenu(_("&Wallet"))
        wallet_menu.addAction(_("&Information"), self.show_wallet_info)
        wallet_menu.addSeparator()
        # these entries are enabled/hidden by watching_only_changed()
        self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
        self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
        self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
        self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
        self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
        self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
        self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
        wallet_menu.addSeparator()
        addresses_menu = wallet_menu.addMenu(_("&Addresses"))
        addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
        labels_menu = wallet_menu.addMenu(_("&Labels"))
        labels_menu.addAction(_("&Import"), self.do_import_labels)
        labels_menu.addAction(_("&Export"), self.do_export_labels)
        history_menu = wallet_menu.addMenu(_("&History"))
        history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
        history_menu.addAction(_("&Summary"), self.history_list.show_summary)
        history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
        history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
        contacts_menu = wallet_menu.addMenu(_("Contacts"))
        contacts_menu.addAction(_("&New"), self.new_contact_dialog)
        contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
        contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
        invoices_menu = wallet_menu.addMenu(_("Invoices"))
        invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
        invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
        requests_menu = wallet_menu.addMenu(_("Requests"))
        requests_menu.addAction(_("Import"), lambda: self.import_requests())
        requests_menu.addAction(_("Export"), lambda: self.export_requests())
        wallet_menu.addSeparator()
        wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
        # --- View menu -----------------------------------------------------
        def add_toggle_action(view_menu, tab):
            # label reflects the tab's current visibility
            is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
            item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
            tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
        view_menu = menubar.addMenu(_("&View"))
        add_toggle_action(view_menu, self.addresses_tab)
        add_toggle_action(view_menu, self.utxo_tab)
        add_toggle_action(view_menu, self.channels_tab)
        add_toggle_action(view_menu, self.contacts_tab)
        add_toggle_action(view_menu, self.console_tab)
        # --- Tools menu ----------------------------------------------------
        tools_menu = menubar.addMenu(_("&Tools"))  # type: QMenu
        preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog)  # type: QAction
        if sys.platform == 'darwin':
            # "Settings"/"Preferences" are all reserved keywords in macOS.
            # preferences_action will get picked up based on name (and put into a standardized location,
            # and given a standard reserved hotkey)
            # Hence, this menu item will be at a "uniform location re macOS processes"
            preferences_action.setMenuRole(QAction.PreferencesRole)  # make sure OS recognizes it as preferences
            # Add another preferences item, to also have a "uniform location for Electrum between different OSes"
            tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
        tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
        tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
        tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
        tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
        tools_menu.addSeparator()
        tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
        tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
        tools_menu.addSeparator()
        paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
        raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
        raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
        raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
        raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
        raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
        self.raw_transaction_menu = raw_transaction_menu
        # let plugins extend the Tools menu
        run_hook('init_menubar_tools', self, tools_menu)
        # --- Help menu -----------------------------------------------------
        help_menu = menubar.addMenu(_("&Help"))
        help_menu.addAction(_("&About"), self.show_about)
        help_menu.addAction(_("&Check for updates"), self.show_update_check)
        help_menu.addAction(_("&Official website"), lambda: webopen("https://doriancoin.org"))
        help_menu.addSeparator()
        help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
        help_menu.addAction(_("&Report Bug"), self.show_report_bug)
        help_menu.addSeparator()
        help_menu.addAction(_("&Donate to server"), self.donate_to_server)
        self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('doriancoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
    def show_about(self):
        """Show the Help -> About dialog."""
        QMessageBox.about(self, "Electrum-DSV",
                          (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
                           _("Electrum's focus is speed, with low resource usage and simplifying Doriancoin.") + " " +
                           _("You do not need to perform regular backups, because your wallet can be "
                             "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                           _("Startup times are instant because it operates in conjunction with high-performance "
                             "servers that handle the most complicated parts of the Doriancoin system.") + "\n\n" +
                           _("Uses icons from the Icons8 icon pack (icons8.com).")))
    def show_update_check(self, version=None):
        """Open the update-check dialog (optionally seeded with a known
        latest *version*)."""
        # keep a reference on the gui object so the dialog is not GC'd
        self.gui_object._update_check = UpdateCheck(latest_version=version)
    def show_report_bug(self):
        """Show the Help -> Report Bug dialog linking to the issue tracker."""
        msg = ' '.join([
            _("Please report any bugs as issues on github:<br/>"),
            f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
            _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
            _("Try to explain not only what the bug is, but how it occurs.")
        ])
        self.show_message(msg, title="Electrum-DSV - " + _("Reporting Bugs"), rich_text=True)
    def notify_transactions(self):
        """Drain the queue of freshly seen transactions and show desktop
        notifications — rate-limited, and only once the wallet is synced."""
        if self.tx_notification_queue.qsize() == 0:
            return
        if not self.wallet.up_to_date:
            return  # no notifications while syncing
        now = time.time()
        rate_limit = 20  # seconds
        if self.tx_notification_last_time + rate_limit > now:
            return
        self.tx_notification_last_time = now
        self.logger.info("Notifying GUI about new transactions")
        txns = []
        while True:
            try:
                txns.append(self.tx_notification_queue.get_nowait())
            except queue.Empty:
                break
        # Combine the transactions if there are at least three
        if len(txns) >= 3:
            total_amount = 0
            for tx in txns:
                tx_wallet_delta = self.wallet.get_wallet_delta(tx)
                if not tx_wallet_delta.is_relevant:
                    continue  # tx does not touch this wallet
                total_amount += tx_wallet_delta.delta
            self.notify(_("{} new transactions: Total amount received in the new transactions {}")
                        .format(len(txns), self.format_amount_and_units(total_amount)))
        else:
            for tx in txns:
                tx_wallet_delta = self.wallet.get_wallet_delta(tx)
                if not tx_wallet_delta.is_relevant:
                    continue
                self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
    def notify(self, message):
        """Show *message* as a system-tray balloon, if a tray is available."""
        if self.tray:
            try:
                # this requires Qt 5.9
                self.tray.showMessage("Electrum-DSV", message, read_QIcon("electrum_dark_icon"), 20000)
            except TypeError:
                # older Qt: fall back to the icon-enum overload
                self.tray.showMessage("Electrum-DSV", message, QSystemTrayIcon.Information, 20000)
    def timer_actions(self):
        """Periodic poll driven by the main-window timer.

        Note this runs in the GUI thread.
        """
        self.request_list.refresh_status()
        if self.need_update.is_set():
            self.need_update.clear()
            self.update_wallet()
        elif not self.wallet.up_to_date:
            # this updates "synchronizing" progress
            self.update_status()
        # resolve aliases
        # FIXME this is a blocking network call that has a timeout of 5 sec
        self.payto_e.resolve()
        self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
# x is in sats
return self.config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
# amount is in sats
text = self.config.format_amount_and_units(amount)
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
    def connect_fields(self, window, btc_e, fiat_e, fee_e):
        """Keep a BTC amount edit and a fiat amount edit in sync.

        Editing either field recomputes the other from the current exchange
        rate; the derived field is tinted blue.  The ``follows`` flag on each
        edit breaks the textChanged feedback loop.  ``fee_e`` (optional) is
        cleared alongside the BTC field and the window fee is refreshed.
        """
        def edit_changed(edit):
            if edit.follows:
                # change originated from us updating the twin field: ignore
                return
            edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
            if rate.is_nan() or amount is None:
                # no usable rate or no amount: blank the derived field(s)
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    btc_e.follows = True
                    btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                    btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    btc_e.follows = False
                    if fee_e:
                        window.update_fee()
                else:
                    fiat_e.follows = True
                    fiat_e.setText(self.fx.ccy_amount_str(
                        amount * Decimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    fiat_e.follows = False
        btc_e.follows = False
        fiat_e.follows = False
        fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
        btc_e.textChanged.connect(partial(edit_changed, btc_e))
        fiat_e.is_last_edited = False
    def update_status(self):
        """Refresh the status-bar text/icon and tray tooltip from the current
        network and wallet state (offline / syncing / lagging / balance)."""
        if not self.wallet:
            return
        if self.network is None:
            text = _("Offline")
            icon = read_QIcon("status_disconnected.png")
        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
            # Server height can be 0 after switching to a new server
            # until we get a headers subscription request response.
            # Display the synchronizing message in that case.
            if not self.wallet.up_to_date or server_height == 0:
                num_sent, num_answered = self.wallet.get_history_sync_state_details()
                text = ("{} ({}/{})"
                        .format(_("Synchronizing..."), num_answered, num_sent))
                icon = read_QIcon("status_waiting.png")
            elif server_lag > 1:
                # our local chain view trails the server by several blocks
                text = _("Server is lagging ({} blocks)").format(server_lag)
                icon = read_QIcon("status_lagging%s.png"%fork_str)
            else:
                # c/u/x: confirmed, unconfirmed, unmatured balances
                c, u, x = self.wallet.get_balance()
                text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
                if u:
                    text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
                if x:
                    text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
                if self.wallet.has_lightning():
                    l = self.wallet.lnworker.get_balance()
                    text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
                # append fiat balance and price
                if self.fx.is_enabled():
                    text += self.fx.get_fiat_status_text(c + u + x,
                        self.base_unit(), self.get_decimal_point()) or ''
                if not self.network.proxy:
                    icon = read_QIcon("status_connected%s.png"%fork_str)
                else:
                    icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
        else:
            if self.network.proxy:
                text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
            else:
                text = _("Not connected")
            icon = read_QIcon("status_disconnected.png")
        self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
        self.balance_label.setText(text)
        if self.status_button:
            self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
    def update_tabs(self, wallet=None):
        """Refresh all wallet-data views.

        *wallet* defaults to this window's wallet; updates signalled for a
        different wallet are ignored.
        """
        if wallet is None:
            wallet = self.wallet
        if wallet != self.wallet:
            return
        self.history_model.refresh('update_tabs')
        self.request_list.update()
        self.address_list.update()
        self.utxo_list.update()
        self.contact_list.update()
        self.invoice_list.update()
        # channels list updates via a queued signal (may be cross-thread)
        self.channels_list.update_rows.emit(wallet)
        self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
    def show_transaction(self, tx, *, tx_desc=None):
        '''Open the transaction dialog for *tx*.

        tx_desc is set only for txs created in the Send tab.
        '''
        # calls the module-level show_transaction() helper (method shadows it)
        show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
    def create_receive_tab(self):
        """Build the Receive tab: request form, address/request/QR views and
        the incoming-payments list."""
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.receive_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        self.receive_message_e = QLineEdit()
        grid.addWidget(QLabel(_('Description')), 0, 0)
        grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
        self.receive_message_e.textChanged.connect(self.update_receive_qr)
        self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
        grid.addWidget(QLabel(_('Requested amount')), 1, 0)
        grid.addWidget(self.receive_amount_e, 1, 1)
        self.receive_amount_e.textChanged.connect(self.update_receive_qr)
        self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_receive_e.setVisible(False)
        grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
        self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
        # NOTE(review): this wires the *send* tab's amount fields; it assumes
        # self.amount_e / self.fiat_send_e already exist, i.e. that
        # create_send_tab() ran before this — confirm call order in __init__.
        self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
        self.expires_combo = QComboBox()
        evl = sorted(pr_expiration_values.items())
        evl_keys = [i[0] for i in evl]
        evl_values = [i[1] for i in evl]
        default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
        try:
            i = evl_keys.index(default_expiry)
        except ValueError:
            i = 0
        self.expires_combo.addItems(evl_values)
        self.expires_combo.setCurrentIndex(i)
        self.expires_combo.setFixedWidth(self.receive_amount_e.width())
        def on_expiry(i):
            # persist the chosen expiry for future requests
            self.config.set_key('request_expiry', evl_keys[i])
        self.expires_combo.currentIndexChanged.connect(on_expiry)
        msg = ' '.join([
            _('Expiration date of your request.'),
            _('This information is seen by the recipient if you send them a signed payment request.'),
            _('Expired requests have to be deleted manually from your list, in order to free the corresponding Doriancoin addresses.'),
            _('The Doriancoin address never expires and will always be part of this Electrum wallet.'),
        ])
        grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
        grid.addWidget(self.expires_combo, 2, 1)
        # read-only label shown instead of the combo for existing requests
        self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(1)
        self.expires_label.setFocusPolicy(Qt.NoFocus)
        self.expires_label.hide()
        grid.addWidget(self.expires_label, 2, 1)
        self.clear_invoice_button = QPushButton(_('Clear'))
        self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
        self.create_invoice_button = QPushButton(_('New Address'))
        self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
        self.create_invoice_button.setToolTip('Create on-chain request')
        self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
        self.receive_buttons = buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_invoice_button)
        buttons.addWidget(self.create_invoice_button)
        if self.wallet.has_lightning():
            # second button for Lightning requests
            self.create_invoice_button.setText(_('New Address'))
            self.create_lightning_invoice_button = QPushButton(_('Lightning'))
            self.create_lightning_invoice_button.setToolTip('Create lightning request')
            self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
            self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
            buttons.addWidget(self.create_lightning_invoice_button)
        grid.addLayout(buttons, 4, 3, 1, 2)
        self.receive_payreq_e = ButtonsTextEdit()
        self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
        self.receive_payreq_e.addCopyButton(self.app)
        self.receive_payreq_e.setReadOnly(True)
        self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
        self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
        self.receive_qr = QRCodeWidget(fixedSize=220)
        self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
        self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
        self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
        self.receive_address_e = ButtonsTextEdit()
        self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
        self.receive_address_e.addCopyButton(self.app)
        self.receive_address_e.setReadOnly(True)
        self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
        qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
        qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
        self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
        self.receive_requests_label = QLabel(_('Incoming payments'))
        from .request_list import RequestList
        self.request_list = RequestList(self)
        receive_tabs = QTabWidget()
        receive_tabs.addTab(self.receive_address_e, _('Address'))
        receive_tabs.addTab(self.receive_payreq_e, _('Request'))
        receive_tabs.addTab(self.receive_qr, _('QR Code'))
        receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
        receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
        receive_tabs_sp = receive_tabs.sizePolicy()
        receive_tabs_sp.setRetainSizeWhenHidden(True)
        receive_tabs.setSizePolicy(receive_tabs_sp)
        def maybe_hide_receive_tabs():
            # only show the tabs once there is a request to display
            receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
        self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
        maybe_hide_receive_tabs()
        # layout
        vbox_g = QVBoxLayout()
        vbox_g.addLayout(grid)
        vbox_g.addStretch()
        hbox = QHBoxLayout()
        hbox.addLayout(vbox_g)
        hbox.addStretch()
        hbox.addWidget(receive_tabs)
        w = QWidget()
        w.searchable_list = self.request_list
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.receive_requests_label)
        vbox.addWidget(self.request_list)
        vbox.setStretchFactor(self.request_list, 1000)
        return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
    def sign_payment_request(self, addr):
        """Sign the payment request for *addr* with the configured alias key,
        when an alias is set, resolved, and its address belongs to us."""
        alias = self.config.get('alias')
        if alias and self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            if alias_addr:
                if self.wallet.is_mine(alias_addr):
                    msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                    password = None
                    if self.wallet.has_keystore_encryption():
                        password = self.password_dialog(msg)
                        if not password:
                            return  # user cancelled the password prompt
                    try:
                        self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                    except Exception as e:
                        self.show_error(repr(e))
                        return
                else:
                    # alias address is not ours: we cannot sign with it
                    return
    def create_invoice(self, is_lightning):
        """Create a receive request — LN invoice or on-chain address — from
        the receive-tab form, select it in the list and copy it."""
        amount = self.receive_amount_e.get_amount()
        message = self.receive_message_e.text()
        expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
        if is_lightning:
            if not self.wallet.lnworker.channels:
                self.show_error(_("You need to open a Lightning channel first."))
                return
            # TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
            key = self.wallet.lnworker.add_request(amount, message, expiry)
        else:
            key = self.create_bitcoin_request(amount, message, expiry)
            if not key:
                return  # user aborted, or request creation failed
            self.address_list.update()
        assert key is not None
        self.request_list.update()
        self.request_list.select_key(key)
        # clear request fields
        self.receive_amount_e.setText('')
        self.receive_message_e.setText('')
        # copy to clipboard
        r = self.wallet.get_request(key)
        content = r.invoice if r.is_lightning() else r.get_address()
        title = _('Invoice') if is_lightning else _('Address')
        self.do_copy(content, title=title)
    def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
        """Create and store an on-chain payment request.

        Returns the receiving address (which doubles as the request key),
        or None if the user aborted.
        """
        addr = self.wallet.get_unused_address()
        if addr is None:
            if not self.wallet.is_deterministic():  # imported wallet
                msg = [
                    _('No more addresses in your wallet.'), ' ',
                    _('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
                    _('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
                    _('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
                ]
                if not self.question(''.join(msg)):
                    return
                addr = self.wallet.get_receiving_address()
            else:  # deterministic wallet
                if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
                    return
                addr = self.wallet.create_new_address(False)
        req = self.wallet.make_payment_request(addr, amount, message, expiration)
        try:
            self.wallet.add_payment_request(req)
        except Exception as e:
            self.logger.exception('Error adding payment request')
            self.show_error(_('Error adding payment request') + ':\n' + repr(e))
        else:
            # only sign when storing the request succeeded
            self.sign_payment_request(addr)
        return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
    def update_receive_qr(self):
        """Re-render the QR widget(s) from the current payment request text."""
        uri = str(self.receive_payreq_e.text())
        if maybe_extract_bolt11_invoice(uri):
            # encode lightning invoices as uppercase so QR encoding can use
            # alphanumeric mode; resulting in smaller QR codes
            uri = uri.upper()
        self.receive_qr.setData(uri)
        if self.qr_window and self.qr_window.isVisible():
            # keep the detached QR window in sync too
            self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
    def create_send_tab(self):
        """Build the Send tab: pay-to / description / amount form, the
        Max/Save/Pay/Clear buttons and the outgoing-payments list."""
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.send_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        from .paytoedit import PayToEdit
        self.amount_e = BTCAmountEdit(self.get_decimal_point)
        self.payto_e = PayToEdit(self)
        self.payto_e.addPasteButton(self.app)
        msg = _('Recipient of the funds.') + '\n\n'\
              + _('You may enter a Doriancoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Doriancoin address)')
        payto_label = HelpLabel(_('Pay to'), msg)
        grid.addWidget(payto_label, 1, 0)
        grid.addWidget(self.payto_e, 1, 1, 1, -1)
        # contact/alias autocompletion for the pay-to field
        completer = QCompleter()
        completer.setCaseSensitivity(False)
        self.payto_e.set_completer(completer)
        completer.setModel(self.completions)
        msg = _('Description of the transaction (not mandatory).') + '\n\n'\
              + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
        description_label = HelpLabel(_('Description'), msg)
        grid.addWidget(description_label, 2, 0)
        self.message_e = FreezableLineEdit()
        self.message_e.setMinimumWidth(700)
        grid.addWidget(self.message_e, 2, 1, 1, -1)
        msg = _('Amount to be sent.') + '\n\n' \
              + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
              + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
              + _('Keyboard shortcut: type "!" to send all your coins.')
        amount_label = HelpLabel(_('Amount'), msg)
        grid.addWidget(amount_label, 3, 0)
        grid.addWidget(self.amount_e, 3, 1)
        self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_send_e.setVisible(False)
        grid.addWidget(self.fiat_send_e, 3, 2)
        # keep the fiat field frozen in lockstep with the BTC field
        self.amount_e.frozen.connect(
            lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
        self.max_button = EnterButton(_("Max"), self.spend_max)
        self.max_button.setFixedWidth(100)
        self.max_button.setCheckable(True)
        grid.addWidget(self.max_button, 3, 3)
        self.save_button = EnterButton(_("Save"), self.do_save_invoice)
        self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
        self.clear_button = EnterButton(_("Clear"), self.do_clear)
        buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_button)
        buttons.addWidget(self.save_button)
        buttons.addWidget(self.send_button)
        grid.addLayout(buttons, 6, 1, 1, 4)
        # "!" shortcut in the amount field triggers spend-max
        self.amount_e.shortcut.connect(self.spend_max)
        def reset_max(text):
            # manual edits cancel "Max" mode
            self.max_button.setChecked(False)
            enable = not bool(text) and not self.amount_e.isReadOnly()
            #self.max_button.setEnabled(enable)
        self.amount_e.textEdited.connect(reset_max)
        self.fiat_send_e.textEdited.connect(reset_max)
        self.set_onchain(False)
        self.invoices_label = QLabel(_('Outgoing payments'))
        from .invoice_list import InvoiceList
        self.invoice_list = InvoiceList(self)
        # layout
        vbox0 = QVBoxLayout()
        vbox0.addLayout(grid)
        hbox = QHBoxLayout()
        hbox.addLayout(vbox0)
        hbox.addStretch(1)
        w = QWidget()
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.invoices_label)
        vbox.addWidget(self.invoice_list)
        vbox.setStretchFactor(self.invoice_list, 1000)
        w.searchable_list = self.invoice_list
        run_hook('create_send_tab', grid)
        return w
    def spend_max(self):
        """Handle the Max button / "!" shortcut: compute the largest spendable
        amount (after fees, including plugin extra fees) and set it."""
        if run_hook('abort_send', self):
            return
        outputs = self.payto_e.get_outputs(True)
        if not outputs:
            return
        make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
            coins=self.get_coins(),
            outputs=outputs,
            fee=fee_est,
            is_sweep=False)
        try:
            try:
                tx = make_tx(None)
            except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
                # Check if we had enough funds excluding fees,
                # if so, still provide opportunity to set lower fees.
                tx = make_tx(0)
        except (MultipleSpendMaxTxOutputs, NotEnoughFunds) as e:
            self.max_button.setChecked(False)
            self.show_error(str(e))
            return
        self.max_button.setChecked(True)
        amount = tx.output_value()
        # plugins (e.g. trustedcoin) may add an extra fee on top
        __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
        amount_after_all_fees = amount - x_fee_amount
        self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
    """Refresh the payto autocompletion list from the contact book."""
    payto_strings = []
    for contact_key in self.contacts.keys():
        payto_strings.append(self.get_contact_payto(contact_key))
    self.completions.setStringList(payto_strings)
@protected
def protect(self, func, args, password):
    # Generic password-protected trampoline: the @protected decorator
    # prompts for the wallet password (if any) and passes it in here;
    # we then forward it to *func* as the last positional argument.
    return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
    """Collect outputs either from an active BIP70 payment request
    or, failing that, from the 'Pay to' field."""
    pr = self.payment_request
    if pr:
        return pr.get_outputs()
    return self.payto_e.get_outputs(self.max_button.isChecked())
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
    """Validate onchain outputs; show an error dialog and return True on problems.

    Returns False when all outputs are well-formed.
    """
    if not outputs:
        self.show_error(_('No outputs'))
        return True
    for output in outputs:
        if output.scriptpubkey is None:
            self.show_error(_('Doriancoin Address is None'))
            return True
        if output.value is None:
            self.show_error(_('Invalid Amount'))
            return True
    return False  # all outputs are valid
def check_send_tab_payto_line_and_show_errors(self) -> bool:
    """Returns whether there are errors.
    Also shows error dialog to user if so.

    Checks, in order: BIP70 request expiry, parse errors in the
    'Pay to' field, and unvalidated (non-DNSSEC) aliases.
    """
    pr = self.payment_request
    if pr:
        if pr.has_expired():
            self.show_error(_('Payment request has expired'))
            return True
    if not pr:
        errors = self.payto_e.get_errors()
        if errors:
            # single-line errors get a compact message; otherwise list all lines
            if len(errors) == 1 and not errors[0].is_multiline:
                err = errors[0]
                self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
                                  f"{err.line_content[:40]}...\n\n"
                                  f"{err.exc!r}")
            else:
                self.show_warning(_("Invalid Lines found:") + "\n\n" +
                                  '\n'.join([_("Line #") +
                                             f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
                                             for err in errors]))
            return True
        # alias resolved but not validated via DNSSEC: warn, let the user decide
        if self.payto_e.is_alias and self.payto_e.validated is False:
            alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "{}" could not be validated via an additional '
                    'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
            msg += _('Do you wish to continue?')
            if not self.question(msg):
                return True
    return False  # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
    """Ask for confirmation, save the pending invoice, and pay it over LN.

    The actual payment runs on the wallet's worker thread.
    """
    if amount_msat is None:
        raise Exception("missing amount for LN invoice")
    amount_sat = Decimal(amount_msat) / 1000
    # FIXME this is currently lying to user as we truncate to satoshis
    msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
    if not self.question(msg):
        return
    self.save_pending_invoice()
    attempts = LN_NUM_PAYMENT_ATTEMPTS
    def task():
        # runs in the wallet worker thread, not the GUI thread
        self.wallet.lnworker.pay(invoice, amount_msat=amount_msat, attempts=attempts)
    self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
    """Handle a receive-request status change event for this wallet."""
    # ignore events for other wallets or for unknown request keys
    if wallet != self.wallet or key not in self.wallet.receive_requests:
        return
    if status == PR_PAID:
        self.notify(_('Payment received') + '\n' + key)
        self.need_update.set()
def on_invoice_status(self, wallet, key):
    """Refresh the invoice-list entry for *key* after a status change."""
    if wallet != self.wallet:
        return
    invoice = self.wallet.get_invoice(key)
    if invoice is None:
        return
    self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
    """Notify the user that an outgoing payment completed."""
    label = self.wallet.get_label(key)
    self.notify(_('Payment succeeded') + '\n\n' + label)
    self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
    """Show the failure reason for an outgoing payment."""
    self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
    """Build an invoice object from the current state of the send tab.

    Returns an LNInvoice for lightning payments, an onchain invoice
    otherwise, or None (after showing an error) if the form is invalid.
    """
    if self.check_send_tab_payto_line_and_show_errors():
        return
    if not self._is_onchain:
        # lightning path: the payto field carries a bech32 LN invoice
        invoice_str = self.payto_e.lightning_invoice
        if not invoice_str:
            return
        if not self.wallet.has_lightning():
            self.show_error(_('Lightning is disabled'))
            return
        invoice = LNInvoice.from_bech32(invoice_str)
        if invoice.get_amount_msat() is None:
            # amountless invoice: take the amount from the amount field
            amount_sat = self.amount_e.get_amount()
            if amount_sat:
                invoice.amount_msat = int(amount_sat * 1000)
            else:
                self.show_error(_('No amount'))
                return
        return invoice
    else:
        outputs = self.read_outputs()
        if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
            return
        message = self.message_e.text()
        return self.wallet.create_invoice(
            outputs=outputs,
            message=message,
            pr=self.payment_request,
            URI=self.payto_URI)
def do_save_invoice(self):
    """Parse the send tab into an invoice and save it without paying."""
    invoice = self.read_invoice()
    self.pending_invoice = invoice
    if not invoice:
        return
    self.save_pending_invoice()
def save_pending_invoice(self):
    """Persist the pending invoice (if any) and reset the send form."""
    invoice = self.pending_invoice
    if not invoice:
        return
    self.do_clear()
    self.wallet.save_invoice(invoice)
    self.invoice_list.update()
    self.pending_invoice = None
def do_pay(self):
    """Read the send tab into an invoice and initiate payment."""
    invoice = self.read_invoice()
    self.pending_invoice = invoice
    if not invoice:
        return
    self.do_pay_invoice(invoice)
def pay_multiple_invoices(self, invoices):
    """Batch-pay several onchain invoices with a single transaction."""
    combined_outputs = []
    for inv in invoices:
        combined_outputs.extend(inv.outputs)
    self.pay_onchain_dialog(self.get_coins(), combined_outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
    """Dispatch payment of *invoice* according to its type (LN vs onchain)."""
    invoice_type = invoice.type
    if invoice_type == PR_TYPE_LN:
        assert isinstance(invoice, LNInvoice)
        self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
        return
    if invoice_type == PR_TYPE_ONCHAIN:
        assert isinstance(invoice, OnchainInvoice)
        self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
        return
    raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
    """Return the coins to spend: the user's manual coin-control
    selection if one is active, otherwise all spendable coins."""
    manual_selection = self.get_manually_selected_coins()
    if manual_selection is not None:
        return manual_selection
    return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
    """Return the user's coin-control selection.

    None means coin control is not in use; an empty sequence means the
    user explicitly selected nothing.
    """
    spend_list = self.utxo_list.get_spend_list()
    return spend_list
def pay_onchain_dialog(
        self, inputs: Sequence[PartialTxInput],
        outputs: List[PartialTxOutput], *,
        external_keypairs=None) -> None:
    """Run the confirm-transaction dialog for an onchain payment and,
    if confirmed, sign and broadcast.

    external_keypairs (if given) means this is a sweep of keys not in
    the wallet.
    """
    # trustedcoin requires this
    if run_hook('abort_send', self):
        return
    is_sweep = bool(external_keypairs)
    make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
        coins=inputs,
        outputs=outputs,
        fee=fee_est,
        is_sweep=is_sweep)
    output_values = [x.value for x in outputs]
    # '!' marks a spend-max output; at most one is allowed
    if output_values.count('!') > 1:
        self.show_error(_("More than one output set to spend max"))
        return
    output_value = '!' if '!' in output_values else sum(output_values)
    d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
    if d.not_enough_funds:
        # Check if we had enough funds excluding fees,
        # if so, still provide opportunity to set lower fees.
        if not d.have_enough_funds_assuming_zero_fees():
            self.show_message(_('Not Enough Funds'))
            return
    # shortcut to advanced preview (after "enough funds" check!)
    if self.config.get('advanced_preview'):
        self.preview_tx_dialog(make_tx=make_tx,
                               external_keypairs=external_keypairs)
        return
    cancelled, is_send, password, tx = d.run()
    if cancelled:
        return
    if is_send:
        self.save_pending_invoice()
        def sign_done(success):
            # only broadcast if signing succeeded
            if success:
                self.broadcast_or_show(tx)
        self.sign_tx_with_password(tx, callback=sign_done, password=password,
                                   external_keypairs=external_keypairs)
    else:
        self.preview_tx_dialog(make_tx=make_tx,
                               external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
    """Open the advanced transaction preview window."""
    dialog = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
                             window=self)
    dialog.show()
def broadcast_or_show(self, tx: Transaction):
    """Broadcast *tx* when it is signed and we are online; otherwise
    fall back to showing it in the transaction dialog."""
    can_broadcast = tx.is_complete() and bool(self.network)
    if can_broadcast:
        self.broadcast_transaction(tx)
        return
    if tx.is_complete() and not self.network:
        # signed but offline: explain why we only show it
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
    self.show_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
    # @protected prompts for the wallet password, then we delegate.
    self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
    '''Sign the transaction in a separate thread. When done, calls
    the callback with a success code of True or False.
    '''
    def on_success(result):
        callback(True)
    def on_failure(exc_info):
        self.on_error(exc_info)
        callback(False)
    # plugins (trustedcoin) may wrap the success handler for cosigning
    on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
    if external_keypairs:
        # can sign directly (sweeping: keys are not in the wallet)
        task = partial(tx.sign, external_keypairs)
    else:
        task = partial(self.wallet.sign_transaction, tx, password)
    msg = _('Signing transaction...')
    WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
    """Broadcast *tx* on a background thread and report the result in the GUI.

    For BIP70 payment requests, also sends the payment message to the
    requestor and logs their ACK.
    """
    def broadcast_thread():
        # non-GUI thread
        pr = self.payment_request
        if pr and pr.has_expired():
            self.payment_request = None
            return False, _("Invoice has expired")
        try:
            self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        except TxBroadcastError as e:
            return False, e.get_message_for_gui()
        except BestEffortRequestFailed as e:
            return False, repr(e)
        # success
        txid = tx.txid()
        if pr:
            # BIP70: notify the merchant and wait (bounded) for their ACK
            self.payment_request = None
            refund_address = self.wallet.get_receiving_address()
            coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            ack_status, ack_msg = fut.result(timeout=20)
            self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
        return True, txid
    # Capture current TL window; override might be removed on return
    parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
    def broadcast_done(result):
        # GUI thread
        if result:
            success, msg = result
            if success:
                parent.show_message(_('Payment sent.') + '\n' + msg)
                self.invoice_list.update()
            else:
                msg = msg or ''
                parent.show_error(msg)
    WaitingDialog(self, _('Broadcasting transaction...'),
                  broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
    """Return a tx factory (fee_estimate -> funding tx) for opening a channel."""
    coins = self.get_coins(nonlocal_only=True)
    def make_tx(fee_est):
        return self.wallet.lnworker.mktx_for_open_channel(coins=coins,
                                                          funding_sat=funding_sat,
                                                          fee_est=fee_est)
    return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
    """Open a lightning channel to the peer in *connect_str*, funding it
    with *funding_sat* satoshis (push_amt pushed to the remote side).
    """
    try:
        extract_nodeid(connect_str)
    except ConnStringFormatError as e:
        self.show_error(str(e))
        return
    # use ConfirmTxDialog
    # we need to know the fee before we broadcast, because the txid is required
    make_tx = self.mktx_for_open_channel(funding_sat)
    d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
    # disable preview button because the user must not broadcast tx before establishment_flow
    d.preview_button.setEnabled(False)
    cancelled, is_send, password, funding_tx = d.run()
    if not is_send:
        return
    if cancelled:
        return
    # read funding_sat from tx; converts '!' to int value
    funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
    def task():
        # runs in the wallet worker thread
        return self.wallet.lnworker.open_channel(connect_str=connect_str,
                                                 funding_tx=funding_tx,
                                                 funding_sat=funding_sat,
                                                 push_amt_sat=push_amt,
                                                 password=password)
    def on_success(args):
        chan, funding_tx = args
        n = chan.constraints.funding_txn_minimum_depth
        message = '\n'.join([
            _('Channel established.'),
            _('Remote peer ID') + ':' + chan.node_id.hex(),
            _('This channel will be usable after {} confirmations').format(n)
        ])
        if not funding_tx.is_complete():
            message += '\n\n' + _('Please sign and broadcast the funding transaction')
        self.show_message(message)
        if not funding_tx.is_complete():
            self.show_transaction(funding_tx)
    def on_failure(exc_info):
        type_, e, traceback = exc_info
        self.show_error(_('Could not open channel: {}').format(repr(e)))
    WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
    """Modal radio-button choice dialog; returns the selected index,
    or None if the dialog was dismissed."""
    # Needed by QtHandler for hardware wallets
    dialog = WindowModalDialog(self.top_level_window())
    clayout = ChoicesLayout(msg, choices)
    vbox = QVBoxLayout(dialog)
    vbox.addLayout(clayout.layout())
    vbox.addLayout(Buttons(OkButton(dialog)))
    if not dialog.exec_():
        return None
    return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
    """Freeze or unfreeze the amount field; the Max button mirrors the inverse."""
    self.amount_e.setFrozen(b)
    self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
    """Switch to the send tab and lock it while a BIP70 request is fetched."""
    self.show_send_tab()
    self.payto_e.is_pr = True
    for widget in (self.payto_e, self.message_e):
        widget.setFrozen(True)
    self.lock_amount(True)
    self.payto_e.setText(_("please wait..."))
    return True
def delete_invoices(self, keys):
    """Delete the given invoices from the wallet and refresh the list."""
    for invoice_key in keys:
        self.wallet.delete_invoice(invoice_key)
    self.invoice_list.update()
def payment_request_ok(self):
    """A verified BIP70 payment request arrived: populate the send tab."""
    pr = self.payment_request
    if not pr:
        return
    key = pr.get_id()
    invoice = self.wallet.get_invoice(key)
    # refuse to prefill a request we have already paid
    if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
        self.show_message("invoice already paid")
        self.do_clear()
        self.payment_request = None
        return
    self.payto_e.is_pr = True
    # green = still valid; expired requests are marked visually
    if not pr.has_expired():
        self.payto_e.setGreen()
    else:
        self.payto_e.setExpired()
    self.payto_e.setText(pr.get_requestor())
    self.amount_e.setAmount(pr.get_amount())
    self.message_e.setText(pr.get_memo())
    # signal to set fee
    self.amount_e.textEdited.emit("")
def payment_request_error(self):
    """Report a failed BIP70 payment request and reset the send tab."""
    pr = self.payment_request
    if not pr:
        return
    self.show_message(pr.error)
    self.payment_request = None
    self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
    """Receive a fetched BIP70 payment request (possibly off the GUI thread)."""
    self.set_onchain(True)
    self.payment_request = request
    # emit a signal so the actual UI update happens on the GUI thread
    if self.payment_request.verify(self.contacts):
        self.payment_request_ok_signal.emit()
    else:
        self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
    """Parse ln invoice, and prepare the send tab for it.

    Raises LnDecodeException if the bech32 string cannot be decoded.
    """
    try:
        lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
    except Exception as e:
        raise LnDecodeException(e) from e
    pubkey = bh2u(lnaddr.pubkey.serialize())
    # the 'd' tag carries the human-readable description, if any
    for k,v in lnaddr.tags:
        if k == 'd':
            description = v
            break
    else:
        description = ''
    self.payto_e.setFrozen(True)
    self.payto_e.setText(pubkey)
    self.message_e.setText(description)
    if lnaddr.get_amount_sat() is not None:
        self.amount_e.setAmount(lnaddr.get_amount_sat())
    #self.amount_e.textEdited.emit("")
    self.set_onchain(False)
def set_onchain(self, b):
    """Record whether the send tab targets an onchain payment.

    The Max (spend-all) button only makes sense for onchain sends.
    """
    self._is_onchain = b
    self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
    """Populate the send tab from a BIP21 payment URI."""
    if not URI:
        return
    try:
        out = util.parse_URI(URI, self.on_pr)
    except InvalidBitcoinURI as e:
        self.show_error(_("Error parsing URI") + f":\n{e}")
        return
    self.show_send_tab()
    self.payto_URI = out
    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    # BIP70: a request URL or a signed request is fetched asynchronously;
    # on_pr (passed above) will be called with the result
    if r or (name and sig):
        self.prepare_for_payment_request()
        return
    address = out.get('address')
    amount = out.get('amount')
    label = out.get('label')
    message = out.get('message')
    # use label as description (not BIP21 compliant)
    if label and not message:
        message = label
    if address:
        self.payto_e.setText(address)
    if message:
        self.message_e.setText(message)
    if amount:
        self.amount_e.setAmount(amount)
        self.amount_e.textEdited.emit("")
def do_clear(self):
    """Reset the send tab to its pristine state."""
    self.max_button.setChecked(False)
    self.payment_request = None
    self.payto_URI = None
    self.payto_e.is_pr = False
    self.set_onchain(False)
    for field in (self.payto_e, self.message_e, self.amount_e):
        field.setText('')
        field.setFrozen(False)
    self.update_status()
    run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
    """Freeze/unfreeze *addrs* in the wallet and refresh the affected views."""
    self.wallet.set_frozen_state_of_addresses(addrs, freeze)
    self.address_list.update()
    self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
    """Freeze/unfreeze individual coins and refresh the UTXO view."""
    self.wallet.set_frozen_state_of_coins(utxos, freeze)
    self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
    """Wrap a searchable list widget (plus optional toolbar) in a tab widget."""
    tab = QWidget()
    # used by do_search() to route filter text to the visible tab
    tab.searchable_list = l
    layout = QVBoxLayout()
    tab.setLayout(layout)
    if toolbar:
        layout.addLayout(toolbar)
    layout.addWidget(l)
    return tab
def create_addresses_tab(self):
    """Build the Addresses tab, with its optional toolbar."""
    from .address_list import AddressList
    address_list = AddressList(self)
    self.address_list = address_list
    toolbar = address_list.create_toolbar(self.config)
    address_list.show_toolbar(bool(self.config.get('show_toolbar_addresses', False)))
    return self.create_list_tab(address_list, toolbar)
def create_utxo_tab(self):
    """Build the Coins (UTXO) tab."""
    from .utxo_list import UTXOList
    utxo_list = UTXOList(self)
    self.utxo_list = utxo_list
    return self.create_list_tab(utxo_list)
def create_contacts_tab(self):
    """Build the Contacts tab."""
    from .contact_list import ContactList
    contact_list = ContactList(self)
    self.contact_list = contact_list
    return self.create_list_tab(contact_list)
def remove_address(self, addr):
    """Ask for confirmation, then delete *addr* from the wallet."""
    if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
        return
    try:
        self.wallet.delete_address(addr)
    except UserFacingException as e:
        self.show_error(str(e))
    else:
        # deletion affects history, addresses and coins views
        self.need_update.set()
        self.clear_receive_tab()
def paytomany(self):
    """Switch the send tab into multi-output mode and explain the format."""
    self.show_send_tab()
    self.payto_e.paytomany()
    instructions = '\n'.join([
        _("Enter a list of outputs in the 'Pay to' field."),
        _('One output per line.'),
        _('Format: address, amount'),
        _('You may load a CSV file using the file icon.'),
    ])
    self.show_message(instructions, title=_('Pay to many'))
def payto_contacts(self, labels):
    """Prefill the send tab with one or many contacts."""
    paytos = [self.get_contact_payto(label) for label in labels]
    self.show_send_tab()
    if len(paytos) == 1:
        # single recipient: fill payto, let the user type the amount
        self.payto_e.setText(paytos[0])
        self.amount_e.setFocus()
    else:
        # multiple recipients: one "payto, 0" line each
        lines = [payto + ", 0" for payto in paytos]
        self.payto_e.setText("\n".join(lines))
        self.payto_e.setFocus()
def set_contact(self, label, address):
    """Add or update a contact; return True on success."""
    if not is_address(address):
        self.show_error(_('Invalid Address'))
        self.contact_list.update()  # displays the original, unchanged value
        return False
    self.contacts[address] = ('address', label)
    self.contact_list.update()
    self.history_list.update()
    self.update_completions()
    return True
def delete_contacts(self, labels):
    """Confirm, then remove the given contacts and refresh dependent views."""
    joined = " + ".join(labels)
    if not self.question(_("Remove {} from your list of contacts?").format(joined)):
        return
    for label in labels:
        self.contacts.pop(label)
    self.history_list.update()
    self.contact_list.update()
    self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
    """Display a modal dialog with the details of an onchain invoice.

    Shows amount, destination(s), description and expiry; for BIP70
    invoices also the requestor/signature status and an Export button.
    """
    amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
    d = WindowModalDialog(self, _("Onchain Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    grid.addWidget(QLabel(amount_str), 1, 1)
    if len(invoice.outputs) == 1:
        grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
        grid.addWidget(QLabel(invoice.get_address()), 2, 1)
    else:
        # multiple outputs: list "address : amount" lines instead
        outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs))
        grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
        grid.addWidget(QLabel(outputs_str), 2, 1)
    grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
    grid.addWidget(QLabel(invoice.message), 3, 1)
    if invoice.exp:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
    if invoice.bip70:
        pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
        pr.verify(self.contacts)
        grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
        grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
        grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
        grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
        def do_export():
            # save the raw BIP70 payment request to a file chosen by the user
            key = pr.get_id()
            name = str(key) + '.bip70'
            fn = getSaveFileName(
                parent=self,
                title=_("Save invoice to file"),
                filename=name,
                filter="*.bip70",
                config=self.config,
            )
            if not fn:
                return
            with open(fn, 'wb') as f:
                # fix: write()'s return value (byte count) was assigned to an
                # unused local; discard it instead
                f.write(pr.raw)
            self.show_message(_('BIP70 invoice saved as {}').format(fn))
        exportButton = EnterButton(_('Export'), do_export)
        buttons = Buttons(exportButton, CloseButton(d))
    else:
        buttons = Buttons(CloseButton(d))
    vbox.addLayout(grid)
    vbox.addLayout(buttons)
    d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
    """Display a modal dialog with the details of a lightning invoice."""
    lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
    d = WindowModalDialog(self, _("Lightning Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
    grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
    grid.addWidget(QLabel(amount_str), 1, 1)
    grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
    grid.addWidget(QLabel(invoice.message), 2, 1)
    grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
    payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
    payhash_e.addCopyButton(self.app)
    payhash_e.setReadOnly(True)
    # fix: payhash_e was previously added to both vbox and grid; the later
    # grid.addWidget() reparented it anyway, so only the grid placement is kept
    grid.addWidget(payhash_e, 3, 1)
    if invoice.exp:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
    vbox.addLayout(grid)
    invoice_e = ShowQRTextEdit(config=self.config)
    invoice_e.addCopyButton(self.app)
    invoice_e.setText(invoice.invoice)
    vbox.addWidget(invoice_e)
    vbox.addLayout(Buttons(CloseButton(d),))
    d.exec_()
def create_console_tab(self):
    """Build the Console tab."""
    from .console import Console
    console = Console()
    self.console = console
    return console
def update_console(self):
    """(Re)populate the console namespace with wallet/network objects
    and wrapped Commands methods."""
    console = self.console
    console.history = self.wallet.db.get("qt-console-history", [])
    console.history_index = len(console.history)
    console.updateNamespace({
        'wallet': self.wallet,
        'network': self.network,
        'plugins': self.gui_object.plugins,
        'window': self,
        'config': self.config,
        'electrum': electrum,
        'daemon': self.gui_object.daemon,
        'util': util,
        'bitcoin': bitcoin,
        'lnutil': lnutil,
    })
    c = commands.Commands(config=self.config,
                          network=self.network,
                          callback=lambda: self.console.set_json(True))
    methods = {}
    def mkfunc(f, method):
        # bind each command to this wallet and to the GUI password dialog
        return lambda *args, **kwargs: f(method,
                                         args,
                                         self.password_dialog,
                                         **{**kwargs, 'wallet': self.wallet})
    for m in dir(c):
        # skip private attributes and namespace collisions
        if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
        methods[m] = mkfunc(c._run, m)
    console.updateNamespace(methods)
def create_status_bar(self):
    """Build the main window status bar: balance label, search box, and
    the update/password/preferences/seed/lightning/network buttons."""
    sb = QStatusBar()
    sb.setFixedHeight(35)
    self.balance_label = QLabel("Loading wallet...")
    self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
    sb.addWidget(self.balance_label)
    self.search_box = QLineEdit()
    self.search_box.textChanged.connect(self.do_search)
    self.search_box.hide()
    sb.addPermanentWidget(self.search_box)
    # hidden until an update notification arrives
    self.update_check_button = QPushButton("")
    self.update_check_button.setFlat(True)
    self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
    self.update_check_button.setIcon(read_QIcon("update.png"))
    self.update_check_button.hide()
    sb.addPermanentWidget(self.update_check_button)
    self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
    sb.addPermanentWidget(self.password_button)
    sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
    self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
    sb.addPermanentWidget(self.seed_button)
    # lightning / network buttons only exist when a network is available
    self.lightning_button = None
    if self.wallet.has_lightning() and self.network:
        self.lightning_button = StatusBarButton(read_QIcon("lightning_disconnected.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
        self.update_lightning_icon()
        sb.addPermanentWidget(self.lightning_button)
    self.status_button = None
    if self.network:
        self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
        sb.addPermanentWidget(self.status_button)
    run_hook('create_status_bar', sb)
    self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
    """Build the (initially hidden) coin-control status bar with its
    message label and Reset button."""
    self.coincontrol_sb = sb = QStatusBar()
    sb.setSizeGripEnabled(False)
    #sb.setFixedHeight(3 * char_width_in_lineedit())
    sb.setStyleSheet('QStatusBar::item {border: None;} '
                     + ColorScheme.GREEN.as_stylesheet(True))
    self.coincontrol_label = QLabel()
    self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
    self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    sb.addWidget(self.coincontrol_label)
    # Reset clears the manual coin selection in the UTXO list
    clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
    clear_cc_button.setStyleSheet("margin-right: 5px;")
    sb.addPermanentWidget(clear_cc_button)
    sb.setVisible(False)
    return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
    """Show *msg* in the coin-control status bar, or hide the bar if falsy."""
    if msg:
        self.coincontrol_label.setText(msg)
        self.coincontrol_sb.setVisible(True)
    else:
        self.coincontrol_label.setText("")
        self.coincontrol_sb.setVisible(False)
def update_lightning_icon(self):
    """Update the status bar LN button with gossip sync progress."""
    if self.lightning_button is None:
        return
    if self.network.lngossip is None:
        return
    # display colorful lightning icon to signal connection
    self.lightning_button.setIcon(read_QIcon("lightning.png"))
    cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
    # self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
    progress_str = "??%"
    if progress_percent is not None:
        progress_str = f"{progress_percent}%"
    if progress_percent and progress_percent >= 100:
        # fully synced: drop the percentage text
        self.lightning_button.setMaximumWidth(25)
        self.lightning_button.setText('')
        self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
    else:
        self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
        self.lightning_button.setText(progress_str)
        self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
                                           "Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
    """Show a locked or unlocked padlock depending on password protection."""
    if self.wallet.has_password():
        self.password_button.setIcon(read_QIcon("lock.png"))
    else:
        self.password_button.setIcon(read_QIcon("unlock.png"))
def update_buttons_on_seed(self):
    """Sync seed/password button visibility with wallet capabilities."""
    wallet = self.wallet
    self.seed_button.setVisible(wallet.has_seed())
    self.password_button.setVisible(wallet.may_have_password())
def change_password_dialog(self):
    """Run the change-password flow, picking the HW or SW dialog variant.

    For hardware wallets with XPUB_PASSWORD storage encryption, the
    password is derived from the device rather than typed by the user.
    """
    from electrum_dsv.storage import StorageEncryptionVersion
    if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
        from .password_dialog import ChangePasswordDialogForHW
        d = ChangePasswordDialogForHW(self, self.wallet)
        ok, encrypt_file = d.run()
        if not ok:
            return
        try:
            hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
        except UserCancelled:
            return
        except BaseException as e:
            self.logger.exception('')
            self.show_error(repr(e))
            return
        # the device-derived password doubles as old and/or new password
        old_password = hw_dev_pw if self.wallet.has_password() else None
        new_password = hw_dev_pw if encrypt_file else None
    else:
        from .password_dialog import ChangePasswordDialogForSW
        d = ChangePasswordDialogForSW(self, self.wallet)
        ok, old_password, new_password, encrypt_file = d.run()
        if not ok:
            return
    try:
        self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
    except InvalidPassword as e:
        self.show_error(str(e))
        return
    except BaseException:
        self.logger.exception('Failed to update password')
        self.show_error(_('Failed to update password'))
        return
    msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
    self.show_message(msg, title=_("Success"))
    self.update_lock_icon()
def toggle_search(self):
    """Show/hide the status bar search box; clear the filter when hiding."""
    box = self.search_box
    box.setHidden(not box.isHidden())
    if box.isHidden():
        self.do_search('')
    else:
        box.setFocus(1)
def do_search(self, t):
    """Apply filter text *t* to the current tab's searchable list, if any."""
    current_tab = self.tabs.currentWidget()
    if hasattr(current_tab, 'searchable_list'):
        current_tab.searchable_list.filter(t)
def new_contact_dialog(self):
    """Modal dialog asking for a new contact's address and name."""
    d = WindowModalDialog(self, _("New Contact"))
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_('New Contact') + ':'))
    grid = QGridLayout()
    line1 = QLineEdit()  # address
    line1.setFixedWidth(32 * char_width_in_lineedit())
    line2 = QLineEdit()  # name
    line2.setFixedWidth(32 * char_width_in_lineedit())
    grid.addWidget(QLabel(_("Address")), 1, 0)
    grid.addWidget(line1, 1, 1)
    grid.addWidget(QLabel(_("Name")), 2, 0)
    grid.addWidget(line2, 2, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if d.exec_():
        # set_contact validates the address and refreshes the views
        self.set_contact(line2.text(), line1.text())
def show_wallet_info(self):
    """Modal 'Wallet Information' dialog: name, type, script type, seed
    availability, keystore details (with master public keys and
    derivation paths) and lightning status."""
    dialog = WindowModalDialog(self, _("Wallet Information"))
    dialog.setMinimumSize(500, 100)
    vbox = QVBoxLayout()
    wallet_type = self.wallet.db.get('wallet_type', '')
    if self.wallet.is_watching_only():
        wallet_type += ' [{}]'.format(_('watching-only'))
    seed_available = _('True') if self.wallet.has_seed() else _('False')
    keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
    grid = QGridLayout()
    basename = os.path.basename(self.wallet.storage.path)
    grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
    grid.addWidget(QLabel(basename), 0, 1)
    grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
    grid.addWidget(QLabel(wallet_type), 1, 1)
    grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
    grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
    grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
    grid.addWidget(QLabel(str(seed_available)), 3, 1)
    if len(keystore_types) <= 1:
        grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
        ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
        grid.addWidget(QLabel(ks_type), 4, 1)
    # lightning
    grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
    if self.wallet.can_have_lightning():
        grid.addWidget(QLabel(_('Enabled')), 5, 1)
        local_nodeid = QLabel(bh2u(self.wallet.lnworker.node_keypair.pubkey))
        local_nodeid.setTextInteractionFlags(Qt.TextSelectableByMouse)
        grid.addWidget(QLabel(_('Lightning Node ID:')), 6, 0)
        grid.addWidget(local_nodeid, 6, 1, 1, 3)
    else:
        grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
        grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
    vbox.addLayout(grid)
    labels_clayout = None
    if self.wallet.is_deterministic():
        # one stacked page per keystore, with an optional selector
        keystores = self.wallet.get_keystores()
        ks_stack = QStackedWidget()
        def select_ks(index):
            ks_stack.setCurrentIndex(index)
        # only show the combobox in case multiple accounts are available
        if len(keystores) > 1:
            def label(idx, ks):
                if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
                    return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
                else:
                    return _("keystore") + f' {idx+1}'
            labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
            on_click = lambda clayout: select_ks(clayout.selected_index())
            labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
            vbox.addLayout(labels_clayout.layout())
        for ks in keystores:
            ks_w = QWidget()
            ks_vbox = QVBoxLayout()
            ks_vbox.setContentsMargins(0, 0, 0, 0)
            ks_w.setLayout(ks_vbox)
            mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
            mpk_text.setMaximumHeight(150)
            mpk_text.addCopyButton(self.app)
            run_hook('show_xpub_button', mpk_text, ks)
            der_path_hbox = QHBoxLayout()
            der_path_hbox.setContentsMargins(0, 0, 0, 0)
            der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
            der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
            der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
            der_path_hbox.addWidget(der_path_text)
            der_path_hbox.addStretch()
            ks_vbox.addWidget(QLabel(_("Master Public Key")))
            ks_vbox.addWidget(mpk_text)
            ks_vbox.addLayout(der_path_hbox)
            ks_stack.addWidget(ks_w)
        select_ks(0)
        vbox.addWidget(ks_stack)
    vbox.addStretch(1)
    btn_export_info = run_hook('wallet_info_buttons', self, dialog)
    btn_close = CloseButton(dialog)
    btns = Buttons(btn_export_info, btn_close)
    vbox.addLayout(btns)
    dialog.setLayout(vbox)
    dialog.exec_()
def remove_wallet(self):
    """Confirm with the user, then delete the wallet file."""
    msg = '\n'.join([
        _('Delete wallet file?'),
        "%s" % self.wallet.storage.path,
        _('If your wallet contains funds, make sure you have saved its seed.'),
    ])
    if self.question(msg):
        self._delete_wallet()
@protected
def _delete_wallet(self, password):
    """Delete the wallet file via the daemon and close this window.

    @protected supplies the wallet password (prompting if needed).
    """
    wallet_path = self.wallet.storage.path
    basename = os.path.basename(wallet_path)
    r = self.gui_object.daemon.delete_wallet(wallet_path)
    self.close()
    if r:
        self.show_error(_("Wallet removed: {}").format(basename))
    else:
        self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
    """Show the wallet seed (and passphrase) after password entry.

    @protected supplies the wallet password (prompting if needed).
    """
    if not self.wallet.has_seed():
        self.show_message(_('This wallet has no seed'))
        return
    keystore = self.wallet.get_keystore()
    try:
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
    except BaseException as e:
        self.show_error(repr(e))
        return
    from .seed_dialog import SeedDialog
    d = SeedDialog(self, seed, passphrase, config=self.config)
    d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
                help_text=None, show_copy_text_btn=False):
    """Display *data* as a QR code in a modal dialog. No-op for empty data."""
    if not data:
        return
    dialog = QRDialog(
        data=data,
        parent=parent or self,
        title=title,
        help_text=help_text,
        show_copy_text_btn=show_copy_text_btn,
        config=self.config,
    )
    dialog.exec_()
@protected
def show_private_key(self, address, password):
    """Export and display the private key for *address* (with QR code).

    @protected supplies the wallet password (prompting if needed).
    """
    if not address:
        return
    try:
        pk = self.wallet.export_private_key(address, password)
    except Exception as e:
        self.logger.exception('')
        self.show_message(repr(e))
        return
    # script type is encoded in the WIF prefix
    xtype = bitcoin.deserialize_privkey(pk)[0]
    d = WindowModalDialog(self, _("Private key"))
    d.setMinimumSize(600, 150)
    vbox = QVBoxLayout()
    vbox.addWidget(QLabel(_("Address") + ': ' + address))
    vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
    vbox.addWidget(QLabel(_("Private key") + ':'))
    keys_e = ShowQRTextEdit(text=pk, config=self.config)
    keys_e.addCopyButton(self.app)
    vbox.addWidget(keys_e)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Doriancoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Doriancoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum_dsv.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
from electrum_dsv import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.pay_to_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = getOpenFileName(
parent=self,
title=_("Select your transaction file"),
filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
config=self.config,
)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_dsv import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
else:
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-dsv-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_('Error')}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(
parent=self,
title=title,
header_layout=header_layout,
ok_label=_('Import'),
allow_multi=True,
config=self.config,
)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_('Can't CPFP'')}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return # fee left empty, treat is as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
"""Returns whether successful."""
# note side-effect: tx is being mutated
assert isinstance(tx, PartialTransaction)
try:
# note: this might download input utxos over network
BlockingWaitingDialog(
self,
_("Adding info to tx, from wallet and network..."),
lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
)
except NetworkException as e:
self.show_error(repr(e))
return False
return True
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
fee = tx.get_fee()
assert fee is not None
tx_label = self.wallet.get_label_for_txid(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
grid.addWidget(feerate_e, 2, 1)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(
tx=tx,
txid=txid,
new_fee_rate=new_fee_rate,
coins=self.get_coins(),
)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_desc=tx_label)
def dscancel_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
fee = tx.get_fee()
assert fee is not None
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Cancel transaction'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Cancel an unconfirmed RBF transaction by double-spending "
"its inputs back to your wallet with a higher fee.")))
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
grid.addWidget(feerate_e, 2, 1)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.dscancel(tx=tx, new_fee_rate=new_fee_rate)
except CannotDoubleSpendTx as e:
self.show_error(str(e))
return
self.show_transaction(new_tx)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
| #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum_dsv as electrum
from electrum_dsv import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum_dsv.bitcoin import COIN, is_address
from electrum_dsv.plugin import run_hook, BasePlugin
from electrum_dsv.i18n import _
from electrum_dsv.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME)
from electrum_dsv.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum_dsv.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum_dsv.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum_dsv.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx)
from electrum_dsv.version import ELECTRUM_VERSION
from electrum_dsv.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum_dsv.exchange_rate import FxThread
from electrum_dsv.simple_config import SimpleConfig
from electrum_dsv.logging import Logger
from electrum_dsv.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum_dsv.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
    """Small flat icon-only button used in the status bar.

    Activates its callback on click or on Return/Enter.
    """
    # note: this class has a custom stylesheet applied in stylesheet_patcher.py

    def __init__(self, icon, tooltip, func):
        QToolButton.__init__(self)
        self.func = func
        self.setText('')
        self.setIcon(icon)
        self.setIconSize(QSize(25, 25))
        self.setToolTip(tooltip)
        self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        self.setAutoRaise(True)
        self.setMaximumWidth(25)
        self.setCursor(QCursor(Qt.PointingHandCursor))
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        """Drop the unwanted PyQt5 "checked" argument and invoke the callback."""
        self.func()

    def keyPressEvent(self, e):
        # Return/Enter triggers the button just like a click would.
        if e.key() in (Qt.Key_Return, Qt.Key_Enter):
            self.func()
def protected(func):
    """Password-request wrapper for wallet-mutating actions.

    The password is passed to the wrapped function as the 'password'
    keyword argument.  None indicates either an unencrypted wallet or
    that the user cancelled the password request.  An empty input is
    passed through as the empty string.
    """
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
            except Exception as e:
                self.show_error(str(e), parent=parent)
            else:
                break
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
    """Main application window for a single open wallet."""
    # Qt signals used to marshal events back onto the GUI thread.
    payment_request_ok_signal = pyqtSignal()
    payment_request_error_signal = pyqtSignal()
    network_signal = pyqtSignal(str, object)
    #ln_payment_attempt_signal = pyqtSignal(str)
    alias_received_signal = pyqtSignal()
    computing_privkeys_signal = pyqtSignal()
    show_privkeys_signal = pyqtSignal()
    show_error_signal = pyqtSignal(str)

    # Currently-active BIP70 payment request, if any (set in __init__).
    payment_request: Optional[paymentrequest.PaymentRequest]
    def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
        """Build the window for *wallet*: state, tabs, menus, shortcuts,
        network callbacks, and the optional update check."""
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        self.config = config = gui_object.config  # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        assert wallet, "no wallet"
        self.wallet = wallet
        if wallet.has_lightning():
            self.wallet.config.set_key('show_channels_tab', True)
        self.setup_exception_hook()
        self.network = gui_object.daemon.network  # type: Network
        self.fx = gui_object.daemon.fx  # type: FxThread
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        # --- per-window state ---
        self.cleaned_up = False
        self.payment_request = None  # type: Optional[paymentrequest.PaymentRequest]
        self.payto_URI = None
        self.checking_accounts = False
        self.qr_window = None
        self.pluginsdialog = None
        self.showing_cert_mismatch_error = False
        self.tl_windows = []  # stack of dialogs for top_level_window()
        self.pending_invoice = None
        Logger.__init__(self)
        # queue of incoming txs waiting to be surfaced as notifications
        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0
        self.create_status_bar()
        self.need_update = threading.Event()
        self.completions = QStringListModel()
        coincontrol_sb = self.create_coincontrol_statusbar()
        # --- tabs ---
        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        self.channels_tab = self.create_channels_tab()
        tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))

        def add_optional_tab(tabs, tab, icon, description, name):
            # Optional tabs remember their icon/position so toggle_tab can
            # re-insert them at the right place later.
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))

        add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
        add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        central_widget = QWidget()
        vbox = QVBoxLayout(central_widget)
        vbox.setContentsMargins(0, 0, 0, 0)
        vbox.addWidget(tabs)
        vbox.addWidget(coincontrol_sb)
        self.setCentralWidget(central_widget)
        if self.config.get("is_maximized"):
            self.showMaximized()
        self.setWindowIcon(read_QIcon("electrum-dsv.png"))
        self.init_menubar()
        # weakref so the shortcut lambdas don't keep the tabs alive
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
        for i in range(wrtabs.count()):
            # i=i default binds the loop variable (avoids late-binding closure bug)
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.show_error_signal.connect(self.show_error)
        self.history_list.setFocus(True)
        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status',
                         'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
                         'on_history', 'channel', 'channels_updated',
                         'payment_failed', 'payment_succeeded',
                         'invoice_status', 'request_status', 'ln_gossip_sync_progress',
                         'cert_mismatch', 'gossip_db_loaded']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects.  Hence...
            util.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
            # update fee slider in case we missed the callback
            #self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()
        # If the option hasn't been set yet
        if config.get('check_updates') is None:
            choice = self.question(title="Electrum-DSV - " + _("Enable update check"),
                                   msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                       _("Would you like to be notified when there is a newer version of Electrum available?"))
            config.set_key('check_updates', bool(choice), save=True)
        if config.get('check_updates', False):
            # The references to both the thread and the window need to be stored somewhere
            # to prevent GC from getting in our way.
            def on_version_received(v):
                if UpdateCheck.is_newer(v):
                    self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
                    self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                    self.update_check_button.show()
            self._update_check_thread = UpdateCheckThread()
            self._update_check_thread.checked.connect(on_version_received)
            self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
    def run_coroutine_from_thread(self, coro, on_result=None):
        """Run *coro* on the network's asyncio loop via the wallet task thread.

        If *on_result* is given it is called (on the task thread) with the
        coroutine's result.  Exceptions are logged and surfaced to the user
        through show_error_signal.
        """
        def task():
            try:
                f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
                r = f.result()  # blocks the task thread until the coroutine finishes
                if on_result:
                    on_result(r)
            except Exception as e:
                self.logger.exception("exception in coro scheduled via window.wallet")
                self.show_error_signal.emit(str(e))
        self.wallet.thread.add(task)
    def on_fx_history(self):
        """Exchange-rate history arrived: refresh fiat history columns."""
        self.history_model.refresh('fx_history')
        self.address_list.update()
    def on_fx_quotes(self):
        """New fiat quotes arrived: refresh status bar, amount edits and history."""
        self.update_status()
        # Refresh edits with the new rate; re-emitting textEdited reruns the
        # fiat<->BTC conversion installed by connect_fields.
        edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
        edit.textEdited.emit(edit.text())
        edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
        edit.textEdited.emit(edit.text())
        # History tab needs updating if it used spot
        if self.fx.history_used_spot:
            self.history_model.refresh('fx_quotes')
            self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
    def push_top_level_window(self, window):
        '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
        parented.  This used to be done by explicitly providing the parent
        window, but that isn't something hardware wallet prompts know.'''
        self.tl_windows.append(window)
    def pop_top_level_window(self, window):
        """Remove *window* from the top-level-window stack (counterpart of push)."""
        self.tl_windows.remove(window)
    def top_level_window(self, test_func=None):
        '''Do the right thing in the presence of tx dialog windows:
        return the most recently pushed top-level window (unless
        *test_func* rejects it) as the parent for new dialogs.'''
        override = self.tl_windows[-1] if self.tl_windows else None
        if override and test_func and not test_func(override):
            override = None  # only override if ok for test_func
        return self.top_level_window_recurse(override, test_func)
    def diagnostic_name(self):
        """Name used to tag this window's log lines (delegates to the wallet)."""
        #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
        return self.wallet.diagnostic_name()
    def is_hidden(self):
        """Return True if the window is minimized or hidden."""
        return self.isMinimized() or self.isHidden()
    def show_or_hide(self):
        """Toggle window visibility (e.g. from the tray icon)."""
        if self.is_hidden():
            self.bring_to_top()
        else:
            self.hide()
    def bring_to_top(self):
        """Unhide the window and raise it above other windows."""
        self.show()
        self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
    def on_network(self, event, *args):
        """Network-thread callback: forward the event to the GUI thread."""
        # Handle in GUI thread
        self.network_signal.emit(event, args)
    def on_network_qt(self, event, args=None):
        """Dispatch a network event on the GUI thread.

        note: all windows get events from all wallets!  Events carrying a
        wallet are filtered against self.wallet before acting.
        """
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                self.tx_notification_queue.put(tx)
        elif event == 'on_quotes':
            self.on_fx_quotes()
        elif event == 'on_history':
            self.on_fx_history()
        elif event == 'gossip_db_loaded':
            self.channels_list.gossip_db_loaded.emit(*args)
        elif event == 'channels_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.channels_list.update_rows.emit(*args)
        elif event == 'channel':
            wallet = args[0]
            if wallet == self.wallet:
                self.channels_list.update_single_row.emit(*args)
                self.update_status()
        elif event == 'request_status':
            self.on_request_status(*args)
        elif event == 'invoice_status':
            self.on_invoice_status(*args)
        elif event == 'payment_succeeded':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_succeeded(*args)
        elif event == 'payment_failed':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_failed(*args)
        elif event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
        elif event == 'fee':
            pass
        elif event == 'fee_histogram':
            self.history_model.on_fee_histogram()
        elif event == 'ln_gossip_sync_progress':
            self.update_lightning_icon()
        elif event == 'cert_mismatch':
            self.show_cert_mismatch_error()
        else:
            self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
    def close_wallet(self):
        """Detach the task thread from the wallet and notify plugins of close."""
        if self.wallet:
            self.logger.info(f'close_wallet {self.wallet.storage.path}')
        self.wallet.thread = None
        run_hook('close_wallet', self.wallet)
    @profiler
    def load_wallet(self, wallet: Abstract_Wallet):
        """Attach *wallet* to this window: start its task thread and refresh the UI."""
        wallet.thread = TaskThread(self, self.on_error)
        self.update_recently_visited(wallet.storage.path)
        if wallet.has_lightning():
            util.trigger_callback('channels_updated', wallet)
        self.need_update.set()
        # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
        # update menus
        self.seed_menu.setEnabled(self.wallet.has_seed())
        self.update_lock_icon()
        self.update_buttons_on_seed()
        self.update_console()
        self.clear_receive_tab()
        self.request_list.update()
        self.channels_list.update()
        self.tabs.show()
        self.init_geometry()
        if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
            self.hide()
        else:
            self.show()
        self.watching_only_changed()
        run_hook('load_wallet', wallet, self)
        try:
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-DSV Testnet" if constants.net.TESTNET else "Electrum-DSV"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
    def warn_if_watching_only(self):
        """Warn the user that a watch-only wallet cannot spend."""
        if self.wallet.is_watching_only():
            msg = ' '.join([
                _("This wallet is watching-only."),
                _("This means you will not be able to spend doriancoins with it."),
                _("Make sure you own the seed phrase or the private keys, before you request doriancoins to be sent to this wallet.")
            ])
            self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Doriancoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
    def open_wallet(self):
        """Prompt for a wallet file and open it in a new window."""
        try:
            wallet_folder = self.get_wallet_folder()
        except FileNotFoundError as e:
            self.show_error(str(e))
            return
        filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
        if not filename:
            return  # dialog cancelled
        self.gui_object.new_window(filename)
    def select_backup_dir(self, b):
        """Prompt for a backup directory and persist the choice in the config."""
        name = self.config.get('backup_dir', '')
        dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
        if dirname:
            self.config.set_key('backup_dir', dirname)
            self.backup_dir_e.setText(dirname)
    def backup_wallet(self):
        """Let the user pick a backup directory and save a copy of the wallet file."""
        d = WindowModalDialog(self, _("File Backup"))
        vbox = QVBoxLayout(d)
        grid = QGridLayout()
        backup_help = ""
        backup_dir = self.config.get('backup_dir')
        backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
        msg = _('Please select a backup directory')
        if self.wallet.has_lightning() and self.wallet.lnworker.channels:
            # lightning channels are not copied verbatim; warn the user
            msg += '\n\n' + ' '.join([
                _("Note that lightning channels will be converted to channel backups."),
                _("You cannot use channel backups to perform lightning payments."),
                _("Channel backups can only be used to request your channels to be closed.")
            ])
        self.backup_dir_e = QPushButton(backup_dir)
        self.backup_dir_e.clicked.connect(self.select_backup_dir)
        grid.addWidget(backup_dir_label, 1, 0)
        grid.addWidget(self.backup_dir_e, 1, 1)
        vbox.addLayout(grid)
        vbox.addWidget(WWLabel(msg))
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return  # dialog cancelled
        try:
            new_path = self.wallet.save_backup()
        except BaseException as reason:
            self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
            return
        if new_path:
            msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
            self.show_message(msg, title=_("Wallet backup created"))
        else:
            self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
    def update_recently_visited(self, filename):
        """Move *filename* to the top of the recently-open list and rebuild the menu."""
        recent = self.config.get('recently_open', [])
        try:
            sorted(recent)  # sanity check: reset the list if it isn't sortable
        except:
            recent = []
        if filename in recent:
            recent.remove(filename)
        recent.insert(0, filename)
        # drop stale entries and cap the list at 5
        recent = [path for path in recent if os.path.exists(path)]
        recent = recent[:5]
        self.config.set_key('recently_open', recent)
        self.recently_visited_menu.clear()
        for i, k in enumerate(sorted(recent)):
            b = os.path.basename(k)
            def loader(k):
                # bind k per-iteration (avoids late-binding closure bug)
                return lambda: self.gui_object.new_window(k)
            self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
        self.recently_visited_menu.setEnabled(len(recent))
    def get_wallet_folder(self):
        """Return the directory containing the current wallet file."""
        return os.path.dirname(os.path.abspath(self.wallet.storage.path))
    def new_wallet(self):
        """Create a fresh wallet file in the wallet folder and open the wizard for it."""
        try:
            wallet_folder = self.get_wallet_folder()
        except FileNotFoundError as e:
            self.show_error(str(e))
            return
        filename = get_new_wallet_name(wallet_folder)
        full_path = os.path.join(wallet_folder, filename)
        self.gui_object.start_new_window(full_path, None)
    def init_menubar(self):
        """Build the window's menu bar: File, Wallet, View, Tools and Help menus."""
        menubar = QMenuBar()
        # --- File ---
        file_menu = menubar.addMenu(_("&File"))
        self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
        file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
        file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
        file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
        file_menu.addAction(_("Delete"), self.remove_wallet)
        file_menu.addSeparator()
        file_menu.addAction(_("&Quit"), self.close)
        # --- Wallet ---
        wallet_menu = menubar.addMenu(_("&Wallet"))
        wallet_menu.addAction(_("&Information"), self.show_wallet_info)
        wallet_menu.addSeparator()
        # these actions are enabled/hidden later by watching_only_changed()
        self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
        self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
        self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
        self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
        self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
        self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
        self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
        wallet_menu.addSeparator()
        addresses_menu = wallet_menu.addMenu(_("&Addresses"))
        addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
        labels_menu = wallet_menu.addMenu(_("&Labels"))
        labels_menu.addAction(_("&Import"), self.do_import_labels)
        labels_menu.addAction(_("&Export"), self.do_export_labels)
        history_menu = wallet_menu.addMenu(_("&History"))
        history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
        history_menu.addAction(_("&Summary"), self.history_list.show_summary)
        history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
        history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
        contacts_menu = wallet_menu.addMenu(_("Contacts"))
        contacts_menu.addAction(_("&New"), self.new_contact_dialog)
        contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
        contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
        invoices_menu = wallet_menu.addMenu(_("Invoices"))
        invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
        invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
        requests_menu = wallet_menu.addMenu(_("Requests"))
        requests_menu.addAction(_("Import"), lambda: self.import_requests())
        requests_menu.addAction(_("Export"), lambda: self.export_requests())
        wallet_menu.addSeparator()
        wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
        # --- View ---
        def add_toggle_action(view_menu, tab):
            # label depends on whether the optional tab is currently visible
            is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
            item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
            tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
        view_menu = menubar.addMenu(_("&View"))
        add_toggle_action(view_menu, self.addresses_tab)
        add_toggle_action(view_menu, self.utxo_tab)
        add_toggle_action(view_menu, self.channels_tab)
        add_toggle_action(view_menu, self.contacts_tab)
        add_toggle_action(view_menu, self.console_tab)
        # --- Tools ---
        tools_menu = menubar.addMenu(_("&Tools"))  # type: QMenu
        preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog)  # type: QAction
        if sys.platform == 'darwin':
            # "Settings"/"Preferences" are all reserved keywords in macOS.
            # preferences_action will get picked up based on name (and put into a standardized location,
            # and given a standard reserved hotkey)
            # Hence, this menu item will be at a "uniform location re macOS processes"
            preferences_action.setMenuRole(QAction.PreferencesRole)  # make sure OS recognizes it as preferences
            # Add another preferences item, to also have a "uniform location for Electrum between different OSes"
            tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
        tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
        tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
        tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
        tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
        tools_menu.addSeparator()
        tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
        tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
        tools_menu.addSeparator()
        paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
        raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
        raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
        raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
        raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
        raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
        self.raw_transaction_menu = raw_transaction_menu
        run_hook('init_menubar_tools', self, tools_menu)
        # --- Help ---
        help_menu = menubar.addMenu(_("&Help"))
        help_menu.addAction(_("&About"), self.show_about)
        help_menu.addAction(_("&Check for updates"), self.show_update_check)
        help_menu.addAction(_("&Official website"), lambda: webopen("https://doriancoin.org"))
        help_menu.addSeparator()
        help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
        help_menu.addAction(_("&Report Bug"), self.show_report_bug)
        help_menu.addSeparator()
        help_menu.addAction(_("&Donate to server"), self.donate_to_server)
        self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('doriancoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
    def show_about(self):
        """Show the About dialog with version and project information."""
        QMessageBox.about(self, "Electrum-DSV",
                          (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
                           _("Electrum's focus is speed, with low resource usage and simplifying Doriancoin.") + " " +
                           _("You do not need to perform regular backups, because your wallet can be "
                             "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                           _("Startup times are instant because it operates in conjunction with high-performance "
                             "servers that handle the most complicated parts of the Doriancoin system.") + "\n\n" +
                           _("Uses icons from the Icons8 icon pack (icons8.com).")))
    def show_update_check(self, version=None):
        """Open the update-check window (kept on gui_object to avoid GC)."""
        self.gui_object._update_check = UpdateCheck(latest_version=version)
    def show_report_bug(self):
        """Show instructions for filing a bug report on the issue tracker."""
        msg = ' '.join([
            _("Please report any bugs as issues on github:<br/>"),
            f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
            _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
            _("Try to explain not only what the bug is, but how it occurs.")
        ])
        self.show_message(msg, title="Electrum-DSV - " + _("Reporting Bugs"), rich_text=True)
    def notify_transactions(self):
        """Drain the incoming-tx queue and show desktop notifications.

        Rate-limited to one notification burst every 20 seconds; suppressed
        entirely while the wallet is still synchronizing.
        """
        if self.tx_notification_queue.qsize() == 0:
            return
        if not self.wallet.up_to_date:
            return  # no notifications while syncing
        now = time.time()
        rate_limit = 20  # seconds
        if self.tx_notification_last_time + rate_limit > now:
            return
        self.tx_notification_last_time = now
        self.logger.info("Notifying GUI about new transactions")
        txns = []
        while True:
            try:
                txns.append(self.tx_notification_queue.get_nowait())
            except queue.Empty:
                break
        # Combine the transactions if there are at least three
        if len(txns) >= 3:
            total_amount = 0
            for tx in txns:
                tx_wallet_delta = self.wallet.get_wallet_delta(tx)
                if not tx_wallet_delta.is_relevant:
                    continue
                total_amount += tx_wallet_delta.delta
            self.notify(_("{} new transactions: Total amount received in the new transactions {}")
                        .format(len(txns), self.format_amount_and_units(total_amount)))
        else:
            for tx in txns:
                tx_wallet_delta = self.wallet.get_wallet_delta(tx)
                if not tx_wallet_delta.is_relevant:
                    continue
                self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
    def notify(self, message):
        """Show *message* as a system-tray balloon notification."""
        if self.tray:
            try:
                # this requires Qt 5.9
                self.tray.showMessage("Electrum-DSV", message, read_QIcon("electrum_dark_icon"), 20000)
            except TypeError:
                # older Qt: fall back to the standard information icon
                self.tray.showMessage("Electrum-DSV", message, QSystemTrayIcon.Information, 20000)
    def timer_actions(self):
        """Periodic housekeeping driven by the GUI timer (runs in the GUI thread)."""
        self.request_list.refresh_status()
        if self.need_update.is_set():
            self.need_update.clear()
            self.update_wallet()
        elif not self.wallet.up_to_date:
            # this updates "synchronizing" progress
            self.update_status()
        # resolve aliases
        # FIXME this is a blocking network call that has a timeout of 5 sec
        self.payto_e.resolve()
        self.notify_transactions()
    def format_amount(self, x, is_diff=False, whitespaces=False):
        """Format a satoshi amount using the configured unit/decimal settings."""
        # x is in sats
        return self.config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
# amount is in sats
text = self.config.format_amount_and_units(amount)
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
    def format_fee_rate(self, fee_rate):
        """Format a fee rate using the configured settings."""
        return self.config.format_fee_rate(fee_rate)
    def get_decimal_point(self):
        """Return the configured decimal point for amount display."""
        return self.config.get_decimal_point()
    def base_unit(self):
        """Return the configured base unit string for amounts."""
        return self.config.get_base_unit()
    def connect_fields(self, window, btc_e, fiat_e, fee_e):
        """Keep a BTC amount edit and its fiat twin in sync.

        Editing either field converts the value through the current exchange
        rate into the other; *fee_e* (may be None) is refreshed when the BTC
        side changes.  The 'follows' flag prevents feedback loops between
        the two edits.
        """
        def edit_changed(edit):
            if edit.follows:
                return  # change originated from this handler; ignore
            edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
            if rate.is_nan() or amount is None:
                # no rate or empty input: clear the other side
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    btc_e.follows = True
                    btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                    btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    btc_e.follows = False
                    if fee_e:
                        window.update_fee()
                else:
                    fiat_e.follows = True
                    fiat_e.setText(self.fx.ccy_amount_str(
                        amount * Decimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    fiat_e.follows = False
        btc_e.follows = False
        fiat_e.follows = False
        fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
        btc_e.textChanged.connect(partial(edit_changed, btc_e))
        fiat_e.is_last_edited = False
    def update_status(self):
        """Refresh the status-bar text/icon and tray tooltip from network and wallet state."""
        if not self.wallet:
            return
        if self.network is None:
            text = _("Offline")
            icon = read_QIcon("status_disconnected.png")
        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
            # Server height can be 0 after switching to a new server
            # until we get a headers subscription request response.
            # Display the synchronizing message in that case.
            if not self.wallet.up_to_date or server_height == 0:
                num_sent, num_answered = self.wallet.get_history_sync_state_details()
                text = ("{} ({}/{})"
                        .format(_("Synchronizing..."), num_answered, num_sent))
                icon = read_QIcon("status_waiting.png")
            elif server_lag > 1:
                text = _("Server is lagging ({} blocks)").format(server_lag)
                icon = read_QIcon("status_lagging%s.png"%fork_str)
            else:
                # fully synced: show balance breakdown
                c, u, x = self.wallet.get_balance()
                text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
                if u:
                    text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
                if x:
                    text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
                if self.wallet.has_lightning():
                    l = self.wallet.lnworker.get_balance()
                    text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
                # append fiat balance and price
                if self.fx.is_enabled():
                    text += self.fx.get_fiat_status_text(c + u + x,
                        self.base_unit(), self.get_decimal_point()) or ''
                if not self.network.proxy:
                    icon = read_QIcon("status_connected%s.png"%fork_str)
                else:
                    icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
        else:
            if self.network.proxy:
                text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
            else:
                text = _("Not connected")
            icon = read_QIcon("status_disconnected.png")
        self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
        self.balance_label.setText(text)
        if self.status_button:
            self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
    def show_transaction(self, tx, *, tx_desc=None):
        '''tx_desc is set only for txs created in the Send tab'''
        # delegates to the module-level show_transaction() dialog helper
        show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
    def create_receive_tab(self):
        """Build and return the Receive tab: request form (description, amount,
        expiry), the address/request/QR views, and the incoming payments list."""
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.receive_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        self.receive_message_e = QLineEdit()
        grid.addWidget(QLabel(_('Description')), 0, 0)
        grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
        self.receive_message_e.textChanged.connect(self.update_receive_qr)
        self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
        grid.addWidget(QLabel(_('Requested amount')), 1, 0)
        grid.addWidget(self.receive_amount_e, 1, 1)
        self.receive_amount_e.textChanged.connect(self.update_receive_qr)
        self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_receive_e.setVisible(False)
        grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
        # keep the coin and fiat amount fields of the receive tab in sync
        self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
        # NOTE(review): this also wires the *send* tab's amount/fiat pair here —
        # looks intentional (both tabs exist by now), but confirm.
        self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
        self.expires_combo = QComboBox()
        evl = sorted(pr_expiration_values.items())
        evl_keys = [i[0] for i in evl]
        evl_values = [i[1] for i in evl]
        default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
        try:
            i = evl_keys.index(default_expiry)
        except ValueError:
            # configured expiry not among the presets: fall back to the first
            i = 0
        self.expires_combo.addItems(evl_values)
        self.expires_combo.setCurrentIndex(i)
        self.expires_combo.setFixedWidth(self.receive_amount_e.width())
        def on_expiry(i):
            # persist the chosen expiry for future requests
            self.config.set_key('request_expiry', evl_keys[i])
        self.expires_combo.currentIndexChanged.connect(on_expiry)
        msg = ' '.join([
            _('Expiration date of your request.'),
            _('This information is seen by the recipient if you send them a signed payment request.'),
            _('Expired requests have to be deleted manually from your list, in order to free the corresponding Doriancoin addresses.'),
            _('The Doriancoin address never expires and will always be part of this Electrum wallet.'),
        ])
        grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
        grid.addWidget(self.expires_combo, 2, 1)
        # read-only twin shown in place of the combo for already-created requests
        self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(1)
        self.expires_label.setFocusPolicy(Qt.NoFocus)
        self.expires_label.hide()
        grid.addWidget(self.expires_label, 2, 1)
        self.clear_invoice_button = QPushButton(_('Clear'))
        self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
        self.create_invoice_button = QPushButton(_('New Address'))
        self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
        self.create_invoice_button.setToolTip('Create on-chain request')
        self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
        self.receive_buttons = buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_invoice_button)
        buttons.addWidget(self.create_invoice_button)
        if self.wallet.has_lightning():
            # a second button for lightning requests
            self.create_invoice_button.setText(_('New Address'))
            self.create_lightning_invoice_button = QPushButton(_('Lightning'))
            self.create_lightning_invoice_button.setToolTip('Create lightning request')
            self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
            self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
            buttons.addWidget(self.create_lightning_invoice_button)
        grid.addLayout(buttons, 4, 3, 1, 2)
        self.receive_payreq_e = ButtonsTextEdit()
        self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
        self.receive_payreq_e.addCopyButton(self.app)
        self.receive_payreq_e.setReadOnly(True)
        self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
        self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
        self.receive_qr = QRCodeWidget(fixedSize=220)
        # clicking the QR pops out the separate QR window; hand cursor on hover
        self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
        self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
        self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
        self.receive_address_e = ButtonsTextEdit()
        self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
        self.receive_address_e.addCopyButton(self.app)
        self.receive_address_e.setReadOnly(True)
        self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
        qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
        qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
        self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
        self.receive_requests_label = QLabel(_('Incoming payments'))
        from .request_list import RequestList
        self.request_list = RequestList(self)
        receive_tabs = QTabWidget()
        receive_tabs.addTab(self.receive_address_e, _('Address'))
        receive_tabs.addTab(self.receive_payreq_e, _('Request'))
        receive_tabs.addTab(self.receive_qr, _('QR Code'))
        receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
        receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
        # keep the layout from jumping when the tabs are hidden
        receive_tabs_sp = receive_tabs.sizePolicy()
        receive_tabs_sp.setRetainSizeWhenHidden(True)
        receive_tabs.setSizePolicy(receive_tabs_sp)
        def maybe_hide_receive_tabs():
            # only show the address/request/QR views once a request exists
            receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
        self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
        maybe_hide_receive_tabs()
        # layout
        vbox_g = QVBoxLayout()
        vbox_g.addLayout(grid)
        vbox_g.addStretch()
        hbox = QHBoxLayout()
        hbox.addLayout(vbox_g)
        hbox.addStretch()
        hbox.addWidget(receive_tabs)
        w = QWidget()
        w.searchable_list = self.request_list
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.receive_requests_label)
        vbox.addWidget(self.request_list)
        vbox.setStretchFactor(self.request_list, 1000)
        return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
    def delete_lightning_payreq(self, payreq_key):
        """Delete a lightning payment request and refresh both payment lists."""
        self.wallet.lnworker.delete_invoice(payreq_key)
        self.request_list.update()
        self.invoice_list.update()
        self.clear_receive_tab()
    def sign_payment_request(self, addr):
        """If an OpenAlias alias is configured and its address belongs to this
        wallet, sign the payment request for *addr* with it (may prompt for
        the wallet password)."""
        alias = self.config.get('alias')
        if alias and self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            if alias_addr:
                if self.wallet.is_mine(alias_addr):
                    msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                    password = None
                    if self.wallet.has_keystore_encryption():
                        password = self.password_dialog(msg)
                        if not password:
                            # user cancelled the password prompt: leave unsigned
                            return
                    try:
                        self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                    except Exception as e:
                        self.show_error(repr(e))
                        return
                else:
                    # alias address is not ours: we cannot sign
                    return
    def create_invoice(self, is_lightning):
        """Create a receive request (lightning invoice or onchain address
        request) from the form fields, select it in the list, and copy it to
        the clipboard."""
        amount = self.receive_amount_e.get_amount()
        message = self.receive_message_e.text()
        expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
        if is_lightning:
            if not self.wallet.lnworker.channels:
                self.show_error(_("You need to open a Lightning channel first."))
                return
            # TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
            key = self.wallet.lnworker.add_request(amount, message, expiry)
        else:
            key = self.create_bitcoin_request(amount, message, expiry)
            if not key:
                # user aborted in create_bitcoin_request
                return
            self.address_list.update()
        assert key is not None
        self.request_list.update()
        self.request_list.select_key(key)
        # clear request fields
        self.receive_amount_e.setText('')
        self.receive_message_e.setText('')
        # copy to clipboard
        r = self.wallet.get_request(key)
        content = r.invoice if r.is_lightning() else r.get_address()
        title = _('Invoice') if is_lightning else _('Address')
        self.do_copy(content, title=title)
    def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
        """Create an onchain payment request.

        Returns the receiving address used as the request key, or None if the
        user aborted one of the confirmation prompts.
        """
        addr = self.wallet.get_unused_address()
        if addr is None:
            if not self.wallet.is_deterministic():  # imported wallet
                msg = [
                    _('No more addresses in your wallet.'), ' ',
                    _('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
                    _('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
                    _('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
                   ]
                if not self.question(''.join(msg)):
                    return
                addr = self.wallet.get_receiving_address()
            else:  # deterministic wallet
                if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
                    return
                addr = self.wallet.create_new_address(False)
        req = self.wallet.make_payment_request(addr, amount, message, expiration)
        try:
            self.wallet.add_payment_request(req)
        except Exception as e:
            self.logger.exception('Error adding payment request')
            self.show_error(_('Error adding payment request') + ':\n' + repr(e))
        else:
            # sign with the configured alias, when possible
            self.sign_payment_request(addr)
        return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
    def toggle_qr_window(self):
        """Show or hide the detached QR window, remembering its geometry
        across toggles so it reopens where the user left it."""
        from . import qrwindow
        if not self.qr_window:
            # first use: create the window and remember its initial geometry
            self.qr_window = qrwindow.QR_Window(self)
            self.qr_window.setVisible(True)
            self.qr_window_geometry = self.qr_window.geometry()
        else:
            if not self.qr_window.isVisible():
                self.qr_window.setVisible(True)
                self.qr_window.setGeometry(self.qr_window_geometry)
            else:
                # save geometry before hiding so we can restore it later
                self.qr_window_geometry = self.qr_window.geometry()
                self.qr_window.setVisible(False)
        self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
    def create_send_tab(self):
        """Build and return the Send tab: pay-to/description/amount form,
        Max/Save/Pay/Clear buttons, and the outgoing payments list."""
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.send_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        from .paytoedit import PayToEdit
        self.amount_e = BTCAmountEdit(self.get_decimal_point)
        self.payto_e = PayToEdit(self)
        self.payto_e.addPasteButton(self.app)
        msg = _('Recipient of the funds.') + '\n\n'\
              + _('You may enter a Doriancoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Doriancoin address)')
        payto_label = HelpLabel(_('Pay to'), msg)
        grid.addWidget(payto_label, 1, 0)
        grid.addWidget(self.payto_e, 1, 1, 1, -1)
        # contact auto-completion for the pay-to field
        completer = QCompleter()
        completer.setCaseSensitivity(False)
        self.payto_e.set_completer(completer)
        completer.setModel(self.completions)
        msg = _('Description of the transaction (not mandatory).') + '\n\n'\
              + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
        description_label = HelpLabel(_('Description'), msg)
        grid.addWidget(description_label, 2, 0)
        self.message_e = FreezableLineEdit()
        self.message_e.setMinimumWidth(700)
        grid.addWidget(self.message_e, 2, 1, 1, -1)
        msg = _('Amount to be sent.') + '\n\n' \
              + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
              + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
              + _('Keyboard shortcut: type "!" to send all your coins.')
        amount_label = HelpLabel(_('Amount'), msg)
        grid.addWidget(amount_label, 3, 0)
        grid.addWidget(self.amount_e, 3, 1)
        self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_send_e.setVisible(False)
        grid.addWidget(self.fiat_send_e, 3, 2)
        # the fiat field mirrors the frozen state of the coin amount field
        self.amount_e.frozen.connect(
            lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
        self.max_button = EnterButton(_("Max"), self.spend_max)
        self.max_button.setFixedWidth(100)
        self.max_button.setCheckable(True)
        grid.addWidget(self.max_button, 3, 3)
        self.save_button = EnterButton(_("Save"), self.do_save_invoice)
        self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
        self.clear_button = EnterButton(_("Clear"), self.do_clear)
        buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_button)
        buttons.addWidget(self.save_button)
        buttons.addWidget(self.send_button)
        grid.addLayout(buttons, 6, 1, 1, 4)
        # "!" shortcut in the amount field triggers spend-max
        self.amount_e.shortcut.connect(self.spend_max)
        def reset_max(text):
            # typing an explicit amount cancels "Max" mode
            self.max_button.setChecked(False)
            enable = not bool(text) and not self.amount_e.isReadOnly()
            #self.max_button.setEnabled(enable)
        self.amount_e.textEdited.connect(reset_max)
        self.fiat_send_e.textEdited.connect(reset_max)
        self.set_onchain(False)
        self.invoices_label = QLabel(_('Outgoing payments'))
        from .invoice_list import InvoiceList
        self.invoice_list = InvoiceList(self)
        # layout
        vbox0 = QVBoxLayout()
        vbox0.addLayout(grid)
        hbox = QHBoxLayout()
        hbox.addLayout(vbox0)
        hbox.addStretch(1)
        w = QWidget()
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.invoices_label)
        vbox.addWidget(self.invoice_list)
        vbox.setStretchFactor(self.invoice_list, 1000)
        w.searchable_list = self.invoice_list
        run_hook('create_send_tab', grid)
        return w
    def spend_max(self):
        """Handler for the Max button / '!' shortcut: compute the maximum
        spendable amount (net of fees and any plugin extra fee) and put it
        into the amount field."""
        if run_hook('abort_send', self):
            return
        outputs = self.payto_e.get_outputs(True)
        if not outputs:
            return
        make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
            coins=self.get_coins(),
            outputs=outputs,
            fee=fee_est,
            is_sweep=False)
        try:
            try:
                tx = make_tx(None)
            except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
                # Check if we had enough funds excluding fees,
                # if so, still provide opportunity to set lower fees.
                tx = make_tx(0)
        except (MultipleSpendMaxTxOutputs, NotEnoughFunds) as e:
            self.max_button.setChecked(False)
            self.show_error(str(e))
            return
        self.max_button.setChecked(True)
        amount = tx.output_value()
        # plugins (e.g. trustedcoin) may add an extra fee output
        __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
        amount_after_all_fees = amount - x_fee_amount
        self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
    @protected
    def protect(self, func, args, password):
        """Run func(*args, password) behind the @protected password prompt;
        generic entry point used by call sites that need a password."""
        return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Doriancoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
    def check_send_tab_payto_line_and_show_errors(self) -> bool:
        """Returns whether there are errors.
        Also shows error dialog to user if so.
        """
        pr = self.payment_request
        if pr:
            if pr.has_expired():
                self.show_error(_('Payment request has expired'))
                return True
        if not pr:
            # free-form pay-to lines: report parse errors
            errors = self.payto_e.get_errors()
            if errors:
                if len(errors) == 1 and not errors[0].is_multiline:
                    err = errors[0]
                    self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
                                      f"{err.line_content[:40]}...\n\n"
                                      f"{err.exc!r}")
                else:
                    self.show_warning(_("Invalid Lines found:") + "\n\n" +
                                      '\n'.join([_("Line #") +
                                                 f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
                                                 for err in errors]))
                return True
            # openalias that failed DNSSEC validation: let the user decide
            if self.payto_e.is_alias and self.payto_e.validated is False:
                alias = self.payto_e.toPlainText()
                msg = _('WARNING: the alias "{}" could not be validated via an additional '
                        'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
                msg += _('Do you wish to continue?')
                if not self.question(msg):
                    return True
        return False  # no errors
    def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
        """Confirm with the user, then pay a BOLT11 invoice via lnworker on
        the wallet's worker thread."""
        if amount_msat is None:
            raise Exception("missing amount for LN invoice")
        amount_sat = Decimal(amount_msat) / 1000
        # FIXME this is currently lying to user as we truncate to satoshis
        msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
        if not self.question(msg):
            return
        # persist the invoice before attempting payment
        self.save_pending_invoice()
        attempts = LN_NUM_PAYMENT_ATTEMPTS
        def task():
            self.wallet.lnworker.pay(invoice, amount_msat=amount_msat, attempts=attempts)
        self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
req = self.wallet.get_invoice(key)
if req is None:
return
self.invoice_list.update_item(key, req)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
    def on_payment_failed(self, wallet, key, reason):
        """Surface an outgoing-payment failure; *reason* is user-readable text."""
        self.show_error(_('Payment failed') + '\n\n' + reason)
    def read_invoice(self):
        """Parse the send tab into an invoice object.

        Returns an LNInvoice (lightning mode) or an onchain invoice, or None
        after showing an error when the form is invalid.
        """
        if self.check_send_tab_payto_line_and_show_errors():
            return
        if not self._is_onchain:
            invoice_str = self.payto_e.lightning_invoice
            if not invoice_str:
                return
            if not self.wallet.has_lightning():
                self.show_error(_('Lightning is disabled'))
                return
            invoice = LNInvoice.from_bech32(invoice_str)
            if invoice.get_amount_msat() is None:
                # amountless invoice: take the amount from the form instead
                amount_sat = self.amount_e.get_amount()
                if amount_sat:
                    invoice.amount_msat = int(amount_sat * 1000)
                else:
                    self.show_error(_('No amount'))
                    return
            return invoice
        else:
            outputs = self.read_outputs()
            if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
                return
            message = self.message_e.text()
            return self.wallet.create_invoice(
                outputs=outputs,
                message=message,
                pr=self.payment_request,
                URI=self.payto_URI)
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
    def do_pay_invoice(self, invoice: 'Invoice'):
        """Dispatch payment of *invoice* to the lightning or onchain flow."""
        if invoice.type == PR_TYPE_LN:
            assert isinstance(invoice, LNInvoice)
            self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
        elif invoice.type == PR_TYPE_ONCHAIN:
            assert isinstance(invoice, OnchainInvoice)
            self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
        else:
            raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
    def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
        """Return a list of selected coins or None.
        Note: None means selection is not being used,
        while an empty sequence means the user specifically selected that.
        """
        # delegated to the Coins tab's current selection
        return self.utxo_list.get_spend_list()
    def pay_onchain_dialog(
            self, inputs: Sequence[PartialTxInput],
            outputs: List[PartialTxOutput], *,
            external_keypairs=None) -> None:
        """Run the confirm-transaction dialog for an onchain spend, then sign
        and broadcast — or fall through to the advanced preview dialog."""
        # trustedcoin requires this
        if run_hook('abort_send', self):
            return
        is_sweep = bool(external_keypairs)
        make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
            coins=inputs,
            outputs=outputs,
            fee=fee_est,
            is_sweep=is_sweep)
        output_values = [x.value for x in outputs]
        # '!' denotes a spend-max output; at most one is allowed
        if output_values.count('!') > 1:
            self.show_error(_("More than one output set to spend max"))
            return
        output_value = '!' if '!' in output_values else sum(output_values)
        d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
        if d.not_enough_funds:
            # Check if we had enough funds excluding fees,
            # if so, still provide opportunity to set lower fees.
            if not d.have_enough_funds_assuming_zero_fees():
                self.show_message(_('Not Enough Funds'))
                return
        # shortcut to advanced preview (after "enough funds" check!)
        if self.config.get('advanced_preview'):
            self.preview_tx_dialog(make_tx=make_tx,
                                   external_keypairs=external_keypairs)
            return
        cancelled, is_send, password, tx = d.run()
        if cancelled:
            return
        if is_send:
            self.save_pending_invoice()
            def sign_done(success):
                if success:
                    self.broadcast_or_show(tx)
            self.sign_tx_with_password(tx, callback=sign_done, password=password,
                                       external_keypairs=external_keypairs)
        else:
            self.preview_tx_dialog(make_tx=make_tx,
                                   external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self)
d.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
    @protected
    def sign_tx(self, tx, *, callback, external_keypairs, password):
        """Password-prompting wrapper around sign_tx_with_password
        (@protected supplies *password*)."""
        self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
    def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
        '''Sign the transaction in a separate thread. When done, calls
        the callback with a success code of True or False.
        '''
        def on_success(result):
            callback(True)
        def on_failure(exc_info):
            self.on_error(exc_info)
            callback(False)
        # plugins (e.g. trustedcoin) may wrap the success callback
        on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
        if external_keypairs:
            # can sign directly
            task = partial(tx.sign, external_keypairs)
        else:
            task = partial(self.wallet.sign_transaction, tx, password)
        msg = _('Signing transaction...')
        WaitingDialog(self, msg, task, on_success, on_failure)
    def broadcast_transaction(self, tx: Transaction):
        """Broadcast *tx* on a background thread, handle a BIP70 payment ACK
        when a payment request is active, and report the outcome in the GUI."""
        def broadcast_thread():
            # non-GUI thread
            pr = self.payment_request
            if pr and pr.has_expired():
                self.payment_request = None
                return False, _("Invoice has expired")
            try:
                self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
            except TxBroadcastError as e:
                return False, e.get_message_for_gui()
            except BestEffortRequestFailed as e:
                return False, repr(e)
            # success
            txid = tx.txid()
            if pr:
                # BIP70 flow: send the payment message and wait for the ACK
                self.payment_request = None
                refund_address = self.wallet.get_receiving_address()
                coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
                fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
                ack_status, ack_msg = fut.result(timeout=20)
                self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
            return True, txid
        # Capture current TL window; override might be removed on return
        parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
        def broadcast_done(result):
            # GUI thread
            if result:
                success, msg = result
                if success:
                    parent.show_message(_('Payment sent.') + '\n' + msg)
                    self.invoice_list.update()
                else:
                    msg = msg or ''
                    parent.show_error(msg)
        WaitingDialog(self, _('Broadcasting transaction...'),
                      broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
    def open_channel(self, connect_str, funding_sat, push_amt):
        """Open a lightning channel to the peer in *connect_str*, funding it
        with *funding_sat* (may be '!') and pushing *push_amt* to the remote."""
        try:
            extract_nodeid(connect_str)
        except ConnStringFormatError as e:
            self.show_error(str(e))
            return
        # use ConfirmTxDialog
        # we need to know the fee before we broadcast, because the txid is required
        make_tx = self.mktx_for_open_channel(funding_sat)
        d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
        # disable preview button because the user must not broadcast tx before establishment_flow
        d.preview_button.setEnabled(False)
        cancelled, is_send, password, funding_tx = d.run()
        if not is_send:
            return
        if cancelled:
            return
        # read funding_sat from tx; converts '!' to int value
        funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
        def task():
            return self.wallet.lnworker.open_channel(connect_str=connect_str,
                                                     funding_tx=funding_tx,
                                                     funding_sat=funding_sat,
                                                     push_amt_sat=push_amt,
                                                     password=password)
        def on_success(args):
            chan, funding_tx = args
            n = chan.constraints.funding_txn_minimum_depth
            message = '\n'.join([
                _('Channel established.'),
                _('Remote peer ID') + ':' + chan.node_id.hex(),
                _('This channel will be usable after {} confirmations').format(n)
            ])
            if not funding_tx.is_complete():
                message += '\n\n' + _('Please sign and broadcast the funding transaction')
            self.show_message(message)
            if not funding_tx.is_complete():
                self.show_transaction(funding_tx)
        def on_failure(exc_info):
            type_, e, traceback = exc_info
            self.show_error(_('Could not open channel: {}').format(repr(e)))
        WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
    def lock_amount(self, b: bool) -> None:
        """Freeze (b=True) or unfreeze the amount field; Max is only enabled
        while the field is editable."""
        self.amount_e.setFrozen(b)
        self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
    def payment_request_ok(self):
        """Fill the send tab from a successfully fetched and verified
        payment request (runs on the GUI thread via a signal)."""
        pr = self.payment_request
        if not pr:
            return
        key = pr.get_id()
        invoice = self.wallet.get_invoice(key)
        if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
            self.show_message("invoice already paid")
            self.do_clear()
            self.payment_request = None
            return
        self.payto_e.is_pr = True
        # green = still valid, red = expired
        if not pr.has_expired():
            self.payto_e.setGreen()
        else:
            self.payto_e.setExpired()
        self.payto_e.setText(pr.get_requestor())
        self.amount_e.setAmount(pr.get_amount())
        self.message_e.setText(pr.get_memo())
        # signal to set fee
        self.amount_e.textEdited.emit("")
    def payment_request_error(self):
        """Report a failed payment-request fetch/verification and reset the
        send tab (runs on the GUI thread via a signal)."""
        pr = self.payment_request
        if not pr:
            return
        self.show_message(pr.error)
        self.payment_request = None
        self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
    def parse_lightning_invoice(self, invoice):
        """Parse ln invoice, and prepare the send tab for it.

        Raises LnDecodeException when *invoice* is not a valid BOLT11 string.
        """
        try:
            lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
        except Exception as e:
            raise LnDecodeException(e) from e
        pubkey = bh2u(lnaddr.pubkey.serialize())
        # the 'd' tag carries the invoice description, when present
        for k,v in lnaddr.tags:
            if k == 'd':
                description = v
                break
        else:
            description = ''
        self.payto_e.setFrozen(True)
        self.payto_e.setText(pubkey)
        self.message_e.setText(description)
        if lnaddr.get_amount_sat() is not None:
            self.amount_e.setAmount(lnaddr.get_amount_sat())
        #self.amount_e.textEdited.emit("")
        self.set_onchain(False)
    def set_onchain(self, b):
        """Track whether the send tab targets an onchain payment; the Max
        button is only meaningful onchain."""
        self._is_onchain = b
        self.max_button.setEnabled(b)
    def pay_to_URI(self, URI):
        """Populate the send tab from a BIP21 payment URI; 'r' or 'name'+'sig'
        parameters trigger an asynchronous BIP70 request fetch instead."""
        if not URI:
            return
        try:
            out = util.parse_URI(URI, self.on_pr)
        except InvalidBitcoinURI as e:
            self.show_error(_("Error parsing URI") + f":\n{e}")
            return
        self.show_send_tab()
        self.payto_URI = out
        r = out.get('r')
        sig = out.get('sig')
        name = out.get('name')
        if r or (name and sig):
            # a payment request will arrive via on_pr; freeze the form meanwhile
            self.prepare_for_payment_request()
            return
        address = out.get('address')
        amount = out.get('amount')
        label = out.get('label')
        message = out.get('message')
        # use label as description (not BIP21 compliant)
        if label and not message:
            message = label
        if address:
            self.payto_e.setText(address)
        if message:
            self.message_e.setText(message)
        if amount:
            self.amount_e.setAmount(amount)
            self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
    def set_frozen_state_of_addresses(self, addrs, freeze: bool):
        """Freeze/unfreeze *addrs* in the wallet and refresh dependent views."""
        self.wallet.set_frozen_state_of_addresses(addrs, freeze)
        self.address_list.update()
        self.utxo_list.update()
    def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
        """Freeze/unfreeze individual *utxos* and refresh the coins view."""
        self.wallet.set_frozen_state_of_coins(utxos, freeze)
        self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
    """Build the Addresses tab, restoring the saved toolbar visibility."""
    from .address_list import AddressList
    self.address_list = l = AddressList(self)
    toolbar = l.create_toolbar(self.config)
    toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
    l.show_toolbar(toolbar_shown)
    return self.create_list_tab(l, toolbar)

def create_utxo_tab(self):
    """Build the Coins (UTXO) tab."""
    from .utxo_list import UTXOList
    self.utxo_list = UTXOList(self)
    return self.create_list_tab(self.utxo_list)

def create_contacts_tab(self):
    """Build the Contacts tab."""
    from .contact_list import ContactList
    self.contact_list = l = ContactList(self)
    return self.create_list_tab(l)
def remove_address(self, addr):
    """Delete an (imported) address from the wallet after confirmation."""
    if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
        return
    try:
        self.wallet.delete_address(addr)
    except UserFacingException as e:
        self.show_error(str(e))
    else:
        self.need_update.set()  # history, addresses, coins
        self.clear_receive_tab()
def paytomany(self):
    """Switch the send tab into multi-output mode and explain the format."""
    self.show_send_tab()
    self.payto_e.paytomany()
    msg = '\n'.join([
        _('Enter a list of outputs in the \'Pay to\' field.'),
        _('One output per line.'),
        _('Format: address, amount'),
        _('You may load a CSV file using the file icon.')
    ])
    self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
    """Prefill the send tab with payto strings for the given contacts."""
    recipients = [self.get_contact_payto(label) for label in labels]
    self.show_send_tab()
    if len(recipients) == 1:
        # a single recipient: let the user type the amount next
        self.payto_e.setText(recipients[0])
        self.amount_e.setFocus()
    else:
        # several recipients: "pay to many" format, one per line, zero amounts
        lines = [recipient + ", 0" for recipient in recipients]
        self.payto_e.setText("\n".join(lines))
        self.payto_e.setFocus()
def set_contact(self, label, address):
    """Add or update a contact keyed by address.

    Returns True on success, False if the address is invalid.
    """
    if not is_address(address):
        self.show_error(_('Invalid Address'))
        self.contact_list.update()  # Displays original unchanged value
        return False
    self.contacts[address] = ('address', label)
    self.contact_list.update()
    self.history_list.update()
    self.update_completions()
    return True
def delete_contacts(self, labels):
    """Remove the given contact keys after a single confirmation prompt."""
    if not self.question(_("Remove {} from your list of contacts?")
                         .format(" + ".join(labels))):
        return
    for label in labels:
        self.contacts.pop(label)
    self.history_list.update()
    self.contact_list.update()
    self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
    """Show a modal dialog with the details of an on-chain invoice.

    For BIP70 invoices the request is verified and the requestor /
    signature status are displayed, with an Export button to save the
    raw request to a *.bip70 file.
    """
    amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
    d = WindowModalDialog(self, _("Onchain Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    grid.addWidget(QLabel(amount_str), 1, 1)
    if len(invoice.outputs) == 1:
        grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
        grid.addWidget(QLabel(invoice.get_address()), 2, 1)
    else:
        # multiple outputs: list them all as "address : amount"
        outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs))
        grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
        grid.addWidget(QLabel(outputs_str), 2, 1)
    grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
    grid.addWidget(QLabel(invoice.message), 3, 1)
    if invoice.exp:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
    if invoice.bip70:
        pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
        pr.verify(self.contacts)
        grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
        grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
        grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
        grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)

        def do_export():
            # save the raw BIP70 request bytes as <request-id>.bip70
            key = pr.get_id()
            name = str(key) + '.bip70'
            fn = getSaveFileName(
                parent=self,
                title=_("Save invoice to file"),
                filename=name,
                filter="*.bip70",
                config=self.config,
            )
            if not fn:
                return
            with open(fn, 'wb') as f:
                # fix: write()'s return value was previously bound to an
                # unused local ('data'); it is the byte count, not the data
                f.write(pr.raw)
            self.show_message(_('BIP70 invoice saved as {}').format(fn))

        exportButton = EnterButton(_('Export'), do_export)
        buttons = Buttons(exportButton, CloseButton(d))
    else:
        buttons = Buttons(CloseButton(d))
    vbox.addLayout(grid)
    vbox.addLayout(buttons)
    d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
    """Show a modal dialog with the details of a Lightning invoice."""
    lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
    d = WindowModalDialog(self, _("Lightning Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
    grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
    grid.addWidget(QLabel(amount_str), 1, 1)
    grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
    grid.addWidget(QLabel(invoice.message), 2, 1)
    grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
    payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
    payhash_e.addCopyButton(self.app)
    payhash_e.setReadOnly(True)
    # fix: payhash_e was previously also inserted into vbox immediately
    # before this grid insertion; Qt re-parents a widget when it is added
    # to a second layout, so the vbox add was dead code that left a stray
    # empty slot in the dialog's vertical layout.
    grid.addWidget(payhash_e, 3, 1)
    if invoice.exp:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
    vbox.addLayout(grid)
    invoice_e = ShowQRTextEdit(config=self.config)
    invoice_e.addCopyButton(self.app)
    invoice_e.setText(invoice.invoice)
    vbox.addWidget(invoice_e)
    vbox.addLayout(Buttons(CloseButton(d),))
    d.exec_()
def create_console_tab(self):
    """Build the interactive Python console tab."""
    from .console import Console
    self.console = console = Console()
    return console
def update_console(self):
    """Refresh the console's history and namespace for the current wallet.

    Exposes wallet/network/config objects plus every public CLI command,
    each wrapped so it runs against this window's wallet and can prompt
    for a password.
    """
    console = self.console
    console.history = self.wallet.db.get("qt-console-history", [])
    console.history_index = len(console.history)
    console.updateNamespace({
        'wallet': self.wallet,
        'network': self.network,
        'plugins': self.gui_object.plugins,
        'window': self,
        'config': self.config,
        'electrum': electrum,
        'daemon': self.gui_object.daemon,
        'util': util,
        'bitcoin': bitcoin,
        'lnutil': lnutil,
    })
    c = commands.Commands(config=self.config,
                          network=self.network,
                          callback=lambda: self.console.set_json(True))
    methods = {}
    def mkfunc(f, method):
        # closure binds the command name; kwargs always carry this wallet
        return lambda *args, **kwargs: f(method,
                                         args,
                                         self.password_dialog,
                                         **{**kwargs, 'wallet': self.wallet})
    for m in dir(c):
        # skip private attrs and objects already exposed by name above
        if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
        methods[m] = mkfunc(c._run, m)
    console.updateNamespace(methods)
def create_status_bar(self):
    """Build the main window status bar: balance, search box, and buttons."""
    sb = QStatusBar()
    sb.setFixedHeight(35)
    self.balance_label = QLabel("Loading wallet...")
    self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
    sb.addWidget(self.balance_label)
    # hidden until toggled via toggle_search
    self.search_box = QLineEdit()
    self.search_box.textChanged.connect(self.do_search)
    self.search_box.hide()
    sb.addPermanentWidget(self.search_box)
    # shown only when an update is available
    self.update_check_button = QPushButton("")
    self.update_check_button.setFlat(True)
    self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
    self.update_check_button.setIcon(read_QIcon("update.png"))
    self.update_check_button.hide()
    sb.addPermanentWidget(self.update_check_button)
    self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
    sb.addPermanentWidget(self.password_button)
    sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
    self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
    sb.addPermanentWidget(self.seed_button)
    # lightning button only exists for lightning-capable wallets online
    self.lightning_button = None
    if self.wallet.has_lightning() and self.network:
        self.lightning_button = StatusBarButton(read_QIcon("lightning_disconnected.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
        self.update_lightning_icon()
        sb.addPermanentWidget(self.lightning_button)
    # network button only exists when not in offline mode
    self.status_button = None
    if self.network:
        self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
        sb.addPermanentWidget(self.status_button)
    run_hook('create_status_bar', sb)
    self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
    """Build the (initially hidden) coin-control status bar with a Reset button."""
    self.coincontrol_sb = sb = QStatusBar()
    sb.setSizeGripEnabled(False)
    #sb.setFixedHeight(3 * char_width_in_lineedit())
    sb.setStyleSheet('QStatusBar::item {border: None;} '
                     + ColorScheme.GREEN.as_stylesheet(True))
    self.coincontrol_label = QLabel()
    self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
    self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    sb.addWidget(self.coincontrol_label)
    # Reset clears the manual coin selection in the UTXO list
    clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
    clear_cc_button.setStyleSheet("margin-right: 5px;")
    sb.addPermanentWidget(clear_cc_button)
    sb.setVisible(False)
    return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
    """Show *msg* in the coin-control status bar, or hide the bar when empty."""
    if msg:
        self.coincontrol_label.setText(msg)
        self.coincontrol_sb.setVisible(True)
    else:
        self.coincontrol_label.setText("")
        self.coincontrol_sb.setVisible(False)
def update_lightning_icon(self):
    """Update the lightning status-bar button with gossip sync progress."""
    if self.lightning_button is None:
        return
    if self.network.lngossip is None:
        return
    # display colorful lightning icon to signal connection
    self.lightning_button.setIcon(read_QIcon("lightning.png"))
    cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
    # self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
    progress_str = "??%"  # shown while no estimate is available yet
    if progress_percent is not None:
        progress_str = f"{progress_percent}%"
    if progress_percent and progress_percent >= 100:
        # fully synced: drop the percentage text, shrink the button
        self.lightning_button.setMaximumWidth(25)
        self.lightning_button.setText('')
        self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
    else:
        self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
        self.lightning_button.setText(progress_str)
        self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
                                           "Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
    """Show a locked or unlocked padlock depending on password protection."""
    if self.wallet.has_password():
        icon = read_QIcon("lock.png")
    else:
        icon = read_QIcon("unlock.png")
    self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
    """Show/hide the Seed and Password buttons based on wallet capabilities."""
    self.seed_button.setVisible(self.wallet.has_seed())
    self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
    """Run the change-password flow, with a dedicated path for HW wallets.

    For XPUB_PASSWORD (hardware) storage encryption the device supplies
    the encryption key, so the dialog only asks whether to encrypt the
    file; for software wallets the user types old/new passwords.
    """
    from electrum_dsv.storage import StorageEncryptionVersion
    if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
        from .password_dialog import ChangePasswordDialogForHW
        d = ChangePasswordDialogForHW(self, self.wallet)
        ok, encrypt_file = d.run()
        if not ok:
            return
        try:
            hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
        except UserCancelled:
            return
        except BaseException as e:
            self.logger.exception('')
            self.show_error(repr(e))
            return
        # the device-derived key acts as both old and new password
        old_password = hw_dev_pw if self.wallet.has_password() else None
        new_password = hw_dev_pw if encrypt_file else None
    else:
        from .password_dialog import ChangePasswordDialogForSW
        d = ChangePasswordDialogForSW(self, self.wallet)
        ok, old_password, new_password, encrypt_file = d.run()
        if not ok:
            return
    try:
        self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
    except InvalidPassword as e:
        self.show_error(str(e))
        return
    except BaseException:
        self.logger.exception('Failed to update password')
        self.show_error(_('Failed to update password'))
        return
    msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
    self.show_message(msg, title=_("Success"))
    self.update_lock_icon()
def toggle_search(self):
    """Toggle the status-bar search box; clear the filter when hiding it."""
    self.search_box.setHidden(not self.search_box.isHidden())
    if not self.search_box.isHidden():
        # focus reason 1 corresponds to Qt.TabFocusReason
        self.search_box.setFocus(1)
    else:
        self.do_search('')
def do_search(self, t):
    """Filter the current tab's list (if it is searchable) by text *t*."""
    current_tab = self.tabs.currentWidget()
    searchable = getattr(current_tab, 'searchable_list', None)
    if searchable is not None:
        searchable.filter(t)
def new_contact_dialog(self):
    """Show a small dialog asking for an address and a name; add the contact."""
    d = WindowModalDialog(self, _("New Contact"))
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_('New Contact') + ':'))
    grid = QGridLayout()
    line1 = QLineEdit()
    line1.setFixedWidth(32 * char_width_in_lineedit())
    line2 = QLineEdit()
    line2.setFixedWidth(32 * char_width_in_lineedit())
    grid.addWidget(QLabel(_("Address")), 1, 0)
    grid.addWidget(line1, 1, 1)
    grid.addWidget(QLabel(_("Name")), 2, 0)
    grid.addWidget(line2, 2, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if d.exec_():
        # set_contact(label, address): line2 is the name, line1 the address
        self.set_contact(line2.text(), line1.text())
def show_wallet_info(self):
    """Show a modal dialog summarizing wallet metadata.

    Displays name/type/script type/seed availability, lightning status,
    and — for deterministic wallets — each keystore's master public key
    and derivation path (with a selector when there are several).
    """
    dialog = WindowModalDialog(self, _("Wallet Information"))
    dialog.setMinimumSize(500, 100)
    vbox = QVBoxLayout()
    wallet_type = self.wallet.db.get('wallet_type', '')
    if self.wallet.is_watching_only():
        wallet_type += ' [{}]'.format(_('watching-only'))
    seed_available = _('True') if self.wallet.has_seed() else _('False')
    keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
    grid = QGridLayout()
    basename = os.path.basename(self.wallet.storage.path)
    grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
    grid.addWidget(QLabel(basename), 0, 1)
    grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
    grid.addWidget(QLabel(wallet_type), 1, 1)
    grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
    grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
    grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
    grid.addWidget(QLabel(str(seed_available)), 3, 1)
    if len(keystore_types) <= 1:
        grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
        ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
        grid.addWidget(QLabel(ks_type), 4, 1)
    # lightning
    grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
    if self.wallet.can_have_lightning():
        grid.addWidget(QLabel(_('Enabled')), 5, 1)
        # NOTE(review): this reads self.wallet.lnworker under
        # can_have_lightning(); verify lnworker is always set in that case
        local_nodeid = QLabel(bh2u(self.wallet.lnworker.node_keypair.pubkey))
        local_nodeid.setTextInteractionFlags(Qt.TextSelectableByMouse)
        grid.addWidget(QLabel(_('Lightning Node ID:')), 6, 0)
        grid.addWidget(local_nodeid, 6, 1, 1, 3)
    else:
        grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
        grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
    vbox.addLayout(grid)
    labels_clayout = None
    if self.wallet.is_deterministic():
        keystores = self.wallet.get_keystores()
        ks_stack = QStackedWidget()
        def select_ks(index):
            ks_stack.setCurrentIndex(index)
        # only show the combobox in case multiple accounts are available
        if len(keystores) > 1:
            def label(idx, ks):
                if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
                    return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
                else:
                    return _("keystore") + f' {idx+1}'
            labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
            on_click = lambda clayout: select_ks(clayout.selected_index())
            labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
            vbox.addLayout(labels_clayout.layout())
        # one stacked page per keystore: master public key + derivation path
        for ks in keystores:
            ks_w = QWidget()
            ks_vbox = QVBoxLayout()
            ks_vbox.setContentsMargins(0, 0, 0, 0)
            ks_w.setLayout(ks_vbox)
            mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
            mpk_text.setMaximumHeight(150)
            mpk_text.addCopyButton(self.app)
            run_hook('show_xpub_button', mpk_text, ks)
            der_path_hbox = QHBoxLayout()
            der_path_hbox.setContentsMargins(0, 0, 0, 0)
            der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
            der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
            der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
            der_path_hbox.addWidget(der_path_text)
            der_path_hbox.addStretch()
            ks_vbox.addWidget(QLabel(_("Master Public Key")))
            ks_vbox.addWidget(mpk_text)
            ks_vbox.addLayout(der_path_hbox)
            ks_stack.addWidget(ks_w)
        select_ks(0)
        vbox.addWidget(ks_stack)
    vbox.addStretch(1)
    btn_export_info = run_hook('wallet_info_buttons', self, dialog)
    btn_close = CloseButton(dialog)
    btns = Buttons(btn_export_info, btn_close)
    vbox.addLayout(btns)
    dialog.setLayout(vbox)
    dialog.exec_()
def remove_wallet(self):
    """Ask for confirmation, then delete this wallet's file from disk."""
    if self.question('\n'.join([
            _('Delete wallet file?'),
            "%s"%self.wallet.storage.path,
            _('If your wallet contains funds, make sure you have saved its seed.')])):
        self._delete_wallet()
@protected
def _delete_wallet(self, password):
    """Delete the wallet file via the daemon and close this window.

    The @protected decorator prompts for the password first.
    """
    wallet_path = self.wallet.storage.path
    basename = os.path.basename(wallet_path)
    r = self.gui_object.daemon.delete_wallet(wallet_path)
    self.close()
    if r:
        self.show_error(_("Wallet removed: {}").format(basename))
    else:
        self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
    """Show the wallet seed (and passphrase) after password entry."""
    if not self.wallet.has_seed():
        self.show_message(_('This wallet has no seed'))
        return
    keystore = self.wallet.get_keystore()
    try:
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
    except BaseException as e:
        self.show_error(repr(e))
        return
    from .seed_dialog import SeedDialog
    d = SeedDialog(self, seed, passphrase, config=self.config)
    d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
                help_text=None, show_copy_text_btn=False):
    """Display *data* as a QR code in a modal dialog; no-op for empty data."""
    if not data:
        return
    d = QRDialog(
        data=data,
        parent=parent or self,
        title=title,
        help_text=help_text,
        show_copy_text_btn=show_copy_text_btn,
        config=self.config,
    )
    d.exec_()
@protected
def show_private_key(self, address, password):
    """Export and display the private key for *address* (password-gated)."""
    if not address:
        return
    try:
        pk = self.wallet.export_private_key(address, password)
    except Exception as e:
        self.logger.exception('')
        self.show_message(repr(e))
        return
    # script type is encoded in the WIF-serialized key
    xtype = bitcoin.deserialize_privkey(pk)[0]
    d = WindowModalDialog(self, _("Private key"))
    d.setMinimumSize(600, 150)
    vbox = QVBoxLayout()
    vbox.addWidget(QLabel(_("Address") + ': ' + address))
    vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
    vbox.addWidget(QLabel(_("Private key") + ':'))
    keys_e = ShowQRTextEdit(text=pk, config=self.config)
    keys_e.addCopyButton(self.app)
    vbox.addWidget(keys_e)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    d.exec_()
# Message shown by do_sign when the address type does not allow message
# signing (no uniquely recoverable public key).
msg_sign = _("Signing with an address actually means signing with the corresponding "
             "private key, and verifying with the corresponding public key. The "
             "address you have entered does not have a unique public key, so these "
             "operations cannot be performed.") + '\n\n' + \
           _('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
    """Sign *message* with *address*'s key; write base64 sig into *signature*.

    Arguments are the Qt input widgets from sign_verify_message; signing
    runs on the wallet thread so the GUI stays responsive.
    """
    address = address.text().strip()
    message = message.toPlainText().strip()
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Doriancoin address.'))
        return
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    if not self.wallet.is_mine(address):
        self.show_message(_('Address not in wallet.'))
        return
    txin_type = self.wallet.get_txin_type(address)
    if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
        # other script types have no unique pubkey; see msg_sign
        self.show_message(_('Cannot sign messages with this type of address:') + \
                          ' ' + txin_type + '\n\n' + self.msg_sign)
        return
    task = partial(self.wallet.sign_message, address, message, password)
    def show_signed_message(sig):
        try:
            signature.setText(base64.b64encode(sig).decode('ascii'))
        except RuntimeError:
            # (signature) wrapped C/C++ object has been deleted
            pass
    self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
    """Verify a base64 signature over *message* against *address*."""
    address = address.text().strip()
    message = message.toPlainText().strip().encode('utf-8')
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Doriancoin address.'))
        return
    try:
        # This can throw on invalid base64
        sig = base64.b64decode(str(signature.toPlainText()))
        verified = ecc.verify_message_with_address(address, sig, message)
    except Exception as e:
        # any decode/verify failure is reported as a bad signature
        verified = False
    if verified:
        self.show_message(_("Signature verified"))
    else:
        self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
    """Show the Sign/Verify Message dialog, optionally prefilled with *address*."""
    d = WindowModalDialog(self, _('Sign/verify Message'))
    d.setMinimumSize(610, 290)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    message_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    address_e = QLineEdit()
    address_e.setText(address)
    layout.addWidget(QLabel(_('Address')), 2, 0)
    layout.addWidget(address_e, 2, 1)
    signature_e = QTextEdit()
    signature_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Signature')), 3, 0)
    layout.addWidget(signature_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Sign"))
    b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Verify"))
    b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
    """Decrypt the ciphertext in *encrypted_e* on the wallet thread."""
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    cyphertext = encrypted_e.toPlainText()
    task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
    def setText(text):
        try:
            message_e.setText(text.decode('utf-8'))
        except RuntimeError:
            # (message_e) wrapped C/C++ object has been deleted
            pass
    self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
    """Encrypt the message in *message_e* to the public key in *pubkey_e*."""
    message = message_e.toPlainText()
    message = message.encode('utf-8')
    try:
        public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
    except BaseException as e:
        self.logger.exception('Invalid Public key')
        self.show_warning(_('Invalid Public key'))
        return
    encrypted = public_key.encrypt_message(message)
    encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
    """Show the Encrypt/Decrypt Message dialog, prefilled from *address* if given."""
    d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
    d.setMinimumSize(610, 490)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    message_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    pubkey_e = QLineEdit()
    if address:
        # prefill with the public key of one of our own addresses
        pubkey = self.wallet.get_public_key(address)
        pubkey_e.setText(pubkey)
    layout.addWidget(QLabel(_('Public key')), 2, 0)
    layout.addWidget(pubkey_e, 2, 1)
    encrypted_e = QTextEdit()
    encrypted_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Encrypted')), 3, 0)
    layout.addWidget(encrypted_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Encrypt"))
    b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Decrypt"))
    b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
def password_dialog(self, msg=None, parent=None):
    """Prompt for the wallet password; returns it, or None if cancelled."""
    from .password_dialog import PasswordDialog
    # default the dialog's parent to this window
    return PasswordDialog(parent or self, msg).run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
    """Parse raw/PSBT transaction data; show an error and return None on failure."""
    from electrum_dsv.transaction import tx_from_any
    try:
        return tx_from_any(data)
    except BaseException as e:
        self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
        return
def import_channel_backup(self, encrypted: str):
    """Import an encrypted lightning channel backup after confirmation."""
    if not self.question('Import channel backup?'):
        return
    try:
        self.wallet.lnbackups.import_channel_backup(encrypted)
    except Exception as e:
        self.show_error("failed to import backup" + '\n' + str(e))
        return
def read_tx_from_qrcode(self):
    """Scan a QR code and dispatch on its content.

    A payment URI goes to the send tab, a channel backup is imported,
    anything else is treated as a raw transaction and displayed.
    """
    from electrum_dsv import qrscanner
    try:
        data = qrscanner.scan_barcode(self.config.get_video_device())
    except BaseException as e:
        self.show_error(repr(e))
        return
    if not data:
        return
    # if the user scanned a bitcoin URI
    if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
        self.pay_to_URI(data)
        return
    if data.lower().startswith('channel_backup:'):
        self.import_channel_backup(data)
        return
    # else if the user scanned an offline signed tx
    tx = self.tx_from_text(data)
    if not tx:
        return
    self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
    """Let the user pick a transaction file and parse it; None on cancel/error."""
    fileName = getOpenFileName(
        parent=self,
        title=_("Select your transaction file"),
        filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
        config=self.config,
    )
    if not fileName:
        return
    try:
        # read as bytes; tx_from_any accepts both str and bytes
        with open(fileName, "rb") as f:
            file_content = f.read()  # type: Union[str, bytes]
    except (ValueError, IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
                           title=_("Unable to read file or no transaction found"))
        return
    return self.tx_from_text(file_content)
def do_process_from_text(self):
    """Ask for a raw transaction in a text dialog, parse and display it."""
    text = text_dialog(
        parent=self,
        title=_('Input raw transaction'),
        header_layout=_("Transaction:"),
        ok_label=_("Load transaction"),
        config=self.config,
    )
    if not text:
        return
    tx = self.tx_from_text(text)
    if tx:
        self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
    """Ask for a channel backup string in a text dialog and import it."""
    text = text_dialog(
        parent=self,
        title=_('Input channel backup'),
        header_layout=_("Channel Backup:"),
        ok_label=_("Load backup"),
        config=self.config,
    )
    if not text:
        return
    # only accept strings with the expected scheme prefix
    if text.startswith('channel_backup:'):
        self.import_channel_backup(text)
def do_process_from_file(self):
    """Load a transaction from a user-chosen file and show it."""
    loaded_tx = self.read_tx_from_file()
    if loaded_tx:
        self.show_transaction(loaded_tx)
def do_process_from_txid(self):
from electrum_dsv import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
else:
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
    """Export all private keys to a CSV/JSON file, with a progress dialog.

    Keys are derived on a background thread; `computing_privkeys_signal`
    updates the progress text and `show_privkeys_signal` fills the
    preview once done. Closing the dialog cancels the thread.
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    if isinstance(self.wallet, Multisig_Wallet):
        self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                          _('It cannot be "backed up" by simply exporting these private keys.'))
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(980, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-dsv-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    b = OkButton(d, _('Export'))
    b.setEnabled(False)  # enabled once all keys have been derived
    vbox.addLayout(Buttons(CancelButton(d), b))
    private_keys = {}
    addresses = self.wallet.get_addresses()
    done = False
    cancelled = False
    def privkeys_thread():
        # background derivation; sleep keeps the GUI responsive
        for addr in addresses:
            time.sleep(0.1)
            if done or cancelled:
                break
            privkey = self.wallet.export_private_key(addr, password)
            private_keys[addr] = privkey
            self.computing_privkeys_signal.emit()
        if not cancelled:
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.emit()
    def show_privkeys():
        s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
        self.show_privkeys_signal.disconnect()
        nonlocal done
        done = True
    def on_dialog_closed(*args):
        nonlocal done
        nonlocal cancelled
        if not done:
            # user closed early: stop the thread and detach signals
            cancelled = True
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.disconnect()
    self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
    self.show_privkeys_signal.connect(show_privkeys)
    d.finished.connect(on_dialog_closed)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(repr(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write address -> private-key pairs to *fileName* as CSV or JSON."""
    with open(fileName, "w+") as f:
        # restrict the key file to owner read/write before writing secrets
        os.chmod(fileName, 0o600)
        if is_csv:
            writer = csv.writer(f)
            writer.writerow(["address", "private_key"])
            for address, privkey in pklist.items():
                writer.writerow(["%34s" % address, privkey])
        else:
            f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
    """Import labels from a file; trigger a full view refresh on success."""
    def on_import():
        self.need_update.set()
    import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)

def do_export_labels(self):
    """Export wallet labels to a file."""
    export_meta_gui(self, _('labels'), self.wallet.export_labels)

def import_invoices(self):
    """Import invoices from a file and refresh the invoice list."""
    import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)

def export_invoices(self):
    """Export invoices to a file."""
    export_meta_gui(self, _('invoices'), self.wallet.export_invoices)

def import_requests(self):
    """Import payment requests from a file and refresh the request list."""
    import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)

def export_requests(self):
    """Export payment requests to a file."""
    export_meta_gui(self, _('requests'), self.wallet.export_requests)

def import_contacts(self):
    """Import contacts from a file and refresh the contact list."""
    import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)

def export_contacts(self):
    """Export contacts to a file."""
    export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
    """Sweep external private keys into one of this wallet's addresses.

    Shows a dialog for the keys and target address, validates both live,
    then prepares the sweep transaction on the network thread and hands
    it to the normal on-chain payment flow.
    """
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    hbox_top = QHBoxLayout()
    hbox_top.addWidget(QLabel(_("Enter private keys:")))
    hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    vbox.addLayout(hbox_top)
    keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    # prefer fresh addresses; fall back for wallets without that notion
    addresses = self.wallet.get_unused_addresses()
    if not addresses:
        try:
            addresses = self.wallet.get_receiving_addresses()
        except AttributeError:
            addresses = self.wallet.get_addresses()
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    vbox.addLayout(Buttons(CancelButton(d), button))
    button.setEnabled(False)  # enabled once keys and address validate
    def get_address():
        addr = str(address_e.text()).strip()
        if bitcoin.is_address(addr):
            return addr
    def get_pk(*, raise_on_error=False):
        text = str(keys_e.toPlainText())
        return keystore.get_private_keys(text, raise_on_error=raise_on_error)
    def on_edit():
        valid_privkeys = False
        try:
            valid_privkeys = get_pk(raise_on_error=True) is not None
        except Exception as e:
            button.setToolTip(f'{_("Error")}: {repr(e)}')
        else:
            button.setToolTip('')
        button.setEnabled(get_address() is not None and valid_privkeys)
    on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
    keys_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_address)
    on_address(str(address_e.text()))
    if not d.exec_():
        return
    # user pressed "sweep"
    addr = get_address()
    try:
        self.wallet.check_address_for_corruption(addr)
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        raise
    privkeys = get_pk()
    def on_success(result):
        coins, keypairs = result
        # '!' value means "spend everything" (max amount)
        outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
        self.warn_if_watching_only()
        self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
    def on_failure(exc_info):
        self.on_error(exc_info)
    msg = _('Preparing sweep transaction...')
    task = lambda: self.network.run_from_another_thread(
        sweep_preparations(privkeys, self.network))
    WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
    """Generic multi-line import dialog; *func* returns (good, bad) inputs.

    Shows up to 10 successes and 10 failures, then refreshes the address
    and history views.
    """
    text = text_dialog(
        parent=self,
        title=title,
        header_layout=header_layout,
        ok_label=_('Import'),
        allow_multi=True,
        config=self.config,
    )
    if not text:
        return
    keys = str(text).split()
    good_inputs, bad_inputs = func(keys)
    if good_inputs:
        msg = '\n'.join(good_inputs[:10])
        if len(good_inputs) > 10: msg += '\n...'
        self.show_message(_("The following addresses were added")
                          + f' ({len(good_inputs)}):\n' + msg)
    if bad_inputs:
        msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
        if len(bad_inputs) > 10: msg += '\n...'
        self.show_error(_("The following inputs could not be imported")
                        + f' ({len(bad_inputs)}):\n' + msg)
    self.address_list.update()
    self.history_list.update()
def import_addresses(self):
    """Import watch-only addresses (imported wallets only)."""
    if not self.wallet.can_import_address():
        return
    title, msg = _('Import addresses'), _("Enter addresses")+':'
    self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
    """Import private keys into the wallet (password-gated, imported wallets)."""
    if not self.wallet.can_import_privkey():
        return
    title = _('Import private keys')
    header_layout = QHBoxLayout()
    header_layout.addWidget(QLabel(_("Enter private keys")+':'))
    header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
    """Show/hide fiat amount fields and refresh views after a fiat-exchange change."""
    fiat_on = self.fx and self.fx.is_enabled()
    for fiat_edit in (self.fiat_send_e, self.fiat_receive_e):
        fiat_edit.setVisible(fiat_on)
    self.history_list.update()
    self.address_list.refresh_headers()
    self.address_list.update()
    self.update_status()
def settings_dialog(self):
    """Open the modal settings dialog and apply its side effects after it closes."""
    from .settings_dialog import SettingsDialog
    d = SettingsDialog(self, self.config)
    # connect only for the dialog's lifetime so alias updates recolor its field
    self.alias_received_signal.connect(d.set_alias_color)
    d.exec_()
    self.alias_received_signal.disconnect(d.set_alias_color)
    if self.fx:
        # re-fetch exchange rates in case fiat settings changed
        self.fx.trigger_update()
    run_hook('close_settings_dialog')
    if d.need_restart:
        self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close handler; guards against the rare case of being invoked twice."""
    if self.cleaned_up:
        # already cleaned up by an earlier call — just accept the close
        event.accept()
        return
    self.cleaned_up = True
    self.clean_up()
    event.accept()
def clean_up(self):
    """Tear down the window: stop threads, persist UI state, detach from the gui object."""
    self.wallet.thread.stop()
    util.unregister_callback(self.on_network)
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        # remember position/size only when not maximized
        g = self.geometry()
        self.wallet.db.put("winpos-qt", [g.left(),g.top(),
                                         g.width(),g.height()])
    # persist only the most recent 50 console history entries
    self.wallet.db.put("qt-console-history", self.console.history[-50:])
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    # disconnect before handing the window back, so the timer no longer drives us
    self.gui_object.timer.timeout.disconnect(self.timer_actions)
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Show the modal plugins dialog: one row per plugin with an enable
    checkbox, an optional settings widget, and a help button."""
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    # maps plugin name -> its settings widget currently shown in column 1
    settings_widgets = {}
    def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
        # keep row i's settings widget in sync with the plugin's enabled state
        widget = settings_widgets.get(name)  # type: Optional[QWidget]
        if widget and not p:
            # plugin got disabled, rm widget
            grid.removeWidget(widget)
            widget.setParent(None)
            settings_widgets.pop(name)
        elif widget is None and p and p.requires_settings() and p.is_enabled():
            # plugin got enabled, add widget
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
    def do_toggle(cb, name, i):
        # toggle plugin `name` and reflect the resulting state in the UI
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
    # note: all enabled plugins will receive this hook:
    run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        full_name = descr['__name__']
        prefix, _separator, name = full_name.rpartition('.')
        p = plugins.get(name)
        if descr.get('registers_keystore'):
            # plugins that register a keystore are not toggleable from this list
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            # enabled if it can still be loaded, or is loaded and user-disableable
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # a broken plugin description must not take down the whole dialog
            self.logger.exception(f"cannot display plugin {name}")
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None:
    """Show the Child-Pays-For-Parent dialog and hand off the child tx.

    parent_tx: unconfirmed transaction to accelerate.
    new_tx: draft child transaction spending an output of parent_tx; its
        output value is the maximum fee the child can pay.
    """
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    parent_txid = parent_tx.txid()
    assert parent_txid
    parent_fee = self.wallet.get_tx_fee(parent_txid)
    if parent_fee is None:
        self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
        return
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
        "fee/kB settings, applied to the total size of both child and "
        "parent transactions. After you broadcast a CPFP transaction, "
        "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    # all of the child's input value could in principle go to fee
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # FIXME with dyn fees, without estimates, there are all kinds of crashes here
    combined_fee = QLabel('')
    combined_feerate = QLabel('')
    def on_fee_edit(x):
        # recompute the output-amount / combined-fee / combined-feerate labels
        fee_for_child = fee_e.get_amount()
        if fee_for_child is None:
            return
        out_amt = max_fee - fee_for_child
        out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
        output_amount.setText(out_amt_str)
        comb_fee = parent_fee + fee_for_child
        comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
        combined_fee.setText(comb_fee_str)
        comb_feerate = comb_fee / total_size * 1000
        comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
        combined_feerate.setText(comb_feerate_str)
    fee_e.textChanged.connect(on_fee_edit)
    def get_child_fee_from_total_feerate(fee_per_kb):
        # child fee such that (parent_fee + child fee) hits fee_per_kb overall
        fee = fee_per_kb * total_size / 1000 - parent_fee
        fee = min(max_fee, fee)
        fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
        return fee
    suggested_feerate = self.config.fee_per_kb()
    if suggested_feerate is None:
        # fixed: the message msgid had a stray trailing apostrophe ("Can't CPFP'")
        self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
        return
    fee = get_child_fee_from_total_feerate(suggested_feerate)
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        fee = get_child_fee_from_total_feerate(fee_rate)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_combo = FeeComboBox(fee_slider)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    grid.addWidget(fee_combo, 4, 2)
    grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
    grid.addWidget(combined_fee, 5, 1)
    grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
    grid.addWidget(combined_feerate, 6, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    if fee is None:
        return  # fee left empty, treat is as "cancel"
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    new_tx = self.wallet.cpfp(parent_tx, fee)
    new_tx.set_rbf(True)
    self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
    """Populate *tx* with wallet and network data, mutating it in place.

    Returns True on success, False if a network problem was shown to the user.
    """
    assert isinstance(tx, PartialTransaction)
    try:
        # may download input utxos over the network, hence the blocking dialog
        BlockingWaitingDialog(
            self,
            _("Adding info to tx, from wallet and network..."),
            lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
        )
    except NetworkException as e:
        self.show_error(repr(e))
        return False
    else:
        return True
def bump_fee_dialog(self, tx: Transaction):
    """Show the RBF fee-bump dialog for *tx* and open the resulting replacement tx."""
    txid = tx.txid()
    assert txid
    if not isinstance(tx, PartialTransaction):
        tx = PartialTransaction.from_tx(tx)
    if not self._add_info_to_tx_from_wallet_and_network(tx):
        return
    fee = tx.get_fee()
    assert fee is not None
    tx_label = self.wallet.get_label_for_txid(txid)
    tx_size = tx.estimated_size()
    old_fee_rate = fee / tx_size  # sat/vbyte
    d = WindowModalDialog(self, _('Bump Fee'))
    vbox = QVBoxLayout(d)
    vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
    grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
    grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
    grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
    def on_textedit_rate():
        # manual entry overrides the slider
        fee_slider.deactivate()
    feerate_e = FeerateEdit(lambda: 0)
    # default suggestion: at least 1.5x, and at least +1 sat/vbyte over the old rate
    feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
    feerate_e.textEdited.connect(on_textedit_rate)
    grid.addWidget(feerate_e, 2, 1)
    def on_slider_rate(dyn, pos, fee_rate):
        fee_slider.activate()
        if fee_rate is not None:
            feerate_e.setAmount(fee_rate / 1000)  # slider reports sat/kvB
    fee_slider = FeeSlider(self, self.config, on_slider_rate)
    fee_combo = FeeComboBox(fee_slider)
    fee_slider.deactivate()
    grid.addWidget(fee_slider, 3, 1)
    grid.addWidget(fee_combo, 3, 2)
    vbox.addLayout(grid)
    cb = QCheckBox(_('Final'))
    vbox.addWidget(cb)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    is_final = cb.isChecked()
    new_fee_rate = feerate_e.get_amount()
    try:
        new_tx = self.wallet.bump_fee(
            tx=tx,
            txid=txid,
            new_fee_rate=new_fee_rate,
            coins=self.get_coins(),
        )
    except CannotBumpFee as e:
        self.show_error(str(e))
        return
    if is_final:
        # "final" marks the replacement as non-replaceable
        new_tx.set_rbf(False)
    self.show_transaction(new_tx, tx_desc=tx_label)
def dscancel_dialog(self, tx: Transaction):
    """Show the dialog to cancel an unconfirmed RBF tx by double-spending its
    inputs back to the wallet at a higher fee, then open the resulting tx."""
    txid = tx.txid()
    assert txid
    if not isinstance(tx, PartialTransaction):
        tx = PartialTransaction.from_tx(tx)
    if not self._add_info_to_tx_from_wallet_and_network(tx):
        return
    fee = tx.get_fee()
    assert fee is not None
    tx_size = tx.estimated_size()
    old_fee_rate = fee / tx_size  # sat/vbyte
    d = WindowModalDialog(self, _('Cancel transaction'))
    vbox = QVBoxLayout(d)
    vbox.addWidget(WWLabel(_("Cancel an unconfirmed RBF transaction by double-spending "
                             "its inputs back to your wallet with a higher fee.")))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
    grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
    grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
    grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
    def on_textedit_rate():
        # manual entry overrides the slider
        fee_slider.deactivate()
    feerate_e = FeerateEdit(lambda: 0)
    # default suggestion: at least 1.5x, and at least +1 sat/vbyte over the old rate
    feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
    feerate_e.textEdited.connect(on_textedit_rate)
    grid.addWidget(feerate_e, 2, 1)
    def on_slider_rate(dyn, pos, fee_rate):
        fee_slider.activate()
        if fee_rate is not None:
            feerate_e.setAmount(fee_rate / 1000)  # slider reports sat/kvB
    fee_slider = FeeSlider(self, self.config, on_slider_rate)
    fee_combo = FeeComboBox(fee_slider)
    fee_slider.deactivate()
    grid.addWidget(fee_slider, 3, 1)
    grid.addWidget(fee_combo, 3, 2)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    new_fee_rate = feerate_e.get_amount()
    try:
        new_tx = self.wallet.dscancel(tx=tx, new_fee_rate=new_fee_rate)
    except CannotDoubleSpendTx as e:
        self.show_error(str(e))
        return
    self.show_transaction(new_tx)
def save_transaction_into_wallet(self, tx: Transaction):
    """Add an offline transaction to the wallet history.

    Returns True if saved, False if it was rejected (conflict or exception);
    errors are reported to the user via message boxes.
    """
    win = self.top_level_window()
    try:
        if not self.wallet.add_transaction(tx):
            win.show_error(_("Transaction could not be saved.") + "\n" +
                           _("It conflicts with current history."))
            return False
    except AddTransactionException as e:
        win.show_error(e)
        return False
    else:
        self.wallet.save_db()
        # need to update at least: history_list, utxo_list, address_list
        self.need_update.set()
        msg = (_("Transaction added to wallet history.") + '\n\n' +
               _("Note: this is an offline transaction, if you want the network "
                 "to see it, you need to broadcast it."))
        win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
        return True
def show_cert_mismatch_error(self):
    """Report a pinned-fingerprint SSL mismatch (once at a time) and close the window."""
    if self.showing_cert_mismatch_error:
        return
    self.showing_cert_mismatch_error = True
    message = (
        _("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.")
        + "\n\n"
        + _("Electrum will now exit.")
    )
    self.show_critical(title=_("Certificate mismatch"), msg=message)
    self.showing_cert_mismatch_error = False
    self.close()
|
import os
from collections import defaultdict
from datetime import datetime as dt
import numpy as np
from .utils.ek_raw_io import RawSimradFile, SimradEOF
FILENAME_DATETIME_EK60 = (
"(?P<survey>.+)?-?D(?P<date>\\w{1,8})-T(?P<time>\\w{1,6})-?(?P<postfix>\\w+)?.raw"
)
class ParseBase:
    """Base class shared by all raw-file convert/parse classes."""

    def __init__(self, file, storage_options):
        self.source_file = file
        self.storage_options = storage_options
        # regex pattern used to grab datetime embedded in filename
        self.timestamp_pattern = None
        # collected ping times
        self.ping_time = []

    def _print_status(self):
        """Print a console message describing the raw file being parsed."""
class ParseEK(ParseBase):
"""Class for converting data from Simrad echosounders."""
def __init__(self, file, params, storage_options):
    super().__init__(file, storage_options)
    # Parent class attributes
    # regex pattern used to grab datetime embedded in filename
    self.timestamp_pattern = FILENAME_DATETIME_EK60
    # Class attributes
    self.config_datagram = None  # set by parse_raw from the file's first datagram
    # raw samples per field and channel: [field][channel] -> list (one entry per ping)
    self.ping_data_dict = defaultdict(lambda: defaultdict(list))
    self.ping_time = defaultdict(list)  # store ping time according to channel
    self.num_range_bin_groups = None  # number of range_bin groups
    self.ch_ids = defaultdict(
        list
    )  # Stores the channel ids for each data type (power, angle, complex)
    # datagram types to keep, derived from user params (e.g. 'ALL'/'GPS'/'CONFIG'/'ENV')
    self.data_type = self._select_datagrams(params)
    self.nmea = defaultdict(
        list
    )  # Dictionary to store NMEA data(timestamp and string)
    self.mru = defaultdict(
        list
    )  # Dictionary to store MRU data (heading, pitch, roll, heave)
    self.fil_coeffs = defaultdict(
        dict
    )  # Dictionary to store PC and WBT coefficients
    self.fil_df = defaultdict(dict)  # Dictionary to store filter decimation factors
    self.CON1_datagram = None  # Holds the ME70 CON1 datagram
def _print_status(self):
    """Print which raw file is being parsed and the time of its first ping."""
    time = (
        self.config_datagram["timestamp"].astype(dt).strftime("%Y-%b-%d %H:%M:%S")
    )
    # Fix: use single quotes inside the double-quoted f-string — reusing the
    # outer quote inside an f-string is a SyntaxError on Python < 3.12.
    print(
        f"{dt.now().strftime('%H:%M:%S')} parsing file {os.path.basename(self.source_file)}, "
        f"time of first ping: {time}"
    )
def parse_raw(self):
    """Parse raw data file from Simrad EK60, EK80, and EA640 echosounders."""
    with RawSimradFile(
        self.source_file, "r", storage_options=self.storage_options
    ) as fid:
        self.config_datagram = fid.read(1)
        self.config_datagram["timestamp"] = np.datetime64(
            self.config_datagram["timestamp"].replace(tzinfo=None), "[ms]"
        )
        if "configuration" in self.config_datagram:
            for v in self.config_datagram["configuration"].values():
                if "pulse_duration" not in v and "pulse_length" in v:
                    # it seems like sometimes this field can appear with the name "pulse_length"
                    # and in the form of floats separated by semicolons
                    v["pulse_duration"] = [
                        float(x) for x in v["pulse_length"].split(";")
                    ]
        # If exporting to XML file (EK80/EA640 only), print a message
        if "print_export_msg" in self.data_type:
            if "ENV" in self.data_type:
                xml_type = "environment"
            elif "CONFIG" in self.data_type:
                xml_type = "configuration"
            # Fix: single quotes inside the f-string — reusing the outer quote
            # inside an f-string is a SyntaxError on Python < 3.12.
            print(f"{dt.now().strftime('%H:%M:%S')} exporting {xml_type} XML file")
            # Don't parse anything else if only the config xml is required.
            if "CONFIG" in self.data_type:
                return
        # If not exporting to XML, print the usual converting message
        else:
            self._print_status()
        # Check if reading an ME70 file with a CON1 datagram.
        next_datagram = fid.peek()
        if next_datagram == "CON1":
            self.CON1_datagram = fid.read(1)
        else:
            self.CON1_datagram = None
        # IDs of the channels found in the dataset
        # self.ch_ids = list(self.config_datagram['configuration'].keys())
        # Read the rest of datagrams
        self._read_datagrams(fid)
    if "ALL" in self.data_type:
        # Convert ping time to 1D numpy array, stored in dict indexed by channel,
        # this will help merge data from all channels into a cube
        for ch, val in self.ping_time.items():
            self.ping_time[ch] = np.array(val)
        # Manufacturer-specific power conversion factor
        INDEX2POWER = 10.0 * np.log10(2.0) / 256.0
        # Rectangularize all data and convert to numpy array indexed by channel
        for data_type in ["power", "angle", "complex"]:
            for k, v in self.ping_data_dict[data_type].items():
                if all(
                    (x is None) or (x.size == 0) for x in v
                ):  # if no data in a particular channel
                    self.ping_data_dict[data_type][k] = None
                else:
                    # Sort complex and power/angle channels
                    self.ch_ids[data_type].append(k)
                    self.ping_data_dict[data_type][k] = self.pad_shorter_ping(v)
                    if data_type == "power":
                        self.ping_data_dict[data_type][k] = (
                            self.ping_data_dict[data_type][k].astype("float32")
                            * INDEX2POWER
                        )
def _read_datagrams(self, fid):
    """Read all datagrams.

    A sample EK60 RAW0 datagram:
        {'type': 'RAW0',
        'low_date': 71406392,
        'high_date': 30647127,
        'channel': 1,
        'mode': 3,
        'transducer_depth': 9.149999618530273,
        'frequency': 18000.0,
        'transmit_power': 2000.0,
        'pulse_length': 0.0010239999974146485,
        'bandwidth': 1573.66552734375,
        'sample_interval': 0.00025599999935366213,
        'sound_velocity': 1466.0,
        'absorption_coefficient': 0.0030043544247746468,
        'heave': 0.0,
        'roll': 0.0,
        'pitch': 0.0,
        'temperature': 4.0,
        'heading': 0.0,
        'transmit_mode': 1,
        'spare0': '\x00\x00\x00\x00\x00\x00',
        'offset': 0,
        'count': 1386,
        'timestamp': numpy.datetime64('2018-02-11T16:40:25.276'),
        'bytes_read': 5648,
        'power': array([ -6876, -8726, -11086, ..., -11913, -12522, -11799], dtype=int16),
        'angle': array([[ 110, 13],
            [ 3, -4],
            [ -54, -65],
            ...,
            [ -92, -107],
            [-104, -122],
            [ 82, 74]], dtype=int8)}

    A sample EK80 XML-parameter datagram:
        {'channel_id': 'WBT 545612-15 ES200-7C',
        'channel_mode': 0,
        'pulse_form': 1,
        'frequency_start': '160000',
        'frequency_end': '260000',
        'pulse_duration': 0.001024,
        'sample_interval': 5.33333333333333e-06,
        'transmit_power': 15.0,
        'slope': 0.01220703125}

    A sample EK80 XML-environment datagram:
        {'type': 'XML0',
        'low_date': 3137819385,
        'high_date': 30616609,
        'timestamp': numpy.datetime64('2017-09-12T23:49:10.723'),
        'bytes_read': 448,
        'subtype': 'environment',
        'environment': {'depth': 240.0,
        'acidity': 8.0,
        'salinity': 33.7,
        'sound_speed': 1486.4,
        'temperature': 6.9,
        'latitude': 45.0,
        'sound_velocity_profile': [1.0, 1486.4, 1000.0, 1486.4],
        'sound_velocity_source': 'Manual',
        'drop_keel_offset': 0.0,
        'drop_keel_offset_is_manual': 0,
        'water_level_draft': 0.0,
        'water_level_draft_is_manual': 0,
        'transducer_name': 'Unknown',
        'transducer_sound_speed': 1490.0},
        'xml': '<?xml version="1.0" encoding="utf-8"?>\r\n<Environment Depth="240" ... />\r\n</Environment>'}
    """  # noqa
    num_datagrams_parsed = 0
    while True:
        try:
            # TODO: @ngkvain: what I need in the code to not PARSE the raw0/3 datagram
            # when users only want CONFIG or ENV, but the way this is implemented
            # the raw0/3 datagrams are still parsed, you are just not saving them
            new_datagram = fid.read(1)
        except SimradEOF:
            break
        # Convert the timestamp to a datetime64 object.
        new_datagram["timestamp"] = np.datetime64(
            new_datagram["timestamp"].replace(tzinfo=None), "[ms]"
        )
        num_datagrams_parsed += 1
        # Skip any datagram that the user does not want to save
        if (
            not any(
                new_datagram["type"].startswith(dgram) for dgram in self.data_type
            )
            and "ALL" not in self.data_type
        ):
            continue
        # XML datagrams store environment or instrument parameters for EK80
        if new_datagram["type"].startswith("XML"):
            if new_datagram["subtype"] == "environment" and (
                "ENV" in self.data_type or "ALL" in self.data_type
            ):
                self.environment = new_datagram["environment"]
                self.environment["xml"] = new_datagram["xml"]
                self.environment["timestamp"] = new_datagram["timestamp"]
                # Don't parse anything else if only the environment xml is required.
                if "ENV" in self.data_type:
                    break
            elif new_datagram["subtype"] == "parameter" and (
                "ALL" in self.data_type
            ):
                current_parameters = new_datagram["parameter"]
        # RAW0 datagrams store raw acoustic data for a channel for EK60
        elif new_datagram["type"].startswith("RAW0"):
            # Save channel-specific ping time. The channels are stored as 1-based indices
            self.ping_time[new_datagram["channel"]].append(
                new_datagram["timestamp"]
            )
            # Append ping by ping data
            self._append_channel_ping_data(new_datagram)
        # RAW3 datagrams store raw acoustic data for a channel for EK80
        elif new_datagram["type"].startswith("RAW3"):
            curr_ch_id = new_datagram["channel_id"]
            # Check if the proceeding Parameter XML does not
            # match with data in this RAW3 datagram
            # NOTE(review): current_parameters is only bound after an XML
            # parameter datagram has been seen; a RAW3 arriving first would
            # raise NameError — confirm files always put XML0 before RAW3.
            if current_parameters["channel_id"] != curr_ch_id:
                raise ValueError("Parameter ID does not match RAW")
            # Save channel-specific ping time
            self.ping_time[curr_ch_id].append(new_datagram["timestamp"])
            # Append ping by ping data
            new_datagram.update(current_parameters)
            self._append_channel_ping_data(new_datagram)
        # NME datagrams store ancillary data as NMEA-0817 style ASCII data.
        elif new_datagram["type"].startswith("NME"):
            self.nmea["timestamp"].append(new_datagram["timestamp"])
            self.nmea["nmea_string"].append(new_datagram["nmea_string"])
        # MRU datagrams contain motion data for each ping for EK80
        elif new_datagram["type"].startswith("MRU"):
            self.mru["heading"].append(new_datagram["heading"])
            self.mru["pitch"].append(new_datagram["pitch"])
            self.mru["roll"].append(new_datagram["roll"])
            self.mru["heave"].append(new_datagram["heave"])
            self.mru["timestamp"].append(new_datagram["timestamp"])
        # FIL datagrams contain filters for proccessing bascatter data for EK80
        elif new_datagram["type"].startswith("FIL"):
            self.fil_coeffs[new_datagram["channel_id"]][
                new_datagram["stage"]
            ] = new_datagram["coefficients"]
            self.fil_df[new_datagram["channel_id"]][
                new_datagram["stage"]
            ] = new_datagram["decimation_factor"]
        # TAG datagrams contain time-stamped annotations inserted via the recording software
        elif new_datagram["type"].startswith("TAG"):
            print("TAG datagram encountered.")
        # BOT datagrams contain sounder detected bottom depths from .bot files
        elif new_datagram["type"].startswith("BOT"):
            print("BOT datagram encountered.")
        # DEP datagrams contain sounder detected bottom depths from .out files
        # as well as reflectivity data
        elif new_datagram["type"].startswith("DEP"):
            print("DEP datagram encountered.")
        else:
            print("Unknown datagram type: " + str(new_datagram["type"]))
def _append_channel_ping_data(self, datagram):
    """Append one ping's datagram fields to ping_data_dict, keyed by field then channel."""
    # TODO: do a thorough check with the convention and processing
    # EK80 datagrams carry 'channel_id'; EK60 carries a numeric 'channel'
    try:
        ch_id = datagram["channel_id"]
    except KeyError:
        ch_id = datagram["channel"]
    for field, value in datagram.items():
        self.ping_data_dict[field][ch_id].append(value)
@staticmethod
def pad_shorter_ping(data_list) -> np.ndarray:
    """
    Pad shorter pings with NaN so all pings fit one rectangular array.

    Parameters
    ----------
    data_list : list
        Power, angle, or complex samples for each channel from RAW3 datagram.
        Each ping is one entry in the list.

    Returns
    -------
    out_array : np.ndarray
        Numpy array containing samplings from all pings,
        NaN-padded where pings are of different lengths.
    """
    ping_lens = np.array([len(ping) for ping in data_list])
    if np.unique(ping_lens).size == 1:
        # all pings share one length along range: plain stacking suffices
        return np.array(data_list)
    max_len = ping_lens.max()
    if data_list[0].ndim == 2:
        # angle data carry an extra trailing dim (alongship/athwartship)
        mask = ping_lens[:, None, None] > np.array([np.arange(max_len)] * 2).T
    else:
        mask = ping_lens[:, None] > np.arange(max_len)
    # np.nan is implicitly real; complex data need a complex fill value
    if data_list[0].dtype in {np.dtype("complex64"), np.dtype("complex128")}:
        out_array = np.full(mask.shape, np.nan + 0j)
    else:
        out_array = np.full(mask.shape, np.nan)
    # flatten before assigning in case data are more than 1-D
    out_array[mask] = np.concatenate(data_list).reshape(-1)
    return out_array
def _select_datagrams(self, params):
"""Translates user input into specific datagrams or ALL
Valid use cases:
# get GPS info only (EK60, EK80)
# ec.to_netcdf(data_type='GPS')
# get configuration XML only (EK80)
# ec.to_netcdf(data_type='CONFIG')
# get environment XML only (EK80)
# ec.to_netcdf(data_type='ENV')
"""
| import os
from collections import defaultdict
from datetime import datetime as dt
import numpy as np
from .utils.ek_raw_io import RawSimradFile, SimradEOF
FILENAME_DATETIME_EK60 = (
"(?P<survey>.+)?-?D(?P<date>\\w{1,8})-T(?P<time>\\w{1,6})-?(?P<postfix>\\w+)?.raw"
)
class ParseBase:
    """Base class shared by all raw-file convert/parse classes."""

    def __init__(self, file, storage_options):
        self.source_file = file
        self.storage_options = storage_options
        # regex pattern used to grab datetime embedded in filename
        self.timestamp_pattern = None
        # collected ping times
        self.ping_time = []

    def _print_status(self):
        """Print a console message describing the raw file being parsed."""
class ParseEK(ParseBase):
"""Class for converting data from Simrad echosounders."""
def __init__(self, file, params, storage_options):
    super().__init__(file, storage_options)
    # Parent class attributes
    # regex pattern used to grab datetime embedded in filename
    self.timestamp_pattern = FILENAME_DATETIME_EK60
    # Class attributes
    self.config_datagram = None  # set by parse_raw from the file's first datagram
    # raw samples per field and channel: [field][channel] -> list (one entry per ping)
    self.ping_data_dict = defaultdict(lambda: defaultdict(list))
    self.ping_time = defaultdict(list)  # store ping time according to channel
    self.num_range_bin_groups = None  # number of range_bin groups
    self.ch_ids = defaultdict(
        list
    )  # Stores the channel ids for each data type (power, angle, complex)
    # datagram types to keep, derived from user params (e.g. 'ALL'/'GPS'/'CONFIG'/'ENV')
    self.data_type = self._select_datagrams(params)
    self.nmea = defaultdict(
        list
    )  # Dictionary to store NMEA data(timestamp and string)
    self.mru = defaultdict(
        list
    )  # Dictionary to store MRU data (heading, pitch, roll, heave)
    self.fil_coeffs = defaultdict(
        dict
    )  # Dictionary to store PC and WBT coefficients
    self.fil_df = defaultdict(dict)  # Dictionary to store filter decimation factors
    self.CON1_datagram = None  # Holds the ME70 CON1 datagram
def _print_status(self):
    """Print which raw file is being parsed and the time of its first ping."""
    first_ping_time = self.config_datagram["timestamp"].astype(dt).strftime(
        "%Y-%b-%d %H:%M:%S"
    )
    print(
        f"{dt.now().strftime('%H:%M:%S')} parsing file {os.path.basename(self.source_file)}, "
        f"time of first ping: {first_ping_time}"
    )
def parse_raw(self):
    """Parse raw data file from Simrad EK60, EK80, and EA640 echosounders."""
    with RawSimradFile(
        self.source_file, "r", storage_options=self.storage_options
    ) as fid:
        # first datagram is the configuration header
        self.config_datagram = fid.read(1)
        self.config_datagram["timestamp"] = np.datetime64(
            self.config_datagram["timestamp"].replace(tzinfo=None), "[ms]"
        )
        if "configuration" in self.config_datagram:
            for v in self.config_datagram["configuration"].values():
                if "pulse_duration" not in v and "pulse_length" in v:
                    # it seems like sometimes this field can appear with the name "pulse_length"
                    # and in the form of floats separated by semicolons
                    v["pulse_duration"] = [
                        float(x) for x in v["pulse_length"].split(";")
                    ]
        # If exporting to XML file (EK80/EA640 only), print a message
        if "print_export_msg" in self.data_type:
            if "ENV" in self.data_type:
                xml_type = "environment"
            elif "CONFIG" in self.data_type:
                xml_type = "configuration"
            print(f"{dt.now().strftime('%H:%M:%S')} exporting {xml_type} XML file")
            # Don't parse anything else if only the config xml is required.
            if "CONFIG" in self.data_type:
                return
        # If not exporting to XML, print the usual converting message
        else:
            self._print_status()
        # Check if reading an ME70 file with a CON1 datagram.
        next_datagram = fid.peek()
        if next_datagram == "CON1":
            self.CON1_datagram = fid.read(1)
        else:
            self.CON1_datagram = None
        # IDs of the channels found in the dataset
        # self.ch_ids = list(self.config_datagram['configuration'].keys())
        # Read the rest of datagrams
        self._read_datagrams(fid)
    if "ALL" in self.data_type:
        # Convert ping time to 1D numpy array, stored in dict indexed by channel,
        # this will help merge data from all channels into a cube
        for ch, val in self.ping_time.items():
            self.ping_time[ch] = np.array(val)
        # Manufacturer-specific power conversion factor
        INDEX2POWER = 10.0 * np.log10(2.0) / 256.0
        # Rectangularize all data and convert to numpy array indexed by channel
        for data_type in ["power", "angle", "complex"]:
            for k, v in self.ping_data_dict[data_type].items():
                if all(
                    (x is None) or (x.size == 0) for x in v
                ):  # if no data in a particular channel
                    self.ping_data_dict[data_type][k] = None
                else:
                    # Sort complex and power/angle channels
                    self.ch_ids[data_type].append(k)
                    self.ping_data_dict[data_type][k] = self.pad_shorter_ping(v)
                    if data_type == "power":
                        self.ping_data_dict[data_type][k] = (
                            self.ping_data_dict[data_type][k].astype("float32")
                            * INDEX2POWER
                        )
def _read_datagrams(self, fid):
    """Read all datagrams.

    A sample EK60 RAW0 datagram:
        {'type': 'RAW0',
        'low_date': 71406392,
        'high_date': 30647127,
        'channel': 1,
        'mode': 3,
        'transducer_depth': 9.149999618530273,
        'frequency': 18000.0,
        'transmit_power': 2000.0,
        'pulse_length': 0.0010239999974146485,
        'bandwidth': 1573.66552734375,
        'sample_interval': 0.00025599999935366213,
        'sound_velocity': 1466.0,
        'absorption_coefficient': 0.0030043544247746468,
        'heave': 0.0,
        'roll': 0.0,
        'pitch': 0.0,
        'temperature': 4.0,
        'heading': 0.0,
        'transmit_mode': 1,
        'spare0': '\x00\x00\x00\x00\x00\x00',
        'offset': 0,
        'count': 1386,
        'timestamp': numpy.datetime64('2018-02-11T16:40:25.276'),
        'bytes_read': 5648,
        'power': array([ -6876, -8726, -11086, ..., -11913, -12522, -11799], dtype=int16),
        'angle': array([[ 110, 13],
            [ 3, -4],
            [ -54, -65],
            ...,
            [ -92, -107],
            [-104, -122],
            [ 82, 74]], dtype=int8)}

    A sample EK80 XML-parameter datagram:
        {'channel_id': 'WBT 545612-15 ES200-7C',
        'channel_mode': 0,
        'pulse_form': 1,
        'frequency_start': '160000',
        'frequency_end': '260000',
        'pulse_duration': 0.001024,
        'sample_interval': 5.33333333333333e-06,
        'transmit_power': 15.0,
        'slope': 0.01220703125}

    A sample EK80 XML-environment datagram:
        {'type': 'XML0',
        'low_date': 3137819385,
        'high_date': 30616609,
        'timestamp': numpy.datetime64('2017-09-12T23:49:10.723'),
        'bytes_read': 448,
        'subtype': 'environment',
        'environment': {'depth': 240.0,
        'acidity': 8.0,
        'salinity': 33.7,
        'sound_speed': 1486.4,
        'temperature': 6.9,
        'latitude': 45.0,
        'sound_velocity_profile': [1.0, 1486.4, 1000.0, 1486.4],
        'sound_velocity_source': 'Manual',
        'drop_keel_offset': 0.0,
        'drop_keel_offset_is_manual': 0,
        'water_level_draft': 0.0,
        'water_level_draft_is_manual': 0,
        'transducer_name': 'Unknown',
        'transducer_sound_speed': 1490.0},
        'xml': '<?xml version="1.0" encoding="utf-8"?>\r\n<Environment Depth="240" ... />\r\n</Environment>'}
    """  # noqa
    num_datagrams_parsed = 0
    while True:
        try:
            # TODO: @ngkvain: what I need in the code to not PARSE the raw0/3 datagram
            # when users only want CONFIG or ENV, but the way this is implemented
            # the raw0/3 datagrams are still parsed, you are just not saving them
            new_datagram = fid.read(1)
        except SimradEOF:
            break
        # Convert the timestamp to a datetime64 object.
        new_datagram["timestamp"] = np.datetime64(
            new_datagram["timestamp"].replace(tzinfo=None), "[ms]"
        )
        num_datagrams_parsed += 1
        # Skip any datagram that the user does not want to save
        if (
            not any(
                new_datagram["type"].startswith(dgram) for dgram in self.data_type
            )
            and "ALL" not in self.data_type
        ):
            continue
        # XML datagrams store environment or instrument parameters for EK80
        if new_datagram["type"].startswith("XML"):
            if new_datagram["subtype"] == "environment" and (
                "ENV" in self.data_type or "ALL" in self.data_type
            ):
                self.environment = new_datagram["environment"]
                self.environment["xml"] = new_datagram["xml"]
                self.environment["timestamp"] = new_datagram["timestamp"]
                # Don't parse anything else if only the environment xml is required.
                if "ENV" in self.data_type:
                    break
            elif new_datagram["subtype"] == "parameter" and (
                "ALL" in self.data_type
            ):
                current_parameters = new_datagram["parameter"]
        # RAW0 datagrams store raw acoustic data for a channel for EK60
        elif new_datagram["type"].startswith("RAW0"):
            # Save channel-specific ping time. The channels are stored as 1-based indices
            self.ping_time[new_datagram["channel"]].append(
                new_datagram["timestamp"]
            )
            # Append ping by ping data
            self._append_channel_ping_data(new_datagram)
        # RAW3 datagrams store raw acoustic data for a channel for EK80
        elif new_datagram["type"].startswith("RAW3"):
            curr_ch_id = new_datagram["channel_id"]
            # Check if the proceeding Parameter XML does not
            # match with data in this RAW3 datagram
            # NOTE(review): current_parameters is only bound after an XML
            # parameter datagram has been seen; a RAW3 arriving first would
            # raise NameError — confirm files always put XML0 before RAW3.
            if current_parameters["channel_id"] != curr_ch_id:
                raise ValueError("Parameter ID does not match RAW")
            # Save channel-specific ping time
            self.ping_time[curr_ch_id].append(new_datagram["timestamp"])
            # Append ping by ping data
            new_datagram.update(current_parameters)
            self._append_channel_ping_data(new_datagram)
        # NME datagrams store ancillary data as NMEA-0817 style ASCII data.
        elif new_datagram["type"].startswith("NME"):
            self.nmea["timestamp"].append(new_datagram["timestamp"])
            self.nmea["nmea_string"].append(new_datagram["nmea_string"])
        # MRU datagrams contain motion data for each ping for EK80
        elif new_datagram["type"].startswith("MRU"):
            self.mru["heading"].append(new_datagram["heading"])
            self.mru["pitch"].append(new_datagram["pitch"])
            self.mru["roll"].append(new_datagram["roll"])
            self.mru["heave"].append(new_datagram["heave"])
            self.mru["timestamp"].append(new_datagram["timestamp"])
        # FIL datagrams contain filters for proccessing bascatter data for EK80
        elif new_datagram["type"].startswith("FIL"):
            self.fil_coeffs[new_datagram["channel_id"]][
                new_datagram["stage"]
            ] = new_datagram["coefficients"]
            self.fil_df[new_datagram["channel_id"]][
                new_datagram["stage"]
            ] = new_datagram["decimation_factor"]
        # TAG datagrams contain time-stamped annotations inserted via the recording software
        elif new_datagram["type"].startswith("TAG"):
            print("TAG datagram encountered.")
        # BOT datagrams contain sounder detected bottom depths from .bot files
        elif new_datagram["type"].startswith("BOT"):
            print("BOT datagram encountered.")
        # DEP datagrams contain sounder detected bottom depths from .out files
        # as well as reflectivity data
        elif new_datagram["type"].startswith("DEP"):
            print("DEP datagram encountered.")
        else:
            print("Unknown datagram type: " + str(new_datagram["type"]))
def _append_channel_ping_data(self, datagram):
"""Append ping by ping data."""
# TODO: do a thorough check with the convention and processing
# unsaved = ['channel', 'channel_id', 'low_date', 'high_date', # 'offset', 'frequency' ,
# 'transmit_mode', 'spare0', 'bytes_read', 'type'] #, 'n_complex']
ch_id = (
datagram["channel_id"] if "channel_id" in datagram else datagram["channel"]
)
for k, v in datagram.items():
# if k not in unsaved:
self.ping_data_dict[k][ch_id].append(v)
@staticmethod
def pad_shorter_ping(data_list) -> np.ndarray:
"""
Pad shorter ping with NaN: power, angle, complex samples.
Parameters
----------
data_list : list
Power, angle, or complex samples for each channel from RAW3 datagram.
Each ping is one entry in the list.
Returns
-------
out_array : np.ndarray
Numpy array containing samplings from all pings.
The array is NaN-padded if some pings are of different lengths.
"""
lens = np.array([len(item) for item in data_list])
if (
np.unique(lens).size != 1
): # if some pings have different lengths along range
if data_list[0].ndim == 2:
# Angle data have an extra dimension for alongship and athwartship samples
mask = lens[:, None, None] > np.array([np.arange(lens.max())] * 2).T
else:
mask = lens[:, None] > np.arange(lens.max())
# Take care of problem of np.nan being implicitly "real"
if data_list[0].dtype in {np.dtype("complex64"), np.dtype("complex128")}:
out_array = np.full(mask.shape, np.nan + 0j)
else:
out_array = np.full(mask.shape, np.nan)
# Fill in values
out_array[mask] = np.concatenate(data_list).reshape(
-1
) # reshape in case data > 1D
else:
out_array = np.array(data_list)
return out_array
def _select_datagrams(self, params):
    """Translate a user ``data_type`` request into specific datagrams or ALL.

    Valid use cases:

    # get GPS info only (EK60, EK80)
    # ec.to_netcdf(data_type='GPS')
    # get configuration XML only (EK80)
    # ec.to_netcdf(data_type='CONFIG')
    # get environment XML only (EK80)
    # ec.to_netcdf(data_type='ENV')
    """
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
from datetime import datetime
from covid import Covid
from userbot import CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern="^.covid (.*)")
async def corona(event):
    """Reply with current COVID-19 statistics for the requested country.

    Triggered by ``.covid <country>``; edits the triggering message in place
    with confirmed/active/deaths/recovered counts and the last-update time.
    """
    await event.edit("`Processing...`")
    country = event.pattern_match.group(1)
    covid = Covid()
    country_data = covid.get_status_by_country_name(country)
    if country_data:
        # Inner subscripts must use a different quote than the f-string
        # delimiter (same-quote nesting is a SyntaxError before Python 3.12).
        output_text = f"`⚠️Confirmed : {country_data['confirmed']}`\n"
        output_text += f"`☢️Active : {country_data['active']}`\n"
        output_text += f"`⚰️Deaths : {country_data['deaths']}`\n"
        output_text += f"`♥️Recovered : {country_data['recovered']}`\n"
        output_text += (
            "`Last update : "
            # The API reports milliseconds since the epoch; // 1000 -> seconds.
            f"{datetime.utcfromtimestamp(country_data['last_update'] // 1000).strftime('%Y-%m-%d %H:%M:%S')}`\n"
        )
        # No placeholders here, so a plain literal suffices (was a useless f-string).
        output_text += "Data provided by [The Techboy](https://t.me/thetechboy3)"
    else:
        output_text = "No information yet about this country!"
    await event.edit(f"Corona Virus Info in {country}:\n\n{output_text}")
# Register the .covid command in the userbot's global help index.
CMD_HELP.update({
    "covid":
    ".covid <country>"
    "\nUsage: Get an information about data covid-19 in your country.\n"
})
| # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
from datetime import datetime
from covid import Covid
from userbot import CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern="^.covid (.*)")
async def corona(event):
    """Edit the triggering message with COVID-19 statistics for a country."""
    await event.edit("`Processing...`")
    country = event.pattern_match.group(1)
    country_data = Covid().get_status_by_country_name(country)
    if not country_data:
        output_text = "No information yet about this country!"
    else:
        # Timestamp is in milliseconds; render it as a UTC date/time string.
        updated = datetime.utcfromtimestamp(
            country_data['last_update'] // 1000
        ).strftime('%Y-%m-%d %H:%M:%S')
        output_text = "".join([
            f"`⚠️Confirmed : {country_data['confirmed']}`\n",
            f"`☢️Active : {country_data['active']}`\n",
            f"`⚰️Deaths : {country_data['deaths']}`\n",
            f"`♥️Recovered : {country_data['recovered']}`\n",
            f"`Last update : {updated}`\n",
            "Data provided by [The Techboy](https://t.me/thetechboy3)",
        ])
    await event.edit(f"Corona Virus Info in {country}:\n\n{output_text}")
# Advertise the .covid command in the userbot's help listing.
CMD_HELP.update({
    "covid":
    ".covid <country>"
    "\nUsage: Get an information about data covid-19 in your country.\n"
})
|
from typing import Any
from datetime import datetime
def to_utc(dt: datetime) -> datetime:
    """Return *dt* re-derived from its POSIX timestamp as a naive datetime.

    NOTE(review): despite the name, ``fromtimestamp`` without a ``tz``
    argument converts back to *local* naive time, not UTC — confirm callers
    actually want this rather than ``datetime.fromtimestamp(ts, tz=timezone.utc)``.
    """
    return datetime.fromtimestamp(dt.timestamp())
def backoff_handler(details: Any) -> None:
    """Log a warning each time a retry backoff fires.

    ``details`` is a mapping from which only ``tries`` and ``wait`` are
    read (presumably the event dict passed by the ``backoff`` library —
    confirm against the registration site).
    """
    # Imported lazily, matching the original — likely to avoid an import cycle.
    from src.log import logger

    # Inner quotes must differ from the f-string delimiter: same-quote
    # nesting is a SyntaxError before Python 3.12.
    tries = details.get("tries", "???")
    wait = details.get("wait", "???")
    logger.warning(f"backing off after {tries} tries, waiting {wait}")
| from typing import Any
from datetime import datetime
def to_utc(dt: datetime) -> datetime:
    """Round-trip *dt* through its POSIX timestamp, yielding a naive datetime."""
    epoch_seconds = dt.timestamp()
    return datetime.fromtimestamp(epoch_seconds)
def backoff_handler(details: Any) -> None:
    """Emit a warning describing the current retry/backoff state."""
    from src.log import logger

    message = (
        f"backing off after {details.get('tries', '???')} tries, "
        f"waiting {details.get('wait', '???')}"
    )
    logger.warning(message)
|
"""
Vaccines blueprint views
"""
import time
from flask import render_template
from flask_babel import gettext
from app.data_tools import (
get_latest_vax_update, get_perc_pop_vax, enrich_frontend_data,
get_admins_perc, get_vax_trends, get_area_population
)
from app.ui import vaccines
from settings import PAGE_BASE_TITLE, REGIONS, PC_TO_OD_MAP
URL_VACCINES = "/vaccines"
view_type = 'vaccines'
@vaccines.get('/')
def national_vax_view():
    """Render the national (Italy-wide) vaccines report."""
    dashboard_title = gettext("Italy")
    # Inner call must use a different quote than the f-string delimiter
    # (same-quote nesting is a SyntaxError before Python 3.12).
    page_title = f'{gettext("Vaccines")} | {PAGE_BASE_TITLE}'
    population = get_area_population('Italia')
    perc_pop_vax = get_perc_pop_vax(population)
    report_data = enrich_frontend_data(
        page_title=page_title,
        view_type=view_type,
        dashboard_title=dashboard_title,
        ts=int(time.time()),
        latest_update=get_latest_vax_update(),
        admins_perc=get_admins_perc(),
        perc_pop_vax=perc_pop_vax,
        trends=get_vax_trends(),
        # Thousands-separated population figure for display.
        population="{:,d}".format(population)
    )
    return render_template("vaccines.html", **report_data)
@vaccines.get('/<region>')
def regional_vax_view(region):
    """Render the vaccines report for a single region."""
    dashboard_title = region
    # Inner call must use a different quote than the f-string delimiter
    # (same-quote nesting is a SyntaxError before Python 3.12).
    page_title = f'{gettext("Vaccines")} | {region} | {PAGE_BASE_TITLE}'
    # Map the region's display name to the open-data area code.
    area = PC_TO_OD_MAP[region]
    population = get_area_population(region)
    perc_pop_vax = get_perc_pop_vax(population, area)
    report_data = enrich_frontend_data(
        page_title=page_title,
        view_type=view_type,
        dashboard_title=dashboard_title,
        ts=int(time.time()),
        latest_update=get_latest_vax_update(),
        admins_perc=get_admins_perc(area=area),
        perc_pop_vax=perc_pop_vax,
        areas_length=len(REGIONS),
        area=region,
        trends=get_vax_trends(area),
        # Thousands-separated population figure for display.
        population="{:,d}".format(population)
    )
    return render_template("vaccines.html", **report_data)
| """
Vaccines blueprint views
"""
import time
from flask import render_template
from flask_babel import gettext
from app.data_tools import (
get_latest_vax_update, get_perc_pop_vax, enrich_frontend_data,
get_admins_perc, get_vax_trends, get_area_population
)
from app.ui import vaccines
from settings import PAGE_BASE_TITLE, REGIONS, PC_TO_OD_MAP
URL_VACCINES = "/vaccines"
view_type = 'vaccines'
@vaccines.get('/')
def national_vax_view():
    """Build and render the Italy-wide vaccination report page."""
    residents = get_area_population('Italia')
    context = enrich_frontend_data(
        page_title=f'{gettext("Vaccines")} | {PAGE_BASE_TITLE}',
        view_type=view_type,
        dashboard_title=gettext("Italy"),
        ts=int(time.time()),
        latest_update=get_latest_vax_update(),
        admins_perc=get_admins_perc(),
        perc_pop_vax=get_perc_pop_vax(residents),
        trends=get_vax_trends(),
        population="{:,d}".format(residents)
    )
    return render_template("vaccines.html", **context)
@vaccines.get('/<region>')
def regional_vax_view(region):
    """Build and render the vaccination report page for one region."""
    # Translate the region's display name into its open-data area code.
    od_area = PC_TO_OD_MAP[region]
    residents = get_area_population(region)
    context = enrich_frontend_data(
        page_title=f'{gettext("Vaccines")} | {region} | {PAGE_BASE_TITLE}',
        view_type=view_type,
        dashboard_title=region,
        ts=int(time.time()),
        latest_update=get_latest_vax_update(),
        admins_perc=get_admins_perc(area=od_area),
        perc_pop_vax=get_perc_pop_vax(residents, od_area),
        areas_length=len(REGIONS),
        area=region,
        trends=get_vax_trends(od_area),
        population="{:,d}".format(residents)
    )
    return render_template("vaccines.html", **context)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = ['TargetArgs', 'Target']
@pulumi.input_type
class TargetArgs:
    # Generated input-argument bag for the Target resource; the accessors
    # below are mechanical pulumi.get/pulumi.set wrappers (tfgen output).
    def __init__(__self__, *,
                 max_capacity: pulumi.Input[int],
                 min_capacity: pulumi.Input[int],
                 resource_id: pulumi.Input[str],
                 scalable_dimension: pulumi.Input[str],
                 service_namespace: pulumi.Input[str],
                 role_arn: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Target resource.
        :param pulumi.Input[int] max_capacity: The max capacity of the scalable target.
        :param pulumi.Input[int] min_capacity: The min capacity of the scalable target.
        :param pulumi.Input[str] resource_id: The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        :param pulumi.Input[str] scalable_dimension: The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        :param pulumi.Input[str] service_namespace: The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        :param pulumi.Input[str] role_arn: The ARN of the IAM role that allows Application AutoScaling to modify your scalable target on your behalf. This defaults to an IAM Service-Linked Role for most services and custom IAM Roles are ignored by the API for those namespaces. See the [AWS Application Auto Scaling documentation](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles) for more information about how this service interacts with IAM.
        """
        pulumi.set(__self__, "max_capacity", max_capacity)
        pulumi.set(__self__, "min_capacity", min_capacity)
        pulumi.set(__self__, "resource_id", resource_id)
        pulumi.set(__self__, "scalable_dimension", scalable_dimension)
        pulumi.set(__self__, "service_namespace", service_namespace)
        # role_arn is optional; leave it unset so the provider default applies.
        if role_arn is not None:
            pulumi.set(__self__, "role_arn", role_arn)

    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> pulumi.Input[int]:
        """
        The max capacity of the scalable target.
        """
        return pulumi.get(self, "max_capacity")

    @max_capacity.setter
    def max_capacity(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_capacity", value)

    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> pulumi.Input[int]:
        """
        The min capacity of the scalable target.
        """
        return pulumi.get(self, "min_capacity")

    @min_capacity.setter
    def min_capacity(self, value: pulumi.Input[int]):
        pulumi.set(self, "min_capacity", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> pulumi.Input[str]:
        """
        The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_id", value)

    @property
    @pulumi.getter(name="scalableDimension")
    def scalable_dimension(self) -> pulumi.Input[str]:
        """
        The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        """
        return pulumi.get(self, "scalable_dimension")

    @scalable_dimension.setter
    def scalable_dimension(self, value: pulumi.Input[str]):
        pulumi.set(self, "scalable_dimension", value)

    @property
    @pulumi.getter(name="serviceNamespace")
    def service_namespace(self) -> pulumi.Input[str]:
        """
        The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        """
        return pulumi.get(self, "service_namespace")

    @service_namespace.setter
    def service_namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "service_namespace", value)

    @property
    @pulumi.getter(name="roleArn")
    def role_arn(self) -> Optional[pulumi.Input[str]]:
        """
        The ARN of the IAM role that allows Application AutoScaling to modify your scalable target on your behalf. This defaults to an IAM Service-Linked Role for most services and custom IAM Roles are ignored by the API for those namespaces. See the [AWS Application Auto Scaling documentation](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles) for more information about how this service interacts with IAM.
        """
        return pulumi.get(self, "role_arn")

    @role_arn.setter
    def role_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "role_arn", value)
class Target(pulumi.CustomResource):
    # Generated resource class (tfgen output). Doc change only: the example
    # snippets in the overload docstrings reused the f-string's own quote for
    # inner subscripts (a SyntaxError before Python 3.12); they now use single
    # quotes inside double-quoted f-strings.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 max_capacity: Optional[pulumi.Input[int]] = None,
                 min_capacity: Optional[pulumi.Input[int]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None,
                 role_arn: Optional[pulumi.Input[str]] = None,
                 scalable_dimension: Optional[pulumi.Input[str]] = None,
                 service_namespace: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Provides an Application AutoScaling ScalableTarget resource. To manage policies which get attached to the target, see the `appautoscaling.Policy` resource.
        > **NOTE:** The [Application Auto Scaling service automatically attempts to manage IAM Service-Linked Roles](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles) when registering certain service namespaces for the first time. To manually manage this role, see the `iam.ServiceLinkedRole` resource.
        ## Example Usage
        ### DynamoDB Table Autoscaling
        ```python
        import pulumi
        import pulumi_aws as aws
        dynamodb_table_read_target = aws.appautoscaling.Target("dynamodbTableReadTarget",
            max_capacity=100,
            min_capacity=5,
            resource_id=f"table/{aws_dynamodb_table['example']['name']}",
            scalable_dimension="dynamodb:table:ReadCapacityUnits",
            service_namespace="dynamodb")
        ```
        ### DynamoDB Index Autoscaling
        ```python
        import pulumi
        import pulumi_aws as aws
        dynamodb_index_read_target = aws.appautoscaling.Target("dynamodbIndexReadTarget",
            max_capacity=100,
            min_capacity=5,
            resource_id=f"table/{aws_dynamodb_table['example']['name']}/index/{var['index_name']}",
            scalable_dimension="dynamodb:index:ReadCapacityUnits",
            service_namespace="dynamodb")
        ```
        ### ECS Service Autoscaling
        ```python
        import pulumi
        import pulumi_aws as aws
        ecs_target = aws.appautoscaling.Target("ecsTarget",
            max_capacity=4,
            min_capacity=1,
            resource_id=f"service/{aws_ecs_cluster['example']['name']}/{aws_ecs_service['example']['name']}",
            scalable_dimension="ecs:service:DesiredCount",
            service_namespace="ecs")
        ```
        ### Aurora Read Replica Autoscaling
        ```python
        import pulumi
        import pulumi_aws as aws
        replicas = aws.appautoscaling.Target("replicas",
            max_capacity=15,
            min_capacity=1,
            resource_id=f"cluster:{aws_rds_cluster['example']['id']}",
            scalable_dimension="rds:cluster:ReadReplicaCount",
            service_namespace="rds")
        ```
        ### MSK / Kafka Autoscaling
        ```python
        import pulumi
        import pulumi_aws as aws
        msk_target = aws.appautoscaling.Target("mskTarget",
            max_capacity=8,
            min_capacity=1,
            resource_id=aws_msk_cluster["example"]["arn"],
            scalable_dimension="kafka:broker-storage:VolumeSize",
            service_namespace="kafka")
        ```
        ## Import
        Application AutoScaling Target can be imported using the `service-namespace` , `resource-id` and `scalable-dimension` separated by `/`.
        ```sh
        $ pulumi import aws:appautoscaling/target:Target test-target service-namespace/resource-id/scalable-dimension
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[int] max_capacity: The max capacity of the scalable target.
        :param pulumi.Input[int] min_capacity: The min capacity of the scalable target.
        :param pulumi.Input[str] resource_id: The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        :param pulumi.Input[str] role_arn: The ARN of the IAM role that allows Application AutoScaling to modify your scalable target on your behalf. This defaults to an IAM Service-Linked Role for most services and custom IAM Roles are ignored by the API for those namespaces. See the [AWS Application Auto Scaling documentation](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles) for more information about how this service interacts with IAM.
        :param pulumi.Input[str] scalable_dimension: The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        :param pulumi.Input[str] service_namespace: The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: TargetArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides an Application AutoScaling ScalableTarget resource. To manage policies which get attached to the target, see the `appautoscaling.Policy` resource.
        > **NOTE:** The [Application Auto Scaling service automatically attempts to manage IAM Service-Linked Roles](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles) when registering certain service namespaces for the first time. To manually manage this role, see the `iam.ServiceLinkedRole` resource.
        ## Example Usage
        ### DynamoDB Table Autoscaling
        ```python
        import pulumi
        import pulumi_aws as aws
        dynamodb_table_read_target = aws.appautoscaling.Target("dynamodbTableReadTarget",
            max_capacity=100,
            min_capacity=5,
            resource_id=f"table/{aws_dynamodb_table['example']['name']}",
            scalable_dimension="dynamodb:table:ReadCapacityUnits",
            service_namespace="dynamodb")
        ```
        ### DynamoDB Index Autoscaling
        ```python
        import pulumi
        import pulumi_aws as aws
        dynamodb_index_read_target = aws.appautoscaling.Target("dynamodbIndexReadTarget",
            max_capacity=100,
            min_capacity=5,
            resource_id=f"table/{aws_dynamodb_table['example']['name']}/index/{var['index_name']}",
            scalable_dimension="dynamodb:index:ReadCapacityUnits",
            service_namespace="dynamodb")
        ```
        ### ECS Service Autoscaling
        ```python
        import pulumi
        import pulumi_aws as aws
        ecs_target = aws.appautoscaling.Target("ecsTarget",
            max_capacity=4,
            min_capacity=1,
            resource_id=f"service/{aws_ecs_cluster['example']['name']}/{aws_ecs_service['example']['name']}",
            scalable_dimension="ecs:service:DesiredCount",
            service_namespace="ecs")
        ```
        ### Aurora Read Replica Autoscaling
        ```python
        import pulumi
        import pulumi_aws as aws
        replicas = aws.appautoscaling.Target("replicas",
            max_capacity=15,
            min_capacity=1,
            resource_id=f"cluster:{aws_rds_cluster['example']['id']}",
            scalable_dimension="rds:cluster:ReadReplicaCount",
            service_namespace="rds")
        ```
        ### MSK / Kafka Autoscaling
        ```python
        import pulumi
        import pulumi_aws as aws
        msk_target = aws.appautoscaling.Target("mskTarget",
            max_capacity=8,
            min_capacity=1,
            resource_id=aws_msk_cluster["example"]["arn"],
            scalable_dimension="kafka:broker-storage:VolumeSize",
            service_namespace="kafka")
        ```
        ## Import
        Application AutoScaling Target can be imported using the `service-namespace` , `resource-id` and `scalable-dimension` separated by `/`.
        ```sh
        $ pulumi import aws:appautoscaling/target:Target test-target service-namespace/resource-id/scalable-dimension
        ```
        :param str resource_name: The name of the resource.
        :param TargetArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args: TargetArgs) and keyword-argument overloads.
        resource_args, opts = _utilities.get_resource_args_opts(TargetArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       max_capacity: Optional[pulumi.Input[int]] = None,
                       min_capacity: Optional[pulumi.Input[int]] = None,
                       resource_id: Optional[pulumi.Input[str]] = None,
                       role_arn: Optional[pulumi.Input[str]] = None,
                       scalable_dimension: Optional[pulumi.Input[str]] = None,
                       service_namespace: Optional[pulumi.Input[str]] = None,
                       __props__=None,
                       __name__=None,
                       __opts__=None):
        # Support the deprecated __name__/__opts__ aliases before normal handling.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required properties.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if max_capacity is None and not opts.urn:
                raise TypeError("Missing required property 'max_capacity'")
            __props__['max_capacity'] = max_capacity
            if min_capacity is None and not opts.urn:
                raise TypeError("Missing required property 'min_capacity'")
            __props__['min_capacity'] = min_capacity
            if resource_id is None and not opts.urn:
                raise TypeError("Missing required property 'resource_id'")
            __props__['resource_id'] = resource_id
            __props__['role_arn'] = role_arn
            if scalable_dimension is None and not opts.urn:
                raise TypeError("Missing required property 'scalable_dimension'")
            __props__['scalable_dimension'] = scalable_dimension
            if service_namespace is None and not opts.urn:
                raise TypeError("Missing required property 'service_namespace'")
            __props__['service_namespace'] = service_namespace
        super(Target, __self__).__init__(
            'aws:appautoscaling/target:Target',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            max_capacity: Optional[pulumi.Input[int]] = None,
            min_capacity: Optional[pulumi.Input[int]] = None,
            resource_id: Optional[pulumi.Input[str]] = None,
            role_arn: Optional[pulumi.Input[str]] = None,
            scalable_dimension: Optional[pulumi.Input[str]] = None,
            service_namespace: Optional[pulumi.Input[str]] = None) -> 'Target':
        """
        Get an existing Target resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[int] max_capacity: The max capacity of the scalable target.
        :param pulumi.Input[int] min_capacity: The min capacity of the scalable target.
        :param pulumi.Input[str] resource_id: The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        :param pulumi.Input[str] role_arn: The ARN of the IAM role that allows Application AutoScaling to modify your scalable target on your behalf. This defaults to an IAM Service-Linked Role for most services and custom IAM Roles are ignored by the API for those namespaces. See the [AWS Application Auto Scaling documentation](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles) for more information about how this service interacts with IAM.
        :param pulumi.Input[str] scalable_dimension: The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        :param pulumi.Input[str] service_namespace: The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["max_capacity"] = max_capacity
        __props__["min_capacity"] = min_capacity
        __props__["resource_id"] = resource_id
        __props__["role_arn"] = role_arn
        __props__["scalable_dimension"] = scalable_dimension
        __props__["service_namespace"] = service_namespace
        return Target(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> pulumi.Output[int]:
        """
        The max capacity of the scalable target.
        """
        return pulumi.get(self, "max_capacity")
    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> pulumi.Output[int]:
        """
        The min capacity of the scalable target.
        """
        return pulumi.get(self, "min_capacity")
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> pulumi.Output[str]:
        """
        The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        """
        return pulumi.get(self, "resource_id")
    @property
    @pulumi.getter(name="roleArn")
    def role_arn(self) -> pulumi.Output[str]:
        """
        The ARN of the IAM role that allows Application AutoScaling to modify your scalable target on your behalf. This defaults to an IAM Service-Linked Role for most services and custom IAM Roles are ignored by the API for those namespaces. See the [AWS Application Auto Scaling documentation](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles) for more information about how this service interacts with IAM.
        """
        return pulumi.get(self, "role_arn")
    @property
    @pulumi.getter(name="scalableDimension")
    def scalable_dimension(self) -> pulumi.Output[str]:
        """
        The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        """
        return pulumi.get(self, "scalable_dimension")
    @property
    @pulumi.getter(name="serviceNamespace")
    def service_namespace(self) -> pulumi.Output[str]:
        """
        The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
        """
        return pulumi.get(self, "service_namespace")
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = ['TargetArgs', 'Target']
@pulumi.input_type
class TargetArgs:
def __init__(__self__, *,
max_capacity: pulumi.Input[int],
min_capacity: pulumi.Input[int],
resource_id: pulumi.Input[str],
scalable_dimension: pulumi.Input[str],
service_namespace: pulumi.Input[str],
role_arn: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Target resource.
:param pulumi.Input[int] max_capacity: The max capacity of the scalable target.
:param pulumi.Input[int] min_capacity: The min capacity of the scalable target.
:param pulumi.Input[str] resource_id: The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] scalable_dimension: The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] service_namespace: The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] role_arn: The ARN of the IAM role that allows Application AutoScaling to modify your scalable target on your behalf. This defaults to an IAM Service-Linked Role for most services and custom IAM Roles are ignored by the API for those namespaces. See the [AWS Application Auto Scaling documentation](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles) for more information about how this service interacts with IAM.
"""
pulumi.set(__self__, "max_capacity", max_capacity)
pulumi.set(__self__, "min_capacity", min_capacity)
pulumi.set(__self__, "resource_id", resource_id)
pulumi.set(__self__, "scalable_dimension", scalable_dimension)
pulumi.set(__self__, "service_namespace", service_namespace)
if role_arn is not None:
pulumi.set(__self__, "role_arn", role_arn)
@property
@pulumi.getter(name="maxCapacity")
def max_capacity(self) -> pulumi.Input[int]:
"""
The max capacity of the scalable target.
"""
return pulumi.get(self, "max_capacity")
@max_capacity.setter
def max_capacity(self, value: pulumi.Input[int]):
pulumi.set(self, "max_capacity", value)
@property
@pulumi.getter(name="minCapacity")
def min_capacity(self) -> pulumi.Input[int]:
"""
The min capacity of the scalable target.
"""
return pulumi.get(self, "min_capacity")
@min_capacity.setter
def min_capacity(self, value: pulumi.Input[int]):
pulumi.set(self, "min_capacity", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> pulumi.Input[str]:
"""
The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_id", value)
@property
@pulumi.getter(name="scalableDimension")
def scalable_dimension(self) -> pulumi.Input[str]:
"""
The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
"""
return pulumi.get(self, "scalable_dimension")
@scalable_dimension.setter
def scalable_dimension(self, value: pulumi.Input[str]):
pulumi.set(self, "scalable_dimension", value)
@property
@pulumi.getter(name="serviceNamespace")
def service_namespace(self) -> pulumi.Input[str]:
"""
The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
"""
return pulumi.get(self, "service_namespace")
@service_namespace.setter
def service_namespace(self, value: pulumi.Input[str]):
pulumi.set(self, "service_namespace", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[pulumi.Input[str]]:
"""
The ARN of the IAM role that allows Application AutoScaling to modify your scalable target on your behalf. This defaults to an IAM Service-Linked Role for most services and custom IAM Roles are ignored by the API for those namespaces. See the [AWS Application Auto Scaling documentation](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles) for more information about how this service interacts with IAM.
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_arn", value)
class Target(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
max_capacity: Optional[pulumi.Input[int]] = None,
min_capacity: Optional[pulumi.Input[int]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
scalable_dimension: Optional[pulumi.Input[str]] = None,
service_namespace: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides an Application AutoScaling ScalableTarget resource. To manage policies which get attached to the target, see the `appautoscaling.Policy` resource.
> **NOTE:** The [Application Auto Scaling service automatically attempts to manage IAM Service-Linked Roles](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles) when registering certain service namespaces for the first time. To manually manage this role, see the `iam.ServiceLinkedRole` resource.
## Example Usage
### DynamoDB Table Autoscaling
```python
import pulumi
import pulumi_aws as aws
dynamodb_table_read_target = aws.appautoscaling.Target("dynamodbTableReadTarget",
max_capacity=100,
min_capacity=5,
resource_id=f"table/{aws_dynamodb_table['example']['name']}",
scalable_dimension="dynamodb:table:ReadCapacityUnits",
service_namespace="dynamodb")
```
### DynamoDB Index Autoscaling
```python
import pulumi
import pulumi_aws as aws
dynamodb_index_read_target = aws.appautoscaling.Target("dynamodbIndexReadTarget",
max_capacity=100,
min_capacity=5,
resource_id=f"table/{aws_dynamodb_table['example']['name']}/index/{var['index_name']}",
scalable_dimension="dynamodb:index:ReadCapacityUnits",
service_namespace="dynamodb")
```
### ECS Service Autoscaling
```python
import pulumi
import pulumi_aws as aws
ecs_target = aws.appautoscaling.Target("ecsTarget",
max_capacity=4,
min_capacity=1,
resource_id=f"service/{aws_ecs_cluster['example']['name']}/{aws_ecs_service['example']['name']}",
scalable_dimension="ecs:service:DesiredCount",
service_namespace="ecs")
```
### Aurora Read Replica Autoscaling
```python
import pulumi
import pulumi_aws as aws
replicas = aws.appautoscaling.Target("replicas",
max_capacity=15,
min_capacity=1,
resource_id=f"cluster:{aws_rds_cluster['example']['id']}",
scalable_dimension="rds:cluster:ReadReplicaCount",
service_namespace="rds")
```
### MSK / Kafka Autoscaling
```python
import pulumi
import pulumi_aws as aws
msk_target = aws.appautoscaling.Target("mskTarget",
max_capacity=8,
min_capacity=1,
resource_id=aws_msk_cluster["example"]["arn"],
scalable_dimension="kafka:broker-storage:VolumeSize",
service_namespace="kafka")
```
## Import
Application AutoScaling Target can be imported using the `service-namespace` , `resource-id` and `scalable-dimension` separated by `/`.
```sh
$ pulumi import aws:appautoscaling/target:Target test-target service-namespace/resource-id/scalable-dimension
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] max_capacity: The max capacity of the scalable target.
:param pulumi.Input[int] min_capacity: The min capacity of the scalable target.
:param pulumi.Input[str] resource_id: The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] role_arn: The ARN of the IAM role that allows Application AutoScaling to modify your scalable target on your behalf. This defaults to an IAM Service-Linked Role for most services and custom IAM Roles are ignored by the API for those namespaces. See the [AWS Application Auto Scaling documentation](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles) for more information about how this service interacts with IAM.
:param pulumi.Input[str] scalable_dimension: The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] service_namespace: The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TargetArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an Application AutoScaling ScalableTarget resource. To manage policies which get attached to the target, see the `appautoscaling.Policy` resource.
> **NOTE:** The [Application Auto Scaling service automatically attempts to manage IAM Service-Linked Roles](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles) when registering certain service namespaces for the first time. To manually manage this role, see the `iam.ServiceLinkedRole` resource.
## Example Usage
### DynamoDB Table Autoscaling
```python
import pulumi
import pulumi_aws as aws
dynamodb_table_read_target = aws.appautoscaling.Target("dynamodbTableReadTarget",
max_capacity=100,
min_capacity=5,
resource_id=f"table/{aws_dynamodb_table['example']['name']}",
scalable_dimension="dynamodb:table:ReadCapacityUnits",
service_namespace="dynamodb")
```
### DynamoDB Index Autoscaling
```python
import pulumi
import pulumi_aws as aws
dynamodb_index_read_target = aws.appautoscaling.Target("dynamodbIndexReadTarget",
max_capacity=100,
min_capacity=5,
resource_id=f"table/{aws_dynamodb_table['example']['name']}/index/{var['index_name']}",
scalable_dimension="dynamodb:index:ReadCapacityUnits",
service_namespace="dynamodb")
```
### ECS Service Autoscaling
```python
import pulumi
import pulumi_aws as aws
ecs_target = aws.appautoscaling.Target("ecsTarget",
max_capacity=4,
min_capacity=1,
resource_id=f"service/{aws_ecs_cluster['example']['name']}/{aws_ecs_service['example']['name']}",
scalable_dimension="ecs:service:DesiredCount",
service_namespace="ecs")
```
### Aurora Read Replica Autoscaling
```python
import pulumi
import pulumi_aws as aws
replicas = aws.appautoscaling.Target("replicas",
max_capacity=15,
min_capacity=1,
resource_id=f"cluster:{aws_rds_cluster['example']['id']}",
scalable_dimension="rds:cluster:ReadReplicaCount",
service_namespace="rds")
```
### MSK / Kafka Autoscaling
```python
import pulumi
import pulumi_aws as aws
msk_target = aws.appautoscaling.Target("mskTarget",
max_capacity=8,
min_capacity=1,
resource_id=aws_msk_cluster["example"]["arn"],
scalable_dimension="kafka:broker-storage:VolumeSize",
service_namespace="kafka")
```
## Import
Application AutoScaling Target can be imported using the `service-namespace` , `resource-id` and `scalable-dimension` separated by `/`.
```sh
$ pulumi import aws:appautoscaling/target:Target test-target service-namespace/resource-id/scalable-dimension
```
:param str resource_name: The name of the resource.
:param TargetArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TargetArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
max_capacity: Optional[pulumi.Input[int]] = None,
min_capacity: Optional[pulumi.Input[int]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
scalable_dimension: Optional[pulumi.Input[str]] = None,
service_namespace: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if max_capacity is None and not opts.urn:
raise TypeError("Missing required property 'max_capacity'")
__props__['max_capacity'] = max_capacity
if min_capacity is None and not opts.urn:
raise TypeError("Missing required property 'min_capacity'")
__props__['min_capacity'] = min_capacity
if resource_id is None and not opts.urn:
raise TypeError("Missing required property 'resource_id'")
__props__['resource_id'] = resource_id
__props__['role_arn'] = role_arn
if scalable_dimension is None and not opts.urn:
raise TypeError("Missing required property 'scalable_dimension'")
__props__['scalable_dimension'] = scalable_dimension
if service_namespace is None and not opts.urn:
raise TypeError("Missing required property 'service_namespace'")
__props__['service_namespace'] = service_namespace
super(Target, __self__).__init__(
'aws:appautoscaling/target:Target',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
max_capacity: Optional[pulumi.Input[int]] = None,
min_capacity: Optional[pulumi.Input[int]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
scalable_dimension: Optional[pulumi.Input[str]] = None,
service_namespace: Optional[pulumi.Input[str]] = None) -> 'Target':
"""
Get an existing Target resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] max_capacity: The max capacity of the scalable target.
:param pulumi.Input[int] min_capacity: The min capacity of the scalable target.
:param pulumi.Input[str] resource_id: The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] role_arn: The ARN of the IAM role that allows Application AutoScaling to modify your scalable target on your behalf. This defaults to an IAM Service-Linked Role for most services and custom IAM Roles are ignored by the API for those namespaces. See the [AWS Application Auto Scaling documentation](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles) for more information about how this service interacts with IAM.
:param pulumi.Input[str] scalable_dimension: The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] service_namespace: The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["max_capacity"] = max_capacity
__props__["min_capacity"] = min_capacity
__props__["resource_id"] = resource_id
__props__["role_arn"] = role_arn
__props__["scalable_dimension"] = scalable_dimension
__props__["service_namespace"] = service_namespace
return Target(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="maxCapacity")
def max_capacity(self) -> pulumi.Output[int]:
"""
The max capacity of the scalable target.
"""
return pulumi.get(self, "max_capacity")
@property
@pulumi.getter(name="minCapacity")
def min_capacity(self) -> pulumi.Output[int]:
"""
The min capacity of the scalable target.
"""
return pulumi.get(self, "min_capacity")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> pulumi.Output[str]:
"""
The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Output[str]:
"""
The ARN of the IAM role that allows Application AutoScaling to modify your scalable target on your behalf. This defaults to an IAM Service-Linked Role for most services and custom IAM Roles are ignored by the API for those namespaces. See the [AWS Application Auto Scaling documentation](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles) for more information about how this service interacts with IAM.
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter(name="scalableDimension")
def scalable_dimension(self) -> pulumi.Output[str]:
"""
The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
"""
return pulumi.get(self, "scalable_dimension")
@property
@pulumi.getter(name="serviceNamespace")
def service_namespace(self) -> pulumi.Output[str]:
"""
The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
"""
return pulumi.get(self, "service_namespace")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
from brandfolder.resource import Resource
from brandfolder.resource_container import ResourceContainer
from brandfolder.asset import Asset
from brandfolder.attachment import Attachment
class Collection(Resource):
RESOURCE_NAME = 'Collection'
RESOURCE_TYPE = 'collections'
def __init__(self, client, **kwargs):
super().__init__(client, **kwargs)
self.assets = ResourceContainer(client, Asset, parent=self)
self.attachments = ResourceContainer(client, Attachment, parent=self)
def __repr__(self):
return f'<{self.resource_name} {self.attributes['slug']}>'
def create_asset(self, attachments_data, section_key, **attributes):
data = {
'data': {
'attributes': [
{
**attributes,
'attachments': attachments_data
}
]
},
'section_key': section_key
}
res = self.client.post(f'/{self.resource_type}/{self.id}/assets', json=data)
return Asset(client=self.client, data=res['data'][0])
| from brandfolder.resource import Resource
from brandfolder.resource_container import ResourceContainer
from brandfolder.asset import Asset
from brandfolder.attachment import Attachment
class Collection(Resource):
RESOURCE_NAME = 'Collection'
RESOURCE_TYPE = 'collections'
def __init__(self, client, **kwargs):
super().__init__(client, **kwargs)
self.assets = ResourceContainer(client, Asset, parent=self)
self.attachments = ResourceContainer(client, Attachment, parent=self)
def __repr__(self):
return f'<{self.resource_name} {self.attributes["slug"]}>'
def create_asset(self, attachments_data, section_key, **attributes):
data = {
'data': {
'attributes': [
{
**attributes,
'attachments': attachments_data
}
]
},
'section_key': section_key
}
res = self.client.post(f'/{self.resource_type}/{self.id}/assets', json=data)
return Asset(client=self.client, data=res['data'][0])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Andre Augusto Giannotti Scota (https://sites.google.com/view/a2gs/)
import sys, os, locale
import requests
WTValidAssets = ['BRL-XBT', 'XBT-BRL']
def printUsage(exec: str):
print('Walltime command line')
print('-mi\t\tMarket info')
print('-ob\t\tOrder book')
print('-lt\t\tLast trades')
print(f'\t\t\t{exec} -lt [ASSET] [DATE]')
print('\t\t\t\tASSET = Walltime valid assets')
print('\t\t\t\tDATE = Last date (YYYY MM DD 24h)')
print(f'\t\t\tSample: {exec} -lt BRL-XBT \"2022 03 01 16\"')
print('-aw\t\tAdd withdraw address')
print('-c\t\tCancel order')
print('-co\t\tCreate order')
print('-da\t\tGenerate new deposit address')
print('-as\t\tGet account statement')
print('-ai\t\tGet account info')
print('-go\t\tGet orders')
print('-id\t\tInform deposit')
print('-rw\t\tRequest withdraw')
print(f'\nWalltime valid asstes: {WTValidAssets}')
def getRequest(url: str) -> [bool, int, {}]:
try:
urlResponse = requests.get(url)
except:
return [False, urlResponse.status_code, {}]
return [True, urlResponse.status_code, urlResponse.json() if urlResponse.status_code == 200 else {}]
def printMarketInfo() -> bool:
ret, getRetCode, retJson = getRequest(
'https://s3.amazonaws.com/data-production-walltime-info/production/dynamic/walltime-info.json')
if ret == True:
print(f"Retorno:\n{retJson}")
return ret
def printLastTrade(asset: str, lasttradedate: str) -> bool:
ret, getRetCode, retJson = getRequest(
'https://s3.amazonaws.com/data-production-walltime-info/production/dynamic/meta.json')
if ret == False:
return False
url = 'https://s3.amazonaws.com/data-production-walltime-info/production/dynamic/' + retJson[
'last_trades_prefix'] + '_' + asset + '_p0.json'
ret, getRetCode, retJson = getRequest(url)
if ret == False:
return False
print(f'Returno: {retJson}')
def formatReal(value:float) -> "":
return locale.format_string("%.2f", value, grouping=True, monetary=False)
def printOrderBook():
ret, getRetCode, retJson = getRequest('https://s3.amazonaws.com/data-production-walltime-info/production/dynamic/meta.json')
if ret == False:
return False
url = 'https://s3.amazonaws.com/data-production-walltime-info/production/dynamic/' + retJson['order_book_prefix'] + '_r' + str(retJson['current_round']) + '_p0.json'
ret, getRetCode, retJson = getRequest(url)
if ret == False:
return False
sell = retJson['brl-xbt']
buy = retJson['xbt-brl']
print(f'{'COMPRA':^63s}{'VENDA':^63s}\n')
print(f'{'VALOR (R$)':^20s}|{'QTD (BTC)':^20s}|{'TOTAL (R$)':^20s}||{'VALOR (BTC)':^20s}|{'QTD (R$)':^20s}|{'TOTAL (R$)':^20s}')
print('-'*126)
[print(f'{formatReal(eval(a[0])):>20s}|{eval(a[1]):20.8f}|{formatReal(eval(a[0]) / eval(a[1])):>20s}||{eval(b[1]):20.8f}|{formatReal(eval(b[0])):>20s}|{formatReal(eval(b[0]) / eval(b[1])):>20s}') for a, b in zip(sell, buy)]
def cancelOrder():
pass
def addWithdrawAddress():
pass
def createOrder():
pass
def generateNewDepositAddress():
pass
def getAccountStatement():
pass
def getAccountInfo():
pass
def getOrders():
pass
def informDeposit():
pass
def requestWithdraw():
pass
if __name__ == '__main__':
locale.setlocale(locale.LC_ALL, 'pt_BR.UTF-8')
if len(sys.argv) == 2:
if sys.argv[1] == '-mi':
if printMarketInfo() == False:
print('Market Info erro.')
elif sys.argv[1] == '-ob':
if printOrderBook() == False:
print('Order Book erro.')
elif sys.argv[1] == '-aw':
addWithdrawAddress()
elif sys.argv[1] == '-c':
cancelOrder()
elif sys.argv[1] == '-co':
createOrder()
elif sys.argv[1] == '-da':
generateNewDepositAddress()
elif sys.argv[1] == '-as':
getAccountStatement()
elif sys.argv[1] == '-ai':
getAccountInfo()
elif sys.argv[1] == '-go':
getOrders()
elif sys.argv[1] == '-id':
informDeposit()
elif sys.argv[1] == '-rw':
requestWithdraw()
elif len(sys.argv) == 4:
if sys.argv[1] == '-lt':
if sys.argv[2] in WTValidAssets:
if printLastTrade(sys.argv[2], sys.argv[3]) == False:
print("Last Trade erro.")
else:
print(f'Asset does not exist: {sys.argv[2]}')
else:
if len(sys.argv) != 1:
print('Parameters erro')
printUsage(os.path.basename(__file__))
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Andre Augusto Giannotti Scota (https://sites.google.com/view/a2gs/)
import sys, os, locale
import requests
WTValidAssets = ['BRL-XBT', 'XBT-BRL']
def printUsage(exec: str):
print('Walltime command line')
print('-mi\t\tMarket info')
print('-ob\t\tOrder book')
print('-lt\t\tLast trades')
print(f'\t\t\t{exec} -lt [ASSET] [DATE]')
print('\t\t\t\tASSET = Walltime valid assets')
print('\t\t\t\tDATE = Last date (YYYY MM DD 24h)')
print(f'\t\t\tSample: {exec} -lt BRL-XBT \"2022 03 01 16\"')
print('-aw\t\tAdd withdraw address')
print('-c\t\tCancel order')
print('-co\t\tCreate order')
print('-da\t\tGenerate new deposit address')
print('-as\t\tGet account statement')
print('-ai\t\tGet account info')
print('-go\t\tGet orders')
print('-id\t\tInform deposit')
print('-rw\t\tRequest withdraw')
print(f'\nWalltime valid asstes: {WTValidAssets}')
def getRequest(url: str) -> [bool, int, {}]:
try:
urlResponse = requests.get(url)
except:
return [False, urlResponse.status_code, {}]
return [True, urlResponse.status_code, urlResponse.json() if urlResponse.status_code == 200 else {}]
def printMarketInfo() -> bool:
ret, getRetCode, retJson = getRequest(
'https://s3.amazonaws.com/data-production-walltime-info/production/dynamic/walltime-info.json')
if ret == True:
print(f"Retorno:\n{retJson}")
return ret
def printLastTrade(asset: str, lasttradedate: str) -> bool:
ret, getRetCode, retJson = getRequest(
'https://s3.amazonaws.com/data-production-walltime-info/production/dynamic/meta.json')
if ret == False:
return False
url = 'https://s3.amazonaws.com/data-production-walltime-info/production/dynamic/' + retJson[
'last_trades_prefix'] + '_' + asset + '_p0.json'
ret, getRetCode, retJson = getRequest(url)
if ret == False:
return False
print(f'Returno: {retJson}')
def formatReal(value:float) -> "":
return locale.format_string("%.2f", value, grouping=True, monetary=False)
def printOrderBook():
ret, getRetCode, retJson = getRequest('https://s3.amazonaws.com/data-production-walltime-info/production/dynamic/meta.json')
if ret == False:
return False
url = 'https://s3.amazonaws.com/data-production-walltime-info/production/dynamic/' + retJson['order_book_prefix'] + '_r' + str(retJson['current_round']) + '_p0.json'
ret, getRetCode, retJson = getRequest(url)
if ret == False:
return False
sell = retJson['brl-xbt']
buy = retJson['xbt-brl']
print(f'{"COMPRA":^63s}{"VENDA":^63s}\n')
print(f'{"VALOR (R$)":^20s}|{"QTD (BTC)":^20s}|{"TOTAL (R$)":^20s}||{"VALOR (BTC)":^20s}|{"QTD (R$)":^20s}|{"TOTAL (R$)":^20s}')
print('-'*126)
[print(f'{formatReal(eval(a[0])):>20s}|{eval(a[1]):20.8f}|{formatReal(eval(a[0]) / eval(a[1])):>20s}||{eval(b[1]):20.8f}|{formatReal(eval(b[0])):>20s}|{formatReal(eval(b[0]) / eval(b[1])):>20s}') for a, b in zip(sell, buy)]
def cancelOrder():
pass
def addWithdrawAddress():
pass
def createOrder():
pass
def generateNewDepositAddress():
pass
def getAccountStatement():
pass
def getAccountInfo():
pass
def getOrders():
pass
def informDeposit():
pass
def requestWithdraw():
pass
if __name__ == '__main__':
locale.setlocale(locale.LC_ALL, 'pt_BR.UTF-8')
if len(sys.argv) == 2:
if sys.argv[1] == '-mi':
if printMarketInfo() == False:
print('Market Info erro.')
elif sys.argv[1] == '-ob':
if printOrderBook() == False:
print('Order Book erro.')
elif sys.argv[1] == '-aw':
addWithdrawAddress()
elif sys.argv[1] == '-c':
cancelOrder()
elif sys.argv[1] == '-co':
createOrder()
elif sys.argv[1] == '-da':
generateNewDepositAddress()
elif sys.argv[1] == '-as':
getAccountStatement()
elif sys.argv[1] == '-ai':
getAccountInfo()
elif sys.argv[1] == '-go':
getOrders()
elif sys.argv[1] == '-id':
informDeposit()
elif sys.argv[1] == '-rw':
requestWithdraw()
elif len(sys.argv) == 4:
if sys.argv[1] == '-lt':
if sys.argv[2] in WTValidAssets:
if printLastTrade(sys.argv[2], sys.argv[3]) == False:
print("Last Trade erro.")
else:
print(f'Asset does not exist: {sys.argv[2]}')
else:
if len(sys.argv) != 1:
print('Parameters erro')
printUsage(os.path.basename(__file__))
|
"""Runway module utilities."""
from __future__ import annotations
import logging
import os
import platform
import subprocess
import sys
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Union, cast
from ..utils import which
if TYPE_CHECKING:
from .._logging import RunwayLogger
LOGGER = cast("RunwayLogger", logging.getLogger(__name__))
NPM_BIN = "npm.cmd" if platform.system().lower() == "windows" else "npm"
NPX_BIN = "npx.cmd" if platform.system().lower() == "windows" else "npx"
def format_npm_command_for_logging(command: List[str]) -> str:
"""Convert npm command list to string for display to user."""
if platform.system().lower() == "windows" and (
command[0] == "npx.cmd" and command[1] == "-c"
):
return 'npx.cmd -c "%s"' % " ".join(command[2:])
return " ".join(command)
def generate_node_command(
command: str,
command_opts: List[str],
path: Path,
*,
logger: Union[logging.Logger, logging.LoggerAdapter] = LOGGER,
package: Optional[str] = None,
) -> List[str]:
"""Return node bin command list for subprocess execution.
Args:
command: Command to execute from a local ``node_modules/.bin``.
command_opts: Options to include with the command.
path: Current working directory. Used to construct a "fall-back" command
when ``npx`` is not available/included as part of npm.
logger: A specific logger to use when logging the constructed command.
package: Name of the npm package containing the binary to execute.
This is recommended when the name of the binary does not match the
name of the npm package.
"""
if which(NPX_BIN):
# Use npx if available (npm v5.2+)
cmd_list = [NPX_BIN]
if package:
cmd_list.extend(
[
"--package",
package,
command,
*command_opts,
]
)
else:
cmd_list.append("-c")
cmd_list.append(f"{command} {" ".join(command_opts)}".strip())
else:
logger.debug("npx not found; falling back to invoking shell script directly")
cmd_list = [str(path / "node_modules" / ".bin" / command), *command_opts]
logger.debug("node command: %s", format_npm_command_for_logging(cmd_list))
return cmd_list
def run_module_command(
cmd_list: List[str],
env_vars: Dict[str, str],
exit_on_error: bool = True,
logger: Union[logging.Logger, logging.LoggerAdapter] = LOGGER,
) -> None:
"""Shell out to provisioner command."""
logger.debug("running command: %s", " ".join(cmd_list))
if exit_on_error:
try:
subprocess.check_call(cmd_list, env=env_vars)
except subprocess.CalledProcessError as shelloutexc:
sys.exit(shelloutexc.returncode)
else:
subprocess.check_call(cmd_list, env=env_vars)
def use_npm_ci(path: Path) -> bool:
"""Return true if npm ci should be used in lieu of npm install."""
# https://docs.npmjs.com/cli/ci#description
with open(os.devnull, "w") as fnull:
if (
(path / "package-lock.json").is_file()
or (path / "npm-shrinkwrap.json").is_file()
) and subprocess.call(
[NPM_BIN, "ci", "-h"], stdout=fnull, stderr=subprocess.STDOUT
) == 0:
return True
return False
| """Runway module utilities."""
from __future__ import annotations
import logging
import os
import platform
import subprocess
import sys
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Union, cast
from ..utils import which
if TYPE_CHECKING:
from .._logging import RunwayLogger
LOGGER = cast("RunwayLogger", logging.getLogger(__name__))
NPM_BIN = "npm.cmd" if platform.system().lower() == "windows" else "npm"
NPX_BIN = "npx.cmd" if platform.system().lower() == "windows" else "npx"
def format_npm_command_for_logging(command: List[str]) -> str:
"""Convert npm command list to string for display to user."""
if platform.system().lower() == "windows" and (
command[0] == "npx.cmd" and command[1] == "-c"
):
return 'npx.cmd -c "%s"' % " ".join(command[2:])
return " ".join(command)
def generate_node_command(
command: str,
command_opts: List[str],
path: Path,
*,
logger: Union[logging.Logger, logging.LoggerAdapter] = LOGGER,
package: Optional[str] = None,
) -> List[str]:
"""Return node bin command list for subprocess execution.
Args:
command: Command to execute from a local ``node_modules/.bin``.
command_opts: Options to include with the command.
path: Current working directory. Used to construct a "fall-back" command
when ``npx`` is not available/included as part of npm.
logger: A specific logger to use when logging the constructed command.
package: Name of the npm package containing the binary to execute.
This is recommended when the name of the binary does not match the
name of the npm package.
"""
if which(NPX_BIN):
# Use npx if available (npm v5.2+)
cmd_list = [NPX_BIN]
if package:
cmd_list.extend(
[
"--package",
package,
command,
*command_opts,
]
)
else:
cmd_list.append("-c")
cmd_list.append(f"{command} {' '.join(command_opts)}".strip())
else:
logger.debug("npx not found; falling back to invoking shell script directly")
cmd_list = [str(path / "node_modules" / ".bin" / command), *command_opts]
logger.debug("node command: %s", format_npm_command_for_logging(cmd_list))
return cmd_list
def run_module_command(
cmd_list: List[str],
env_vars: Dict[str, str],
exit_on_error: bool = True,
logger: Union[logging.Logger, logging.LoggerAdapter] = LOGGER,
) -> None:
"""Shell out to provisioner command."""
logger.debug("running command: %s", " ".join(cmd_list))
if exit_on_error:
try:
subprocess.check_call(cmd_list, env=env_vars)
except subprocess.CalledProcessError as shelloutexc:
sys.exit(shelloutexc.returncode)
else:
subprocess.check_call(cmd_list, env=env_vars)
def use_npm_ci(path: Path) -> bool:
"""Return true if npm ci should be used in lieu of npm install."""
# https://docs.npmjs.com/cli/ci#description
with open(os.devnull, "w") as fnull:
if (
(path / "package-lock.json").is_file()
or (path / "npm-shrinkwrap.json").is_file()
) and subprocess.call(
[NPM_BIN, "ci", "-h"], stdout=fnull, stderr=subprocess.STDOUT
) == 0:
return True
return False
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Author : richardchien
@Date : 2020-04-14 22:10:53
@LastEditors: yanyongyu
@LastEditTime: 2020-04-14 22:21:30
@Description : None
@GitHub : https://github.com/richardchien
"""
__author__ = "richardchien"
import re
import math
from datetime import datetime
from collections import defaultdict
from datetime import datetime, timedelta
from nonebot import CommandSession
from . import cg, __plugin_usage__
from .data_source import get_anime_list, get_timeline_list
WEB_URL = 'https://www.bilibili.com/anime/index/#' + \
'season_version=-1&area=-1&is_finish=-1&' + \
'copyright=-1&season_status=-1&' + \
'season_month={month}&pub_date={year}' + \
'&style_id=-1&order=3&st=1&sort=0&page=1'
@cg.command('index', aliases={'番剧索引', '番剧', '新番'})
async def index(session: CommandSession):
now = datetime.now()
year = session.state.get('year', now.year)
month = session.state.get('month', now.month)
month = math.ceil(month / 3) * 3 - 3 + 1
anime_list = await get_anime_list(year, month)
if not anime_list:
session.finish('没有查询到相关番剧……')
reply = f'{year}年{month}月番剧\n按追番人数排序,前20部如下:\n\n'
for anime in anime_list:
title = anime.get('title')
index_show = anime.get('index_show', '不详')
if not title:
continue
reply += f'{title} {index_show}\n'
web_url = WEB_URL.format(year=year, month=month)
reply += f'\n更多详细资料见哔哩哔哩官网 {web_url}'
session.finish(reply)
@index.args_parser
async def _(session: CommandSession):
argv = session.current_arg_text.split()
year = None
month = None
if len(argv) == 2 and \
re.fullmatch(r'(?:20)?\d{2}', argv[0]) and \
re.fullmatch(r'\d{1,2}', argv[1]):
year = int(argv[0]) if len(argv[0]) > 2 else 2000 + int(argv[0])
month = int(argv[1])
elif len(argv) == 1 and re.fullmatch(r'\d{1,2}', argv[0]):
month = int(argv[0])
elif len(argv) == 1 and re.fullmatch(r'(?:20)?\d{2}-\d{1,2}', argv[0]):
year, month = [int(x) for x in argv[0].split('-')]
year = 2000 + year if year < 100 else year
elif len(argv):
await session.send('抱歉无法识别输入的参数,下面将给出本季度的番剧~')
if year is not None:
session.state['year'] = year
if month is not None:
session.state['month'] = month
@cg.command('timeline', aliases={'番剧时间表', '新番时间表'})
async def timeline(session: CommandSession):
timeline_list = await get_timeline_list()
if timeline_list is None:
session.finish('查询失败了……')
date = session.state.get('date')
name = session.state.get('name')
if date:
timeline_list = list(
filter(lambda x: x.get('pub_date', '').endswith(date),
timeline_list))
if name:
name = name.strip()
timeline_list = list(
filter(lambda x: name.lower() in x.get('title', '').lower(),
timeline_list))
if len(set(map(lambda x: x['title'], timeline_list))) > 1:
timeline_list = list(
filter(lambda x: len(name) > len(x['title']) / 4,
timeline_list))
if date and name:
if not timeline_list:
reply = '没更新'
else:
reply = '\n'.join(
('更新了' if item['is_published'] else f'将在{item['ontime']}更新') +
(f'第{item['ep_index']}话' if item['ep_index'].isdigit(
) else item['ep_index']) for item in timeline_list)
session.finish(reply)
if not timeline_list:
session.finish('没有找到符合条件的时间表……')
if date:
month, day = [int(x) for x in date.split('-')]
reply = f'在{month}月{day}日更新的番剧有:\n\n'
reply += '\n'.join(f'{item['title'] or '未知动画'} '
f'{item['ontime'] or '未知时间'} ' +
(f'第{item['ep_index']}话' if item['ep_index'].isdigit(
) else item['ep_index']) for item in timeline_list)
session.finish(reply)
if name:
anime_dict = defaultdict(list)
for item in timeline_list:
anime_dict[item['title']].append(item)
for name, items in anime_dict.items():
reply = f'{name}\n'
for item in items:
_, month, day = [int(x) for x in item['pub_date'].split('-')]
reply += '\n' + ('已' if item['is_published'] else '将') + \
f'在{month}月{day}日{item['ontime']}更新' + \
(f'第{item['ep_index']}话'
if item['ep_index'].isdigit()
else item['ep_index'])
await session.send(reply)
@timeline.args_parser
async def _(session: CommandSession):
if session.state:
return
m = re.search(r'(?:(-?\d{1,2})(?:-(\d{1,2}))?)?\s*(.+)?',
session.current_arg_text.strip())
if not m:
session.finish(__plugin_usage__("description"))
num1 = m.group(1)
num2 = m.group(2)
name = m.group(3)
if num1 is None and name is None:
session.finish(__plugin_usage__("description"))
if num1 is not None and num2 is not None:
date = f'%02d-%02d' % (int(num1), int(num2))
elif num1 is not None:
date = (datetime.now() + timedelta(days=int(num1))).strftime('%m-%d')
else:
date = None
session.state['date'] = date
session.state['name'] = name
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Author : richardchien
@Date : 2020-04-14 22:10:53
@LastEditors: yanyongyu
@LastEditTime: 2020-04-14 22:21:30
@Description : None
@GitHub : https://github.com/richardchien
"""
__author__ = "richardchien"
import re
import math
from datetime import datetime
from collections import defaultdict
from datetime import datetime, timedelta
from nonebot import CommandSession
from . import cg, __plugin_usage__
from .data_source import get_anime_list, get_timeline_list
WEB_URL = 'https://www.bilibili.com/anime/index/#' + \
'season_version=-1&area=-1&is_finish=-1&' + \
'copyright=-1&season_status=-1&' + \
'season_month={month}&pub_date={year}' + \
'&style_id=-1&order=3&st=1&sort=0&page=1'
@cg.command('index', aliases={'番剧索引', '番剧', '新番'})
async def index(session: CommandSession):
now = datetime.now()
year = session.state.get('year', now.year)
month = session.state.get('month', now.month)
month = math.ceil(month / 3) * 3 - 3 + 1
anime_list = await get_anime_list(year, month)
if not anime_list:
session.finish('没有查询到相关番剧……')
reply = f'{year}年{month}月番剧\n按追番人数排序,前20部如下:\n\n'
for anime in anime_list:
title = anime.get('title')
index_show = anime.get('index_show', '不详')
if not title:
continue
reply += f'{title} {index_show}\n'
web_url = WEB_URL.format(year=year, month=month)
reply += f'\n更多详细资料见哔哩哔哩官网 {web_url}'
session.finish(reply)
@index.args_parser
async def _(session: CommandSession):
argv = session.current_arg_text.split()
year = None
month = None
if len(argv) == 2 and \
re.fullmatch(r'(?:20)?\d{2}', argv[0]) and \
re.fullmatch(r'\d{1,2}', argv[1]):
year = int(argv[0]) if len(argv[0]) > 2 else 2000 + int(argv[0])
month = int(argv[1])
elif len(argv) == 1 and re.fullmatch(r'\d{1,2}', argv[0]):
month = int(argv[0])
elif len(argv) == 1 and re.fullmatch(r'(?:20)?\d{2}-\d{1,2}', argv[0]):
year, month = [int(x) for x in argv[0].split('-')]
year = 2000 + year if year < 100 else year
elif len(argv):
await session.send('抱歉无法识别输入的参数,下面将给出本季度的番剧~')
if year is not None:
session.state['year'] = year
if month is not None:
session.state['month'] = month
@cg.command('timeline', aliases={'番剧时间表', '新番时间表'})
async def timeline(session: CommandSession):
timeline_list = await get_timeline_list()
if timeline_list is None:
session.finish('查询失败了……')
date = session.state.get('date')
name = session.state.get('name')
if date:
timeline_list = list(
filter(lambda x: x.get('pub_date', '').endswith(date),
timeline_list))
if name:
name = name.strip()
timeline_list = list(
filter(lambda x: name.lower() in x.get('title', '').lower(),
timeline_list))
if len(set(map(lambda x: x['title'], timeline_list))) > 1:
timeline_list = list(
filter(lambda x: len(name) > len(x['title']) / 4,
timeline_list))
if date and name:
if not timeline_list:
reply = '没更新'
else:
reply = '\n'.join(
('更新了' if item['is_published'] else f'将在{item["ontime"]}更新') +
(f'第{item["ep_index"]}话' if item['ep_index'].isdigit(
) else item['ep_index']) for item in timeline_list)
session.finish(reply)
if not timeline_list:
session.finish('没有找到符合条件的时间表……')
if date:
month, day = [int(x) for x in date.split('-')]
reply = f'在{month}月{day}日更新的番剧有:\n\n'
reply += '\n'.join(f'{item["title"] or "未知动画"} '
f'{item["ontime"] or "未知时间"} ' +
(f'第{item["ep_index"]}话' if item['ep_index'].isdigit(
) else item['ep_index']) for item in timeline_list)
session.finish(reply)
if name:
anime_dict = defaultdict(list)
for item in timeline_list:
anime_dict[item['title']].append(item)
for name, items in anime_dict.items():
reply = f'{name}\n'
for item in items:
_, month, day = [int(x) for x in item['pub_date'].split('-')]
reply += '\n' + ('已' if item['is_published'] else '将') + \
f'在{month}月{day}日{item["ontime"]}更新' + \
(f'第{item["ep_index"]}话'
if item['ep_index'].isdigit()
else item['ep_index'])
await session.send(reply)
@timeline.args_parser
async def _(session: CommandSession):
if session.state:
return
m = re.search(r'(?:(-?\d{1,2})(?:-(\d{1,2}))?)?\s*(.+)?',
session.current_arg_text.strip())
if not m:
session.finish(__plugin_usage__("description"))
num1 = m.group(1)
num2 = m.group(2)
name = m.group(3)
if num1 is None and name is None:
session.finish(__plugin_usage__("description"))
if num1 is not None and num2 is not None:
date = f'%02d-%02d' % (int(num1), int(num2))
elif num1 is not None:
date = (datetime.now() + timedelta(days=int(num1))).strftime('%m-%d')
else:
date = None
session.state['date'] = date
session.state['name'] = name
|
import argparse
import gc
import os
from pathlib import Path
import numpy as np
import pandas as pd
import features
import metrics
import preprocess
import utils
from dataset import DSB2019Dataset
from optimizedrounder import HistBaseRounder
from weightoptimzer import WeightOptimzer
from runner import Runner
parser = argparse.ArgumentParser(description='kaggle data science bowl 2019')
parser.add_argument("--debug", help="run debug mode",
action="store_true")
parser.add_argument("--optimize", help="auto tune ensemble weight",
action="store_true")
parser.add_argument("--test", help="run test mode",
action="store_true")
parser.add_argument("--config", "-c", type=str, help="ensemble config path")
args = parser.parse_args()
print(f'on kaggle: {utils.ON_KAGGLE}')
result_dict = []
# train_feat_path = utils.FEATURE_DIR / 'train_features.pkl'
test_feat_path = utils.FEATURE_DIR / 'test_features.pkl'
all_test_feat_path = utils.FEATURE_DIR / 'all_test_features.pkl'
if args.debug:
# train_feat_path = utils.FEATURE_DIR / 'train_features_debug.pkl'
test_feat_path = utils.FEATURE_DIR / 'test_features_debug.pkl'
all_test_feat_path = utils.FEATURE_DIR / 'all_test_features_debug.pkl'
train = DSB2019Dataset(mode='train')
event_code_list = list(train.main_df.event_code.unique())
event_id_list = list(train.main_df.event_id.unique())
del train
gc.collect()
# process test set
activities_map = utils.load_json(
utils.CONFIG_DIR / 'activities_map.json')
win_code = utils.make_win_code(activities_map)
if utils.ON_KAGGLE:
test = DSB2019Dataset(mode='test')
test = preprocess.preprocess_dataset(test)
X_test_org, all_test_history = features.generate_features_by_acc(
test.main_df, win_code, event_code_list, event_id_list, mode='test')
del test
gc.collect()
else:
X_test_org = utils.load_pickle(test_feat_path)
all_test_history = utils.load_pickle(all_test_feat_path)
X_test_org = features.add_feature(X_test_org, activities_map)
X_test_org = features.add_agg_feature_test(X_test_org, all_test_history)
ens_test_preds = np.zeros(X_test_org.shape[0])
ens_config = utils.load_yaml(args.config)
sum_weight = 0
preds_df = pd.DataFrame()
test_preds_df = pd.DataFrame()
for i, one_config in enumerate(ens_config):
# print('-'*30)
# print(f'{i}: {one_config}')
input_dir = utils.RESULTS_BASE_DIR / one_config['exp_name']
if utils.ON_KAGGLE:
input_dir = Path('/kaggle/input/') / one_config['exp_name']
config = utils.load_yaml(input_dir / 'model_config.yml')
X_train = utils.load_pickle(input_dir / 'train_x.pkl')
y_train = utils.load_pickle(input_dir / 'train_y.pkl')
fold_indices = utils.load_pickle(input_dir / 'fold_indices.pkl')
model_params = config['model_params']
runner = Runner(run_name='train_cv',
x=X_train,
y=y_train,
model_cls=config['model_class'],
params=model_params,
metrics=metrics.qwk,
save_dir=input_dir,
fold_indices=fold_indices
)
oof_preds, true_y = runner.get_oof_preds()
preds_df[i] = oof_preds
if config['model_class'] == 'ModelNNRegressor':
encoder_dict = utils.load_pickle(input_dir / 'encoder_dict.pkl')
oof_preds, true_y = preprocess.postprocess_for_nn(
oof_preds, encoder_dict, true_y)
weight = one_config['weight']
sum_weight += weight
if i < 1:
ens_oof_preds = oof_preds * weight
else:
ens_oof_preds += oof_preds * weight
if config['task'] == 'regression':
val_rmse = metrics.rmse(oof_preds, true_y)
optR = HistBaseRounder()
best_coef = utils.load_pickle(input_dir / 'best_coef.pkl')
print(f'best threshold: {best_coef}')
oof_preds = optR.predict(oof_preds, best_coef)
val_score = metrics.qwk(oof_preds, true_y)
print(f'rmse: {val_rmse}')
print(f'qwk: {val_score}')
result_dict.append({'exp_name': one_config['exp_name'],
'model_name': one_config['model_name'],
'weight': weight,
'val_rmse': val_rmse,
'val_qwk': val_score}
)
# predict test
X_test = X_test_org.copy()
features_list = utils.load_yaml(input_dir / 'features_list.yml')
all_features = features_list['features']
cat_features = features_list['categorical_features']
# adjust data
if os.path.exists(input_dir / 'adjust.json'):
print('adjust !!!')
adjust_dict = utils.load_json(input_dir / 'adjust.json')
for key, factor in adjust_dict.items():
# print(f'{key}: {factor}')
X_test[key] *= factor
X_test = X_test[all_features]
if config['model_class'] == 'ModelNNRegressor':
print('preprocessing for nn ...')
# encoder_dict = utils.load_pickle(input_dir / 'encoder_dict.pkl')
X_test = preprocess.preprocess_for_nn_from_encoder_dict(
X_test, all_features, cat_features, encoder_dict)
test_preds = runner.run_predict_cv(X_test)
if config['model_class'] == 'ModelNNRegressor':
print('post processing for nn ...')
test_preds = preprocess.postprocess_for_nn(test_preds, encoder_dict)
test_preds_df[i] = test_preds
ens_test_preds += test_preds * weight
for one_result in result_dict:
print('-'*30)
print(f'exp name: {one_result['exp_name']}')
print(f'model name: {one_result['model_name']}')
print(f'weight: {one_result['weight']}')
print(f'val rmse: {one_result['val_rmse']}')
print(f'val qwk: {one_result['val_qwk']}')
# find best coef
print('-'*30)
ens_oof_preds /= sum_weight
if config['task'] == 'regression':
val_rmse = metrics.rmse(ens_oof_preds, true_y)
optR = HistBaseRounder()
optR.fit(ens_oof_preds, true_y)
ens_best_coef = optR.coefficients()
print(f'ensemble best threshold: {ens_best_coef}')
ens_oof_preds = optR.predict(ens_oof_preds, ens_best_coef)
val_score = metrics.qwk(ens_oof_preds, true_y)
print(f'ensemble rmse: {val_rmse}')
print(f'ensemble qwk: {val_score}')
# find best weight
if args.optimize:
print('finding best weight')
weihgt_opt = WeightOptimzer(preds_df, true_y)
_, opt_weight = weihgt_opt.fit()
# print(f'optimzed score: {-optmized_score}')
# print(f'optimzed weight: {opt_weight}')
ens_oof_preds = weihgt_opt.weight_pred(preds_df)
if config['task'] == 'regression':
val_rmse = metrics.rmse(ens_oof_preds, true_y)
optR = HistBaseRounder()
optR.fit(ens_oof_preds, true_y)
ens_best_coef = optR.coefficients()
print(f'optimzed ensemble best threshold: {ens_best_coef}')
ens_oof_preds = optR.predict(ens_oof_preds, ens_best_coef)
val_score = metrics.qwk(ens_oof_preds, true_y)
print(f'optimzed rmse score: {val_rmse}')
print(f'optimzed qwk score: {val_score}')
print(f'optimzed weight: {opt_weight}')
print(f'optimzed best coef: {ens_best_coef}')
ens_test_preds /= sum_weight
if args.optimize:
ens_test_preds = weihgt_opt.weight_pred(test_preds_df)
if config['task'] == 'regression':
optR = HistBaseRounder()
# best_coef = utils.load_pickle(input_dir / 'best_coef.pkl')
ens_test_preds = optR.predict(ens_test_preds, ens_best_coef)
if utils.ON_KAGGLE:
save_path = 'submission.csv'
submission = pd.read_csv(utils.DATA_DIR / 'sample_submission.csv')
submission['accuracy_group'] = (ens_test_preds).astype('int')
submission.to_csv(save_path, index=False)
print('finish !!!')
| import argparse
import gc
import os
from pathlib import Path
import numpy as np
import pandas as pd
import features
import metrics
import preprocess
import utils
from dataset import DSB2019Dataset
from optimizedrounder import HistBaseRounder
from weightoptimzer import WeightOptimzer
from runner import Runner
parser = argparse.ArgumentParser(description='kaggle data science bowl 2019')
parser.add_argument("--debug", help="run debug mode",
action="store_true")
parser.add_argument("--optimize", help="auto tune ensemble weight",
action="store_true")
parser.add_argument("--test", help="run test mode",
action="store_true")
parser.add_argument("--config", "-c", type=str, help="ensemble config path")
args = parser.parse_args()
print(f'on kaggle: {utils.ON_KAGGLE}')
result_dict = []
# train_feat_path = utils.FEATURE_DIR / 'train_features.pkl'
test_feat_path = utils.FEATURE_DIR / 'test_features.pkl'
all_test_feat_path = utils.FEATURE_DIR / 'all_test_features.pkl'
if args.debug:
# train_feat_path = utils.FEATURE_DIR / 'train_features_debug.pkl'
test_feat_path = utils.FEATURE_DIR / 'test_features_debug.pkl'
all_test_feat_path = utils.FEATURE_DIR / 'all_test_features_debug.pkl'
train = DSB2019Dataset(mode='train')
event_code_list = list(train.main_df.event_code.unique())
event_id_list = list(train.main_df.event_id.unique())
del train
gc.collect()
# process test set
activities_map = utils.load_json(
utils.CONFIG_DIR / 'activities_map.json')
win_code = utils.make_win_code(activities_map)
if utils.ON_KAGGLE:
test = DSB2019Dataset(mode='test')
test = preprocess.preprocess_dataset(test)
X_test_org, all_test_history = features.generate_features_by_acc(
test.main_df, win_code, event_code_list, event_id_list, mode='test')
del test
gc.collect()
else:
X_test_org = utils.load_pickle(test_feat_path)
all_test_history = utils.load_pickle(all_test_feat_path)
X_test_org = features.add_feature(X_test_org, activities_map)
X_test_org = features.add_agg_feature_test(X_test_org, all_test_history)
ens_test_preds = np.zeros(X_test_org.shape[0])
ens_config = utils.load_yaml(args.config)
sum_weight = 0
preds_df = pd.DataFrame()
test_preds_df = pd.DataFrame()
for i, one_config in enumerate(ens_config):
# print('-'*30)
# print(f'{i}: {one_config}')
input_dir = utils.RESULTS_BASE_DIR / one_config['exp_name']
if utils.ON_KAGGLE:
input_dir = Path('/kaggle/input/') / one_config['exp_name']
config = utils.load_yaml(input_dir / 'model_config.yml')
X_train = utils.load_pickle(input_dir / 'train_x.pkl')
y_train = utils.load_pickle(input_dir / 'train_y.pkl')
fold_indices = utils.load_pickle(input_dir / 'fold_indices.pkl')
model_params = config['model_params']
runner = Runner(run_name='train_cv',
x=X_train,
y=y_train,
model_cls=config['model_class'],
params=model_params,
metrics=metrics.qwk,
save_dir=input_dir,
fold_indices=fold_indices
)
oof_preds, true_y = runner.get_oof_preds()
preds_df[i] = oof_preds
if config['model_class'] == 'ModelNNRegressor':
encoder_dict = utils.load_pickle(input_dir / 'encoder_dict.pkl')
oof_preds, true_y = preprocess.postprocess_for_nn(
oof_preds, encoder_dict, true_y)
weight = one_config['weight']
sum_weight += weight
if i < 1:
ens_oof_preds = oof_preds * weight
else:
ens_oof_preds += oof_preds * weight
if config['task'] == 'regression':
val_rmse = metrics.rmse(oof_preds, true_y)
optR = HistBaseRounder()
best_coef = utils.load_pickle(input_dir / 'best_coef.pkl')
print(f'best threshold: {best_coef}')
oof_preds = optR.predict(oof_preds, best_coef)
val_score = metrics.qwk(oof_preds, true_y)
print(f'rmse: {val_rmse}')
print(f'qwk: {val_score}')
result_dict.append({'exp_name': one_config['exp_name'],
'model_name': one_config['model_name'],
'weight': weight,
'val_rmse': val_rmse,
'val_qwk': val_score}
)
# predict test
X_test = X_test_org.copy()
features_list = utils.load_yaml(input_dir / 'features_list.yml')
all_features = features_list['features']
cat_features = features_list['categorical_features']
# adjust data
if os.path.exists(input_dir / 'adjust.json'):
print('adjust !!!')
adjust_dict = utils.load_json(input_dir / 'adjust.json')
for key, factor in adjust_dict.items():
# print(f'{key}: {factor}')
X_test[key] *= factor
X_test = X_test[all_features]
if config['model_class'] == 'ModelNNRegressor':
print('preprocessing for nn ...')
# encoder_dict = utils.load_pickle(input_dir / 'encoder_dict.pkl')
X_test = preprocess.preprocess_for_nn_from_encoder_dict(
X_test, all_features, cat_features, encoder_dict)
test_preds = runner.run_predict_cv(X_test)
if config['model_class'] == 'ModelNNRegressor':
print('post processing for nn ...')
test_preds = preprocess.postprocess_for_nn(test_preds, encoder_dict)
test_preds_df[i] = test_preds
ens_test_preds += test_preds * weight
for one_result in result_dict:
print('-'*30)
print(f'exp name: {one_result["exp_name"]}')
print(f'model name: {one_result["model_name"]}')
print(f'weight: {one_result["weight"]}')
print(f'val rmse: {one_result["val_rmse"]}')
print(f'val qwk: {one_result["val_qwk"]}')
# find best coef
print('-'*30)
ens_oof_preds /= sum_weight
if config['task'] == 'regression':
val_rmse = metrics.rmse(ens_oof_preds, true_y)
optR = HistBaseRounder()
optR.fit(ens_oof_preds, true_y)
ens_best_coef = optR.coefficients()
print(f'ensemble best threshold: {ens_best_coef}')
ens_oof_preds = optR.predict(ens_oof_preds, ens_best_coef)
val_score = metrics.qwk(ens_oof_preds, true_y)
print(f'ensemble rmse: {val_rmse}')
print(f'ensemble qwk: {val_score}')
# find best weight
if args.optimize:
print('finding best weight')
weihgt_opt = WeightOptimzer(preds_df, true_y)
_, opt_weight = weihgt_opt.fit()
# print(f'optimzed score: {-optmized_score}')
# print(f'optimzed weight: {opt_weight}')
ens_oof_preds = weihgt_opt.weight_pred(preds_df)
if config['task'] == 'regression':
val_rmse = metrics.rmse(ens_oof_preds, true_y)
optR = HistBaseRounder()
optR.fit(ens_oof_preds, true_y)
ens_best_coef = optR.coefficients()
print(f'optimzed ensemble best threshold: {ens_best_coef}')
ens_oof_preds = optR.predict(ens_oof_preds, ens_best_coef)
val_score = metrics.qwk(ens_oof_preds, true_y)
print(f'optimzed rmse score: {val_rmse}')
print(f'optimzed qwk score: {val_score}')
print(f'optimzed weight: {opt_weight}')
print(f'optimzed best coef: {ens_best_coef}')
ens_test_preds /= sum_weight
if args.optimize:
ens_test_preds = weihgt_opt.weight_pred(test_preds_df)
if config['task'] == 'regression':
optR = HistBaseRounder()
# best_coef = utils.load_pickle(input_dir / 'best_coef.pkl')
ens_test_preds = optR.predict(ens_test_preds, ens_best_coef)
if utils.ON_KAGGLE:
save_path = 'submission.csv'
submission = pd.read_csv(utils.DATA_DIR / 'sample_submission.csv')
submission['accuracy_group'] = (ens_test_preds).astype('int')
submission.to_csv(save_path, index=False)
print('finish !!!')
|
"""This module contains helper functions when working with sklearn (scikit-learn) objects;
in particular, for evaluating models"""
# pylint: disable=too-many-lines
import math
import warnings
from re import match
from typing import Tuple, Union, Optional, List, Dict
import numpy as np
import pandas as pd
import scipy.stats as st
import seaborn as sns
from plotly.graph_objs import _figure # noqa
import plotly.express as px
import yaml
from matplotlib import pyplot as plt
from pandas.io.formats.style import Styler
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.metrics import confusion_matrix, roc_auc_score, r2_score
from sklearn.model_selection._search import BaseSearchCV # noqa
import helpsk.color as hcolor
import helpsk.pandas_style as hstyle
import helpsk.string as hstring
# pylint: disable=too-many-locals
from helpsk.pandas import get_numeric_columns, get_non_numeric_columns
from helpsk.exceptions import HelpskParamValueError
from helpsk.plot import STANDARD_WIDTH_HEIGHT, GOLDEN_RATIO
from helpsk.validation import assert_true
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=Warning)
from statsmodels import api as sm # https://github.com/statsmodels/statsmodels/issues/3814
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-public-methods
class SearchCVParser:
"""
This class contains the logic to parse and extract information from a BaseSearchCV object (e.g.
GridSearchCV, RandomizedSearchCV, BayesSearchCV)
"""
# pylint: disable=too-many-arguments
def __init__(self,
searcher: BaseSearchCV,
higher_score_is_better: bool = True,
run_description: str = "",
parameter_name_mappings: Union[dict, None] = None):
"""
This object encapsulates the results from a SearchCV object (e.g.
sklearn.model_selection.GridSearch/RandomSearch, skopt.BayesSearchCV). The results can then be
converted to a dictionary, in a specific format with the intent to write the contents to a
yaml file.
At this time, this function does not capture the individual fold scores from the individual splits.
Params:
searcher:
A `BaseSearchCV` object that has either used a string passed to the `scoring` parameter of the
constructor (e.g. `GridSearchCV(..., scoring='auc', ...)` or a dictionary with metric
names as keys and callables as values.
An example of the dictionary option:
scores = {
'ROC/AUC': SCORERS['roc_auc'],
'F1': make_scorer(f1_score, greater_is_better=True),
'Pos. Pred. Val': make_scorer(precision_score, greater_is_better=True),
'True Pos. Rate': make_scorer(recall_score, greater_is_better=True),
}
grid_search = GridSearchCV(..., scoring=scores, ...)
higher_score_is_better:
If True, higher scores are better; if False, lower scores are better.
A value of False assumes that the scores returned from sklearn are negative and will multiple
the values by -1.
run_description:
An optional string to save in the dictionary
parameter_name_mappings:
A dictionary containing the parameter names returned by the searchCV object as keys (which
should correspond to the path of the pipeline(s) corresponding to the parameter) and the new,
friendlier, names that can be displayed in graphs and tables.
For example:
{'model__max_features': 'max_features',
'model__n_estimators': 'n_estimators',
'prep__non_numeric__encoder__transformer': 'encoder',
'prep__numeric__impute__transformer': 'imputer',
'prep__numeric__scaling__transformer': 'scaler'}
"""
if searcher is not None: # check for None in the case that __init__ is being called from `from_dict`
self._cv_dict = SearchCVParser.\
__search_cv_to_dict(searcher=searcher,
higher_score_is_better=higher_score_is_better,
run_description=run_description,
parameter_name_mappings=parameter_name_mappings)
else:
self._cv_dict = None
self._cv_dataframe = None
@classmethod
def from_dict(cls, cv_dict):
"""This method creates a SearchCVParser from the dictionary previously created by
`__search_cv_to_dict()`"""
parser = cls(searcher=None, higher_score_is_better=None, run_description=None, # noqa
parameter_name_mappings=None)
parser._cv_dict = cv_dict
return parser
@classmethod
def from_yaml_file(cls, yaml_file_name):
"""This method creates a SearchCVParser from a yaml file created by `to_yaml_file()`"""
with open(yaml_file_name, 'r') as file:
cv_dict = yaml.safe_load(file)
return SearchCVParser.from_dict(cv_dict=cv_dict)
def to_yaml_file(self, yaml_file_name: str):
"""This method saves the self._cv_dict dictionary to a yaml file."""
with open(yaml_file_name, 'w') as file:
yaml.dump(self._cv_dict, file, default_flow_style=False, sort_keys=False)
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    @staticmethod
    def __search_cv_to_dict(searcher: BaseSearchCV,
                            higher_score_is_better: bool = True,
                            run_description: str = "",
                            parameter_name_mappings: Union[dict, None] = None) -> dict:
        """This extracts the information from a BaseSearchCV object and converts it to a dictionary.
        The dictionary captures the description, number of splits, score names, per-iteration test
        (and optionally train) score averages/standard-deviations/rankings, the hyper-parameter
        combination of each iteration, and fit/score timings.
        """
        def string_if_not_number(obj):
            # yaml-friendly values: keep numbers as-is; convert everything else (e.g. estimator or
            # transformer objects used as hyper-parameter values) to their string representation
            if isinstance(obj, (int, float, complex)):
                return obj
            return str(obj)
        cv_results_dict = {
            'description': run_description,
            'cross_validation_type': str(type(searcher)),
            'higher_score_is_better': higher_score_is_better
        }
        # a dict of scorers means multiple scores were used; a single string means one score
        if isinstance(searcher.scoring, dict):
            score_names = list(searcher.scoring.keys())
        elif isinstance(searcher.scoring, str):
            score_names = [searcher.scoring]
        else:
            message = 'The `searcher` does not have a string or dictionary .scoring property. Cannot ' \
                      'extract scores.'
            raise HelpskParamValueError(message)
        # get number of splits (e.g. 5 fold 2 repeat cross validation has 10 splits)
        # I could check the .cv param of the searcher object but not sure all types of cv objects have the
        # same parameters e.g. searcher.cv.n_repeats
        # if there is only 1 score, we need to look for e.g. "split0_test_score"
        # if there are multiple scores we need to look for e.g. "split0_test_ROC/AUC" but we don't want
        # to duplicate the counts e.g. we don't want to also capture "split0_test_True Pos. Rate"
        if len(score_names) == 1:
            split_score_matching_string = "split\\d_test_score"
        else:
            split_score_matching_string = "split\\d_test_" + score_names[0]
        # count the cv_results_ keys that match the split pattern; one key exists per split
        number_of_splits = len([x for x in searcher.cv_results_.keys()
                                if bool(match(split_score_matching_string, x))])
        cv_results_dict['number_of_splits'] = number_of_splits
        cv_results_dict['score_names'] = score_names
        cv_results_dict['parameter_names'] = [key for key, value in searcher.cv_results_['params'][0].items()]
        if parameter_name_mappings:
            # every key in the mapping must be an actual parameter path used by the searcher
            for key in parameter_name_mappings.keys():
                assert_true(key in cv_results_dict['parameter_names'])
            cv_results_dict['parameter_names_mapping'] = parameter_name_mappings
        number_of_iterations = len(searcher.cv_results_['mean_fit_time'])
        # convert test scores to dictionaries
        if len(score_names) == 1:
            # single score: sklearn uses the generic '..._test_score' keys
            test_score_ranking = searcher.cv_results_['rank_test_score'].tolist()
            test_score_averages = searcher.cv_results_['mean_test_score'].tolist()
            test_score_standard_deviations = searcher.cv_results_['std_test_score'].tolist()
            assert_true(len(test_score_ranking) == number_of_iterations)
            assert_true(len(test_score_averages) == number_of_iterations)
            assert_true(len(test_score_standard_deviations) == number_of_iterations)
            cv_results_dict['test_score_rankings'] = {score_names[0]: test_score_ranking}
            cv_results_dict['test_score_averages'] = {score_names[0]: test_score_averages}
            cv_results_dict['test_score_standard_deviations'] = {score_names[0]:
                                                                 test_score_standard_deviations}
        else:
            # multiple scores: sklearn suffixes each key with the score's name
            ranking_dict = {}
            averages_dict = {}
            standard_deviations_dict = {}
            for score in score_names:
                rankings = searcher.cv_results_['rank_test_' + score].tolist()
                averages = searcher.cv_results_['mean_test_' + score].tolist()
                standard_deviations = searcher.cv_results_['std_test_' + score].tolist()
                assert_true(len(rankings) == number_of_iterations)
                assert_true(len(averages) == number_of_iterations)
                assert_true(len(standard_deviations) == number_of_iterations)
                ranking_dict[score] = rankings
                averages_dict[score] = averages
                standard_deviations_dict[score] = standard_deviations
            cv_results_dict['test_score_rankings'] = ranking_dict
            cv_results_dict['test_score_averages'] = averages_dict
            cv_results_dict['test_score_standard_deviations'] = standard_deviations_dict
        # if higher_score_is_better is False, sklearn will return negative numbers; I want actual values
        if not higher_score_is_better:
            averages = cv_results_dict['test_score_averages']
            for key in averages.keys():
                cv_results_dict['test_score_averages'][key] = [-1 * x for x in averages[key]]
        # convert training scores to dictionaries, if training scores exists
        # i.e. if return_train_score=True for the SearchCV object
        if 'mean_train_score' in searcher.cv_results_ or 'mean_train_'+score_names[0] in searcher.cv_results_:
            if len(score_names) == 1:
                train_score_averages = searcher.cv_results_['mean_train_score'].tolist()
                train_score_standard_deviations = searcher.cv_results_['std_train_score'].tolist()
                assert_true(len(train_score_averages) == number_of_iterations)
                assert_true(len(train_score_standard_deviations) == number_of_iterations)
                cv_results_dict['train_score_averages'] = {score_names[0]: train_score_averages}
                cv_results_dict['train_score_standard_deviations'] = {score_names[0]:
                                                                      train_score_standard_deviations}
            else:
                averages_dict = {}
                standard_deviations_dict = {}
                for score in score_names:
                    averages = searcher.cv_results_['mean_train_' + score].tolist()
                    standard_deviations = searcher.cv_results_['std_train_' + score].tolist()
                    assert_true(len(averages) == number_of_iterations)
                    assert_true(len(standard_deviations) == number_of_iterations)
                    averages_dict[score] = averages
                    standard_deviations_dict[score] = standard_deviations
                cv_results_dict['train_score_averages'] = averages_dict
                cv_results_dict['train_score_standard_deviations'] = standard_deviations_dict
            # if higher_score_is_better is False, sklearn will return negative numbers; I want actual
            # values
            if not higher_score_is_better:
                averages = cv_results_dict['train_score_averages']
                for key in averages.keys():
                    cv_results_dict['train_score_averages'][key] = [-1 * x for x in averages[key]]
        assert_true(len(searcher.cv_results_['params']) == number_of_iterations)
        # one dict per iteration; non-numeric hyper-parameter values are stringified for yaml
        cv_results_dict['parameter_iterations'] = [
            {key: string_if_not_number(value) for key, value in searcher.cv_results_['params'][index].items()}
            for index in range(len(searcher.cv_results_['params']))
        ]
        fit_time_averages = searcher.cv_results_['mean_fit_time'].tolist()
        fit_time_standard_deviations = searcher.cv_results_['std_fit_time'].tolist()
        score_time_averages = searcher.cv_results_['mean_score_time'].tolist()
        score_time_standard_deviations = searcher.cv_results_['std_score_time'].tolist()
        assert_true(len(fit_time_averages) == number_of_iterations)
        assert_true(len(fit_time_standard_deviations) == number_of_iterations)
        assert_true(len(score_time_averages) == number_of_iterations)
        assert_true(len(score_time_standard_deviations) == number_of_iterations)
        cv_results_dict['timings'] = {'fit time averages': fit_time_averages,
                                      'fit time standard deviations': fit_time_standard_deviations,
                                      'score time averages': score_time_averages,
                                      'score time standard deviations': score_time_standard_deviations}
        return cv_results_dict
def to_dataframe(self, sort_by_score: bool = True):
"""This function converts the score information from the SearchCV object into a pd.DataFrame.
Params:
sort_by_score:
if True, sorts the dataframe starting with the best (primary) score to the worst score.
Secondary scores are not considered.
Returns:
a DataFrame containing score information for each cross-validation iteration. A single row
corresponds to one iteration (i.e. one set of hyper-parameters that were cross-validated).
"""
if self._cv_dataframe is None:
for score_name in self.score_names:
confidence_intervals = st.t.interval(alpha=0.95, # confidence interval
# number_of_splits is sample-size
df=self.number_of_splits - 1, # degrees of freedom
loc=self.test_score_averages[score_name],
scale=self.score_standard_errors(score_name=score_name))
# only give confidence intervals for the primary score
self._cv_dataframe = pd.concat([
self._cv_dataframe,
pd.DataFrame({score_name + " Mean": self.test_score_averages[score_name],
score_name + " 95CI.LO": confidence_intervals[0],
score_name + " 95CI.HI": confidence_intervals[1]})
],
axis=1
)
self._cv_dataframe = pd.concat([self._cv_dataframe,
pd.DataFrame.from_dict(self.parameter_iterations)], # noqa
axis=1)
if self.parameter_names_mapping:
self._cv_dataframe = self._cv_dataframe.rename(columns=self.parameter_names_mapping)
copy = self._cv_dataframe.copy(deep=True)
if sort_by_score:
copy = copy.iloc[self.primary_score_best_indexes]
return copy
    def to_formatted_dataframe(self,
                               round_by: int = 3,
                               num_rows: int = 50,
                               primary_score_only: bool = False,
                               exclude_no_variance_params: bool = True,
                               return_style: bool = True,
                               sort_by_score: bool = True) -> Union[pd.DataFrame, Styler]:
        """This function converts the score information from the SearchCV object into a pd.DataFrame or a
        Styler object, formatted accordingly.
        The Hyper-Parameter columns will be highlighted in blue where the primary
        score (i.e. first column) for the iteration (i.e. the row i.e. the combination of hyper-parameters
        that were cross validated) is within 1 standard error of the top primary score (i.e. first column
        first row).
        Args:
            round_by:
                the number of digits to round by for the score columns (does not round the parameter columns)
            num_rows:
                the number of rows to return in the resulting DataFrame.
            primary_score_only:
                if True, then only include the primary score.
            exclude_no_variance_params:
                if True, exclude columns that only have 1 unique value
            return_style:
                If True, return Styler object, else return pd.DataFrame
            sort_by_score:
                if True, sorts the dataframe starting with the best (primary) score to the worst score.
                Secondary scores are not considered.
        Returns:
            Returns either pd.DataFrame or pd.DataFrame.Styler.
        """
        cv_dataframe = self.to_dataframe(sort_by_score=sort_by_score)
        cv_dataframe = cv_dataframe.head(num_rows)
        if exclude_no_variance_params:
            # drop hyper-parameter columns whose value never changes across iterations
            columns_to_drop = [x for x in self.parameter_names if len(cv_dataframe[x].unique()) == 1]
            cv_dataframe = cv_dataframe.drop(columns=columns_to_drop)
        score_columns = list(cv_dataframe.columns[cv_dataframe.columns.str.endswith((' Mean',
                                                                                     ' 95CI.LO',
                                                                                     ' 95CI.HI'))])
        if primary_score_only:
            columns_to_drop = [x for x in score_columns if not x.startswith(self.primary_score_name)]
            cv_dataframe = cv_dataframe.drop(columns=columns_to_drop)
        # round only the score columns; hyper-parameter columns are left untouched
        cv_dataframe = cv_dataframe.round(dict(zip(score_columns, [round_by] * len(score_columns))))
        final_columns = cv_dataframe.columns  # save for style logic
        if return_style:
            cv_dataframe = cv_dataframe.style
            for score in self.score_names:
                mean_key = score + ' Mean'
                ci_low_key = score + ' 95CI.LO'
                ci_high_key = score + ' 95CI.HI'
                # Styler methods mutate the Styler and return it, so the chained return values
                # can be discarded here
                if mean_key in final_columns:
                    cv_dataframe. \
                        bar(subset=[mean_key], color=hcolor.Colors.PIGMENT_GREEN.value)
                if ci_low_key in final_columns:
                    cv_dataframe. \
                        bar(subset=[ci_high_key], color=hcolor.GRAY). \
                        pipe(hstyle.bar_inverse, subset=[ci_low_key], color=hcolor.GRAY)
            # NOTE(review): the result of `.pipe` is discarded - this assumes hstyle.format mutates
            # the Styler in place; confirm against the hstyle helper module
            cv_dataframe.pipe(hstyle.format, round_by=round_by, hide_index=True)
            # highlight iterations whose primary score (i.e. first column of `results` dataframe) is within
            # 1 standard error of the top primary score (i.e. first column first row).
            # pylint: disable=invalid-name, unused-argument
            def highlight_cols(s):  # noqa
                return 'background-color: %s' % hcolor.Colors.PASTEL_BLUE.value
            # we might have removed columns (e.g. that don't have any variance) so check that the columns
            # were in the final set
            columns_to_highlight = [x for x in self.parameter_names if x in final_columns]
            # NOTE(review): Styler.applymap was renamed to Styler.map in pandas 2.1 - applymap is
            # deprecated there; keep in mind when upgrading pandas
            cv_dataframe.applymap(highlight_cols,
                                  subset=pd.IndexSlice[self.indexes_within_1_standard_error,
                                                       columns_to_highlight])
        return cv_dataframe
####
# The following properties expose the highest levels of the underlying dictionary/yaml
####
@property
def description(self):
"""the description passed to `run_description`."""
return self._cv_dict['description']
@property
def higher_score_is_better(self):
"""The value passed to `higher_score_is_better`."""
return self._cv_dict['higher_score_is_better']
@property
def cross_validation_type(self) -> str:
"""The string representation of the SearchCV object."""
return self._cv_dict['cross_validation_type']
@property
def number_of_splits(self) -> int:
"""This is the number of CV folds. For example, a 5-fold 2-repeat CV has 10 splits."""
return self._cv_dict['number_of_splits']
@property
def score_names(self) -> list:
"""Returns a list of the names of the scores"""
return self._cv_dict['score_names']
@property
def parameter_names_original(self) -> list:
"""Returns the original parameter names (i.e. the path generated by the scikit-learn pipelines."""
return self._cv_dict['parameter_names']
@property
def parameter_names(self) -> list:
"""This property returns either the original parameter names if no `parameter_names_mapping` was
provided, or it returns the new parameter names (i.e. the values from `parameter_names_mapping`)."""
if self.parameter_names_mapping:
return list(self.parameter_names_mapping.values())
return self.parameter_names_original
@property
def parameter_names_mapping(self) -> dict:
"""The dictionary passed to `parameter_name_mappings`."""
return self._cv_dict.get('parameter_names_mapping')
@property
def test_score_rankings(self) -> dict:
"""The rankings of each of the test scores, from the searcher.cv_results_ object."""
return self._cv_dict['test_score_rankings']
@property
def test_score_averages(self) -> dict:
"""The test score averages, from the searcher.cv_results_ object."""
return self._cv_dict['test_score_averages']
@property
def test_score_standard_deviations(self) -> dict:
"""The test score standard deviations, from the searcher.cv_results_ object."""
return self._cv_dict['test_score_standard_deviations']
@property
def train_score_averages(self) -> dict:
"""The training score averages, from the searcher.cv_results_ object, if provided."""
return self._cv_dict.get('train_score_averages')
@property
def train_score_standard_deviations(self) -> dict:
"""The training score standard deviations, from the searcher.cv_results_ object, if provided."""
return self._cv_dict.get('train_score_standard_deviations')
@property
def parameter_iterations(self) -> list:
"""The "iterations" i.e. the hyper-parameter combinations in order of execution."""
return self._cv_dict['parameter_iterations']
def iteration_labels(self, order_from_best_to_worst=True) -> List[str]:
"""An iteration is a set of hyper-parameters that were cross validated. The corresponding label for
each iteration is a single string containing all of the hyper-parameter names and values in the format
of `{param1: value1, param2: value2}`.
Params:
order_from_best_to_worst: if True, returns the labels in order from the best score to the worst
score, which should match the ordered of .to_dataframe() or .to_formatted_dataframe()`. If False,
returns the labels in order that they were ran by the cross validation object.
Returns:
a pd.Series the same length as `number_of_trials` containing a str
"""
def create_hyper_param_labels(iteration) -> list:
"""Creates a list of strings that represent the name/value pair for each hyper-parameter."""
return [f"{self.parameter_names_mapping[x] if self.parameter_names_mapping else x}: {iteration[x]}" # pylint: disable=line-too-long # noqa
for x in self.parameter_names_original]
# create_hyper_param_labels(iteration=self.parameter_iterations[0])
def create_trial_label(iteration) -> str:
return f"{{{hstring.collapse(create_hyper_param_labels(iteration), separate=", ")}}}"
# create_trial_label(iteration=self.parameter_iterations[0])
labels = [create_trial_label(x) for x in self.parameter_iterations]
if order_from_best_to_worst:
labels = [x for _, x in sorted(zip(self.primary_score_iteration_ranking, labels))]
return labels
@property
def timings(self) -> dict:
"""The timings providing by searcher.cv_results_."""
return self._cv_dict['timings']
####
# The following properties are additional helpers
####
@property
def number_of_iterations(self) -> int:
""""A single trial contains the cross validation runs for a single set of hyper-parameters. The
'number of trials' is basically the number of combinations of different hyper-parameters that were
cross validated."""
return len(self.parameter_iterations)
    @property
    def numeric_parameters(self) -> List[str]:
        """Names of the hyper-parameters whose columns in `to_dataframe()` are numeric."""
        return [x for x in get_numeric_columns(dataframe=self.to_dataframe()) if x in self.parameter_names]
    @property
    def non_numeric_parameters(self) -> List[str]:
        """Names of the hyper-parameters whose columns in `to_dataframe()` are non-numeric."""
        return [x for x in get_non_numeric_columns(dataframe=self.to_dataframe())
                if x in self.parameter_names]
@property
def number_of_scores(self) -> int:
"""The number of scores passed to the SearchCV object"""
return len(self.score_names)
@property
def primary_score_name(self) -> str:
"""The first scorer passed to the SearchCV will be treated as the primary score. This property returns
the name of the score."""
return self.score_names[0]
@property
def primary_score_averages(self) -> np.array:
"""The first scorer passed to the SearchCV will be treated as the primary score. This property returns
the average score (across all splits) for each iteration. Note that the average scores are
the weighted averages
https://stackoverflow.com/questions/44947574/what-is-the-meaning-of-mean-test-score-in-cv-result"""
return np.array(self.test_score_averages[self.primary_score_name])
    def score_standard_errors(self, score_name: str) -> np.array:
        """Returns the standard error associated with the mean cross-validated score of each
        iteration, for the score named `score_name`: the standard deviation across splits divided
        by the square root of the number of splits (splits are the sample size)."""
        score_standard_deviations = self.test_score_standard_deviations[score_name]
        return np.array(score_standard_deviations) / math.sqrt(self.number_of_splits)
@property
def primary_score_iteration_ranking(self) -> np.array:
"""The ranking of the corresponding index, in terms of best to worst score.
e.g. [5, 6, 7, 8, 3, 4, 1, 2]
This means that the 6th index/iteration had the highest ranking (1); and that the 3rd index had
the worst ranking (8)
This differs from `primary_score_best_indexes` which returns the order of indexes from best to worst.
So in the example above, the first value returned in the `primary_score_best_indexes` array would be
6 because the best score is at index 6. The last value in the array 3, because the worst score is at
index 3.
Note that `primary_score_iteration_ranking` starts at 1 while primary_score_best_indexes starts at 0.
"""
return np.array(self.test_score_rankings[self.primary_score_name])
@property
def primary_score_best_indexes(self) -> np.array:
"""The indexes of best to worst primary scores. See documentation for
`primary_score_iteration_ranking` to understand the differences between the two properties."""
return np.argsort(self.primary_score_iteration_ranking)
@property
def best_primary_score_index(self) -> int:
"""The index of best primary score."""
return self.primary_score_best_indexes[0]
@property
def best_primary_score_params(self) -> dict:
"""
The "best" score (could be the highest or lowest depending on `higher_score_is_better`) associated
with the primary score.
"""
best_params = self.parameter_iterations[self.best_primary_score_index]
if self.parameter_names_mapping:
best_params = {self.parameter_names_mapping[key]: value for key, value in best_params.items()}
return best_params
@property
def best_primary_score(self) -> float:
"""
The "best" score (could be the highest or lowest depending on `higher_score_is_better`) associated
with the primary score.
"""
return self.primary_score_averages[self.best_primary_score_index]
@property
def best_primary_score_standard_error(self) -> float:
"""The standard error associated with the best score of the primary scorer"""
return self.score_standard_errors(score_name=self.primary_score_name)[self.best_primary_score_index]
@property
def indexes_within_1_standard_error(self) -> list:
"""Returns the iteration indexes where the primary scores (i.e. first scorer
passed to SearchCV object; i.e. first column of the to_dataframe() DataFrame) are within 1 standard
error of the highest primary score."""
cv_dataframe = self.to_dataframe(sort_by_score=True)
if self.higher_score_is_better:
return list(cv_dataframe.index[cv_dataframe.iloc[:, 0] >=
self.best_primary_score - self.best_primary_score_standard_error])
return list(cv_dataframe.index[cv_dataframe.iloc[:, 0] <=
self.best_primary_score + self.best_primary_score_standard_error])
@property
def fit_time_averages(self) -> np.array:
"""
Returns a list of floats; one value for each iteration (i.e. a single set of hyper-params).
Each value is the average number of seconds that the iteration took to fit the model, per split
(i.e. the average fit time of all splits).
"""
return np.array(self.timings['fit time averages'])
@property
def fit_time_standard_deviations(self) -> np.array:
"""
Returns a list of floats; one value for each iteration (i.e. a single set of hyper-params).
Each value is the standard deviation of seconds that the iteration took to fit the model, per split
(i.e. the standard deviation of fit time across all splits).
"""
return np.array(self.timings['fit time standard deviations'])
@property
def score_time_averages(self) -> np.array:
"""
Returns a list of floats; one value for each iteration (i.e. a single set of hyper-params).
Each value is the average number of seconds that the iteration took to score the model, per split
(i.e. the average score time of all splits).
"""
return np.array(self.timings['score time averages'])
@property
def score_time_standard_deviations(self) -> np.array:
"""
Returns a list of floats; one value for each iteration (i.e. a single set of hyper-params).
Each value is the standard deviation of seconds that the iteration took to score the model, per split
(i.e. the standard deviation of score time across all splits).
"""
return np.array(self.timings['score time standard deviations'])
@property
def iteration_fit_times(self) -> np.array:
"""For each iteration, it is the amount of time it took to fit the model.
Calculated by Average fit time for each iteration multiplied by the number of splits per iteration.
self.fit_time_averages * self.number_of_splits
Returns:
array containing the fit time for each iteration
"""
return self.fit_time_averages * self.number_of_splits
@property
def fit_time_total(self) -> float:
"""Total fit time across all iterations."""
return float(np.sum(self.iteration_fit_times))
@property
def iteration_score_times(self) -> np.array:
"""For each iteration, it is the amount of time it took to score the model.
Calculated by Average score time for each iteration multiplied by the number of splits per iteration.
self.score_time_averages * self.number_of_splits
Returns:
array containing the score time for each iteration
"""
return self.score_time_averages * self.number_of_splits
@property
def score_time_total(self) -> float:
"""Total score time across all iterations."""
return float(np.sum(self.iteration_score_times))
@property
def average_time_per_trial(self) -> float:
"""Average time per trial"""
return float(np.mean(self.iteration_fit_times + self.iteration_score_times))
@property
def total_time(self) -> float:
"""Total time it took across all trials"""
return self.fit_time_total + self.score_time_total
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class TwoClassEvaluator:
"""This class calculates various metrics for Two Class (i.e. 0's/1's) prediction scenarios."""
def __init__(self,
actual_values: np.ndarray,
predicted_scores: np.ndarray,
positive_class: str = 'Positive Class',
negative_class: str = 'Negative Class',
score_threshold: float = 0.5
):
"""
Args:
actual_values:
array of 0's and 1's
predicted_scores:
array of decimal/float values from `predict_proba()`; NOT the actual class
positive_class:
string of the name/label of the positive class (i.e. value of 1). In other words, not
'positive' in the sense of 'good' but 'positive' as in 'True/False Positive'.
negative_class:
string of the name/label of the negative class (i.e. value of 0). In other words, not
'negative' in the sense of 'good' but 'negative' as in 'True/False Negative'.
score_threshold:
the score/probability threshold for turning scores into 0's and 1's and corresponding labels
"""
assert len(actual_values) == len(predicted_scores)
if not all(np.unique(actual_values) == [0, 1]):
message = f"Values of `actual_values` should 0 or 1. Found `{np.unique(actual_values)}`"
raise HelpskParamValueError(message)
if not all(np.logical_and(predicted_scores >= 0, predicted_scores <= 1)):
message = "Values of `predicted_scores` should be between 0 and 1."
raise HelpskParamValueError(message)
self._positive_class = positive_class
self._negative_class = negative_class
self._actual_values = actual_values
self._predicted_scores = predicted_scores
self.score_threshold = score_threshold
predicted_values = np.where(predicted_scores > self.score_threshold, 1, 0)
self._confusion_matrix = confusion_matrix(y_true=actual_values, y_pred=predicted_values)
self.sample_size = len(actual_values)
assert self.sample_size == self._confusion_matrix.sum()
true_negatives, false_positives, false_negatives, true_positives = self._confusion_matrix.ravel()
self._actual_positives = true_positives + false_negatives
assert self._actual_positives == sum(self._actual_values == 1)
self._actual_negatives = true_negatives + false_positives
self._true_negatives = true_negatives
self._false_positives = false_positives
self._false_negatives = false_negatives
self._true_positives = true_positives
self.auc = roc_auc_score(y_true=actual_values, y_score=predicted_scores)
@property
def true_positive_rate(self) -> float:
"""True Positive Rate"""
return 0 if self._actual_positives == 0 else self._true_positives / self._actual_positives
@property
def true_negative_rate(self) -> float:
"""True Negative Rate i.e. Specificity"""
return 0 if self._actual_negatives == 0 else self._true_negatives / self._actual_negatives
@property
def false_negative_rate(self) -> float:
"""False Negative Rate"""
return 0 if self._actual_positives == 0 else self._false_negatives / self._actual_positives
@property
def false_positive_rate(self) -> float:
"""False Positive Rate"""
return 0 if self._actual_negatives == 0 else self._false_positives / self._actual_negatives
@property
def accuracy(self) -> Union[float, None]:
"""accuracy"""
return None if self.sample_size == 0 else \
(self._true_negatives + self._true_positives) / self.sample_size
@property
def error_rate(self) -> Union[float, None]:
"""error_rate"""
return None if self.sample_size == 0 else \
(self._false_positives + self._false_negatives) / self.sample_size
@property
def positive_predictive_value(self) -> float:
"""Positive Predictive Value i.e. Precision"""
return 0 if (self._true_positives + self._false_positives) == 0 else \
self._true_positives / (self._true_positives + self._false_positives)
@property
def negative_predictive_value(self) -> float:
"""Negative Predictive Value"""
return 0 if (self._true_negatives + self._false_negatives) == 0 else \
self._true_negatives / (self._true_negatives + self._false_negatives)
@property
def prevalence(self) -> Union[float, None]:
"""Prevalence"""
return None if self.sample_size == 0 else \
self._actual_positives / self.sample_size
@property
def kappa(self) -> Union[float, None]:
"""Kappa"""
if self.sample_size == 0 or \
((self._true_negatives + self._false_negatives) / self.sample_size) == 0:
return None
# proportion of the actual agreements
# add the proportion of all instances where the predicted type and actual type agree
pr_a = (self._true_negatives + self._true_positives) / self.sample_size
# probability of both predicted and actual being negative
p_negative_prediction_and_actual = \
((self._true_negatives + self._false_positives) / self.sample_size) * \
((self._true_negatives + self._false_negatives) / self.sample_size)
# probability of both predicted and actual being positive
p_positive_prediction_and_actual = \
self.prevalence * ((self._false_positives + self._true_positives) / self.sample_size)
# probability that chance alone would lead the predicted and actual values to match, under the
# assumption that both are selected randomly (i.e. implies independence) according to the observed
# proportions (probability of independent events = P(A & B) == P(A) * P(B)
pr_e = p_negative_prediction_and_actual + p_positive_prediction_and_actual
return (pr_a - pr_e) / (1 - pr_e)
    @property
    def f1_score(self) -> float:
        """F1 Score: the harmonic mean of precision and recall (i.e. `fbeta_score` with beta=1).
        https://en.wikipedia.org/wiki/F-score
        """
        return self.fbeta_score(beta=1)
def fbeta_score(self, beta: float) -> float:
"""
:param beta: The `beta` parameter determines the weight of precision in the combined score.
`beta < 1` lends more weight to precision (i.e. positive predictive value), while
`beta > 1` favors recall (i.e. true positive rate)
(`beta -> 0` considers only precision, `beta -> inf` only recall).
http://scikit-learn.org/stable/modules/generated/sklearn.metrics.fbeta_score.html
:return:
"""
if self.positive_predictive_value is None or self.sensitivity is None or \
(self.positive_predictive_value + self.sensitivity) == 0:
return 0
return (1 + (beta ** 2)) * (self.positive_predictive_value * self.sensitivity) / \
(((beta ** 2) * self.positive_predictive_value) + self.sensitivity)
    @property
    def sensitivity(self) -> float:
        """Sensitivity: alias for `true_positive_rate`."""
        return self.true_positive_rate
    @property
    def specificity(self) -> float:
        """Specificity: alias for `true_negative_rate`."""
        return self.true_negative_rate
    @property
    def precision(self) -> float:
        """Precision: alias for `positive_predictive_value`."""
        return self.positive_predictive_value
    @property
    def recall(self):
        """Recall: alias for `true_positive_rate`."""
        return self.true_positive_rate
    @property
    def all_metrics(self) -> dict:
        """All of the metrics, returned as a dictionary mapping the metric name to a tuple of
        `(value, human-readable explanation)`."""
        auc_message = 'Area under the ROC curve (true pos. rate vs false pos. rate); ' \
                      'ranges from 0.5 (purely random classifier) to 1.0 (perfect classifier)'
        tpr_message = f'{self.true_positive_rate:.1%} of positive instances were correctly identified.; ' \
                      f'i.e. {self._true_positives} "{self._positive_class}" labels were correctly identified ' \
                      f'out of {self._actual_positives} instances; a.k.a Sensitivity/Recall'
        tnr_message = f'{self.true_negative_rate:.1%} of negative instances were correctly identified.; ' \
                      f'i.e. {self._true_negatives} "{self._negative_class}" labels were correctly identified ' \
                      f'out of {self._actual_negatives} instances'
        fpr_message = f'{self.false_positive_rate:.1%} of negative instances were incorrectly identified ' \
                      f'as positive; ' \
                      f'i.e. {self._false_positives} "{self._negative_class}" labels were incorrectly ' \
                      f'identified as "{self._positive_class}", out of {self._actual_negatives} instances'
        fnr_message = f'{self.false_negative_rate:.1%} of positive instances were incorrectly identified ' \
                      f'as negative; ' \
                      f'i.e. {self._false_negatives} "{self._positive_class}" labels were incorrectly ' \
                      f'identified as "{self._negative_class}", out of {self._actual_positives} instances'
        ppv_message = f'When the model claims an instance is positive, it is correct ' \
                      f'{self.positive_predictive_value:.1%} of the time; ' \
                      f'i.e. out of the {self._true_positives + self._false_positives} times the model ' \
                      f'predicted "{self._positive_class}", it was correct {self._true_positives} ' \
                      f'times; a.k.a precision'
        npv_message = f'When the model claims an instance is negative, it is correct ' \
                      f'{self.negative_predictive_value:.1%} of the time; ' \
                      f'i.e. out of the {self._true_negatives + self._false_negatives} times the model ' \
                      f'predicted "{self._negative_class}", it was correct {self._true_negatives} times'
        f1_message = 'The F1 score can be interpreted as a weighted average of the precision and recall, ' \
                     'where an F1 score reaches its best value at 1 and worst score at 0.'
        accuracy_message = f'{self.accuracy:.1%} of instances were correctly identified'
        error_message = f'{self.error_rate:.1%} of instances were incorrectly identified'
        prevalence_message = f'{self.prevalence:.1%} of the data are positive; i.e. out of ' \
                             f'{self.sample_size} total observations; {self._actual_positives} are labeled ' \
                             f'as "{self._positive_class}"'
        total_obs_message = f'There are {self.sample_size} total observations; i.e. sample size'
        return {'AUC': (self.auc, auc_message),
                'True Positive Rate': (self.true_positive_rate, tpr_message),
                'True Negative Rate': (self.true_negative_rate, tnr_message),
                'False Positive Rate': (self.false_positive_rate, fpr_message),
                'False Negative Rate': (self.false_negative_rate, fnr_message),
                'Positive Predictive Value': (self.positive_predictive_value, ppv_message),
                'Negative Predictive Value': (self.negative_predictive_value, npv_message),
                'F1 Score': (self.f1_score, f1_message),
                'Accuracy': (self.accuracy, accuracy_message),
                'Error Rate': (self.error_rate, error_message),
                '% Positive': (self.prevalence, prevalence_message),
                'Total Observations': (self.sample_size, total_obs_message)}
def all_metrics_df(self,
return_explanations: bool = True,
dummy_classifier_strategy: Union[str, list, None] = 'prior',
dummy_classifier_constant: Union[int] = 1,
return_style: bool = False,
round_by: Optional[int] = None) -> Union[pd.DataFrame, Styler]:
"""All of the metrics are returned as a DataFrame.
Args:
return_explanations:
if True, then return descriptions of score and more information in an additional column
dummy_classifier_strategy:
if not None, then returns column(s) corresponding to the scores from predictions of
sklearn.dummy.DummyClassifier, based on the strategy (or strategies) provided. Valid values
correspond to values of `strategy` parameter listed
https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyClassifier.html
If a list is passed in (e.g. ['prior', 'uniform'], then one score column per value is
added.
If None is passed, then no additional columns are added.
dummy_classifier_constant:
The explicit constant as predicted by the “constant” strategy for the
DummyClassifier.
This parameter is useful only for the “constant” dummy_classifier_strategy.
return_style:
if True, return styler object; else return dataframe
round_by:
the number of digits to round by; if None, then don't round
"""
result = pd.DataFrame.from_dict({key: value[0] for key, value in self.all_metrics.items()},
orient='index',
columns=['Score'])
score_columns = ['Score']
if dummy_classifier_strategy:
if isinstance(dummy_classifier_strategy, str):
dummy_classifier_strategy = [dummy_classifier_strategy]
for strategy in dummy_classifier_strategy:
dummy = DummyClassifier(strategy=strategy, constant=dummy_classifier_constant)
# https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyClassifier.html
# "All strategies make predictions that ignore the input feature values passed as the X
# argument to fit and predict. The predictions, however, typically depend on values observed
# in the y parameter passed to fit."
_ = dummy.fit(X=self._actual_values, y=self._actual_values)
dummy_probabilities = dummy.predict_proba(X=self._actual_values)
dummy_probabilities = dummy_probabilities[:, 1]
dummy_evaluator = TwoClassEvaluator(actual_values=self._actual_values,
predicted_scores=dummy_probabilities,
score_threshold=self.score_threshold)
dummy_scores = dummy_evaluator.all_metrics_df(return_explanations=False,
dummy_classifier_strategy=None,
return_style=False)
column_name = f"Dummy ({strategy})"
score_columns = score_columns + [column_name]
dummy_scores = dummy_scores.rename(columns={'Score': column_name})
result = pd.concat([result, dummy_scores], axis=1)
if return_explanations:
explanations = pd.DataFrame.from_dict({key: value[1] for key, value in self.all_metrics.items()},
orient='index',
columns=['Explanation'])
result = pd.concat([result, explanations], axis=1)
if round_by:
for column in score_columns:
result[column] = result[column].round(round_by)
if return_style:
subset_scores = [x for x in result.index.values if x != 'Total Observations']
subset_scores = pd.IndexSlice[result.loc[subset_scores, :].index, score_columns]
subset_negative_bad = pd.IndexSlice[result.loc[['False Positive Rate',
'False Negative Rate'], score_columns].index,
score_columns]
subset_secondary = pd.IndexSlice[result.loc[['Accuracy', 'Error Rate', '% Positive'],
score_columns].index, score_columns]
subset_total_observations = pd.IndexSlice[result.loc[['Total Observations'],
score_columns].index, score_columns]
result = result.style
if round_by:
result = result.format(precision=round_by)
result = result.format(subset=subset_total_observations,
thousands=',',
precision=0)
result = result. \
bar(subset=subset_scores, color=hcolor.Colors.PIGMENT_GREEN.value, vmin=0, vmax=1). \
bar(subset=subset_negative_bad, color=hcolor.Colors.POPPY.value, vmin=0, vmax=1). \
bar(subset=subset_secondary, color=hcolor.GRAY, vmin=0, vmax=1)
return result
def plot_confusion_matrix(self):
"""Plots a heatmap of the confusion matrix."""
labels = np.array([[f'True Negatives\n{self._true_negatives}\n{self._true_negatives / self.sample_size:.1%}', # pylint: disable=line-too-long # noqa
f'False Positives\n{self._false_positives}\n{self._false_positives / self.sample_size:.1%}'], # pylint: disable=line-too-long # noqa
[f'False Negatives\n{self._false_negatives}\n{self._false_negatives / self.sample_size:.1%}', # pylint: disable=line-too-long # noqa
f'True Positives\n{self._true_positives}\n{self._true_positives / self.sample_size:.1%}']]) # pylint: disable=line-too-long # noqa
axis = plt.subplot()
sns.heatmap(self._confusion_matrix, annot=labels, cmap='Blues', ax=axis, fmt='')
# labels, title and ticks
axis.set_xlabel('Predicted')
axis.set_ylabel('Actual')
# axis.set_title('Confusion Matrix');
axis.xaxis.set_ticklabels([self._negative_class, self._positive_class])
axis.yaxis.set_ticklabels([self._negative_class, self._positive_class])
plt.tight_layout()
def _get_auc_curve_dataframe(self) -> pd.DataFrame:
"""
Returns a dataframe containing the AUC line (i.e. a column of score thresholds, and the corresponding
True Positive and False Positive Rate (as columns) for the corresponding score threshold.
(A score threshold is the value for which you would predict a positive label if the value of the score
is above the threshold (e.g. usually 0.5).
"""
def get_true_pos_false_pos(threshold):
temp_eval = TwoClassEvaluator(actual_values=self._actual_values,
predicted_scores=self._predicted_scores,
score_threshold=threshold)
return threshold, temp_eval.true_positive_rate, temp_eval.false_positive_rate
auc_curve = [get_true_pos_false_pos(threshold=x) for x in np.arange(0.0, 1.01, 0.01)]
auc_curve = pd.DataFrame(auc_curve,
columns=['threshold', 'True Positive Rate', 'False Positive Rate'])
return auc_curve
def _get_threshold_curve_dataframe(self, score_threshold_range: Tuple[float, float] = (0.1, 0.9)) \
-> pd.DataFrame:
"""
Returns a dataframe containing various score thresholds from 0 to 1 (i.e. cutoff point where score
will be labeled as a 'positive' event, and various rates (e.g. True Positive Rate, False Positive
Rate, etc.) for the corresponding score threshold.
(A score threshold is the value for which you would predict a positive label if the value of the score
is above the threshold (e.g. usually 0.5).
Args:
score_threshold_range:
range of score thresholds to plot (x-axis); tuple with minimum threshold in first index and
maximum threshold in second index.
"""
def get_threshold_scores(threshold):
temp_eval = TwoClassEvaluator(actual_values=self._actual_values,
predicted_scores=self._predicted_scores,
score_threshold=threshold)
return threshold,\
temp_eval.true_positive_rate,\
temp_eval.false_positive_rate,\
temp_eval.positive_predictive_value,\
temp_eval.false_negative_rate,\
temp_eval.true_negative_rate
threshold_curves = [get_threshold_scores(threshold=x)
for x in np.arange(score_threshold_range[0],
score_threshold_range[1] + 0.025,
0.025)]
threshold_curves = pd.DataFrame(threshold_curves,
columns=['Score Threshold',
'True Pos. Rate (Recall)',
'False Pos. Rate',
'Pos. Predictive Value (Precision)',
'False Neg. Rate',
'True Neg. Rate (Specificity)'])
return threshold_curves
    def plot_auc_curve(self,
                       figure_size: tuple = STANDARD_WIDTH_HEIGHT,
                       return_plotly: bool = False) -> Union[None,
                                                             _figure.Figure]:
        """Plots the ROC AUC

        Args:
            figure_size:
                tuple containing `(width, height)` of plot. The default height is defined by
                `helpsk.plot.STANDARD_HEIGHT`, and the default width is
                `helpsk.plot.STANDARD_HEIGHT / helpsk.plot.GOLDEN_RATIO`
            return_plotly:
                If True, return plotly object. Otherwise, use matplotlib and end function with call:
                `plt.tight_layout()`
        """
        # NOTE(review): a matplotlib figure is created even when `return_plotly` is True — confirm
        # this side effect is intended on the plotly path
        plt.figure(figsize=figure_size)
        auc_curve = self._get_auc_curve_dataframe()
        if return_plotly:
            # gray ROC line; per-threshold colored points and the 0.5-threshold marker added below
            fig = px.line(
                data_frame=auc_curve,
                x='False Positive Rate',
                y='True Positive Rate',
                color_discrete_sequence=[hcolor.Colors.DOVE_GRAY.value],
                height=550,
                width=550 * GOLDEN_RATIO,
                title=f"AUC: {self.auc:.3f}<br><sub>The threshold of 0.5 is indicated with a large point.</sub>"  # pylint: disable=line-too-long # noqa
            )
            # scatter points colored by threshold, overlaid on the line
            fig.add_trace(
                px.scatter(
                    data_frame=auc_curve,
                    x='False Positive Rate',
                    y='True Positive Rate',
                    color='threshold',
                ).data[0]
            )
            # large marker highlighting the default 0.5 threshold
            fig.add_trace(
                px.scatter(
                    data_frame=auc_curve.query('threshold == 0.5'),
                    x='False Positive Rate',
                    y='True Positive Rate',
                    size=[2],
                ).data[0]
            )
            return fig
        # NOTE(review): `ci=` is deprecated in newer seaborn versions (use `errorbar=None`) — confirm
        # the seaborn version this project pins
        axis = sns.lineplot(data=auc_curve, x='False Positive Rate', y='True Positive Rate', ci=None)
        axis.set_title(f"AUC: {round(self.auc, 3)}")
        # annotate every 5th point with its threshold value to keep the plot readable
        for i, (x, y, s) in enumerate(zip(auc_curve['False Positive Rate'],  # pylint: disable=invalid-name
                                          auc_curve['True Positive Rate'],
                                          auc_curve['threshold'])):
            if i % 5 == 0:
                axis.text(x, y, f'{s:.3}')
        axis.set_xticks(np.arange(0, 1.1, .1))
        axis.set_yticks(np.arange(0, 1.1, .1))
        plt.grid()
        plt.tight_layout()
    def plot_threshold_curves(self,
                              score_threshold_range: Tuple[float, float] = (0.1, 0.9),
                              figure_size: tuple = STANDARD_WIDTH_HEIGHT,
                              return_plotly: bool = False) -> Union[None,
                                                                    _figure.Figure]:
        """Plots various scores (e.g. True Positive Rate, False Positive Rate, etc.) for various score
        thresholds. (A score threshold is the value for which you would predict a positive label if the
        value of the score is above the threshold (e.g. usually 0.5).

        Args:
            score_threshold_range:
                range of score thresholds to plot (x-axis); tuple with minimum threshold in first index and
                maximum threshold in second index.
            figure_size:
                tuple containing `(width, height)` of plot. The default height is defined by
                `helpsk.plot.STANDARD_HEIGHT`, and the default width is
                `helpsk.plot.STANDARD_HEIGHT / helpsk.plot.GOLDEN_RATIO`
            return_plotly:
                If True, return plotly object. Otherwise, use matplotlib and end function with call:
                `plt.tight_layout()`
        """
        # NOTE(review): a matplotlib figure is created even when `return_plotly` is True
        plt.figure(figsize=figure_size)
        threshold_curves = self._get_threshold_curve_dataframe(score_threshold_range=score_threshold_range)
        if return_plotly:
            # one line color per rate column (the dataframe is melted to long format below)
            custom_colors = [
                hcolor.Colors.PASTEL_BLUE.value,
                hcolor.Colors.CUSTOM_GREEN.value,
                hcolor.Colors.YELLOW_PEPPER.value,
                hcolor.Colors.CRAIL.value,
                hcolor.Colors.CADMIUM_ORANGE.value,
            ]
            fig = px.line(
                data_frame=pd.melt(frame=threshold_curves, id_vars=['Score Threshold']),
                x='Score Threshold',
                y='value',
                color='variable',
                color_discrete_sequence=custom_colors,
                labels={
                    'variable': 'Rate Type',
                    'value': 'Rate'
                },
                height=550,
                width=550 * GOLDEN_RATIO,
                title="Tradeoffs Across Various Score Thresholds<br><sub>Black line is default threshold of 0.5.</sub>"  # pylint: disable=line-too-long # noqa
            )
            # NOTE(review): the plotly branch hard-codes the vertical line at 0.5, whereas the
            # matplotlib branch below draws it at `self.score_threshold` — confirm which is intended
            fig = fig.add_vline(x=0.5, line_color=hcolor.Colors.BLACK_SHADOW.value)
            return fig
        axis = sns.lineplot(x='Score Threshold', y='value', hue='variable',
                            data=pd.melt(frame=threshold_curves, id_vars=['Score Threshold']))
        axis.set_xticks(np.arange(score_threshold_range[0], score_threshold_range[1] + 0.1, 0.1))
        axis.set_yticks(np.arange(0, 1.1, .1))
        # mark the threshold actually configured on this evaluator
        plt.vlines(x=self.score_threshold, ymin=0, ymax=1, colors='black')
        plt.grid()
        plt.tight_layout()
def plot_precision_recall_tradeoff(self,
score_threshold_range: Tuple[float, float] = (0.1, 0.9),
figure_size: tuple = STANDARD_WIDTH_HEIGHT,
return_plotly: bool = False) -> Union[None,
_figure.Figure]:
"""Plots the tradeoff between precision (i.e. positive predict value) and recall (i.e. True Positive
Rate) for various score thresholds. (A score threshold is the value for which you would predict a
positive label if the value of the score is above the threshold (e.g. usually 0.5).
Args:
score_threshold_range:
range of score thresholds to plot (x-axis); tuple with minimum threshold in first index and
maximum threshold in second index.
figure_size:
tuple containing `(width, height)` of plot. The default height is defined by
`helpsk.plot.STANDARD_HEIGHT`, and the default width is
`helpsk.plot.STANDARD_HEIGHT / helpsk.plot.GOLDEN_RATIO`
return_plotly:
If True, return plotly object. Otherwise, use matplotlib and end function with call:
`plt.tight_layout()`
"""
plt.figure(figsize=figure_size)
threshold_curves = self._get_threshold_curve_dataframe(score_threshold_range=score_threshold_range)
threshold_curves = threshold_curves[['Score Threshold',
'True Pos. Rate (Recall)',
'Pos. Predictive Value (Precision)']]
if return_plotly:
custom_colors = [
hcolor.Colors.PASTEL_BLUE.value,
# hcolor.Colors.CUSTOM_GREEN.value,
hcolor.Colors.YELLOW_PEPPER.value,
# hcolor.Colors.CRAIL.value,
# hcolor.Colors.CADMIUM_ORANGE.value,
]
fig = px.line(
data_frame=pd.melt(frame=threshold_curves[['Score Threshold',
'True Pos. Rate (Recall)',
'Pos. Predictive Value (Precision)']],
id_vars=['Score Threshold']),
x='Score Threshold',
y='value',
color='variable',
color_discrete_sequence=custom_colors,
labels={
'variable': 'Rate',
'value': 'Value'
},
height=550,
width=550 * GOLDEN_RATIO,
title="Precision Recall Tradeoff<br><sub>Black line is default threshold of 0.5.</sub>"
)
fig = fig.add_vline(x=0.5, line_color=hcolor.Colors.BLACK_SHADOW.value)
return fig
axis = sns.lineplot(x='Score Threshold', y='value', hue='variable',
data=pd.melt(frame=threshold_curves, id_vars=['Score Threshold']))
axis.set_xticks(np.arange(score_threshold_range[0], score_threshold_range[1] + 0.1, 0.1))
axis.set_yticks(np.arange(0, 1.1, .1))
plt.vlines(x=self.score_threshold, ymin=0, ymax=1, colors='black')
plt.grid()
plt.tight_layout()
    def calculate_lift_gain(self,
                            num_buckets: int = 20,
                            return_style: bool = False,
                            include_all_info: bool = False) -> Union[pd.DataFrame, Styler]:
        """
        Returns a gain/lift table: one row per score percentile bucket.
        https://www.listendata.com/2014/08/excel-template-gain-and-lift-charts.html

        Gain is the % of positive (actual) events we have 'captured' i.e. located by looking at the
        top x% of predicted scores, such that the highest scores are looked at first.
        For example, if the percentile is `5%` and the gain value is `0.3`, we can say that if we randomly
        searched `5%` of the data, we would expect to uncover about `5%` of the positive events;
        however, we have uncovered 30% of events by searching the highest 5% of scores.
        Lift is simply the ratio of the percent of events that what we have uncovered for a given percentile
        of data (i.e. gain) divided by what we would have expected by random chance (i.e. the percentile).
        So in the previous example, we uncovered 30% of positive events by searching the top 5% of scores;
        whereas if we took a random sample, we would have only expected to find 5% of the positive events.
        The lift is `0.3 / 0.05` which is `6`; meaning we found `6` times the amount of positive events by
        searching the top 5% of scores, than if we were to randomly sample the data.

        Args:
            num_buckets:
                the number of percentile buckets to split the scores into.
                NOTE(review): the percentile labels are built as `range(100, 0, round(-100/num_buckets))`,
                which assumes `num_buckets` divides 100 evenly (e.g. 10, 20) — confirm other values work.
            return_style:
                if True, return styler object with bar formatting; else return dataframe
            include_all_info:
                if True, keep the intermediate count columns in addition to 'Gain' and 'Lift'
        """
        data = pd.DataFrame({
            'predicted_scores': self._predicted_scores,
            'actual_values': self._actual_values,
        })
        data.sort_values(['predicted_scores'], ascending=False, inplace=True)
        # .qcut gets percentiles; label each observation with the upper-percentile of its bucket
        bins = pd.qcut(x=data['predicted_scores'],
                       q=num_buckets,
                       labels=list(range(100, 0, round(-100 / num_buckets))))
        data['Percentile'] = bins

        def gain_grouping(group):
            # per-bucket counts: total observations and actual positive events
            results = {
                '# of Obs.': len(group.actual_values),
                '# of Pos. Events': sum(group.actual_values == 1)
            }
            return pd.Series(results, index=['# of Obs.', '# of Pos. Events'])

        gain_lift_data = data.groupby('Percentile').apply(gain_grouping)
        # append a zero-count sentinel row at percentile 0 so cumsum starts from zero;
        # it is dropped again after the cumulative columns are computed
        temp = pd.DataFrame({'# of Obs.': 0, '# of Pos. Events': 0}, index=[0])
        temp.index.names = ['Percentile']
        gain_lift_data = pd.concat([gain_lift_data, temp])
        gain_lift_data.sort_index(ascending=True, inplace=True)
        gain_lift_data['Cumul. Pos. Events'] = gain_lift_data['# of Pos. Events'].cumsum()
        gain_lift_data['Gain'] = gain_lift_data['Cumul. Pos. Events'] / self._actual_positives
        gain_lift_data = gain_lift_data.loc[~(gain_lift_data.index == 0), :]
        # lift = gain relative to the fraction of data searched (index is the percentile, 0-100)
        gain_lift_data['Lift'] = gain_lift_data['Gain'] / (gain_lift_data.index.values / 100)
        if not include_all_info:
            gain_lift_data = gain_lift_data[['Gain', 'Lift']]
        gain_lift_data = gain_lift_data.round(2)
        if return_style:
            gain_lift_data = gain_lift_data.style
            gain_lift_data.format(precision=2). \
                bar(subset='Gain', color=hcolor.Colors.PASTEL_BLUE.value,
                    vmin=0, vmax=1). \
                bar(subset='Lift', color=hcolor.Colors.PASTEL_BLUE.value)
        return gain_lift_data
    def plot_predicted_scores_histogram(self):
        """Plots a histogram of the predicted scores (i.e. the model's predicted probabilities)."""
        sns.histplot(self._predicted_scores)
        plt.tight_layout()
    def plot_actual_vs_predict_histogram(self):
        """Plots histograms of the predicted scores, faceted by the actual class label."""
        # map 0/1 back to the class names so the facet titles are readable
        actual_categories = pd.Series(self._actual_values).\
            replace({0: self._negative_class, 1: self._positive_class})
        axes = sns.displot(
            pd.DataFrame({
                'Predicted Score': self._predicted_scores,
                'Actual Value': actual_categories
            }),
            x='Predicted Score',
            col='Actual Value'
        )
        # NOTE(review): the red reference line is hard-coded at 0.5 rather than
        # `self.score_threshold` — confirm which is intended
        for axis in axes.axes.flat:
            axis.axvline(x=0.5, ymin=0, ymax=100, color='red')
        plt.tight_layout()
class RegressionEvaluator:
    """
    Evaluates models for regression (i.e. numeric outcome) problems.
    """
    def __init__(self,
                 actual_values: np.ndarray,
                 predicted_values: np.ndarray):
        """
        Args:
            actual_values:
                the actual values
            predicted_values:
                the predicted values
        """
        assert len(actual_values) == len(predicted_values)
        self._actual_values = actual_values
        self._predicted_values = predicted_values
        # residual convention: actual - predicted (positive residual => model under-predicted)
        self._residuals = actual_values - predicted_values
        self._standard_deviation = np.std(actual_values)
        self._mean_squared_error = float(np.mean(np.square(actual_values - predicted_values)))
        self._mean_absolute_error = float(np.mean(np.abs(actual_values - predicted_values)))
        self._r_squared = r2_score(y_true=actual_values, y_pred=predicted_values)

    @property
    def mean_absolute_error(self) -> float:
        """Mean Absolute Error"""
        return self._mean_absolute_error

    @property
    def mean_squared_error(self) -> float:
        """Mean Squared Error"""
        return self._mean_squared_error

    @property
    def root_mean_squared_error(self) -> float:
        """Root Mean Squared Error"""
        return np.sqrt(self.mean_squared_error)

    @property
    def rmse_to_st_dev(self) -> float:
        """The ratio of RMSE to the standard deviation of the actual values.
        Gives an indication of how large the errors are to the actual data.
        """
        return self.root_mean_squared_error / self._standard_deviation

    @property
    def r_squared(self) -> float:
        """R Squared"""
        return self._r_squared

    @property
    def total_observations(self) -> int:
        """The total number of observations i.e. sample size."""
        return len(self._actual_values)

    @property
    def all_metrics(self) -> dict:
        """Returns a dictionary of the most common error metrics for regression problems."""
        return {'Mean Absolute Error (MAE)': self.mean_absolute_error,
                'Root Mean Squared Error (RMSE)': self.root_mean_squared_error,
                'RMSE to Standard Deviation of Target': self.rmse_to_st_dev,
                'R Squared': self.r_squared,
                'Total Observations': self.total_observations}

    def all_metrics_df(self,
                       dummy_regressor_strategy: Union[str, list, None] = 'mean',
                       dummy_regressor_constant: Union[int] = 1,
                       return_style: bool = False,
                       round_by: Optional[int] = None) -> Union[pd.DataFrame, Styler]:
        """All of the metrics are returned as a DataFrame.

        Args:
            dummy_regressor_strategy:
                if not None, then returns column(s) corresponding to the scores from predictions of
                sklearn.dummy.DummyRegressor, based on the strategy (or strategies) provided. Valid values
                correspond to values of `strategy` parameter listed
                https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyRegressor.html
                If a list is passed in (e.g. ['prior', 'uniform'], then one score column per value is
                added.
                If None is passed, then no additional columns are added.
            dummy_regressor_constant:
                The explicit constant as predicted by the "constant" strategy for the
                DummyRegressor.
                This parameter is useful only for the "constant" dummy_regressor_strategy.
            return_style:
                if True, return styler object; else return dataframe
            round_by:
                the number of digits to round by; if None, then don't round
        """
        result = pd.DataFrame.from_dict(self.all_metrics, orient='index', columns=['Score'])
        score_columns = ['Score']
        if dummy_regressor_strategy:
            if isinstance(dummy_regressor_strategy, str):
                dummy_regressor_strategy = [dummy_regressor_strategy]
            for strategy in dummy_regressor_strategy:
                dummy = DummyRegressor(strategy=strategy, constant=dummy_regressor_constant)
                # https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyClassifier.html
                # "All strategies make predictions that ignore the input feature values passed as the X
                # argument to fit and predict. The predictions, however, typically depend on values observed
                # in the y parameter passed to fit."
                _ = dummy.fit(X=self._actual_values, y=self._actual_values)
                dummy_predictions = dummy.predict(X=self._actual_values)
                dummy_evaluator = RegressionEvaluator(actual_values=self._actual_values,
                                                      predicted_values=dummy_predictions)
                # recurse with dummy_regressor_strategy=None so no further dummy columns are added
                dummy_scores = dummy_evaluator.all_metrics_df(dummy_regressor_strategy=None,
                                                              return_style=False)
                column_name = f"Dummy ({strategy})"
                score_columns = score_columns + [column_name]
                dummy_scores = dummy_scores.rename(columns={'Score': column_name})
                result = pd.concat([result, dummy_scores], axis=1)
        # rounds only the first two rows by position (MAE and RMSE); the remaining rows are
        # formatted in the style branch below
        if round_by is not None:
            result.iloc[0:2] = result.iloc[0:2].round(round_by)
        if return_style:
            subset_scores = pd.IndexSlice[result.loc[['Mean Absolute Error (MAE)',
                                                      'Root Mean Squared Error (RMSE)'],
                                                     score_columns].index,
                                          score_columns]
            subset_secondary = pd.IndexSlice[result.loc[['RMSE to Standard Deviation of Target',
                                                         'R Squared'],
                                                        score_columns].index, score_columns]
            subset_total_observations = pd.IndexSlice[result.loc[['Total Observations'],
                                                                 score_columns].index, score_columns]
            result = result.style
            if round_by is not None:
                result = result.format(subset=subset_scores, thousands=',', precision=round_by)
            else:
                result = result.format(subset=subset_scores, thousands=',')
            result = result.format(subset=subset_secondary, precision=3)
            result = result.format(subset=subset_total_observations, thousands=',', precision=0)
        return result

    def plot_residuals_vs_fits(self, figure_size: tuple = STANDARD_WIDTH_HEIGHT):
        """Plots residuals vs fitted values

        Args:
            figure_size:
                tuple containing `(width, height)` of plot. The default height is defined by
                `helpsk.plot.STANDARD_HEIGHT`, and the default width is
                `helpsk.plot.STANDARD_HEIGHT / helpsk.plot.GOLDEN_RATIO`
        """
        # red lowess (locally-weighted regression) trend line over the residual scatter
        lowess = sm.nonparametric.lowess
        loess_points = lowess(self._residuals, self._predicted_values)
        loess_x, loess_y = zip(*loess_points)
        plt.figure(figsize=figure_size)
        plt.plot(loess_x, loess_y, color='r')
        plt.scatter(x=self._predicted_values, y=self._residuals, s=8, alpha=0.5)
        plt.title('Residuals vs. Fitted Values')
        plt.xlabel('Fitted Values')
        plt.ylabel('Residuals (Actual - Predicted)')

    def plot_predictions_vs_actuals(self, figure_size: tuple = STANDARD_WIDTH_HEIGHT):
        """Plots predictions vs actual values

        Args:
            figure_size:
                tuple containing `(width, height)` of plot. The default height is defined by
                `helpsk.plot.STANDARD_HEIGHT`, and the default width is
                `helpsk.plot.STANDARD_HEIGHT / helpsk.plot.GOLDEN_RATIO`
        """
        lowess = sm.nonparametric.lowess
        loess_points = lowess(self._predicted_values, self._actual_values)
        loess_x, loess_y = zip(*loess_points)
        plt.figure(figsize=figure_size)
        plt.plot(loess_x, loess_y, color='r', alpha=0.5, label='Loess (Predictions vs Actuals)')
        # y=x line: points on it are perfectly predicted
        plt.plot(self._actual_values, self._actual_values, color='b', alpha=0.5, label='Perfect Prediction')
        plt.scatter(x=self._actual_values, y=self._predicted_values, s=8, alpha=0.5)
        plt.title('Predicted Values vs. Actual Values')
        plt.xlabel('Actuals')
        plt.ylabel('Predicted')
        axis = plt.gca()
        handles, labels = axis.get_legend_handles_labels()
        axis.legend(handles, labels)
        plt.figtext(0.99, 0.01,
                    'Note: observations above blue line mean model is over-predicting; below means under-'
                    'predicting.',  # noqa
                    horizontalalignment='right')
        # unlike the other plot methods, this one returns the axis (callers may add to it)
        return axis

    def plot_residuals_vs_actuals(self, figure_size: tuple = STANDARD_WIDTH_HEIGHT):
        """Plots residuals vs actuals values

        Args:
            figure_size:
                tuple containing `(width, height)` of plot. The default height is defined by
                `helpsk.plot.STANDARD_HEIGHT`, and the default width is
                `helpsk.plot.STANDARD_HEIGHT / helpsk.plot.GOLDEN_RATIO`
        """
        lowess = sm.nonparametric.lowess
        loess_points = lowess(self._residuals, self._actual_values)
        loess_x, loess_y = zip(*loess_points)
        plt.figure(figsize=figure_size)
        plt.plot(loess_x, loess_y, color='r')
        plt.scatter(x=self._actual_values, y=self._residuals, s=8, alpha=0.5)
        plt.title('Residuals vs. Actual Values')
        plt.xlabel('Actual')
        plt.ylabel('Residuals (Actual - Predicted)')
        plt.figtext(0.99, 0.01,
                    'Note: Actual > Predicted => Under-predicting (positive residual); negative residuals '
                    'mean over-predicting',  # noqa
                    horizontalalignment='right')
class TwoClassModelComparison:
    """This class compares multiple models trained on Two Class (i.e. 0's/1's) prediction scenarios."""
    def __init__(self,
                 actual_values: np.ndarray,
                 predicted_scores: Dict[str, np.ndarray],
                 positive_class: str = 'Positive Class',
                 negative_class: str = 'Negative Class',
                 score_threshold: float = 0.5
                 ):
        """
        Args:
            actual_values:
                array of 0's and 1's
            predicted_scores:
                dictionary per model with key as the name of the model and value that is an array of
                decimal/float values from `predict_proba()`; NOT the actual class
            positive_class:
                string of the name/label of the positive class (i.e. value of 1). In other words, not
                'positive' in the sense of 'good' but 'positive' as in 'True/False Positive'.
            negative_class:
                string of the name/label of the negative class (i.e. value of 0). In other words, not
                'negative' in the sense of 'good' but 'negative' as in 'True/False Negative'.
            score_threshold:
                the score/probability threshold for turning scores into 0's and 1's and corresponding labels
        """
        assert isinstance(predicted_scores, dict)
        # every model must supply one score per actual value
        for values in predicted_scores.values():
            assert len(actual_values) == len(values)
        self._positive_class = positive_class
        self._negative_class = negative_class
        self._actual_values = actual_values
        self._predicted_scores = predicted_scores
        self.score_threshold = score_threshold
        # one TwoClassEvaluator per model, keyed by model name
        self._evaluators = {key: TwoClassEvaluator(actual_values=actual_values,
                                                   predicted_scores=value,
                                                   positive_class=positive_class,
                                                   negative_class=negative_class,
                                                   score_threshold=score_threshold)
                            for key, value in predicted_scores.items()}

    def all_metrics_df(self,
                       dummy_classifier_strategy: Union[str, list, None] = 'prior',
                       dummy_classifier_constant: Union[int] = 1,
                       return_style: bool = False,
                       round_by: Optional[int] = None) -> Union[pd.DataFrame, Styler]:
        """All of the metrics are returned as a DataFrame; one row per model, one column per metric.

        Args:
            dummy_classifier_strategy:
                if not None, then returns column(s) corresponding to the scores from predictions of
                sklearn.dummy.DummyClassifier, based on the strategy (or strategies) provided. Valid values
                correspond to values of `strategy` parameter listed
                https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyClassifier.html
                If a list is passed in (e.g. ['prior', 'uniform'], then one score column per value is
                added.
                If None is passed, then no additional columns are added.
            dummy_classifier_constant:
                The explicit constant as predicted by the "constant" strategy for the
                DummyClassifier.
                This parameter is useful only for the "constant" dummy_classifier_strategy.
            return_style:
                if True, return styler object; else return dataframe
            round_by:
                the number of digits to round by; if None, then don't round
        """
        result = None
        # only request the dummy-classifier column(s) for the last model so they appear exactly once
        last_key = list(self._evaluators.keys())[-1]
        for key, value in self._evaluators.items():
            dummy_strategy = dummy_classifier_strategy if key == last_key else None
            scores = value.all_metrics_df(
                return_explanations=False,
                dummy_classifier_strategy=dummy_strategy,
                dummy_classifier_constant=dummy_classifier_constant
            )
            scores = scores.rename(columns={'Score': key})
            result = pd.concat([result, scores], axis=1)
        # keep only the headline metrics, then flip so each model is a row
        result = result.loc[[
            'AUC', 'F1 Score',
            'True Positive Rate', 'True Negative Rate',
            'False Positive Rate', 'False Negative Rate',
            'Positive Predictive Value', 'Negative Predictive Value'
        ]]
        result = result.transpose()
        # `is not None` (rather than truthiness) so that `round_by=0` still rounds;
        # matches TwoClassEvaluator.all_metrics_df / RegressionEvaluator.all_metrics_df
        if round_by is not None:
            for column in result.columns:
                result[column] = result[column].round(round_by)
        if return_style:
            # higher is better for everything except the 'False ...' rates
            positive_scores = [x for x in result.columns if not x.startswith('False')]
            negative_scores = [x for x in result.columns if x.startswith('False')]
            result = result.style
            if round_by is not None:
                result = result.format(precision=round_by)
            result = result. \
                bar(subset=positive_scores, color=hcolor.Colors.PIGMENT_GREEN.value, vmin=0, vmax=1). \
                bar(subset=negative_scores, color=hcolor.Colors.POPPY.value, vmin=0, vmax=1)
        return result

    def plot_metrics_comparison(self,
                                dummy_classifier_strategy: Union[str, list, None] = 'prior',
                                dummy_classifier_constant: Union[int] = 1,
                                ) -> _figure.Figure:
        """
        Returns a Plotly object of a bar-chart of the metrics across all of the models.

        Args:
            dummy_classifier_strategy:
                if not None, then returns column(s) corresponding to the scores from predictions of
                sklearn.dummy.DummyClassifier, based on the strategy (or strategies) provided. Valid values
                correspond to values of `strategy` parameter listed
                https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyClassifier.html
                If a list is passed in (e.g. ['prior', 'uniform'], then one score column per value is
                added.
                If None is passed, then no additional columns are added.
            dummy_classifier_constant:
                The explicit constant as predicted by the "constant" strategy for the
                DummyClassifier.
                This parameter is useful only for the "constant" dummy_classifier_strategy.
        """
        score_df = self.all_metrics_df(
            dummy_classifier_strategy=dummy_classifier_strategy,
            dummy_classifier_constant=dummy_classifier_constant
        ).transpose()
        score_df = score_df.reset_index()
        colors = [e.value for e in hcolor.Colors]
        # one facet per metric, one bar per model within each facet
        fig = px.bar(
            data_frame=score_df.melt(id_vars='index'),
            y='variable',
            x='value',
            facet_col='index',
            facet_col_wrap=2,
            color='variable',
            color_discrete_sequence=colors,
            barmode='group',
            height=1000,
            labels={'index': 'Score'},
            title="Model Comparison"
        )
        fig.update_layout(showlegend=False)
        fig.update_yaxes(title=None)
        return fig

    def plot_roc_curves(self) -> _figure.Figure:
        """Returns a plotly object representing the ROC curves across all models."""
        result = None
        # stack each model's ROC curve into one long dataframe with a 'Model' column
        for key, value in self._evaluators.items():
            auc_df = value._get_auc_curve_dataframe()  # noqa
            auc_df['Model'] = key
            result = pd.concat([result, auc_df], axis=0)
        colors = [e.value for e in hcolor.Colors]
        fig = px.line(
            data_frame=result,
            x='False Positive Rate',
            y='True Positive Rate',
            color='Model',
            color_discrete_sequence=colors,
            height=550,
            width=550 * GOLDEN_RATIO,
            custom_data=['threshold', 'Model'],
            title="ROC Curve of Models",
        )
        # per model: overlay scatter points (legend suppressed to avoid duplicate entries) and a
        # large marker at the default 0.5 threshold
        for index in range(len(self._evaluators)):
            scatter_1 = px.scatter(
                data_frame=result,
                x='False Positive Rate',
                y='True Positive Rate',
                color='Model',
                color_discrete_sequence=colors,
                custom_data=['threshold', 'Model'],
            )
            scatter_1.data[index]['showlegend'] = False
            fig.add_trace(
                scatter_1.data[index]
            )
            query = f"threshold == 0.5 & Model == '{list(self._evaluators.keys())[index]}'"
            scatter_2 = px.scatter(
                data_frame=result.query(query),
                x='False Positive Rate',
                y='True Positive Rate',
                color='Model',
                color_discrete_sequence=[colors[index]] + colors,
                custom_data=['threshold', 'Model'],
                size=[2],
            )
            scatter_2.data[0]['showlegend'] = False
            fig.add_trace(
                scatter_2.data[0],
            )
        fig.update_traces(
            hovertemplate="<br>".join([
                "Model: %{customdata[1]}<br><br>"
                "False Positive Rate: %{x}",
                "True Positive Rate: %{y}",
                "Threshold: %{customdata[0]}",
            ])
        )
        return fig
| """This module contains helper functions when working with sklearn (scikit-learn) objects;
in particular, for evaluating models"""
# pylint: disable=too-many-lines
import math
import warnings
from re import match
from typing import Tuple, Union, Optional, List, Dict
import numpy as np
import pandas as pd
import scipy.stats as st
import seaborn as sns
from plotly.graph_objs import _figure # noqa
import plotly.express as px
import yaml
from matplotlib import pyplot as plt
from pandas.io.formats.style import Styler
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.metrics import confusion_matrix, roc_auc_score, r2_score
from sklearn.model_selection._search import BaseSearchCV # noqa
import helpsk.color as hcolor
import helpsk.pandas_style as hstyle
import helpsk.string as hstring
# pylint: disable=too-many-locals
from helpsk.pandas import get_numeric_columns, get_non_numeric_columns
from helpsk.exceptions import HelpskParamValueError
from helpsk.plot import STANDARD_WIDTH_HEIGHT, GOLDEN_RATIO
from helpsk.validation import assert_true
# Import statsmodels inside a temporary warnings filter: the import itself can emit
# warnings (see the linked statsmodels issue), which we suppress only for this import.
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=Warning)
    from statsmodels import api as sm  # https://github.com/statsmodels/statsmodels/issues/3814
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-public-methods
class SearchCVParser:
    """
    This class contains the logic to parse and extract information from a BaseSearchCV object (e.g.
    GridSearchCV, RandomizedSearchCV, BayesSearchCV)
    """

    # pylint: disable=too-many-arguments
    def __init__(self,
                 searcher: BaseSearchCV,
                 higher_score_is_better: bool = True,
                 run_description: str = "",
                 parameter_name_mappings: Union[dict, None] = None):
        """
        This object encapsulates the results from a SearchCV object (e.g.
        sklearn.model_selection.GridSearch/RandomSearch, skopt.BayesSearchCV). The results can then be
        converted to a dictionary, in a specific format with the intent to write the contents to a
        yaml file.

        At this time, this function does not capture the individual fold scores from the individual
        splits.

        Params:
            searcher:
                A `BaseSearchCV` object that has either used a string passed to the `scoring` parameter
                of the constructor (e.g. `GridSearchCV(..., scoring='auc', ...)` or a dictionary with
                metric names as keys and callables as values.

                An example of the dictionary option:

                    scores = {
                        'ROC/AUC': SCORERS['roc_auc'],
                        'F1': make_scorer(f1_score, greater_is_better=True),
                        'Pos. Pred. Val': make_scorer(precision_score, greater_is_better=True),
                        'True Pos. Rate': make_scorer(recall_score, greater_is_better=True),
                    }
                    grid_search = GridSearchCV(..., scoring=scores, ...)
            higher_score_is_better:
                If True, higher scores are better; if False, lower scores are better.
                A value of False assumes that the scores returned from sklearn are negative and will
                multiply the values by -1.
            run_description:
                An optional string to save in the dictionary
            parameter_name_mappings:
                A dictionary containing the parameter names returned by the searchCV object as keys
                (which should correspond to the path of the pipeline(s) corresponding to the parameter)
                and the new, friendlier, names that can be displayed in graphs and tables.

                For example:

                    {'model__max_features': 'max_features',
                     'model__n_estimators': 'n_estimators',
                     'prep__non_numeric__encoder__transformer': 'encoder',
                     'prep__numeric__impute__transformer': 'imputer',
                     'prep__numeric__scaling__transformer': 'scaler'}
        """
        if searcher is not None:  # check for None in the case that __init__ is called from `from_dict`
            self._cv_dict = SearchCVParser.\
                __search_cv_to_dict(searcher=searcher,
                                    higher_score_is_better=higher_score_is_better,
                                    run_description=run_description,
                                    parameter_name_mappings=parameter_name_mappings)
        else:
            self._cv_dict = None
        self._cv_dataframe = None  # lazily built/cached by `to_dataframe()`

    @classmethod
    def from_dict(cls, cv_dict):
        """This method creates a SearchCVParser from the dictionary previously created by
        `__search_cv_to_dict()`"""
        parser = cls(searcher=None, higher_score_is_better=None, run_description=None,  # noqa
                     parameter_name_mappings=None)
        parser._cv_dict = cv_dict
        return parser

    @classmethod
    def from_yaml_file(cls, yaml_file_name):
        """This method creates a SearchCVParser from a yaml file created by `to_yaml_file()`"""
        # explicit encoding so the yaml round-trips identically across platforms
        with open(yaml_file_name, 'r', encoding='utf-8') as file:
            cv_dict = yaml.safe_load(file)
        return SearchCVParser.from_dict(cv_dict=cv_dict)

    def to_yaml_file(self, yaml_file_name: str):
        """This method saves the self._cv_dict dictionary to a yaml file."""
        with open(yaml_file_name, 'w', encoding='utf-8') as file:
            yaml.dump(self._cv_dict, file, default_flow_style=False, sort_keys=False)

    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    @staticmethod
    def __search_cv_to_dict(searcher: BaseSearchCV,
                            higher_score_is_better: bool = True,
                            run_description: str = "",
                            parameter_name_mappings: Union[dict, None] = None) -> dict:
        """This extracts the information from a BaseSearchCV object and converts it to a dictionary."""
        def string_if_not_number(obj):
            # yaml can't serialize arbitrary objects (e.g. transformers); keep numbers, stringify rest
            if isinstance(obj, (int, float, complex)):
                return obj
            return str(obj)
        cv_results_dict = {
            'description': run_description,
            'cross_validation_type': str(type(searcher)),
            'higher_score_is_better': higher_score_is_better
        }
        if isinstance(searcher.scoring, dict):
            score_names = list(searcher.scoring.keys())
        elif isinstance(searcher.scoring, str):
            score_names = [searcher.scoring]
        else:
            message = 'The `searcher` does not have a string or dictionary .scoring property. Cannot ' \
                      'extract scores.'
            raise HelpskParamValueError(message)
        # get number of splits (e.g. 5 fold 2 repeat cross validation has 10 splits)
        # I could check the .cv param of the searcher object but not sure all types of cv objects have
        # the same parameters e.g. searcher.cv.n_repeats
        # if there is only 1 score, we need to look for e.g. "split0_test_score"
        # if there are multiple scores we need to look for e.g. "split0_test_ROC/AUC" but we don't want
        # to duplicate the counts e.g. we don't want to also capture "split0_test_True Pos. Rate"
        if len(score_names) == 1:
            split_score_matching_string = "split\\d_test_score"
        else:
            split_score_matching_string = "split\\d_test_" + score_names[0]
        number_of_splits = len([x for x in searcher.cv_results_.keys()
                                if bool(match(split_score_matching_string, x))])
        cv_results_dict['number_of_splits'] = number_of_splits
        cv_results_dict['score_names'] = score_names
        cv_results_dict['parameter_names'] = [key for key, value
                                              in searcher.cv_results_['params'][0].items()]
        if parameter_name_mappings:
            for key in parameter_name_mappings.keys():
                assert_true(key in cv_results_dict['parameter_names'])
            cv_results_dict['parameter_names_mapping'] = parameter_name_mappings
        number_of_iterations = len(searcher.cv_results_['mean_fit_time'])
        # convert test scores to dictionaries
        if len(score_names) == 1:
            test_score_ranking = searcher.cv_results_['rank_test_score'].tolist()
            test_score_averages = searcher.cv_results_['mean_test_score'].tolist()
            test_score_standard_deviations = searcher.cv_results_['std_test_score'].tolist()
            assert_true(len(test_score_ranking) == number_of_iterations)
            assert_true(len(test_score_averages) == number_of_iterations)
            assert_true(len(test_score_standard_deviations) == number_of_iterations)
            cv_results_dict['test_score_rankings'] = {score_names[0]: test_score_ranking}
            cv_results_dict['test_score_averages'] = {score_names[0]: test_score_averages}
            cv_results_dict['test_score_standard_deviations'] = {score_names[0]:
                                                                 test_score_standard_deviations}
        else:
            ranking_dict = {}
            averages_dict = {}
            standard_deviations_dict = {}
            for score in score_names:
                rankings = searcher.cv_results_['rank_test_' + score].tolist()
                averages = searcher.cv_results_['mean_test_' + score].tolist()
                standard_deviations = searcher.cv_results_['std_test_' + score].tolist()
                assert_true(len(rankings) == number_of_iterations)
                assert_true(len(averages) == number_of_iterations)
                assert_true(len(standard_deviations) == number_of_iterations)
                ranking_dict[score] = rankings
                averages_dict[score] = averages
                standard_deviations_dict[score] = standard_deviations
            cv_results_dict['test_score_rankings'] = ranking_dict
            cv_results_dict['test_score_averages'] = averages_dict
            cv_results_dict['test_score_standard_deviations'] = standard_deviations_dict
        # if higher_score_is_better is False, sklearn will return negative numbers; I want actual values
        if not higher_score_is_better:
            averages = cv_results_dict['test_score_averages']
            for key in averages.keys():
                cv_results_dict['test_score_averages'][key] = [-1 * x for x in averages[key]]
        # convert training scores to dictionaries, if training scores exists
        # i.e. if return_train_score=True for the SearchCV object
        if 'mean_train_score' in searcher.cv_results_ \
                or 'mean_train_' + score_names[0] in searcher.cv_results_:
            if len(score_names) == 1:
                train_score_averages = searcher.cv_results_['mean_train_score'].tolist()
                train_score_standard_deviations = searcher.cv_results_['std_train_score'].tolist()
                assert_true(len(train_score_averages) == number_of_iterations)
                assert_true(len(train_score_standard_deviations) == number_of_iterations)
                cv_results_dict['train_score_averages'] = {score_names[0]: train_score_averages}
                cv_results_dict['train_score_standard_deviations'] = {score_names[0]:
                                                                      train_score_standard_deviations}
            else:
                averages_dict = {}
                standard_deviations_dict = {}
                for score in score_names:
                    averages = searcher.cv_results_['mean_train_' + score].tolist()
                    standard_deviations = searcher.cv_results_['std_train_' + score].tolist()
                    assert_true(len(averages) == number_of_iterations)
                    assert_true(len(standard_deviations) == number_of_iterations)
                    averages_dict[score] = averages
                    standard_deviations_dict[score] = standard_deviations
                cv_results_dict['train_score_averages'] = averages_dict
                cv_results_dict['train_score_standard_deviations'] = standard_deviations_dict
            # if higher_score_is_better is False, sklearn will return negative numbers; I want actual
            # values
            if not higher_score_is_better:
                averages = cv_results_dict['train_score_averages']
                for key in averages.keys():
                    cv_results_dict['train_score_averages'][key] = [-1 * x for x in averages[key]]
        assert_true(len(searcher.cv_results_['params']) == number_of_iterations)
        cv_results_dict['parameter_iterations'] = [
            {key: string_if_not_number(value)
             for key, value in searcher.cv_results_['params'][index].items()}
            for index in range(len(searcher.cv_results_['params']))
        ]
        fit_time_averages = searcher.cv_results_['mean_fit_time'].tolist()
        fit_time_standard_deviations = searcher.cv_results_['std_fit_time'].tolist()
        score_time_averages = searcher.cv_results_['mean_score_time'].tolist()
        score_time_standard_deviations = searcher.cv_results_['std_score_time'].tolist()
        assert_true(len(fit_time_averages) == number_of_iterations)
        assert_true(len(fit_time_standard_deviations) == number_of_iterations)
        assert_true(len(score_time_averages) == number_of_iterations)
        assert_true(len(score_time_standard_deviations) == number_of_iterations)
        cv_results_dict['timings'] = {'fit time averages': fit_time_averages,
                                      'fit time standard deviations': fit_time_standard_deviations,
                                      'score time averages': score_time_averages,
                                      'score time standard deviations': score_time_standard_deviations}
        return cv_results_dict

    def to_dataframe(self, sort_by_score: bool = True):
        """This function converts the score information from the SearchCV object into a pd.DataFrame.

        Params:
            sort_by_score:
                if True, sorts the dataframe starting with the best (primary) score to the worst score.
                Secondary scores are not considered.

        Returns:
            a DataFrame containing score information for each cross-validation iteration. A single row
            corresponds to one iteration (i.e. one set of hyper-parameters that were cross-validated).
        """
        if self._cv_dataframe is None:
            for score_name in self.score_names:
                # NOTE: the confidence level is passed positionally for SciPy compatibility; the keyword
                # was renamed from `alpha` to `confidence` in SciPy 1.9 and `alpha` was removed in 1.11,
                # so `st.t.interval(alpha=0.95, ...)` raises a TypeError on current SciPy.
                confidence_intervals = st.t.interval(0.95,  # confidence level
                                                     # number_of_splits is sample-size
                                                     df=self.number_of_splits - 1,  # degrees of freedom
                                                     loc=self.test_score_averages[score_name],
                                                     scale=self.score_standard_errors(score_name=score_name))
                # confidence intervals are added for every score (one set of columns per score)
                self._cv_dataframe = pd.concat([
                    self._cv_dataframe,
                    pd.DataFrame({score_name + " Mean": self.test_score_averages[score_name],
                                  score_name + " 95CI.LO": confidence_intervals[0],
                                  score_name + " 95CI.HI": confidence_intervals[1]})
                    ],
                    axis=1
                )
            self._cv_dataframe = pd.concat([self._cv_dataframe,
                                            pd.DataFrame.from_dict(self.parameter_iterations)],  # noqa
                                           axis=1)
            if self.parameter_names_mapping:
                self._cv_dataframe = self._cv_dataframe.rename(columns=self.parameter_names_mapping)
        copy = self._cv_dataframe.copy(deep=True)
        if sort_by_score:
            copy = copy.iloc[self.primary_score_best_indexes]
        return copy

    def to_formatted_dataframe(self,
                               round_by: int = 3,
                               num_rows: int = 50,
                               primary_score_only: bool = False,
                               exclude_no_variance_params: bool = True,
                               return_style: bool = True,
                               sort_by_score: bool = True) -> Union[pd.DataFrame, Styler]:
        """This function converts the score information from the SearchCV object into a pd.DataFrame or
        a Styler object, formatted accordingly.

        The Hyper-Parameter columns will be highlighted in blue where the primary
        score (i.e. first column) for the iteration (i.e. the row i.e. the combination of
        hyper-parameters that were cross validated) is within 1 standard error of the top primary score
        (i.e. first column first row).

        Args:
            round_by:
                the number of digits to round by for the score columns (does not round the parameter
                columns)
            num_rows:
                the number of rows to return in the resulting DataFrame.
            primary_score_only:
                if True, then only include the primary score.
            exclude_no_variance_params:
                if True, exclude columns that only have 1 unique value
            return_style:
                If True, return Styler object, else return pd.DataFrame
            sort_by_score:
                if True, sorts the dataframe starting with the best (primary) score to the worst score.
                Secondary scores are not considered.

        Returns:
            Returns either pd.DataFrame or pd.DataFrame.Styler.
        """
        cv_dataframe = self.to_dataframe(sort_by_score=sort_by_score)
        cv_dataframe = cv_dataframe.head(num_rows)
        if exclude_no_variance_params:
            columns_to_drop = [x for x in self.parameter_names if len(cv_dataframe[x].unique()) == 1]
            cv_dataframe = cv_dataframe.drop(columns=columns_to_drop)
        score_columns = list(cv_dataframe.columns[cv_dataframe.columns.str.endswith((' Mean',
                                                                                     ' 95CI.LO',
                                                                                     ' 95CI.HI'))])
        if primary_score_only:
            columns_to_drop = [x for x in score_columns if not x.startswith(self.primary_score_name)]
            cv_dataframe = cv_dataframe.drop(columns=columns_to_drop)
        cv_dataframe = cv_dataframe.round(dict(zip(score_columns, [round_by] * len(score_columns))))
        final_columns = cv_dataframe.columns  # save for style logic
        if return_style:
            cv_dataframe = cv_dataframe.style
            for score in self.score_names:
                mean_key = score + ' Mean'
                ci_low_key = score + ' 95CI.LO'
                ci_high_key = score + ' 95CI.HI'
                if mean_key in final_columns:
                    cv_dataframe. \
                        bar(subset=[mean_key], color=hcolor.Colors.PIGMENT_GREEN.value)
                if ci_low_key in final_columns:
                    cv_dataframe. \
                        bar(subset=[ci_high_key], color=hcolor.GRAY). \
                        pipe(hstyle.bar_inverse, subset=[ci_low_key], color=hcolor.GRAY)
            cv_dataframe.pipe(hstyle.format, round_by=round_by, hide_index=True)
            # highlight iterations whose primary score (i.e. first column of `results` dataframe) is
            # within 1 standard error of the top primary score (i.e. first column first row).
            # pylint: disable=invalid-name, unused-argument
            def highlight_cols(s):  # noqa
                return 'background-color: %s' % hcolor.Colors.PASTEL_BLUE.value
            # we might have removed columns (e.g. that don't have any variance) so check that the
            # columns were in the final set
            columns_to_highlight = [x for x in self.parameter_names if x in final_columns]
            # NOTE: `Styler.applymap` was renamed to `Styler.map` in pandas 2.1; kept here for
            # compatibility with older pandas versions.
            cv_dataframe.applymap(highlight_cols,
                                  subset=pd.IndexSlice[self.indexes_within_1_standard_error,
                                                       columns_to_highlight])
        return cv_dataframe

    ####
    # The following properties expose the highest levels of the underlying dictionary/yaml
    ####
    @property
    def description(self):
        """the description passed to `run_description`."""
        return self._cv_dict['description']

    @property
    def higher_score_is_better(self):
        """The value passed to `higher_score_is_better`."""
        return self._cv_dict['higher_score_is_better']

    @property
    def cross_validation_type(self) -> str:
        """The string representation of the SearchCV object."""
        return self._cv_dict['cross_validation_type']

    @property
    def number_of_splits(self) -> int:
        """This is the number of CV folds. For example, a 5-fold 2-repeat CV has 10 splits."""
        return self._cv_dict['number_of_splits']

    @property
    def score_names(self) -> list:
        """Returns a list of the names of the scores"""
        return self._cv_dict['score_names']

    @property
    def parameter_names_original(self) -> list:
        """Returns the original parameter names (i.e. the path generated by the scikit-learn
        pipelines."""
        return self._cv_dict['parameter_names']

    @property
    def parameter_names(self) -> list:
        """This property returns either the original parameter names if no `parameter_names_mapping`
        was provided, or it returns the new parameter names (i.e. the values from
        `parameter_names_mapping`)."""
        if self.parameter_names_mapping:
            return list(self.parameter_names_mapping.values())
        return self.parameter_names_original

    @property
    def parameter_names_mapping(self) -> dict:
        """The dictionary passed to `parameter_name_mappings`."""
        return self._cv_dict.get('parameter_names_mapping')

    @property
    def test_score_rankings(self) -> dict:
        """The rankings of each of the test scores, from the searcher.cv_results_ object."""
        return self._cv_dict['test_score_rankings']

    @property
    def test_score_averages(self) -> dict:
        """The test score averages, from the searcher.cv_results_ object."""
        return self._cv_dict['test_score_averages']

    @property
    def test_score_standard_deviations(self) -> dict:
        """The test score standard deviations, from the searcher.cv_results_ object."""
        return self._cv_dict['test_score_standard_deviations']

    @property
    def train_score_averages(self) -> dict:
        """The training score averages, from the searcher.cv_results_ object, if provided."""
        return self._cv_dict.get('train_score_averages')

    @property
    def train_score_standard_deviations(self) -> dict:
        """The training score standard deviations, from the searcher.cv_results_ object, if
        provided."""
        return self._cv_dict.get('train_score_standard_deviations')

    @property
    def parameter_iterations(self) -> list:
        """The "iterations" i.e. the hyper-parameter combinations in order of execution."""
        return self._cv_dict['parameter_iterations']

    def iteration_labels(self, order_from_best_to_worst=True) -> List[str]:
        """An iteration is a set of hyper-parameters that were cross validated. The corresponding label
        for each iteration is a single string containing all of the hyper-parameter names and values in
        the format of `{param1: value1, param2: value2}`.

        Params:
            order_from_best_to_worst: if True, returns the labels in order from the best score to the
            worst score, which should match the ordered of .to_dataframe() or
            .to_formatted_dataframe()`. If False, returns the labels in order that they were ran by the
            cross validation object.

        Returns:
            a pd.Series the same length as `number_of_trials` containing a str
        """
        def create_hyper_param_labels(iteration) -> list:
            """Creates a list of strings that represent the name/value pair for each hyper-parameter."""
            return [f"{self.parameter_names_mapping[x] if self.parameter_names_mapping else x}: {iteration[x]}"  # pylint: disable=line-too-long  # noqa
                    for x in self.parameter_names_original]
        # create_hyper_param_labels(iteration=self.parameter_iterations[0])

        def create_trial_label(iteration) -> str:
            return f"{{{hstring.collapse(create_hyper_param_labels(iteration), separate=', ')}}}"
        # create_trial_label(iteration=self.parameter_iterations[0])
        labels = [create_trial_label(x) for x in self.parameter_iterations]
        if order_from_best_to_worst:
            labels = [x for _, x in sorted(zip(self.primary_score_iteration_ranking, labels))]
        return labels

    @property
    def timings(self) -> dict:
        """The timings providing by searcher.cv_results_."""
        return self._cv_dict['timings']

    ####
    # The following properties are additional helpers
    ####
    @property
    def number_of_iterations(self) -> int:
        """"A single trial contains the cross validation runs for a single set of hyper-parameters. The
        'number of trials' is basically the number of combinations of different hyper-parameters that
        were cross validated."""
        return len(self.parameter_iterations)

    @property
    def numeric_parameters(self) -> List[str]:
        """The subset of `parameter_names` whose columns are numeric in `to_dataframe()`."""
        return [x for x in get_numeric_columns(dataframe=self.to_dataframe())
                if x in self.parameter_names]

    @property
    def non_numeric_parameters(self) -> List[str]:
        """The subset of `parameter_names` whose columns are non-numeric in `to_dataframe()`."""
        return [x for x in get_non_numeric_columns(dataframe=self.to_dataframe())
                if x in self.parameter_names]

    @property
    def number_of_scores(self) -> int:
        """The number of scores passed to the SearchCV object"""
        return len(self.score_names)

    @property
    def primary_score_name(self) -> str:
        """The first scorer passed to the SearchCV will be treated as the primary score. This property
        returns the name of the score."""
        return self.score_names[0]

    @property
    def primary_score_averages(self) -> np.array:
        """The first scorer passed to the SearchCV will be treated as the primary score. This property
        returns the average score (across all splits) for each iteration. Note that the average scores
        are the weighted averages
        https://stackoverflow.com/questions/44947574/what-is-the-meaning-of-mean-test-score-in-cv-result"""
        return np.array(self.test_score_averages[self.primary_score_name])

    def score_standard_errors(self, score_name: str) -> np.array:
        """Returns the standard error associated with the mean score of each iteration, for the score
        named by `score_name` (standard deviation divided by the square root of the number of
        splits)."""
        score_standard_deviations = self.test_score_standard_deviations[score_name]
        return np.array(score_standard_deviations) / math.sqrt(self.number_of_splits)

    @property
    def primary_score_iteration_ranking(self) -> np.array:
        """The ranking of the corresponding index, in terms of best to worst score.

        e.g. [5, 6, 7, 8, 3, 4, 1, 2]

        This means that the 6th index/iteration had the highest ranking (1); and that the 3rd index had
        the worst ranking (8)

        This differs from `primary_score_best_indexes` which returns the order of indexes from best to
        worst. So in the example above, the first value returned in the `primary_score_best_indexes`
        array would be 6 because the best score is at index 6. The last value in the array 3, because
        the worst score is at index 3.

        Note that `primary_score_iteration_ranking` starts at 1 while primary_score_best_indexes starts
        at 0.
        """
        return np.array(self.test_score_rankings[self.primary_score_name])

    @property
    def primary_score_best_indexes(self) -> np.array:
        """The indexes of best to worst primary scores. See documentation for
        `primary_score_iteration_ranking` to understand the differences between the two properties."""
        return np.argsort(self.primary_score_iteration_ranking)

    @property
    def best_primary_score_index(self) -> int:
        """The index of best primary score."""
        return self.primary_score_best_indexes[0]

    @property
    def best_primary_score_params(self) -> dict:
        """
        The "best" score (could be the highest or lowest depending on `higher_score_is_better`)
        associated with the primary score.
        """
        best_params = self.parameter_iterations[self.best_primary_score_index]
        if self.parameter_names_mapping:
            best_params = {self.parameter_names_mapping[key]: value
                           for key, value in best_params.items()}
        return best_params

    @property
    def best_primary_score(self) -> float:
        """
        The "best" score (could be the highest or lowest depending on `higher_score_is_better`)
        associated with the primary score.
        """
        return self.primary_score_averages[self.best_primary_score_index]

    @property
    def best_primary_score_standard_error(self) -> float:
        """The standard error associated with the best score of the primary scorer"""
        return self.score_standard_errors(score_name=self.primary_score_name)[self.best_primary_score_index]

    @property
    def indexes_within_1_standard_error(self) -> list:
        """Returns the iteration indexes where the primary scores (i.e. first scorer
        passed to SearchCV object; i.e. first column of the to_dataframe() DataFrame) are within 1
        standard error of the highest primary score."""
        cv_dataframe = self.to_dataframe(sort_by_score=True)
        if self.higher_score_is_better:
            return list(cv_dataframe.index[cv_dataframe.iloc[:, 0] >=
                                           self.best_primary_score - self.best_primary_score_standard_error])
        return list(cv_dataframe.index[cv_dataframe.iloc[:, 0] <=
                                       self.best_primary_score + self.best_primary_score_standard_error])

    @property
    def fit_time_averages(self) -> np.array:
        """
        Returns a list of floats; one value for each iteration (i.e. a single set of hyper-params).
        Each value is the average number of seconds that the iteration took to fit the model, per split
        (i.e. the average fit time of all splits).
        """
        return np.array(self.timings['fit time averages'])

    @property
    def fit_time_standard_deviations(self) -> np.array:
        """
        Returns a list of floats; one value for each iteration (i.e. a single set of hyper-params).
        Each value is the standard deviation of seconds that the iteration took to fit the model, per
        split (i.e. the standard deviation of fit time across all splits).
        """
        return np.array(self.timings['fit time standard deviations'])

    @property
    def score_time_averages(self) -> np.array:
        """
        Returns a list of floats; one value for each iteration (i.e. a single set of hyper-params).
        Each value is the average number of seconds that the iteration took to score the model, per
        split (i.e. the average score time of all splits).
        """
        return np.array(self.timings['score time averages'])

    @property
    def score_time_standard_deviations(self) -> np.array:
        """
        Returns a list of floats; one value for each iteration (i.e. a single set of hyper-params).
        Each value is the standard deviation of seconds that the iteration took to score the model, per
        split (i.e. the standard deviation of score time across all splits).
        """
        return np.array(self.timings['score time standard deviations'])

    @property
    def iteration_fit_times(self) -> np.array:
        """For each iteration, it is the amount of time it took to fit the model.

        Calculated by Average fit time for each iteration multiplied by the number of splits per
        iteration.

            self.fit_time_averages * self.number_of_splits

        Returns:
            array containing the fit time for each iteration
        """
        return self.fit_time_averages * self.number_of_splits

    @property
    def fit_time_total(self) -> float:
        """Total fit time across all iterations."""
        return float(np.sum(self.iteration_fit_times))

    @property
    def iteration_score_times(self) -> np.array:
        """For each iteration, it is the amount of time it took to score the model.

        Calculated by Average score time for each iteration multiplied by the number of splits per
        iteration.

            self.score_time_averages * self.number_of_splits

        Returns:
            array containing the score time for each iteration
        """
        return self.score_time_averages * self.number_of_splits

    @property
    def score_time_total(self) -> float:
        """Total score time across all iterations."""
        return float(np.sum(self.iteration_score_times))

    @property
    def average_time_per_trial(self) -> float:
        """Average time per trial"""
        return float(np.mean(self.iteration_fit_times + self.iteration_score_times))

    @property
    def total_time(self) -> float:
        """Total time it took across all trials"""
        return self.fit_time_total + self.score_time_total
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class TwoClassEvaluator:
"""This class calculates various metrics for Two Class (i.e. 0's/1's) prediction scenarios."""
def __init__(self,
             actual_values: np.ndarray,
             predicted_scores: np.ndarray,
             positive_class: str = 'Positive Class',
             negative_class: str = 'Negative Class',
             score_threshold: float = 0.5
             ):
    """
    Args:
        actual_values:
            array of 0's and 1's
        predicted_scores:
            array of decimal/float values from `predict_proba()`; NOT the actual class
        positive_class:
            string of the name/label of the positive class (i.e. value of 1). In other words, not
            'positive' in the sense of 'good' but 'positive' as in 'True/False Positive'.
        negative_class:
            string of the name/label of the negative class (i.e. value of 0). In other words, not
            'negative' in the sense of 'good' but 'negative' as in 'True/False Negative'.
        score_threshold:
            the score/probability threshold for turning scores into 0's and 1's and corresponding
            labels

    Raises:
        HelpskParamValueError: if `actual_values` contains values other than exactly {0, 1}, or if
            `predicted_scores` contains values outside [0, 1].
    """
    assert len(actual_values) == len(predicted_scores)
    # np.array_equal avoids the shape-mismatch comparison that
    # `all(np.unique(...) == [0, 1])` hits when only one class is present, so single-class
    # input now raises the intended HelpskParamValueError instead of a numpy error.
    if not np.array_equal(np.unique(actual_values), [0, 1]):
        message = f"Values of `actual_values` should 0 or 1. Found `{np.unique(actual_values)}`"
        raise HelpskParamValueError(message)
    if not all(np.logical_and(predicted_scores >= 0, predicted_scores <= 1)):
        message = "Values of `predicted_scores` should be between 0 and 1."
        raise HelpskParamValueError(message)
    self._positive_class = positive_class
    self._negative_class = negative_class
    self._actual_values = actual_values
    self._predicted_scores = predicted_scores
    self.score_threshold = score_threshold
    # scores strictly above the threshold are labeled as the positive class (1)
    predicted_values = np.where(predicted_scores > self.score_threshold, 1, 0)
    self._confusion_matrix = confusion_matrix(y_true=actual_values, y_pred=predicted_values)
    self.sample_size = len(actual_values)
    assert self.sample_size == self._confusion_matrix.sum()
    true_negatives, false_positives, false_negatives, true_positives = self._confusion_matrix.ravel()
    self._actual_positives = true_positives + false_negatives
    assert self._actual_positives == sum(self._actual_values == 1)
    self._actual_negatives = true_negatives + false_positives
    self._true_negatives = true_negatives
    self._false_positives = false_positives
    self._false_negatives = false_negatives
    self._true_positives = true_positives
    self.auc = roc_auc_score(y_true=actual_values, y_score=predicted_scores)
@property
def true_positive_rate(self) -> float:
"""True Positive Rate"""
return 0 if self._actual_positives == 0 else self._true_positives / self._actual_positives
@property
def true_negative_rate(self) -> float:
"""True Negative Rate i.e. Specificity"""
return 0 if self._actual_negatives == 0 else self._true_negatives / self._actual_negatives
@property
def false_negative_rate(self) -> float:
"""False Negative Rate"""
return 0 if self._actual_positives == 0 else self._false_negatives / self._actual_positives
@property
def false_positive_rate(self) -> float:
"""False Positive Rate"""
return 0 if self._actual_negatives == 0 else self._false_positives / self._actual_negatives
@property
def accuracy(self) -> Union[float, None]:
"""accuracy"""
return None if self.sample_size == 0 else \
(self._true_negatives + self._true_positives) / self.sample_size
@property
def error_rate(self) -> Union[float, None]:
"""error_rate"""
return None if self.sample_size == 0 else \
(self._false_positives + self._false_negatives) / self.sample_size
@property
def positive_predictive_value(self) -> float:
"""Positive Predictive Value i.e. Precision"""
return 0 if (self._true_positives + self._false_positives) == 0 else \
self._true_positives / (self._true_positives + self._false_positives)
@property
def negative_predictive_value(self) -> float:
"""Negative Predictive Value"""
return 0 if (self._true_negatives + self._false_negatives) == 0 else \
self._true_negatives / (self._true_negatives + self._false_negatives)
@property
def prevalence(self) -> Union[float, None]:
"""Prevalence"""
return None if self.sample_size == 0 else \
self._actual_positives / self.sample_size
@property
def kappa(self) -> Union[float, None]:
    """Cohen's Kappa: agreement between predicted and actual labels beyond what chance alone
    would produce. Returns None when the sample is empty or when no negatives were predicted
    (which would make the chance-agreement denominator degenerate)."""
    if self.sample_size == 0 or \
            ((self._true_negatives + self._false_negatives) / self.sample_size) == 0:
        return None
    # proportion of the actual agreements
    # add the proportion of all instances where the predicted type and actual type agree
    pr_a = (self._true_negatives + self._true_positives) / self.sample_size
    # probability of both predicted and actual being negative
    p_negative_prediction_and_actual = \
        ((self._true_negatives + self._false_positives) / self.sample_size) * \
        ((self._true_negatives + self._false_negatives) / self.sample_size)
    # probability of both predicted and actual being positive
    p_positive_prediction_and_actual = \
        self.prevalence * ((self._false_positives + self._true_positives) / self.sample_size)
    # probability that chance alone would lead the predicted and actual values to match, under the
    # assumption that both are selected randomly (i.e. implies independence) according to the observed
    # proportions (probability of independent events = P(A & B) == P(A) * P(B)
    pr_e = p_negative_prediction_and_actual + p_positive_prediction_and_actual
    # kappa scales observed agreement (pr_a) against chance agreement (pr_e)
    return (pr_a - pr_e) / (1 - pr_e)
@property
def f1_score(self) -> float:
"""F1 Score
https://en.wikipedia.org/wiki/F-score
"""
return self.fbeta_score(beta=1)
def fbeta_score(self, beta: float) -> float:
    """
    Weighted harmonic mean of precision and recall.

    :param beta: The `beta` parameter determines the weight of precision in the combined score.
        `beta < 1` lends more weight to precision (i.e. positive predictive value), while
        `beta > 1` favors recall (i.e. true positive rate)
        (`beta -> 0` considers only precision, `beta -> inf` only recall).
        http://scikit-learn.org/stable/modules/generated/sklearn.metrics.fbeta_score.html
    :return: the F-beta score; 0 when precision/recall are unavailable or both zero.
    """
    precision = self.positive_predictive_value
    recall = self.sensitivity
    if precision is None or recall is None or (precision + recall) == 0:
        return 0
    weight = beta ** 2
    return (1 + weight) * (precision * recall) / ((weight * precision) + recall)
@property
def sensitivity(self) -> float:
    """Sensitivity — alias for the True Positive Rate (a.k.a. Recall)."""
    return self.true_positive_rate
@property
def specificity(self) -> float:
    """Specificity — alias for the True Negative Rate."""
    return self.true_negative_rate
@property
def precision(self) -> float:
    """Precision — alias for the Positive Predictive Value."""
    return self.positive_predictive_value
@property
def recall(self) -> float:
    """Recall i.e. True Positive Rate"""
    # simple alias; delegates to `true_positive_rate`
    return self.true_positive_rate
@property
def all_metrics(self) -> dict:
    """All of the metrics are returned as a dictionary.

    Each value is a `(score, explanation)` tuple: the numeric metric followed by a
    human-readable message describing the metric in terms of the underlying
    confusion-matrix counts and class labels.
    """
    auc_message = 'Area under the ROC curve (true pos. rate vs false pos. rate); ' \
                  'ranges from 0.5 (purely random classifier) to 1.0 (perfect classifier)'
    tpr_message = f'{self.true_positive_rate:.1%} of positive instances were correctly identified.; ' \
                  f'i.e. {self._true_positives} "{self._positive_class}" labels were correctly identified ' \
                  f'out of {self._actual_positives} instances; a.k.a Sensitivity/Recall'
    tnr_message = f'{self.true_negative_rate:.1%} of negative instances were correctly identified.; ' \
                  f'i.e. {self._true_negatives} "{self._negative_class}" labels were correctly identified ' \
                  f'out of {self._actual_negatives} instances'
    fpr_message = f'{self.false_positive_rate:.1%} of negative instances were incorrectly identified ' \
                  f'as positive; ' \
                  f'i.e. {self._false_positives} "{self._negative_class}" labels were incorrectly ' \
                  f'identified as "{self._positive_class}", out of {self._actual_negatives} instances'
    fnr_message = f'{self.false_negative_rate:.1%} of positive instances were incorrectly identified ' \
                  f'as negative; ' \
                  f'i.e. {self._false_negatives} "{self._positive_class}" labels were incorrectly ' \
                  f'identified as "{self._negative_class}", out of {self._actual_positives} instances'
    ppv_message = f'When the model claims an instance is positive, it is correct ' \
                  f'{self.positive_predictive_value:.1%} of the time; ' \
                  f'i.e. out of the {self._true_positives + self._false_positives} times the model ' \
                  f'predicted "{self._positive_class}", it was correct {self._true_positives} ' \
                  f'times; a.k.a precision'
    npv_message = f'When the model claims an instance is negative, it is correct ' \
                  f'{self.negative_predictive_value:.1%} of the time; ' \
                  f'i.e. out of the {self._true_negatives + self._false_negatives} times the model ' \
                  f'predicted "{self._negative_class}", it was correct {self._true_negatives} times'
    f1_message = 'The F1 score can be interpreted as a weighted average of the precision and recall, ' \
                 'where an F1 score reaches its best value at 1 and worst score at 0.'
    accuracy_message = f'{self.accuracy:.1%} of instances were correctly identified'
    error_message = f'{self.error_rate:.1%} of instances were incorrectly identified'
    prevalence_message = f'{self.prevalence:.1%} of the data are positive; i.e. out of ' \
                         f'{self.sample_size} total observations; {self._actual_positives} are labeled ' \
                         f'as "{self._positive_class}"'
    total_obs_message = f'There are {self.sample_size} total observations; i.e. sample size'
    # keys double as display names in `all_metrics_df`
    return {'AUC': (self.auc, auc_message),
            'True Positive Rate': (self.true_positive_rate, tpr_message),
            'True Negative Rate': (self.true_negative_rate, tnr_message),
            'False Positive Rate': (self.false_positive_rate, fpr_message),
            'False Negative Rate': (self.false_negative_rate, fnr_message),
            'Positive Predictive Value': (self.positive_predictive_value, ppv_message),
            'Negative Predictive Value': (self.negative_predictive_value, npv_message),
            'F1 Score': (self.f1_score, f1_message),
            'Accuracy': (self.accuracy, accuracy_message),
            'Error Rate': (self.error_rate, error_message),
            '% Positive': (self.prevalence, prevalence_message),
            'Total Observations': (self.sample_size, total_obs_message)}
def all_metrics_df(self,
                   return_explanations: bool = True,
                   dummy_classifier_strategy: Union[str, list, None] = 'prior',
                   dummy_classifier_constant: int = 1,
                   return_style: bool = False,
                   round_by: Optional[int] = None) -> Union[pd.DataFrame, Styler]:
    """All of the metrics are returned as a DataFrame.
    Args:
        return_explanations:
            if True, then return descriptions of score and more information in an additional column
        dummy_classifier_strategy:
            if not None, then returns column(s) corresponding to the scores from predictions of
            sklearn.dummy.DummyClassifier, based on the strategy (or strategies) provided. Valid values
            correspond to values of `strategy` parameter listed
            https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyClassifier.html
            If a list is passed in (e.g. ['prior', 'uniform'], then one score column per value is
            added.
            If None is passed, then no additional columns are added.
        dummy_classifier_constant:
            The explicit constant as predicted by the “constant” strategy for the
            DummyClassifier.
            This parameter is useful only for the “constant” dummy_classifier_strategy.
        return_style:
            if True, return styler object; else return dataframe
        round_by:
            the number of digits to round by; if None, then don't round
    """
    # metric scores only (index = metric name); explanations are joined later if requested
    result = pd.DataFrame.from_dict({key: value[0] for key, value in self.all_metrics.items()},
                                    orient='index',
                                    columns=['Score'])
    score_columns = ['Score']
    if dummy_classifier_strategy:
        if isinstance(dummy_classifier_strategy, str):
            dummy_classifier_strategy = [dummy_classifier_strategy]
        for strategy in dummy_classifier_strategy:
            dummy = DummyClassifier(strategy=strategy, constant=dummy_classifier_constant)
            # https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyClassifier.html
            # "All strategies make predictions that ignore the input feature values passed as the X
            # argument to fit and predict. The predictions, however, typically depend on values observed
            # in the y parameter passed to fit."
            _ = dummy.fit(X=self._actual_values, y=self._actual_values)
            dummy_probabilities = dummy.predict_proba(X=self._actual_values)
            # keep only the probability of the positive class (column 1)
            dummy_probabilities = dummy_probabilities[:, 1]
            dummy_evaluator = TwoClassEvaluator(actual_values=self._actual_values,
                                                predicted_scores=dummy_probabilities,
                                                score_threshold=self.score_threshold)
            # dummy_classifier_strategy=None prevents infinite recursion
            dummy_scores = dummy_evaluator.all_metrics_df(return_explanations=False,
                                                          dummy_classifier_strategy=None,
                                                          return_style=False)
            column_name = f"Dummy ({strategy})"
            score_columns = score_columns + [column_name]
            dummy_scores = dummy_scores.rename(columns={'Score': column_name})
            result = pd.concat([result, dummy_scores], axis=1)
    if return_explanations:
        explanations = pd.DataFrame.from_dict({key: value[1] for key, value in self.all_metrics.items()},
                                              orient='index',
                                              columns=['Explanation'])
        result = pd.concat([result, explanations], axis=1)
    # BUG FIX: was `if round_by:`, which silently ignored `round_by=0` (round to whole
    # numbers); explicit None check matches RegressionEvaluator.all_metrics_df
    if round_by is not None:
        for column in score_columns:
            result[column] = result[column].round(round_by)
    if return_style:
        # styled subsets: all rate metrics get bars; "bad" rates get a warning color;
        # Total Observations is formatted as a plain integer count
        subset_scores = [x for x in result.index.values if x != 'Total Observations']
        subset_scores = pd.IndexSlice[result.loc[subset_scores, :].index, score_columns]
        subset_negative_bad = pd.IndexSlice[result.loc[['False Positive Rate',
                                                        'False Negative Rate'], score_columns].index,
                                            score_columns]
        subset_secondary = pd.IndexSlice[result.loc[['Accuracy', 'Error Rate', '% Positive'],
                                                    score_columns].index, score_columns]
        subset_total_observations = pd.IndexSlice[result.loc[['Total Observations'],
                                                             score_columns].index, score_columns]
        result = result.style
        # BUG FIX: same `if round_by:` truthiness issue as above
        if round_by is not None:
            result = result.format(precision=round_by)
        result = result.format(subset=subset_total_observations,
                               thousands=',',
                               precision=0)
        result = result. \
            bar(subset=subset_scores, color=hcolor.Colors.PIGMENT_GREEN.value, vmin=0, vmax=1). \
            bar(subset=subset_negative_bad, color=hcolor.Colors.POPPY.value, vmin=0, vmax=1). \
            bar(subset=subset_secondary, color=hcolor.GRAY, vmin=0, vmax=1)
    return result
def plot_confusion_matrix(self):
    """Plots a heatmap of the confusion matrix.

    Each cell is annotated with its name (e.g. "True Negatives"), raw count, and
    percentage of the total sample size.
    """
    # pre-formatted annotation strings; layout mirrors the axis labels below
    # (rows = actual, columns = predicted)
    labels = np.array([[f'True Negatives\n{self._true_negatives}\n{self._true_negatives / self.sample_size:.1%}',  # pylint: disable=line-too-long # noqa
                        f'False Positives\n{self._false_positives}\n{self._false_positives / self.sample_size:.1%}'],  # pylint: disable=line-too-long # noqa
                       [f'False Negatives\n{self._false_negatives}\n{self._false_negatives / self.sample_size:.1%}',  # pylint: disable=line-too-long # noqa
                        f'True Positives\n{self._true_positives}\n{self._true_positives / self.sample_size:.1%}']])  # pylint: disable=line-too-long # noqa
    axis = plt.subplot()
    # fmt='' because the annotations are pre-formatted strings rather than numbers
    sns.heatmap(self._confusion_matrix, annot=labels, cmap='Blues', ax=axis, fmt='')
    # labels, title and ticks
    axis.set_xlabel('Predicted')
    axis.set_ylabel('Actual')
    # axis.set_title('Confusion Matrix');
    axis.xaxis.set_ticklabels([self._negative_class, self._positive_class])
    axis.yaxis.set_ticklabels([self._negative_class, self._positive_class])
    plt.tight_layout()
def _get_auc_curve_dataframe(self) -> pd.DataFrame:
    """
    Returns a dataframe containing the AUC line (i.e. a column of score thresholds, and the corresponding
    True Positive and False Positive Rate (as columns) for the corresponding score threshold.
    (A score threshold is the value for which you would predict a positive label if the value of the score
    is above the threshold (e.g. usually 0.5).
    """
    # evaluate the classifier at each threshold in [0, 1] stepped by 0.01
    rows = []
    for cutoff in np.arange(0.0, 1.01, 0.01):
        evaluator = TwoClassEvaluator(actual_values=self._actual_values,
                                      predicted_scores=self._predicted_scores,
                                      score_threshold=cutoff)
        rows.append((cutoff, evaluator.true_positive_rate, evaluator.false_positive_rate))
    return pd.DataFrame(rows,
                        columns=['threshold', 'True Positive Rate', 'False Positive Rate'])
def _get_threshold_curve_dataframe(self, score_threshold_range: Tuple[float, float] = (0.1, 0.9)) \
        -> pd.DataFrame:
    """
    Returns a dataframe with one row per score threshold (stepped by 0.025 across
    `score_threshold_range`) and one column per classification rate (e.g. True Positive
    Rate, False Positive Rate, etc.) at that threshold.
    (A score threshold is the value for which you would predict a positive label if the value of the score
    is above the threshold (e.g. usually 0.5).
    Args:
        score_threshold_range:
            range of score thresholds to plot (x-axis); tuple with minimum threshold in first index and
            maximum threshold in second index.
    """
    rows = []
    # + 0.025 so the upper bound of the range is included
    for cutoff in np.arange(score_threshold_range[0], score_threshold_range[1] + 0.025, 0.025):
        evaluator = TwoClassEvaluator(actual_values=self._actual_values,
                                      predicted_scores=self._predicted_scores,
                                      score_threshold=cutoff)
        rows.append((cutoff,
                     evaluator.true_positive_rate,
                     evaluator.false_positive_rate,
                     evaluator.positive_predictive_value,
                     evaluator.false_negative_rate,
                     evaluator.true_negative_rate))
    return pd.DataFrame(rows,
                        columns=['Score Threshold',
                                 'True Pos. Rate (Recall)',
                                 'False Pos. Rate',
                                 'Pos. Predictive Value (Precision)',
                                 'False Neg. Rate',
                                 'True Neg. Rate (Specificity)'])
def plot_auc_curve(self,
                   figure_size: tuple = STANDARD_WIDTH_HEIGHT,
                   return_plotly: bool = False) -> Union[None,
                                                         _figure.Figure]:
    """Plots the ROC AUC
    Args:
        figure_size:
            tuple containing `(width, height)` of plot. The default height is defined by
            `helpsk.plot.STANDARD_HEIGHT`, and the default width is
            `helpsk.plot.STANDARD_HEIGHT / helpsk.plot.GOLDEN_RATIO`
        return_plotly:
            If True, return plotly object. Otherwise, use matplotlib and end function with call:
            `plt.tight_layout()`
    """
    # NOTE(review): this matplotlib figure is created even when `return_plotly` is True,
    # leaving an empty figure behind on the plotly path — confirm whether intended
    plt.figure(figsize=figure_size)
    auc_curve = self._get_auc_curve_dataframe()
    if return_plotly:
        fig = px.line(
            data_frame=auc_curve,
            x='False Positive Rate',
            y='True Positive Rate',
            color_discrete_sequence=[hcolor.Colors.DOVE_GRAY.value],
            height=550,
            width=550 * GOLDEN_RATIO,
            title=f"AUC: {self.auc:.3f}<br><sub>The threshold of 0.5 is indicated with a large point.</sub>"  # pylint: disable=line-too-long # noqa
        )
        # overlay each threshold as a colored marker along the curve
        fig.add_trace(
            px.scatter(
                data_frame=auc_curve,
                x='False Positive Rate',
                y='True Positive Rate',
                color='threshold',
            ).data[0]
        )
        # emphasize the default 0.5 threshold with a larger marker
        fig.add_trace(
            px.scatter(
                data_frame=auc_curve.query('threshold == 0.5'),
                x='False Positive Rate',
                y='True Positive Rate',
                size=[2],
            ).data[0]
        )
        return fig
    axis = sns.lineplot(data=auc_curve, x='False Positive Rate', y='True Positive Rate', ci=None)
    axis.set_title(f"AUC: {round(self.auc, 3)}")
    # annotate every 5th point along the curve with its score threshold
    for i, (x, y, s) in enumerate(zip(auc_curve['False Positive Rate'],  # pylint: disable=invalid-name
                                      auc_curve['True Positive Rate'],
                                      auc_curve['threshold'])):
        if i % 5 == 0:
            axis.text(x, y, f'{s:.3}')
    axis.set_xticks(np.arange(0, 1.1, .1))
    axis.set_yticks(np.arange(0, 1.1, .1))
    plt.grid()
    plt.tight_layout()
def plot_threshold_curves(self,
                          score_threshold_range: Tuple[float, float] = (0.1, 0.9),
                          figure_size: tuple = STANDARD_WIDTH_HEIGHT,
                          return_plotly: bool = False) -> Union[None,
                                                                _figure.Figure]:
    """Plots various scores (e.g. True Positive Rate, False Positive Rate, etc.) for various score
    thresholds. (A score threshold is the value for which you would predict a positive label if the
    value of the score is above the threshold (e.g. usually 0.5).
    Args:
        score_threshold_range:
            range of score thresholds to plot (x-axis); tuple with minimum threshold in first index and
            maximum threshold in second index.
        figure_size:
            tuple containing `(width, height)` of plot. The default height is defined by
            `helpsk.plot.STANDARD_HEIGHT`, and the default width is
            `helpsk.plot.STANDARD_HEIGHT / helpsk.plot.GOLDEN_RATIO`
        return_plotly:
            If True, return plotly object. Otherwise, use matplotlib and end function with call:
            `plt.tight_layout()`
    """
    # NOTE(review): this matplotlib figure is created even when `return_plotly` is True — confirm intended
    plt.figure(figsize=figure_size)
    threshold_curves = self._get_threshold_curve_dataframe(score_threshold_range=score_threshold_range)
    if return_plotly:
        custom_colors = [
            hcolor.Colors.PASTEL_BLUE.value,
            hcolor.Colors.CUSTOM_GREEN.value,
            hcolor.Colors.YELLOW_PEPPER.value,
            hcolor.Colors.CRAIL.value,
            hcolor.Colors.CADMIUM_ORANGE.value,
        ]
        # melt to long format: one row per (threshold, rate-type) pair, one line per rate
        fig = px.line(
            data_frame=pd.melt(frame=threshold_curves, id_vars=['Score Threshold']),
            x='Score Threshold',
            y='value',
            color='variable',
            color_discrete_sequence=custom_colors,
            labels={
                'variable': 'Rate Type',
                'value': 'Rate'
            },
            height=550,
            width=550 * GOLDEN_RATIO,
            title="Tradeoffs Across Various Score Thresholds<br><sub>Black line is default threshold of 0.5.</sub>"  # pylint: disable=line-too-long # noqa
        )
        # NOTE(review): the plotly path marks the fixed 0.5 default, while the matplotlib path
        # below marks self.score_threshold — confirm the difference is intended
        fig = fig.add_vline(x=0.5, line_color=hcolor.Colors.BLACK_SHADOW.value)
        return fig
    axis = sns.lineplot(x='Score Threshold', y='value', hue='variable',
                        data=pd.melt(frame=threshold_curves, id_vars=['Score Threshold']))
    axis.set_xticks(np.arange(score_threshold_range[0], score_threshold_range[1] + 0.1, 0.1))
    axis.set_yticks(np.arange(0, 1.1, .1))
    plt.vlines(x=self.score_threshold, ymin=0, ymax=1, colors='black')
    plt.grid()
    plt.tight_layout()
def plot_precision_recall_tradeoff(self,
                                   score_threshold_range: Tuple[float, float] = (0.1, 0.9),
                                   figure_size: tuple = STANDARD_WIDTH_HEIGHT,
                                   return_plotly: bool = False) -> Union[None,
                                                                         _figure.Figure]:
    """Plots the tradeoff between precision (i.e. positive predict value) and recall (i.e. True Positive
    Rate) for various score thresholds. (A score threshold is the value for which you would predict a
    positive label if the value of the score is above the threshold (e.g. usually 0.5).
    Args:
        score_threshold_range:
            range of score thresholds to plot (x-axis); tuple with minimum threshold in first index and
            maximum threshold in second index.
        figure_size:
            tuple containing `(width, height)` of plot. The default height is defined by
            `helpsk.plot.STANDARD_HEIGHT`, and the default width is
            `helpsk.plot.STANDARD_HEIGHT / helpsk.plot.GOLDEN_RATIO`
        return_plotly:
            If True, return plotly object. Otherwise, use matplotlib and end function with call:
            `plt.tight_layout()`
    """
    # NOTE(review): this matplotlib figure is created even when `return_plotly` is True — confirm intended
    plt.figure(figsize=figure_size)
    threshold_curves = self._get_threshold_curve_dataframe(score_threshold_range=score_threshold_range)
    # keep only the precision/recall columns for this plot
    threshold_curves = threshold_curves[['Score Threshold',
                                         'True Pos. Rate (Recall)',
                                         'Pos. Predictive Value (Precision)']]
    if return_plotly:
        custom_colors = [
            hcolor.Colors.PASTEL_BLUE.value,
            # hcolor.Colors.CUSTOM_GREEN.value,
            hcolor.Colors.YELLOW_PEPPER.value,
            # hcolor.Colors.CRAIL.value,
            # hcolor.Colors.CADMIUM_ORANGE.value,
        ]
        # NOTE: the column subset below is redundant (already selected above) but harmless
        fig = px.line(
            data_frame=pd.melt(frame=threshold_curves[['Score Threshold',
                                                       'True Pos. Rate (Recall)',
                                                       'Pos. Predictive Value (Precision)']],
                               id_vars=['Score Threshold']),
            x='Score Threshold',
            y='value',
            color='variable',
            color_discrete_sequence=custom_colors,
            labels={
                'variable': 'Rate',
                'value': 'Value'
            },
            height=550,
            width=550 * GOLDEN_RATIO,
            title="Precision Recall Tradeoff<br><sub>Black line is default threshold of 0.5.</sub>"
        )
        # NOTE(review): vline marks the fixed 0.5 default, while the matplotlib path below
        # marks self.score_threshold — confirm the difference is intended
        fig = fig.add_vline(x=0.5, line_color=hcolor.Colors.BLACK_SHADOW.value)
        return fig
    axis = sns.lineplot(x='Score Threshold', y='value', hue='variable',
                        data=pd.melt(frame=threshold_curves, id_vars=['Score Threshold']))
    axis.set_xticks(np.arange(score_threshold_range[0], score_threshold_range[1] + 0.1, 0.1))
    axis.set_yticks(np.arange(0, 1.1, .1))
    plt.vlines(x=self.score_threshold, ymin=0, ymax=1, colors='black')
    plt.grid()
    plt.tight_layout()
def calculate_lift_gain(self,
                        num_buckets: int = 20,
                        return_style: bool = False,
                        include_all_info: bool = False) -> Union[pd.DataFrame, Styler]:
    """
    https://www.listendata.com/2014/08/excel-template-gain-and-lift-charts.html
    Gain is the % of positive (actual) events we have 'captured' i.e. located by looking at the
    top x% of predicted scores, such that the highest scores are looked at first.
    For example, if the percentile is `5%` and the gain value is `0.3`, we can say that if we randomly
    searched `5%` of the data, we would expect to uncover about `5%` of the positive events;
    however, we have uncovered 30% of events by searching the highest 5% of scores.
    Lift is simply the ratio of the percent of events that what we have uncovered for a given percentile
    of data (i.e. gain) divided by what we would have expected by random chance (i.e. the percentile).
    So in the previous example, we uncovered 30% of positive events by searching the top 5% of scores;
    whereas if we took a random sample, we would have only expected to find 5% of the positive events.
    The lift is `0.3 / 0.05` which is `6`; meaning we found `6` times the amount of positive events by
    searching the top 5% of scores, than if we were to randomly sample the data.

    Args:
        num_buckets:
            number of percentile buckets to split the scores into (e.g. 20 => 5% buckets)
        return_style:
            if True, return styler object; else return dataframe
        include_all_info:
            if True, keep the intermediate count columns; otherwise return only Gain/Lift
    """
    data = pd.DataFrame({
        'predicted_scores': self._predicted_scores,
        'actual_values': self._actual_values,
    })
    # highest scores first so the top percentile corresponds to the most confident predictions
    data.sort_values(['predicted_scores'], ascending=False, inplace=True)
    # .qcut gets percentiles
    # labels run 100, 95, ..., down to the smallest bucket's upper percentile bound
    bins = pd.qcut(x=data['predicted_scores'],
                   q=num_buckets,
                   labels=list(range(100, 0, round(-100 / num_buckets))))
    data['Percentile'] = bins

    def gain_grouping(group):
        # per-bucket counts: number of observations and number of actual positive events
        results = {
            '# of Obs.': len(group.actual_values),
            '# of Pos. Events': sum(group.actual_values == 1)
        }
        return pd.Series(results, index=['# of Obs.', '# of Pos. Events'])

    gain_lift_data = data.groupby('Percentile').apply(gain_grouping)
    # NOTE(review): sentinel row at percentile 0 appears to anchor the sort/cumsum; it is
    # dropped again after Gain is computed — confirm it is still needed
    temp = pd.DataFrame({'# of Obs.': 0, '# of Pos. Events': 0}, index=[0])
    temp.index.names = ['Percentile']
    gain_lift_data = pd.concat([gain_lift_data, temp])
    gain_lift_data.sort_index(ascending=True, inplace=True)
    gain_lift_data['Cumul. Pos. Events'] = gain_lift_data['# of Pos. Events'].cumsum()
    gain_lift_data['Gain'] = gain_lift_data['Cumul. Pos. Events'] / self._actual_positives
    gain_lift_data = gain_lift_data.loc[~(gain_lift_data.index == 0), :]
    # lift = gain relative to what random sampling of that percentile would be expected to find
    gain_lift_data['Lift'] = gain_lift_data['Gain'] / (gain_lift_data.index.values / 100)
    if not include_all_info:
        gain_lift_data = gain_lift_data[['Gain', 'Lift']]
    gain_lift_data = gain_lift_data.round(2)
    if return_style:
        gain_lift_data = gain_lift_data.style
        gain_lift_data.format(precision=2). \
            bar(subset='Gain', color=hcolor.Colors.PASTEL_BLUE.value,
                vmin=0, vmax=1). \
            bar(subset='Lift', color=hcolor.Colors.PASTEL_BLUE.value)
    return gain_lift_data
def plot_predicted_scores_histogram(self):
    """Plots a histogram of the predicted scores."""
    sns.histplot(self._predicted_scores)
    plt.tight_layout()
def plot_actual_vs_predict_histogram(self):
    """Plots histograms of the predicted scores, faceted by the actual class label.

    A red vertical line marks the 0.5 score in each facet.
    """
    # map 0/1 actuals back to their class labels for readable facet titles
    actual_categories = pd.Series(self._actual_values).\
        replace({0: self._negative_class, 1: self._positive_class})
    axes = sns.displot(
        pd.DataFrame({
            'Predicted Score': self._predicted_scores,
            'Actual Value': actual_categories
        }),
        x='Predicted Score',
        col='Actual Value'
    )
    for axis in axes.axes.flat:
        # NOTE(review): drawn at the fixed 0.5, not self.score_threshold — confirm intended
        axis.axvline(x=0.5, ymin=0, ymax=100, color='red')
    plt.tight_layout()
class RegressionEvaluator:
    """
    Evaluates models for regression (i.e. numeric outcome) problems.

    All metrics are computed once in `__init__` and exposed via read-only properties.
    """
    def __init__(self,
                 actual_values: np.ndarray,
                 predicted_values: np.ndarray):
        """
        Args:
            actual_values:
                the actual values
            predicted_values:
                the predicted values
        """
        assert len(actual_values) == len(predicted_values)
        self._actual_values = actual_values
        self._predicted_values = predicted_values
        # residual convention is actual - predicted: positive residual => under-prediction
        self._residuals = actual_values - predicted_values
        self._standard_deviation = np.std(actual_values)
        self._mean_squared_error = float(np.mean(np.square(actual_values - predicted_values)))
        self._mean_absolute_error = float(np.mean(np.abs(actual_values - predicted_values)))
        self._r_squared = r2_score(y_true=actual_values, y_pred=predicted_values)

    @property
    def mean_absolute_error(self) -> float:
        """Mean Absolute Error"""
        return self._mean_absolute_error

    @property
    def mean_squared_error(self) -> float:
        """Mean Squared Error"""
        return self._mean_squared_error

    @property
    def root_mean_squared_error(self) -> float:
        """Root Mean Squared Error"""
        return np.sqrt(self.mean_squared_error)

    @property
    def rmse_to_st_dev(self) -> float:
        """The ratio of RMSE to the standard deviation of the actual values.
        Gives an indication of how large the errors are to the actual data.
        """
        return self.root_mean_squared_error / self._standard_deviation

    @property
    def r_squared(self) -> float:
        """R Squared"""
        return self._r_squared

    @property
    def total_observations(self) -> int:
        """The total number of observations i.e. sample size."""
        return len(self._actual_values)

    @property
    def all_metrics(self) -> dict:
        """Returns a dictionary of the most common error metrics for regression problems."""
        return {'Mean Absolute Error (MAE)': self.mean_absolute_error,
                'Root Mean Squared Error (RMSE)': self.root_mean_squared_error,
                'RMSE to Standard Deviation of Target': self.rmse_to_st_dev,
                'R Squared': self.r_squared,
                'Total Observations': self.total_observations}

    def all_metrics_df(self,
                       dummy_regressor_strategy: Union[str, list, None] = 'mean',
                       dummy_regressor_constant: int = 1,
                       return_style: bool = False,
                       round_by: Optional[int] = None) -> Union[pd.DataFrame, Styler]:
        """All of the metrics are returned as a DataFrame.
        Args:
            dummy_regressor_strategy:
                if not None, then returns column(s) corresponding to the scores from predictions of
                sklearn.dummy.DummyRegressor, based on the strategy (or strategies) provided. Valid values
                correspond to values of `strategy` parameter listed
                https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyRegressor.html
                If a list is passed in (e.g. ['prior', 'uniform'], then one score column per value is
                added.
                If None is passed, then no additional columns are added.
            dummy_regressor_constant:
                The explicit constant as predicted by the “constant” strategy for the
                DummyRegressor.
                This parameter is useful only for the “constant” dummy_regressor_strategy.
            return_style:
                if True, return styler object; else return dataframe
            round_by:
                the number of digits to round by; if None, then don't round
        """
        result = pd.DataFrame.from_dict(self.all_metrics, orient='index', columns=['Score'])
        score_columns = ['Score']
        if dummy_regressor_strategy:
            if isinstance(dummy_regressor_strategy, str):
                dummy_regressor_strategy = [dummy_regressor_strategy]
            for strategy in dummy_regressor_strategy:
                dummy = DummyRegressor(strategy=strategy, constant=dummy_regressor_constant)
                # https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyClassifier.html
                # "All strategies make predictions that ignore the input feature values passed as the X
                # argument to fit and predict. The predictions, however, typically depend on values observed
                # in the y parameter passed to fit."
                _ = dummy.fit(X=self._actual_values, y=self._actual_values)
                dummy_predictions = dummy.predict(X=self._actual_values)
                dummy_evaluator = RegressionEvaluator(actual_values=self._actual_values,
                                                      predicted_values=dummy_predictions)
                # dummy_regressor_strategy=None prevents infinite recursion
                dummy_scores = dummy_evaluator.all_metrics_df(dummy_regressor_strategy=None,
                                                              return_style=False)
                column_name = f"Dummy ({strategy})"
                score_columns = score_columns + [column_name]
                dummy_scores = dummy_scores.rename(columns={'Score': column_name})
                result = pd.concat([result, dummy_scores], axis=1)
        if round_by is not None:
            # NOTE(review): only the first two rows (MAE, RMSE) are rounded here; the
            # remaining rows are formatted in the styler below — confirm intended
            result.iloc[0:2] = result.iloc[0:2].round(round_by)
        if return_style:
            # styled subsets: error metrics vs ratio metrics vs the raw observation count
            subset_scores = pd.IndexSlice[result.loc[['Mean Absolute Error (MAE)',
                                                      'Root Mean Squared Error (RMSE)'],
                                                     score_columns].index,
                                          score_columns]
            subset_secondary = pd.IndexSlice[result.loc[['RMSE to Standard Deviation of Target',
                                                         'R Squared'],
                                                        score_columns].index, score_columns]
            subset_total_observations = pd.IndexSlice[result.loc[['Total Observations'],
                                                                 score_columns].index, score_columns]
            result = result.style
            if round_by is not None:
                result = result.format(subset=subset_scores, thousands=',', precision=round_by)
            else:
                result = result.format(subset=subset_scores, thousands=',')
            result = result.format(subset=subset_secondary, precision=3)
            result = result.format(subset=subset_total_observations, thousands=',', precision=0)
        return result

    def plot_residuals_vs_fits(self, figure_size: tuple = STANDARD_WIDTH_HEIGHT):
        """Plots residuals vs fitted values
        Args:
            figure_size:
                tuple containing `(width, height)` of plot. The default height is defined by
                `helpsk.plot.STANDARD_HEIGHT`, and the default width is
                `helpsk.plot.STANDARD_HEIGHT / helpsk.plot.GOLDEN_RATIO`
        """
        # red loess line summarizes the trend of residuals across the fitted range
        lowess = sm.nonparametric.lowess
        loess_points = lowess(self._residuals, self._predicted_values)
        loess_x, loess_y = zip(*loess_points)
        plt.figure(figsize=figure_size)
        plt.plot(loess_x, loess_y, color='r')
        plt.scatter(x=self._predicted_values, y=self._residuals, s=8, alpha=0.5)
        plt.title('Residuals vs. Fitted Values')
        plt.xlabel('Fitted Values')
        plt.ylabel('Residuals (Actual - Predicted)')

    def plot_predictions_vs_actuals(self, figure_size: tuple = STANDARD_WIDTH_HEIGHT):
        """Plots predictions vs actual values
        Args:
            figure_size:
                tuple containing `(width, height)` of plot. The default height is defined by
                `helpsk.plot.STANDARD_HEIGHT`, and the default width is
                `helpsk.plot.STANDARD_HEIGHT / helpsk.plot.GOLDEN_RATIO`
        """
        lowess = sm.nonparametric.lowess
        loess_points = lowess(self._predicted_values, self._actual_values)
        loess_x, loess_y = zip(*loess_points)
        plt.figure(figsize=figure_size)
        plt.plot(loess_x, loess_y, color='r', alpha=0.5, label='Loess (Predictions vs Actuals)')
        # the y=x identity line represents a perfect prediction
        plt.plot(self._actual_values, self._actual_values, color='b', alpha=0.5, label='Perfect Prediction')
        plt.scatter(x=self._actual_values, y=self._predicted_values, s=8, alpha=0.5)
        plt.title('Predicted Values vs. Actual Values')
        plt.xlabel('Actuals')
        plt.ylabel('Predicted')
        axis = plt.gca()
        handles, labels = axis.get_legend_handles_labels()
        axis.legend(handles, labels)
        plt.figtext(0.99, 0.01,
                    'Note: observations above blue line mean model is over-predicting; below means under-'
                    'predicting.',  # noqa
                    horizontalalignment='right')
        return axis

    def plot_residuals_vs_actuals(self, figure_size: tuple = STANDARD_WIDTH_HEIGHT):
        """Plots residuals vs actuals values
        Args:
            figure_size:
                tuple containing `(width, height)` of plot. The default height is defined by
                `helpsk.plot.STANDARD_HEIGHT`, and the default width is
                `helpsk.plot.STANDARD_HEIGHT / helpsk.plot.GOLDEN_RATIO`
        """
        lowess = sm.nonparametric.lowess
        loess_points = lowess(self._residuals, self._actual_values)
        loess_x, loess_y = zip(*loess_points)
        plt.figure(figsize=figure_size)
        plt.plot(loess_x, loess_y, color='r')
        plt.scatter(x=self._actual_values, y=self._residuals, s=8, alpha=0.5)
        plt.title('Residuals vs. Actual Values')
        plt.xlabel('Actual')
        plt.ylabel('Residuals (Actual - Predicted)')
        plt.figtext(0.99, 0.01,
                    'Note: Actual > Predicted => Under-predicting (positive residual); negative residuals '
                    'mean over-predicting',  # noqa
                    horizontalalignment='right')
class TwoClassModelComparison:
"""This class compares multiple models trained on Two Class (i.e. 0's/1's) prediction scenarios."""
def __init__(self,
             actual_values: np.ndarray,
             predicted_scores: Dict[str, np.ndarray],
             positive_class: str = 'Positive Class',
             negative_class: str = 'Negative Class',
             score_threshold: float = 0.5
             ):
    """
    Args:
        actual_values:
            array of 0's and 1's
        predicted_scores:
            dictionary per model with key as the name of the model and value that is an array of
            decimal/float values from `predict_proba()`; NOT the actual class
        positive_class:
            string of the name/label of the positive class (i.e. value of 1). In other words, not
            'positive' in the sense of 'good' but 'positive' as in 'True/False Positive'.
        negative_class:
            string of the name/label of the negative class (i.e. value of 0). In other words, not
            'negative' in the sense of 'good' but 'negative' as in 'True/False Negative'.
        score_threshold:
            the score/probability threshold for turning scores into 0's and 1's and corresponding labels
    """
    assert isinstance(predicted_scores, dict)
    # every model's score array must align with the shared actual values
    for values in predicted_scores.values():
        assert len(actual_values) == len(values)
    self._positive_class = positive_class
    self._negative_class = negative_class
    self._actual_values = actual_values
    self._predicted_scores = predicted_scores
    self.score_threshold = score_threshold
    # one TwoClassEvaluator per model, keyed by model name; all share the same actuals
    self._evaluators = {key: TwoClassEvaluator(actual_values=actual_values,
                                               predicted_scores=value,
                                               positive_class=positive_class,
                                               negative_class=negative_class,
                                               score_threshold=score_threshold)
                        for key, value in predicted_scores.items()}
def all_metrics_df(self,
                   dummy_classifier_strategy: Union[str, list, None] = 'prior',
                   dummy_classifier_constant: int = 1,
                   return_style: bool = False,
                   round_by: Optional[int] = None) -> Union[pd.DataFrame, Styler]:
    """All of the metrics are returned as a DataFrame.
    Args:
        dummy_classifier_strategy:
            if not None, then returns column(s) corresponding to the scores from predictions of
            sklearn.dummy.DummyClassifier, based on the strategy (or strategies) provided. Valid values
            correspond to values of `strategy` parameter listed
            https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyClassifier.html
            If a list is passed in (e.g. ['prior', 'uniform'], then one score column per value is
            added.
            If None is passed, then no additional columns are added.
        dummy_classifier_constant:
            The explicit constant as predicted by the “constant” strategy for the
            DummyClassifier.
            This parameter is useful only for the “constant” dummy_classifier_strategy.
        return_style:
            if True, return styler object; else return dataframe
        round_by:
            the number of digits to round by; if None, then don't round
    """
    result = None
    # only the last model's evaluator requests the dummy-classifier baseline column(s),
    # so the baseline appears once rather than once per model
    last_key = list(self._evaluators.keys())[-1]
    for key, value in self._evaluators.items():
        dummy_strategy = dummy_classifier_strategy if key == last_key else None
        scores = value.all_metrics_df(
            return_explanations=False,
            dummy_classifier_strategy=dummy_strategy,
            dummy_classifier_constant=dummy_classifier_constant
        )
        scores = scores.rename(columns={'Score': key})
        result = pd.concat([result, scores], axis=1)  # pd.concat ignores the initial None
    # keep only the comparison-relevant rate metrics, one row per model after transpose
    result = result.loc[[
        'AUC', 'F1 Score',
        'True Positive Rate', 'True Negative Rate',
        'False Positive Rate', 'False Negative Rate',
        'Positive Predictive Value', 'Negative Predictive Value'
    ]]
    result = result.transpose()
    # BUG FIX: was `if round_by:`, which silently ignored `round_by=0` (round to whole
    # numbers); explicit None check matches RegressionEvaluator.all_metrics_df
    if round_by is not None:
        for column in result.columns:
            result[column] = result[column].round(round_by)
    if return_style:
        # "False ..." rates are bad when high, so they get the warning color
        positive_scores = [x for x in result.columns if not x.startswith('False')]
        negative_scores = [x for x in result.columns if x.startswith('False')]
        result = result.style
        # BUG FIX: same `if round_by:` truthiness issue as above
        if round_by is not None:
            result = result.format(precision=round_by)
        result = result. \
            bar(subset=positive_scores, color=hcolor.Colors.PIGMENT_GREEN.value, vmin=0, vmax=1). \
            bar(subset=negative_scores, color=hcolor.Colors.POPPY.value, vmin=0, vmax=1)
    return result
def plot_metrics_comparison(self,
                            dummy_classifier_strategy: Union[str, list, None] = 'prior',
                            dummy_classifier_constant: Union[int] = 1,
                            ) -> _figure.Figure:
    """
    Build a Plotly bar-chart that compares every metric across all models.

    Args:
        dummy_classifier_strategy:
            forwarded to `all_metrics_df`; when not None, baseline score
            column(s) from sklearn.dummy.DummyClassifier are included for the
            given strategy (or list of strategies); None adds no baselines.
        dummy_classifier_constant:
            the constant predicted by the "constant" strategy; only relevant
            when that strategy is requested.
    """
    # One row per model, one column per metric, then melt to long format
    # so plotly can facet by metric and color by model.
    metrics = self.all_metrics_df(
        dummy_classifier_strategy=dummy_classifier_strategy,
        dummy_classifier_constant=dummy_classifier_constant
    )
    metrics = metrics.transpose().reset_index()
    palette = [member.value for member in hcolor.Colors]
    figure = px.bar(
        data_frame=metrics.melt(id_vars='index'),
        y='variable',
        x='value',
        facet_col='index',
        facet_col_wrap=2,
        color='variable',
        color_discrete_sequence=palette,
        barmode='group',
        height=1000,
        labels={'index': 'Score'},
        title="Model Comparison"
    )
    # The model name is already on the y-axis; a legend would be redundant.
    figure.update_layout(showlegend=False)
    figure.update_yaxes(title=None)
    return figure
def plot_roc_curves(self) -> _figure.Figure:
    """Returns a plotly object representing the ROC curves across all models."""
    # Stack every evaluator's ROC points into one long dataframe, tagging
    # each row with its model name so plotly can draw one line per model.
    result = None
    for key, value in self._evaluators.items():
        auc_df = value._get_auc_curve_dataframe() # noqa
        auc_df['Model'] = key
        result = pd.concat([result, auc_df], axis=0)
    colors = [e.value for e in hcolor.Colors]
    # Base figure: the ROC line for every model, with threshold/model
    # available in the hover tooltip via custom_data.
    fig = px.line(
        data_frame=result,
        x='False Positive Rate',
        y='True Positive Rate',
        color='Model',
        color_discrete_sequence=colors,
        height=550,
        width=550 * GOLDEN_RATIO,
        custom_data=['threshold', 'Model'],
        title="ROC Curve of Models",
    )
    # For each model overlay two scatter traces: (1) the individual curve
    # points (hover targets), (2) an enlarged marker at threshold == 0.5 —
    # presumably the default classification cutoff (TODO confirm).
    for index in range(len(self._evaluators)):
        scatter_1 = px.scatter(
            data_frame=result,
            x='False Positive Rate',
            y='True Positive Rate',
            color='Model',
            color_discrete_sequence=colors,
            custom_data=['threshold', 'Model'],
        )
        # Only the trace for this model is lifted out of scatter_1; hide its
        # legend entry so each model appears in the legend once (from px.line).
        scatter_1.data[index]['showlegend'] = False
        fig.add_trace(
            scatter_1.data[index]
        )
        query = f"threshold == 0.5 & Model == '{list(self._evaluators.keys())[index]}'"
        scatter_2 = px.scatter(
            data_frame=result.query(query),
            x='False Positive Rate',
            y='True Positive Rate',
            color='Model',
            # Prepend this model's color so the single filtered trace keeps
            # the same hue as its curve.
            color_discrete_sequence=[colors[index]] + colors,
            custom_data=['threshold', 'Model'],
            size=[2],
        )
        scatter_2.data[0]['showlegend'] = False
        fig.add_trace(
            scatter_2.data[0],
        )
    # Uniform hover template for every trace (line and scatter alike).
    fig.update_traces(
        hovertemplate="<br>".join([
            "Model: %{customdata[1]}<br><br>"
            "False Positive Rate: %{x}",
            "True Positive Rate: %{y}",
            "Threshold: %{customdata[0]}",
        ])
    )
    return fig
|
#!/usr/bin/env python
import copy
import logging
import optparse
import os
import random
import sqlite3
import string
import sys
import threading
from typing import Optional, Union, Tuple, List, Dict
import pump
import pump_bfd
import pump_csv
import pump_cb
import pump_gen
import pump_mc
import pump_dcp
from pump import PumpingStation
def exit_handler(err: Optional[str]):
    """Terminate the process: exit code 1 with *err* echoed to stderr,
    or exit code 0 when *err* is falsy (None or empty)."""
    if not err:
        sys.exit(0)
    sys.stderr.write(str(err) + "\n")
    sys.exit(1)
class Transfer:
    """Base class for 2.0 Backup/Restore/Transfer.

    Subclasses (Backup, Restore) override the aliases, usage text, option
    sets and handler discovery; `main` drives the actual copy through
    pump.PumpingStation.
    """

    def __init__(self):
        self.name = "cbtransfer"
        self.source_alias = "source"
        self.sink_alias = "destination"
        self.usage = \
            "%prog [options] source destination\n\n" \
            "Transfer couchbase cluster data from source to destination.\n\n" \
            "Examples:\n" \
            "  %prog http://SOURCE:8091 /backups/backup-42\n" \
            "  %prog /backups/backup-42 http://DEST:8091\n" \
            "  %prog /backups/backup-42 couchbase://DEST:8091\n" \
            "  %prog http://SOURCE:8091 http://DEST:8091\n" \
            "  %prog couchstore-files:///opt/couchbase/var/lib/couchbase/data/ /backup-XXX\n" \
            "  %prog couchstore-files:///opt/couchbase/var/lib/couchbase/data/ couchbase://DEST:8091\n"

    def main(self, argv, opts_etc=None):
        """Entry point: parse argv, resolve handlers, run the pumping station.

        Args:
            argv: full process argv (argv[0] is the program name).
            opts_etc: optional extra attribute attached to opts (unit tests).

        Returns:
            0 on success, or an error string suitable for exit_handler.
        """
        # current_thread()/.name replace the camelCase aliases deprecated
        # since Python 3.10.
        if threading.current_thread().name == "MainThread":
            threading.current_thread().name = "mt"

        err, opts, source, sink = self.opt_parse(argv)
        if err:
            return err

        if opts_etc:
            opts.etc = opts_etc  # Used for unit tests, etc.

        # BUG FIX: the random suffix is built outside the f-string; the
        # original nested ''.join inside a single-quoted f-string, which is
        # a SyntaxError before Python 3.12 (PEP 701).
        suffix = "".join(random.sample(string.ascii_letters, 16))
        process_name = f'{os.path.basename(argv[0])}-{suffix}'
        setattr(opts, "process_name", process_name)

        logging.info(f'{self.name}...')
        logging.info(f' source : {source}')
        logging.info(f' sink : {sink}')
        logging.info(f' opts : {opts.safe}')

        source_class, sink_class = self.find_handlers(opts, source, sink)
        if not source_class:
            return f'error: unknown type of source: {source}'
        if not sink_class:
            return f'error: unknown type of sink: {sink}'
        err = sink_class.check_source(opts, source_class, source, sink_class, sink)
        if err:
            return err

        try:
            pump_station = pump.PumpingStation(opts, source_class, source,
                                               sink_class, sink)
            rv = pump_station.run()
            self.aggregate_stats(pump_station.cur)
            return rv
        except KeyboardInterrupt:
            return "interrupted."

    def aggregate_stats(self, cur):
        """Hook for subclasses to post-process transfer statistics."""
        return 0

    def check_opts(self, opts):
        """Hook for subclasses to validate options; return error str or None."""
        return None

    def opt_parse(self, argv):
        """Parse argv into (err, opts, source, sink); err is None on success."""
        p = self.opt_parser()
        opts, rest = p.parse_args(argv[1:])
        if len(rest) != 2:
            p.print_help()
            return f'\nError: please provide both a {self.source_alias} and a {self.sink_alias}', None, None, None

        err = self.check_opts(opts)  # pylint: disable=assignment-from-none
        if err:
            return err, None, None, None

        min_thread = 1
        max_thread = 20
        # BUG FIX: the original tested membership in range(min_thread,
        # max_thread), which rejected max_thread even though the error
        # message advertises the inclusive range [1, 20].
        if not min_thread <= opts.threads <= max_thread:
            return f'\nError: option -t: value is out of range [{min_thread}, {max_thread}]', None, None, None

        # Credentials may come from the environment when not given on the CLI.
        if opts.username is None:
            username = os.environ.get('CB_REST_USERNAME', None)
            if username:
                opts.username = username
            else:
                return "\nError: option -u/--username is required", None, None, None
        if opts.password is None:
            password = os.environ.get('CB_REST_PASSWORD', None)
            if password:
                opts.password = password
            else:
                return "\nError: option -p/--password is required", None, None, None

        opts.extra = opt_parse_extra(opts.extra, self.opt_extra_defaults())
        opts.safe = opt_parse_helper(opts)  # credential-masked copy for logging
        return None, opts, rest[0], rest[1]

    def opt_parser(self):
        """Build the OptionParser, including the -x extras help group."""
        p = optparse.OptionParser(usage=self.usage)
        opt_extra_help(p, self.opt_extra_defaults(False))
        self.opt_parser_options(p)
        return p

    def opt_parser_options(self, p):
        """Register transfer-specific options; subclasses override."""
        p.add_option("-b", "--bucket-source",
                     action="store", type="string", default=None,
                     help="""Single named bucket from source cluster to transfer""")
        p.add_option("-B", "--bucket-destination",
                     action="store", type="string", default=None,
                     help="""Single named bucket on destination cluster which receives transfer.
This allows you to transfer to a bucket with a different name
as your source bucket. If you do not provide defaults to the
same name as the bucket-source""")
        self.opt_parser_options_common(p)
        p.add_option("", "--single-node",
                     action="store_true", default=False,
                     help="""Transfer from a single server node in a source cluster,
This single server node is a source node URL""")
        p.add_option("", "--source-vbucket-state",
                     action="store", type="string", default='active',
                     help="""Only transfer from source vbuckets in this state,
such as 'active' (default) or 'replica'.
Must be used with Couchbase cluster as source""")
        p.add_option("", "--destination-vbucket-state",
                     action="store", type="string", default='active',
                     help="""Only transfer to destination vbuckets in this state,
such as 'active' (default) or 'replica'.
Must be used with Couchbase cluster as source""")
        p.add_option("", "--destination-operation",
                     action="store", type="string", default=None,
                     help="""Perform this operation on transfer.
'set' will override an existing document,
'add' will not override, 'get' will load all keys transferred
from a source cluster into the caching layer at the destination""")

    def opt_parser_options_common(self, p):
        """Register the options shared by transfer, backup and restore."""
        p.add_option("-i", "--id",
                     action="store", type="int", default=None,
                     help="""Transfer only items that match a vbucketID""")
        p.add_option("-k", "--key",
                     action="store", type="string", default=None,
                     help="""Transfer only items with keys that match a regexp""")
        p.add_option("", "--vbucket-list",
                     action="store", type="string", default=None,
                     help=optparse.SUPPRESS_HELP)
        p.add_option("-n", "--dry-run",
                     action="store_true", default=False,
                     help="""No actual transfer; just validate parameters, files,
connectivity and configurations""")
        p.add_option("-u", "--username",
                     action="store", type="string", default=None,
                     help="REST username for source cluster or server node")
        p.add_option("-p", "--password",
                     action="store", type="string", default=None,
                     help="REST password for source cluster or server node")
        p.add_option("-U", "--username-dest",
                     action="store", type="string", default=None,
                     help="REST username for destination cluster or server node")
        p.add_option("-P", "--password-dest",
                     action="store", type="string", default=None,
                     help="REST password for destination cluster or server node")
        p.add_option("-s", "--ssl",
                     action="store_true", default=False,
                     help="Transfer data with SSL enabled")
        p.add_option("", "--no-ssl-verify", default=True, action="store_false",
                     help="Skips SSL verification of certificates against the CA")
        p.add_option("", "--cacert", dest="cacert", default=None, action="store",
                     help="Verifies the cluster identity with this certificate")
        p.add_option("-t", "--threads",
                     action="store", type="int", default=4,
                     help="""Number of concurrent workers threads performing the transfer""")
        p.add_option("-v", "--verbose",
                     action="count", default=0,
                     help="verbose logging; more -v's provide more verbosity. Max is -vvv")
        p.add_option("", "--silent", action="store_true", default=False,
                     help="""Reduce logging verbosity to only include errors""")
        p.add_option("-x", "--extra",
                     action="store", type="string", default=None,
                     help="""Provide extra, uncommon config parameters;
comma-separated key=val(,key=val)* pairs""")
        p.add_option("-c", "--collection",
                     help=optparse.SUPPRESS_HELP)
        p.add_option("", "--force-txn", default=False, action="store_true", help=optparse.SUPPRESS_HELP)

    def opt_extra_defaults(self, add_hidden=True):
        """Return the -x extras as {name: (default_value, help_text)}."""
        rv = {
            "batch_max_size": (1000, "Transfer this # of documents per batch"),
            "batch_max_bytes": (400000, "Transfer this # of bytes per batch"),
            "cbb_max_mb": (100000, "Split backup file on destination cluster if it exceeds MB"),
            "max_retry": (10, "Max number of sequential retries if transfer fails"),
            "report": (5, "Number batches transferred before updating progress bar in console"),
            "report_full": (2000, "Number batches transferred before emitting progress information in console"),
            "recv_min_bytes": (4096, "Amount of bytes for every TCP/IP call transferred"),
            "try_xwm": (1, "Transfer documents with metadata. 0 should only be used if you transfer from 1.8.x to 1.8.x"),
            "nmv_retry": (1, "0 or 1, where 1 retries transfer after a NOT_MY_VBUCKET message"),
            "rehash": (0, "For value 1, rehash the partition id's of each item; \
this is needed when transferring data between clusters with different number of partitions, \
such as when transferring data from an OSX server to a non-OSX cluster"),
            "data_only": (0, "For value 1, only transfer data from a backup file or cluster"),
            "design_doc_only": (0, "For value 1, transfer design documents only from a backup file or cluster"),
            "conflict_resolve": (1, "By default, enable conflict resolution."),
            "seqno": (0, "By default, start seqno from beginning."),
            "mcd_compatible": (1, "For value 0, display extended fields for stdout output."),
            "uncompress": (0, "For value 1, restore data in uncompressed mode"),
            "backoff_cap": (10, "Max backoff time during rebalance period"),
            "flow_control": (1, "For value 0, disable flow control to improve throughput"),
            "dcp_consumer_queue_length": (1000, "A DCP client needs a queue for incoming documents/messages. A large length is more efficient, but memory proportional to length*avg. doc size. Below length 150, performance degrades significantly."),
        }
        if add_hidden:
            rv["allow_recovery_vb_remap"] = (0, "Allows the vbucket list to override the vbucket map from the server.")
        return rv

    def find_handlers(self, opts, source, sink):
        """Resolve (source_class, sink_class) from the registered handlers."""
        return (PumpingStation.find_handler(opts, source, SOURCES),
                PumpingStation.find_handler(opts, sink, SINKS))
class Backup(Transfer):
    """Entry point for 2.0 cbbackup."""

    def __init__(self):
        self.name = "cbbackup"
        self.source_alias = "source"
        self.sink_alias = "backup_dir"

        # Enterprise builds (pump_bfd2 importable) support incremental
        # backup modes, so they get richer usage text.
        if self._is_enterprise():
            self.usage = \
                "%prog [options] source backup_dir\n\n" \
                "Online backup of a couchbase cluster or server node.\n\n" \
                "Examples:\n" \
                "  The first backup to a given directory is a full backup, any subsequent ones are incremental.\n" \
                "  %prog -u Administrator -p password http://HOST:8091 /backup-42\n\n" \
                "  To take a differential backup after taking a full backup. \n" \
                "  %prog -u Administrator -p password couchbase://HOST:8091 /backup-43 -m diff\n\n" \
                "  To take an accumulative backup after taking a full backup. \n" \
                "  %prog -u Administrator -p password couchbase://HOST:8091 /backup-43 -m accu --single-node\n\n" \
                "Note: A full backup task is always triggered for a new sink location\n" \
                "  no matter what backup mode is specified.\n"
        else:
            # BUG FIX: the original ended this statement with a trailing
            # backslash, continuing the line into the following `def` and
            # producing a SyntaxError.
            self.usage = \
                "%prog [options] source backup_dir\n\n" \
                "Online backup of a couchbase cluster or server node.\n\n" \
                "Examples:\n" \
                "  Take a full backup of a cluster. \n" \
                "  %prog -u Administrator -p password http://HOST:8091 /backup-42\n\n" \
                "  Take a full backup for a single node. \n" \
                "  %prog -u Administrator -p password couchbase://HOST:8091 /backup-43 --single-node\n"

    def opt_parser_options(self, p):
        """Register backup-specific options plus the common option set."""
        p.add_option("-b", "--bucket-source",
                     action="store", type="string", default=None,
                     help="""single bucket from source to backup""")
        p.add_option("", "--single-node",
                     action="store_true", default=False,
                     help="""use a single server node from the source only,
not all server nodes from the entire cluster;
this single server node is defined by the source URL""")
        if self._is_enterprise():
            p.add_option("-m", "--mode",
                         action="store", type="string", default="diff",
                         help="backup mode: full, diff or accu [default:%default]")
        else:
            # Community builds only support full backups; hide the flag.
            p.add_option("-m", "--mode",
                         action="store", type="string", default="full",
                         help=optparse.SUPPRESS_HELP)
        Transfer.opt_parser_options_common(self, p)

    def find_handlers(self, opts, source, sink):
        """Resolve (source_class, sink_class) from the registered handlers."""
        return PumpingStation.find_handler(opts, source, SOURCES), \
            PumpingStation.find_handler(opts, sink, SINKS)

    def check_opts(self, opts):
        """Validate the backup mode; return an error string or None."""
        mode = getattr(opts, "mode", None)
        if mode:
            if mode not in ["full", "diff", "accu"]:
                return "\nError: option mode has to be 'full', 'diff' or 'accu'"
        return None

    def _is_enterprise(self):
        """True when the enterprise-only pump_bfd2 module is importable."""
        try:
            import pump_bfd2
            return True
        except ImportError:
            return False
class Restore(Transfer):
    """Entry point for 2.0 cbrestore."""
    # TODO: (1) Restore - opt_parse handle 1.8 backwards compatible args.

    def __init__(self):
        # Source/sink aliases are swapped relative to Backup: restore reads
        # a local backup_dir and writes to a live destination.
        self.name = "cbrestore"
        self.source_alias = "backup_dir"
        self.sink_alias = "destination"
        self.usage = \
            "%prog [options] backup_dir destination\n\n" \
            "Restores a single couchbase bucket.\n\n" \
            "Please first create the destination / bucket before restoring.\n\n" \
            "Examples:\n" \
            "  %prog /backups/backup-42 http://HOST:8091 \\\n" \
            "    --bucket-source=default --from-date=2014-01-20 --to-date=2014-03-31\n" \
            "  %prog /backups/backup-42 couchbase://HOST:8091 \\\n" \
            "    --bucket-source=default\n" \
            "  %prog /backups/backup-42 memcached://HOST:11211 \\\n" \
            "    --bucket-source=sessions --bucket-destination=sessions2"

    def opt_parser_options(self, p):
        # Restore-specific options, followed by the common option set.
        p.add_option("-a", "--add",
                     action="store_true", default=False,
                     help="""use add instead of set to not overwrite existing
items in the destination""")
        p.add_option("-b", "--bucket-source",
                     action="store", type="string", default=None,
                     help="""single bucket from the backup_dir to restore;
if the backup_dir only contains a single bucket,
then that bucket will be automatically used""")
        p.add_option("-B", "--bucket-destination",
                     action="store", type="string", default=None,
                     help="""when --bucket-source is specified, overrides the
destination bucket name; this allows you to restore
to a different bucket; defaults to the same as the
bucket-source""")
        p.add_option("", "--from-date",
                     action="store", type="string", default=None,
                     help="""restore data from the date specified as yyyy-mm-dd. By default,
all data from the very beginning will be restored""")
        p.add_option("", "--to-date",
                     action="store", type="string", default=None,
                     help="""restore data till the date specified as yyyy-mm-dd. By default,
all data that are collected will be restored""")
        Transfer.opt_parser_options_common(self, p)

    # TODO: (1) cbrestore parameter --create-design-docs=y|n
    # TODO: (1) cbrestore parameter -d DATA, --data=DATA
    # TODO: (1) cbrestore parameter --validate-only
    # TODO: (1) cbrestore parameter -H HOST, --host=HOST
    # TODO: (1) cbrestore parameter -p PORT, --port=PORT
    # TODO: (1) cbrestore parameter option to override expiration?

    def find_handlers(self, opts, source, sink):
        # The source is always a backup-file-directory (BFD) tree; only the
        # sink is resolved dynamically from the registered handlers.
        return pump_bfd.BFDSource, PumpingStation.find_handler(opts, sink, SINKS)
# --------------------------------------------------
def opt_parse_helper(opts):
    """Configure the logging level from the verbosity flags, then return a
    deep copy of *opts* with credentials masked, safe to write to logs."""
    # --silent wins over any -v flags; otherwise more -v means more detail.
    if opts.silent:
        level = logging.ERROR
    elif opts.verbose >= 2:
        level = logging.DEBUG
    elif opts.verbose >= 1:
        level = logging.INFO
    else:
        level = logging.WARN
    logging.basicConfig(format=pump.LOGGING_FORMAT, level=level)

    masked = copy.deepcopy(opts)
    if masked.username:
        masked.username = "<xxx>"
    if masked.password:
        masked.password = "<xxx>"
    return masked
def opt_parse_extra(extra, extra_defaults):
    """Convert an extra string (comma-separated key=val pairs) into
    a dict, using default values from extra_defaults dict.

    Args:
        extra: "k1=v1,k2=v2" style string, or None/"" for no overrides.
        extra_defaults: {name: (default_value, help_text)}.

    Returns:
        {name: float} for every name in extra_defaults, with values taken
        from *extra* where given. Exits the process on an unknown name.
    """
    # The '=' append tolerates bare keys ("k" parses as k with empty value).
    pairs = ((kv + '=').split('=') for kv in (extra or "").split(','))
    extra_in = {parts[0]: parts[1] for parts in pairs}
    for key in extra_in:
        # Proper membership test; the original used `not extra_defaults.get(k)`,
        # which would misfire on any falsy default entry.
        if key and key not in extra_defaults:
            sys.exit("error: unknown extra option: " + key)
    return {key: float(extra_in.get(key, default[0]))
            for key, default in extra_defaults.items()}
def opt_extra_help(parser, extra_defaults):
    """Attach an option group to *parser* documenting every -x extra
    parameter as "name=default (help)", sorted by name."""
    descriptions = [f'{key}={default[0]} ({default[1]})'
                    for key, default in sorted(extra_defaults.items())]
    group = optparse.OptionGroup(parser, "Available extra config parameters (-x)",
                                 "; ".join(descriptions))
    parser.add_option_group(group)
# --------------------------------------------------
# Handler registries probed by PumpingStation.find_handler, in order.
SOURCES = [pump_bfd.BFDSource,
           pump_csv.CSVSource,
           pump_gen.GenSource,
           pump_dcp.DCPStreamSource,
           pump.StdInSource]
SINKS = [pump_bfd.BFDSink,
         pump_mc.MCSink,
         pump_cb.CBSink,
         pump_csv.CSVSink,
         pump.StdOutSink]
# Optional handlers: registered only when their modules are importable.
try:
    import pump_sfd
    SOURCES.append(pump_sfd.SFDSource)
    SINKS.append(pump_sfd.SFDSink)
except ImportError:
    pass
try:
    import pump_json
    SOURCES.append(pump_json.JSONSource)
except ImportError:
    pass
try:
    import pump_bfd2
    # Inserted at the front so it is matched before the community BFD sink.
    SINKS.insert(0, pump_bfd2.BFDSinkEx)
except ImportError:
    pass
# TODO: (1) pump_transfer - use QUIET commands
# TODO: (1) pump_transfer - verify that nth replica got the msg
# TODO: (1) pump_transfer - ability to TAP a non-active or replica vbucket / MB-4583
# TODO: (10) pump_transfer - incremental backup/restore
if __name__ == '__main__':
    # Generic transfer entry point; cbbackup/cbrestore use Backup/Restore.
    sys.exit(Transfer().main(sys.argv))
| #!/usr/bin/env python
import copy
import logging
import optparse
import os
import random
import sqlite3
import string
import sys
import threading
from typing import Optional, Union, Tuple, List, Dict
import pump
import pump_bfd
import pump_csv
import pump_cb
import pump_gen
import pump_mc
import pump_dcp
from pump import PumpingStation
def exit_handler(err: Optional[str]):
if err:
sys.stderr.write(str(err) + "\n")
sys.exit(1)
else:
sys.exit(0)
class Transfer:
"""Base class for 2.0 Backup/Restore/Transfer."""
def __init__(self):
self.name = "cbtransfer"
self.source_alias = "source"
self.sink_alias = "destination"
self.usage = \
"%prog [options] source destination\n\n" \
"Transfer couchbase cluster data from source to destination.\n\n" \
"Examples:\n" \
" %prog http://SOURCE:8091 /backups/backup-42\n" \
" %prog /backups/backup-42 http://DEST:8091\n" \
" %prog /backups/backup-42 couchbase://DEST:8091\n" \
" %prog http://SOURCE:8091 http://DEST:8091\n" \
" %prog couchstore-files:///opt/couchbase/var/lib/couchbase/data/ /backup-XXX\n" \
" %prog couchstore-files:///opt/couchbase/var/lib/couchbase/data/ couchbase://DEST:8091\n"
def main(self, argv, opts_etc=None):
if threading.currentThread().getName() == "MainThread":
threading.currentThread().setName("mt")
err, opts, source, sink = self.opt_parse(argv)
if err:
return err
if opts_etc:
opts.etc = opts_etc # Used for unit tests, etc.
process_name = f'{os.path.basename(argv[0])}-{"".join(random.sample(string.ascii_letters, 16))}'
setattr(opts, "process_name", process_name)
logging.info(f'{self.name}...')
logging.info(f' source : {source}')
logging.info(f' sink : {sink}')
logging.info(f' opts : {opts.safe}')
source_class, sink_class = self.find_handlers(opts, source, sink)
if not source_class:
return f'error: unknown type of source: {source}'
if not sink_class:
return f'error: unknown type of sink: {sink}'
err = sink_class.check_source(opts, source_class, source, sink_class, sink)
if err:
return err
try:
pumpStation = pump.PumpingStation(opts, source_class, source,
sink_class, sink)
rv = pumpStation.run()
self.aggregate_stats(pumpStation.cur)
return rv
except KeyboardInterrupt:
return "interrupted."
def aggregate_stats(self, cur):
return 0
def check_opts(self, opts):
return None
def opt_parse(self, argv):
p = self.opt_parser()
opts, rest = p.parse_args(argv[1:])
if len(rest) != 2:
p.print_help()
return f'\nError: please provide both a {self.source_alias} and a {self.sink_alias}', None, None, None
err = self.check_opts(opts) # pylint: disable=assignment-from-none
if err:
return err, None, None, None
min_thread = 1
max_thread = 20
if opts.threads not in list(range(min_thread, max_thread)):
return f'\nError: option -t: value is out of range [{min_thread}, {max_thread}]', None, None, None
if opts.username is None:
username = os.environ.get('CB_REST_USERNAME', None)
if username:
opts.username = username
else:
return "\nError: option -u/--username is required", None, None, None
if opts.password is None:
password = os.environ.get('CB_REST_PASSWORD', None)
if password:
opts.password = password
else:
return "\nError: option -p/--password is required", None, None, None
opts.extra = opt_parse_extra(opts.extra, self.opt_extra_defaults())
opts.safe = opt_parse_helper(opts)
return None, opts, rest[0], rest[1]
def opt_parser(self):
p = optparse.OptionParser(usage=self.usage)
opt_extra_help(p, self.opt_extra_defaults(False))
self.opt_parser_options(p)
return p
def opt_parser_options(self, p):
p.add_option("-b", "--bucket-source",
action="store", type="string", default=None,
help="""Single named bucket from source cluster to transfer""")
p.add_option("-B", "--bucket-destination",
action="store", type="string", default=None,
help="""Single named bucket on destination cluster which receives transfer.
This allows you to transfer to a bucket with a different name
as your source bucket. If you do not provide defaults to the
same name as the bucket-source""")
self.opt_parser_options_common(p)
p.add_option("", "--single-node",
action="store_true", default=False,
help="""Transfer from a single server node in a source cluster,
This single server node is a source node URL""")
p.add_option("", "--source-vbucket-state",
action="store", type="string", default='active',
help="""Only transfer from source vbuckets in this state,
such as 'active' (default) or 'replica'.
Must be used with Couchbase cluster as source""")
p.add_option("", "--destination-vbucket-state",
action="store", type="string", default='active',
help="""Only transfer to destination vbuckets in this state,
such as 'active' (default) or 'replica'.
Must be used with Couchbase cluster as source""")
p.add_option("", "--destination-operation",
action="store", type="string", default=None,
help="""Perform this operation on transfer.
'set' will override an existing document,
'add' will not override, 'get' will load all keys transferred
from a source cluster into the caching layer at the destination""")
def opt_parser_options_common(self, p):
p.add_option("-i", "--id",
action="store", type="int", default=None,
help="""Transfer only items that match a vbucketID""")
p.add_option("-k", "--key",
action="store", type="string", default=None,
help="""Transfer only items with keys that match a regexp""")
p.add_option("", "--vbucket-list",
action="store", type="string", default=None,
help=optparse.SUPPRESS_HELP)
p.add_option("-n", "--dry-run",
action="store_true", default=False,
help="""No actual transfer; just validate parameters, files,
connectivity and configurations""")
p.add_option("-u", "--username",
action="store", type="string", default=None,
help="REST username for source cluster or server node")
p.add_option("-p", "--password",
action="store", type="string", default=None,
help="REST password for source cluster or server node")
p.add_option("-U", "--username-dest",
action="store", type="string", default=None,
help="REST username for destination cluster or server node")
p.add_option("-P", "--password-dest",
action="store", type="string", default=None,
help="REST password for destination cluster or server node")
p.add_option("-s", "--ssl",
action="store_true", default=False,
help="Transfer data with SSL enabled")
p.add_option("", "--no-ssl-verify", default=True, action="store_false",
help="Skips SSL verification of certificates against the CA")
p.add_option("", "--cacert", dest="cacert", default=None, action="store",
help="Verifies the cluster identity with this certificate")
p.add_option("-t", "--threads",
action="store", type="int", default=4,
help="""Number of concurrent workers threads performing the transfer""")
p.add_option("-v", "--verbose",
action="count", default=0,
help="verbose logging; more -v's provide more verbosity. Max is -vvv")
p.add_option("", "--silent", action="store_true", default=False,
help="""Reduce logging verbosity to only include errors""")
p.add_option("-x", "--extra",
action="store", type="string", default=None,
help="""Provide extra, uncommon config parameters;
comma-separated key=val(,key=val)* pairs""")
p.add_option("-c", "--collection",
help=optparse.SUPPRESS_HELP)
p.add_option("", "--force-txn", default=False, action="store_true", help=optparse.SUPPRESS_HELP)
def opt_extra_defaults(self, add_hidden=True):
rv = {
"batch_max_size": (1000, "Transfer this # of documents per batch"),
"batch_max_bytes": (400000, "Transfer this # of bytes per batch"),
"cbb_max_mb": (100000, "Split backup file on destination cluster if it exceeds MB"),
"max_retry": (10, "Max number of sequential retries if transfer fails"),
"report": (5, "Number batches transferred before updating progress bar in console"),
"report_full": (2000, "Number batches transferred before emitting progress information in console"),
"recv_min_bytes": (4096, "Amount of bytes for every TCP/IP call transferred"),
"try_xwm": (1, "Transfer documents with metadata. 0 should only be used if you transfer from 1.8.x to 1.8.x"),
"nmv_retry": (1, "0 or 1, where 1 retries transfer after a NOT_MY_VBUCKET message"),
"rehash": (0, "For value 1, rehash the partition id's of each item; \
this is needed when transferring data between clusters with different number of partitions, \
such as when transferring data from an OSX server to a non-OSX cluster"),
"data_only": (0, "For value 1, only transfer data from a backup file or cluster"),
"design_doc_only": (0, "For value 1, transfer design documents only from a backup file or cluster"),
"conflict_resolve":(1, "By default, enable conflict resolution."),
"seqno": (0, "By default, start seqno from beginning."),
"mcd_compatible": (1, "For value 0, display extended fields for stdout output."),
"uncompress": (0, "For value 1, restore data in uncompressed mode"),
"backoff_cap": (10, "Max backoff time during rebalance period"),
"flow_control": (1, "For value 0, disable flow control to improve throughput"),
"dcp_consumer_queue_length": (1000,"A DCP client needs a queue for incoming documents/messages. A large length is more efficient, but memory proportional to length*avg. doc size. Below length 150, performance degrades significantly."),
}
if add_hidden:
rv["allow_recovery_vb_remap"] = (0, "Allows the vbucket list to override the vbucket map from the server.")
return rv
def find_handlers(self, opts, source, sink):
return (PumpingStation.find_handler(opts, source, SOURCES),
PumpingStation.find_handler(opts, sink, SINKS))
class Backup(Transfer):
"""Entry point for 2.0 cbbackup."""
def __init__(self):
self.name = "cbbackup"
self.source_alias = "source"
self.sink_alias = "backup_dir"
if self._is_enterprise():
self.usage = \
"%prog [options] source backup_dir\n\n" \
"Online backup of a couchbase cluster or server node.\n\n" \
"Examples:\n" \
" The first backup to a given directory is a full backup, any subsequent ones are incremental.\n" \
" %prog -u Administrator -p password http://HOST:8091 /backup-42\n\n" \
" To take a differential backup after taking a full backup. \n" \
" %prog -u Administrator -p password couchbase://HOST:8091 /backup-43 -m diff\n\n" \
" To take an accumulative backup after taking a full backup. \n" \
" %prog -u Administrator -p password couchbase://HOST:8091 /backup-43 -m accu --single-node\n\n" \
"Note: A full backup task is always triggered for a new sink location\n" \
" no matter what backup mode is specified.\n"
else:
self.usage = \
"%prog [options] source backup_dir\n\n" \
"Online backup of a couchbase cluster or server node.\n\n" \
"Examples:\n" \
" Take a full backup of a cluster. \n" \
" %prog -u Administrator -p password http://HOST:8091 /backup-42\n\n" \
" Take a full backup for a single node. \n" \
" %prog -u Administrator -p password couchbase://HOST:8091 /backup-43 --single-node\n" \
def opt_parser_options(self, p):
p.add_option("-b", "--bucket-source",
action="store", type="string", default=None,
help="""single bucket from source to backup""")
p.add_option("", "--single-node",
action="store_true", default=False,
help="""use a single server node from the source only,
not all server nodes from the entire cluster;
this single server node is defined by the source URL""")
if self._is_enterprise():
p.add_option("-m", "--mode",
action="store", type="string", default="diff",
help="backup mode: full, diff or accu [default:%default]")
else:
p.add_option("-m", "--mode",
action="store", type="string", default="full",
help=optparse.SUPPRESS_HELP)
Transfer.opt_parser_options_common(self, p)
def find_handlers(self, opts, source, sink):
return PumpingStation.find_handler(opts, source, SOURCES), \
PumpingStation.find_handler(opts, sink, SINKS)
def check_opts(self, opts):
mode = getattr(opts, "mode", None)
if mode:
if mode not in ["full", "diff", "accu"]:
return "\nError: option mode has to be 'full', 'diff' or 'accu'"
return None
def _is_enterprise(self):
try:
import pump_bfd2
return True
except ImportError:
return False
class Restore(Transfer):
    """Entry point for 2.0 cbrestore."""

    # TODO: (1) Restore - opt_parse handle 1.8 backwards compatible args.

    def __init__(self):
        # Names/aliases feed the shared Transfer usage/option machinery.
        self.name = "cbrestore"
        self.source_alias = "backup_dir"
        self.sink_alias = "destination"
        # optparse usage text; %prog expands to the program name.
        self.usage = \
            "%prog [options] backup_dir destination\n\n" \
            "Restores a single couchbase bucket.\n\n" \
            "Please first create the destination / bucket before restoring.\n\n" \
            "Examples:\n" \
            " %prog /backups/backup-42 http://HOST:8091 \\\n" \
            " --bucket-source=default --from-date=2014-01-20 --to-date=2014-03-31\n" \
            " %prog /backups/backup-42 couchbase://HOST:8091 \\\n" \
            " --bucket-source=default\n" \
            " %prog /backups/backup-42 memcached://HOST:11211 \\\n" \
            " --bucket-source=sessions --bucket-destination=sessions2"

    def opt_parser_options(self, p):
        """Registers cbrestore-specific command-line options on parser p."""
        p.add_option("-a", "--add",
                     action="store_true", default=False,
                     help="""use add instead of set to not overwrite existing
                             items in the destination""")
        p.add_option("-b", "--bucket-source",
                     action="store", type="string", default=None,
                     help="""single bucket from the backup_dir to restore;
                             if the backup_dir only contains a single bucket,
                             then that bucket will be automatically used""")
        p.add_option("-B", "--bucket-destination",
                     action="store", type="string", default=None,
                     help="""when --bucket-source is specified, overrides the
                             destination bucket name; this allows you to restore
                             to a different bucket; defaults to the same as the
                             bucket-source""")
        p.add_option("", "--from-date",
                     action="store", type="string", default=None,
                     help="""restore data from the date specified as yyyy-mm-dd. By default,
                             all data from the very beginning will be restored""")
        p.add_option("", "--to-date",
                     action="store", type="string", default=None,
                     help="""restore data till the date specified as yyyy-mm-dd. By default,
                             all data that are collected will be restored""")
        Transfer.opt_parser_options_common(self, p)

    # TODO: (1) cbrestore parameter --create-design-docs=y|n
    # TODO: (1) cbrestore parameter -d DATA, --data=DATA
    # TODO: (1) cbrestore parameter --validate-only
    # TODO: (1) cbrestore parameter -H HOST, --host=HOST
    # TODO: (1) cbrestore parameter -p PORT, --port=PORT
    # TODO: (1) cbrestore parameter option to override expiration?

    def find_handlers(self, opts, source, sink):
        """Source is always a BFD backup dir; sink is matched against SINKS."""
        return pump_bfd.BFDSource, PumpingStation.find_handler(opts, sink, SINKS)
# --------------------------------------------------
def opt_parse_helper(opts):
    """Initializes logging per verbosity flags; returns a deep copy of opts
    with credentials masked so it is safe to log."""
    # --silent wins over any -v/-vv verbosity.
    if opts.silent:
        logging_level = logging.ERROR
    elif opts.verbose >= 2:
        logging_level = logging.DEBUG
    elif opts.verbose >= 1:
        logging_level = logging.INFO
    else:
        logging_level = logging.WARN
    logging.basicConfig(format=pump.LOGGING_FORMAT, level=logging_level)
    masked = copy.deepcopy(opts)
    if masked.username:
        masked.username = "<xxx>"
    if masked.password:
        masked.password = "<xxx>"
    return masked
def opt_parse_extra(extra, extra_defaults):
    """Convert an extra string (comma-separated key=val pairs) into
    a dict, using default values from extra_defaults dict."""
    # Parse "k=v" pairs; a bare "k" yields an empty-string value, and an
    # empty input yields a single '' key which is ignored below.
    supplied = {}
    for pair in (extra or "").split(','):
        parts = (pair + '=').split('=')
        supplied[parts[0]] = parts[1]
    for key in supplied:
        if key and not extra_defaults.get(key):
            sys.exit("error: unknown extra option: " + key)
    # Every known option gets a float value: supplied or its default.
    return {key: float(supplied.get(key, spec[0]))
            for key, spec in extra_defaults.items()}
def opt_extra_help(parser, extra_defaults):
    """Attaches an optparse group documenting the -x extra parameters."""
    # One "key=default (description)" entry per option, key-sorted.
    entries = [f'{key}={spec[0]} ({spec[1]})'
               for key, spec in sorted(extra_defaults.items())]
    group = optparse.OptionGroup(parser, "Available extra config parameters (-x)",
                                 "; ".join(entries))
    parser.add_option_group(group)
# --------------------------------------------------
# Registries of transfer endpoints; PumpingStation.find_handler matches a
# command-line spec against these classes in order.
SOURCES = [pump_bfd.BFDSource,
           pump_csv.CSVSource,
           pump_gen.GenSource,
           pump_dcp.DCPStreamSource,
           pump.StdInSource]

SINKS = [pump_bfd.BFDSink,
         pump_mc.MCSink,
         pump_cb.CBSink,
         pump_csv.CSVSink,
         pump.StdOutSink]

# Optional endpoints: registered only when their modules are importable.
try:
    import pump_sfd
    SOURCES.append(pump_sfd.SFDSource)
    SINKS.append(pump_sfd.SFDSink)
except ImportError:
    pass

try:
    import pump_json
    SOURCES.append(pump_json.JSONSource)
except ImportError:
    pass

# Enterprise edition: prefer the extended BFD sink when available.
try:
    import pump_bfd2
    SINKS.insert(0, pump_bfd2.BFDSinkEx)
except ImportError:
    pass

# TODO: (1) pump_transfer - use QUIET commands
# TODO: (1) pump_transfer - verify that nth replica got the msg
# TODO: (1) pump_transfer - ability to TAP a non-active or replica vbucket / MB-4583
# TODO: (10) pump_transfer - incremental backup/restore

if __name__ == '__main__':
    sys.exit(Transfer().main(sys.argv))
|
# Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs HPC Challenge.
Homepage: http://icl.cs.utk.edu/hpcc/
Most of the configuration of the HPC-Challenge revolves around HPL, the rest of
the HPCC piggybacks upon the HPL configration.
Homepage: http://www.netlib.org/benchmark/hpl/
HPL requires a BLAS library (Basic Linear Algebra Subprograms)
OpenBlas: http://www.openblas.net/
Intel MKL: https://software.intel.com/en-us/mkl
HPL also requires a MPI (Message Passing Interface) Library
OpenMPI: http://www.open-mpi.org/
MPI needs to be configured:
Configuring MPI:
http://techtinkering.com/2009/12/02/setting-up-a-beowulf-cluster-using-open-mpi-on-linux/
Once HPL is built the configuration file must be created:
Configuring HPL.dat:
http://www.advancedclustering.com/faq/how-do-i-tune-my-hpldat-file.html
http://www.netlib.org/benchmark/hpl/faqs.html
"""
import inspect
import logging
import math
import re
from typing import Any, Dict, List, Tuple
from absl import flags
import dataclasses
from perfkitbenchmarker import benchmark_spec as bm_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import hpc_util
from perfkitbenchmarker import linux_virtual_machine as linux_vm
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import hpcc
from perfkitbenchmarker.linux_packages import intel_repo
from perfkitbenchmarker.linux_packages import intelmpi
from perfkitbenchmarker.linux_packages import mkl
from perfkitbenchmarker.linux_packages import numactl
from perfkitbenchmarker.linux_packages import openblas
FLAGS = flags.FLAGS
LOCAL_HPCCINF_FILE = 'hpccinf.j2'
HPCCINF_FILE = 'hpccinf.txt'
MACHINEFILE = 'machinefile'
BLOCK_SIZE = 192
STREAM_METRICS = ['Copy', 'Scale', 'Add', 'Triad']
MKL_TGZ = 'l_mkl_2018.2.199.tgz'
BENCHMARK_DATA = {
# Intel MKL package downloaded from:
# https://software.intel.com/en-us/mkl
# In order to get "l_mkl_2018.2.199.tgz", please choose the product
# "Intel Performance Libraries for Linux*", choose the version
# "2018 Update 2" and choose the download option "Intel
# Math Kernel Library(Intel Mkl)".
MKL_TGZ: 'e28d12173bef9e615b0ded2f95f59a42b3e9ad0afa713a79f8801da2bfb31936',
}
# File for mpirun to run that calls ./hpcc
HPCC_WRAPPER = 'hpcc_wrapper.sh'
BENCHMARK_NAME = 'hpcc'
BENCHMARK_CONFIG = """
hpcc:
description: Runs HPCC. Specify the number of VMs with --num_vms
vm_groups:
default:
vm_spec: *default_single_core
vm_count: null
"""
SECONDS_PER_HOUR = 60 * 60
@dataclasses.dataclass(frozen=True)
class HpccDimensions:
  """Dimensions for the run.

  Replaces values in the data/hpccinf.txt file. For more details see
  http://www.netlib.org/benchmark/hpl/tuning.html . The value in quotes after
  the field name is the corresponding attribute name in the hpccinf.txt file.

  Attributes:
    problem_size: 'Ns': the problem size.
    block_size: 'NBs': number of blocks.
    num_rows: 'Ps': number of rows for each grid.
    num_columns: 'Qs': number of columns for each grid.
    pfacts: 'PFACTs': matrix-vector operation based factorization.
    nbmins: 'NBMINs': the number of columns at which to stop factorization.
    rfacts: 'RFACTs': type of recursive panel factorization.
    bcasts: 'BCASTs': methodology to broadcast the current panel.
    depths: 'DEPTHs': look-ahead depth.
    swap: swapping algorithm to use.
    l1: 'L1': whether the upper triangle of the panel of columns should be
      stored in transposed form.
    u: 'U': whether the panel of rows U should be stored in transposed form.
    equilibration: whether to enable the equilibration phase.
  """
  problem_size: int
  block_size: int
  num_rows: int
  num_columns: int
  pfacts: int
  nbmins: int
  rfacts: int
  bcasts: int
  depths: int
  swap: int
  l1: int
  u: int
  equilibration: int
# Translating the --hpcc_ flags into numbers in the HPL configuration file
PFACT_RFACT_MAPPING = {'left': 0, 'crout': 1, 'right': 2}
BCAST_MAPPING = {'1rg': 0, '1rM': 1, '2rg': 2, '2rM': 3, 'Lng': 4, 'LnM': 5}
SWAP_MAPPING = {'bin-exch': 0, 'long': 1, 'mix': 2}
# Note: HPL encodes L1/U booleans inversely (0 means "transposed form").
L1_U_MAPPING = {True: 0, False: 1}
EQUILIBRATION_MAPPING = {True: 1, False: 0}

flags.DEFINE_integer(
    'memory_size_mb', None,
    'The amount of memory in MB on each machine to use. By '
    'default it will use the entire system\'s memory.')
flags.DEFINE_string(
    'hpcc_binary', None,
    'The path of prebuilt hpcc binary to use. If not provided, '
    'this benchmark built its own using OpenBLAS.')
flags.DEFINE_list(
    'hpcc_mpi_env', [], 'Comma separated list containing environment variables '
    'to use with mpirun command. e.g. '
    'MKL_DEBUG_CPU_TYPE=7,MKL_ENABLE_INSTRUCTIONS=AVX512')
flags.DEFINE_float(
    'hpcc_timeout_hours', 4,
    'The number of hours to wait for the HPCC binary to '
    'complete before timing out and assuming it failed.')
flags.DEFINE_boolean(
    'hpcc_numa_binding', False,
    'If True, attempt numa binding with membind and cpunodebind.')

# HPL.dat configuration parameters
CONFIG_PROBLEM_SIZE = flags.DEFINE_integer(
    'hpcc_problem_size', None,
    'Size of problems to solve. Leave as None to run one single problem '
    'whose size is based on the amount of memory.')
CONFIG_BLOCK_SIZE = flags.DEFINE_integer(
    'hpcc_block_size', None,
    'Block size. Left as None to be based on the amount of memory.')
CONFIG_DIMENSIONS = flags.DEFINE_string(
    'hpcc_dimensions', None,
    'Number of rows and columns in the array: "1,2" is 1 row, 2 columns. '
    'Leave as None for computer to select based on number of CPUs.')
CONFIG_PFACTS = flags.DEFINE_enum(
    'hpcc_pfacts', 'right', sorted(PFACT_RFACT_MAPPING),
    'What type of matrix-vector operation based factorization to use.')
CONFIG_NBMINS = flags.DEFINE_integer(
    'hpcc_nbmins', 4,
    'The number of columns at which to stop panel factorization.')
CONFIG_RFACTS = flags.DEFINE_enum(
    'hpcc_rfacts', 'crout', sorted(PFACT_RFACT_MAPPING),
    'The type of recursive panel factorization to use.')
CONFIG_BCASTS = flags.DEFINE_enum(
    'hpcc_bcasts', '1rM', sorted(BCAST_MAPPING),
    'The broadcast methodology to use on the current panel.')
CONFIG_DEPTHS = flags.DEFINE_integer(
    'hpcc_depths', 1, 'Look ahead depth. '
    '0: next panel is factorized after current completely finished. '
    '1: next panel is immediately factorized after current is updated.')
CONFIG_SWAP = flags.DEFINE_enum('hpcc_swap', 'mix', sorted(SWAP_MAPPING),
                                'Swapping algorithm to use.')
CONFIG_L1 = flags.DEFINE_boolean(
    'hpcc_l1', True, 'Whether to store the upper triangle as transposed.')
CONFIG_U = flags.DEFINE_boolean('hpcc_u', True,
                                'Whether to store the U column as transposed.')
CONFIG_EQUILIBRATION = flags.DEFINE_boolean(
    'hpcc_equilibration', True, 'Whether to enable the equilibration phase.')
def GetConfig(user_config: Dict[Any, Any]) -> Dict[Any, Any]:
  """Loads the benchmark config, merging in any user-supplied overrides."""
  merged = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  return merged
def CheckPrerequisites(_) -> None:
  """Verifies that the required resources are present.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
    errors.Setup.InvalidFlagConfigurationError: On unsupported flag
      combinations.
  """
  # Fails early if the hpccinf template is not bundled with PKB.
  data.ResourcePath(LOCAL_HPCCINF_FILE)
  if FLAGS['hpcc_binary'].present:
    data.ResourcePath(FLAGS.hpcc_binary)
  if FLAGS.hpcc_numa_binding and FLAGS.num_vms > 1:
    # Fix: original message had a duplicated word ("with with").
    raise errors.Setup.InvalidFlagConfigurationError(
        'Numa binding with multiple hpcc vm not supported.')
  if CONFIG_DIMENSIONS.value:
    # --hpcc_dimensions must look like "<rows>,<columns>" with integer parts.
    parts = CONFIG_DIMENSIONS.value.split(',')
    if len(parts) != 2:
      raise errors.Setup.InvalidFlagConfigurationError(
          'For --hpcc_dimensions must have two values like "1,2" '
          f'not "{CONFIG_DIMENSIONS.value}"')
    if not (parts[0].isnumeric() and parts[1].isnumeric()):
      raise errors.Setup.InvalidFlagConfigurationError(
          '--hpcc_dimensions must be integers like "1,2" not '
          f'"{parts[0]},{parts[1]}"')
  if hpcc.USE_INTEL_COMPILED_HPL.value:
    if FLAGS.hpcc_benchmarks != ['HPL']:
      raise errors.Setup.InvalidFlagConfigurationError(
          'Intel compiled HPCC can only run linpack (--hpcc_benchmarks=HPL)')
def _CalculateHpccDimensions(num_vms: int, num_cpus: int,
                             vm_memory_size_actual: int) -> HpccDimensions:
  """Calculates the HPCC dimensions for the run.

  Args:
    num_vms: Number of VMs participating in the run.
    num_cpus: Benchmark CPUs per VM.
    vm_memory_size_actual: Free memory per VM — in KiB, given the *1024
      scaling below; used when --memory_size_mb is unset.

  Returns:
    HpccDimensions populated from flags, or derived from memory/CPU counts.
  """
  if FLAGS.memory_size_mb:
    total_memory = FLAGS.memory_size_mb * 1024 * 1024 * num_vms
  else:
    total_memory = vm_memory_size_actual * 1024 * num_vms
  total_cpus = num_cpus * num_vms
  block_size = CONFIG_BLOCK_SIZE.value or BLOCK_SIZE
  if CONFIG_PROBLEM_SIZE.value:
    problem_size = CONFIG_PROBLEM_SIZE.value
  else:
    # Finds a problem size that will fit in memory and is a multiple of the
    # block size.  (Scales N with sqrt of a fraction of total memory.)
    base_problem_size = math.sqrt(total_memory * .1)
    blocks = int(base_problem_size / block_size)
    # Round down to an even block count so the matrix splits evenly.
    blocks = blocks if (blocks % 2) == 0 else blocks - 1
    problem_size = block_size * blocks
  if CONFIG_DIMENSIONS.value:
    num_rows, num_columns = [
        int(item) for item in CONFIG_DIMENSIONS.value.split(',')
    ]
  else:
    # Makes the grid as 'square' as possible, with rows < columns.
    # i=1 always divides total_cpus, so the loop never reaches i=0.
    sqrt_cpus = int(math.sqrt(total_cpus)) + 1
    num_rows = 0
    num_columns = 0
    for i in reversed(list(range(sqrt_cpus))):
      if total_cpus % i == 0:
        num_rows = i
        num_columns = total_cpus // i
        break
  return HpccDimensions(
      problem_size=problem_size,
      block_size=block_size,
      num_rows=num_rows,
      num_columns=num_columns,
      pfacts=PFACT_RFACT_MAPPING[CONFIG_PFACTS.value],
      nbmins=CONFIG_NBMINS.value,
      rfacts=PFACT_RFACT_MAPPING[CONFIG_RFACTS.value],
      bcasts=BCAST_MAPPING[CONFIG_BCASTS.value],
      depths=CONFIG_DEPTHS.value,
      swap=SWAP_MAPPING[CONFIG_SWAP.value],
      l1=L1_U_MAPPING[CONFIG_L1.value],
      u=L1_U_MAPPING[CONFIG_U.value],
      equilibration=EQUILIBRATION_MAPPING[CONFIG_EQUILIBRATION.value])
def CreateHpccinf(vm: linux_vm.BaseLinuxVirtualMachine,
                  benchmark_spec: bm_spec.BenchmarkSpec) -> HpccDimensions:
  """Renders the HPCC input file onto vm; returns the dimensions used."""
  cluster_size = len(benchmark_spec.vms)
  dimensions = _CalculateHpccDimensions(cluster_size,
                                        vm.NumCpusForBenchmark(),
                                        vm.total_free_memory_kb)
  # Remove any stale input file before rendering a fresh one.
  vm.RemoteCommand(f'rm -f {HPCCINF_FILE}')
  vm.RenderTemplate(
      data.ResourcePath(LOCAL_HPCCINF_FILE),
      remote_path=HPCCINF_FILE,
      context=dataclasses.asdict(dimensions))
  return dimensions
def PrepareHpcc(vm: linux_vm.BaseLinuxVirtualMachine) -> None:
  """Builds HPCC on a single vm."""
  logging.info('Building HPCC on %s', vm)
  packages = ['hpcc']
  if FLAGS.hpcc_numa_binding:
    packages.append('numactl')
  for package in packages:
    vm.Install(package)
def PrepareBinaries(vms: List[linux_vm.BaseLinuxVirtualMachine]) -> None:
  """Prepare binaries on all vms."""
  if hpcc.USE_INTEL_COMPILED_HPL.value:
    # Intel-compiled HPL is NFS-shared rather than copied per VM.
    intelmpi.NfsExportIntelDirectory(vms)
    vm_util.RunThreaded(lambda vm: vm.Install('numactl'), vms)
    return
  headnode = vms[0]
  if FLAGS.hpcc_binary:
    headnode.PushFile(data.ResourcePath(FLAGS.hpcc_binary), './hpcc')
  else:
    headnode.RemoteCommand(f'cp {hpcc.HPCC_DIR}/hpcc hpcc')
  # Fan the binaries out from the headnode to every worker.
  vm_util.RunThreaded(lambda worker: _PrepareBinaries(headnode, worker),
                      vms[1:])
def _PrepareBinaries(headnode_vm: linux_vm.BaseLinuxVirtualMachine,
                     vm: linux_vm.BaseLinuxVirtualMachine) -> None:
  """Prepares the binaries on the vm."""
  vm.Install('fortran')
  # Ship the hpcc binary and the OpenMPI daemon from the headnode.
  for src, dst in (('hpcc', 'hpcc'), ('/usr/bin/orted', 'orted')):
    headnode_vm.MoveFile(vm, src, dst)
  vm.RemoteCommand('sudo mv orted /usr/bin/orted')
  if FLAGS.hpcc_math_library == hpcc.HPCC_MATH_LIBRARY_MKL:
    intel_repo.CopyIntelFiles(headnode_vm, vm)
def Prepare(benchmark_spec: bm_spec.BenchmarkSpec) -> None:
  """Install HPCC on the target vms.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.
  """
  vms = benchmark_spec.vms
  headnode = vms[0]
  # Build on the headnode, render the input file, then distribute.
  PrepareHpcc(headnode)
  CreateHpccinf(headnode, benchmark_spec)
  hpc_util.CreateMachineFile(vms, remote_path=MACHINEFILE)
  headnode.AuthenticateVm()
  PrepareBinaries(vms)
def BaseMetadata() -> Dict[str, str]:
  """Returns sample metadata derived from the hpcc-related flag values."""
  metadata = {}
  metadata['memory_size_mb'] = FLAGS.memory_size_mb
  if FLAGS['hpcc_binary'].present:
    metadata['override_binary'] = FLAGS.hpcc_binary
  if FLAGS['hpcc_mpi_env'].present:
    metadata['mpi_env'] = FLAGS.hpcc_mpi_env
  metadata['hpcc_math_library'] = FLAGS.hpcc_math_library
  metadata['hpcc_version'] = hpcc.HPCC_VERSION
  if FLAGS.hpcc_benchmarks:
    metadata['hpcc_benchmarks'] = FLAGS.hpcc_benchmarks
  # Record the version of whichever math library backed the build.
  if FLAGS.hpcc_math_library == hpcc.HPCC_MATH_LIBRARY_MKL:
    metadata['math_library_version'] = mkl.MKL_VERSION.value
  elif FLAGS.hpcc_math_library == hpcc.HPCC_MATH_LIBRARY_OPEN_BLAS:
    metadata['math_library_version'] = openblas.GIT_TAG
  metadata['openmpi_version'] = FLAGS.openmpi_version
  if FLAGS.hpcc_numa_binding:
    metadata['hpcc_numa_binding'] = FLAGS.hpcc_numa_binding
  # 'intel' == precompiled Intel linpack; 'source' == built from source.
  if hpcc.USE_INTEL_COMPILED_HPL.value:
    metadata['hpcc_origin'] = 'intel'
    metadata['intel_mpi_version'] = intelmpi.MPI_VERSION.value
  else:
    metadata['hpcc_origin'] = 'source'
  return metadata
def ParseOutput(hpcc_output: str) -> List[sample.Sample]:
  """Parses the output from HPCC.

  Args:
    hpcc_output: A string containing the text of hpccoutf.txt.

  Returns:
    A list of samples to be published (in the same format as Run() returns).
  """
  results = []

  # Parse all metrics from metric=value lines in the HPCC output.
  metric_values = regex_util.ExtractAllFloatMetrics(hpcc_output)

  # For each benchmark that is run, collect the metrics and metadata for that
  # benchmark from the metric_values map.
  benchmarks_run = FLAGS.hpcc_benchmarks or hpcc.HPCC_METRIC_MAP
  for benchmark in benchmarks_run:
    for metric, units in hpcc.HPCC_METRIC_MAP[benchmark].items():
      # NOTE(review): assumes every mapped metric is present in the output;
      # a partial run would raise KeyError here — confirm that is intended.
      value = metric_values[metric]

      # Common metadata for all runs done in Run's call to _AddCommonMetadata
      metadata = {
          metadata_item: metric_values[metadata_item]
          for metadata_item in hpcc.HPCC_METADATA_MAP[benchmark]
      }
      results.append(sample.Sample(metric, value, units, metadata))
  return results
def Run(benchmark_spec: bm_spec.BenchmarkSpec) -> List[sample.Sample]:
  """Run HPCC on the cluster.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  # Recreate the HPL config file with each run in case parameters change.
  dimensions = CreateHpccinf(benchmark_spec.vms[0], benchmark_spec)
  logging.info('HPL.dat dimensions: %s', dimensions)
  if hpcc.USE_INTEL_COMPILED_HPL.value:
    results = [RunIntelLinpack(benchmark_spec.vms, dimensions)]
  else:
    results = RunHpccSource(benchmark_spec.vms)
  _AddCommonMetadata(results, benchmark_spec, dataclasses.asdict(dimensions))
  return results
def _AddCommonMetadata(samples: List[sample.Sample],
                       benchmark_spec: bm_spec.BenchmarkSpec,
                       dimensions: Dict[str, Any]) -> None:
  """Adds metadata common to all samples."""
  base = BaseMetadata()  # flag-derived metadata is identical for every sample
  machine_count = len(benchmark_spec.vms)
  for entry in samples:
    entry.metadata.update(base)
    entry.metadata['num_machines'] = machine_count
    entry.metadata.update(dimensions)
def RunHpccSource(
    vms: List[linux_vm.BaseLinuxVirtualMachine]) -> List[sample.Sample]:
  """Returns the parsed output from running the compiled from source HPCC."""
  headnode_vm = vms[0]
  # Backup existing HPCC output, if any, with a timestamped name.
  headnode_vm.RemoteCommand(('if [ -f hpccoutf.txt ]; then '
                             'mv hpccoutf.txt hpccoutf-$(date +%s).txt; '
                             'fi'))
  num_processes = len(vms) * headnode_vm.NumCpusForBenchmark()
  run_as_root = '--allow-run-as-root' if FLAGS.mpirun_allow_run_as_root else ''
  # Disable strict host key checking so mpirun can reach freshly created VMs.
  mpi_flags = (f'-machinefile {MACHINEFILE} --mca orte_rsh_agent '
               f'"ssh -o StrictHostKeyChecking=no" {run_as_root} {_MpiEnv()}')
  mpi_cmd = 'mpirun '
  hpcc_exec = './hpcc'
  if FLAGS.hpcc_math_library == hpcc.HPCC_MATH_LIBRARY_MKL:
    # Must exec HPCC wrapper script to pickup location of libiomp5.so
    vm_util.RunThreaded(_CreateHpccWrapper, vms)
    hpcc_exec = f'./{HPCC_WRAPPER}'
  if FLAGS.hpcc_numa_binding:
    # One mpirun segment per NUMA node, binding CPU and memory to that node.
    numa_map = numactl.GetNuma(headnode_vm)
    numa_hpcc_cmd = []
    for node, num_cpus in numa_map.items():
      numa_hpcc_cmd.append(f'-np {num_cpus} {mpi_flags} '
                           f'numactl --cpunodebind {node} '
                           f'--membind {node} {hpcc_exec}')
    mpi_cmd += ' : '.join(numa_hpcc_cmd)
  else:
    mpi_cmd += f'-np {num_processes} {mpi_flags} {hpcc_exec}'
  headnode_vm.RobustRemoteCommand(
      mpi_cmd, timeout=int(FLAGS.hpcc_timeout_hours * SECONDS_PER_HOUR))
  logging.info('HPCC Results:')
  stdout, _ = headnode_vm.RemoteCommand('cat hpccoutf.txt', should_log=True)
  if stdout.startswith('HPL ERROR'):
    # Annoyingly the mpi_cmd will succeed when there is an HPL error
    raise errors.Benchmarks.RunError(f'Error running HPL: {stdout}')
  return ParseOutput(stdout)
def _CreateHpccWrapper(vm: linux_vm.BaseLinuxVirtualMachine) -> None:
  """Creates a bash script to run HPCC on the VM.

  This is required for when MKL is installed via the Intel repos as the
  libiomp5.so file is not in /lib but rather in one found via sourcing the
  mklvars.sh file.

  Args:
    vm: Virtual machine to put file on.
  """
  script_lines = ['#!/bin/bash', mkl.SOURCE_MKL_INTEL64_CMD, './hpcc']
  script_body = '\n'.join(script_lines)
  vm_util.CreateRemoteFile(vm, script_body, HPCC_WRAPPER)
  vm.RemoteCommand(f'chmod +x {HPCC_WRAPPER}')
def _MpiEnv(mpi_flag: str = '-x') -> str:
  """Returns the --hpcc_mpi_env flags as a string for the mpirun command."""
  pieces = []
  for env_var in FLAGS.hpcc_mpi_env:
    pieces.append(f'{mpi_flag} {env_var}')
  return ' '.join(pieces)
def Cleanup(benchmark_spec: bm_spec.BenchmarkSpec) -> None:
  """Cleanup HPCC on the cluster.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.
  """
  vms = benchmark_spec.vms
  headnode, workers = vms[0], vms[1:]
  # Headnode holds the binaries/outputs plus the machinefile.
  headnode.RemoveFile('hpcc*')
  headnode.RemoveFile(MACHINEFILE)
  for worker in workers:
    worker.RemoveFile('hpcc')
    worker.RemoveFile('/usr/bin/orted')
def RunIntelLinpack(vms: List[linux_vm.BaseLinuxVirtualMachine],
                    dimensions: HpccDimensions) -> sample.Sample:
  """Returns the parsed output from running the Intel compiled HPCC.

  Unlike the compiled from source linpack run the Intel compiled linpack can
  handle being cut off after --hpcc_timeout_hours as it parses the continuous
  output of linpack, reporting the last value found as the HPL_Tflops.

  The metadata argument value for "last_fraction_completed" is how much of the
  run was completed before being cut off.

  Args:
    vms: List of VMs to run benchmark on.
    dimensions: The HPCC configuration.

  Returns: Sample of the HPL_Tflops for the run.
  """
  vm = vms[0]
  # Compiled from source HPL uses hpccinf.txt, one from Intel uses HPL.dat
  vm.RemoteCommand(f'cp {HPCCINF_FILE} HPL.dat')
  mpi_cmd, num_processes = _CreateIntelMpiRunCommand(vms, dimensions)
  # ignore_failure: a timed-out/cut-off run still yields parsable stdout.
  run_cmd_txt, _ = vm.RobustRemoteCommand(
      mpi_cmd,
      ignore_failure=True,
      timeout=int(FLAGS.hpcc_timeout_hours * SECONDS_PER_HOUR))
  file_text, _ = vm.RemoteCommand('cat HPL.out', ignore_failure=True)
  tflops, metadata = _ParseIntelLinpackStdout(run_cmd_txt)
  if file_text:
    # HPL ran to completion, use the tflops from the file output
    tflops = _ParseIntelLinpackOutputFile(file_text)
    metadata['full'] = True
  else:
    # HPL timed out but have fractional metadata
    metadata['full'] = False
  metadata.update({
      'num_processes': num_processes,
      'per_host': vm.numa_node_count,
      'mpi_cmd': mpi_cmd,
  })
  return sample.Sample('HPL_Tflops', tflops, 'Tflops/s', metadata)
def _CreateIntelMpiRunCommand(vms: List[linux_vm.BaseLinuxVirtualMachine],
                              dimensions: HpccDimensions) -> Tuple[str, int]:
  """Creates the command to run HPL for Intel compiled linpack.

  Args:
    vms: List of virtual machines to run on.
    dimensions: The HpccDimensions for the run.

  Returns:
    Tuple of the mpirun command and the number of processes to be used.
  """
  headnode = vms[0]
  # Create the file for mpirun to execute; HPL_HOST_NODE pins each rank to a
  # NUMA node (rank modulo node count).
  hpl_path = '/opt/intel/mkl/benchmarks/mp_linpack/xhpl_intel64_static'
  bash_script = inspect.cleandoc(f'''
      #!/bin/bash
      export HPL_HOST_NODE=$((PMI_RANK % {headnode.numa_node_count}))
      {hpl_path}
      ''')
  run_file = './hpl_run'
  for vm in vms:
    vm_util.CreateRemoteFile(vm, bash_script + '\n', run_file)
    vm.RemoteCommand(f'chmod +x {run_file}')
  logging.info('Using precompiled HPL at %s', hpl_path)
  # One MPI process per grid cell (Ps x Qs).
  num_processes = dimensions.num_rows * dimensions.num_columns
  hosts = ','.join([vm.internal_ip for vm in vms])
  # Bug fix: previously _MpiEnv('-genv') was nested inside a single-quoted
  # f-string, which is a SyntaxError on Python < 3.12; hoist it out.
  mpi_env = _MpiEnv('-genv')
  mpi_cmd = (f'{intelmpi.SourceMpiVarsCommand(headnode)}; '
             'mpirun '
             f'-perhost {headnode.numa_node_count} {mpi_env} '
             f'-np {num_processes} -host {hosts} {run_file}')
  return mpi_cmd, num_processes
def _ParseIntelLinpackOutputFile(file_text: str) -> float:
"""Returns the tflops for the hpcc run.
The last entry that matches
WR11C2R4 50688 192 6 10 551.85 1.57334e+02
is the Gflops for the run: 157.33
Args:
file_text: The hpcc output file contents.
"""
line_re = re.compile(r'\s+'.join([
r'WR\S+', r'\d+', r'\d+', r'\d+', r'\d+', r'\d+\.\d+', r'([\d\.e\+\-]+)'
]))
gflops = None
for line in file_text.splitlines():
match = line_re.match(line)
if match:
gflops = float(match[1])
return gflops / 1000
def _ParseIntelLinpackStdout(stdout: str) -> Tuple[float, Dict[str, float]]:
  """Parse the stdout of Intel HPL returning a condensed sample of results.

  Sample stdout:
  pkb-123-0 : Column=000576 Fraction=0.005 Kernel= 0.58 Mflops=1265648.19
  pkb-123-0 : Column=001152 Fraction=0.010 Kernel=969908.14 Mflops=1081059.81
  pkb-123-0 : Column=001728 Fraction=0.015 Kernel=956391.64 Mflops=1040609.60

  Return:
  1.0406096,
  {'fractions': '0.01,0.015',
  'kernel_tflops': '0.96990814,0.95639164',
  'last_fraction_completed': 0.015,
  'tflops': '1.08105981,1.0406096'
  }

  Args:
    stdout: The stdout text from running HPL

  Returns:
    Tuple of the tflops/s and a dict of the fractional run information.

  Raises:
    ValueError: If no metrics could be found.
  """
  # Each progress line reports a per-column kernel rate and cumulative Mflops.
  line_re = re.compile(
      r"""Column=\s*(?P<column>\d+)
      \s*Fraction=\s*(?P<fraction>[\d\.]+)
      \s*Kernel=\s*(?P<kernel>[\d\.]+)
      \s*Mflops=\s*(?P<mflops>[\d\.]+)""", re.X)
  fractions = []
  kernel_tflops = []
  tflops = []
  line_matches = line_re.finditer(stdout)
  try:
    next(line_matches)  # first outputted values are artificially low
  except StopIteration:
    raise ValueError(
        f'Could not find a line in stdout to match {line_re.pattern}: {stdout}')
  for line_match in line_matches:
    fractions.append(float(line_match['fraction']))
    # Convert Mflops -> Tflops.
    kernel_tflops.append(float(line_match['kernel']) / 1e6)
    tflops.append(float(line_match['mflops']) / 1e6)
  if not tflops:
    raise ValueError('No metrics found in stdout')
  # Grab all the I_MPI* environment variables in debug output to put in metadata
  intel_env_re = re.compile(r'(.*MPI startup.*?)?\s*'
                            r'(?P<key>I_MPI[A-Z_\d]+)=(?P<value>.*)\s*')
  env_vars = {row['key']: row['value'] for row in intel_env_re.finditer(stdout)}
  # The per-run UUID would make otherwise-identical metadata differ.
  env_vars.pop('I_MPI_HYDRA_UUID', None)
  metadata = {
      'fractions': ','.join([str(x) for x in fractions]),
      'kernel_tflops': ','.join([str(x) for x in kernel_tflops]),
      'tflops': ','.join([str(x) for x in tflops]),
      'last_fraction_completed': fractions[-1],
      'intel_mpi_env': vm_util.DictionaryToEnvString(env_vars, ';')
  }
  return tflops[-1], metadata
| # Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs HPC Challenge.
Homepage: http://icl.cs.utk.edu/hpcc/
Most of the configuration of the HPC-Challenge revolves around HPL, the rest of
the HPCC piggybacks upon the HPL configration.
Homepage: http://www.netlib.org/benchmark/hpl/
HPL requires a BLAS library (Basic Linear Algebra Subprograms)
OpenBlas: http://www.openblas.net/
Intel MKL: https://software.intel.com/en-us/mkl
HPL also requires a MPI (Message Passing Interface) Library
OpenMPI: http://www.open-mpi.org/
MPI needs to be configured:
Configuring MPI:
http://techtinkering.com/2009/12/02/setting-up-a-beowulf-cluster-using-open-mpi-on-linux/
Once HPL is built the configuration file must be created:
Configuring HPL.dat:
http://www.advancedclustering.com/faq/how-do-i-tune-my-hpldat-file.html
http://www.netlib.org/benchmark/hpl/faqs.html
"""
import inspect
import logging
import math
import re
from typing import Any, Dict, List, Tuple
from absl import flags
import dataclasses
from perfkitbenchmarker import benchmark_spec as bm_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import hpc_util
from perfkitbenchmarker import linux_virtual_machine as linux_vm
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import hpcc
from perfkitbenchmarker.linux_packages import intel_repo
from perfkitbenchmarker.linux_packages import intelmpi
from perfkitbenchmarker.linux_packages import mkl
from perfkitbenchmarker.linux_packages import numactl
from perfkitbenchmarker.linux_packages import openblas
FLAGS = flags.FLAGS
LOCAL_HPCCINF_FILE = 'hpccinf.j2'
HPCCINF_FILE = 'hpccinf.txt'
MACHINEFILE = 'machinefile'
BLOCK_SIZE = 192
STREAM_METRICS = ['Copy', 'Scale', 'Add', 'Triad']
MKL_TGZ = 'l_mkl_2018.2.199.tgz'
BENCHMARK_DATA = {
# Intel MKL package downloaded from:
# https://software.intel.com/en-us/mkl
# In order to get "l_mkl_2018.2.199.tgz", please choose the product
# "Intel Performance Libraries for Linux*", choose the version
# "2018 Update 2" and choose the download option "Intel
# Math Kernel Library(Intel Mkl)".
MKL_TGZ: 'e28d12173bef9e615b0ded2f95f59a42b3e9ad0afa713a79f8801da2bfb31936',
}
# File for mpirun to run that calls ./hpcc
HPCC_WRAPPER = 'hpcc_wrapper.sh'
BENCHMARK_NAME = 'hpcc'
BENCHMARK_CONFIG = """
hpcc:
description: Runs HPCC. Specify the number of VMs with --num_vms
vm_groups:
default:
vm_spec: *default_single_core
vm_count: null
"""
SECONDS_PER_HOUR = 60 * 60
@dataclasses.dataclass(frozen=True)
class HpccDimensions:
  """Dimensions for the run.

  Replaces values in the data/hpccinf.txt file. For more details see
  http://www.netlib.org/benchmark/hpl/tuning.html . The value in quotes after
  the field name is the corresponding attribute name in the hpccinf.txt file.

  Attributes:
    problem_size: 'Ns': the problem size.
    block_size: 'NBs': number of blocks.
    num_rows: 'Ps': number of rows for each grid.
    num_columns: 'Qs': number of columns for each grid.
    pfacts: 'PFACTs': matrix-vector operation based factorization.
    nbmins: 'NBMINs': the number of columns at which to stop factorization.
    rfacts: 'RFACTs': type of recursive panel factorization.
    bcasts: 'BCASTs': methodology to broadcast the current panel.
    depths: 'DEPTHs': look-ahead depth.
    swap: swapping algorithm to use.
    l1: 'L1': whether the upper triangle of the panel of columns should be
      stored in transposed form.
    u: 'U': whether the panel of rows U should be stored in transposed form.
    equilibration: whether to enable the equilibration phase.
  """
  problem_size: int
  block_size: int
  num_rows: int
  num_columns: int
  pfacts: int
  nbmins: int
  rfacts: int
  bcasts: int
  depths: int
  swap: int
  l1: int
  u: int
  equilibration: int
# Translating the --hpcc_ flags into numbers in the HPL configuration file
PFACT_RFACT_MAPPING = {'left': 0, 'crout': 1, 'right': 2}
BCAST_MAPPING = {'1rg': 0, '1rM': 1, '2rg': 2, '2rM': 3, 'Lng': 4, 'LnM': 5}
SWAP_MAPPING = {'bin-exch': 0, 'long': 1, 'mix': 2}
# Note: HPL encodes L1/U booleans inversely (0 means "transposed form").
L1_U_MAPPING = {True: 0, False: 1}
EQUILIBRATION_MAPPING = {True: 1, False: 0}

flags.DEFINE_integer(
    'memory_size_mb', None,
    'The amount of memory in MB on each machine to use. By '
    'default it will use the entire system\'s memory.')
flags.DEFINE_string(
    'hpcc_binary', None,
    'The path of prebuilt hpcc binary to use. If not provided, '
    'this benchmark built its own using OpenBLAS.')
flags.DEFINE_list(
    'hpcc_mpi_env', [], 'Comma separated list containing environment variables '
    'to use with mpirun command. e.g. '
    'MKL_DEBUG_CPU_TYPE=7,MKL_ENABLE_INSTRUCTIONS=AVX512')
flags.DEFINE_float(
    'hpcc_timeout_hours', 4,
    'The number of hours to wait for the HPCC binary to '
    'complete before timing out and assuming it failed.')
flags.DEFINE_boolean(
    'hpcc_numa_binding', False,
    'If True, attempt numa binding with membind and cpunodebind.')

# HPL.dat configuration parameters
CONFIG_PROBLEM_SIZE = flags.DEFINE_integer(
    'hpcc_problem_size', None,
    'Size of problems to solve. Leave as None to run one single problem '
    'whose size is based on the amount of memory.')
CONFIG_BLOCK_SIZE = flags.DEFINE_integer(
    'hpcc_block_size', None,
    'Block size. Left as None to be based on the amount of memory.')
CONFIG_DIMENSIONS = flags.DEFINE_string(
    'hpcc_dimensions', None,
    'Number of rows and columns in the array: "1,2" is 1 row, 2 columns. '
    'Leave as None for computer to select based on number of CPUs.')
CONFIG_PFACTS = flags.DEFINE_enum(
    'hpcc_pfacts', 'right', sorted(PFACT_RFACT_MAPPING),
    'What type of matrix-vector operation based factorization to use.')
CONFIG_NBMINS = flags.DEFINE_integer(
    'hpcc_nbmins', 4,
    'The number of columns at which to stop panel factorization.')
CONFIG_RFACTS = flags.DEFINE_enum(
    'hpcc_rfacts', 'crout', sorted(PFACT_RFACT_MAPPING),
    'The type of recursive panel factorization to use.')
CONFIG_BCASTS = flags.DEFINE_enum(
    'hpcc_bcasts', '1rM', sorted(BCAST_MAPPING),
    'The broadcast methodology to use on the current panel.')
CONFIG_DEPTHS = flags.DEFINE_integer(
    'hpcc_depths', 1, 'Look ahead depth. '
    '0: next panel is factorized after current completely finished. '
    '1: next panel is immediately factorized after current is updated.')
CONFIG_SWAP = flags.DEFINE_enum('hpcc_swap', 'mix', sorted(SWAP_MAPPING),
                                'Swapping algorithm to use.')
CONFIG_L1 = flags.DEFINE_boolean(
    'hpcc_l1', True, 'Whether to store the upper triangle as transposed.')
CONFIG_U = flags.DEFINE_boolean('hpcc_u', True,
                                'Whether to store the U column as transposed.')
CONFIG_EQUILIBRATION = flags.DEFINE_boolean(
    'hpcc_equilibration', True, 'Whether to enable the equilibration phase.')
def GetConfig(user_config: Dict[Any, Any]) -> Dict[Any, Any]:
  """Returns the benchmark config merged with any user-supplied overrides."""
  merged_config = configs.LoadConfig(BENCHMARK_CONFIG, user_config,
                                     BENCHMARK_NAME)
  return merged_config
def CheckPrerequisites(_) -> None:
  """Verifies that the required resources are present.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
    errors.Setup.InvalidFlagConfigurationError: On unsupported or malformed
      flag combinations.
    NotImplementedError: On certain flag combination not currently supported.
  """
  data.ResourcePath(LOCAL_HPCCINF_FILE)
  if FLAGS['hpcc_binary'].present:
    data.ResourcePath(FLAGS.hpcc_binary)
  if FLAGS.hpcc_numa_binding and FLAGS.num_vms > 1:
    # Fixed duplicated word in the error message ("with with").
    raise errors.Setup.InvalidFlagConfigurationError(
        'Numa binding with multiple hpcc vm not supported.')
  if CONFIG_DIMENSIONS.value:
    parts = CONFIG_DIMENSIONS.value.split(',')
    if len(parts) != 2:
      raise errors.Setup.InvalidFlagConfigurationError(
          'For --hpcc_dimensions must have two values like "1,2" '
          f'not "{CONFIG_DIMENSIONS.value}"')
    if not (parts[0].isnumeric() and parts[1].isnumeric()):
      raise errors.Setup.InvalidFlagConfigurationError(
          '--hpcc_dimensions must be integers like "1,2" not '
          f'"{parts[0]},{parts[1]}"')
  if hpcc.USE_INTEL_COMPILED_HPL.value:
    # The Intel-compiled binary only implements the linpack portion of HPCC.
    if FLAGS.hpcc_benchmarks != ['HPL']:
      raise errors.Setup.InvalidFlagConfigurationError(
          'Intel compiled HPCC can only run linpack (--hpcc_benchmarks=HPL)')
def _CalculateHpccDimensions(num_vms: int, num_cpus: int,
                             vm_memory_size_actual: int) -> HpccDimensions:
  """Calculates the HPCC dimensions for the run.

  Args:
    num_vms: Number of VMs in the cluster.
    num_cpus: Benchmark CPUs per VM.
    vm_memory_size_actual: Per-VM free memory in KiB (used when
      --memory_size_mb is not set).

  Returns:
    HpccDimensions holding the HPL.dat parameters for the run.
  """
  if FLAGS.memory_size_mb:
    total_memory = FLAGS.memory_size_mb * 1024 * 1024 * num_vms
  else:
    # vm_memory_size_actual is in KiB; scale to bytes.
    total_memory = vm_memory_size_actual * 1024 * num_vms
  total_cpus = num_cpus * num_vms
  block_size = CONFIG_BLOCK_SIZE.value or BLOCK_SIZE
  if CONFIG_PROBLEM_SIZE.value:
    problem_size = CONFIG_PROBLEM_SIZE.value
  else:
    # Finds a problem size that will fit in memory and is a multiple of the
    # block size.  The 0.1 scaling is an empirical memory-fill factor
    # inherited from earlier tuning -- confirm before changing.
    base_problem_size = math.sqrt(total_memory * .1)
    blocks = int(base_problem_size / block_size)
    # Round down to an even number of blocks.
    blocks = blocks if (blocks % 2) == 0 else blocks - 1
    problem_size = block_size * blocks
  if CONFIG_DIMENSIONS.value:
    # Format already validated by CheckPrerequisites ("rows,columns").
    num_rows, num_columns = [
        int(item) for item in CONFIG_DIMENSIONS.value.split(',')
    ]
  else:
    # Makes the grid as 'square' as possible, with rows < columns.
    # The largest divisor of total_cpus that is <= sqrt(total_cpus) becomes
    # the row count; i == 1 always divides, so the loop breaks before i == 0.
    sqrt_cpus = int(math.sqrt(total_cpus)) + 1
    num_rows = 0
    num_columns = 0
    for i in reversed(list(range(sqrt_cpus))):
      if total_cpus % i == 0:
        num_rows = i
        num_columns = total_cpus // i
        break
  return HpccDimensions(
      problem_size=problem_size,
      block_size=block_size,
      num_rows=num_rows,
      num_columns=num_columns,
      pfacts=PFACT_RFACT_MAPPING[CONFIG_PFACTS.value],
      nbmins=CONFIG_NBMINS.value,
      rfacts=PFACT_RFACT_MAPPING[CONFIG_RFACTS.value],
      bcasts=BCAST_MAPPING[CONFIG_BCASTS.value],
      depths=CONFIG_DEPTHS.value,
      swap=SWAP_MAPPING[CONFIG_SWAP.value],
      l1=L1_U_MAPPING[CONFIG_L1.value],
      u=L1_U_MAPPING[CONFIG_U.value],
      equilibration=EQUILIBRATION_MAPPING[CONFIG_EQUILIBRATION.value])
def CreateHpccinf(vm: linux_vm.BaseLinuxVirtualMachine,
                  benchmark_spec: bm_spec.BenchmarkSpec) -> HpccDimensions:
  """Renders the HPCC input file on the VM and returns the dimensions used."""
  cluster_size = len(benchmark_spec.vms)
  dimensions = _CalculateHpccDimensions(cluster_size,
                                        vm.NumCpusForBenchmark(),
                                        vm.total_free_memory_kb)
  # Drop any stale input file before rendering a fresh one.
  vm.RemoteCommand(f'rm -f {HPCCINF_FILE}')
  vm.RenderTemplate(
      data.ResourcePath(LOCAL_HPCCINF_FILE),
      remote_path=HPCCINF_FILE,
      context=dataclasses.asdict(dimensions))
  return dimensions
def PrepareHpcc(vm: linux_vm.BaseLinuxVirtualMachine) -> None:
  """Installs HPCC (plus numactl when NUMA binding is requested) on one VM."""
  logging.info('Building HPCC on %s', vm)
  vm.Install('hpcc')
  if not FLAGS.hpcc_numa_binding:
    return
  vm.Install('numactl')
def PrepareBinaries(vms: List[linux_vm.BaseLinuxVirtualMachine]) -> None:
  """Distributes the HPCC binary (or the Intel HPL setup) to every VM."""
  if hpcc.USE_INTEL_COMPILED_HPL.value:
    # Intel-compiled HPL is shared over NFS; each VM only needs numactl.
    intelmpi.NfsExportIntelDirectory(vms)
    vm_util.RunThreaded(lambda vm: vm.Install('numactl'), vms)
    return
  headnode = vms[0]
  if FLAGS.hpcc_binary:
    # A user-supplied binary overrides the one compiled from source.
    headnode.PushFile(data.ResourcePath(FLAGS.hpcc_binary), './hpcc')
  else:
    headnode.RemoteCommand(f'cp {hpcc.HPCC_DIR}/hpcc hpcc')
  # Fan the binary out from the headnode to all remaining VMs in parallel.
  vm_util.RunThreaded(lambda vm: _PrepareBinaries(headnode, vm), vms[1:])
def _PrepareBinaries(headnode_vm: linux_vm.BaseLinuxVirtualMachine,
                     vm: linux_vm.BaseLinuxVirtualMachine) -> None:
  """Copies hpcc and orted (plus MKL files if needed) from headnode to vm."""
  vm.Install('fortran')
  # Ship both the benchmark binary and the OpenMPI daemon to the worker.
  for source_path, dest_path in (('hpcc', 'hpcc'),
                                 ('/usr/bin/orted', 'orted')):
    headnode_vm.MoveFile(vm, source_path, dest_path)
  vm.RemoteCommand('sudo mv orted /usr/bin/orted')
  if FLAGS.hpcc_math_library == hpcc.HPCC_MATH_LIBRARY_MKL:
    intel_repo.CopyIntelFiles(headnode_vm, vm)
def Prepare(benchmark_spec: bm_spec.BenchmarkSpec) -> None:
  """Install HPCC on the target vms.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.
  """
  cluster_vms = benchmark_spec.vms
  headnode = cluster_vms[0]
  PrepareHpcc(headnode)
  CreateHpccinf(headnode, benchmark_spec)
  hpc_util.CreateMachineFile(cluster_vms, remote_path=MACHINEFILE)
  # Enable passwordless SSH from the headnode so mpirun can reach workers.
  headnode.AuthenticateVm()
  PrepareBinaries(cluster_vms)
def BaseMetadata() -> Dict[str, str]:
  """Builds the sample metadata derived from hpcc-related flag values."""
  metadata = {'memory_size_mb': FLAGS.memory_size_mb}
  if FLAGS['hpcc_binary'].present:
    metadata['override_binary'] = FLAGS.hpcc_binary
  if FLAGS['hpcc_mpi_env'].present:
    metadata['mpi_env'] = FLAGS.hpcc_mpi_env
  metadata['hpcc_math_library'] = FLAGS.hpcc_math_library
  metadata['hpcc_version'] = hpcc.HPCC_VERSION
  if FLAGS.hpcc_benchmarks:
    metadata['hpcc_benchmarks'] = FLAGS.hpcc_benchmarks
  # Record the version of whichever math library backs this run.
  math_library = FLAGS.hpcc_math_library
  if math_library == hpcc.HPCC_MATH_LIBRARY_MKL:
    metadata['math_library_version'] = mkl.MKL_VERSION.value
  elif math_library == hpcc.HPCC_MATH_LIBRARY_OPEN_BLAS:
    metadata['math_library_version'] = openblas.GIT_TAG
  metadata['openmpi_version'] = FLAGS.openmpi_version
  if FLAGS.hpcc_numa_binding:
    metadata['hpcc_numa_binding'] = FLAGS.hpcc_numa_binding
  # Distinguish Intel-prebuilt runs from source-compiled runs.
  if hpcc.USE_INTEL_COMPILED_HPL.value:
    metadata['hpcc_origin'] = 'intel'
    metadata['intel_mpi_version'] = intelmpi.MPI_VERSION.value
  else:
    metadata['hpcc_origin'] = 'source'
  return metadata
def ParseOutput(hpcc_output: str) -> List[sample.Sample]:
  """Parses the output from HPCC.

  Args:
    hpcc_output: A string containing the text of hpccoutf.txt.

  Returns:
    A list of samples to be published (in the same format as Run() returns).
  """
  # Every metric appears as a "metric=value" line in the HPCC output.
  metric_values = regex_util.ExtractAllFloatMetrics(hpcc_output)
  # Collect metrics and per-benchmark metadata only for the benchmarks that
  # were actually selected (all of them when --hpcc_benchmarks is unset).
  selected_benchmarks = FLAGS.hpcc_benchmarks or hpcc.HPCC_METRIC_MAP
  results = []
  for benchmark in selected_benchmarks:
    metadata_keys = hpcc.HPCC_METADATA_MAP[benchmark]
    for metric, units in hpcc.HPCC_METRIC_MAP[benchmark].items():
      # Common metadata for all runs is added later by _AddCommonMetadata.
      metadata = {key: metric_values[key] for key in metadata_keys}
      results.append(
          sample.Sample(metric, metric_values[metric], units, metadata))
  return results
def Run(benchmark_spec: bm_spec.BenchmarkSpec) -> List[sample.Sample]:
  """Run HPCC on the cluster.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  # Recreate the HPL config file on every run in case parameters changed.
  dimensions = CreateHpccinf(benchmark_spec.vms[0], benchmark_spec)
  logging.info('HPL.dat dimensions: %s', dimensions)
  if hpcc.USE_INTEL_COMPILED_HPL.value:
    results = [RunIntelLinpack(benchmark_spec.vms, dimensions)]
  else:
    results = RunHpccSource(benchmark_spec.vms)
  _AddCommonMetadata(results, benchmark_spec, dataclasses.asdict(dimensions))
  return results
def _AddCommonMetadata(samples: List[sample.Sample],
                       benchmark_spec: bm_spec.BenchmarkSpec,
                       dimensions: Dict[str, Any]) -> None:
  """Adds metadata common to all samples."""
  # Flag-derived metadata is identical for every sample; build it once.
  shared_metadata = BaseMetadata()
  machine_count = len(benchmark_spec.vms)
  for entry in samples:
    entry.metadata.update(shared_metadata)
    entry.metadata['num_machines'] = machine_count
    entry.metadata.update(dimensions)
def RunHpccSource(
    vms: List[linux_vm.BaseLinuxVirtualMachine]) -> List[sample.Sample]:
  """Returns the parsed output from running the compiled from source HPCC.

  Args:
    vms: All VMs in the cluster; vms[0] acts as the mpirun headnode.

  Returns:
    List of samples parsed from hpccoutf.txt.

  Raises:
    errors.Benchmarks.RunError: If HPL reports an error in its output.
  """
  headnode_vm = vms[0]
  # backup existing HPCC output, if any
  headnode_vm.RemoteCommand(('if [ -f hpccoutf.txt ]; then '
                             'mv hpccoutf.txt hpccoutf-$(date +%s).txt; '
                             'fi'))
  # One MPI process per benchmark CPU across the whole cluster.
  num_processes = len(vms) * headnode_vm.NumCpusForBenchmark()
  run_as_root = '--allow-run-as-root' if FLAGS.mpirun_allow_run_as_root else ''
  mpi_flags = (f'-machinefile {MACHINEFILE} --mca orte_rsh_agent '
               f'"ssh -o StrictHostKeyChecking=no" {run_as_root} {_MpiEnv()}')
  mpi_cmd = 'mpirun '
  hpcc_exec = './hpcc'
  if FLAGS.hpcc_math_library == hpcc.HPCC_MATH_LIBRARY_MKL:
    # Must exec HPCC wrapper script to pickup location of libiomp5.so
    vm_util.RunThreaded(_CreateHpccWrapper, vms)
    hpcc_exec = f'./{HPCC_WRAPPER}'
  if FLAGS.hpcc_numa_binding:
    # Build one mpirun application context per NUMA node, joined with ':',
    # so each group of ranks is bound to its node's CPUs and memory.
    numa_map = numactl.GetNuma(headnode_vm)
    numa_hpcc_cmd = []
    for node, num_cpus in numa_map.items():
      numa_hpcc_cmd.append(f'-np {num_cpus} {mpi_flags} '
                           f'numactl --cpunodebind {node} '
                           f'--membind {node} {hpcc_exec}')
    mpi_cmd += ' : '.join(numa_hpcc_cmd)
  else:
    mpi_cmd += f'-np {num_processes} {mpi_flags} {hpcc_exec}'
  headnode_vm.RobustRemoteCommand(
      mpi_cmd, timeout=int(FLAGS.hpcc_timeout_hours * SECONDS_PER_HOUR))
  logging.info('HPCC Results:')
  stdout, _ = headnode_vm.RemoteCommand('cat hpccoutf.txt', should_log=True)
  if stdout.startswith('HPL ERROR'):
    # Annoyingly the mpi_cmd will succeed when there is an HPL error
    raise errors.Benchmarks.RunError(f'Error running HPL: {stdout}')
  return ParseOutput(stdout)
def _CreateHpccWrapper(vm: linux_vm.BaseLinuxVirtualMachine) -> None:
  """Creates a bash script to run HPCC on the VM.

  This is required for when MKL is installed via the Intel repos as the
  libiomp5.so file is not in /lib but rather in one found via sourcing the
  mklvars.sh file.

  Args:
    vm: Virtual machine to put file on.
  """
  # Source the MKL environment first so the loader can find libiomp5.so.
  script_lines = ['#!/bin/bash', mkl.SOURCE_MKL_INTEL64_CMD, './hpcc']
  script_body = '\n'.join(script_lines)
  vm_util.CreateRemoteFile(vm, script_body, HPCC_WRAPPER)
  vm.RemoteCommand(f'chmod +x {HPCC_WRAPPER}')
def _MpiEnv(mpi_flag: str = '-x') -> str:
  """Returns the --hpcc_mpi_env flags as a string for the mpirun command."""
  pieces = []
  for env_setting in FLAGS.hpcc_mpi_env:
    pieces.append(f'{mpi_flag} {env_setting}')
  return ' '.join(pieces)
def Cleanup(benchmark_spec: bm_spec.BenchmarkSpec) -> None:
  """Cleanup HPCC on the cluster.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.
  """
  headnode, *workers = benchmark_spec.vms
  headnode.RemoveFile('hpcc*')
  headnode.RemoveFile(MACHINEFILE)
  for worker in workers:
    # Workers received copies of the binary and the OpenMPI daemon.
    worker.RemoveFile('hpcc')
    worker.RemoveFile('/usr/bin/orted')
def RunIntelLinpack(vms: List[linux_vm.BaseLinuxVirtualMachine],
                    dimensions: HpccDimensions) -> sample.Sample:
  """Returns the parsed output from running the Intel compiled HPCC.

  Unlike the compiled from source linpack run the Intel compiled linpack can
  handle being cut off after --hpcc_timeout_hours as it parses the continuous
  output of linpack, reporting the last value found as the HPL_Tflops.

  The metadata argument value for "last_fraction_completed" is how much of the
  run was completed before being cut off.

  Args:
    vms: List of VMs to run benchmark on.
    dimensions: The HPCC configuration.

  Returns: Sample of the HPL_Tflops for the run.
  """
  vm = vms[0]
  # Compiled from source HPL uses hpccinf.txt, one from Intel uses HPL.dat
  vm.RemoteCommand(f'cp {HPCCINF_FILE} HPL.dat')
  mpi_cmd, num_processes = _CreateIntelMpiRunCommand(vms, dimensions)
  # ignore_failure: a timed-out run is still useful -- stdout is parsed for
  # the fractional progress below.
  run_cmd_txt, _ = vm.RobustRemoteCommand(
      mpi_cmd,
      ignore_failure=True,
      timeout=int(FLAGS.hpcc_timeout_hours * SECONDS_PER_HOUR))
  file_text, _ = vm.RemoteCommand('cat HPL.out', ignore_failure=True)
  # Always parse stdout first to get the fractional-progress metadata.
  tflops, metadata = _ParseIntelLinpackStdout(run_cmd_txt)
  if file_text:
    # HPL ran to completion, use the tflops from the file output
    tflops = _ParseIntelLinpackOutputFile(file_text)
    metadata['full'] = True
  else:
    # HPL timed out but have fractional metadata
    metadata['full'] = False
  metadata.update({
      'num_processes': num_processes,
      'per_host': vm.numa_node_count,
      'mpi_cmd': mpi_cmd,
  })
  return sample.Sample('HPL_Tflops', tflops, 'Tflops/s', metadata)
def _CreateIntelMpiRunCommand(vms: List[linux_vm.BaseLinuxVirtualMachine],
                              dimensions: HpccDimensions) -> Tuple[str, int]:
  """Creates the command to run HPL for Intel compiled linpack.

  Args:
    vms: List of virtual machines to run on.
    dimensions: The HpccDimensions for the run

  Returns:
    Tuple of the mpirun command and the number of processes to be used.
  """
  headnode = vms[0]
  # Create the file for mpirun to execute
  hpl_path = '/opt/intel/mkl/benchmarks/mp_linpack/xhpl_intel64_static'
  # Each MPI rank pins HPL to one NUMA node by exporting HPL_HOST_NODE as
  # its rank modulo the headnode's NUMA node count.
  bash_script = inspect.cleandoc(f'''
      #!/bin/bash
      export HPL_HOST_NODE=$((PMI_RANK % {headnode.numa_node_count}))
      {hpl_path}
  ''')
  run_file = './hpl_run'
  # The wrapper script must exist on every VM that mpirun targets.
  for vm in vms:
    vm_util.CreateRemoteFile(vm, bash_script + '\n', run_file)
    vm.RemoteCommand(f'chmod +x {run_file}')
  logging.info('Using precompiled HPL at %s', hpl_path)
  # The P x Q process grid determines the total rank count.
  num_processes = dimensions.num_rows * dimensions.num_columns
  hosts = ','.join([vm.internal_ip for vm in vms])
  mpi_cmd = (f'{intelmpi.SourceMpiVarsCommand(headnode)}; '
             'mpirun '
             f'-perhost {headnode.numa_node_count} {_MpiEnv("-genv")} '
             f'-np {num_processes} -host {hosts} {run_file}')
  return mpi_cmd, num_processes
def _ParseIntelLinpackOutputFile(file_text: str) -> float:
"""Returns the tflops for the hpcc run.
The last entry that matches
WR11C2R4 50688 192 6 10 551.85 1.57334e+02
is the Gflops for the run: 157.33
Args:
file_text: The hpcc output file contents.
"""
line_re = re.compile(r'\s+'.join([
r'WR\S+', r'\d+', r'\d+', r'\d+', r'\d+', r'\d+\.\d+', r'([\d\.e\+\-]+)'
]))
gflops = None
for line in file_text.splitlines():
match = line_re.match(line)
if match:
gflops = float(match[1])
return gflops / 1000
def _ParseIntelLinpackStdout(stdout: str) -> Tuple[float, Dict[str, float]]:
  """Parse the stdout of Intel HPL returning a condensed sample of results.

  Sample stdout:
  pkb-123-0 : Column=000576 Fraction=0.005 Kernel= 0.58 Mflops=1265648.19
  pkb-123-0 : Column=001152 Fraction=0.010 Kernel=969908.14 Mflops=1081059.81
  pkb-123-0 : Column=001728 Fraction=0.015 Kernel=956391.64 Mflops=1040609.60

  Return:
  1.0406096,
  {'fractions': '0.01,0.015',
   'kernel_tflops': '0.96990814,0.95639164',
   'last_fraction_completed': 0.015,
   'tflops': '1.08105981,1.0406096'
  }

  Args:
    stdout: The stdout text from running HPL

  Returns:
    Tuple of the tflops/s and a dict of the fractional run information.

  Raises:
    ValueError: If no metrics could be found.
  """
  line_re = re.compile(
      r"""Column=\s*(?P<column>\d+)
      \s*Fraction=\s*(?P<fraction>[\d\.]+)
      \s*Kernel=\s*(?P<kernel>[\d\.]+)
      \s*Mflops=\s*(?P<mflops>[\d\.]+)""", re.X)
  fractions = []
  kernel_tflops = []
  tflops = []
  line_matches = line_re.finditer(stdout)
  try:
    next(line_matches)  # first outputted values are artificially low
  except StopIteration:
    # "from None" suppresses the unhelpful StopIteration exception context.
    raise ValueError(
        f'Could not find a line in stdout to match {line_re.pattern}: {stdout}'
    ) from None
  for line_match in line_matches:
    fractions.append(float(line_match['fraction']))
    # Kernel/Mflops values are reported in Mflops; convert to Tflops.
    kernel_tflops.append(float(line_match['kernel']) / 1e6)
    tflops.append(float(line_match['mflops']) / 1e6)
  if not tflops:
    raise ValueError('No metrics found in stdout')
  # Grab all the I_MPI* environment variables in debug output to put in metadata
  intel_env_re = re.compile(r'(.*MPI startup.*?)?\s*'
                            r'(?P<key>I_MPI[A-Z_\d]+)=(?P<value>.*)\s*')
  env_vars = {row['key']: row['value'] for row in intel_env_re.finditer(stdout)}
  # The hydra UUID changes every run and would pollute metadata comparisons.
  env_vars.pop('I_MPI_HYDRA_UUID', None)
  metadata = {
      'fractions': ','.join([str(x) for x in fractions]),
      'kernel_tflops': ','.join([str(x) for x in kernel_tflops]),
      'tflops': ','.join([str(x) for x in tflops]),
      'last_fraction_completed': fractions[-1],
      'intel_mpi_env': vm_util.DictionaryToEnvString(env_vars, ';')
  }
  return tflops[-1], metadata
|
import copy
import logging
import re
from prettytable import PrettyTable
from collections import defaultdict
from subprocess import TimeoutExpired
from ocs_ci.ocs.machine import get_machine_objs
from ocs_ci.framework import config
from ocs_ci.ocs.exceptions import TimeoutExpiredError
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs import constants, exceptions, ocp, defaults
from ocs_ci.utility.utils import TimeoutSampler, convert_device_size
from ocs_ci.ocs import machine
from ocs_ci.ocs.resources import pod
from ocs_ci.utility.utils import set_selinux_permissions
from ocs_ci.ocs.resources.pv import (
get_pv_objs_in_sc,
verify_new_pv_available_in_sc,
delete_released_pvs_in_sc,
)
# Module-level logger shared by all helpers in this file.
log = logging.getLogger(__name__)
def get_node_objs(node_names=None):
    """
    Get node objects by node names

    Args:
        node_names (list): The node names to get their objects for.
            If None, will return all cluster nodes

    Returns:
        list: Cluster node OCP objects

    """
    node_dicts = OCP(kind="node").get()["items"]
    if node_names:
        wanted = set(node_names)
        nodes = [
            OCS(**node_dict)
            for node_dict in node_dicts
            if node_dict.get("metadata").get("name") in wanted
        ]
    else:
        nodes = [OCS(**node_dict) for node_dict in node_dicts]
    assert nodes, "Failed to get the nodes OCS objects"
    return nodes
def get_nodes(node_type=constants.WORKER_MACHINE, num_of_nodes=None):
    """
    Get cluster's nodes according to the node type (e.g. worker, master) and the
    number of requested nodes from that type

    Args:
        node_type (str): The node type (e.g. worker, master)
        num_of_nodes (int): The number of nodes to be returned

    Returns:
        list: The nodes OCP instances

    """
    # On OpenShift Dedicated, infra nodes also carry the worker role, so
    # they must be excluded when worker nodes are requested.
    exclude_infra = (
        config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
        and node_type == constants.WORKER_MACHINE
    )
    typed_nodes = []
    for node in get_node_objs():
        roles = node.ocp.get_resource(resource_name=node.name, column="ROLES")
        if node_type not in roles:
            continue
        if exclude_infra and constants.INFRA_MACHINE in roles:
            continue
        typed_nodes.append(node)
    if num_of_nodes:
        typed_nodes = typed_nodes[:num_of_nodes]
    return typed_nodes
def get_all_nodes():
    """
    Gets the all nodes in cluster

    Returns:
        list: List of node name

    """
    node_items = ocp.OCP(kind=constants.NODE).get().get("items")
    names = []
    for item in node_items:
        names.append(item["metadata"]["name"])
    return names
def wait_for_nodes_status(node_names=None, status=constants.NODE_READY, timeout=180):
    """
    Wait until all nodes are in the given status

    Args:
        node_names (list): The node names to wait for to reached the desired state
            If None, will wait for all cluster nodes
        status (str): The node status to wait for
            (e.g. 'Ready', 'NotReady', 'SchedulingDisabled')
        timeout (int): The number in seconds to wait for the nodes to reach
            the status

    Raises:
        ResourceWrongStatusException: In case one or more nodes haven't
            reached the desired state

    """
    # Initialized before the try block so the except clause can always
    # reference it; previously a timeout during node discovery raised a
    # NameError here instead of the intended exception.
    nodes_not_in_state = []
    try:
        if not node_names:
            # Discover all cluster nodes when none were given.
            for sample in TimeoutSampler(60, 3, get_node_objs):
                if sample:
                    node_names = [node.name for node in sample]
                    break
        nodes_not_in_state = copy.deepcopy(node_names)
        log.info(f"Waiting for nodes {node_names} to reach status {status}")
        for sample in TimeoutSampler(timeout, 3, get_node_objs, nodes_not_in_state):
            for node in sample:
                if node.ocp.get_resource_status(node.name) == status:
                    log.info(f"Node {node.name} reached status {status}")
                    nodes_not_in_state.remove(node.name)
            if not nodes_not_in_state:
                break
        log.info(f"The following nodes reached status {status}: {node_names}")
    except TimeoutExpiredError:
        log.error(
            f"The following nodes haven't reached status {status}: "
            f"{nodes_not_in_state}"
        )
        raise exceptions.ResourceWrongStatusException(
            node_names, [n.describe() for n in get_node_objs(node_names)]
        )
def unschedule_nodes(node_names):
    """
    Change nodes to be unscheduled

    Args:
        node_names (list): The names of the nodes

    """
    # Local name avoids shadowing the imported 'ocp' module.
    node_ocp = OCP(kind="node")
    joined_names = " ".join(node_names)
    log.info(f"Unscheduling nodes {joined_names}")
    node_ocp.exec_oc_cmd(f"adm cordon {joined_names}")
    wait_for_nodes_status(node_names, status=constants.NODE_READY_SCHEDULING_DISABLED)
def schedule_nodes(node_names):
    """
    Change nodes to be scheduled

    Args:
        node_names (list): The names of the nodes

    """
    # Local name avoids shadowing the imported 'ocp' module.
    node_ocp = OCP(kind="node")
    joined_names = " ".join(node_names)
    node_ocp.exec_oc_cmd(f"adm uncordon {joined_names}")
    log.info(f"Scheduling nodes {joined_names}")
    wait_for_nodes_status(node_names)
def drain_nodes(node_names):
    """
    Drain nodes

    Args:
        node_names (list): The names of the nodes

    Raises:
        TimeoutExpired: in case drain command fails to complete in time

    """
    ocp = OCP(kind="node")
    node_names_str = " ".join(node_names)
    log.info(f"Draining nodes {node_names_str}")
    try:
        # 30-minute budget: draining waits for every pod to be evicted.
        ocp.exec_oc_cmd(
            f"adm drain {node_names_str} --force=true --ignore-daemonsets "
            f"--delete-local-data",
            timeout=1800,
        )
    except TimeoutExpired:
        # Capture Ceph health to help diagnose why eviction stalled, then
        # re-raise the original timeout.
        ct_pod = pod.get_ceph_tools_pod()
        ceph_status = ct_pod.exec_cmd_on_pod("ceph status", out_yaml_format=False)
        log.error(f"Drain command failed to complete. Ceph status: {ceph_status}")
        # TODO: Add re-balance status once pull/1679 is merged
        raise
def get_typed_worker_nodes(os_id="rhcos"):
    """
    Get worker nodes with specific OS

    Args:
        os_id (str): OS type like rhcos, RHEL etc...

    Returns:
        list: list of worker nodes instances having specified os

    """
    matching_workers = []
    for worker in get_nodes(node_type="worker"):
        labels = worker.get().get("metadata").get("labels")
        if labels.get("node.openshift.io/os_id") == os_id:
            matching_workers.append(worker)
    return matching_workers
def remove_nodes(nodes):
    """
    Remove the nodes from cluster

    Args:
        nodes (list): list of node instances to remove from cluster

    """
    node_ocp = OCP(kind="node")
    node_names = [n.get().get("metadata").get("name") for n in nodes]
    joined_names = " ".join(node_names)
    # Cordon first so no new pods land on nodes that are being removed.
    unschedule_nodes(node_names)
    # Evacuate all running pods before deletion.
    drain_nodes(node_names)
    log.info(f"Deleting nodes {joined_names}")
    node_ocp.exec_oc_cmd(f"delete nodes {joined_names}")
def get_node_ips(node_type="worker"):
    """
    Gets the node public IP

    Args:
        node_type (str): The node type (e.g. worker, master)

    Returns:
        list: Node IP's

    Raises:
        ValueError: If node_type is neither 'worker' nor 'master'.
        NotImplementedError: If the platform does not support IP lookup.

    """
    ocp = OCP(kind=constants.NODE)
    if node_type == "worker":
        nodes = ocp.get(selector=constants.WORKER_LABEL).get("items")
    elif node_type == "master":
        # Fixed: the comparison previously checked the typo string "master:",
        # so the master branch could never match and 'nodes' stayed unbound.
        nodes = ocp.get(selector=constants.MASTER_LABEL).get("items")
    else:
        # Fail fast instead of hitting a NameError on 'nodes' below.
        raise ValueError(f"Unsupported node type: {node_type}")
    if config.ENV_DATA["platform"].lower() == constants.AWS_PLATFORM:
        raise NotImplementedError
    elif config.ENV_DATA["platform"].lower() == constants.VSPHERE_PLATFORM:
        return [
            each["address"]
            for node in nodes
            for each in node["status"]["addresses"]
            if each["type"] == "ExternalIP"
        ]
    else:
        raise NotImplementedError
def add_new_node_and_label_it(machineset_name, num_nodes=1, mark_for_ocs_label=True):
    """
    Add a new node for ipi and label it

    Args:
        machineset_name (str): Name of the machine set
        num_nodes (int): number of nodes to add
        mark_for_ocs_label (bool): True if label the new node
        eg: add_new_node_and_label_it("new-tdesala-zlqzn-worker-us-east-2a")

    Returns:
        list: new spun node names

    """
    # Get the initial nodes list
    initial_nodes = get_worker_nodes()
    log.info(f"Current available worker nodes are {initial_nodes}")
    # get machineset replica count
    machineset_replica_count = machine.get_replica_count(machineset_name)
    log.info(f"{machineset_name} has replica count: {machineset_replica_count}")
    # Increase its replica count
    log.info(f"Increasing the replica count by {num_nodes}")
    machine.add_node(machineset_name, count=machineset_replica_count + num_nodes)
    log.info(
        f"{machineset_name} now has replica "
        f"count: {machineset_replica_count + num_nodes}"
    )
    # wait for the new node to come to ready state
    log.info("Waiting for the new node to be in ready state")
    machine.wait_for_new_node_to_be_ready(machineset_name)
    # Get the node name of new spun node by diffing against the initial list.
    nodes_after_new_spun_node = get_worker_nodes()
    new_spun_nodes = list(set(nodes_after_new_spun_node) - set(initial_nodes))
    log.info(f"New spun nodes: {new_spun_nodes}")
    # Label it with the OCS storage label so it can host OCS workloads.
    if mark_for_ocs_label:
        node_obj = ocp.OCP(kind="node")
        for new_spun_node in new_spun_nodes:
            # NOTE(review): is_node_labeled is defined elsewhere in this module.
            if is_node_labeled(new_spun_node):
                logging.info(
                    f"node {new_spun_node} is already labeled with the OCS storage label"
                )
            else:
                node_obj.add_label(
                    resource_name=new_spun_node, label=constants.OPERATOR_NODE_LABEL
                )
                logging.info(
                    f"Successfully labeled {new_spun_node} with OCS storage label"
                )
    return new_spun_nodes
def add_new_node_and_label_upi(
    node_type, num_nodes, mark_for_ocs_label=True, node_conf=None
):
    """
    Add a new node for aws/vmware upi platform and label it

    Args:
        node_type (str): Type of node, RHEL or RHCOS
        num_nodes (int): number of nodes to add
        mark_for_ocs_label (bool): True if label the new node
        node_conf (dict): The node configurations.

    Returns:
        list: new spun node names

    """
    node_conf = node_conf or {}
    initial_nodes = get_worker_nodes()
    # Imported here rather than at module top, presumably to avoid a
    # circular import -- confirm before hoisting.
    from ocs_ci.ocs.platform_nodes import PlatformNodesFactory

    plt = PlatformNodesFactory()
    node_util = plt.get_nodes_platform()
    node_util.create_and_attach_nodes_to_cluster(node_conf, node_type, num_nodes)
    # Wait until the expected number of workers is visible in the cluster.
    for sample in TimeoutSampler(timeout=600, sleep=6, func=get_worker_nodes):
        if len(sample) == len(initial_nodes) + num_nodes:
            break
    nodes_after_exp = get_worker_nodes()
    wait_for_nodes_status(node_names=get_worker_nodes(), status=constants.NODE_READY)
    # Identify only the newly created nodes.
    new_spun_nodes = list(set(nodes_after_exp) - set(initial_nodes))
    log.info(f"New spun nodes: {new_spun_nodes}")
    if node_type == constants.RHEL_OS:
        # RHEL workers need SELinux permissions adjusted before use.
        set_selinux_permissions(workers=new_spun_nodes)
    if mark_for_ocs_label:
        node_obj = ocp.OCP(kind="node")
        for new_spun_node in new_spun_nodes:
            node_obj.add_label(
                resource_name=new_spun_node, label=constants.OPERATOR_NODE_LABEL
            )
            logging.info(f"Successfully labeled {new_spun_node} with OCS storage label")
    return new_spun_nodes
def get_node_logs(node_name):
    """
    Get logs from a given node

    Args:
        node_name (str): Name of the node

    Returns:
        str: Output of 'dmesg' run on node

    """
    node = OCP(kind="node")
    return node.exec_oc_debug_cmd(node_name, ["dmesg"])
def get_node_resource_utilization_from_adm_top(
    nodename=None, node_type=constants.WORKER_MACHINE, print_table=False
):
    """
    Gets the node's cpu and memory utilization in percentage using adm top command.

    Args:
        nodename (str) : The node name
        node_type (str) : The node type (e.g. master, worker)
        print_table (bool) : True to also log the result as a PrettyTable

    Returns:
        dict : Node name and its cpu and memory utilization in
               percentage

    """
    node_names = (
        [nodename]
        if nodename
        else [node.name for node in get_nodes(node_type=node_type)]
    )
    # Validate node is in Ready state
    wait_for_nodes_status(node_names, status=constants.NODE_READY, timeout=30)
    obj = ocp.OCP()
    resource_utilization_all_nodes = obj.exec_oc_cmd(
        command="adm top nodes", out_yaml_format=False
    ).split("\n")
    utilization_dict = {}
    for node in node_names:
        for value in resource_utilization_all_nodes:
            if node in value:
                # Each matching 'adm top nodes' row carries two percentages:
                # CPU% first, then MEM%.
                value = re.findall(r"(\d{1,3})%", value.strip())
                cpu_utilization = value[0]
                log.info(
                    "The CPU utilized by the node " f"{node} is {cpu_utilization}%"
                )
                memory_utilization = value[1]
                log.info(
                    "The memory utilized of the node "
                    f"{node} is {memory_utilization}%"
                )
                utilization_dict[node] = {
                    "cpu": int(cpu_utilization),
                    "memory": int(memory_utilization),
                }
    if print_table:
        print_table_node_resource_utilization(
            utilization_dict=utilization_dict,
            field_names=["Node Name", "CPU USAGE adm_top", "Memory USAGE adm_top"],
        )
    return utilization_dict
def get_node_resource_utilization_from_oc_describe(
    nodename=None, node_type=constants.WORKER_MACHINE, print_table=False
):
    """
    Gets the node's cpu and memory utilization in percentage using oc describe node

    Args:
        nodename (str) : The node name
        node_type (str) : The node type (e.g. master, worker)
        print_table (bool) : True to also log the result as a PrettyTable

    Returns:
        dict : Node name and its cpu and memory utilization in
               percentage

    """
    node_names = (
        [nodename]
        if nodename
        else [node.name for node in get_nodes(node_type=node_type)]
    )
    obj = ocp.OCP()
    utilization_dict = {}
    for node in node_names:
        output = obj.exec_oc_cmd(
            command=f"describe node {node}", out_yaml_format=False
        ).split("\n")
        # The 'Allocated resources' section of describe output has rows like
        # "cpu  1130m (32%)"; the third non-empty token holds the percentage.
        # NOTE(review): if a node's describe output lacks a cpu/memory line,
        # the values from the previous node iteration are reused (or a
        # NameError is raised on the first) -- confirm whether that can occur.
        for line in output:
            if "cpu " in line:
                cpu_data = line.split(" ")
                cpu = re.findall(r"\d+", [i for i in cpu_data if i][2])
            if "memory " in line:
                mem_data = line.split(" ")
                mem = re.findall(r"\d+", [i for i in mem_data if i][2])
        utilization_dict[node] = {"cpu": int(cpu[0]), "memory": int(mem[0])}
    if print_table:
        print_table_node_resource_utilization(
            utilization_dict=utilization_dict,
            field_names=[
                "Node Name",
                "CPU USAGE oc_describe",
                "Memory USAGE oc_describe",
            ],
        )
    return utilization_dict
def get_running_pod_count_from_node(nodename=None, node_type=constants.WORKER_MACHINE):
    """
    Gets the node running pod count using oc describe node

    Args:
        nodename (str) : The node name
        node_type (str) : The node type (e.g. master, worker)

    Returns:
        dict : Node name and its pod_count

    """
    if nodename:
        node_names = [nodename]
    else:
        node_names = [node.name for node in get_nodes(node_type=node_type)]
    obj = ocp.OCP()
    pod_count_dict = {}
    for node in node_names:
        describe_lines = obj.exec_oc_cmd(
            command=f"describe node {node}", out_yaml_format=False
        ).split("\n")
        for line in describe_lines:
            if "Non-terminated Pods: " not in line:
                continue
            # The third non-empty token holds the count, e.g. "(24 in total)".
            tokens = [token for token in line.split(" ") if token]
            pod_count = re.findall(r"\d+", tokens[2])
            pod_count_dict[node] = int(pod_count[0])
    return pod_count_dict
def print_table_node_resource_utilization(utilization_dict, field_names):
    """
    Print table of node utilization

    Args:
        utilization_dict (dict) : CPU and Memory utilization per Node
        field_names (list) : The field names of the table

    """
    usage_memory_table = PrettyTable()
    usage_memory_table.field_names = field_names
    for node, util_node in utilization_dict.items():
        # Fixed: single quotes nested inside a single-quoted f-string
        # (f'{util_node['cpu']}%') are a SyntaxError on Python < 3.12.
        usage_memory_table.add_row(
            [node, f"{util_node['cpu']}%", f"{util_node['memory']}%"]
        )
    log.info(f"\n{usage_memory_table}\n")
def node_network_failure(node_names, wait=True):
    """
    Induce node network failure
    Bring node network interface down, making the node unresponsive

    Args:
        node_names (list): The names of the nodes
        wait (bool): True in case wait for status is needed, False otherwise

    Returns:
        bool: True if node network fail is successful

    """
    if not isinstance(node_names, list):
        node_names = [node_names]
    node_ocp = OCP(kind="node")
    # Bring down the interface that carries the node's default route.
    fail_nw_cmd = "ifconfig $(route | grep default | awk '{print $(NF)}') down"
    for node_name in node_names:
        try:
            node_ocp.exec_oc_debug_cmd(
                node=node_name, cmd_list=[fail_nw_cmd], timeout=15
            )
        except TimeoutExpired:
            # The debug command hangs once the interface drops; expected.
            pass
    if wait:
        wait_for_nodes_status(node_names=node_names, status=constants.NODE_NOT_READY)
    return True
def get_osd_running_nodes():
    """
    Gets the osd running node names

    Returns:
        list: OSD node names

    """
    node_names = []
    for osd_pod in pod.get_osd_pods():
        node_names.append(pod.get_pod_node(osd_pod).name)
    return node_names
def get_osds_per_node():
    """
    Gets the osd running pod names per node name

    Returns:
        dict: {"Node name":["osd running pod name running on the node",..,]}

    """
    node_to_osds = defaultdict(list)
    for osd_pod in pod.get_osd_pods():
        hosting_node = pod.get_pod_node(osd_pod).name
        node_to_osds[hosting_node].append(osd_pod.name)
    return node_to_osds
def get_app_pod_running_nodes(pod_obj):
    """
    Gets the app pod running node names

    Args:
        pod_obj (list): List of app pod objects

    Returns:
        list: App pod running node names

    """
    node_names = []
    for app_pod in pod_obj:
        node_names.append(pod.get_pod_node(app_pod).name)
    return node_names
def get_both_osd_and_app_pod_running_node(osd_running_nodes, app_pod_running_nodes):
    """
    Gets both osd and app pod running node names

    Args:
        osd_running_nodes(list): List of osd running node names
        app_pod_running_nodes(list): List of app pod running node names

    Returns:
        list: Both OSD and app pod running node names

    """
    common_nodes = list(set(osd_running_nodes).intersection(app_pod_running_nodes))
    log.info(f"Common node is {common_nodes}")
    return common_nodes
def get_node_from_machine_name(machine_name):
    """
    Get node name from a given machine_name.

    Args:
        machine_name (str): Name of Machine

    Returns:
        str: Name of Node (or None if not found)

    """
    for machine_obj in get_machine_objs():
        if machine_obj.name != machine_name:
            continue
        # The machine's status carries a reference to the backing node.
        return machine_obj.get()["status"]["nodeRef"]["name"]
def get_provider():
    """
    Return the OCP Provider (Platform)

    Returns:
         str: The Provider that the OCP is running on

    """
    ocp_cluster = OCP(kind="", resource_name="nodes")
    node_spec = ocp_cluster.get("nodes")["items"][0]["spec"]
    # A missing providerID on the node spec implies a bare-metal deployment.
    if "providerID" not in node_spec:
        return "BareMetal"
    return node_spec["providerID"].split(":")[0]
def get_compute_node_names(no_replace=False):
    """
    Get the compute node names.

    Args:
        no_replace (bool): If False '.' will replaced with '-'

    Returns:
        list: List of compute node names

    Raises:
        NotImplementedError: for platforms other than vSphere/AWS/bare-metal/IBM Power
    """
    platform = config.ENV_DATA.get("platform").lower()
    cloud_platforms = (constants.VSPHERE_PLATFORM, constants.AWS_PLATFORM)
    metal_platforms = (
        constants.BAREMETAL_PLATFORM,
        constants.BAREMETALPSI_PLATFORM,
        constants.IBM_POWER_PLATFORM,
    )
    if platform not in cloud_platforms and platform not in metal_platforms:
        raise NotImplementedError
    hostnames = [
        node_obj.get()["metadata"]["labels"][constants.HOSTNAME_LABEL]
        for node_obj in get_nodes()
    ]
    # On bare-metal style platforms dots in hostnames are normalized to
    # dashes unless the caller explicitly opts out.
    if platform in metal_platforms and not no_replace:
        return [hostname.replace(".", "-") for hostname in hostnames]
    return hostnames
def get_ocs_nodes(num_of_nodes=None):
    """
    Get the ocs nodes.

    Args:
        num_of_nodes (int): The number of ocs nodes to return. If not specified,
            it returns all the ocs nodes.

    Returns:
        list: List of ocs nodes
    """
    labeled_names = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
    ocs_nodes = get_node_objs(labeled_names)
    # Falsy num_of_nodes (None or 0) means "return everything".
    limit = num_of_nodes or len(ocs_nodes)
    return ocs_nodes[:limit]
def get_node_name(node_obj):
    """
    Get oc node's name

    Args:
        node_obj (node_obj): oc node object

    Returns:
        str: node's name
    """
    # NOTE(review): in a typical `oc get` response, "items" is a *list* of
    # node dicts, so `node_items["metadata"]` would raise TypeError. This
    # only works if node_obj is a mapping whose "items" key holds a single
    # node dict — confirm against the actual callers before relying on it.
    node_items = node_obj.get("items")
    return node_items["metadata"]["name"]
def check_nodes_specs(min_memory, min_cpu):
    """
    Check that the cluster worker nodes meet the required minimum CPU and memory.

    Args:
        min_memory (int): The required minimum memory in bytes
        min_cpu (int): The required minimum number of vCPUs

    Returns:
        bool: True if all nodes meet the required minimum specs, False otherwise
    """
    nodes = get_nodes()
    # BUGFIX: the original embedded double-quoted expressions such as
    # node.get().get("metadata") inside double-quoted f-strings, which is a
    # SyntaxError on Python < 3.12 (quote reuse requires PEP 701).
    node_names = [node.get().get("metadata").get("name") for node in nodes]
    log.info(
        f"Checking following nodes with worker selector (assuming that "
        f"this is ran in CI and there are no worker nodes without OCS):\n"
        f"{node_names}"
    )
    for node in nodes:
        # Fetch the node resource once per node instead of twice
        node_data = node.get()
        real_cpu = int(node_data["status"]["capacity"]["cpu"])
        real_memory = convert_device_size(node_data["status"]["capacity"]["memory"], "B")
        if real_cpu < min_cpu or real_memory < min_memory:
            node_name = node_data.get("metadata").get("name")
            log.warning(
                f"Node {node_name} specs don't meet "
                f" the minimum required specs.\n The requirements are: "
                f"{min_cpu} CPUs and {min_memory} Memory\nThe node has: {real_cpu} "
                f"CPUs and {real_memory} Memory"
            )
            return False
    log.info(
        f"Cluster worker nodes meet the minimum requirements of "
        f"{min_cpu} CPUs and {min_memory} Memory"
    )
    return True
def delete_and_create_osd_node_ipi(osd_node_name):
    """
    Unschedule, drain and delete osd node, and creating a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for any IPI platform.

    Args:
        osd_node_name (str): the name of the osd node

    Returns:
        str: The new node name
    """
    log.info("Going to unschedule, drain and delete %s node", osd_node_name)
    # Unscheduling node
    unschedule_nodes([osd_node_name])
    # Draining Node
    drain_nodes([osd_node_name])
    log.info("Getting machine name from specified node name")
    machine_name = machine.get_machine_from_node_name(osd_node_name)
    log.info(f"Node {osd_node_name} associated machine is {machine_name}")
    log.info(f"Deleting machine {machine_name} and waiting for new machine to come up")
    machine.delete_machine_and_check_state_of_new_spinned_machine(machine_name)
    new_machine_list = machine.get_machines()
    for machines in new_machine_list:
        # Trimming is done to get just machine name
        # eg:- machine_name:- prsurve-40-ocs-43-kbrvf-worker-us-east-2b-nlgkr
        # After trimming:- prsurve-40-ocs-43-kbrvf-worker-us-east-2b
        # NOTE(review): [:-6] strips the random 5-char suffix plus its hyphen,
        # so re.match tests machineset-prefix membership. If several machines
        # match, the last one in the list wins — presumably the newly spun
        # machine; confirm the ordering guarantee of machine.get_machines().
        if re.match(machines.name[:-6], machine_name):
            new_machine_name = machines.name
    machineset_name = machine.get_machineset_from_machine_name(new_machine_name)
    log.info("Waiting for new worker node to be in ready state")
    machine.wait_for_new_node_to_be_ready(machineset_name)
    new_node_name = get_node_from_machine_name(new_machine_name)
    log.info("Adding ocs label to newly created worker node")
    node_obj = ocp.OCP(kind="node")
    node_obj.add_label(resource_name=new_node_name, label=constants.OPERATOR_NODE_LABEL)
    log.info(f"Successfully labeled {new_node_name} with OCS storage label")
    return new_node_name
def delete_and_create_osd_node_aws_upi(osd_node_name):
    """
    Unschedule, drain and delete osd node, and creating a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for AWS UPI.

    Args:
        osd_node_name (str): the name of the osd node

    Returns:
        str: The new node name
    """
    osd_node = get_node_objs(node_names=[osd_node_name])[0]
    # Record AZ and CF stack name BEFORE deleting the node so the
    # replacement can be created in the same place.
    az = get_node_az(osd_node)
    # Local import — presumably avoids a circular import with platform_nodes;
    # confirm before moving it to module level.
    from ocs_ci.ocs.platform_nodes import AWSNodes

    aws_nodes = AWSNodes()
    stack_name_of_deleted_node = aws_nodes.get_stack_name_of_node(osd_node_name)
    remove_nodes([osd_node])
    log.info(f"name of deleted node = {osd_node_name}")
    log.info(f"availability zone of deleted node = {az}")
    log.info(f"stack name of deleted node = {stack_name_of_deleted_node}")
    # Pick the worker OS type matching the cluster configuration
    if config.ENV_DATA.get("rhel_workers"):
        node_type = constants.RHEL_OS
    else:
        node_type = constants.RHCOS
    log.info("Preparing to create a new node...")
    # Reuse the deleted node's stack so the new node lands in the same AZ
    node_conf = {"stack_name": stack_name_of_deleted_node}
    new_node_names = add_new_node_and_label_upi(node_type, 1, node_conf=node_conf)
    return new_node_names[0]
def get_node_az(node):
    """
    Get the node availability zone.

    Args:
        node (ocs_ci.ocs.resources.ocs.OCS): The node object

    Returns:
        str: The name of the node availability zone
    """
    node_labels = node.get().get("metadata", {}).get("labels", {})
    zone = node_labels.get("topology.kubernetes.io/zone")
    return zone
def delete_and_create_osd_node_vsphere_upi(osd_node_name, use_existing_node=False):
    """
    Unschedule, drain and delete osd node, and creating a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for vSphere UPI.

    Args:
        osd_node_name (str): the name of the osd node
        use_existing_node (bool): If False, create a new node and label it.
            If True, use an existing node to replace the deleted node
            and label it.

    Returns:
        str: The new node name
    """
    osd_node = get_node_objs(node_names=[osd_node_name])[0]
    remove_nodes([osd_node])
    log.info(f"name of deleted node = {osd_node_name}")
    # Pick the worker OS type matching the cluster configuration
    if config.ENV_DATA.get("rhel_workers"):
        node_type = constants.RHEL_OS
    else:
        node_type = constants.RHCOS
    if not use_existing_node:
        log.info("Preparing to create a new node...")
        new_node_names = add_new_node_and_label_upi(node_type, 1)
        new_node_name = new_node_names[0]
    else:
        # Re-purpose an existing non-OCS worker instead of provisioning
        node_not_in_ocs = get_worker_nodes_not_in_ocs()[0]
        log.info(
            f"Preparing to replace the node {osd_node_name} "
            f"with an existing node {node_not_in_ocs.name}"
        )
        if node_type == constants.RHEL_OS:
            # RHEL workers need selinux adjustments before joining OCS
            set_selinux_permissions(workers=[node_not_in_ocs])
        label_nodes([node_not_in_ocs])
        new_node_name = node_not_in_ocs.name
    return new_node_name
def delete_and_create_osd_node_vsphere_upi_lso(osd_node_name, use_existing_node=False):
    """
    Unschedule, drain and delete osd node, and creating a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for vSphere UPI.

    Args:
        osd_node_name (str): the name of the osd node
        use_existing_node (bool): If False, create a new node and label it.
            If True, use an existing node to replace the deleted node
            and label it.

    Returns:
        str: The new node name
    """
    # Local imports — presumably avoid circular imports; confirm before hoisting.
    from ocs_ci.ocs.platform_nodes import PlatformNodesFactory
    from ocs_ci.ocs.resources.storage_cluster import get_osd_size

    sc_name = constants.LOCAL_BLOCK_RESOURCE
    # Snapshot the PVs before the replacement so the new PV can be detected later
    old_pv_objs = get_pv_objs_in_sc(sc_name)
    osd_node = get_node_objs(node_names=[osd_node_name])[0]
    osd_pod = get_node_pods(osd_node_name, pods_to_search=pod.get_osd_pods())[0]
    osd_id = pod.get_osd_pod_id(osd_pod)
    log.info(f"osd id to remove = {osd_id}")
    # Save the node hostname before deleting the node
    osd_node_hostname_label = get_node_hostname_label(osd_node)
    log.info("Scale down node deployments...")
    scale_down_deployments(osd_node_name)
    log.info("Scale down deployments finished successfully")
    new_node_name = delete_and_create_osd_node_vsphere_upi(
        osd_node_name, use_existing_node
    )
    assert new_node_name, "Failed to create a new node"
    log.info(f"New node created successfully. Node name: {new_node_name}")
    # If we use LSO, we need to create and attach a new disk manually
    new_node = get_node_objs(node_names=[new_node_name])[0]
    plt = PlatformNodesFactory()
    node_util = plt.get_nodes_platform()
    osd_size = get_osd_size()
    log.info(
        f"Create a new disk with size {osd_size}, and attach to node {new_node_name}"
    )
    node_util.create_and_attach_volume(node=new_node, size=osd_size)
    new_node_hostname_label = get_node_hostname_label(new_node)
    log.info(
        "Replace the old node with the new worker node in localVolumeDiscovery and localVolumeSet"
    )
    res = add_new_node_to_lvd_and_lvs(
        old_node_name=osd_node_hostname_label,
        new_node_name=new_node_hostname_label,
    )
    assert res, "Failed to add the new node to LVD and LVS"
    log.info("Verify new pv is available...")
    is_new_pv_available = verify_new_pv_available_in_sc(old_pv_objs, sc_name)
    assert is_new_pv_available, "New pv is not available"
    log.info("Finished verifying that the new pv is available")
    osd_removal_job = pod.run_osd_removal_job(osd_id)
    assert osd_removal_job, "ocs-osd-removal failed to create"
    # BUGFIX: the original wrapped this call in a one-element tuple (trailing
    # comma), which is always truthy, so the assert below could never fail
    # even when the removal job did not complete.
    is_completed = pod.verify_osd_removal_job_completed_successfully(osd_id)
    assert is_completed, "ocs-osd-removal-job is not in status 'completed'"
    log.info("ocs-osd-removal-job completed successfully")
    expected_num_of_deleted_pvs = 1
    num_of_deleted_pvs = delete_released_pvs_in_sc(sc_name)
    assert (
        num_of_deleted_pvs == expected_num_of_deleted_pvs
    ), f"num of deleted PVs is {num_of_deleted_pvs} instead of {expected_num_of_deleted_pvs}"
    log.info("Successfully deleted old pv")
    is_deleted = pod.delete_osd_removal_job(osd_id)
    assert is_deleted, "Failed to delete ocs-osd-removal-job"
    log.info("ocs-osd-removal-job deleted successfully")
    return new_node_name
def label_nodes(nodes, label=constants.OPERATOR_NODE_LABEL):
    """
    Label nodes.

    Args:
        nodes (list): list of node objects need to label
        label (str): New label to be assigned for these nodes.
            Default value is the OCS label
    """
    node_obj = ocp.OCP(kind="node")
    for new_node_to_label in nodes:
        node_obj.add_label(resource_name=new_node_to_label.name, label=label)
        # Consistency fix: use the module-level logger (`log`) like the rest
        # of this file, instead of the root logger via `logging.info`.
        log.info(
            f"Successfully labeled {new_node_to_label.name} with OCS storage label"
        )
def get_master_nodes():
    """
    Fetch all master nodes.

    Returns:
        list: List of names of master nodes
    """
    ocp_node_obj = ocp.OCP(kind=constants.NODE)
    node_items = ocp_node_obj.get(selector="node-role.kubernetes.io/master").get(
        "items"
    )
    return [item.get("metadata").get("name") for item in node_items]
def get_worker_nodes():
    """
    Fetch all worker nodes.

    Returns:
        list: List of names of worker nodes
    """
    ocp_node_obj = ocp.OCP(kind=constants.NODE)
    node_items = ocp_node_obj.get(selector="node-role.kubernetes.io/worker").get(
        "items"
    )
    # Eliminate infra nodes from worker nodes in case of openshift dedicated
    if config.ENV_DATA["platform"].lower() == "openshiftdedicated":
        infra_items = ocp_node_obj.get(selector=constants.INFRA_NODE_LABEL).get("items")
        infra_names = {item.get("metadata").get("name") for item in infra_items}
        node_items = [
            item
            for item in node_items
            if item.get("metadata").get("name") not in infra_names
        ]
    return [item.get("metadata").get("name") for item in node_items]
def get_worker_nodes_not_in_ocs():
    """
    Get the worker nodes that are not ocs labeled.

    Returns:
        list: list of worker node objects that are not ocs labeled
    """
    ocs_names = {ocs_node.name for ocs_node in get_ocs_nodes()}
    return [
        worker
        for worker in get_nodes(constants.WORKER_MACHINE)
        if worker.name not in ocs_names
    ]
def node_replacement_verification_steps_user_side(
    old_node_name, new_node_name, new_osd_node_name, old_osd_id
):
    """
    Check the verification steps that the user should perform after the process
    of node replacement as described in the docs.

    Args:
        old_node_name (str): The name of the old node that has been deleted
        new_node_name (str): The name of the new node that has been created
        new_osd_node_name (str): The name of the new node that has been added to osd nodes
        old_osd_id (str): The old osd id

    Returns:
        bool: True if all the verification steps passed. False otherwise
    """
    ocs_nodes = get_ocs_nodes()
    ocs_node_names = [n.name for n in ocs_nodes]
    if new_node_name not in ocs_node_names:
        log.warning("The new node not found in ocs nodes")
        return False
    if old_node_name in ocs_node_names:
        log.warning("The old node name found in ocs nodes")
        return False
    csi_cephfsplugin_pods = pod.get_plugin_pods(interface=constants.CEPHFILESYSTEM)
    csi_rbdplugin_pods = pod.get_plugin_pods(interface=constants.CEPHBLOCKPOOL)
    csi_plugin_pods = csi_cephfsplugin_pods + csi_rbdplugin_pods
    if not all([p.status() == constants.STATUS_RUNNING for p in csi_plugin_pods]):
        log.warning("Not all csi rbd and cephfs plugin pods in status running")
        return False
    # It can take some time until all the ocs pods are up and running
    # after the process of node replacement
    if not pod.wait_for_pods_to_be_running():
        log.warning("Not all the pods in running state")
        return False
    # BUGFIX: the original indexed [0] before checking for emptiness, so an
    # empty result raised IndexError instead of reaching the warning below.
    node_osd_pods = get_node_pods(new_osd_node_name, pods_to_search=pod.get_osd_pods())
    if not node_osd_pods:
        log.warning("Didn't find any osd pods running on the new node")
        return False
    new_osd_pod = node_osd_pods[0]
    new_osd_id = pod.get_osd_pod_id(new_osd_pod)
    if old_osd_id != new_osd_id:
        log.warning(
            f"The osd pod, that associated to the new node, has the id {new_osd_id} "
            f"instead of the expected osd id {old_osd_id}"
        )
        return False
    log.info("Verification steps from the user side finish successfully")
    return True
def node_replacement_verification_steps_ceph_side(
    old_node_name, new_node_name, new_osd_node_name
):
    """
    Check the verification steps from the Ceph side, after the process
    of node replacement as described in the docs.

    Args:
        old_node_name (str): The name of the old node that has been deleted
        new_node_name (str): The name of the new node that has been created
        new_osd_node_name (str): The name of the new node that has been added to osd nodes

    Returns:
        bool: True if all the verification steps passed. False otherwise
    """
    if old_node_name == new_node_name:
        log.warning("Hostname didn't change")
        return False
    # Both the new node and the new osd node must be Ready before checking Ceph
    wait_for_nodes_status([new_node_name, new_osd_node_name])
    # It can take some time until all the ocs pods are up and running
    # after the process of node replacement
    if not pod.wait_for_pods_to_be_running():
        log.warning("Not all the pods in running state")
        return False
    ct_pod = pod.get_ceph_tools_pod()
    ceph_osd_status = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd status")
    # The new node should appear in 'ceph osd status' and the old one must not
    if new_osd_node_name not in ceph_osd_status:
        log.warning("new osd node name not found in 'ceph osd status' output")
        return False
    if old_node_name in ceph_osd_status:
        log.warning("old node name found in 'ceph osd status' output")
        return False
    osd_node_names = get_osd_running_nodes()
    if new_osd_node_name not in osd_node_names:
        log.warning("the new osd hostname not found in osd node names")
        return False
    if old_node_name in osd_node_names:
        log.warning("the old hostname found in osd node names")
        return False
    # Local import — presumably avoids a circular import with ocs.cluster;
    # confirm before moving to module level.
    from ocs_ci.ocs.cluster import check_ceph_osd_tree_after_node_replacement

    if not check_ceph_osd_tree_after_node_replacement():
        return False
    log.info("Verification steps from the ceph side finish successfully")
    return True
def is_node_labeled(node_name, label=constants.OPERATOR_NODE_LABEL):
    """
    Check if the node is labeled with a specified label.

    Args:
        node_name (str): The node name to check if it has the specific label
        label (str): The name of the label. Default value is the OCS label.

    Returns:
        bool: True if the node is labeled with the specified label. False otherwise
    """
    labeled_node_names = machine.get_labeled_nodes(label=label)
    return node_name in labeled_node_names
def taint_nodes(nodes, taint_label=constants.OCS_TAINT):
    """
    Taint nodes.

    Args:
        nodes (list): list of node names need to taint
        taint_label (str): New taint label to be assigned for these nodes.
            Default value is the OCS taint
    """
    ocp_obj = ocp.OCP()
    for node in nodes:
        command = f"adm taint node {node} {taint_label}"
        try:
            ocp_obj.exec_oc_cmd(command)
            # Consistency fix: use the module-level logger (`log`) like the
            # rest of this file, instead of the root logger via `logging.info`.
            log.info(f"Successfully tainted {node} with OCS storage taint")
        except Exception as e:
            # Best-effort: a node that is already tainted (or otherwise
            # rejects the taint) is reported but does not abort the loop.
            log.info(f"{node} was not tainted - {e}")
def check_taint_on_ocs_nodes(taint=constants.OPERATOR_NODE_TAINT):
    """
    Check whether any OCS node carries the given taint.

    Args:
        taint (str): The taint to check on nodes

    Return:
        bool: True if taint is present on at least one ocs node. False otherwise
    """
    # BUGFIX: the original initialized flag = -1 (truthy), so it returned True
    # when NO ocs node had any taints at all; it also inspected only the first
    # entry of each node's taint list. Check every taint and default to False.
    for node_obj in get_ocs_nodes():
        taints = node_obj.get().get("spec").get("taints") or []
        for node_taint in taints:
            if taint in node_taint.get("key", ""):
                log.info(f"Node {node_obj.name} has taint {taint}")
                return True
    return False
def taint_ocs_nodes(nodes_to_taint=None):
    """
    Taint nodes with "node.ocs.openshift.io/storage=true:NoSchedule".

    Args:
        nodes_to_taint (list): Nodes to taint
    """
    if check_taint_on_ocs_nodes():
        log.info(
            f"One or more nodes already have taint {constants.OPERATOR_NODE_TAINT} "
        )
        return
    # Renamed local (was `ocp`) to avoid shadowing the imported ocp module
    ocp_obj = OCP()
    targets = nodes_to_taint or get_ocs_nodes()
    log.info(f"Taint nodes with taint: {constants.OPERATOR_NODE_TAINT}")
    for node in targets:
        taint_cmd = f"adm taint nodes {node.name} {constants.OPERATOR_NODE_TAINT}"
        ocp_obj.exec_oc_cmd(command=taint_cmd)
def untaint_ocs_nodes(taint=constants.OPERATOR_NODE_TAINT, nodes_to_untaint=None):
    """
    Remove taints from nodes.

    Args:
        taint (str): taint to use
        nodes_to_untaint (list): list of nodes to untaint

    Return:
        bool: True if untainted, false otherwise
    """
    if not check_taint_on_ocs_nodes():
        return False
    # Renamed local (was `ocp`) to avoid shadowing the imported ocp module
    ocp_obj = OCP()
    targets = nodes_to_untaint or get_ocs_nodes()
    for node in targets:
        # Trailing '-' on the taint key removes it
        ocp_obj.exec_oc_cmd(command=f"adm taint nodes {node.name} {taint}-")
        log.info(f"Untainted {node.name}")
    return True
def get_node_pods(node_name, pods_to_search=None):
    """
    Get all the pods of a specified node.

    Args:
        node_name (str): The node name to get the pods
        pods_to_search (list): list of pods to search for the node pods.
            If not specified, will search in all the pods.

    Returns:
        list: list of all the pods of the specified node
    """
    candidates = pods_to_search or pod.get_all_pods()
    node_pods = []
    for candidate in candidates:
        if pod.get_pod_node(candidate).name == node_name:
            node_pods.append(candidate)
    return node_pods
def get_node_pods_to_scale_down(node_name):
    """
    Get the pods of a node to scale down as described in the documents
    of node replacement with LSO.

    Args:
        node_name (str): The node name

    Returns:
        list: The node's pods to scale down
    """
    # mon, osd and mgr pods are the ones that must be scaled down
    candidate_pods = pod.get_mon_pods() + pod.get_osd_pods() + pod.get_mgr_pods()
    return get_node_pods(node_name, candidate_pods)
def scale_down_deployments(node_name):
    """
    Scale down the deployments of a node as described in the documents
    of node replacement with LSO.

    Args:
        node_name (str): The node name
    """
    # Renamed local (was `ocp`) to avoid shadowing the imported ocp module
    ocp_obj = OCP(kind="node", namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    pods_to_scale_down = get_node_pods_to_scale_down(node_name)
    for p in pods_to_scale_down:
        deployment_name = pod.get_deployment_name(p.name)
        # BUGFIX: corrected log-message typo "deploymet" -> "deployment"
        log.info(f"Scale down deployment {deployment_name}")
        ocp_obj.exec_oc_cmd(f"scale deployment {deployment_name} --replicas=0")
    log.info("Scale down rook-ceph-crashcollector")
    ocp_obj.exec_oc_cmd(
        f"scale deployment --selector=app=rook-ceph-crashcollector,"
        f"node_name='{node_name}' --replicas=0"
    )
def get_node_index_in_local_block(node_name):
    """
    Get the node index in the node values as it appears in the local block resource.

    Args:
        node_name (str): The node name to search for his index

    Returns:
        int: The node index in the nodeSelector values
    """
    ocp_lvs_obj = OCP(
        kind=constants.LOCAL_VOLUME_SET,
        namespace=defaults.LOCAL_STORAGE_NAMESPACE,
        resource_name=constants.LOCAL_BLOCK_RESOURCE,
    )
    node_selector = ocp_lvs_obj.get().get("spec").get("nodeSelector")
    first_term = node_selector.get("nodeSelectorTerms")[0]
    first_expression = first_term.get("matchExpressions")[0]
    node_values = first_expression.get("values")
    return node_values.index(node_name)
def add_new_node_to_lvd_and_lvs(old_node_name, new_node_name):
    """
    Replace the old node with the new node in localVolumeDiscovery and localVolumeSet,
    as described in the documents of node replacement with LSO.

    Args:
        old_node_name (str): The old node name to remove from the local volume
        new_node_name (str): the new node name to add to the local volume

    Returns:
        bool: True in case if changes are applied. False otherwise
    """
    old_node_index = get_node_index_in_local_block(old_node_name)
    # JSON-patch path to the old node's entry in the nodeSelector values list
    path_to_old_node = f"/spec/nodeSelector/nodeSelectorTerms/0/matchExpressions/0/values/{old_node_index}"
    params = f"""[{{"op": "replace", "path": "{path_to_old_node}", "value": "{new_node_name}"}}]"""
    ocp_lvd_obj = OCP(
        kind=constants.LOCAL_VOLUME_DISCOVERY,
        namespace=defaults.LOCAL_STORAGE_NAMESPACE,
    )
    ocp_lvs_obj = OCP(
        kind=constants.LOCAL_VOLUME_SET,
        namespace=defaults.LOCAL_STORAGE_NAMESPACE,
        resource_name=constants.LOCAL_BLOCK_RESOURCE,
    )
    # The same patch is applied to both resources; both must succeed.
    # NOTE(review): assumes the node appears at the same index in both
    # resources' nodeSelector values — confirm for clusters where LVD and
    # LVS were created with different node orderings.
    lvd_result = ocp_lvd_obj.patch(params=params, format_type="json")
    lvs_result = ocp_lvs_obj.patch(params=params, format_type="json")
    return lvd_result and lvs_result
def get_node_hostname_label(node_obj):
    """
    Get the hostname label of a node.

    Args:
        node_obj (ocs_ci.ocs.resources.ocs.OCS): The node object

    Returns:
        str: The node's hostname label
    """
    node_labels = node_obj.get().get("metadata").get("labels")
    return node_labels.get(constants.HOSTNAME_LABEL)
def wait_for_new_osd_node(old_osd_node_names, timeout=180):
    """
    Wait for the new osd node to appear.

    Args:
        old_osd_node_names (list): List of the old osd node names
        timeout (int): time to wait for the new osd node to appear

    Returns:
        str: The new osd node name if the new osd node appear in the specific timeout.
            Else it returns None
    """
    try:
        # Poll the current set of osd nodes every 10 seconds until a name
        # shows up that was not in the old set.
        for current_osd_node_names in TimeoutSampler(
            timeout=timeout, sleep=10, func=get_osd_running_nodes
        ):
            new_osd_node_names = [
                node_name
                for node_name in current_osd_node_names
                if node_name not in old_osd_node_names
            ]
            if new_osd_node_names:
                log.info(f"New osd node is {new_osd_node_names[0]}")
                return new_osd_node_names[0]
    except TimeoutExpiredError:
        # Timing out is an expected outcome here; signal it with None
        log.warning(f"New osd node didn't appear after {timeout} seconds")
        return None
import copy
import logging
import re
from collections import defaultdict
from subprocess import TimeoutExpired

from prettytable import PrettyTable

from ocs_ci.framework import config
from ocs_ci.ocs import constants, defaults, exceptions, machine, ocp
from ocs_ci.ocs.exceptions import TimeoutExpiredError
from ocs_ci.ocs.machine import get_machine_objs
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pod
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.resources.pv import (
    get_pv_objs_in_sc,
    verify_new_pv_available_in_sc,
    delete_released_pvs_in_sc,
)
from ocs_ci.utility.utils import (
    TimeoutSampler,
    convert_device_size,
    set_selinux_permissions,
)
log = logging.getLogger(__name__)
def get_node_objs(node_names=None):
    """
    Get node objects by node names.

    Args:
        node_names (list): The node names to get their objects for.
            If None, will return all cluster nodes

    Returns:
        list: Cluster node OCP objects
    """
    nodes_obj = OCP(kind="node")
    node_dicts = nodes_obj.get()["items"]
    if node_names:
        nodes = [
            OCS(**node_dict)
            for node_dict in node_dicts
            if node_dict.get("metadata").get("name") in node_names
        ]
    else:
        nodes = [OCS(**node_dict) for node_dict in node_dicts]
    assert nodes, "Failed to get the nodes OCS objects"
    return nodes
def get_nodes(node_type=constants.WORKER_MACHINE, num_of_nodes=None):
    """
    Get cluster's nodes according to the node type (e.g. worker, master) and the
    number of requested nodes from that type.

    Args:
        node_type (str): The node type (e.g. worker, master)
        num_of_nodes (int): The number of nodes to be returned

    Returns:
        list: The nodes OCP instances
    """
    # On OpenShift Dedicated, infra nodes also carry the worker role, so
    # they must be filtered out when worker nodes are requested.
    if (
        config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
        and node_type == constants.WORKER_MACHINE
    ):
        typed_nodes = [
            node
            for node in get_node_objs()
            if node_type
            in node.ocp.get_resource(resource_name=node.name, column="ROLES")
            and constants.INFRA_MACHINE
            not in node.ocp.get_resource(resource_name=node.name, column="ROLES")
        ]
    else:
        # Substring match against the ROLES column of `oc get nodes`
        typed_nodes = [
            node
            for node in get_node_objs()
            if node_type
            in node.ocp.get_resource(resource_name=node.name, column="ROLES")
        ]
    if num_of_nodes:
        typed_nodes = typed_nodes[:num_of_nodes]
    return typed_nodes
def get_all_nodes():
    """
    Get all nodes in the cluster.

    Returns:
        list: List of node name
    """
    ocp_node_obj = ocp.OCP(kind=constants.NODE)
    return [
        item["metadata"]["name"] for item in ocp_node_obj.get().get("items")
    ]
def wait_for_nodes_status(node_names=None, status=constants.NODE_READY, timeout=180):
    """
    Wait until all nodes are in the given status.

    Args:
        node_names (list): The node names to wait for to reached the desired state
            If None, will wait for all cluster nodes
        status (str): The node status to wait for
            (e.g. 'Ready', 'NotReady', 'SchedulingDisabled')
        timeout (int): The number in seconds to wait for the nodes to reach
            the status

    Raises:
        ResourceWrongStatusException: In case one or more nodes haven't
            reached the desired state
    """
    try:
        # Resolve "all nodes" lazily — the node list itself may not be
        # retrievable immediately (e.g. right after a reboot).
        if not node_names:
            for sample in TimeoutSampler(60, 3, get_node_objs):
                if sample:
                    node_names = [node.name for node in sample]
                    break
        # Track which nodes still haven't reached the target status
        nodes_not_in_state = copy.deepcopy(node_names)
        log.info(f"Waiting for nodes {node_names} to reach status {status}")
        for sample in TimeoutSampler(timeout, 3, get_node_objs, nodes_not_in_state):
            for node in sample:
                if node.ocp.get_resource_status(node.name) == status:
                    log.info(f"Node {node.name} reached status {status}")
                    nodes_not_in_state.remove(node.name)
            if not nodes_not_in_state:
                break
        log.info(f"The following nodes reached status {status}: {node_names}")
    except TimeoutExpiredError:
        # NOTE(review): if the *first* sampler (resolving node_names) times
        # out, nodes_not_in_state is unbound here and this raises NameError
        # instead — confirm whether that path can occur in practice.
        log.error(
            f"The following nodes haven't reached status {status}: "
            f"{nodes_not_in_state}"
        )
        raise exceptions.ResourceWrongStatusException(
            node_names, [n.describe() for n in get_node_objs(node_names)]
        )
def unschedule_nodes(node_names):
    """
    Mark nodes as unschedulable (cordon).

    Args:
        node_names (list): The names of the nodes
    """
    # Renamed local (was `ocp`) to avoid shadowing the imported ocp module
    ocp_obj = OCP(kind="node")
    joined_names = " ".join(node_names)
    log.info(f"Unscheduling nodes {joined_names}")
    ocp_obj.exec_oc_cmd(f"adm cordon {joined_names}")
    wait_for_nodes_status(node_names, status=constants.NODE_READY_SCHEDULING_DISABLED)
def schedule_nodes(node_names):
    """
    Mark nodes as schedulable again (uncordon).

    Args:
        node_names (list): The names of the nodes
    """
    # Renamed local (was `ocp`) to avoid shadowing the imported ocp module
    ocp_obj = OCP(kind="node")
    joined_names = " ".join(node_names)
    ocp_obj.exec_oc_cmd(f"adm uncordon {joined_names}")
    log.info(f"Scheduling nodes {joined_names}")
    wait_for_nodes_status(node_names)
def drain_nodes(node_names):
    """
    Drain nodes.

    Args:
        node_names (list): The names of the nodes

    Raises:
        TimeoutExpired: in case drain command fails to complete in time
    """
    ocp = OCP(kind="node")
    node_names_str = " ".join(node_names)
    log.info(f"Draining nodes {node_names_str}")
    try:
        # --force evicts unmanaged pods; --delete-local-data allows evicting
        # pods that use emptyDir volumes. Drain can legitimately take long
        # while pods are rescheduled, hence the 30-minute timeout.
        ocp.exec_oc_cmd(
            f"adm drain {node_names_str} --force=true --ignore-daemonsets "
            f"--delete-local-data",
            timeout=1800,
        )
    except TimeoutExpired:
        # Capture ceph status for debugging before propagating the failure —
        # a slow rebalance is a common cause of drain timeouts.
        ct_pod = pod.get_ceph_tools_pod()
        ceph_status = ct_pod.exec_cmd_on_pod("ceph status", out_yaml_format=False)
        log.error(f"Drain command failed to complete. Ceph status: {ceph_status}")
        # TODO: Add re-balance status once pull/1679 is merged
        raise
def get_typed_worker_nodes(os_id="rhcos"):
    """
    Get worker nodes with a specific OS.

    Args:
        os_id (str): OS type like rhcos, RHEL etc...

    Returns:
        list: list of worker nodes instances having specified os
    """
    matching_workers = []
    for worker in get_nodes(node_type="worker"):
        worker_labels = worker.get().get("metadata").get("labels")
        if worker_labels.get("node.openshift.io/os_id") == os_id:
            matching_workers.append(worker)
    return matching_workers
def remove_nodes(nodes):
    """
    Remove the nodes from the cluster.

    Args:
        nodes (list): list of node instances to remove from cluster
    """
    # Renamed local (was `ocp`) to avoid shadowing the imported ocp module
    ocp_obj = OCP(kind="node")
    node_names = [node.get().get("metadata").get("name") for node in nodes]
    joined_names = " ".join(node_names)
    # unschedule node
    unschedule_nodes(node_names)
    # Drain all the pods from the node
    drain_nodes(node_names)
    # delete the nodes
    log.info(f"Deleting nodes {joined_names}")
    ocp_obj.exec_oc_cmd(f"delete nodes {joined_names}")
def get_node_ips(node_type="worker"):
    """
    Get the node public IPs.

    Args:
        node_type (str): The node type (e.g. worker, master)

    Returns:
        list: Node IP's

    Raises:
        ValueError: if node_type is neither "worker" nor "master"
        NotImplementedError: for platforms other than vSphere
    """
    ocp = OCP(kind=constants.NODE)
    if node_type == "worker":
        nodes = ocp.get(selector=constants.WORKER_LABEL).get("items")
    # BUGFIX: the original compared against "master:" (stray colon), which
    # made the master branch unreachable and left `nodes` unbound
    # (NameError) for any non-worker type.
    elif node_type == "master":
        nodes = ocp.get(selector=constants.MASTER_LABEL).get("items")
    else:
        raise ValueError(f"Unsupported node type: {node_type}")
    if config.ENV_DATA["platform"].lower() == constants.AWS_PLATFORM:
        raise NotImplementedError
    elif config.ENV_DATA["platform"].lower() == constants.VSPHERE_PLATFORM:
        return [
            each["address"]
            for node in nodes
            for each in node["status"]["addresses"]
            if each["type"] == "ExternalIP"
        ]
    else:
        raise NotImplementedError
def add_new_node_and_label_it(machineset_name, num_nodes=1, mark_for_ocs_label=True):
    """
    Add a new node for ipi and label it.

    Args:
        machineset_name (str): Name of the machine set
        num_nodes (int): number of nodes to add
        mark_for_ocs_label (bool): True if label the new node

    eg: add_new_node_and_label_it("new-tdesala-zlqzn-worker-us-east-2a")

    Returns:
        list: new spun node names
    """
    # Get the initial nodes list
    initial_nodes = get_worker_nodes()
    log.info(f"Current available worker nodes are {initial_nodes}")
    # get machineset replica count
    machineset_replica_count = machine.get_replica_count(machineset_name)
    log.info(f"{machineset_name} has replica count: {machineset_replica_count}")
    # Increase its replica count
    log.info(f"Increasing the replica count by {num_nodes}")
    machine.add_node(machineset_name, count=machineset_replica_count + num_nodes)
    log.info(
        f"{machineset_name} now has replica "
        f"count: {machineset_replica_count + num_nodes}"
    )
    # wait for the new node to come to ready state
    log.info("Waiting for the new node to be in ready state")
    machine.wait_for_new_node_to_be_ready(machineset_name)
    # Get the node name of new spun node — the new names are the set
    # difference between the current and the initial worker-node lists.
    nodes_after_new_spun_node = get_worker_nodes()
    new_spun_nodes = list(set(nodes_after_new_spun_node) - set(initial_nodes))
    log.info(f"New spun nodes: {new_spun_nodes}")
    # Label it
    if mark_for_ocs_label:
        node_obj = ocp.OCP(kind="node")
        for new_spun_node in new_spun_nodes:
            # Skip nodes that already carry the OCS label (idempotency)
            if is_node_labeled(new_spun_node):
                logging.info(
                    f"node {new_spun_node} is already labeled with the OCS storage label"
                )
            else:
                node_obj.add_label(
                    resource_name=new_spun_node, label=constants.OPERATOR_NODE_LABEL
                )
                logging.info(
                    f"Successfully labeled {new_spun_node} with OCS storage label"
                )
    return new_spun_nodes
def add_new_node_and_label_upi(
    node_type, num_nodes, mark_for_ocs_label=True, node_conf=None
):
    """
    Add a new node for aws/vmware upi platform and label it.

    Args:
        node_type (str): Type of node, RHEL or RHCOS
        num_nodes (int): number of nodes to add
        mark_for_ocs_label (bool): True if label the new node
        node_conf (dict): The node configurations.

    Returns:
        list: new spun node names
    """
    node_conf = node_conf or {}
    initial_nodes = get_worker_nodes()
    # Local import — presumably avoids a circular import with platform_nodes;
    # confirm before moving it to module level.
    from ocs_ci.ocs.platform_nodes import PlatformNodesFactory

    plt = PlatformNodesFactory()
    node_util = plt.get_nodes_platform()
    node_util.create_and_attach_nodes_to_cluster(node_conf, node_type, num_nodes)
    # Poll (up to 10 minutes) until all requested nodes appear in the cluster
    for sample in TimeoutSampler(timeout=600, sleep=6, func=get_worker_nodes):
        if len(sample) == len(initial_nodes) + num_nodes:
            break
    nodes_after_exp = get_worker_nodes()
    wait_for_nodes_status(node_names=get_worker_nodes(), status=constants.NODE_READY)
    # New names are the set difference against the initial worker list
    new_spun_nodes = list(set(nodes_after_exp) - set(initial_nodes))
    log.info(f"New spun nodes: {new_spun_nodes}")
    if node_type == constants.RHEL_OS:
        # NOTE(review): here node *names* (strings) are passed, while
        # delete_and_create_osd_node_vsphere_upi passes node objects to the
        # same helper — confirm which type set_selinux_permissions expects.
        set_selinux_permissions(workers=new_spun_nodes)
    if mark_for_ocs_label:
        node_obj = ocp.OCP(kind="node")
        for new_spun_node in new_spun_nodes:
            node_obj.add_label(
                resource_name=new_spun_node, label=constants.OPERATOR_NODE_LABEL
            )
            logging.info(f"Successfully labeled {new_spun_node} with OCS storage label")
    return new_spun_nodes
def get_node_logs(node_name):
    """
    Get logs from a given node

    Args:
        node_name (str): Name of the node

    Returns:
        str: Output of 'dmesg' run on node

    """
    node = OCP(kind="node")
    # Runs dmesg on the node via an 'oc debug' pod
    return node.exec_oc_debug_cmd(node_name, ["dmesg"])
def get_node_resource_utilization_from_adm_top(
    nodename=None, node_type=constants.WORKER_MACHINE, print_table=False
):
    """
    Gets the node's cpu and memory utilization in percentage using adm top command.

    Args:
        nodename (str) : The node name
        node_type (str) : The node type (e.g. master, worker)
        print_table (bool) : True to also log the results as a table

    Returns:
        dict : Node name and its cpu and memory utilization in
               percentage

    """
    node_names = (
        [nodename]
        if nodename
        else [node.name for node in get_nodes(node_type=node_type)]
    )
    # Validate node is in Ready state
    wait_for_nodes_status(node_names, status=constants.NODE_READY, timeout=30)
    obj = ocp.OCP()
    resource_utilization_all_nodes = obj.exec_oc_cmd(
        command="adm top nodes", out_yaml_format=False
    ).split("\n")
    utilization_dict = {}
    for node in node_names:
        for value in resource_utilization_all_nodes:
            if node in value:
                # 'adm top nodes' rows contain two percentage columns;
                # the first match is CPU%, the second is memory%
                value = re.findall(r"(\d{1,3})%", value.strip())
                cpu_utilization = value[0]
                log.info(
                    "The CPU utilized by the node " f"{node} is {cpu_utilization}%"
                )
                memory_utilization = value[1]
                log.info(
                    "The memory utilized of the node "
                    f"{node} is {memory_utilization}%"
                )
                utilization_dict[node] = {
                    "cpu": int(cpu_utilization),
                    "memory": int(memory_utilization),
                }
    if print_table:
        print_table_node_resource_utilization(
            utilization_dict=utilization_dict,
            field_names=["Node Name", "CPU USAGE adm_top", "Memory USAGE adm_top"],
        )
    return utilization_dict
def get_node_resource_utilization_from_oc_describe(
    nodename=None, node_type=constants.WORKER_MACHINE, print_table=False
):
    """
    Gets the node's cpu and memory utilization in percentage using oc describe node

    Args:
        nodename (str) : The node name
        node_type (str) : The node type (e.g. master, worker)
        print_table (bool) : True to also log the results as a table

    Returns:
        dict : Node name and its cpu and memory utilization in
               percentage

    """
    node_names = (
        [nodename]
        if nodename
        else [node.name for node in get_nodes(node_type=node_type)]
    )
    obj = ocp.OCP()
    utilization_dict = {}
    for node in node_names:
        output = obj.exec_oc_cmd(
            command=f"describe node {node}", out_yaml_format=False
        ).split("\n")
        for line in output:
            # Parse the "Allocated resources" rows; the third non-empty token
            # holds the percentage, e.g. "cpu  1500m (42%)" -> "(42%)" -> 42
            # NOTE(review): assumes this 'oc describe' column layout — confirm
            # against the cluster's oc version
            if "cpu " in line:
                cpu_data = line.split(" ")
                cpu = re.findall(r"\d+", [i for i in cpu_data if i][2])
            if "memory " in line:
                mem_data = line.split(" ")
                mem = re.findall(r"\d+", [i for i in mem_data if i][2])
        utilization_dict[node] = {"cpu": int(cpu[0]), "memory": int(mem[0])}
    if print_table:
        print_table_node_resource_utilization(
            utilization_dict=utilization_dict,
            field_names=[
                "Node Name",
                "CPU USAGE oc_describe",
                "Memory USAGE oc_describe",
            ],
        )
    return utilization_dict
def get_running_pod_count_from_node(nodename=None, node_type=constants.WORKER_MACHINE):
    """
    Gets the node running pod count using oc describe node

    Args:
        nodename (str) : The node name
        node_type (str) : The node type (e.g. master, worker)

    Returns:
        dict : Node name and its pod_count

    """
    node_names = (
        [nodename]
        if nodename
        else [node.name for node in get_nodes(node_type=node_type)]
    )
    obj = ocp.OCP()
    pod_count_dict = {}
    for node in node_names:
        output = obj.exec_oc_cmd(
            command=f"describe node {node}", out_yaml_format=False
        ).split("\n")
        for line in output:
            # Line looks like "Non-terminated Pods:  (12 in total)";
            # the third non-empty token "(12" yields the count
            if "Non-terminated Pods: " in line:
                count_line = line.split(" ")
                pod_count = re.findall(r"\d+", [i for i in count_line if i][2])
        pod_count_dict[node] = int(pod_count[0])
    return pod_count_dict
def print_table_node_resource_utilization(utilization_dict, field_names):
    """
    Log a PrettyTable of per-node CPU and memory utilization.

    Args:
        utilization_dict (dict) : CPU and Memory utilization per Node
        field_names (list) : The field names of the table

    """
    table = PrettyTable()
    table.field_names = field_names
    for node_name, usage in utilization_dict.items():
        cpu_cell = f'{usage["cpu"]}%'
        mem_cell = f'{usage["memory"]}%'
        table.add_row([node_name, cpu_cell, mem_cell])
    log.info(f"\n{table}\n")
def node_network_failure(node_names, wait=True):
    """
    Induce node network failure
    Bring node network interface down, making the node unresponsive

    Args:
        node_names (list): The names of the nodes
        wait (bool): True in case wait for status is needed, False otherwise

    Returns:
        bool: True if node network fail is successful

    """
    if not isinstance(node_names, list):
        node_names = [node_names]
    ocp = OCP(kind="node")
    # Bring down the interface that carries the default route
    fail_nw_cmd = "ifconfig $(route | grep default | awk '{print $(NF)}') down"
    for node_name in node_names:
        try:
            ocp.exec_oc_debug_cmd(node=node_name, cmd_list=[fail_nw_cmd], timeout=15)
        except TimeoutExpired:
            # Expected: the node becomes unreachable while the command runs,
            # so the debug session never returns cleanly
            pass
    if wait:
        wait_for_nodes_status(node_names=node_names, status=constants.NODE_NOT_READY)
    return True
def get_osd_running_nodes():
    """
    Get the names of the nodes that currently host an OSD pod.

    Returns:
        list: OSD node names

    """
    hosting_nodes = []
    for osd_pod in pod.get_osd_pods():
        hosting_nodes.append(pod.get_pod_node(osd_pod).name)
    return hosting_nodes
def get_osds_per_node():
    """
    Map each node name to the OSD pod names it is running.

    Returns:
        dict: {"Node name":["osd running pod name running on the node",..,]}

    """
    node_to_osds = defaultdict(list)
    for osd in pod.get_osd_pods():
        hosting_node = pod.get_pod_node(osd).name
        node_to_osds[hosting_node].append(osd.name)
    return node_to_osds
def get_app_pod_running_nodes(pod_obj):
    """
    Get the names of the nodes hosting the given application pods.

    Args:
        pod_obj (list): List of app pod objects

    Returns:
        list: App pod running node names

    """
    hosting_nodes = []
    for app_pod in pod_obj:
        hosting_nodes.append(pod.get_pod_node(app_pod).name)
    return hosting_nodes
def get_both_osd_and_app_pod_running_node(osd_running_nodes, app_pod_running_nodes):
    """
    Get the node names that host both an OSD pod and an app pod.

    Args:
        osd_running_nodes(list): List of osd running node names
        app_pod_running_nodes(list): List of app pod running node names

    Returns:
        list: Both OSD and app pod running node names

    """
    overlap = set(osd_running_nodes).intersection(app_pod_running_nodes)
    common_nodes = list(overlap)
    log.info(f"Common node is {common_nodes}")
    return common_nodes
def get_node_from_machine_name(machine_name):
    """
    Resolve a machine name to the node it backs.

    Args:
        machine_name (str): Name of Machine

    Returns:
        str: Name of Node (or None if not found)

    """
    for machine_obj in get_machine_objs():
        if machine_obj.name != machine_name:
            continue
        # The machine's status.nodeRef points at the node it provisioned
        return machine_obj.get()["status"]["nodeRef"]["name"]
def get_provider():
    """
    Return the OCP Provider (Platform)

    Returns:
        str: The Provider that the OCP is running on

    """
    ocp_cluster = OCP(kind="", resource_name="nodes")
    results = ocp_cluster.get("nodes")["items"][0]["spec"]
    # Cloud nodes carry a providerID like "aws:///us-east-2a/i-0abc...";
    # the prefix before the first ':' identifies the platform
    if "providerID" in results:
        return results["providerID"].split(":")[0]
    else:
        # No providerID set on the node spec -> assume bare metal
        return "BareMetal"
def get_compute_node_names(no_replace=False):
    """
    Gets the compute node names

    Args:
        no_replace (bool): If False '.' will be replaced with '-' on the
            bare-metal-style platforms (ignored on vSphere/AWS, which never
            replace, preserving historical behavior)

    Returns:
        list: List of compute node names

    Raises:
        NotImplementedError: If the platform is not supported

    """
    platform = config.ENV_DATA.get("platform").lower()
    compute_node_objs = get_nodes()
    cloud_platforms = [constants.VSPHERE_PLATFORM, constants.AWS_PLATFORM]
    metal_platforms = [
        constants.BAREMETAL_PLATFORM,
        constants.BAREMETALPSI_PLATFORM,
        constants.IBM_POWER_PLATFORM,
    ]
    if platform not in cloud_platforms + metal_platforms:
        raise NotImplementedError(
            f"Getting compute node names is not implemented for {platform}"
        )
    # The hostname label is the canonical node name on all supported platforms;
    # building the list once removes the branch duplication of the original code
    hostnames = [
        compute_obj.get()["metadata"]["labels"][constants.HOSTNAME_LABEL]
        for compute_obj in compute_node_objs
    ]
    # Only the bare-metal-style platforms normalize '.' to '-' (unless disabled)
    if platform in metal_platforms and not no_replace:
        return [hostname.replace(".", "-") for hostname in hostnames]
    return hostnames
def get_ocs_nodes(num_of_nodes=None):
    """
    Get the nodes that carry the OCS storage label.

    Args:
        num_of_nodes (int): The number of ocs nodes to return. If not specified,
            it returns all the ocs nodes.

    Returns:
        list: List of ocs nodes

    """
    labeled_names = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
    labeled_node_objs = get_node_objs(labeled_names)
    if not num_of_nodes:
        num_of_nodes = len(labeled_node_objs)
    return labeled_node_objs[:num_of_nodes]
def get_node_name(node_obj):
    """
    Get oc node's name

    Args:
        node_obj (node_obj): oc node object

    Returns:
        str: node's name

    """
    # NOTE(review): this assumes node_obj.get("items") returns a single node
    # dict with a "metadata" key; if it returned a *list* of items, the
    # following subscript would fail - confirm against the caller's node type
    node_items = node_obj.get("items")
    return node_items["metadata"]["name"]
def check_nodes_specs(min_memory, min_cpu):
    """
    Check that the cluster worker nodes meet the required minimum CPU and memory

    Args:
        min_memory (int): The required minimum memory in bytes
        min_cpu (int): The required minimum number of vCPUs

    Returns:
        bool: True if all nodes meet the required minimum specs, False otherwise

    """
    nodes = get_nodes()
    log.info(
        f"Checking following nodes with worker selector (assuming that "
        f"this is ran in CI and there are no worker nodes without OCS):\n"
        f"{[node.get().get('metadata').get('name') for node in nodes]}"
    )
    for node in nodes:
        real_cpu = int(node.get()["status"]["capacity"]["cpu"])
        # Capacity memory is reported in Kubernetes quantities (e.g. "16Gi");
        # normalize to bytes for the comparison
        real_memory = convert_device_size(
            node.get()["status"]["capacity"]["memory"], "B"
        )
        if real_cpu < min_cpu or real_memory < min_memory:
            # Fail fast on the first node that is under-provisioned
            log.warning(
                f"Node {node.get().get('metadata').get('name')} specs don't meet "
                f" the minimum required specs.\n The requirements are: "
                f"{min_cpu} CPUs and {min_memory} Memory\nThe node has: {real_cpu} "
                f"CPUs and {real_memory} Memory"
            )
            return False
    log.info(
        f"Cluster worker nodes meet the minimum requirements of "
        f"{min_cpu} CPUs and {min_memory} Memory"
    )
    return True
def delete_and_create_osd_node_ipi(osd_node_name):
    """
    Unschedule, drain and delete osd node, and creating a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for any IPI platform.

    Args:
        osd_node_name (str): the name of the osd node

    Returns:
        str: The new node name

    """
    log.info("Going to unschedule, drain and delete %s node", osd_node_name)
    # Unscheduling node
    unschedule_nodes([osd_node_name])
    # Draining Node
    drain_nodes([osd_node_name])
    log.info("Getting machine name from specified node name")
    machine_name = machine.get_machine_from_node_name(osd_node_name)
    log.info(f"Node {osd_node_name} associated machine is {machine_name}")
    log.info(f"Deleting machine {machine_name} and waiting for new machine to come up")
    machine.delete_machine_and_check_state_of_new_spinned_machine(machine_name)
    new_machine_list = machine.get_machines()
    for machines in new_machine_list:
        # Trimming is done to get just machine name
        # eg:- machine_name:- prsurve-40-ocs-43-kbrvf-worker-us-east-2b-nlgkr
        # After trimming:- prsurve-40-ocs-43-kbrvf-worker-us-east-2b
        if re.match(machines.name[:-6], machine_name):
            new_machine_name = machines.name
    # NOTE(review): if no machine matched above, new_machine_name is unbound
    # and the next line raises NameError - confirm a match is always guaranteed
    machineset_name = machine.get_machineset_from_machine_name(new_machine_name)
    log.info("Waiting for new worker node to be in ready state")
    machine.wait_for_new_node_to_be_ready(machineset_name)
    new_node_name = get_node_from_machine_name(new_machine_name)
    log.info("Adding ocs label to newly created worker node")
    node_obj = ocp.OCP(kind="node")
    node_obj.add_label(resource_name=new_node_name, label=constants.OPERATOR_NODE_LABEL)
    log.info(f"Successfully labeled {new_node_name} with OCS storage label")
    return new_node_name
def delete_and_create_osd_node_aws_upi(osd_node_name):
    """
    Unschedule, drain and delete osd node, and creating a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for AWS UPI.

    Args:
        osd_node_name (str): the name of the osd node

    Returns:
        str: The new node name

    """
    osd_node = get_node_objs(node_names=[osd_node_name])[0]
    # Capture AZ and CloudFormation stack before deletion so the replacement
    # can be created in the same place
    az = get_node_az(osd_node)
    # Imported locally to avoid a circular import at module load time
    from ocs_ci.ocs.platform_nodes import AWSNodes
    aws_nodes = AWSNodes()
    stack_name_of_deleted_node = aws_nodes.get_stack_name_of_node(osd_node_name)
    remove_nodes([osd_node])
    log.info(f"name of deleted node = {osd_node_name}")
    log.info(f"availability zone of deleted node = {az}")
    log.info(f"stack name of deleted node = {stack_name_of_deleted_node}")
    if config.ENV_DATA.get("rhel_workers"):
        node_type = constants.RHEL_OS
    else:
        node_type = constants.RHCOS
    log.info("Preparing to create a new node...")
    node_conf = {"stack_name": stack_name_of_deleted_node}
    new_node_names = add_new_node_and_label_upi(node_type, 1, node_conf=node_conf)
    return new_node_names[0]
def get_node_az(node):
    """
    Get the node availability zone

    Args:
        node (ocs_ci.ocs.resources.ocs.OCS): The node object

    Returns:
        str: The name of the node availability zone

    """
    zone_label = "topology.kubernetes.io/zone"
    node_labels = node.get().get("metadata", {}).get("labels", {})
    return node_labels.get(zone_label)
def delete_and_create_osd_node_vsphere_upi(osd_node_name, use_existing_node=False):
    """
    Unschedule, drain and delete osd node, and creating a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for vSphere UPI.

    Args:
        osd_node_name (str): the name of the osd node
        use_existing_node (bool): If False, create a new node and label it.
            If True, use an existing node to replace the deleted node
            and label it.

    Returns:
        str: The new node name

    """
    osd_node = get_node_objs(node_names=[osd_node_name])[0]
    remove_nodes([osd_node])
    log.info(f"name of deleted node = {osd_node_name}")
    if config.ENV_DATA.get("rhel_workers"):
        node_type = constants.RHEL_OS
    else:
        node_type = constants.RHCOS
    if not use_existing_node:
        log.info("Preparing to create a new node...")
        new_node_names = add_new_node_and_label_upi(node_type, 1)
        new_node_name = new_node_names[0]
    else:
        # Reuse the first worker that is not yet part of OCS instead of
        # provisioning a brand new machine
        node_not_in_ocs = get_worker_nodes_not_in_ocs()[0]
        log.info(
            f"Preparing to replace the node {osd_node_name} "
            f"with an existing node {node_not_in_ocs.name}"
        )
        if node_type == constants.RHEL_OS:
            set_selinux_permissions(workers=[node_not_in_ocs])
        label_nodes([node_not_in_ocs])
        new_node_name = node_not_in_ocs.name
    return new_node_name
def delete_and_create_osd_node_vsphere_upi_lso(osd_node_name, use_existing_node=False):
    """
    Unschedule, drain and delete osd node, and creating a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for vSphere UPI with LSO (local storage).

    Args:
        osd_node_name (str): the name of the osd node
        use_existing_node (bool): If False, create a new node and label it.
            If True, use an existing node to replace the deleted node
            and label it.

    Returns:
        str: The new node name

    """
    # Imported locally to avoid circular imports at module load time
    from ocs_ci.ocs.platform_nodes import PlatformNodesFactory
    from ocs_ci.ocs.resources.storage_cluster import get_osd_size
    sc_name = constants.LOCAL_BLOCK_RESOURCE
    # Remember the current PVs so the replacement PV can be detected later
    old_pv_objs = get_pv_objs_in_sc(sc_name)
    osd_node = get_node_objs(node_names=[osd_node_name])[0]
    osd_pod = get_node_pods(osd_node_name, pods_to_search=pod.get_osd_pods())[0]
    osd_id = pod.get_osd_pod_id(osd_pod)
    log.info(f"osd id to remove = {osd_id}")
    # Save the node hostname before deleting the node
    osd_node_hostname_label = get_node_hostname_label(osd_node)
    log.info("Scale down node deployments...")
    scale_down_deployments(osd_node_name)
    log.info("Scale down deployments finished successfully")
    new_node_name = delete_and_create_osd_node_vsphere_upi(
        osd_node_name, use_existing_node
    )
    assert new_node_name, "Failed to create a new node"
    log.info(f"New node created successfully. Node name: {new_node_name}")
    # If we use LSO, we need to create and attach a new disk manually
    new_node = get_node_objs(node_names=[new_node_name])[0]
    plt = PlatformNodesFactory()
    node_util = plt.get_nodes_platform()
    osd_size = get_osd_size()
    log.info(
        f"Create a new disk with size {osd_size}, and attach to node {new_node_name}"
    )
    node_util.create_and_attach_volume(node=new_node, size=osd_size)
    new_node_hostname_label = get_node_hostname_label(new_node)
    log.info(
        "Replace the old node with the new worker node in localVolumeDiscovery and localVolumeSet"
    )
    res = add_new_node_to_lvd_and_lvs(
        old_node_name=osd_node_hostname_label,
        new_node_name=new_node_hostname_label,
    )
    assert res, "Failed to add the new node to LVD and LVS"
    log.info("Verify new pv is available...")
    is_new_pv_available = verify_new_pv_available_in_sc(old_pv_objs, sc_name)
    assert is_new_pv_available, "New pv is not available"
    log.info("Finished verifying that the new pv is available")
    osd_removal_job = pod.run_osd_removal_job(osd_id)
    assert osd_removal_job, "ocs-osd-removal failed to create"
    # Fix: the original wrapped this call in a one-element tuple "(...,)",
    # which is always truthy and made the assert below vacuous
    is_completed = pod.verify_osd_removal_job_completed_successfully(osd_id)
    assert is_completed, "ocs-osd-removal-job is not in status 'completed'"
    log.info("ocs-osd-removal-job completed successfully")
    expected_num_of_deleted_pvs = 1
    num_of_deleted_pvs = delete_released_pvs_in_sc(sc_name)
    assert (
        num_of_deleted_pvs == expected_num_of_deleted_pvs
    ), f"num of deleted PVs is {num_of_deleted_pvs} instead of {expected_num_of_deleted_pvs}"
    log.info("Successfully deleted old pv")
    is_deleted = pod.delete_osd_removal_job(osd_id)
    assert is_deleted, "Failed to delete ocs-osd-removal-job"
    log.info("ocs-osd-removal-job deleted successfully")
    return new_node_name
def label_nodes(nodes, label=constants.OPERATOR_NODE_LABEL):
    """
    Apply a label to each of the given nodes.

    Args:
        nodes (list): list of node objects need to label
        label (str): New label to be assigned for these nodes.
            Default value is the OCS label

    """
    ocp_node = ocp.OCP(kind="node")
    for target in nodes:
        ocp_node.add_label(resource_name=target.name, label=label)
        logging.info(f"Successfully labeled {target.name} with OCS storage label")
def get_master_nodes():
    """
    Fetch the names of all master nodes.

    Returns:
        list: List of names of master nodes

    """
    master_label = "node-role.kubernetes.io/master"
    node_ocp = ocp.OCP(kind=constants.NODE)
    master_items = node_ocp.get(selector=master_label).get("items")
    return [item.get("metadata").get("name") for item in master_items]
def get_worker_nodes():
    """
    Fetches all worker nodes.

    Returns:
        list: List of names of worker nodes

    """
    label = "node-role.kubernetes.io/worker"
    ocp_node_obj = ocp.OCP(kind=constants.NODE)
    nodes = ocp_node_obj.get(selector=label).get("items")
    # Eliminate infra nodes from worker nodes in case of openshift dedicated
    # (on OSD, infra nodes also carry the worker role label)
    if config.ENV_DATA["platform"].lower() == "openshiftdedicated":
        infra_nodes = ocp_node_obj.get(selector=constants.INFRA_NODE_LABEL).get("items")
        infra_node_ids = [
            infra_node.get("metadata").get("name") for infra_node in infra_nodes
        ]
        nodes = [
            node
            for node in nodes
            if node.get("metadata").get("name") not in infra_node_ids
        ]
    worker_nodes_list = [node.get("metadata").get("name") for node in nodes]
    return worker_nodes_list
def get_worker_nodes_not_in_ocs():
    """
    Get the worker nodes that do not carry the OCS storage label.

    Returns:
        list: list of worker node objects that are not ocs labeled

    """
    labeled_names = {ocs_node.name for ocs_node in get_ocs_nodes()}
    return [
        worker
        for worker in get_nodes(constants.WORKER_MACHINE)
        if worker.name not in labeled_names
    ]
def node_replacement_verification_steps_user_side(
    old_node_name, new_node_name, new_osd_node_name, old_osd_id
):
    """
    Check the verification steps that the user should perform after the process
    of node replacement as described in the docs

    Args:
        old_node_name (str): The name of the old node that has been deleted
        new_node_name (str): The name of the new node that has been created
        new_osd_node_name (str): The name of the new node that has been added to osd nodes
        old_osd_id (str): The old osd id

    Returns:
        bool: True if all the verification steps passed. False otherwise

    """
    ocs_nodes = get_ocs_nodes()
    ocs_node_names = [n.name for n in ocs_nodes]
    if new_node_name not in ocs_node_names:
        log.warning("The new node not found in ocs nodes")
        return False
    if old_node_name in ocs_node_names:
        log.warning("The old node name found in ocs nodes")
        return False
    csi_cephfsplugin_pods = pod.get_plugin_pods(interface=constants.CEPHFILESYSTEM)
    csi_rbdplugin_pods = pod.get_plugin_pods(interface=constants.CEPHBLOCKPOOL)
    csi_plugin_pods = csi_cephfsplugin_pods + csi_rbdplugin_pods
    if not all([p.status() == constants.STATUS_RUNNING for p in csi_plugin_pods]):
        log.warning("Not all csi rbd and cephfs plugin pods in status running")
        return False
    # It can take some time until all the ocs pods are up and running
    # after the process of node replacement
    if not pod.wait_for_pods_to_be_running():
        log.warning("Not all the pods in running state")
        return False
    # Fix: check the list for emptiness *before* indexing. The original code
    # indexed [0] first, so a missing osd pod raised IndexError instead of
    # reaching the intended warning + "return False" path.
    new_osd_pods = get_node_pods(new_osd_node_name, pods_to_search=pod.get_osd_pods())
    if not new_osd_pods:
        log.warning("Didn't find any osd pods running on the new node")
        return False
    new_osd_pod = new_osd_pods[0]
    new_osd_id = pod.get_osd_pod_id(new_osd_pod)
    if old_osd_id != new_osd_id:
        log.warning(
            f"The osd pod, that associated to the new node, has the id {new_osd_id} "
            f"instead of the expected osd id {old_osd_id}"
        )
        return False
    log.info("Verification steps from the user side finish successfully")
    return True
def node_replacement_verification_steps_ceph_side(
    old_node_name, new_node_name, new_osd_node_name
):
    """
    Check the verification steps from the Ceph side, after the process
    of node replacement as described in the docs

    Args:
        old_node_name (str): The name of the old node that has been deleted
        new_node_name (str): The name of the new node that has been created
        new_osd_node_name (str): The name of the new node that has been added to osd nodes

    Returns:
        bool: True if all the verification steps passed. False otherwise

    """
    if old_node_name == new_node_name:
        log.warning("Hostname didn't change")
        return False
    wait_for_nodes_status([new_node_name, new_osd_node_name])
    # It can take some time until all the ocs pods are up and running
    # after the process of node replacement
    if not pod.wait_for_pods_to_be_running():
        log.warning("Not all the pods in running state")
        return False
    ct_pod = pod.get_ceph_tools_pod()
    ceph_osd_status = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd status")
    # NOTE(review): the 'in' checks below depend on what exec_ceph_cmd returns
    # (string vs parsed structure); for a dict this tests keys only - confirm
    # the node names actually appear at that level of the output
    if new_osd_node_name not in ceph_osd_status:
        log.warning("new osd node name not found in 'ceph osd status' output")
        return False
    if old_node_name in ceph_osd_status:
        log.warning("old node name found in 'ceph osd status' output")
        return False
    osd_node_names = get_osd_running_nodes()
    if new_osd_node_name not in osd_node_names:
        log.warning("the new osd hostname not found in osd node names")
        return False
    if old_node_name in osd_node_names:
        log.warning("the old hostname found in osd node names")
        return False
    # Imported locally to avoid a circular import at module load time
    from ocs_ci.ocs.cluster import check_ceph_osd_tree_after_node_replacement
    if not check_ceph_osd_tree_after_node_replacement():
        return False
    log.info("Verification steps from the ceph side finish successfully")
    return True
def is_node_labeled(node_name, label=constants.OPERATOR_NODE_LABEL):
    """
    Check if the node is labeled with a specified label.

    Args:
        node_name (str): The node name to check if it has the specific label
        label (str): The name of the label. Default value is the OCS label.

    Returns:
        bool: True if the node is labeled with the specified label. False otherwise

    """
    labeled_node_names = machine.get_labeled_nodes(label=label)
    return node_name in labeled_node_names
def taint_nodes(nodes, taint_label=constants.OCS_TAINT):
    """
    Taint nodes

    Args:
        nodes (list): list of node names need to taint
        taint_label (str): New taint label to be assigned for these nodes.
            Default value is the OCS taint

    """
    ocp_obj = ocp.OCP()
    for node in nodes:
        command = f"adm taint node {node} {taint_label}"
        try:
            ocp_obj.exec_oc_cmd(command)
            logging.info(f"Successfully tainted {node} with OCS storage taint")
        except Exception as e:
            # Best-effort: a node that is already tainted (or otherwise
            # rejects the taint) is logged and skipped, not treated as fatal
            logging.info(f"{node} was not tainted - {e}")
def check_taint_on_ocs_nodes(taint=constants.OPERATOR_NODE_TAINT):
    """
    Function to check for particular taint on nodes

    Args:
        taint (str): The taint to check on nodes

    Return:
        bool: True if taint is present on at least one OCS node. False otherwise

    """
    # Fixes over the original implementation:
    # - the flag started at -1, so a cluster where no OCS node had *any*
    #   taints returned bool(-1) == True
    # - only taints[0] was inspected, missing the taint when a node carried
    #   more than one
    # - each node overwrote the flag, so only the last node's state mattered
    ocs_nodes = get_ocs_nodes()
    for node_obj in ocs_nodes:
        # A node with no taints has no "taints" entry in its spec
        node_taints = node_obj.get().get("spec").get("taints") or []
        for node_taint in node_taints:
            if taint in node_taint.get("key", ""):
                log.info(f"Node {node_obj.name} has taint {taint}")
                return True
    return False
def taint_ocs_nodes(nodes_to_taint=None):
    """
    Function to taint nodes with "node.ocs.openshift.io/storage=true:NoSchedule"

    Args:
        nodes_to_taint (list): Nodes to taint; defaults to all OCS nodes

    """
    # Skip entirely if the taint was already applied somewhere
    if not check_taint_on_ocs_nodes():
        # NOTE: this local 'ocp' instance shadows the imported 'ocp' module
        # within this function body
        ocp = OCP()
        ocs_nodes = get_ocs_nodes()
        nodes_to_taint = nodes_to_taint if nodes_to_taint else ocs_nodes
        log.info(f"Taint nodes with taint: " f"{constants.OPERATOR_NODE_TAINT}")
        for node in nodes_to_taint:
            taint_cmd = f"adm taint nodes {node.name} {constants.OPERATOR_NODE_TAINT}"
            ocp.exec_oc_cmd(command=taint_cmd)
    else:
        log.info(
            f"One or more nodes already have taint {constants.OPERATOR_NODE_TAINT} "
        )
def untaint_ocs_nodes(taint=constants.OPERATOR_NODE_TAINT, nodes_to_untaint=None):
    """
    Remove the given taint from a set of nodes.

    Args:
        taint (str): taint to remove
        nodes_to_untaint (list): list of node objects to untaint; defaults to
            all OCS nodes

    Return:
        bool: True if untainted, false otherwise

    """
    if not check_taint_on_ocs_nodes():
        return False
    ocp_obj = OCP()
    all_ocs_nodes = get_ocs_nodes()
    targets = nodes_to_untaint if nodes_to_untaint else all_ocs_nodes
    for target in targets:
        # The trailing '-' tells 'oc adm taint' to remove the taint
        taint_cmd = f"adm taint nodes {target.name} {taint}-"
        ocp_obj.exec_oc_cmd(command=taint_cmd)
        log.info(f"Untainted {target.name}")
    return True
def get_node_pods(node_name, pods_to_search=None):
    """
    Get all the pods running on a specified node.

    Args:
        node_name (str): The node name to get the pods
        pods_to_search (list): list of pods to search for the node pods.
            If not specified, will search in all the pods.

    Returns:
        list: list of all the pods of the specified node

    """
    candidates = pods_to_search or pod.get_all_pods()
    node_pods = []
    for candidate in candidates:
        if pod.get_pod_node(candidate).name == node_name:
            node_pods.append(candidate)
    return node_pods
def get_node_pods_to_scale_down(node_name):
    """
    Get the pods of a node to scale down as described in the documents
    of node replacement with LSO.

    Args:
        node_name (str): The node name

    Returns:
        list: The node's pods to scale down

    """
    # mon, osd and mgr pods are the ones the LSO node-replacement
    # procedure requires to be scaled down
    relevant_pods = []
    relevant_pods.extend(pod.get_mon_pods())
    relevant_pods.extend(pod.get_osd_pods())
    relevant_pods.extend(pod.get_mgr_pods())
    return get_node_pods(node_name, relevant_pods)
def scale_down_deployments(node_name):
    """
    Scale down the deployments of a node as described in the documents
    of node replacement with LSO

    Args:
        node_name (str): The node name

    """
    ocp = OCP(kind="node", namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    pods_to_scale_down = get_node_pods_to_scale_down(node_name)
    # Scale each owning deployment (mon/osd/mgr) to zero replicas
    for p in pods_to_scale_down:
        deployment_name = pod.get_deployment_name(p.name)
        log.info(f"Scale down deploymet {deployment_name}")
        ocp.exec_oc_cmd(f"scale deployment {deployment_name} --replicas=0")
    # The crashcollector for the node is selected by label rather than name
    log.info("Scale down rook-ceph-crashcollector")
    ocp.exec_oc_cmd(
        f"scale deployment --selector=app=rook-ceph-crashcollector,"
        f"node_name='{node_name}' --replicas=0"
    )
def get_node_index_in_local_block(node_name):
    """
    Get the node index in the node values as it appears in the local block resource

    Args:
        node_name (str): The node name to search for his index

    Returns:
        int: The node index in the nodeSelector values

    Raises:
        ValueError: If the node name is not in the nodeSelector values

    """
    ocp_lvs_obj = OCP(
        kind=constants.LOCAL_VOLUME_SET,
        namespace=defaults.LOCAL_STORAGE_NAMESPACE,
        resource_name=constants.LOCAL_BLOCK_RESOURCE,
    )
    # Drill into the first nodeSelectorTerm's first matchExpression -
    # the LocalVolumeSet created by ocs-ci keeps all node names there
    node_selector = ocp_lvs_obj.get().get("spec").get("nodeSelector")
    node_values = (
        node_selector.get("nodeSelectorTerms")[0]
        .get("matchExpressions")[0]
        .get("values")
    )
    return node_values.index(node_name)
def add_new_node_to_lvd_and_lvs(old_node_name, new_node_name):
    """
    Replace the old node with the new node in localVolumeDiscovery and localVolumeSet,
    as described in the documents of node replacement with LSO

    Args:
        old_node_name (str): The old node name to remove from the local volume
        new_node_name (str): the new node name to add to the local volume

    Returns:
        bool: True in case if changes are applied. False otherwise

    """
    old_node_index = get_node_index_in_local_block(old_node_name)
    # JSON-patch path to the old node's slot in the nodeSelector values array.
    # NOTE(review): the index is taken from the LocalVolumeSet but the same
    # patch is applied to the LocalVolumeDiscovery - assumes both resources
    # list the nodes in the same order; confirm
    path_to_old_node = f"/spec/nodeSelector/nodeSelectorTerms/0/matchExpressions/0/values/{old_node_index}"
    params = f"""[{{"op": "replace", "path": "{path_to_old_node}", "value": "{new_node_name}"}}]"""
    ocp_lvd_obj = OCP(
        kind=constants.LOCAL_VOLUME_DISCOVERY,
        namespace=defaults.LOCAL_STORAGE_NAMESPACE,
    )
    ocp_lvs_obj = OCP(
        kind=constants.LOCAL_VOLUME_SET,
        namespace=defaults.LOCAL_STORAGE_NAMESPACE,
        resource_name=constants.LOCAL_BLOCK_RESOURCE,
    )
    lvd_result = ocp_lvd_obj.patch(params=params, format_type="json")
    lvs_result = ocp_lvs_obj.patch(params=params, format_type="json")
    return lvd_result and lvs_result
def get_node_hostname_label(node_obj):
    """
    Get the hostname label of a node.

    Args:
        node_obj (ocs_ci.ocs.resources.ocs.OCS): The node object

    Returns:
        str: The node's hostname label

    """
    node_metadata = node_obj.get().get("metadata")
    node_labels = node_metadata.get("labels")
    return node_labels.get(constants.HOSTNAME_LABEL)
def wait_for_new_osd_node(old_osd_node_names, timeout=180):
    """
    Wait for the new osd node to appear.

    Args:
        old_osd_node_names (list): List of the old osd node names
        timeout (int): time to wait for the new osd node to appear

    Returns:
        str: The new osd node name if the new osd node appear in the specific timeout.
            Else it returns None

    """
    try:
        # Poll the current osd-hosting nodes until one shows up that was not
        # in the old set
        for current_osd_node_names in TimeoutSampler(
            timeout=timeout, sleep=10, func=get_osd_running_nodes
        ):
            new_osd_node_names = [
                node_name
                for node_name in current_osd_node_names
                if node_name not in old_osd_node_names
            ]
            if new_osd_node_names:
                log.info(f"New osd node is {new_osd_node_names[0]}")
                return new_osd_node_names[0]
    except TimeoutExpiredError:
        # Timing out is an expected outcome for the caller - signal with None
        log.warning(f"New osd node didn't appear after {timeout} seconds")
        return None
|
#!/usr/bin/env python3
import sys
import yaml
import ipaddress
import urllib.parse
import subprocess
import socket
import requests
import re
import logging
import random
import string
import hashlib
import os
def checksum(filename, hashfunc):
    """Return the hex digest of *filename* computed with *hashfunc*.

    Args:
        filename (str): path of the file to hash
        hashfunc: a hashlib-style hash object (e.g. hashlib.md5())

    Returns:
        str: hexadecimal digest of the file contents
    """
    # Stream in 4 KiB chunks so large files never load fully into memory
    with open(filename, "rb") as fh:
        while True:
            chunk = fh.read(4096)
            if not chunk:
                break
            hashfunc.update(chunk)
    return hashfunc.hexdigest()
def main(args):
    """
    Run the connectivity checks described in ``config.yml``.

    Each entry under ``hosts`` selects a check via its ``prot`` key:
    icmp ping, tcp connect, HTTP status / body-regex match, or an ICAP
    round-trip through c-icap-client.

    Args:
        args (list): command line arguments (currently unused)

    Returns:
        int: 0 if every check passed, otherwise the accumulated number of
            failures (plus any non-zero ping return codes)
    """
    retcode = 0
    FAIL = "\033[1;91m" + "FAIL" + "\033[0m"
    PASS = "\033[1;92m" + "PASS" + "\033[0m"
    SSLVerify = False
    # Route urllib3's InsecureRequestWarning through logging so it is muted
    logging.captureWarnings(True)
    with open('config.yml') as file:
        config = yaml.load(file, Loader=yaml.Loader)
    for i in config['hosts']:
        try:
            addr = i['address']
            try:
                addr = str(ipaddress.ip_address(addr))
            except ValueError:
                # Not a bare IP: treat the address as a URL (default scheme http)
                url = urllib.parse.urlparse(addr, scheme='http')
                if url.netloc == '' and url.path != '':
                    url = urllib.parse.urlparse(f'{url.scheme}://{url.path}')
                addr = url.hostname
        except KeyError:
            # Entry without an address - nothing to check
            continue
        if i['prot'] == 'icmp':
            print(f'ping {addr:30}: ', end='', flush=True)
            cp = subprocess.run(['ping', '-c1', '-w2', addr], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            print(f'{FAIL if cp.returncode else PASS}')
            retcode = retcode + cp.returncode
        elif i['prot'] == "tcp":
            # Fix: use double quotes for the subscripts inside these
            # single-quoted f-strings - reusing the same quote character is a
            # SyntaxError on Python < 3.12
            print(f'tcp/{i["tcpport"]:<6} {addr:30}: ', end='', flush=True)
            s = None
            # Try every resolved address family until one connects
            for res in socket.getaddrinfo(addr, i['tcpport'], socket.AF_UNSPEC, socket.SOCK_STREAM):
                af, socktype, proto, canonname, sa = res
                try:
                    s = socket.socket(af, socktype, proto)
                    s.settimeout(5)
                except socket.error:
                    s = None
                    continue
                try:
                    s.connect(sa)
                except socket.error:
                    s.close()
                    s = None
                    continue
                break
            print(f'{PASS if s else FAIL}')
            retcode = retcode + (0 if s else 1)
            if s: s.close()
        elif i['prot'] == 'httpstatus':
            # NOTE(review): 'url' is only bound when the address was not a bare
            # IP - an httpstatus entry with a plain IP address raises here;
            # confirm config.yml always uses hostnames/URLs for HTTP checks
            print(f'httpstatus {url.geturl():30}: ', end='', flush=True)
            r = requests.get(url.geturl(), verify=SSLVerify)
            print(f'{PASS if r.status_code == i["httpstatus"] else FAIL}')
            retcode = retcode + (0 if r.status_code == i["httpstatus"] else 1)
        elif i['prot'] == 'httpstring':
            print(f'httpstring {url.geturl():30}: ', end='', flush=True)
            r = requests.get(url.geturl(), verify=SSLVerify)
            print(f'{PASS if re.search(i["httpstring"], r.text) else FAIL}')
            retcode = retcode + (0 if re.search(i["httpstring"], r.text) else 1)
        elif i['prot'] == 'icap':
            print(f'icap {addr:30}: ', end='', flush=True)
            # Random suffix so parallel runs don't clobber each other's output
            suffix = ''.join(random.choices(string.ascii_uppercase + string.digits, k=4))
            cp = subprocess.run(['c-icap-client', '-i', addr, '-s', i["icapservice"], '-f', i["icaptestfile"], '-o', i["icaptestfile"] + suffix], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            if cp.returncode == 0:
                if os.path.isfile(i['icaptestfile'] + suffix):
                    c2 = checksum(i['icaptestfile'] + suffix, hashlib.md5())
                    os.remove(i['icaptestfile'] + suffix)
                    # PASS only when the ICAP service modified the test file
                    if checksum(i['icaptestfile'], hashlib.md5()) != c2:
                        print(PASS)
                        continue
            print(FAIL)
            retcode += 1
            continue
    return retcode
# Script entry point: the process exit status is the number of failed checks
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| #!/usr/bin/env python3
import sys
import yaml
import ipaddress
import urllib.parse
import subprocess
import socket
import requests
import re
import logging
import random
import string
import hashlib
import os
def checksum(filename, hashfunc):
with open(filename,"rb") as f:
for byte_block in iter(lambda: f.read(4096),b""):
hashfunc.update(byte_block)
return hashfunc.hexdigest()
def main(args):
retcode = 0
FAIL = "\033[1;91m" + "FAIL" + "\033[0m"
PASS = "\033[1;92m" + "PASS" + "\033[0m"
SSLVerify = False
logging.captureWarnings(True)
with open('config.yml') as file:
config = yaml.load(file, Loader=yaml.Loader)
for i in config['hosts']:
try:
addr = i['address']
try:
addr = str(ipaddress.ip_address(addr))
except ValueError:
url = urllib.parse.urlparse(addr,scheme='http')
if url.netloc=='' and url.path != '':
url = urllib.parse.urlparse(f'{url.scheme}://{url.path}')
addr = url.hostname
except KeyError:
continue
if i['prot'] == 'icmp':
print(f'ping {addr:30}: ', end='', flush=True)
cp = subprocess.run(['ping','-c1','-w2',addr],stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL)
print(f'{FAIL if cp.returncode else PASS}')
retcode = retcode + cp.returncode
elif i['prot'] == "tcp":
print(f'tcp/{i["tcpport"]:<6} {addr:30}: ', end='', flush=True)
s = None
for res in socket.getaddrinfo(addr, i['tcpport'], socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
s.settimeout(5)
except socket.error:
s = None
continue
try:
s.connect(sa)
except socket.error:
s.close()
s = None
continue
break
print(f'{PASS if s else FAIL}')
retcode = retcode + (0 if s else 1)
if s: s.close()
elif i['prot'] == 'httpstatus':
print(f'httpstatus {url.geturl():30}: ', end='', flush=True)
r = requests.get(url.geturl(), verify=SSLVerify)
print(f'{PASS if r.status_code==i["httpstatus"] else FAIL}')
retcode = retcode + (0 if r.status_code==i["httpstatus"] else 1)
elif i['prot'] == 'httpstring':
print(f'httpstring {url.geturl():30}: ', end='', flush=True)
r = requests.get(url.geturl(), verify=SSLVerify)
print(f'{PASS if re.search(i["httpstring"],r.text) else FAIL}')
retcode = retcode + (0 if re.search(i["httpstring"],r.text) else 1)
elif i['prot'] == 'icap':
print(f'icap {addr:30}: ', end='', flush=True)
suffix = ''.join(random.choices(string.ascii_uppercase + string.digits, k=4))
cp = subprocess.run(['c-icap-client','-i',addr,'-s',i["icapservice"],'-f',i["icaptestfile"],'-o',i["icaptestfile"]+suffix],stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL)
if cp.returncode == 0:
if os.path.isfile(i['icaptestfile']+suffix):
c2 = checksum(i['icaptestfile']+suffix,hashlib.md5())
os.remove(i['icaptestfile']+suffix)
if checksum(i['icaptestfile'],hashlib.md5()) != c2:
print(PASS)
continue
print(FAIL)
retcode += 1
continue
return retcode
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
import json
import logging
from logging import Logger
from typing import Optional
from botocore.client import BaseClient
from slack_sdk.oauth.installation_store.async_installation_store import (
AsyncInstallationStore,
)
from slack_sdk.oauth.installation_store.installation_store import InstallationStore
from slack_sdk.oauth.installation_store.models.bot import Bot
from slack_sdk.oauth.installation_store.models.installation import Installation
class AmazonS3InstallationStore(InstallationStore, AsyncInstallationStore):
def __init__(
self,
*,
s3_client: BaseClient,
bucket_name: str,
client_id: str,
historical_data_enabled: bool = True,
logger: Logger = logging.getLogger(__name__),
):
self.s3_client = s3_client
self.bucket_name = bucket_name
self.historical_data_enabled = historical_data_enabled
self.client_id = client_id
self._logger = logger
@property
def logger(self) -> Logger:
if self._logger is None:
self._logger = logging.getLogger(__name__)
return self._logger
async def async_save(self, installation: Installation):
return self.save(installation)
def save(self, installation: Installation):
none = "none"
e_id = installation.enterprise_id or none
t_id = installation.team_id or none
workspace_path = f"{self.client_id}/{e_id}-{t_id}"
if self.historical_data_enabled:
history_version: str = str(installation.installed_at)
entity: str = json.dumps(installation.to_bot().__dict__)
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/bot-latest",
)
self.logger.debug(f"S3 put_object response: {response}")
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/bot-{history_version}",
)
self.logger.debug(f"S3 put_object response: {response}")
# per workspace
entity: str = json.dumps(installation.__dict__)
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/installer-latest",
)
self.logger.debug(f"S3 put_object response: {response}")
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/installer-{history_version}",
)
self.logger.debug(f"S3 put_object response: {response}")
# per workspace per user
u_id = installation.user_id or none
entity: str = json.dumps(installation.__dict__)
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/installer-{u_id}-latest",
)
self.logger.debug(f"S3 put_object response: {response}")
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/installer-{u_id}-{history_version}",
)
self.logger.debug(f"S3 put_object response: {response}")
else:
entity: str = json.dumps(installation.to_bot().__dict__)
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/bot-latest",
)
self.logger.debug(f"S3 put_object response: {response}")
# per workspace
entity: str = json.dumps(installation.__dict__)
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/installer-latest",
)
self.logger.debug(f"S3 put_object response: {response}")
# per workspace per user
u_id = installation.user_id or none
entity: str = json.dumps(installation.__dict__)
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/installer-{u_id}-latest",
)
self.logger.debug(f"S3 put_object response: {response}")
async def async_find_bot(
self,
*,
enterprise_id: Optional[str],
team_id: Optional[str],
is_enterprise_install: Optional[bool] = False,
) -> Optional[Bot]:
return self.find_bot(
enterprise_id=enterprise_id,
team_id=team_id,
is_enterprise_install=is_enterprise_install,
)
def find_bot(
self,
*,
enterprise_id: Optional[str],
team_id: Optional[str],
is_enterprise_install: Optional[bool] = False,
) -> Optional[Bot]:
none = "none"
e_id = enterprise_id or none
t_id = team_id or none
if is_enterprise_install:
t_id = none
workspace_path = f"{self.client_id}/{e_id}-{t_id}"
try:
fetch_response = self.s3_client.get_object(
Bucket=self.bucket_name,
Key=f"{workspace_path}/bot-latest",
)
self.logger.debug(f"S3 get_object response: {fetch_response}")
body = fetch_response["Body"].read().decode("utf-8")
data = json.loads(body)
return Bot(**data)
except Exception as e: # skipcq: PYL-W0703
message = f"Failed to find bot installation data for enterprise: {e_id}, team: {t_id}: {e}"
self.logger.warning(message)
return None
async def async_find_installation(
self,
*,
enterprise_id: Optional[str],
team_id: Optional[str],
user_id: Optional[str] = None,
is_enterprise_install: Optional[bool] = False,
) -> Optional[Installation]:
return self.find_installation(
enterprise_id=enterprise_id,
team_id=team_id,
user_id=user_id,
is_enterprise_install=is_enterprise_install,
)
def find_installation(
self,
*,
enterprise_id: Optional[str],
team_id: Optional[str],
user_id: Optional[str] = None,
is_enterprise_install: Optional[bool] = False,
) -> Optional[Installation]:
none = "none"
e_id = enterprise_id or none
t_id = team_id or none
if is_enterprise_install:
t_id = none
workspace_path = f"{self.client_id}/{e_id}-{t_id}"
try:
key = (
f"{workspace_path}/installer-{user_id}-latest"
if user_id
else f"{workspace_path}/installer-latest"
)
fetch_response = self.s3_client.get_object(
Bucket=self.bucket_name,
Key=key,
)
self.logger.debug(f"S3 get_object response: {fetch_response}")
body = fetch_response["Body"].read().decode("utf-8")
data = json.loads(body)
return Installation(**data)
except Exception as e: # skipcq: PYL-W0703
message = f"Failed to find an installation data for enterprise: {e_id}, team: {t_id}: {e}"
self.logger.warning(message)
return None
async def async_delete_bot(
self, *, enterprise_id: Optional[str], team_id: Optional[str]
) -> None:
return self.delete_bot(
enterprise_id=enterprise_id,
team_id=team_id,
)
def delete_bot(
self, *, enterprise_id: Optional[str], team_id: Optional[str]
) -> None:
none = "none"
e_id = enterprise_id or none
t_id = team_id or none
workspace_path = f"{self.client_id}/{e_id}-{t_id}"
objects = self.s3_client.list_objects(
Bucket=self.bucket_name,
Prefix=f"{workspace_path}/bot-",
)
for content in objects.get("Contents", []):
key = content.get("Key")
if key is not None:
self.logger.info(f"Going to delete bot installation ({key})")
try:
self.s3_client.delete_object(
Bucket=self.bucket_name,
Key=content.get("Key"),
)
except Exception as e: # skipcq: PYL-W0703
message = f"Failed to find bot installation data for enterprise: {e_id}, team: {t_id}: {e}"
self.logger.warning(message)
async def async_delete_installation(
self,
*,
enterprise_id: Optional[str],
team_id: Optional[str],
user_id: Optional[str] = None,
) -> None:
return self.delete_installation(
enterprise_id=enterprise_id,
team_id=team_id,
user_id=user_id,
)
def delete_installation(
self,
*,
enterprise_id: Optional[str],
team_id: Optional[str],
user_id: Optional[str] = None,
) -> None:
none = "none"
e_id = enterprise_id or none
t_id = team_id or none
workspace_path = f"{self.client_id}/{e_id}-{t_id}"
objects = self.s3_client.list_objects(
Bucket=self.bucket_name,
Prefix=f"{workspace_path}/installer-{user_id or ""}",
)
for content in objects.get("Contents", []):
key = content.get("Key")
if key is not None:
self.logger.info(f"Going to delete installation ({key})")
try:
self.s3_client.delete_object(
Bucket=self.bucket_name,
Key=content.get("Key"),
)
except Exception as e: # skipcq: PYL-W0703
message = f"Failed to find bot installation data for enterprise: {e_id}, team: {t_id}: {e}"
self.logger.warning(message)
| import json
import logging
from logging import Logger
from typing import Optional
from botocore.client import BaseClient
from slack_sdk.oauth.installation_store.async_installation_store import (
AsyncInstallationStore,
)
from slack_sdk.oauth.installation_store.installation_store import InstallationStore
from slack_sdk.oauth.installation_store.models.bot import Bot
from slack_sdk.oauth.installation_store.models.installation import Installation
class AmazonS3InstallationStore(InstallationStore, AsyncInstallationStore):
def __init__(
self,
*,
s3_client: BaseClient,
bucket_name: str,
client_id: str,
historical_data_enabled: bool = True,
logger: Logger = logging.getLogger(__name__),
):
self.s3_client = s3_client
self.bucket_name = bucket_name
self.historical_data_enabled = historical_data_enabled
self.client_id = client_id
self._logger = logger
@property
def logger(self) -> Logger:
if self._logger is None:
self._logger = logging.getLogger(__name__)
return self._logger
async def async_save(self, installation: Installation):
return self.save(installation)
def save(self, installation: Installation):
none = "none"
e_id = installation.enterprise_id or none
t_id = installation.team_id or none
workspace_path = f"{self.client_id}/{e_id}-{t_id}"
if self.historical_data_enabled:
history_version: str = str(installation.installed_at)
entity: str = json.dumps(installation.to_bot().__dict__)
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/bot-latest",
)
self.logger.debug(f"S3 put_object response: {response}")
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/bot-{history_version}",
)
self.logger.debug(f"S3 put_object response: {response}")
# per workspace
entity: str = json.dumps(installation.__dict__)
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/installer-latest",
)
self.logger.debug(f"S3 put_object response: {response}")
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/installer-{history_version}",
)
self.logger.debug(f"S3 put_object response: {response}")
# per workspace per user
u_id = installation.user_id or none
entity: str = json.dumps(installation.__dict__)
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/installer-{u_id}-latest",
)
self.logger.debug(f"S3 put_object response: {response}")
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/installer-{u_id}-{history_version}",
)
self.logger.debug(f"S3 put_object response: {response}")
else:
entity: str = json.dumps(installation.to_bot().__dict__)
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/bot-latest",
)
self.logger.debug(f"S3 put_object response: {response}")
# per workspace
entity: str = json.dumps(installation.__dict__)
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/installer-latest",
)
self.logger.debug(f"S3 put_object response: {response}")
# per workspace per user
u_id = installation.user_id or none
entity: str = json.dumps(installation.__dict__)
response = self.s3_client.put_object(
Bucket=self.bucket_name,
Body=entity,
Key=f"{workspace_path}/installer-{u_id}-latest",
)
self.logger.debug(f"S3 put_object response: {response}")
async def async_find_bot(
self,
*,
enterprise_id: Optional[str],
team_id: Optional[str],
is_enterprise_install: Optional[bool] = False,
) -> Optional[Bot]:
return self.find_bot(
enterprise_id=enterprise_id,
team_id=team_id,
is_enterprise_install=is_enterprise_install,
)
def find_bot(
self,
*,
enterprise_id: Optional[str],
team_id: Optional[str],
is_enterprise_install: Optional[bool] = False,
) -> Optional[Bot]:
none = "none"
e_id = enterprise_id or none
t_id = team_id or none
if is_enterprise_install:
t_id = none
workspace_path = f"{self.client_id}/{e_id}-{t_id}"
try:
fetch_response = self.s3_client.get_object(
Bucket=self.bucket_name,
Key=f"{workspace_path}/bot-latest",
)
self.logger.debug(f"S3 get_object response: {fetch_response}")
body = fetch_response["Body"].read().decode("utf-8")
data = json.loads(body)
return Bot(**data)
except Exception as e: # skipcq: PYL-W0703
message = f"Failed to find bot installation data for enterprise: {e_id}, team: {t_id}: {e}"
self.logger.warning(message)
return None
async def async_find_installation(
self,
*,
enterprise_id: Optional[str],
team_id: Optional[str],
user_id: Optional[str] = None,
is_enterprise_install: Optional[bool] = False,
) -> Optional[Installation]:
return self.find_installation(
enterprise_id=enterprise_id,
team_id=team_id,
user_id=user_id,
is_enterprise_install=is_enterprise_install,
)
def find_installation(
self,
*,
enterprise_id: Optional[str],
team_id: Optional[str],
user_id: Optional[str] = None,
is_enterprise_install: Optional[bool] = False,
) -> Optional[Installation]:
none = "none"
e_id = enterprise_id or none
t_id = team_id or none
if is_enterprise_install:
t_id = none
workspace_path = f"{self.client_id}/{e_id}-{t_id}"
try:
key = (
f"{workspace_path}/installer-{user_id}-latest"
if user_id
else f"{workspace_path}/installer-latest"
)
fetch_response = self.s3_client.get_object(
Bucket=self.bucket_name,
Key=key,
)
self.logger.debug(f"S3 get_object response: {fetch_response}")
body = fetch_response["Body"].read().decode("utf-8")
data = json.loads(body)
return Installation(**data)
except Exception as e: # skipcq: PYL-W0703
message = f"Failed to find an installation data for enterprise: {e_id}, team: {t_id}: {e}"
self.logger.warning(message)
return None
async def async_delete_bot(
self, *, enterprise_id: Optional[str], team_id: Optional[str]
) -> None:
return self.delete_bot(
enterprise_id=enterprise_id,
team_id=team_id,
)
def delete_bot(
self, *, enterprise_id: Optional[str], team_id: Optional[str]
) -> None:
none = "none"
e_id = enterprise_id or none
t_id = team_id or none
workspace_path = f"{self.client_id}/{e_id}-{t_id}"
objects = self.s3_client.list_objects(
Bucket=self.bucket_name,
Prefix=f"{workspace_path}/bot-",
)
for content in objects.get("Contents", []):
key = content.get("Key")
if key is not None:
self.logger.info(f"Going to delete bot installation ({key})")
try:
self.s3_client.delete_object(
Bucket=self.bucket_name,
Key=content.get("Key"),
)
except Exception as e: # skipcq: PYL-W0703
message = f"Failed to find bot installation data for enterprise: {e_id}, team: {t_id}: {e}"
self.logger.warning(message)
async def async_delete_installation(
self,
*,
enterprise_id: Optional[str],
team_id: Optional[str],
user_id: Optional[str] = None,
) -> None:
return self.delete_installation(
enterprise_id=enterprise_id,
team_id=team_id,
user_id=user_id,
)
def delete_installation(
self,
*,
enterprise_id: Optional[str],
team_id: Optional[str],
user_id: Optional[str] = None,
) -> None:
none = "none"
e_id = enterprise_id or none
t_id = team_id or none
workspace_path = f"{self.client_id}/{e_id}-{t_id}"
objects = self.s3_client.list_objects(
Bucket=self.bucket_name,
Prefix=f"{workspace_path}/installer-{user_id or ''}",
)
for content in objects.get("Contents", []):
key = content.get("Key")
if key is not None:
self.logger.info(f"Going to delete installation ({key})")
try:
self.s3_client.delete_object(
Bucket=self.bucket_name,
Key=content.get("Key"),
)
except Exception as e: # skipcq: PYL-W0703
message = f"Failed to find bot installation data for enterprise: {e_id}, team: {t_id}: {e}"
self.logger.warning(message)
|
# The functions detailed below are all translation from Javascript.
# The original code was done by alvaro-cuesta here https://github.com/alvaro-cuesta/townsclipper
# Thank you very much for it !
# IMPORTANT NOTES:
#
# Input looks like base64url but IT IS NOT! Notice the alphabet's out-of-order "w"
# and the swapped "_" and "-" when compared to RFC 4648 §5.
#
# Townscaper decodes clip strings left-to-right and LSB first (both in BitArray index
# and character bits). This means the "base64" output is reversed.
#
# Later, when reading values, they're read in bit blocks from LSB to MSB. I.e. values
# are read right-to-left (but their bits are still LSB on the right).
from logging import getLogger
from math import ceil, inf, log2
from re import search
# Logging
root = getLogger("Town.clipper")
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvxyzw0123456789_-"
BITS_PER_CHAR = 6
def clipToBits(clip):
#root.debug(f"clipToBits to do :\n{clip}")
reversed_clip = list(clip)[::-1]
bits = []
for x in reversed_clip:
value = ALPHABET.find(x)
if value == -1:
root.debug(f"Invalid clip string character {x}")
return
bits.append("{0:b}".format(value).rjust(BITS_PER_CHAR, "0"))
#root.debug(f"clipToBits done :\n{"".join(bits)}")
return "".join(bits)
def bitsToClip(bits):
if any(bit not in ("0", "1") for bit in bits):
root.debug("Invalid bits")
return
if len(bits) % BITS_PER_CHAR != 0:
root.debug(
f"Bit string length {len(bits)} must be a multiple of {BITS_PER_CHAR}"
)
clip = []
for i, b in enumerate(bits):
charBits = bits[i * BITS_PER_CHAR : (i + 1) * BITS_PER_CHAR]
if charBits != "":
clip.append(ALPHABET[int(charBits, 2)])
#root.debug(f"bitsToClip done :\n{"".join(clip[::-1])}")
return "".join(clip[::-1])
###
EMPTY_TYPE = -1
GROUND_TYPE = 15
MAX_TYPE_COUNT = 15
MAX_TYPE = 15 # Technically this should be 14 but it works in-game (with glitches)
MAX_HEIGHT = 255
GROUND_HEIGHT = 0
###
class BitReader:
def __init__(self, bits):
self.bits = bits
self.cursor = 0
def get_remaining(self):
return len(self.bits) - self.cursor
def read(self, n, lenient=False):
if n == 0:
root.debug("Trying to read zero bytes")
return
if self.get_remaining() < n and lenient is False:
root.debug(
"Could not read {} bits (only {} remaining) at position {}".format(
n, self.get_remaining(), self.cursor
)
)
readBits = self.bits[
-self.cursor - n : -self.cursor if self.cursor > 0 else None
]
self.cursor += n
if len(readBits) == 0:
return 0
else:
return int(readBits, 2)
# Given a signed int bit length, get the number to subtract from the raw uint
# to transform it into its signed value.
#
# https:#en.wikipedia.org/wiki/Offset_binary
def getSignOffset(bitLength):
return 1 << (bitLength - 1)
# Get the max value representable with some bit length.
def getMaxValue(bitLength):
return (1 << bitLength) - 1
# Get the bit length needed to represent some max value.
def getBitLength(maxValue):
return ceil(log2(maxValue + 1))
# Get the bit length needed to represent some max value in "offset binary"
# form (see getSignOffset).
def getSignedBitLength(minValue, maxValue):
if minValue == 0 and maxValue == 0:
return 0
unsignedMaxValue = max(
abs(minValue) if minValue < 0 else minValue + 1,
abs(maxValue) if maxValue < 0 else maxValue + 1,
)
return getBitLength(unsignedMaxValue * 2 - 1)
# Get info for the type list indices. Used in voxel lists to index the types.
def getTypeIndexInfo(typeCount):
typeIndexBitLength = getBitLength(typeCount + 1)
typeStopIndex = getMaxValue(typeIndexBitLength)
return typeIndexBitLength, typeStopIndex
# Constants
BIT_LENGTH_BIT_LENGTH = 5
TYPE_COUNT_BIT_LENGTH = getBitLength(MAX_TYPE_COUNT - 1)
TYPE_BIT_LENGTH = getBitLength(MAX_TYPE)
BOOLEAN_BIT_LENGTH = 1
# Remove zero-padding from a bit string. Assumes no extranous characters.
def removePadding(bits):
first1Index = bits.find("1")
if first1Index == -1:
return ""
return bits[first1Index:]
def bitsToDense(bits):
nonBit = search(r"[^01]", bits)
if nonBit is not None:
root.debug("Invalid bit character" + nonBit[0])
return
if len(bits) % BITS_PER_CHAR != 0:
root.debug(
"Bit string length {} must be a multiple of {}".format(
len(bits), BITS_PER_CHAR
)
)
return
dense = {}
bitReader = BitReader(removePadding(bits))
# Bit lengths
posBitLength = bitReader.read(BIT_LENGTH_BIT_LENGTH)
xDeltaBitLength = bitReader.read(BIT_LENGTH_BIT_LENGTH)
yOffsetBitLength = bitReader.read(BIT_LENGTH_BIT_LENGTH)
# Initial position (optional)
if posBitLength > 0:
signOffset = getSignOffset(posBitLength)
dense["xInitial"] = bitReader.read(posBitLength) - signOffset
dense["yInitial"] = bitReader.read(posBitLength) - signOffset
else:
dense["xInitial"] = 0
dense["yInitial"] = 0
# Types
typeCount = bitReader.read(TYPE_COUNT_BIT_LENGTH)
dense["types"] = []
for i in range(typeCount):
typee = bitReader.read(TYPE_BIT_LENGTH)
if typee > MAX_TYPE:
root.debug(
"Invalid voxel type {}, max {} near bit {}".format(
typee, MAX_TYPE, bitReader.cursor
)
)
return
dense["types"].append(typee)
typeIndexBitLength, typeStopIndex = getTypeIndexInfo(typeCount)
# Corners
dense["corners"] = []
xPos = dense["xInitial"]
isFirst = True
while bitReader.get_remaining() > 0:
corner = {}
# First corner does not have xDelta (0 assumed)
if isFirst:
corner["xDelta"] = None
isFirst = False
else:
isMoveX = bool(bitReader.read(BOOLEAN_BIT_LENGTH))
if isMoveX and xDeltaBitLength > 0:
corner["xDelta"] = bitReader.read(xDeltaBitLength)
xPos += corner["xDelta"]
else:
corner["xDelta"] = 0
if yOffsetBitLength > 0:
corner["yOffset"] = bitReader.read(yOffsetBitLength)
else:
corner["yOffset"] = 0
corner["hasGround"] = bool(bitReader.read(BOOLEAN_BIT_LENGTH))
# Voxels
corner["voxels"] = []
while len(corner["voxels"]) < MAX_HEIGHT:
# Special case: when reading the last voxel in the last corner, if it is located exactly
# at MAX_HEIGHT, the next typeStopIndex will be omitted. If we're trying to read a
# typeIndex that has leading zeroes, given that there's no '1' from typeStopIndex, we
# might have removed the leading 0s as if they were padding and `.read` will throw.
#
# We could always return implicit zeroes from BitReader but then we'd lose the ability to
# detect wrong reads.
typeIndex = bitReader.read(typeIndexBitLength, True)
if typeIndex == typeStopIndex:
break
typeIndexZeroBased = typeIndex - 1
if typeIndexZeroBased < EMPTY_TYPE or typeIndexZeroBased >= typeCount:
yPos = dense["yInitial"] + corner["yOffset"]
root.debug(
"Invalid voxel type {}/{} at ({}, {}, {}) near bit {}".format(
typeIndexZeroBased,
typeCount,
xPos,
yPos,
len(corner["voxels"]) + 1,
bitReader.cursor,
)
)
return
corner["voxels"].append(typeIndexZeroBased)
# Only push the corner if it has voxels
nonEmptyVoxels = [voxel for voxel in corner["voxels"] if voxel != EMPTY_TYPE]
if corner["hasGround"] or len(nonEmptyVoxels) > 0:
dense["corners"].append(corner)
#root.debug(f"bitsToDense done :\n{dense}")
return dense
# Get the uintN representation of a given value.
def bits(value, bitLength):
if value < 0:
root.debug(f"Trying to get bits for negative value {value}")
return
maxValue = getMaxValue(bitLength)
if value > maxValue:
root.debug(f"Trying to get bits for too large value {value} (max {maxValue})")
return
return "{0:b}".format(value).rjust(bitLength, "0")
def denseToBits(dense, pad=True):
outString = ""
# Detect bit lengths from values
posBitLength = getSignedBitLength(
min(dense["xInitial"], dense["yInitial"]),
max(dense["xInitial"], dense["yInitial"]),
)
maxXDelta = 0
maxYOffset = 0
for corner in dense["corners"]:
maxXDelta = max(
maxXDelta, corner["xDelta"] if corner["xDelta"] is not None else 0
)
maxYOffset = max(maxYOffset, corner["yOffset"])
xDeltaBitLength = getBitLength(maxXDelta)
yOffsetBitLength = getBitLength(maxYOffset)
# Bit lengths
posBitLengthBits = bits(posBitLength, BIT_LENGTH_BIT_LENGTH)
xDeltaBitLengthBits = bits(xDeltaBitLength, BIT_LENGTH_BIT_LENGTH)
yOffsetBitLengthBits = bits(yOffsetBitLength, BIT_LENGTH_BIT_LENGTH)
outString = (
yOffsetBitLengthBits + xDeltaBitLengthBits + posBitLengthBits + outString
)
# Initial position (optional)
if posBitLength > 0:
signOffset = getSignOffset(posBitLength)
xInitialBits = bits(dense["xInitial"] + signOffset, posBitLength)
yInitialBits = bits(dense["yInitial"] + signOffset, posBitLength)
outString = yInitialBits + xInitialBits + outString
# Types
if len(dense["types"]) > MAX_TYPE_COUNT:
root.debug(f"Invalid types.length {dense.types.length}, max {MAX_TYPE_COUNT}")
return
typeCountBits = bits(len(dense["types"]), TYPE_COUNT_BIT_LENGTH)
outString = typeCountBits + outString
for typee in dense["types"]:
if typee < 0 or typee > MAX_TYPE:
root.debug(f"Invalid type {typee}, max {MAX_TYPE}")
return
typeBits = bits(typee, TYPE_BIT_LENGTH)
outString = typeBits + outString
typeIndexBitLength, typeStopIndex = getTypeIndexInfo(len(dense["types"]))
# Corners
cornersWithData = [
corner
for i, corner in enumerate(dense["corners"])
if i == 0 or corner["hasGround"] or len(corner["voxels"]) > 0
]
# Ensure there's at least one (empty) corner
# Townscaper encodes the empty map as AAAE, but without this we'd output AAAA
corners = (
cornersWithData
if len(cornersWithData) > 0
else [{"xDelta": None, "hasGround": False, "voxels": []}]
)
isFirst = True
for corner in corners:
# First corner does not have xDelta (must be null)
if isFirst:
if corner["xDelta"] is not None:
root.debug("xDelta on first corner")
return
isFirst = False
else:
hasXDeltaBits = bits(1 if corner["xDelta"] else 0, BOOLEAN_BIT_LENGTH)
outString = hasXDeltaBits + outString
if corner["xDelta"]:
xDeltaBits = bits(corner["xDelta"], xDeltaBitLength)
outString = xDeltaBits + outString
if yOffsetBitLength > 0:
yOffsetBits = bits(corner["yOffset"], yOffsetBitLength)
outString = yOffsetBits + outString
hasGroundBits = bits(1 if corner["hasGround"] else 0, BOOLEAN_BIT_LENGTH)
outString = hasGroundBits + outString
# Voxels
if len(corner["voxels"]) > MAX_HEIGHT:
root.debug(f"Too many voxels ({corner.voxels.length}), max {MAX_HEIGHT}")
return
for typeIndexZeroBased in corner["voxels"]:
if typeIndexZeroBased < EMPTY_TYPE or typeIndexZeroBased >= len(
dense["types"]
):
root.debug(
f"Invalid type {typeIndexZeroBased}, min 0, max {len(dense["types"])}"
)
return
typeIndexZeroBasedBits = bits(typeIndexZeroBased + 1, typeIndexBitLength)
outString = typeIndexZeroBasedBits + outString
# Omit typeStopIndexBits if we reached the max height
if len(corner["voxels"]) < MAX_HEIGHT:
typeStopIndexBits = bits(typeStopIndex, typeIndexBitLength)
outString = typeStopIndexBits + outString
# Padding
if pad:
paddingLength = (
BITS_PER_CHAR - (len(outString) % BITS_PER_CHAR)
) % BITS_PER_CHAR
if paddingLength > 0:
paddingBits = "0" * paddingLength
outString = paddingBits + outString
#
#root.debug(f"denseToBits done :\n{outString}")
return outString
def denseToSparse(dense):
if len(dense["types"]) > MAX_TYPE_COUNT:
root.debug(f"Too many types ({len(dense["types"])}), max {MAX_TYPE_COUNT}")
return
if dense["corners"][0]["xDelta"] is not None:
root.debug(f"First xDelta ({dense["corners"][0]["xDelta"]}) is not null")
return
sparse = []
x = dense["xInitial"]
# Corners
for denseCorner in dense["corners"]:
if denseCorner["xDelta"]:
x += denseCorner["xDelta"]
y = dense["yInitial"] + (denseCorner["yOffset"] or 0)
# Voxels
voxels = {}
if denseCorner["hasGround"]:
voxels[GROUND_HEIGHT] = GROUND_TYPE
if len(denseCorner["voxels"]) > MAX_HEIGHT:
root.debug(
f"Too many voxels ({len(denseCorner["voxels"])}), max {MAX_HEIGHT}"
)
return
for h in range(len(denseCorner["voxels"])):
typee = denseCorner["voxels"][h]
if typee == EMPTY_TYPE:
continue
if not isinstance(typee, int):
root.debug(f"Invalid voxel typee {typee} at ({x}, {y}, {h + 1})")
return
if typee < EMPTY_TYPE or typee >= len(dense["types"]):
root.debug(
f"Invalid voxel typee {typee} at ({x}, {y}, {h + 1}), min {EMPTY_TYPE}, max {len(dense["types"]) - 1}"
)
return
voxels[h + (GROUND_HEIGHT + 1)] = dense["types"][typee]
if len(voxels) > 0:
sparse.append({"x": x, "y": y, "voxels": voxels})
#root.debug(f"denseToSparse done :\n{sparse}")
return sparse
def sparseToDense(sparse):
dense = {}
sortedSparseCorners = []
for sparseCorner in sparse:
groundVoxel = (
sparseCorner["voxels"][GROUND_HEIGHT]
if GROUND_HEIGHT in sparseCorner["voxels"]
else None
)
if groundVoxel != None and groundVoxel != GROUND_TYPE:
root.debug(
f"Trying to set voxel typee {sparseCorner["voxels"][0]} on ground, only {GROUND_TYPE} allowed"
)
return
sparseCorner["hasGround"] = groundVoxel == GROUND_TYPE
sparseCorner["voxels"] = {
h: t for h, t in sparseCorner["voxels"].items() if t is not None
}
if sparseCorner["hasGround"] or len(sparseCorner["voxels"]) > 0:
sortedSparseCorners.append(sparseCorner)
sortedSparseCorners.sort(key=lambda a: (a["x"], a["y"]))
# TODO: Throw here? We shouldn't have two corners with the same coordinates. Merge maybe?
if len(sortedSparseCorners) == 0:
return {"xInitial": 0, "yInitial": 0, "types": [], "corners": []}
# Analyze voxels for yInitial and types
minY = inf
types = []
for sparseCorner in sortedSparseCorners:
minY = min(minY, sparseCorner["y"])
for heightString, typee in sparseCorner["voxels"].items():
if typee is None:
continue
height = int(heightString)
if height > MAX_HEIGHT:
root.debug(f"Invalid height {height}, max {MAX_HEIGHT}")
return
if not isinstance(typee, int):
root.debug(f"Voxel typee {typee} is not an index")
return
if typee < 0 or typee > MAX_TYPE:
root.debug(f"Invalid voxel typee {type}, min 0, max {MAX_TYPE}")
return
if typee not in types and (height != GROUND_HEIGHT or typee != GROUND_TYPE):
types.append(typee)
if len(types) > MAX_TYPE_COUNT:
root.debug(f"Too many types ({types.length}), max {MAX_TYPE_COUNT}")
return
dense["xInitial"] = sortedSparseCorners[0]["x"]
dense["yInitial"] = minY
types.sort()
dense["types"] = types
# Corners
dense["corners"] = []
currentX = dense["xInitial"]
for sparseCorner in sortedSparseCorners:
xDelta = sparseCorner["x"] - currentX if len(dense["corners"]) != 0 else None
currentX = sparseCorner["x"]
yOffset = sparseCorner["y"] - dense["yInitial"]
maxHeight = max(sparseCorner["voxels"].keys())
voxels = []
for i in range((GROUND_HEIGHT + 1), maxHeight + 1):
typee = sparseCorner["voxels"][i] if i in sparseCorner["voxels"] else None
if typee is None:
voxels.append(EMPTY_TYPE)
else:
voxels.append(types.index(typee))
dense["corners"].append(
{
"xDelta": xDelta,
"yOffset": yOffset,
"hasGround": sparseCorner["hasGround"],
"voxels": voxels,
}
)
#root.debug(f"sparseToDense done :\n{dense}")
return dense
# TEMPORARY CONVERSION : from sparse to corvox
def sparseToCorvox(sparse):
    """Convert a sparse corner list into the corvox mapping keyed by (x, y).

    Each entry keeps the corner's voxel dict plus a precomputed voxel count.
    """
    corvox = {}
    for corner in sparse:
        voxels = corner["voxels"]
        corvox[(corner["x"], corner["y"])] = {"count": len(voxels), "voxels": voxels}
    return corvox
# TEMPORARY CONVERSION : from corvox to sparse
def corvoxToSparse(corvox):
    """Convert a corvox mapping back into a sparse corner list.

    Corners whose voxel dict is empty are dropped.
    """
    return [
        {"x": x, "y": y, "voxels": entry["voxels"]}
        for (x, y), entry in corvox.items()
        if entry["voxels"] != {}
    ]
| # The functions detailed below are all translation from Javascript.
# The original code was done by alvaro-cuesta here https://github.com/alvaro-cuesta/townsclipper
# Thank you very much for it !
# IMPORTANT NOTES:
#
# Input looks like base64url but IT IS NOT! Notice the alphabet's out-of-order "w"
# and the swapped "_" and "-" when compared to RFC 4648 §5.
#
# Townscaper decodes clip strings left-to-right and LSB first (both in BitArray index
# and character bits). This means the "base64" output is reversed.
#
# Later, when reading values, they're read in bit blocks from LSB to MSB. I.e. values
# are read right-to-left (but their bits are still LSB on the right).
from logging import getLogger
from math import ceil, inf, log2
from re import search
# Logging
root = getLogger("Town.clipper")
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvxyzw0123456789_-"
BITS_PER_CHAR = 6
def clipToBits(clip):
    """Decode a Townscaper clip string into its bit-string representation.

    The clip is processed right-to-left (Townscaper reads LSB first); each
    character contributes BITS_PER_CHAR zero-padded bits. Returns None after
    logging when a character is outside the custom alphabet.
    """
    chunks = []
    for char in reversed(clip):
        value = ALPHABET.find(char)
        if value == -1:
            root.debug(f"Invalid clip string character {char}")
            return
        chunks.append(format(value, "b").rjust(BITS_PER_CHAR, "0"))
    return "".join(chunks)
def bitsToClip(bits):
    """Encode a bit string back into a Townscaper clip string.

    Inverse of clipToBits: every BITS_PER_CHAR-bit group becomes one alphabet
    character, and the result is reversed so the clip reads MSB-last.
    Returns None after logging on invalid input.
    """
    if any(bit not in ("0", "1") for bit in bits):
        root.debug("Invalid bits")
        return
    if len(bits) % BITS_PER_CHAR != 0:
        root.debug(
            f"Bit string length {len(bits)} must be a multiple of {BITS_PER_CHAR}"
        )
        # BUG FIX: previously fell through after logging and produced a clip
        # from a truncated final group; bitsToDense returns on the same check.
        return
    clip = []
    # Iterate per character group instead of once per bit as before.
    for i in range(len(bits) // BITS_PER_CHAR):
        charBits = bits[i * BITS_PER_CHAR : (i + 1) * BITS_PER_CHAR]
        clip.append(ALPHABET[int(charBits, 2)])
    return "".join(clip[::-1])
###
EMPTY_TYPE = -1
GROUND_TYPE = 15
MAX_TYPE_COUNT = 15
MAX_TYPE = 15 # Technically this should be 14 but it works in-game (with glitches)
MAX_HEIGHT = 255
GROUND_HEIGHT = 0
###
class BitReader:
    """Reads unsigned integer values out of a bit string, right to left.

    Townscaper stores values LSB-first, so reads consume bits from the END of
    the string: `cursor` counts bits already consumed from the right.
    """
    def __init__(self, bits):
        # Bit string ('0'/'1' characters) to consume.
        self.bits = bits
        # Number of bits already read, counted from the right end.
        self.cursor = 0
    def get_remaining(self):
        """Number of unread bits left in the string."""
        return len(self.bits) - self.cursor
    def read(self, n, lenient=False):
        """Read the next `n` bits (from the right) as an unsigned int.

        With lenient=True an underflow is tolerated and whatever bits remain
        are returned (0 when none are left) — used for the optional trailing
        stop index in bitsToDense.
        """
        if n == 0:
            root.debug("Trying to read zero bytes")
            return
        if self.get_remaining() < n and lenient is False:
            # NOTE(review): this only logs and still performs the (short) read
            # — presumably a soft failure mirroring the JS original; confirm.
            root.debug(
                "Could not read {} bits (only {} remaining) at position {}".format(
                    n, self.get_remaining(), self.cursor
                )
            )
        # Negative slice takes the n bits just left of the cursor position,
        # i.e. the next unread group when reading right-to-left.
        readBits = self.bits[
            -self.cursor - n : -self.cursor if self.cursor > 0 else None
        ]
        self.cursor += n
        if len(readBits) == 0:
            # Reading past the start yields implicit zeroes.
            return 0
        else:
            return int(readBits, 2)
# Given a signed int bit length, get the number to subtract from the raw uint
# to transform it into its signed value.
#
# https:#en.wikipedia.org/wiki/Offset_binary
def getSignOffset(bitLength):
    """Offset-binary bias to subtract from a raw uint to get its signed value."""
    shift = bitLength - 1
    return 1 << shift
# Get the max value representable with some bit length.
def getMaxValue(bitLength):
    """Largest unsigned value representable in `bitLength` bits."""
    full = 1 << bitLength
    return full - 1
# Get the bit length needed to represent some max value.
def getBitLength(maxValue):
    """Number of bits needed to represent `maxValue` as an unsigned int."""
    distinct = maxValue + 1
    return ceil(log2(distinct))
# Get the bit length needed to represent some max value in "offset binary"
# form (see getSignOffset).
def getSignedBitLength(minValue, maxValue):
    """Bit length needed to hold [minValue, maxValue] in offset-binary form.

    Zero-only ranges need no bits at all.
    """
    if minValue == 0 and maxValue == 0:
        return 0
    candidates = []
    for bound in (minValue, maxValue):
        # Positive bounds need one extra step to cover the asymmetric range.
        candidates.append(abs(bound) if bound < 0 else bound + 1)
    unsignedMaxValue = max(candidates)
    return getBitLength(unsignedMaxValue * 2 - 1)
# Get info for the type list indices. Used in voxel lists to index the types.
def getTypeIndexInfo(typeCount):
    """Bit length and stop sentinel for voxel type-list indices."""
    bitLength = getBitLength(typeCount + 1)
    return bitLength, getMaxValue(bitLength)
# Constants
BIT_LENGTH_BIT_LENGTH = 5
TYPE_COUNT_BIT_LENGTH = getBitLength(MAX_TYPE_COUNT - 1)
TYPE_BIT_LENGTH = getBitLength(MAX_TYPE)
BOOLEAN_BIT_LENGTH = 1
# Remove zero-padding from a bit string. Assumes no extranous characters.
def removePadding(bits):
    """Strip leading zero padding; an all-zero string collapses to ""."""
    idx = bits.find("1")
    return bits[idx:] if idx != -1 else ""
def bitsToDense(bits):
    """Parse a clip bit string into the dense structure.

    Returns a dict with "xInitial", "yInitial", "types" and "corners", or
    None (implicitly) after logging when the input is malformed. All values
    are read right-to-left via BitReader.
    """
    # Reject any character that is not '0' or '1'.
    nonBit = search(r"[^01]", bits)
    if nonBit is not None:
        root.debug("Invalid bit character" + nonBit[0])
        return
    if len(bits) % BITS_PER_CHAR != 0:
        root.debug(
            "Bit string length {} must be a multiple of {}".format(
                len(bits), BITS_PER_CHAR
            )
        )
        return
    dense = {}
    # Leading zeroes are encoder padding; strip them before reading values.
    bitReader = BitReader(removePadding(bits))
    # Bit lengths
    posBitLength = bitReader.read(BIT_LENGTH_BIT_LENGTH)
    xDeltaBitLength = bitReader.read(BIT_LENGTH_BIT_LENGTH)
    yOffsetBitLength = bitReader.read(BIT_LENGTH_BIT_LENGTH)
    # Initial position (optional)
    if posBitLength > 0:
        # Positions are stored in offset-binary form; recover the sign.
        signOffset = getSignOffset(posBitLength)
        dense["xInitial"] = bitReader.read(posBitLength) - signOffset
        dense["yInitial"] = bitReader.read(posBitLength) - signOffset
    else:
        dense["xInitial"] = 0
        dense["yInitial"] = 0
    # Types
    typeCount = bitReader.read(TYPE_COUNT_BIT_LENGTH)
    dense["types"] = []
    for i in range(typeCount):
        typee = bitReader.read(TYPE_BIT_LENGTH)
        if typee > MAX_TYPE:
            root.debug(
                "Invalid voxel type {}, max {} near bit {}".format(
                    typee, MAX_TYPE, bitReader.cursor
                )
            )
            return
        dense["types"].append(typee)
    typeIndexBitLength, typeStopIndex = getTypeIndexInfo(typeCount)
    # Corners
    dense["corners"] = []
    xPos = dense["xInitial"]
    isFirst = True
    while bitReader.get_remaining() > 0:
        corner = {}
        # First corner does not have xDelta (0 assumed)
        if isFirst:
            corner["xDelta"] = None
            isFirst = False
        else:
            isMoveX = bool(bitReader.read(BOOLEAN_BIT_LENGTH))
            if isMoveX and xDeltaBitLength > 0:
                corner["xDelta"] = bitReader.read(xDeltaBitLength)
                xPos += corner["xDelta"]
            else:
                corner["xDelta"] = 0
        if yOffsetBitLength > 0:
            corner["yOffset"] = bitReader.read(yOffsetBitLength)
        else:
            corner["yOffset"] = 0
        corner["hasGround"] = bool(bitReader.read(BOOLEAN_BIT_LENGTH))
        # Voxels
        corner["voxels"] = []
        while len(corner["voxels"]) < MAX_HEIGHT:
            # Special case: when reading the last voxel in the last corner, if it is located exactly
            # at MAX_HEIGHT, the next typeStopIndex will be omitted. If we're trying to read a
            # typeIndex that has leading zeroes, given that there's no '1' from typeStopIndex, we
            # might have removed the leading 0s as if they were padding and `.read` will throw.
            #
            # We could always return implicit zeroes from BitReader but then we'd lose the ability to
            # detect wrong reads.
            typeIndex = bitReader.read(typeIndexBitLength, True)
            if typeIndex == typeStopIndex:
                break
            # Index 0 encodes EMPTY_TYPE, hence the -1 shift.
            typeIndexZeroBased = typeIndex - 1
            if typeIndexZeroBased < EMPTY_TYPE or typeIndexZeroBased >= typeCount:
                yPos = dense["yInitial"] + corner["yOffset"]
                root.debug(
                    "Invalid voxel type {}/{} at ({}, {}, {}) near bit {}".format(
                        typeIndexZeroBased,
                        typeCount,
                        xPos,
                        yPos,
                        len(corner["voxels"]) + 1,
                        bitReader.cursor,
                    )
                )
                return
            corner["voxels"].append(typeIndexZeroBased)
        # Only push the corner if it has voxels
        nonEmptyVoxels = [voxel for voxel in corner["voxels"] if voxel != EMPTY_TYPE]
        if corner["hasGround"] or len(nonEmptyVoxels) > 0:
            dense["corners"].append(corner)
    #root.debug(f"bitsToDense done :\n{dense}")
    return dense
# Get the uintN representation of a given value.
def bits(value, bitLength):
    """Return `value` as a zero-padded unsigned binary string of `bitLength` bits.

    Logs and returns None when the value is negative or does not fit.
    """
    if value < 0:
        root.debug(f"Trying to get bits for negative value {value}")
        return
    maxValue = getMaxValue(bitLength)
    if value > maxValue:
        root.debug(f"Trying to get bits for too large value {value} (max {maxValue})")
        return
    return format(value, "b").rjust(bitLength, "0")
def denseToBits(dense, pad=True):
    """Serialize a dense clip structure into its bit string.

    Newly encoded fields are PREPENDED to outString because Townscaper reads
    the clip right-to-left. With pad=True the result is left-padded with
    zeroes to a multiple of BITS_PER_CHAR. Returns None after logging on
    invalid input.
    """
    outString = ""
    # Detect bit lengths from values
    posBitLength = getSignedBitLength(
        min(dense["xInitial"], dense["yInitial"]),
        max(dense["xInitial"], dense["yInitial"]),
    )
    maxXDelta = 0
    maxYOffset = 0
    for corner in dense["corners"]:
        maxXDelta = max(
            maxXDelta, corner["xDelta"] if corner["xDelta"] is not None else 0
        )
        maxYOffset = max(maxYOffset, corner["yOffset"])
    xDeltaBitLength = getBitLength(maxXDelta)
    yOffsetBitLength = getBitLength(maxYOffset)
    # Bit lengths
    posBitLengthBits = bits(posBitLength, BIT_LENGTH_BIT_LENGTH)
    xDeltaBitLengthBits = bits(xDeltaBitLength, BIT_LENGTH_BIT_LENGTH)
    yOffsetBitLengthBits = bits(yOffsetBitLength, BIT_LENGTH_BIT_LENGTH)
    outString = (
        yOffsetBitLengthBits + xDeltaBitLengthBits + posBitLengthBits + outString
    )
    # Initial position (optional)
    if posBitLength > 0:
        signOffset = getSignOffset(posBitLength)
        xInitialBits = bits(dense["xInitial"] + signOffset, posBitLength)
        yInitialBits = bits(dense["yInitial"] + signOffset, posBitLength)
        outString = yInitialBits + xInitialBits + outString
    # Types
    if len(dense["types"]) > MAX_TYPE_COUNT:
        # BUG FIX: message used `dense.types.length` (a JS leftover) which
        # raised AttributeError on a dict instead of logging.
        root.debug(f"Invalid types length {len(dense['types'])}, max {MAX_TYPE_COUNT}")
        return
    typeCountBits = bits(len(dense["types"]), TYPE_COUNT_BIT_LENGTH)
    outString = typeCountBits + outString
    for typee in dense["types"]:
        if typee < 0 or typee > MAX_TYPE:
            root.debug(f"Invalid type {typee}, max {MAX_TYPE}")
            return
        typeBits = bits(typee, TYPE_BIT_LENGTH)
        outString = typeBits + outString
    typeIndexBitLength, typeStopIndex = getTypeIndexInfo(len(dense["types"]))
    # Corners
    cornersWithData = [
        corner
        for i, corner in enumerate(dense["corners"])
        if i == 0 or corner["hasGround"] or len(corner["voxels"]) > 0
    ]
    # Ensure there's at least one (empty) corner
    # Townscaper encodes the empty map as AAAE, but without this we'd output AAAA
    corners = (
        cornersWithData
        if len(cornersWithData) > 0
        else [{"xDelta": None, "hasGround": False, "voxels": []}]
    )
    isFirst = True
    for corner in corners:
        # First corner does not have xDelta (must be null)
        if isFirst:
            if corner["xDelta"] is not None:
                root.debug("xDelta on first corner")
                return
            isFirst = False
        else:
            hasXDeltaBits = bits(1 if corner["xDelta"] else 0, BOOLEAN_BIT_LENGTH)
            outString = hasXDeltaBits + outString
            if corner["xDelta"]:
                xDeltaBits = bits(corner["xDelta"], xDeltaBitLength)
                outString = xDeltaBits + outString
        if yOffsetBitLength > 0:
            yOffsetBits = bits(corner["yOffset"], yOffsetBitLength)
            outString = yOffsetBits + outString
        hasGroundBits = bits(1 if corner["hasGround"] else 0, BOOLEAN_BIT_LENGTH)
        outString = hasGroundBits + outString
        # Voxels
        if len(corner["voxels"]) > MAX_HEIGHT:
            # BUG FIX: `corner.voxels.length` was another JS leftover that
            # raised AttributeError instead of logging.
            root.debug(f"Too many voxels ({len(corner['voxels'])}), max {MAX_HEIGHT}")
            return
        for typeIndexZeroBased in corner["voxels"]:
            if typeIndexZeroBased < EMPTY_TYPE or typeIndexZeroBased >= len(
                dense["types"]
            ):
                root.debug(
                    f"Invalid type {typeIndexZeroBased}, min 0, max {len(dense['types'])}"
                )
                return
            typeIndexZeroBasedBits = bits(typeIndexZeroBased + 1, typeIndexBitLength)
            outString = typeIndexZeroBasedBits + outString
        # Omit typeStopIndexBits if we reached the max height
        if len(corner["voxels"]) < MAX_HEIGHT:
            typeStopIndexBits = bits(typeStopIndex, typeIndexBitLength)
            outString = typeStopIndexBits + outString
    # Padding
    if pad:
        paddingLength = (
            BITS_PER_CHAR - (len(outString) % BITS_PER_CHAR)
        ) % BITS_PER_CHAR
        if paddingLength > 0:
            paddingBits = "0" * paddingLength
            outString = paddingBits + outString
    #
    #root.debug(f"denseToBits done :\n{outString}")
    return outString
def denseToSparse(dense):
    """Expand a dense clip structure into a list of per-corner voxel dicts.

    Each sparse entry is {"x", "y", "voxels"} with voxels keyed by absolute
    height. Returns None after logging on invalid input.
    """
    if len(dense["types"]) > MAX_TYPE_COUNT:
        root.debug(f"Too many types ({len(dense['types'])}), max {MAX_TYPE_COUNT}")
        return
    if dense["corners"][0]["xDelta"] is not None:
        root.debug(f"First xDelta ({dense['corners'][0]['xDelta']}) is not null")
        return
    sparse = []
    x = dense["xInitial"]
    # Corners
    for denseCorner in dense["corners"]:
        # xDelta accumulates: corners are stored sorted by x.
        if denseCorner["xDelta"]:
            x += denseCorner["xDelta"]
        y = dense["yInitial"] + (denseCorner["yOffset"] or 0)
        # Voxels
        voxels = {}
        if denseCorner["hasGround"]:
            voxels[GROUND_HEIGHT] = GROUND_TYPE
        if len(denseCorner["voxels"]) > MAX_HEIGHT:
            root.debug(
                f"Too many voxels ({len(denseCorner['voxels'])}), max {MAX_HEIGHT}"
            )
            return
        for h in range(len(denseCorner["voxels"])):
            typee = denseCorner["voxels"][h]
            if typee == EMPTY_TYPE:
                # Gaps in the voxel column are simply skipped.
                continue
            if not isinstance(typee, int):
                root.debug(f"Invalid voxel typee {typee} at ({x}, {y}, {h + 1})")
                return
            if typee < EMPTY_TYPE or typee >= len(dense["types"]):
                root.debug(
                    f"Invalid voxel typee {typee} at ({x}, {y}, {h + 1}), min {EMPTY_TYPE}, max {len(dense['types']) - 1}"
                )
                return
            # Dense voxel lists start just above the ground layer.
            voxels[h + (GROUND_HEIGHT + 1)] = dense["types"][typee]
        if len(voxels) > 0:
            sparse.append({"x": x, "y": y, "voxels": voxels})
    #root.debug(f"denseToSparse done :\n{sparse}")
    return sparse
def sparseToDense(sparse):
    """Compact a sparse corner list into the dense clip structure.

    Mutates the input corners ("hasGround" is added, None voxels dropped).
    Returns the dense dict, an empty-map dict for empty input, or None after
    logging on invalid input.
    """
    dense = {}
    sortedSparseCorners = []
    for sparseCorner in sparse:
        groundVoxel = (
            sparseCorner["voxels"][GROUND_HEIGHT]
            if GROUND_HEIGHT in sparseCorner["voxels"]
            else None
        )
        # Only the dedicated ground type may occupy the ground layer.
        if groundVoxel is not None and groundVoxel != GROUND_TYPE:
            root.debug(
                f"Trying to set voxel typee {sparseCorner['voxels'][0]} on ground, only {GROUND_TYPE} allowed"
            )
            return
        sparseCorner["hasGround"] = groundVoxel == GROUND_TYPE
        sparseCorner["voxels"] = {
            h: t for h, t in sparseCorner["voxels"].items() if t is not None
        }
        if sparseCorner["hasGround"] or len(sparseCorner["voxels"]) > 0:
            sortedSparseCorners.append(sparseCorner)
    sortedSparseCorners.sort(key=lambda a: (a["x"], a["y"]))
    # TODO: Throw here? We shouldn't have two corners with the same coordinates. Merge maybe?
    if len(sortedSparseCorners) == 0:
        return {"xInitial": 0, "yInitial": 0, "types": [], "corners": []}
    # Analyze voxels for yInitial and types
    minY = inf
    types = []
    for sparseCorner in sortedSparseCorners:
        minY = min(minY, sparseCorner["y"])
        for heightString, typee in sparseCorner["voxels"].items():
            if typee is None:
                continue
            height = int(heightString)
            if height > MAX_HEIGHT:
                root.debug(f"Invalid height {height}, max {MAX_HEIGHT}")
                return
            if not isinstance(typee, int):
                root.debug(f"Voxel typee {typee} is not an index")
                return
            if typee < 0 or typee > MAX_TYPE:
                # BUG FIX: message interpolated the builtin `type` instead of
                # the offending value `typee`.
                root.debug(f"Invalid voxel typee {typee}, min 0, max {MAX_TYPE}")
                return
            if typee not in types and (height != GROUND_HEIGHT or typee != GROUND_TYPE):
                types.append(typee)
    if len(types) > MAX_TYPE_COUNT:
        # BUG FIX: `types.length` (a JS leftover) raised AttributeError on a
        # Python list instead of logging.
        root.debug(f"Too many types ({len(types)}), max {MAX_TYPE_COUNT}")
        return
    dense["xInitial"] = sortedSparseCorners[0]["x"]
    dense["yInitial"] = minY
    types.sort()
    dense["types"] = types
    # Corners
    dense["corners"] = []
    currentX = dense["xInitial"]
    for sparseCorner in sortedSparseCorners:
        xDelta = sparseCorner["x"] - currentX if len(dense["corners"]) != 0 else None
        currentX = sparseCorner["x"]
        yOffset = sparseCorner["y"] - dense["yInitial"]
        # NOTE(review): assumes voxel keys are ints here, while the loop above
        # tolerates string keys via int(heightString) — confirm callers.
        maxHeight = max(sparseCorner["voxels"].keys())
        voxels = []
        for i in range((GROUND_HEIGHT + 1), maxHeight + 1):
            typee = sparseCorner["voxels"][i] if i in sparseCorner["voxels"] else None
            if typee is None:
                voxels.append(EMPTY_TYPE)
            else:
                voxels.append(types.index(typee))
        dense["corners"].append(
            {
                "xDelta": xDelta,
                "yOffset": yOffset,
                "hasGround": sparseCorner["hasGround"],
                "voxels": voxels,
            }
        )
    #root.debug(f"sparseToDense done :\n{dense}")
    return dense
# TEMPORARY CONVERSION : from sparse to corvox
def sparseToCorvox(sparse):
    """Build the (x, y)-keyed corvox dict from a sparse corner list."""
    return {
        (corner["x"], corner["y"]): {
            "count": len(corner["voxels"]),
            "voxels": corner["voxels"],
        }
        for corner in sparse
    }
# TEMPORARY CONVERSION : from corvox to sparse
def corvoxToSparse(corvox):
    """Turn the corvox dict back into a sparse list, skipping voxel-less corners."""
    sparse = []
    for coords, entry in corvox.items():
        if entry["voxels"] == {}:
            continue
        xCoord, yCoord = coords
        sparse.append({"x": xCoord, "y": yCoord, "voxels": entry["voxels"]})
    return sparse
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
2020-07-06-extrapolation.py: based on small subsampled DoEs, extrapolate a best
suggested next DoE size and compare this with results from enumerated DoEs.
"""
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from pyprojroot import here
from parse import parse
import processing as proc
print(f'Running script: {__file__}')
__author__ = 'Sander van Rijn'
__email__ = 's.j.van.rijn@liacs.leidenuniv.nl'
regular_dir = here('files/2019-09-mse-nc')
adjustables_dir = here('files/2019-10-07-adjustables')
subsampling_dir = here('files/2020-03-03-cv-subsampling/')
adjustables_subsampling_dir = here("files/2020-03-04-cv-adjustables-subsampling/")
plot_dir = here('plots/2020-07-06-extrapolation/', warn=False)
plot_dir.mkdir(exist_ok=True, parents=True)
def atan_deg(*args):
    """Two-argument arctangent of *args, returned in degrees."""
    radians = np.arctan2(*args)
    return np.rad2deg(radians)
def angle_from_linreg(reg):
    """Angle in degrees (mod 180) implied by a fitted regression's first two coefficients."""
    leading_coefs = reg.coef_[:2]
    return atan_deg(*leading_coefs) % 180
def calc_intercept(b, h0, l0, gradient, costratio):
    """Intersect the gradient line through (l0, h0) with the budget line h = b - costratio*l.

    Returns the (h, l) coordinates of the intersection.
    """
    numerator = b - h0 + gradient * l0
    low = numerator / (gradient + costratio)
    high = b - costratio * low
    return high, low
# Currently Hardcoded:
# Smallest subsampled DoE size (high-, low-fidelity sample counts).
small_h, small_l = 30, 75
# Largest enumerated DoE size.
large_h, large_l = 50, 125
# presumably the cost of a low-fidelity sample relative to a high-fidelity one — TODO confirm
costratio = 0.4
# All (n_high, n_low) combinations between the small and large DoE corners.
num_lows = xr.DataArray(np.arange(small_l, large_l+1), dims=["idx"])
num_highs = xr.DataArray(np.floor(np.linspace(large_h, small_h, len(num_lows))), dims=["idx"])
# Angle of each combination, measured from the small DoE corner (30, 75).
angles = atan_deg(num_highs-small_h, num_lows-small_l)
def plot_extrapolation_suggestion(file_small, file_large):
    """Plot enumerated-DoE MSEs along the budget line together with the
    best angle predicted from the small subsampled DoE.

    :param file_small: netCDF dataset with MSEs of the subsampled (30, 75) DoE.
    :param file_large: netCDF dataset with MSEs of the fully enumerated DoE.
    """
    # Fit a linear model to the small DoE and derive the suggested angle.
    with xr.open_dataset(file_small) as ds:
        da = ds['mses'].sel(model='high_hier')
        with da.load() as da:
            reg = proc.fit_lin_reg(da)
            gradient = reg.coef_[0] / reg.coef_[1]
            deg_small = angle_from_linreg(reg)
    with xr.open_dataset(file_large) as ds:
        da = ds['mses'].sel(model='high_hier')
        with da.load() as da:
            # NOTE(review): deg_large is computed but never used below.
            deg_large = angle_from_linreg(proc.fit_lin_reg(da))
            # Median MSE over repetitions, sampled along the budget line.
            data_along_budget_line = da.median(dim='rep').sel(n_low=num_lows, n_high=num_highs).values
    # Derive the plot title from the benchmark file name.
    if 'Adjustable' in file_large.stem:
        kernel, ndim, fname, param = parse("{}-{:d}d-Adjustable-{}-{:f}", file_large.stem)
        title = f"{ndim}d Adjustable {fname} (A={param})"
    else:
        kernel, ndim, fname = parse("{}-{:d}d-{}", file_large.stem)
        title = f"{ndim}d {fname.replace("-", " ")}"
    # NOTE(review): smallest_at_angle and gradient_budget_intercept are unused.
    smallest_at_angle = angles[np.argmin(data_along_budget_line)]
    gradient_budget_intercept = calc_intercept(80, small_h, small_l, gradient, costratio)
    plt.figure(figsize=(4.8, 2.4), constrained_layout=True)
    plt.plot(angles, data_along_budget_line, marker='o', label='MSEs from DoE enumeration')
    plt.ylabel('MSE')
    plt.xlabel('angle measured from (30, 75)')
    plt.yscale('log')
    plt.axvline(deg_small, ls=':', label='Predicted best angle', color='C1')
    plt.legend(loc=0)
    plt.xlim([0,90])
    plt.title(title)
    for ext in proc.extensions:
        plt.savefig(plot_dir / f'{file_small.stem.replace('.', '')}.{ext}', dpi=300, bbox_inches='tight')
    plt.close()
# Compare every subsampled (30, 75) DoE against its fully enumerated counterpart.
for file_small in filter(lambda f: 'sub30-75' in str(f), subsampling_dir.iterdir()):
    file_large = regular_dir / file_small.name.replace('-sub30-75-seed0', '')
    plot_extrapolation_suggestion(file_small, file_large)
# Same comparison for the adjustable-function benchmark files.
for file_small in filter(lambda f: 'sub30-75' in str(f), adjustables_subsampling_dir.iterdir()):
    file_large = adjustables_dir / file_small.name.replace('-sub30-75-seed0', '')
    plot_extrapolation_suggestion(file_small, file_large)
| #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
2020-07-06-extrapolation.py: based on small subsampled DoEs, extrapolate a best
suggested next DoE size and compare this with results from enumerated DoEs.
"""
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from pyprojroot import here
from parse import parse
import processing as proc
print(f'Running script: {__file__}')
__author__ = 'Sander van Rijn'
__email__ = 's.j.van.rijn@liacs.leidenuniv.nl'
regular_dir = here('files/2019-09-mse-nc')
adjustables_dir = here('files/2019-10-07-adjustables')
subsampling_dir = here('files/2020-03-03-cv-subsampling/')
adjustables_subsampling_dir = here("files/2020-03-04-cv-adjustables-subsampling/")
plot_dir = here('plots/2020-07-06-extrapolation/', warn=False)
plot_dir.mkdir(exist_ok=True, parents=True)
def atan_deg(*args):
    """Arctangent (two-argument form) converted to degrees."""
    return np.rad2deg(np.arctan2(*args))
def angle_from_linreg(reg):
    """Degrees (mod 180) of the direction given by the regression's first two coefficients."""
    coef_pair = reg.coef_[:2]
    return atan_deg(*coef_pair) % 180
def calc_intercept(b, h0, l0, gradient, costratio):
    """Return (h, l) where the gradient line through (l0, h0) meets h = b - costratio*l."""
    low_count = (b - h0 + gradient * l0) / (gradient + costratio)
    return b - costratio * low_count, low_count
# Currently Hardcoded:
# Smallest subsampled DoE size (high-, low-fidelity sample counts).
small_h, small_l = 30, 75
# Largest enumerated DoE size.
large_h, large_l = 50, 125
# presumably the cost of a low-fidelity sample relative to a high-fidelity one — TODO confirm
costratio = 0.4
# All (n_high, n_low) combinations between the small and large DoE corners.
num_lows = xr.DataArray(np.arange(small_l, large_l+1), dims=["idx"])
num_highs = xr.DataArray(np.floor(np.linspace(large_h, small_h, len(num_lows))), dims=["idx"])
# Angle of each combination, measured from the small DoE corner (30, 75).
angles = atan_deg(num_highs-small_h, num_lows-small_l)
def plot_extrapolation_suggestion(file_small, file_large):
    """Plot enumerated-DoE MSEs along the budget line together with the
    best angle predicted from the small subsampled DoE.

    :param file_small: netCDF dataset with MSEs of the subsampled (30, 75) DoE.
    :param file_large: netCDF dataset with MSEs of the fully enumerated DoE.
    """
    # Fit a linear model to the small DoE and derive the suggested angle.
    with xr.open_dataset(file_small) as ds:
        da = ds['mses'].sel(model='high_hier')
        with da.load() as da:
            reg = proc.fit_lin_reg(da)
            gradient = reg.coef_[0] / reg.coef_[1]
            deg_small = angle_from_linreg(reg)
    with xr.open_dataset(file_large) as ds:
        da = ds['mses'].sel(model='high_hier')
        with da.load() as da:
            # NOTE(review): deg_large is computed but never used below.
            deg_large = angle_from_linreg(proc.fit_lin_reg(da))
            # Median MSE over repetitions, sampled along the budget line.
            data_along_budget_line = da.median(dim='rep').sel(n_low=num_lows, n_high=num_highs).values
    # Derive the plot title from the benchmark file name.
    if 'Adjustable' in file_large.stem:
        kernel, ndim, fname, param = parse("{}-{:d}d-Adjustable-{}-{:f}", file_large.stem)
        title = f"{ndim}d Adjustable {fname} (A={param})"
    else:
        kernel, ndim, fname = parse("{}-{:d}d-{}", file_large.stem)
        title = f"{ndim}d {fname.replace('-', ' ')}"
    # NOTE(review): smallest_at_angle and gradient_budget_intercept are unused.
    smallest_at_angle = angles[np.argmin(data_along_budget_line)]
    gradient_budget_intercept = calc_intercept(80, small_h, small_l, gradient, costratio)
    plt.figure(figsize=(4.8, 2.4), constrained_layout=True)
    plt.plot(angles, data_along_budget_line, marker='o', label='MSEs from DoE enumeration')
    plt.ylabel('MSE')
    plt.xlabel('angle measured from (30, 75)')
    plt.yscale('log')
    plt.axvline(deg_small, ls=':', label='Predicted best angle', color='C1')
    plt.legend(loc=0)
    plt.xlim([0,90])
    plt.title(title)
    for ext in proc.extensions:
        plt.savefig(plot_dir / f'{file_small.stem.replace(".", "")}.{ext}', dpi=300, bbox_inches='tight')
    plt.close()
plt.close()
# Compare every subsampled (30, 75) DoE against its fully enumerated counterpart.
for file_small in filter(lambda f: 'sub30-75' in str(f), subsampling_dir.iterdir()):
    file_large = regular_dir / file_small.name.replace('-sub30-75-seed0', '')
    plot_extrapolation_suggestion(file_small, file_large)
# Same comparison for the adjustable-function benchmark files.
for file_small in filter(lambda f: 'sub30-75' in str(f), adjustables_subsampling_dir.iterdir()):
    file_large = adjustables_dir / file_small.name.replace('-sub30-75-seed0', '')
    plot_extrapolation_suggestion(file_small, file_large)
|
"""A simple HTML visualizer.
It is based on the Cycle-GAN codebase:
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
"""
import os
import numpy as np
from pathlib import Path
from . import util, html
class Visualizer:
    """This class includes several functions that can display/save images.
    It uses a Python library 'visdom' for display, and a Python library 'dominate'
    (wrapped in 'HTML') for creating HTML files with images.
    """
    def __init__(self, exp_name, web_dir, src_video_dir, vis_vid_freq, num_samples=50):
        """Initialize the Visualizer class.

        Creates the web/image directories and symlinks the source videos into
        the web directory so the generated HTML can reference them.
        """
        self.name = exp_name
        self.web_dir = web_dir
        # Render rankings only every `vis_vid_freq` epochs (falsy disables).
        self.vis_vid_freq = vis_vid_freq
        self.img_dir = os.path.join(self.web_dir, "images")
        # Number of query rows sampled per visualisation.
        self.num_samples = num_samples
        print(f"create web directory {self.web_dir}...")
        util.mkdirs([self.web_dir, self.img_dir])
        src_dir = Path(src_video_dir).absolute()
        print(f"symlinking videos from {src_dir}...")
        sym_dir = (Path(self.web_dir) / "videos").absolute()
        # Replace a stale symlink from a previous run.
        if sym_dir.is_symlink():
            os.remove(sym_dir)
        sym_dir.symlink_to(src_dir)
    def visualize_ranking(self, sims, epoch, meta, nested_metrics):
        """Sample queries and render their top-k retrieved videos as HTML.

        :param sims: query-by-video similarity matrix (higher = more similar).
        :param epoch: current epoch, used for the frequency gate and headers.
        :param meta: dict with "raw_captions" and "paths" per video.
        :param nested_metrics: metrics dict containing "t2v_metrics".
        """
        if not (self.vis_vid_freq and epoch % self.vis_vid_freq == 0):
            return
        # Work with distances so argsort ranks most-similar first.
        dists = -sims
        np.random.seed(0)  # fixed seed: the same queries are sampled every epoch
        sorted_ranks = np.argsort(dists, axis=1)
        # Diagonal holds each query's distance to its ground-truth video.
        gt_dists = np.diag(dists)
        rankings = []
        vis_top_k = 5
        hide_gt = False
        # num_indep_samples = 1
        # random_seeds = np.arange(num_indep_samples)
        sample = np.random.choice(np.arange(dists.shape[0]), size=self.num_samples,
                                  replace=False)
        for ii in sample:
            ranked_idx = sorted_ranks[ii][:vis_top_k]
            gt_captions = meta["raw_captions"][ii]
            # if args.sample_single_gt_caption:
            #     gt_captions = np.random.choice(gt_captions, 1).tolist()
            datum = {
                "gt-sim": -gt_dists[ii],
                "gt-captions": gt_captions,
                "gt-rank": np.where(sorted_ranks[ii] == ii)[0][0],
                "gt-path": meta["paths"][ii],
                "top-k-sims": -dists[ii][ranked_idx],
                "top-k-paths": np.array(meta["paths"])[ranked_idx],
                "hide-gt": hide_gt,
            }
            rankings.append(datum)
        self.display_current_results(
            rankings,
            epoch=epoch,
            metrics=nested_metrics["t2v_metrics"],
        )
    def display_current_results(self, rankings, epoch, metrics):
        """Write the sampled rankings and retrieval metrics to the HTML page.

        :param rankings: per-query dicts built by visualize_ranking
        :param epoch: the current epoch (shown in the page header)
        :param metrics: retrieval metrics dict with R1/R5/R10/MedR keys
        """
        if not Path(self.web_dir).exists():
            Path(self.web_dir).mkdir(exist_ok=True, parents=True)
        print(f"updating webpage at {self.web_dir}")
        title = f"Experiment name = {self.name}"
        refresh = True
        if not refresh:
            print("DISABLING WEB PAGE REFRESH")
        webpage = html.HTML(web_dir=self.web_dir, title=title, refresh=refresh)
        msg = f"epoch [{epoch}] - {self.name}"
        webpage.add_header(msg)
        msg = (f"R1: {metrics["R1"]:.1f}, "
               f"R5: {metrics["R5"]:.1f}, "
               f"R10: {metrics["R10"]:.1f}, "
               f"MedR: {metrics["MedR"]}")
        webpage.add_header(msg)
        print(f"Top {len(rankings[0])} retreived videos at epoch: {epoch}")
        for ranking in rankings:
            vids, txts, links = [], [], []
            gt_vid_path = ranking["gt-path"]
            gt_captions = [" ".join(x) for x in ranking["gt-captions"]]
            gt_captions = "<br>".join(gt_captions)
            if ranking["hide-gt"]:
                # Blind mode: do not reveal which video is the ground truth.
                txts.append(gt_captions)
                links.append("hidden")
                vids.append("hidden")
            else:
                txt = (f"{gt_captions}<br><b>Rank: {ranking["gt-rank"]}, "
                       f"Sim: {ranking["gt-sim"]:.3f} [{Path(ranking["gt-path"]).stem}]")
                txts.append(txt)
                links.append(gt_vid_path)
                vids.append(gt_vid_path)
            for idx, (vid_path, sim) in enumerate(zip(ranking["top-k-paths"],
                                                      ranking["top-k-sims"])):
                vid_path = Path(vid_path)
                if ranking["hide-gt"]:
                    txt = f"choice: {idx}"
                else:
                    txt = f"<b>Rank: {idx}, Sim: {sim:.3f}, [{Path(vid_path).stem}]"
                txts.append(txt)
                vids.append(vid_path)
                links.append(vid_path)
            webpage.add_videos(vids, txts, links, width=200)
        print(f"added {len(vids)} videos")
        webpage.save()
| """A simple HTML visualizer.
It is based on the Cycle-GAN codebase:
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
"""
import os
import numpy as np
from pathlib import Path
from . import util, html
class Visualizer:
    """This class includes several functions that can display/save images.
    It uses a Python library 'visdom' for display, and a Python library 'dominate'
    (wrapped in 'HTML') for creating HTML files with images.
    """
    def __init__(self, exp_name, web_dir, src_video_dir, vis_vid_freq, num_samples=50):
        """Initialize the Visualizer class.

        Creates the web/image directories and symlinks the source videos into
        the web directory so the generated HTML can reference them.
        """
        self.name = exp_name
        self.web_dir = web_dir
        # Render rankings only every `vis_vid_freq` epochs (falsy disables).
        self.vis_vid_freq = vis_vid_freq
        self.img_dir = os.path.join(self.web_dir, "images")
        # Number of query rows sampled per visualisation.
        self.num_samples = num_samples
        print(f"create web directory {self.web_dir}...")
        util.mkdirs([self.web_dir, self.img_dir])
        src_dir = Path(src_video_dir).absolute()
        print(f"symlinking videos from {src_dir}...")
        sym_dir = (Path(self.web_dir) / "videos").absolute()
        # Replace a stale symlink from a previous run.
        if sym_dir.is_symlink():
            os.remove(sym_dir)
        sym_dir.symlink_to(src_dir)
    def visualize_ranking(self, sims, epoch, meta, nested_metrics):
        """Sample queries and render their top-k retrieved videos as HTML.

        :param sims: query-by-video similarity matrix (higher = more similar).
        :param epoch: current epoch, used for the frequency gate and headers.
        :param meta: dict with "raw_captions" and "paths" per video.
        :param nested_metrics: metrics dict containing "t2v_metrics".
        """
        if not (self.vis_vid_freq and epoch % self.vis_vid_freq == 0):
            return
        # Work with distances so argsort ranks most-similar first.
        dists = -sims
        np.random.seed(0)  # fixed seed: the same queries are sampled every epoch
        sorted_ranks = np.argsort(dists, axis=1)
        # Diagonal holds each query's distance to its ground-truth video.
        gt_dists = np.diag(dists)
        rankings = []
        vis_top_k = 5
        hide_gt = False
        # num_indep_samples = 1
        # random_seeds = np.arange(num_indep_samples)
        sample = np.random.choice(np.arange(dists.shape[0]), size=self.num_samples,
                                  replace=False)
        for ii in sample:
            ranked_idx = sorted_ranks[ii][:vis_top_k]
            gt_captions = meta["raw_captions"][ii]
            # if args.sample_single_gt_caption:
            #     gt_captions = np.random.choice(gt_captions, 1).tolist()
            datum = {
                "gt-sim": -gt_dists[ii],
                "gt-captions": gt_captions,
                "gt-rank": np.where(sorted_ranks[ii] == ii)[0][0],
                "gt-path": meta["paths"][ii],
                "top-k-sims": -dists[ii][ranked_idx],
                "top-k-paths": np.array(meta["paths"])[ranked_idx],
                "hide-gt": hide_gt,
            }
            rankings.append(datum)
        self.display_current_results(
            rankings,
            epoch=epoch,
            metrics=nested_metrics["t2v_metrics"],
        )
    def display_current_results(self, rankings, epoch, metrics):
        """Write the sampled rankings and retrieval metrics to the HTML page.

        :param rankings: per-query dicts built by visualize_ranking
        :param epoch: the current epoch (shown in the page header)
        :param metrics: retrieval metrics dict with R1/R5/R10/MedR keys
        """
        if not Path(self.web_dir).exists():
            Path(self.web_dir).mkdir(exist_ok=True, parents=True)
        print(f"updating webpage at {self.web_dir}")
        title = f"Experiment name = {self.name}"
        refresh = True
        if not refresh:
            print("DISABLING WEB PAGE REFRESH")
        webpage = html.HTML(web_dir=self.web_dir, title=title, refresh=refresh)
        msg = f"epoch [{epoch}] - {self.name}"
        webpage.add_header(msg)
        msg = (f"R1: {metrics['R1']:.1f}, "
               f"R5: {metrics['R5']:.1f}, "
               f"R10: {metrics['R10']:.1f}, "
               f"MedR: {metrics['MedR']}")
        webpage.add_header(msg)
        print(f"Top {len(rankings[0])} retreived videos at epoch: {epoch}")
        for ranking in rankings:
            vids, txts, links = [], [], []
            gt_vid_path = ranking["gt-path"]
            gt_captions = [" ".join(x) for x in ranking["gt-captions"]]
            gt_captions = "<br>".join(gt_captions)
            if ranking["hide-gt"]:
                # Blind mode: do not reveal which video is the ground truth.
                txts.append(gt_captions)
                links.append("hidden")
                vids.append("hidden")
            else:
                txt = (f"{gt_captions}<br><b>Rank: {ranking['gt-rank']}, "
                       f"Sim: {ranking['gt-sim']:.3f} [{Path(ranking['gt-path']).stem}]")
                txts.append(txt)
                links.append(gt_vid_path)
                vids.append(gt_vid_path)
            for idx, (vid_path, sim) in enumerate(zip(ranking["top-k-paths"],
                                                      ranking["top-k-sims"])):
                vid_path = Path(vid_path)
                if ranking["hide-gt"]:
                    txt = f"choice: {idx}"
                else:
                    txt = f"<b>Rank: {idx}, Sim: {sim:.3f}, [{Path(vid_path).stem}]"
                txts.append(txt)
                vids.append(vid_path)
                links.append(vid_path)
            webpage.add_videos(vids, txts, links, width=200)
        print(f"added {len(vids)} videos")
        webpage.save()
|
"""
Copyright (c) 2020 OneUpPotato
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sortedcollections import ValueSortedDict
from num2words import num2words
from textwrap import dedent
from utils.database import Session, WeeklyScores
class PointsHandler:
    """Tracks user scores stored in subreddit flairs plus a weekly-score table."""
    def __init__(self, bot) -> None:
        self.bot = bot
        self.flair_settings = self.bot.settings.general["flairs"]
        self.db_session = Session()
        # Load all of the scores.
        self.load_scores()
    def load_scores(self) -> None:
        """
        Loads all of the scores from the flairs on Reddit.
        """
        # Value-sorted so the highest scores are always at the end.
        self.scores = ValueSortedDict({})
        for flair in self.bot.reddit.main_subreddit.flair(limit=None):
            try:
                # Parse the numeric score out of the first word of the flair text.
                self.scores[flair["user"].name.lower()] = int(
                    "".join([char for char in flair["flair_text"].split(" ")[0] if char.isnumeric()])
                )
            except Exception as e:
                # Best-effort: skip flairs without a parseable score.
                print(e)
                pass
        print("POINTS: Loaded scores.")
    def update_score(self, username: str, amount: int) -> None:
        """
        Updates the score of a user.
        :param username: The user whose score is being updated.
        :param amount: The amount to modify their score by.
        """
        username = username.lower()
        # Check if the user has a score already.
        if username not in self.scores.keys():
            self.scores[username] = amount
            # First score for this user: apply the score flair template too.
            self.bot.reddit.main_subreddit.flair.set(
                username,
                self.flair_settings["user"]["score"]["text"].format(amount),
                flair_template_id=self.flair_settings["user"]["score"]["id"],
            )
        else:
            self.scores[username] += amount
            self.bot.reddit.main_subreddit.flair.set(
                username,
                self.flair_settings["user"]["score"]["text"].format(self.scores[username]),
            )
        # Update the weekly leaderboard stats.
        result = self.db_session.query(WeeklyScores).filter_by(username=username).first()
        if result is not None:
            result.score += amount
        else:
            self.db_session.add(
                WeeklyScores(
                    username=username,
                    score=amount,
                )
            )
        self.db_session.commit()
    def generate_leaderboard_table(self):
        """
        Generates a leaderboard table. This is used for the widget and sidebar.
        """
        leaderboard_table = dedent("""
        |**Place**|**Username**|**Points**|
        |:-:|:-:|:-:|
        """).strip()
        # For the top 10 users.
        for i, score_info in enumerate(reversed(self.scores.items()[-10:])):
            leaderboard_table += f"\n|{num2words((i + 1), to="ordinal_num")}|{score_info[0]}|{score_info[1]}|"
        return leaderboard_table
| """
Copyright (c) 2020 OneUpPotato
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sortedcollections import ValueSortedDict
from num2words import num2words
from textwrap import dedent
from utils.database import Session, WeeklyScores
class PointsHandler:
def __init__(self, bot) -> None:
self.bot = bot
self.flair_settings = self.bot.settings.general["flairs"]
self.db_session = Session()
# Load all of the scores.
self.load_scores()
def load_scores(self) -> None:
"""
Loads all of the scores from the flairs on Reddit.
"""
self.scores = ValueSortedDict({})
for flair in self.bot.reddit.main_subreddit.flair(limit=None):
try:
self.scores[flair["user"].name.lower()] = int(
"".join([char for char in flair["flair_text"].split(" ")[0] if char.isnumeric()])
)
except Exception as e:
print(e)
pass
print("POINTS: Loaded scores.")
def update_score(self, username: str, amount: int) -> None:
"""
Updates the score of a user.
:param username: The user whose score is being updated.
:param amount: The amount to modify their score by.
"""
username = username.lower()
# Check if the user has a score already.
if username not in self.scores.keys():
self.scores[username] = amount
self.bot.reddit.main_subreddit.flair.set(
username,
self.flair_settings["user"]["score"]["text"].format(amount),
flair_template_id=self.flair_settings["user"]["score"]["id"],
)
else:
self.scores[username] += amount
self.bot.reddit.main_subreddit.flair.set(
username,
self.flair_settings["user"]["score"]["text"].format(self.scores[username]),
)
# Update the weekly leaderboard stats.
result = self.db_session.query(WeeklyScores).filter_by(username=username).first()
if result is not None:
result.score += amount
else:
self.db_session.add(
WeeklyScores(
username=username,
score=amount,
)
)
self.db_session.commit()
def generate_leaderboard_table(self):
"""
Generates a leaderboard table. This is used for the widget and sidebar.
"""
leaderboard_table = dedent("""
|**Place**|**Username**|**Points**|
|:-:|:-:|:-:|
""").strip()
# For the top 10 users.
for i, score_info in enumerate(reversed(self.scores.items()[-10:])):
leaderboard_table += f"\n|{num2words((i + 1), to='ordinal_num')}|{score_info[0]}|{score_info[1]}|"
return leaderboard_table
|
"""
Methods need to work with Oracle DB(misc)
oracle = OracleDB()
connection, cursor = oracle.connect()
#This is space for you query's.
#For example:
all = Select().Dictionary().all(oracle, cursor))
oracle.close(connection, cursor)
"""
from datetime import datetime
import cx_Oracle as Oracle
from config import Misc
class OracleDB:
    """
    Oracle database connection wrapper.

    @author: jbaranski
    https://gist.github.com/jbaranski/6537b4075873984ea06e5fbe291f4441
    """

    def __init__(self, host=Misc.ORACLE_IP_MAIN, port=Misc.ORACLE_PORT, username=Misc.ORACLE_USER,
                 password=Misc.ORACLE_PASS, database=Misc.ORACLE_DB_NAME):
        # Connection parameters default to the MISC instance from config.
        self.connection = None
        self.cursor = None
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self.database = database

    def connect(self):
        """Connect to MISC.

        :return: a ``(connection, cursor)`` pair.
        :raises cx_Oracle.DatabaseError: re-raised after logging.  The
            previous version swallowed the error and implicitly returned
            ``None``, which made ``connection, cursor = oracle.connect()``
            call sites fail with an unrelated ``TypeError`` instead of the
            real database error.
        """
        try:
            connection = Oracle.connect(f"{self.username}/{self.password}@{self.host}:{self.port}/"
                                        f"{self.database}", encoding="UTF-8", nencoding="UTF-8")
        except Oracle.DatabaseError as e:
            print("There is a problem with Oracle", e)
            raise
        cursor = connection.cursor()
        # print("Connection successfully")
        return connection, cursor

    @staticmethod
    def make_dictionary_results(cursor):
        """Remake a tuple-answer to dictionary-answer.

        Tuple-answer
        [(1, 1, 'Test', 'static', 1000)]
        Dictionary-answer
        [{'CUSTOMER_ID': 1, 'PROFILE_ID': 1, 'PROFILE_NAME': 'Test', 'PROFILE_TYPE': 'static', 'CONFIG_ID': 1000}]
        """
        # Zip each fetched row with the column names from the cursor metadata.
        cursor.rowfactory = lambda *args: dict(zip([d[0] for d in cursor.description], args))
        result = cursor.fetchall()
        return result

    @staticmethod
    def close(connection, cursor):
        """Close connection and cursor (best effort; close errors are ignored)."""
        try:
            cursor.close()
            connection.close()
            # print("Disconnection successfully")
        except Oracle.DatabaseError:
            # Deliberately best-effort: a failure while closing is not actionable.
            pass
class Select:
    """Read-only query helpers for the SDP VPN tables.

    Every helper executes a SELECT on the supplied ``cursor`` and returns the
    rows as a list of dicts (see ``OracleDB.make_dictionary_results``).

    NOTE(review): all of these queries interpolate their arguments straight
    into the SQL via f-strings, which is vulnerable to SQL injection when a
    value comes from untrusted input — cx_Oracle bind variables (``:name``
    placeholders) would be safer. Confirm call sites before using these with
    user-supplied data.
    """

    @staticmethod
    def get_by_query(oracle, cursor, query=f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID=1"):
        """Run an arbitrary, manually supplied SELECT, e.g.
        SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID=1"""
        cursor.execute(query)
        result = oracle.make_dictionary_results(cursor)
        return result

    class Customers:
        # Queries against SDP.VPN_CUSTOMERS.

        @staticmethod
        def all(oracle, cursor):
            """SELECT * FROM SDP.VPN_CUSTOMERS"""
            cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def cid_equals(oracle, cursor, cid=0):
            """SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID=0"""
            cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={cid}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def unp_equals(oracle, cursor, unp=0):
            """SELECT * FROM SDP.VPN_CUSTOMERS WHERE UNP=0"""
            cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE UNP={unp}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def name_equals(oracle, cursor, name='Тестовый абонент'):
            """SELECT * FROM SDP.VPN_CUSTOMERS WHERE NAME='Тестовый абонент'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE NAME='{name}'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def name_like(oracle, cursor, name='Тестовый абонент'):
            """SELECT * FROM SDP.VPN_CUSTOMERS WHERE NAME LIKE '%Тестовый абонент%'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE NAME LIKE '%{name}%'")
            result = oracle.make_dictionary_results(cursor)
            return result

    class Profiles:
        # Queries against SDP.VPN_PROFILES.

        @staticmethod
        def all(oracle, cursor):
            """SELECT * FROM SDP.VPN_PROFILES ORDER BY CUSTOMER_ID"""
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES ORDER BY CUSTOMER_ID")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def customer_id_equals(oracle, cursor, customer_id=0):
            """SELECT * FROM SDP.VPN_PROFILES WHERE CUSTOMER_ID=0 ORDER BY PROFILE_ID"""
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE CUSTOMER_ID={customer_id} ORDER BY PROFILE_ID")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def profile_id_equals(oracle, cursor, profile_id=0):
            """SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID=0"""
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def profile_name_equals(oracle, cursor, profile_name='MinskTrans IoT'):
            """SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_NAME='MinskTrans IoT'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_NAME='{profile_name}'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def profile_name_like(oracle, cursor, profile_name='MinskTrans'):
            """SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_NAME LIKE 'MinskTrans'"""
            # NOTE(review): unlike the other *_like helpers, no '%' wildcards
            # are added around the value here, so this behaves like an exact
            # match — confirm whether '%{profile_name}%' was intended.
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_NAME LIKE '{profile_name}'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def profile_type_equals(oracle, cursor, profile_type='static'):
            """SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_TYPE='static'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_TYPE='{profile_type}'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def profile_type_like(oracle, cursor, profile_type='static'):
            """SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_TYPE LIKE '%static%'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_TYPE LIKE '%{profile_type}%'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def config_id_equals(oracle, cursor, config_id=0):
            """SELECT * FROM SDP.VPN_PROFILES WHERE CONFIG_ID=0"""
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE CONFIG_ID={config_id}")
            result = oracle.make_dictionary_results(cursor)
            return result

    class Dictionary:
        # Queries for the attribute dictionary.
        # NOTE(review): only ``all`` reads SDP.VPN_DICT — every other helper in
        # this class queries SDP.VPN_PROFILES. That looks like a copy-paste
        # slip; confirm whether VPN_DICT was intended.

        @staticmethod
        def all(oracle, cursor):
            """SELECT * FROM SDP.VPN_DICT ORDER BY CUSTOMER_ID"""
            cursor.execute(f"SELECT * FROM SDP.VPN_DICT ORDER BY CUSTOMER_ID")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def customer_id_equals(oracle, cursor, customer_id=0):
            """SELECT * FROM SDP.VPN_PROFILES WHERE CUSTOMER_ID=0 ORDER BY PROFILE_ID"""
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE CUSTOMER_ID={customer_id} ORDER BY PROFILE_ID")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def profile_id_equals(oracle, cursor, profile_id=0):
            """SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID=0"""
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def attribute_name_equals(oracle, cursor, attribute_name='default'):
            """SELECT * FROM SDP.VPN_PROFILES WHERE ATTRIBUTE_NAME='Name'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE ATTRIBUTE_NAME='{attribute_name}'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def attribute_name_like(oracle, cursor, attribute_name='default'):
            """SELECT * FROM SDP.VPN_PROFILES WHERE ATTRIBUTE_NAME LIKE '%Name%'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE ATTRIBUTE_NAME LIKE '%{attribute_name}%'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def attribute_value_equals(oracle, cursor, attribute_value='default'):
            """SELECT * FROM SDP.VPN_PROFILES WHERE ATTRIBUTE_VALUE='Value'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE ATTRIBUTE_VALUE='{attribute_value}'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def attribute_value_like(oracle, cursor, attribute_value='default'):
            """SELECT * FROM SDP.VPN_PROFILES WHERE ATTRIBUTE_VALUE LIKE '%Name%'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE ATTRIBUTE_VALUE LIKE '%{attribute_value}%'")
            result = oracle.make_dictionary_results(cursor)
            return result

    class Attributes:
        # Queries against SDP.VPN_ATTRIBUTES.

        @staticmethod
        def all(oracle, cursor):
            """SELECT * FROM SDP.VPN_ATTRIBUTES ORDER BY CONFIG_ID"""
            cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES ORDER BY CONFIG_ID")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def value_equals(oracle, cursor, value):
            """SELECT * FROM SDP.VPN_ATTRIBUTES WHERE VALUE='Gi-1' ORDER BY CONFIG_ID"""
            cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE VALUE='{value}' ORDER BY CONFIG_ID")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def value_like(oracle, cursor, value):
            """SELECT * FROM SDP.VPN_ATTRIBUTES WHERE VALUE LIKE '%Gi-1%' ORDER BY CONFIG_ID"""
            cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE VALUE LIKE '%{value}%' ORDER BY CONFIG_ID")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def config_id_equals(oracle, cursor, config_id):
            """SELECT * FROM SDP.VPN_ATTRIBUTES WHERE CONFIG_ID=1"""
            cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE CONFIG_ID={config_id}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def att_id_equals(oracle, cursor, att_id):
            """SELECT * FROM SDP.VPN_ATTRIBUTES WHERE ATT_ID=1"""
            cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE ATT_ID={att_id}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def att_name_equals(oracle, cursor, att_name):
            """SELECT * FROM SDP.VPN_ATTRIBUTES WHERE ATT_NAME='Framed-Pool'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE ATT_NAME='{att_name}'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def att_name_like(oracle, cursor, att_name):
            """SELECT * FROM SDP.VPN_ATTRIBUTES WHERE ATT_NAME LIKE '%Framed-Pool%'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE ATT_NAME LIKE '%{att_name}%'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def config_id_and_att_name_equals(oracle, cursor, config_id=1, att_name=''):
            """SELECT * FROM SDP.VPN_ATTRIBUTES WHERE CONFIG_ID=1 AND ATT_NAME='Framed-IP-Address'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES "
                           f"WHERE CONFIG_ID={config_id} AND ATT_NAME='{att_name}'")
            result = oracle.make_dictionary_results(cursor)
            return result

    class Users:
        # Queries against SDP.VPN_USERS.

        @staticmethod
        def all(oracle, cursor, ):
            """SELECT * FROM SDP.VPN_USERS ORDER BY CUSTOMER_ID"""
            cursor.execute(f"SELECT * FROM SDP.VPN_USERS ORDER BY CUSTOMER_ID")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def username_equals(oracle, cursor, username='375292222222'):
            """SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='375292222222'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def username_like(oracle, cursor, username='3752922711'):
            """SELECT * FROM SDP.VPN_USERS WHERE USER_NAME LIKE '%3752922711%'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME LIKE '%{username}%'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def password_equals(oracle, cursor, password='002pass'):
            """SELECT * FROM SDP.VPN_USERS WHERE PASSWORD='002pass'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE PASSWORD='{password}'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def password_like(oracle, cursor, password='pass'):
            """SELECT * FROM SDP.VPN_USERS WHERE PASSWORD LIKE '%pass%'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE PASSWORD LIKE '%{password}%'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def customer_id_equals(oracle, cursor, customer_id=1):
            """SELECT * FROM SDP.VPN_USERS WHERE CUSTOMER_ID=1 ORDER BY PROFILE_ID"""
            cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE CUSTOMER_ID={customer_id} ORDER BY PROFILE_ID")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def config_id_equals(oracle, cursor, config_id=1):
            """SELECT * FROM SDP.VPN_USERS WHERE CONFIG_ID=1"""
            cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE CONFIG_ID={config_id}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def profile_id_equals(oracle, cursor, profile_id=1):
            """SELECT * FROM SDP.VPN_USERS WHERE PROFILE_ID=1 ORDER BY CONFIG_ID"""
            cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE PROFILE_ID={profile_id} ORDER BY CONFIG_ID")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def profile_id_and_config_id_equals(oracle, cursor, profile_id=1, config_id=1):
            """SELECT * FROM SDP.VPN_USERS WHERE PROFILE_ID=1 AND CONFIG_ID=1"""
            # NOTE(review): the two f-string pieces concatenate without a
            # space, producing "...CONFIG_ID=<n>ORDER BY CONFIG_ID" — the
            # resulting SQL looks malformed; confirm and add the space.
            cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE PROFILE_ID={profile_id} AND CONFIG_ID={config_id}"
                           f"ORDER BY CONFIG_ID")
            result = oracle.make_dictionary_results(cursor)
            return result

    class Contexts:
        # Queries against SDP.VPN_CONTEXTS.

        @staticmethod
        def all(oracle, cursor):
            """SELECT * FROM SDP.VPN_CONTEXTS ORDER BY CONTEXT_ID"""
            cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS ORDER BY CONTEXT_ID")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def context_id_equals(oracle, cursor, context_id=0):
            """SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID=1"""
            cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def context_equals(oracle, cursor, context='Gi-1'):
            """SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT='Gi-1'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT='{context}'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def context_like(oracle, cursor, context='Gi-1'):
            """SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT LIKE '%Gi-1%'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT LIKE '%{context}%'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def vrf_count_equals(oracle, cursor, vrf_count=255):
            """SELECT * FROM SDP.VPN_CONTEXTS WHERE VRF_COUNT=255"""
            cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE VRF_COUNT={vrf_count}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def full_equals(oracle, cursor, is_full=0):
            """SELECT * FROM SDP.VPN_CONTEXTS WHERE IS_FULL=0"""
            cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE IS_FULL={is_full}")
            result = oracle.make_dictionary_results(cursor)
            return result

    class VRFs:
        # Queries against SDP.VPN_VRFS.

        @staticmethod
        def all(oracle, cursor):
            """SELECT * FROM SDP.VPN_VRFS ORDER BY CONTEXT_ID"""
            cursor.execute(f"SELECT * FROM SDP.VPN_VRFS ORDER BY CONTEXT_ID")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def context_id_equals(oracle, cursor, context_id=0):
            """SELECT * FROM SDP.VPN_VRFS WHERE CONTEXT_ID=1"""
            cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE CONTEXT_ID={context_id}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def rt_vrf_equals(oracle, cursor, rt_vrf=0):
            """SELECT * FROM SDP.VPN_VRFS WHERE RT_VRF=1"""
            cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE RT_VRF={rt_vrf}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def vrf_name_equals(oracle, cursor, vrf_name='10000_kgb'):
            """SELECT * FROM SDP.VPN_VRFS WHERE VRF_NAME='10000_kgb'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE VRF_NAME='{vrf_name}'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def vrf_name_like(oracle, cursor, vrf_name='10000_kgb'):
            """SELECT * FROM SDP.VPN_VRFS WHERE VRF_NAME LIKE '%10000_kgb%'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE VRF_NAME LIKE '%{vrf_name}%'")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def rd_equals(oracle, cursor, rd=10000):
            """SELECT * FROM SDP.VPN_VRFS WHERE RD=10000"""
            cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE RD={rd}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def rt_equals(oracle, cursor, rt='10000'):
            """SELECT * FROM SDP.VPN_VRFS WHERE RT=10000"""
            # NOTE(review): RT is interpolated unquoted here, while elsewhere
            # (e.g. Insert.VRFs) it is treated as a string like '10000, 10001'
            # — confirm whether quoting is needed.
            cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE RT={rt}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def rt_like(oracle, cursor, rt='10000'):
            """SELECT * FROM SDP.VPN_VRFS WHERE RT LIKE '%10000%'"""
            cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE RT LIKE '%{rt}%'")
            result = oracle.make_dictionary_results(cursor)
            return result
class Insert:
    """INSERT helpers for the SDP VPN tables.

    Each helper inserts a row, commits on the supplied ``connection``, then
    re-selects the inserted row and returns it as a list of dicts (see
    ``OracleDB.make_dictionary_results``).

    NOTE(review): values are interpolated into the SQL via f-strings — open
    to SQL injection for untrusted input; prefer cx_Oracle bind variables.
    """

    @staticmethod
    def insert_by_query(connection, cursor, query=f"INSERT INTO SDP.VPN_CUSTOMERS "
                                                  f"(CID, NAME, IS_TEST) VALUES (0, 'Name_test', 1)"):
        """Run a manually supplied INSERT and commit it.
        Returns the cursor, not the inserted row."""
        cursor.execute(query)
        connection.commit()
        return cursor

    class Customers:
        @staticmethod
        def all(oracle, connection, cursor, cid, unp, name, address, url='', contact_name='', contact_info='',
                is_test=0):
            """INSERT INTO SDP.VPN_CUSTOMERS
            (CID, UNP, NAME, ADDRESS, URL, CONTACT_NAME, CONTACT_INFO, ACTIVATION_DATE, IS_TEST)
            VALUES
            (0, 0, 'Name_test', '', '', '', '', to_date('2020-05-20 14:02:46', 'yyyy-mm-dd hh24:mi:ss'), 1)"""
            # ACTIVATION_DATE is stamped with the current local time.
            activation_date = str(datetime.now().isoformat(' ', 'seconds'))
            cursor.execute(f"INSERT INTO SDP.VPN_CUSTOMERS "
                           f"(CID, UNP, NAME, ADDRESS, URL, CONTACT_NAME, CONTACT_INFO, ACTIVATION_DATE, IS_TEST) "
                           f"VALUES "
                           f"({cid}, {unp}, '{name}', '{address}', '{url}', '{contact_name}', '{contact_info}', "
                           f"to_date('{activation_date}', 'yyyy-mm-dd hh24:mi:ss'), {is_test})")
            connection.commit()
            # Return the row just inserted.
            cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={cid}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def minimal(oracle, connection, cursor, cid, unp, name, is_test=0):
            """Same as ``all`` but with the optional text columns left empty.

            INSERT INTO SDP.VPN_CUSTOMERS
            (CID, UNP, NAME, ADDRESS, URL, CONTACT_NAME, CONTACT_INFO, ACTIVATION_DATE, IS_TEST)
            VALUES
            (0, 0, 'Name_test', '', '', '', '', to_date('2020-05-20 14:02:46', 'yyyy-mm-dd hh24:mi:ss'), 1)"""
            activation_date = str(datetime.now().isoformat(' ', 'seconds'))
            cursor.execute(f"INSERT INTO SDP.VPN_CUSTOMERS "
                           f"(CID, UNP, NAME, ADDRESS, URL, CONTACT_NAME, CONTACT_INFO, ACTIVATION_DATE, IS_TEST) "
                           f"VALUES "
                           f"({cid}, {unp}, '{name}', '', '', '', '', "
                           f"to_date('{activation_date}', 'yyyy-mm-dd hh24:mi:ss'), {is_test})")
            connection.commit()
            cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={cid}")
            result = oracle.make_dictionary_results(cursor)
            return result

    class Profiles:
        @staticmethod
        def all(oracle, connection, cursor, customer_id, profile_id, profile_name, config_id, profile_type='static'):
            """INSERT INTO SDP.VPN_PROFILES (CUSTOMER_ID, PROFILE_ID, PROFILE_NAME, PROFILE_TYPE, CONFIG_ID)
            VALUES (1, 1, 'Name', 'static', 1)"""
            cursor.execute(f"INSERT INTO SDP.VPN_PROFILES "
                           f"(CUSTOMER_ID, PROFILE_ID, PROFILE_NAME, PROFILE_TYPE, CONFIG_ID) "
                           f"VALUES "
                           f"({customer_id}, {profile_id}, '{profile_name}', '{profile_type}', {config_id})")
            connection.commit()
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
            result = oracle.make_dictionary_results(cursor)
            return result

        @staticmethod
        def minimal(oracle, connection, cursor, profile_id, profile_name, config_id, profile_type='static'):
            """Same as ``all`` but with CUSTOMER_ID fixed to 0."""
            cursor.execute(f"INSERT INTO SDP.VPN_PROFILES "
                           f"(CUSTOMER_ID, PROFILE_ID, PROFILE_NAME, PROFILE_TYPE, CONFIG_ID) "
                           f"VALUES "
                           f"(0, {profile_id}, '{profile_name}', '{profile_type}', {config_id})")
            connection.commit()
            # NOTE(review): this re-select filters CUSTOMER_ID by the
            # *profile_id* value, while the row was inserted with
            # CUSTOMER_ID=0 — probably should be WHERE PROFILE_ID={profile_id};
            # confirm.
            cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE CUSTOMER_ID={profile_id}")
            result = oracle.make_dictionary_results(cursor)
            return result

    class Dictionary:
        @staticmethod
        def all(oracle, connection, cursor, customer_id, profile_id, attribute_name, attribute_value):
            """INSERT INTO SDP.VPN_DICT (CUSTOMER_ID, PROFILE_ID, ATTRIBUTE_NAME, ATTRIBUTE_VALUE)
            VALUES (1, 1, 'SN-VPN', 'Value')

            Parameters:
            :param oracle: object oracle
            :param connection: object connection
            :param cursor: object cursor
            :param customer_id: 1
            :param profile_id: 1
            :param attribute_name: 'SN-VPN'
            :param attribute_value: 'Value'
            :type customer_id: int
            :type profile_id: int
            :type attribute_name: str
            :type attribute_value: str

            Returns:
            :return: [{Select after insert}]
            :rtype: list"""
            cursor.execute(f"INSERT INTO SDP.VPN_DICT "
                           f"(CUSTOMER_ID, PROFILE_ID, ATTRIBUTE_NAME, ATTRIBUTE_VALUE) "
                           f"VALUES "
                           f"({customer_id}, {profile_id}, '{attribute_name}', '{attribute_value}')")
            connection.commit()
            cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
            result = oracle.make_dictionary_results(cursor)
            return result

    class Attributes:
        @staticmethod
        def all(oracle, connection, cursor, value, config_id, att_name):
            """INSERT INTO SDP.VPN_ATTRIBUTES (VALUE, CONFIG_ID, ATT_ID, ATT_NAME)
            VALUES ('', 1, 3, 'SN-VPN')

            Parameters:
            :param oracle: object oracle
            :param connection: object connection
            :param cursor: object cursor
            :param value: ''
            :param config_id: 1
            :param att_name: 'SN-VPN'
            :type value: str
            :type config_id: int
            :type att_name: str

            Returns:
            :return: [{Select after insert}] or {"error": "Error text"}
            :rtype: list"""
            # The numeric attribute id is resolved from the dictionary table;
            # an unknown att_name yields an empty result and hence IndexError.
            att_id = cursor.execute(f"SELECT ID FROM SDP.VPN_ATTRIBUTE_DIC WHERE NAME='{att_name}'").fetchall()
            try:
                cursor.execute(f"INSERT INTO SDP.VPN_ATTRIBUTES (VALUE, CONFIG_ID, ATT_ID, ATT_NAME) "
                               f"VALUES ('{value}', {config_id}, {att_id[0][0]}, '{att_name}')")
            except IndexError as e:
                return {"error": f"Error: {e}: with att_id={att_id}"}
            connection.commit()
            cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES "
                           f"WHERE VALUE='{value}' AND CONFIG_ID={config_id} AND ATT_ID={att_id[0][0]}")
            result = oracle.make_dictionary_results(cursor)
            return result

    class Users:
        @staticmethod
        def all(oracle, connection, cursor, msisdn, customer_id, config_id, profile_id, password=''):
            """INSERT INTO SDP.VPN_USERS (USER_NAME, PASSWORD, CUSTOMER_ID, CONFIG_ID, PROFILE_ID)
            VALUES ('375291234567', '', 1, 1, 1)

            Parameters:
            :param oracle: object oracle
            :param connection: object connection
            :param cursor: object cursor
            :param msisdn: '375291234567'
            :param customer_id: 1
            :param config_id: 1
            :param profile_id: 1
            :param password: '12345qwerty'
            :type msisdn: str
            :type customer_id: int
            :type config_id: int
            :type profile_id: int
            :type password: str

            Returns:
            :return: [{Select after insert}]
            :rtype: list"""
            # Accept ints for convenience; USER_NAME is a string column.
            if type(msisdn) is not str:
                msisdn = str(msisdn)
            cursor.execute(f"INSERT INTO SDP.VPN_USERS (USER_NAME, PASSWORD, CUSTOMER_ID, CONFIG_ID, PROFILE_ID) "
                           f"VALUES ('{msisdn}', '{password}', {customer_id}, {config_id}, {profile_id})")
            connection.commit()
            cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{msisdn}' AND CONFIG_ID={config_id}")
            result = oracle.make_dictionary_results(cursor)
            return result

    class Contexts:
        @staticmethod
        def all(oracle, connection, cursor, context_id, context, vrf_count, is_full):
            """INSERT INTO SDP.VPN_CONTEXTS (CONTEXT_ID, CONTEXT, VRF_COUNT, IS_FULL)
            VALUES (1, 'Gi-3', 245, 0)

            Parameters:
            :param oracle: object oracle
            :param connection: object connection
            :param cursor: object cursor
            :param context_id: 1
            :param context: 'Gi-3'
            :param vrf_count: 245
            :param is_full: 0
            :type context_id: int
            :type context: str
            :type vrf_count: int
            :type is_full: int

            Returns:
            :return: [{Select after insert}]
            :rtype: list"""
            cursor.execute(f"INSERT INTO SDP.VPN_CONTEXTS (CONTEXT_ID, CONTEXT, VRF_COUNT, IS_FULL) "
                           f"VALUES ({context_id}, '{context}', {vrf_count}, {is_full})")
            connection.commit()
            cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}")
            result = oracle.make_dictionary_results(cursor)
            return result

    class VRFs:
        @staticmethod
        def all(oracle, connection, cursor, context_id, rt_vrf, vrf_name, rd, rt):
            """INSERT INTO SDP.VPN_VRFS (CONTEXT_ID, RT_VRF, VRF_NAME, RD, RT)
            VALUES (1, 1, '10000_test', 10000, '10000, 10001')

            Parameters:
            :param oracle: object oracle
            :param connection: object connection
            :param cursor: object cursor
            :param context_id: 1
            :param rt_vrf: 10000
            :param vrf_name: '10000_test'
            :param rd: 10000
            :param rt: '10000, 10001'
            :type context_id: int
            :type rt_vrf: int
            :type vrf_name: str
            :type rd: int
            :type rt: str

            Returns:
            :return: [{Select after insert}]
            :rtype: list"""
            cursor.execute(f"INSERT INTO SDP.VPN_VRFS (CONTEXT_ID, RT_VRF, VRF_NAME, RD, RT) "
                           f"VALUES ({context_id}, {rt_vrf}, '{vrf_name}', {rd}, '{rt}')")
            connection.commit()
            cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE RD={rd}")
            result = oracle.make_dictionary_results(cursor)
            return result
class Update:
@staticmethod
def update_by_query(cursor, query=f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID=1"):
"""Input manually query
SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID=1"""
cursor.execute(query)
return query
class Customers:
@staticmethod
def all(oracle, connection, cursor, unp, name, address, url, contact_name, contact_info, customer_id):
"""UPDATE SDP.VPN_CUSTOMERS SET UNP=123456789,NAME='Test',ADDRESS='Washington',URL='www',
CONTACT_NAME='John',CONTACT_INFO='375291788765',
EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss'),
WHERE CID=1
Parameters:
:param oracle: object oracle
:param connection: object connection
:param cursor: object cursor
:param unp: 123456789
:param name: 'Test'
:param address: 'Washington'
:param url: 'www'
:param contact_name: 'John'
:param contact_info: '375291788765'
:param customer_id: 1
:type unp: int
:type name: str
:type address: str
:type url: str
:type contact_name: str
:type contact_info: str
:type customer_id: int
Returns:
:return: {"old": [Select before delete], "new": [Select after delete]}
:rtype: dict"""
cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
old = oracle.make_dictionary_results(cursor)
expiration_date = str(datetime.now().isoformat(' ', 'seconds'))
cursor.execute(f"UPDATE SDP.VPN_CUSTOMERS "
f"SET UNP={unp},"
f"NAME='{name}',"
f"ADDRESS='{address}',"
f"URL='{url}',"
f"CONTACT_NAME='{contact_name}',"
f"CONTACT_INFO='{contact_info}',"
f"EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss'),"
f"WHERE CID={customer_id}")
connection.commit()
cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
new = oracle.make_dictionary_results(cursor)
return {"old": old, "new": new}
@staticmethod
def unp_cid_equals(oracle, connection, cursor, unp, customer_id):
"""UPDATE SDP.VPN_CUSTOMERS SET UNP=123456789,
EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss')
WHERE CID=1
Parameters:
:param oracle: object oracle
:param connection: object connection
:param cursor: object cursor
:param unp: 123456789
:param customer_id: 1
:type unp: str
:type customer_id: int
Returns:
:return: {"old": [Select before delete], "new": [Select after delete]}
:rtype: dict"""
cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
old = oracle.make_dictionary_results(cursor)
expiration_date = str(datetime.now().isoformat(' ', 'seconds'))
cursor.execute(f"UPDATE SDP.VPN_CUSTOMERS "
f"SET UNP={unp},"
f"EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss') "
f"WHERE CID={customer_id}")
connection.commit()
cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
new = oracle.make_dictionary_results(cursor)
return {"old": old, "new": new}
@staticmethod
def name_cid_equals(oracle, connection, cursor, name, customer_id):
"""UPDATE SDP.VPN_CUSTOMERS SET NAME='Test',
EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss')
WHERE CID=1
Parameters:
:param oracle: object oracle
:param connection: object connection
:param cursor: object cursor
:param name: 'Test'
:param customer_id: 1
:type name: str
:type customer_id: int
Returns:
:return: {"old": [Select before delete], "new": [Select after delete]}
:rtype: dict"""
cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
old = oracle.make_dictionary_results(cursor)
expiration_date = str(datetime.now().isoformat(' ', 'seconds'))
cursor.execute(f"UPDATE SDP.VPN_CUSTOMERS "
f"SET NAME='{name}',"
f"EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss') "
f"WHERE CID={customer_id}")
connection.commit()
cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
new = oracle.make_dictionary_results(cursor)
return {"old": old, "new": new}
@staticmethod
def address_cid_equals(oracle, connection, cursor, address, customer_id):
"""UPDATE SDP.VPN_CUSTOMERS SET ADDRESS='Washington',
EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss')
WHERE CID=1
Parameters:
:param oracle: object oracle
:param connection: object connection
:param cursor: object cursor
:param address: 'Washington'
:param customer_id: 1
:type address: str
:type customer_id: int
Returns:
:return: {"old": [Select before delete], "new": [Select after delete]}
:rtype: dict"""
cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
old = oracle.make_dictionary_results(cursor)
expiration_date = str(datetime.now().isoformat(' ', 'seconds'))
cursor.execute(f"UPDATE SDP.VPN_CUSTOMERS "
f"SET ADDRESS='{address}',"
f"EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss') "
f"WHERE CID={customer_id}")
connection.commit()
cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
new = oracle.make_dictionary_results(cursor)
return {"old": old, "new": new}
@staticmethod
def url_cid_equals(oracle, connection, cursor, url, customer_id):
"""UPDATE SDP.VPN_CUSTOMERS SET URL='{url}',
EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss')
WHERE CID=1
Parameters:
:param oracle: object oracle
:param connection: object connection
:param cursor: object cursor
:param url: 'www'
:param customer_id: 1
:type url: str
:type customer_id: int
Returns:
:return: {"old": [Select before delete], "new": [Select after delete]}
:rtype: dict"""
cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
old = oracle.make_dictionary_results(cursor)
expiration_date = str(datetime.now().isoformat(' ', 'seconds'))
cursor.execute(f"UPDATE SDP.VPN_CUSTOMERS "
f"SET URL='{url}',"
f"EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss') "
f"WHERE CID={customer_id}")
connection.commit()
cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
new = oracle.make_dictionary_results(cursor)
return {"old": old, "new": new}
@staticmethod
def contact_name_cid_equals(oracle, connection, cursor, contact_name, customer_id):
    """Set CONTACT_NAME and refresh EXPIRATION_DATE for one customer row.

    Executes:
        UPDATE SDP.VPN_CUSTOMERS SET CONTACT_NAME='...',
        EXPIRATION_DATE=to_date('<now>', 'yyyy-mm-dd hh24:mi:ss') WHERE CID=...

    :param oracle: object oracle
    :param connection: object connection
    :param cursor: object cursor
    :param contact_name: 'John W.V.'
    :param customer_id: 1
    :type contact_name: str
    :type customer_id: int
    :return: {"old": [rows before update], "new": [rows after update]}
    :rtype: dict"""
    snapshot_sql = f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}"
    cursor.execute(snapshot_sql)
    before = oracle.make_dictionary_results(cursor)
    stamp = datetime.now().isoformat(sep=' ', timespec='seconds')
    cursor.execute(
        f"UPDATE SDP.VPN_CUSTOMERS SET CONTACT_NAME='{contact_name}',"
        f"EXPIRATION_DATE=to_date('{stamp}', 'yyyy-mm-dd hh24:mi:ss') "
        f"WHERE CID={customer_id}")
    connection.commit()
    cursor.execute(snapshot_sql)
    after = oracle.make_dictionary_results(cursor)
    return {"old": before, "new": after}
@staticmethod
def contact_info_cid_equals(oracle, connection, cursor, contact_info, customer_id):
    """Set CONTACT_INFO and refresh EXPIRATION_DATE for one customer row.

    Executes:
        UPDATE SDP.VPN_CUSTOMERS SET CONTACT_INFO='...',
        EXPIRATION_DATE=to_date('<now>', 'yyyy-mm-dd hh24:mi:ss') WHERE CID=...

    :param oracle: object oracle
    :param connection: object connection
    :param cursor: object cursor
    :param contact_info: 'Full name company'
    :param customer_id: 1
    :type contact_info: str
    :type customer_id: int
    :return: {"old": [rows before update], "new": [rows after update]}
    :rtype: dict"""
    snapshot_sql = f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}"
    cursor.execute(snapshot_sql)
    before = oracle.make_dictionary_results(cursor)
    stamp = datetime.now().isoformat(sep=' ', timespec='seconds')
    cursor.execute(
        f"UPDATE SDP.VPN_CUSTOMERS SET CONTACT_INFO='{contact_info}',"
        f"EXPIRATION_DATE=to_date('{stamp}', 'yyyy-mm-dd hh24:mi:ss') "
        f"WHERE CID={customer_id}")
    connection.commit()
    cursor.execute(snapshot_sql)
    after = oracle.make_dictionary_results(cursor)
    return {"old": before, "new": after}
class Profiles:
    """Update helpers for the SDP.VPN_PROFILES table.

    All methods take the shared oracle/connection/cursor objects as their
    first three arguments and return
    {"old": [rows before update], "new": [rows after update]}.
    """

    @staticmethod
    def all(oracle, connection, cursor, profile_id, customer_id, profile_name, profile_type, config_id):
        """Rewrite every mutable column of one profile row.

        :param profile_id: primary key of the row to change, e.g. 1
        :param customer_id: e.g. 1
        :param profile_name: e.g. 'Test'
        :param profile_type: e.g. 'static'
        :param config_id: e.g. 1
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_PROFILES SET CUSTOMER_ID={customer_id},PROFILE_NAME='{profile_name}',PROFILE_TYPE='{profile_type}',CONFIG_ID={config_id} WHERE PROFILE_ID={profile_id}")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def customer_id_pid_equals(oracle, connection, cursor, profile_id, customer_id):
        """Set CUSTOMER_ID for one profile row.

        :param profile_id: e.g. 1
        :param customer_id: e.g. 1
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_PROFILES SET CUSTOMER_ID={customer_id} WHERE PROFILE_ID={profile_id}")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def profile_name_pid_equals(oracle, connection, cursor, profile_id, profile_name):
        """Set PROFILE_NAME for one profile row.

        :param profile_id: e.g. 1
        :param profile_name: e.g. 'Test'
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_PROFILES SET PROFILE_NAME='{profile_name}' WHERE PROFILE_ID={profile_id}")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def profile_type_pid_equals(oracle, connection, cursor, profile_id, profile_type):
        """Set PROFILE_TYPE for one profile row.

        :param profile_id: e.g. 1
        :param profile_type: e.g. 'static'
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_PROFILES SET PROFILE_TYPE='{profile_type}' WHERE PROFILE_ID={profile_id}")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def config_id_pid_equals(oracle, connection, cursor, profile_id, config_id):
        """Set CONFIG_ID for one profile row.

        :param profile_id: e.g. 1
        :param config_id: e.g. 1
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_PROFILES SET CONFIG_ID={config_id} WHERE PROFILE_ID={profile_id}")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}
class Dictionary:
    """Update helpers for the SDP.VPN_DICT attribute table.

    Each method returns {"old": [rows before update], "new": [rows after
    update]} so the caller can verify the change.

    Bug fix: the before/after snapshots previously selected from
    SDP.VPN_PROFILES while the UPDATE targeted SDP.VPN_DICT, so "old" and
    "new" never reflected the modified rows. They now read SDP.VPN_DICT.
    """

    @staticmethod
    def all(oracle, connection, cursor, profile_id, customer_id, attribute_name, attribute_value):
        """UPDATE SDP.VPN_DICT SET CUSTOMER_ID=1,ATTRIBUTE_VALUE=''
        WHERE PROFILE_ID=1 AND ATTRIBUTE_NAME=''

        Note: ATTRIBUTE_NAME is only part of the WHERE filter; it is not
        modified by this method.

        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_id: 1
        :param customer_id: 1
        :param attribute_name: ''
        :param attribute_value: ''
        :type profile_id: int
        :type customer_id: int
        :type attribute_name: str
        :type attribute_value: str
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_DICT "
                       f"SET CUSTOMER_ID={customer_id},"
                       f"ATTRIBUTE_VALUE='{attribute_value}' "
                       f"WHERE PROFILE_ID={profile_id} AND ATTRIBUTE_NAME='{attribute_name}'")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def customer_id_pid_equals(oracle, connection, cursor, profile_id, customer_id):
        """UPDATE SDP.VPN_DICT SET CUSTOMER_ID=1 WHERE PROFILE_ID=1

        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_id: 1
        :param customer_id: 1
        :type profile_id: int
        :type customer_id: int
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_DICT SET CUSTOMER_ID={customer_id} WHERE PROFILE_ID={profile_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def name_and_value_pid_equals(oracle, connection, cursor, profile_id, attribute_name, attribute_value):
        """UPDATE SDP.VPN_DICT SET ATTRIBUTE_VALUE=''
        WHERE PROFILE_ID=1 and ATTRIBUTE_NAME=''

        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_id: 1
        :param attribute_name: ''
        :param attribute_value: ''
        :type profile_id: int
        :type attribute_name: str
        :type attribute_value: str
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_DICT "
                       f"SET ATTRIBUTE_VALUE='{attribute_value}' "
                       f"WHERE PROFILE_ID={profile_id} and ATTRIBUTE_NAME='{attribute_name}'")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class Attributes:
    """Update helpers for the SDP.VPN_ATTRIBUTES table."""

    @staticmethod
    def all(oracle, connection, cursor, att_id, value, profile_id, config_id):
        """UPDATE SDP.VPN_ATTRIBUTES SET VALUE='172.24.202.111' WHERE CONFIG_ID=1 AND ATT_ID=1

        NOTE(review): the before/after snapshots are filtered by PROFILE_ID
        while the UPDATE is filtered by CONFIG_ID and ATT_ID, so the
        snapshots cover the profile's whole attribute set, not only the row
        that changed — confirm this asymmetry is intended.

        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param att_id: 1
        :param value: '172.24.202.111'
        :param profile_id: 1
        :param config_id: 1
        :type att_id: int
        :type value: str
        :type profile_id: int
        :type config_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE PROFILE_ID={profile_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_ATTRIBUTES SET VALUE='{value}' "
                       f"WHERE CONFIG_ID={config_id} AND ATT_ID={att_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE PROFILE_ID={profile_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class Users:
    """Update helpers for the SDP.VPN_USERS table.

    All methods take the shared oracle/connection/cursor objects as their
    first three arguments and return
    {"old": [rows before update], "new": [rows after update]}.
    """

    @staticmethod
    def all(oracle, connection, cursor, password, customer_id, config_id, profile_id, username):
        """Rewrite every mutable column of one user row, keyed by USER_NAME.

        :param password: e.g. '12345qwerty'
        :param customer_id: e.g. 1
        :param config_id: e.g. 1
        :param profile_id: e.g. 1
        :param username: e.g. '375291234567'
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_USERS SET PASSWORD='{password}',CUSTOMER_ID={customer_id},CONFIG_ID={config_id},PROFILE_ID={profile_id} WHERE USER_NAME='{username}'")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def username(oracle, connection, cursor, old_username, new_username):
        """Rename a user: USER_NAME old_username -> new_username.

        The "old" snapshot is keyed by the old name and the "new" snapshot
        by the new one.

        :param old_username: e.g. '375291234567'
        :param new_username: e.g. '375291111111'
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{old_username}'")
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_USERS SET USER_NAME='{new_username}' WHERE USER_NAME='{old_username}'")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{new_username}'")
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def password_username_equals(oracle, connection, cursor, password, username):
        """Set PASSWORD for one user row.

        :param password: e.g. '12345qwerty'
        :param username: e.g. '375291234567'
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_USERS SET PASSWORD='{password}' WHERE USER_NAME='{username}'")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def customer_id_username_equals(oracle, connection, cursor, customer_id, username):
        """Set CUSTOMER_ID for one user row.

        :param customer_id: e.g. 1
        :param username: e.g. '375291234567'
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_USERS SET CUSTOMER_ID={customer_id} WHERE USER_NAME='{username}'")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def config_id_username_equals(oracle, connection, cursor, config_id, username):
        """Set CONFIG_ID for one user row.

        :param config_id: e.g. 1
        :param username: e.g. '375291234567'
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_USERS SET CONFIG_ID={config_id} WHERE USER_NAME='{username}'")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def profile_id_username_equals(oracle, connection, cursor, profile_id, username):
        """Set PROFILE_ID for one user row.

        :param profile_id: e.g. 1
        :param username: e.g. '375291234567'
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_USERS SET PROFILE_ID={profile_id} WHERE USER_NAME='{username}'")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}
class Contexts:
    """Update helpers for the SDP.VPN_CONTEXTS table.

    All methods take the shared oracle/connection/cursor objects as their
    first three arguments and return
    {"old": [rows before update], "new": [rows after update]}.
    """

    @staticmethod
    def all(oracle, connection, cursor, context, vrf_count, is_full, context_id):
        """Rewrite CONTEXT, VRF_COUNT and IS_FULL for one row.

        :param context: e.g. 'Gi-3'
        :param vrf_count: e.g. 250
        :param is_full: 0 or 1
        :param context_id: primary key of the row to change
        :type context: str
        :type vrf_count: int
        :type is_full: int
        :type context_id: int
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_CONTEXTS SET CONTEXT='{context}',VRF_COUNT={vrf_count},IS_FULL={is_full} WHERE CONTEXT_ID={context_id}")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def vrf_count_context_id_equals(oracle, connection, cursor, vrf_count, context_id):
        """Set VRF_COUNT for the row with the given CONTEXT_ID.

        :param vrf_count: e.g. 250
        :param context_id: e.g. 1
        :type vrf_count: int
        :type context_id: int
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_CONTEXTS SET VRF_COUNT={vrf_count} WHERE CONTEXT_ID={context_id}")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def vrf_count_context_equals(oracle, connection, cursor, vrf_count, context):
        """Set VRF_COUNT for rows whose CONTEXT matches exactly.

        :param vrf_count: e.g. 250
        :param context: e.g. 'Gi-3'
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT='{context}'"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_CONTEXTS SET VRF_COUNT={vrf_count} WHERE CONTEXT='{context}'")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def vrf_count_context_like(oracle, connection, cursor, vrf_count, context):
        """Set VRF_COUNT for rows whose CONTEXT contains the given substring.

        :param vrf_count: e.g. 250
        :param context: substring, e.g. 'Gi-3'
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT LIKE '%{context}%'"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_CONTEXTS SET VRF_COUNT={vrf_count} WHERE CONTEXT LIKE '%{context}%'")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def is_full_context_id_equals(oracle, connection, cursor, is_full, context_id):
        """Set IS_FULL for the row with the given CONTEXT_ID.

        :param is_full: 0 or 1
        :param context_id: e.g. 1
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_CONTEXTS SET IS_FULL={is_full} WHERE CONTEXT_ID={context_id}")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def is_full_context_equals(oracle, connection, cursor, is_full, context):
        """Set IS_FULL for rows whose CONTEXT matches exactly.

        :param is_full: 0 or 1
        :param context: e.g. 'Gi-3'
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT='{context}'"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_CONTEXTS SET IS_FULL={is_full} WHERE CONTEXT='{context}'")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def is_full_context_like(oracle, connection, cursor, is_full, context):
        """Set IS_FULL for rows whose CONTEXT contains the given substring.

        :param is_full: 0 or 1
        :param context: substring, e.g. 'Gi-3'
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT LIKE '%{context}%'"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_CONTEXTS SET IS_FULL={is_full} WHERE CONTEXT LIKE '%{context}%'")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}
class VRFs:
    # Placeholder: update helpers for the VRF table are not implemented yet.
    pass
class Delete:
@staticmethod
def delete_by_query(connection, cursor, query=''):
    """Execute an arbitrary, manually supplied SQL statement and commit.

    NOTE(review): the query text is executed verbatim with no validation —
    callers are trusted; never pass user-supplied strings here.

    :param connection: object connection
    :param cursor: object cursor
    :param query: full SQL text, e.g. "SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID=1"
    :type query: str
    :return: the query text that was executed
    :rtype: str"""
    cursor.execute(query)
    connection.commit()
    return query
class Customers:
    """Delete helpers for the SDP.VPN_CUSTOMERS table.

    Fixes in this revision:
    - `name_equals` no longer crashes after catching IndexError (it used to
      re-access old[0] outside the try); the lookup-miss path now returns
      the {"error": ...} dict that the docstrings already promised, matching
      `unp_equals`.
    - Subscripts inside f-strings use single quotes (nested double quotes in
      an f-string are a SyntaxError before Python 3.12).
    """

    @staticmethod
    def cid_equals(oracle, connection, cursor, cid):
        """DELETE FROM SDP.VPN_CUSTOMERS WHERE CID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param cid: 1
        :type cid: int
        Returns:
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={cid}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_CUSTOMERS WHERE CID={cid}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={cid}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def name_equals(oracle, connection, cursor, name):
        """DELETE FROM SDP.VPN_CUSTOMERS WHERE NAME='Test'
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param name: 'Test'
        :type name: str
        Returns:
        :return: {"old": [Select before delete], "new": [Select after delete]} or {"error": "Error text"}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE NAME='{name}'")
        old = oracle.make_dictionary_results(cursor)
        try:
            # Delete via the primary key of the first matching row.
            cursor.execute(f"DELETE FROM SDP.VPN_CUSTOMERS WHERE CID={old[0]['CID']}")
            connection.commit()
        except IndexError as e:
            # No row matched: return the documented error dict instead of
            # printing and then crashing on old[0] below.
            return {"error": f"Error: {e} : Row with NAME={name} is not exist"}
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={old[0]['CID']}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def name_like(oracle, connection, cursor, name):
        """DELETE FROM SDP.VPN_CUSTOMERS WHERE NAME LIKE '%Test%'
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param name: 'Test'
        :type name: str
        Returns:
        :return: {"old": [Select before delete], "new": [Select after delete]} or {"error": "Error text"}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE NAME LIKE '%{name}%'")
        old = oracle.make_dictionary_results(cursor)
        try:
            # NOTE: only the first matching row is deleted even if the LIKE
            # pattern matched several.
            cursor.execute(f"DELETE FROM SDP.VPN_CUSTOMERS WHERE CID={old[0]['CID']}")
            connection.commit()
        except IndexError as e:
            # Return the documented error dict (was a bare print).
            return {"error": f"Error: {e} : Row with NAME LIKE {name} is not exist"}
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE NAME LIKE '%{name}%'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def unp_equals(oracle, connection, cursor, unp):
        """DELETE FROM SDP.VPN_CUSTOMERS WHERE UNP=123456789
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param unp: 123456789
        :type unp: int
        Returns:
        :return: {"old": [Select before delete], "new": [Select after delete]} or {"error": "Error text"}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE UNP={unp}")
        old = oracle.make_dictionary_results(cursor)
        try:
            cursor.execute(f"DELETE FROM SDP.VPN_CUSTOMERS WHERE CID={old[0]['CID']}")
            connection.commit()
        except IndexError as e:
            return {"error": f"Error: {e} : Row with UNP={unp} is not exist"}
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE UNP={unp}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class Profiles:
    """Delete helpers for the SDP.VPN_PROFILES table.

    Fixes in this revision:
    - Subscripts inside f-strings use single quotes (nested double quotes in
      an f-string are a SyntaxError before Python 3.12).
    - `profile_name_equals` no longer reports "PROFILE_NAME LIKE" in its
      error message; that wording belongs to `profile_name_like`.
    """

    @staticmethod
    def cid_equals(oracle, connection, cursor, customer_id):
        """DELETE FROM SDP.VPN_PROFILES WHERE CUSTOMER_ID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param customer_id: 1
        :type customer_id: int
        Returns:
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE CUSTOMER_ID={customer_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_PROFILES WHERE CUSTOMER_ID={customer_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE CUSTOMER_ID={customer_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def pid_equals(oracle, connection, cursor, profile_id):
        """DELETE FROM SDP.VPN_PROFILES WHERE PROFILE_ID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_id: 1
        :type profile_id: int
        Returns:
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def profile_name_equals(oracle, connection, cursor, profile_name):
        """DELETE FROM SDP.VPN_PROFILES WHERE PROFILE_NAME='Test'
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_name: 'Test'
        :type profile_name: str
        Returns:
        :return: {"old": [Select before delete], "new": [Select after delete]} or {"error": "Error text"}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_NAME='{profile_name}'")
        old = oracle.make_dictionary_results(cursor)
        try:
            # Delete via the primary key of the first matching row.
            cursor.execute(f"DELETE FROM SDP.VPN_PROFILES WHERE PROFILE_ID={old[0]['PROFILE_ID']}")
            connection.commit()
        except IndexError as e:
            # Fixed message: this is the equality variant, not LIKE.
            return {"error": f"Error: {e} : Row with PROFILE_NAME={profile_name} is not exist"}
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={old[0]['PROFILE_ID']}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def profile_name_like(oracle, connection, cursor, profile_name):
        """DELETE FROM SDP.VPN_PROFILES WHERE PROFILE_NAME LIKE '%Test%'
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_name: 'Test'
        :type profile_name: str
        Returns:
        :return: {"old": [Select before delete], "new": [Select after delete]} or {"error": "Error text"}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_NAME LIKE '%{profile_name}%'")
        old = oracle.make_dictionary_results(cursor)
        try:
            # NOTE: only the first matching row is deleted even if the LIKE
            # pattern matched several.
            cursor.execute(f"DELETE FROM SDP.VPN_PROFILES WHERE PROFILE_ID={old[0]['PROFILE_ID']}")
            connection.commit()
        except IndexError as e:
            return {"error": f"Error: {e} : Row with PROFILE_NAME LIKE '{profile_name}' is not exist"}
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={old[0]['PROFILE_ID']}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def config_id_equals(oracle, connection, cursor, config_id):
        """DELETE FROM SDP.VPN_PROFILES WHERE CONFIG_ID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param config_id: 1
        :type config_id: int
        Returns:
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE CONFIG_ID={config_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_PROFILES WHERE CONFIG_ID={config_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE CONFIG_ID={config_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class Dictionary:
    """Delete helpers for the SDP.VPN_DICT table.

    All methods take the shared oracle/connection/cursor objects as their
    first three arguments and return
    {"old": [rows before delete], "new": [rows after delete]}.
    """

    @staticmethod
    def cid_equals(oracle, connection, cursor, customer_id):
        """Delete every dictionary row belonging to one customer.

        :param customer_id: e.g. 1
        :type customer_id: int
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_DICT WHERE CUSTOMER_ID={customer_id}"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_DICT WHERE CUSTOMER_ID={customer_id}")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}

    @staticmethod
    def pid_equals(oracle, connection, cursor, profile_id):
        """Delete every dictionary row belonging to one profile.

        :param profile_id: e.g. 1
        :type profile_id: int
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}
class Attributes:
    """Delete helpers for the SDP.VPN_ATTRIBUTES table."""

    @staticmethod
    def config_id_equals(oracle, connection, cursor, config_id):
        """Delete every attribute row belonging to one configuration.

        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param config_id: e.g. 1
        :type config_id: int
        :return: {"old": [rows before delete], "new": [rows after delete]}
        :rtype: dict"""
        snapshot_sql = f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE CONFIG_ID={config_id}"
        cursor.execute(snapshot_sql)
        before = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_ATTRIBUTES WHERE CONFIG_ID={config_id}")
        connection.commit()
        cursor.execute(snapshot_sql)
        after = oracle.make_dictionary_results(cursor)
        return {"old": before, "new": after}
class Users:
    """DELETE helpers for the SDP.VPN_USERS table.

    Every method selects the matching rows, deletes them, commits, selects
    again and returns {"old": rows before delete, "new": rows after delete}.
    NOTE(review): values are interpolated directly into SQL — vulnerable to
    injection if they ever come from untrusted input; prefer bind variables.
    """

    @staticmethod
    def _delete_where(oracle, connection, cursor, where):
        # Shared select / delete / commit / re-select sequence for one WHERE clause.
        select_query = f"SELECT * FROM SDP.VPN_USERS WHERE {where}"
        cursor.execute(select_query)
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_USERS WHERE {where}")
        connection.commit()
        cursor.execute(select_query)
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def username_equals(oracle, connection, cursor, username):
        """DELETE FROM SDP.VPN_USERS WHERE USER_NAME='375291797391'

        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param username: e.g. '375291797391'
        :type username: str
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        return Users._delete_where(oracle, connection, cursor, f"USER_NAME='{username}'")

    @staticmethod
    def customer_id_equals(oracle, connection, cursor, customer_id):
        """DELETE FROM SDP.VPN_USERS WHERE CUSTOMER_ID=1

        :param customer_id: e.g. 1
        :type customer_id: int
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        return Users._delete_where(oracle, connection, cursor, f"CUSTOMER_ID={customer_id}")

    @staticmethod
    def profile_id_equals(oracle, connection, cursor, profile_id):
        """DELETE FROM SDP.VPN_USERS WHERE PROFILE_ID=1

        :param profile_id: e.g. 1
        :type profile_id: int
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        return Users._delete_where(oracle, connection, cursor, f"PROFILE_ID={profile_id}")

    @staticmethod
    def config_id_equals(oracle, connection, cursor, config_id):
        """DELETE FROM SDP.VPN_USERS WHERE CONFIG_ID=1

        :param config_id: e.g. 1
        :type config_id: int
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        return Users._delete_where(oracle, connection, cursor, f"CONFIG_ID={config_id}")

    @staticmethod
    def password_equals(oracle, connection, cursor, password):
        """DELETE FROM SDP.VPN_USERS WHERE PASSWORD='qwerty12345'

        :param password: e.g. 'qwerty12345'
        :type password: str
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        return Users._delete_where(oracle, connection, cursor, f"PASSWORD='{password}'")

    @staticmethod
    def password_like(oracle, connection, cursor, password):
        """DELETE FROM SDP.VPN_USERS WHERE PASSWORD LIKE '%qwerty12345%'

        Deletes by the exact PASSWORD of the first LIKE match.

        :param password: e.g. 'qwerty12345'
        :type password: str
        :return: {"old": [...], "new": [...]} or {"error": "Error text"}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE PASSWORD LIKE '%{password}%'")
        old = oracle.make_dictionary_results(cursor)
        try:
            # BUG FIX: the original nested double quotes (old[0]["PASSWORD"]) inside
            # a double-quoted f-string — a SyntaxError on Python < 3.12.
            cursor.execute(f"DELETE FROM SDP.VPN_USERS WHERE PASSWORD='{old[0]['PASSWORD']}'")
            connection.commit()
        except IndexError as e:
            return {"error": f"Error: {e} : Row with PASSWORD LIKE '{password}' is not exist"}
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE PASSWORD LIKE '%{password}%'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class Contexts:
    """DELETE helpers for the SDP.VPN_CONTEXTS table.

    Every method returns {"old": rows before delete, "new": rows after delete};
    context_like returns {"error": ...} when nothing matched.
    """

    @staticmethod
    def context_id_equals(oracle, connection, cursor, context_id):
        """DELETE FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID=1

        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param context_id: e.g. 1
        :type context_id: int
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        select_query = f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}"
        cursor.execute(select_query)
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}")
        connection.commit()
        cursor.execute(select_query)
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def context_equals(oracle, connection, cursor, context):
        """DELETE FROM SDP.VPN_CONTEXTS WHERE CONTEXT='Gi-3'

        :param context: e.g. 'Gi-3'
        :type context: str
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        select_query = f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT='{context}'"
        cursor.execute(select_query)
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_CONTEXTS WHERE CONTEXT='{context}'")
        connection.commit()
        cursor.execute(select_query)
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def context_like(oracle, connection, cursor, context):
        """DELETE FROM SDP.VPN_CONTEXTS WHERE CONTEXT LIKE '%Gi-3%'

        Deletes by the exact CONTEXT of the first LIKE match.
        (Doc fix: the original docstring named the wrong table, VPN_VRFS.)

        :param context: e.g. 'Gi-3'
        :type context: str
        :return: {"old": [...], "new": [...]} or {"error": "Error text"}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT LIKE '%{context}%'")
        old = oracle.make_dictionary_results(cursor)
        try:
            # BUG FIX: the original nested double quotes (old[0]["CONTEXT"]) inside
            # a double-quoted f-string — a SyntaxError on Python < 3.12.
            cursor.execute(f"DELETE FROM SDP.VPN_CONTEXTS WHERE CONTEXT='{old[0]['CONTEXT']}'")
            connection.commit()
        except IndexError as e:
            return {"error": f"Error: {e} : Row with CONTEXT LIKE '{context}' is not exist"}
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT='{old[0]['CONTEXT']}'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class VRFs:
    """DELETE helpers for the SDP.VPN_VRFS table.

    Every method returns {"old": rows before delete, "new": rows after delete};
    the *_like variants return {"error": ...} when nothing matched.
    NOTE(review): values are interpolated directly into SQL — vulnerable to
    injection if they ever come from untrusted input; prefer bind variables.
    """

    @staticmethod
    def _delete_where(oracle, connection, cursor, where):
        # Shared select / delete / commit / re-select sequence for one WHERE clause.
        select_query = f"SELECT * FROM SDP.VPN_VRFS WHERE {where}"
        cursor.execute(select_query)
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_VRFS WHERE {where}")
        connection.commit()
        cursor.execute(select_query)
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def context_id_equals(oracle, connection, cursor, context_id):
        """DELETE FROM SDP.VPN_VRFS WHERE CONTEXT_ID=3

        :param context_id: e.g. 3
        :type context_id: int
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        return VRFs._delete_where(oracle, connection, cursor, f"CONTEXT_ID={context_id}")

    @staticmethod
    def rt_vrf_equals(oracle, connection, cursor, rt_vrf):
        """DELETE FROM SDP.VPN_VRFS WHERE RT_VRF=10000

        :param rt_vrf: e.g. 10000
        :type rt_vrf: int
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        return VRFs._delete_where(oracle, connection, cursor, f"RT_VRF={rt_vrf}")

    @staticmethod
    def vrf_name_equals(oracle, connection, cursor, vrf_name):
        """DELETE FROM SDP.VPN_VRFS WHERE VRF_NAME='10000_test'

        :param vrf_name: e.g. '10000_test'
        :type vrf_name: str
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        return VRFs._delete_where(oracle, connection, cursor, f"VRF_NAME='{vrf_name}'")

    @staticmethod
    def vrf_name_like(oracle, connection, cursor, vrf_name):
        """DELETE FROM SDP.VPN_VRFS WHERE VRF_NAME LIKE '%10000_test%'

        Deletes by the exact VRF_NAME of the first LIKE match.

        :param vrf_name: e.g. '10000_test'
        :type vrf_name: str
        :return: {"old": [...], "new": [...]} or {"error": "Error text"}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE VRF_NAME LIKE '%{vrf_name}%'")
        old = oracle.make_dictionary_results(cursor)
        try:
            # BUG FIX: the original nested double quotes (old[0]["VRF_NAME"]) inside
            # a double-quoted f-string — a SyntaxError on Python < 3.12.
            cursor.execute(f"DELETE FROM SDP.VPN_VRFS WHERE VRF_NAME='{old[0]['VRF_NAME']}'")
            connection.commit()
        except IndexError as e:
            return {"error": f"Error: {e} : Row with VRF_NAME LIKE {vrf_name} is not exist"}
        cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE VRF_NAME LIKE '%{vrf_name}%'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def rd_equals(oracle, connection, cursor, rd):
        """DELETE FROM SDP.VPN_VRFS WHERE RD=10000

        :param rd: e.g. 10000
        :type rd: int
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        return VRFs._delete_where(oracle, connection, cursor, f"RD={rd}")

    @staticmethod
    def rt_equals(oracle, connection, cursor, rt):
        """DELETE FROM SDP.VPN_VRFS WHERE RT='10000, 10001'

        :param rt: e.g. '10000, 10001'
        :type rt: str
        :return: {"old": [Select before delete], "new": [Select after delete]}
        :rtype: dict"""
        return VRFs._delete_where(oracle, connection, cursor, f"RT='{rt}'")

    @staticmethod
    def rt_like(oracle, connection, cursor, rt):
        """DELETE FROM SDP.VPN_VRFS WHERE RT LIKE '%10000%'

        Deletes by the exact RT of the first LIKE match.

        :param rt: e.g. '10000'
        :type rt: str
        :return: {"old": [...], "new": [...]} or {"error": "Error text"}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE RT LIKE '%{rt}%'")
        old = oracle.make_dictionary_results(cursor)
        try:
            # BUG FIX: the original nested double quotes (old[0]["RT"]) inside
            # a double-quoted f-string — a SyntaxError on Python < 3.12.
            cursor.execute(f"DELETE FROM SDP.VPN_VRFS WHERE RT='{old[0]['RT']}'")
            connection.commit()
        except IndexError as e:
            return {"error": f"Error: {e} : Row with RT LIKE {rt} is not exist"}
        cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE RT LIKE '%{rt}%'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
# ora = OracleDB()
# con, cur = ora.connect()
# print(Select().VRFs().all(ora, cur))
# print(Insert().VRFs().all(ora, con, cur, context_id=1, rt_vrf=10000, vrf_name='10000_test', rd=10000, rt='10000, 10001'))
# print(Delete().VRFs().rt_vrf_equals(ora, con, cur, '10000'))
# ora.close(con, cur)
| """
Methods need to work with Oracle DB(misc)
oracle = OracleDB()
connection, cursor = oracle.connect()
#This is space for you query's.
#For example:
all = Select().Dictionary().all(oracle, cursor)
oracle.close(connection, cursor)
"""
from datetime import datetime
import cx_Oracle as Oracle
from config import Misc
class OracleDB:
    """
    Oracle database connection wrapper for the MISC database.

    Connection parameters default to the values from config.Misc.
    @author: jbaranski
    https://gist.github.com/jbaranski/6537b4075873984ea06e5fbe291f4441
    """

    def __init__(self, host=Misc.ORACLE_IP_MAIN, port=Misc.ORACLE_PORT, username=Misc.ORACLE_USER,
                 password=Misc.ORACLE_PASS, database=Misc.ORACLE_DB_NAME):
        self.connection = None
        self.cursor = None
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self.database = database

    def connect(self):
        """Connect to MISC.

        :return: (connection, cursor) on success; None (implicitly) on failure,
            after printing the error."""
        dsn = f"{self.username}/{self.password}@{self.host}:{self.port}/{self.database}"
        try:
            connection = Oracle.connect(dsn, encoding="UTF-8", nencoding="UTF-8")
            return connection, connection.cursor()
        except Oracle.DatabaseError as e:
            print("There is a problem with Oracle", e)

    @staticmethod
    def make_dictionary_results(cursor):
        """Remake a tuple-answer to dictionary-answer.

        Tuple-answer:
            [(1, 1, 'Test', 'static', 1000)]
        Dictionary-answer:
            [{'CUSTOMER_ID': 1, 'PROFILE_ID': 1, 'PROFILE_NAME': 'Test', 'PROFILE_TYPE': 'static', 'CONFIG_ID': 1000}]
        """
        # Column names come from the cursor description of the last SELECT.
        columns = [d[0] for d in cursor.description]
        cursor.rowfactory = lambda *row: dict(zip(columns, row))
        return cursor.fetchall()

    @staticmethod
    def close(connection, cursor):
        """Close the cursor and connection, ignoring errors on already-closed handles."""
        try:
            cursor.close()
            connection.close()
        except Oracle.DatabaseError:
            pass
class Select:
    """Namespace of SELECT helpers for the SDP VPN tables."""

    @staticmethod
    def get_by_query(oracle, cursor, query="SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID=1"):
        """Run an arbitrary SELECT statement and return rows as dictionaries.

        :param oracle: object oracle
        :param cursor: object cursor
        :param query: full SQL text of the SELECT
        :return: list of row dictionaries"""
        cursor.execute(query)
        return oracle.make_dictionary_results(cursor)
class Customers:
    """SELECT helpers for the SDP.VPN_CUSTOMERS table."""

    @staticmethod
    def all(oracle, cursor):
        """Return every row of SDP.VPN_CUSTOMERS."""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def cid_equals(oracle, cursor, cid=0):
        """Return rows where CID equals *cid*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={cid}")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def unp_equals(oracle, cursor, unp=0):
        """Return rows where UNP equals *unp*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE UNP={unp}")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def name_equals(oracle, cursor, name='Тестовый абонент'):
        """Return rows where NAME equals *name* exactly."""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE NAME='{name}'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def name_like(oracle, cursor, name='Тестовый абонент'):
        """Return rows where NAME contains *name* (LIKE '%name%')."""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE NAME LIKE '%{name}%'")
        return oracle.make_dictionary_results(cursor)
class Profiles:
    """SELECT helpers for the SDP.VPN_PROFILES table."""

    @staticmethod
    def all(oracle, cursor):
        """Return every row of SDP.VPN_PROFILES, ordered by CUSTOMER_ID."""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES ORDER BY CUSTOMER_ID")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def customer_id_equals(oracle, cursor, customer_id=0):
        """Return rows where CUSTOMER_ID equals *customer_id*, ordered by PROFILE_ID."""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE CUSTOMER_ID={customer_id} ORDER BY PROFILE_ID")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def profile_id_equals(oracle, cursor, profile_id=0):
        """Return rows where PROFILE_ID equals *profile_id*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def profile_name_equals(oracle, cursor, profile_name='MinskTrans IoT'):
        """Return rows where PROFILE_NAME equals *profile_name* exactly."""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_NAME='{profile_name}'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def profile_name_like(oracle, cursor, profile_name='MinskTrans'):
        """Return rows where PROFILE_NAME contains *profile_name* (LIKE '%MinskTrans%').

        BUG FIX: the original omitted the '%' wildcards, so LIKE behaved as an
        exact match, contradicting the docstring and every sibling *_like method."""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_NAME LIKE '%{profile_name}%'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def profile_type_equals(oracle, cursor, profile_type='static'):
        """Return rows where PROFILE_TYPE equals *profile_type* exactly."""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_TYPE='{profile_type}'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def profile_type_like(oracle, cursor, profile_type='static'):
        """Return rows where PROFILE_TYPE contains *profile_type* (LIKE '%static%')."""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_TYPE LIKE '%{profile_type}%'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def config_id_equals(oracle, cursor, config_id=0):
        """Return rows where CONFIG_ID equals *config_id*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE CONFIG_ID={config_id}")
        return oracle.make_dictionary_results(cursor)
class Dictionary:
    """SELECT helpers for the SDP.VPN_DICT table.

    BUG FIX: except for all(), every method here queried SDP.VPN_PROFILES
    (copy-paste from the Profiles class). VPN_PROFILES has no ATTRIBUTE_NAME /
    ATTRIBUTE_VALUE columns (see Insert.Dictionary), so those queries could not
    work; all methods now query SDP.VPN_DICT.
    """

    @staticmethod
    def all(oracle, cursor):
        """Return every row of SDP.VPN_DICT, ordered by CUSTOMER_ID."""
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT ORDER BY CUSTOMER_ID")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def customer_id_equals(oracle, cursor, customer_id=0):
        """Return SDP.VPN_DICT rows where CUSTOMER_ID equals *customer_id*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE CUSTOMER_ID={customer_id} ORDER BY PROFILE_ID")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def profile_id_equals(oracle, cursor, profile_id=0):
        """Return SDP.VPN_DICT rows where PROFILE_ID equals *profile_id*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def attribute_name_equals(oracle, cursor, attribute_name='default'):
        """Return SDP.VPN_DICT rows where ATTRIBUTE_NAME equals *attribute_name*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE ATTRIBUTE_NAME='{attribute_name}'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def attribute_name_like(oracle, cursor, attribute_name='default'):
        """Return SDP.VPN_DICT rows where ATTRIBUTE_NAME contains *attribute_name*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE ATTRIBUTE_NAME LIKE '%{attribute_name}%'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def attribute_value_equals(oracle, cursor, attribute_value='default'):
        """Return SDP.VPN_DICT rows where ATTRIBUTE_VALUE equals *attribute_value*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE ATTRIBUTE_VALUE='{attribute_value}'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def attribute_value_like(oracle, cursor, attribute_value='default'):
        """Return SDP.VPN_DICT rows where ATTRIBUTE_VALUE contains *attribute_value*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE ATTRIBUTE_VALUE LIKE '%{attribute_value}%'")
        return oracle.make_dictionary_results(cursor)
class Attributes:
    """SELECT helpers for the SDP.VPN_ATTRIBUTES table."""

    @staticmethod
    def all(oracle, cursor):
        """Return every row of SDP.VPN_ATTRIBUTES, ordered by CONFIG_ID."""
        cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES ORDER BY CONFIG_ID")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def value_equals(oracle, cursor, value):
        """Return rows where VALUE equals *value* exactly."""
        cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE VALUE='{value}' ORDER BY CONFIG_ID")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def value_like(oracle, cursor, value):
        """Return rows where VALUE contains *value* (LIKE '%value%')."""
        cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE VALUE LIKE '%{value}%' ORDER BY CONFIG_ID")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def config_id_equals(oracle, cursor, config_id):
        """Return rows where CONFIG_ID equals *config_id*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE CONFIG_ID={config_id}")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def att_id_equals(oracle, cursor, att_id):
        """Return rows where ATT_ID equals *att_id*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE ATT_ID={att_id}")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def att_name_equals(oracle, cursor, att_name):
        """Return rows where ATT_NAME equals *att_name* exactly."""
        cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE ATT_NAME='{att_name}'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def att_name_like(oracle, cursor, att_name):
        """Return rows where ATT_NAME contains *att_name* (LIKE '%att_name%')."""
        cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE ATT_NAME LIKE '%{att_name}%'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def config_id_and_att_name_equals(oracle, cursor, config_id=1, att_name=''):
        """Return rows matching both CONFIG_ID and ATT_NAME."""
        cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE CONFIG_ID={config_id} AND ATT_NAME='{att_name}'")
        return oracle.make_dictionary_results(cursor)
class Users:
    """SELECT helpers for the SDP.VPN_USERS table."""

    @staticmethod
    def all(oracle, cursor):
        """Return every row of SDP.VPN_USERS, ordered by CUSTOMER_ID."""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS ORDER BY CUSTOMER_ID")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def username_equals(oracle, cursor, username='375292222222'):
        """Return rows where USER_NAME equals *username* exactly."""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def username_like(oracle, cursor, username='3752922711'):
        """Return rows where USER_NAME contains *username* (LIKE '%username%')."""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME LIKE '%{username}%'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def password_equals(oracle, cursor, password='002pass'):
        """Return rows where PASSWORD equals *password* exactly."""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE PASSWORD='{password}'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def password_like(oracle, cursor, password='pass'):
        """Return rows where PASSWORD contains *password* (LIKE '%password%')."""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE PASSWORD LIKE '%{password}%'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def customer_id_equals(oracle, cursor, customer_id=1):
        """Return rows where CUSTOMER_ID equals *customer_id*, ordered by PROFILE_ID."""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE CUSTOMER_ID={customer_id} ORDER BY PROFILE_ID")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def config_id_equals(oracle, cursor, config_id=1):
        """Return rows where CONFIG_ID equals *config_id*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE CONFIG_ID={config_id}")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def profile_id_equals(oracle, cursor, profile_id=1):
        """Return rows where PROFILE_ID equals *profile_id*, ordered by CONFIG_ID."""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE PROFILE_ID={profile_id} ORDER BY CONFIG_ID")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def profile_id_and_config_id_equals(oracle, cursor, profile_id=1, config_id=1):
        """Return rows matching both PROFILE_ID and CONFIG_ID, ordered by CONFIG_ID.

        BUG FIX: the original's implicit f-string concatenation produced
        'CONFIG_ID=1ORDER BY CONFIG_ID' (missing space -> invalid SQL)."""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE PROFILE_ID={profile_id} AND CONFIG_ID={config_id} "
                       f"ORDER BY CONFIG_ID")
        return oracle.make_dictionary_results(cursor)
class Contexts:
    """SELECT helpers for the SDP.VPN_CONTEXTS table."""

    @staticmethod
    def all(oracle, cursor):
        """Return every row of SDP.VPN_CONTEXTS, ordered by CONTEXT_ID."""
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS ORDER BY CONTEXT_ID")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def context_id_equals(oracle, cursor, context_id=0):
        """Return rows where CONTEXT_ID equals *context_id*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def context_equals(oracle, cursor, context='Gi-1'):
        """Return rows where CONTEXT equals *context* exactly."""
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT='{context}'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def context_like(oracle, cursor, context='Gi-1'):
        """Return rows where CONTEXT contains *context* (LIKE '%context%')."""
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT LIKE '%{context}%'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def vrf_count_equals(oracle, cursor, vrf_count=255):
        """Return rows where VRF_COUNT equals *vrf_count*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE VRF_COUNT={vrf_count}")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def full_equals(oracle, cursor, is_full=0):
        """Return rows where IS_FULL equals *is_full*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE IS_FULL={is_full}")
        return oracle.make_dictionary_results(cursor)
class VRFs:
    """SELECT helpers for the SDP.VPN_VRFS table."""

    @staticmethod
    def all(oracle, cursor):
        """Return every row of SDP.VPN_VRFS, ordered by CONTEXT_ID."""
        cursor.execute(f"SELECT * FROM SDP.VPN_VRFS ORDER BY CONTEXT_ID")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def context_id_equals(oracle, cursor, context_id=0):
        """Return rows where CONTEXT_ID equals *context_id*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE CONTEXT_ID={context_id}")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def rt_vrf_equals(oracle, cursor, rt_vrf=0):
        """Return rows where RT_VRF equals *rt_vrf*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE RT_VRF={rt_vrf}")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def vrf_name_equals(oracle, cursor, vrf_name='10000_kgb'):
        """Return rows where VRF_NAME equals *vrf_name* exactly."""
        cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE VRF_NAME='{vrf_name}'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def vrf_name_like(oracle, cursor, vrf_name='10000_kgb'):
        """Return rows where VRF_NAME contains *vrf_name* (LIKE '%vrf_name%')."""
        cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE VRF_NAME LIKE '%{vrf_name}%'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def rd_equals(oracle, cursor, rd=10000):
        """Return rows where RD equals *rd*."""
        cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE RD={rd}")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def rt_equals(oracle, cursor, rt='10000'):
        """Return rows where RT equals *rt* exactly.

        BUG FIX: the original interpolated the string value unquoted
        (RT={rt}); RT is a string column everywhere else in this module
        (cf. Delete.VRFs.rt_equals), so quote it."""
        cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE RT='{rt}'")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def rt_like(oracle, cursor, rt='10000'):
        """Return rows where RT contains *rt* (LIKE '%rt%')."""
        cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE RT LIKE '%{rt}%'")
        return oracle.make_dictionary_results(cursor)
class Insert:
    """Namespace of INSERT helpers for the SDP VPN tables."""

    @staticmethod
    def insert_by_query(connection, cursor,
                        query="INSERT INTO SDP.VPN_CUSTOMERS (CID, NAME, IS_TEST) VALUES (0, 'Name_test', 1)"):
        """Execute an arbitrary INSERT statement and commit it.

        :param connection: object connection
        :param cursor: object cursor
        :param query: full SQL text of the INSERT
        :return: the cursor that ran the insert"""
        cursor.execute(query)
        connection.commit()
        return cursor
class Customers:
    """INSERT helpers for the SDP.VPN_CUSTOMERS table."""

    @staticmethod
    def all(oracle, connection, cursor, cid, unp, name, address, url='', contact_name='', contact_info='',
            is_test=0):
        """Insert a fully-populated customer row and return it.

        ACTIVATION_DATE is set to the current local time (seconds precision).

        :return: rows selected back by CID after the insert
        :rtype: list"""
        now = str(datetime.now().isoformat(' ', 'seconds'))
        cursor.execute(f"INSERT INTO SDP.VPN_CUSTOMERS "
                       f"(CID, UNP, NAME, ADDRESS, URL, CONTACT_NAME, CONTACT_INFO, ACTIVATION_DATE, IS_TEST) "
                       f"VALUES "
                       f"({cid}, {unp}, '{name}', '{address}', '{url}', '{contact_name}', '{contact_info}', "
                       f"to_date('{now}', 'yyyy-mm-dd hh24:mi:ss'), {is_test})")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={cid}")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def minimal(oracle, connection, cursor, cid, unp, name, is_test=0):
        """Insert a customer row with empty address/contact fields and return it.

        :return: rows selected back by CID after the insert
        :rtype: list"""
        now = str(datetime.now().isoformat(' ', 'seconds'))
        cursor.execute(f"INSERT INTO SDP.VPN_CUSTOMERS "
                       f"(CID, UNP, NAME, ADDRESS, URL, CONTACT_NAME, CONTACT_INFO, ACTIVATION_DATE, IS_TEST) "
                       f"VALUES "
                       f"({cid}, {unp}, '{name}', '', '', '', '', "
                       f"to_date('{now}', 'yyyy-mm-dd hh24:mi:ss'), {is_test})")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={cid}")
        return oracle.make_dictionary_results(cursor)
class Profiles:
    """INSERT helpers for the SDP.VPN_PROFILES table."""

    @staticmethod
    def all(oracle, connection, cursor, customer_id, profile_id, profile_name, config_id, profile_type='static'):
        """Insert a profile row and return it.

        :return: rows selected back by PROFILE_ID after the insert
        :rtype: list"""
        cursor.execute(f"INSERT INTO SDP.VPN_PROFILES "
                       f"(CUSTOMER_ID, PROFILE_ID, PROFILE_NAME, PROFILE_TYPE, CONFIG_ID) "
                       f"VALUES "
                       f"({customer_id}, {profile_id}, '{profile_name}', '{profile_type}', {config_id})")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        return oracle.make_dictionary_results(cursor)

    @staticmethod
    def minimal(oracle, connection, cursor, profile_id, profile_name, config_id, profile_type='static'):
        """Insert a profile row with CUSTOMER_ID=0 and return it.

        :return: rows selected back by PROFILE_ID after the insert
        :rtype: list"""
        cursor.execute(f"INSERT INTO SDP.VPN_PROFILES "
                       f"(CUSTOMER_ID, PROFILE_ID, PROFILE_NAME, PROFILE_TYPE, CONFIG_ID) "
                       f"VALUES "
                       f"(0, {profile_id}, '{profile_name}', '{profile_type}', {config_id})")
        connection.commit()
        # BUG FIX: the verification SELECT filtered on CUSTOMER_ID={profile_id},
        # but the row was just inserted with CUSTOMER_ID=0 — look it up by
        # PROFILE_ID instead, matching all() above.
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        return oracle.make_dictionary_results(cursor)
class Dictionary:
    """INSERT helpers for the SDP.VPN_DICT table."""

    @staticmethod
    def all(oracle, connection, cursor, customer_id, profile_id, attribute_name, attribute_value):
        """INSERT INTO SDP.VPN_DICT (CUSTOMER_ID, PROFILE_ID, ATTRIBUTE_NAME, ATTRIBUTE_VALUE)
        VALUES (1, 1, 'SN-VPN', 'value')

        (Doc fix: the original docstring was copy-pasted from the Attributes
        class and documented VPN_ATTRIBUTES with the wrong parameters.)

        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param customer_id: 1
        :param profile_id: 1
        :param attribute_name: 'SN-VPN'
        :param attribute_value: attribute value text
        :type customer_id: int
        :type profile_id: int
        :type attribute_name: str
        :type attribute_value: str
        :return: rows selected back by PROFILE_ID after the insert
        :rtype: list"""
        cursor.execute(f"INSERT INTO SDP.VPN_DICT "
                       f"(CUSTOMER_ID, PROFILE_ID, ATTRIBUTE_NAME, ATTRIBUTE_VALUE) "
                       f"VALUES "
                       f"({customer_id}, {profile_id}, '{attribute_name}', '{attribute_value}')")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        return oracle.make_dictionary_results(cursor)
class Attributes:
    """INSERT helpers for SDP.VPN_ATTRIBUTES."""
    @staticmethod
    def all(oracle, connection, cursor, value, config_id, att_name):
        """INSERT INTO SDP.VPN_ATTRIBUTES (VALUE, CONFIG_ID, ATT_ID, ATT_NAME)
        VALUES ('', 1, 3, 'SN-VPN')
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param value: ''
        :param config_id: 1
        :param att_name: 'SN-VPN'
        :type value: str
        :type config_id: int
        :type att_name: str
        Returns:
        :return: [{Select of the inserted row}] or {"error": "Error text"}
        :rtype: list"""
        # Resolve the numeric attribute id from the dictionary table first.
        att_id = cursor.execute(f"SELECT ID FROM SDP.VPN_ATTRIBUTE_DIC WHERE NAME='{att_name}'").fetchall()
        try:
            # NOTE: if att_name is unknown, att_id is [] and the IndexError is
            # raised while formatting the f-string (att_id[0][0]) — before any
            # SQL is sent; the except below handles exactly that case.
            cursor.execute(f"INSERT INTO SDP.VPN_ATTRIBUTES (VALUE, CONFIG_ID, ATT_ID, ATT_NAME) "
                           f"VALUES ('{value}', {config_id}, {att_id[0][0]}, '{att_name}')")
        except IndexError as e:
            return {"error": f"Error: {e}: with att_id={att_id}"}
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES "
                       f"WHERE VALUE='{value}' AND CONFIG_ID={config_id} AND ATT_ID={att_id[0][0]}")
        result = oracle.make_dictionary_results(cursor)
        return result
class Users:
    """INSERT helpers for SDP.VPN_USERS."""
    @staticmethod
    def all(oracle, connection, cursor, msisdn, customer_id, config_id, profile_id, password=''):
        """INSERT INTO SDP.VPN_USERS (USER_NAME, PASSWORD, CUSTOMER_ID, CONFIG_ID, PROFILE_ID)
        VALUES ('375291234567', '', 1, 1, 1)
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param msisdn: '375291234567'
        :param customer_id: 1
        :param config_id: 1
        :param profile_id: 1
        :param password: '12345qwerty'
        :type msisdn: str
        :type customer_id: int
        :type config_id: int
        :type profile_id: int
        :type password: str
        Returns:
        :return: [{Select of the inserted row}]
        :rtype: list"""
        # Idiom fix: isinstance() instead of `type(...) is not str`; callers
        # may pass the MSISDN as an int, so coerce before interpolation.
        if not isinstance(msisdn, str):
            msisdn = str(msisdn)
        cursor.execute(f"INSERT INTO SDP.VPN_USERS (USER_NAME, PASSWORD, CUSTOMER_ID, CONFIG_ID, PROFILE_ID) "
                       f"VALUES ('{msisdn}', '{password}', {customer_id}, {config_id}, {profile_id})")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{msisdn}' AND CONFIG_ID={config_id}")
        result = oracle.make_dictionary_results(cursor)
        return result
class Contexts:
    """INSERT helpers for SDP.VPN_CONTEXTS."""
    @staticmethod
    def all(oracle, connection, cursor, context_id, context, vrf_count, is_full):
        """INSERT INTO SDP.VPN_CONTEXTS (CONTEXT_ID, CONTEXT, VRF_COUNT, IS_FULL)
        VALUES (1, 'Gi-3', 245, 0)
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param context_id: 1
        :param context: 'Gi-3'
        :param vrf_count: 245
        :param is_full: 0
        :type context_id: int
        :type context: str
        :type vrf_count: int
        :type is_full: int
        Returns:
        :return: [{Select of the inserted row}]
        :rtype: list"""
        cursor.execute(f"INSERT INTO SDP.VPN_CONTEXTS (CONTEXT_ID, CONTEXT, VRF_COUNT, IS_FULL) "
                       f"VALUES ({context_id}, '{context}', {vrf_count}, {is_full})")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}")
        result = oracle.make_dictionary_results(cursor)
        return result
class VRFs:
    """INSERT helpers for SDP.VPN_VRFS."""
    @staticmethod
    def all(oracle, connection, cursor, context_id, rt_vrf, vrf_name, rd, rt):
        """Insert one VRF row and return the rows matching its RD.
        INSERT INTO SDP.VPN_VRFS (CONTEXT_ID, RT_VRF, VRF_NAME, RD, RT)
        VALUES (1, 1, '10000_test', 10000, '10000, 10001')
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param context_id: 1
        :param rt_vrf: 10000
        :param vrf_name: '10000_test'
        :param rd: 10000
        :param rt: '10000, 10001'
        :type context_id: int
        :type rt_vrf: int
        :type vrf_name: str
        :type rd: int
        :type rt: str
        Returns:
        :return: [{Select of the inserted row}]
        :rtype: list"""
        insert_sql = (
            f"INSERT INTO SDP.VPN_VRFS (CONTEXT_ID, RT_VRF, VRF_NAME, RD, RT) "
            f"VALUES ({context_id}, {rt_vrf}, '{vrf_name}', {rd}, '{rt}')"
        )
        cursor.execute(insert_sql)
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_VRFS WHERE RD={rd}")
        return oracle.make_dictionary_results(cursor)
class Update:
    @staticmethod
    def update_by_query(cursor, query="SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID=1"):
        """Execute a manually supplied SQL statement and return its text.

        NOTE(review): unlike Delete.delete_by_query, no commit is issued here,
        so a DML statement stays uncommitted until the caller commits.
        :param cursor: object cursor
        :param query: SQL text to execute
        :type query: str
        :return: the executed query text
        :rtype: str"""
        # Idiom fix: the default was an f-string with no placeholders; a plain
        # literal has the identical value.
        cursor.execute(query)
        return query
class Customers:
    """UPDATE helpers for SDP.VPN_CUSTOMERS; each returns before/after snapshots."""
    @staticmethod
    def all(oracle, connection, cursor, unp, name, address, url, contact_name, contact_info, customer_id):
        """UPDATE SDP.VPN_CUSTOMERS SET UNP=123456789,NAME='Test',ADDRESS='Washington',URL='www',
        CONTACT_NAME='John',CONTACT_INFO='375291788765',
        EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss')
        WHERE CID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param unp: 123456789
        :param name: 'Test'
        :param address: 'Washington'
        :param url: 'www'
        :param contact_name: 'John'
        :param contact_info: '375291788765'
        :param customer_id: 1
        :type unp: int
        :type name: str
        :type address: str
        :type url: str
        :type contact_name: str
        :type contact_info: str
        :type customer_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
        old = oracle.make_dictionary_results(cursor)
        # EXPIRATION_DATE is stamped with the current local time.
        expiration_date = str(datetime.now().isoformat(' ', 'seconds'))
        # BUG FIX: the original emitted "...hh24:mi:ss'),WHERE CID=..." — a
        # trailing comma before WHERE, which is invalid Oracle SQL; the comma
        # is removed and a space separates the SET list from WHERE.
        cursor.execute(f"UPDATE SDP.VPN_CUSTOMERS "
                       f"SET UNP={unp},"
                       f"NAME='{name}',"
                       f"ADDRESS='{address}',"
                       f"URL='{url}',"
                       f"CONTACT_NAME='{contact_name}',"
                       f"CONTACT_INFO='{contact_info}',"
                       f"EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss') "
                       f"WHERE CID={customer_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def unp_cid_equals(oracle, connection, cursor, unp, customer_id):
        """UPDATE SDP.VPN_CUSTOMERS SET UNP=123456789,
        EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss')
        WHERE CID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param unp: 123456789
        :param customer_id: 1
        :type unp: str
        :type customer_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
        old = oracle.make_dictionary_results(cursor)
        expiration_date = str(datetime.now().isoformat(' ', 'seconds'))
        cursor.execute(f"UPDATE SDP.VPN_CUSTOMERS "
                       f"SET UNP={unp},"
                       f"EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss') "
                       f"WHERE CID={customer_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def name_cid_equals(oracle, connection, cursor, name, customer_id):
        """UPDATE SDP.VPN_CUSTOMERS SET NAME='Test',
        EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss')
        WHERE CID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param name: 'Test'
        :param customer_id: 1
        :type name: str
        :type customer_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
        old = oracle.make_dictionary_results(cursor)
        expiration_date = str(datetime.now().isoformat(' ', 'seconds'))
        cursor.execute(f"UPDATE SDP.VPN_CUSTOMERS "
                       f"SET NAME='{name}',"
                       f"EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss') "
                       f"WHERE CID={customer_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def address_cid_equals(oracle, connection, cursor, address, customer_id):
        """UPDATE SDP.VPN_CUSTOMERS SET ADDRESS='Washington',
        EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss')
        WHERE CID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param address: 'Washington'
        :param customer_id: 1
        :type address: str
        :type customer_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
        old = oracle.make_dictionary_results(cursor)
        expiration_date = str(datetime.now().isoformat(' ', 'seconds'))
        cursor.execute(f"UPDATE SDP.VPN_CUSTOMERS "
                       f"SET ADDRESS='{address}',"
                       f"EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss') "
                       f"WHERE CID={customer_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def url_cid_equals(oracle, connection, cursor, url, customer_id):
        """UPDATE SDP.VPN_CUSTOMERS SET URL='{url}',
        EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss')
        WHERE CID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param url: 'www'
        :param customer_id: 1
        :type url: str
        :type customer_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
        old = oracle.make_dictionary_results(cursor)
        expiration_date = str(datetime.now().isoformat(' ', 'seconds'))
        cursor.execute(f"UPDATE SDP.VPN_CUSTOMERS "
                       f"SET URL='{url}',"
                       f"EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss') "
                       f"WHERE CID={customer_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def contact_name_cid_equals(oracle, connection, cursor, contact_name, customer_id):
        """UPDATE SDP.VPN_CUSTOMERS SET CONTACT_NAME='John W.V.',
        EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss')
        WHERE CID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param contact_name: 'John W.V.'
        :param customer_id: 1
        :type contact_name: str
        :type customer_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
        old = oracle.make_dictionary_results(cursor)
        expiration_date = str(datetime.now().isoformat(' ', 'seconds'))
        cursor.execute(f"UPDATE SDP.VPN_CUSTOMERS "
                       f"SET CONTACT_NAME='{contact_name}',"
                       f"EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss') "
                       f"WHERE CID={customer_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def contact_info_cid_equals(oracle, connection, cursor, contact_info, customer_id):
        """UPDATE SDP.VPN_CUSTOMERS SET CONTACT_INFO='Full name company',
        EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss')
        WHERE CID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param contact_info: 'Full name company'
        :param customer_id: 1
        :type contact_info: str
        :type customer_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
        old = oracle.make_dictionary_results(cursor)
        expiration_date = str(datetime.now().isoformat(' ', 'seconds'))
        cursor.execute(f"UPDATE SDP.VPN_CUSTOMERS "
                       f"SET CONTACT_INFO='{contact_info}',"
                       f"EXPIRATION_DATE=to_date('{expiration_date}', 'yyyy-mm-dd hh24:mi:ss') "
                       f"WHERE CID={customer_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID={customer_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class Profiles:
    """UPDATE helpers for SDP.VPN_PROFILES; each returns before/after snapshots."""
    @staticmethod
    def all(oracle, connection, cursor, profile_id, customer_id, profile_name, profile_type, config_id):
        """UPDATE SDP.VPN_PROFILES SET CUSTOMER_ID=1,PROFILE_NAME='Test',PROFILE_TYPE='static',CONFIG_ID=1
        WHERE PROFILE_ID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_id: 1
        :param customer_id: 1
        :param profile_name: 'Test'
        :param profile_type: 'static'
        :param config_id: 1
        :type profile_id: int
        :type customer_id: int
        :type profile_name: str
        :type profile_type: str
        :type config_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        old = oracle.make_dictionary_results(cursor)
        # PROFILE_ID itself is the lookup key and is never rewritten.
        cursor.execute(f"UPDATE SDP.VPN_PROFILES "
                       f"SET CUSTOMER_ID={customer_id},"
                       f"PROFILE_NAME='{profile_name}',"
                       f"PROFILE_TYPE='{profile_type}',"
                       f"CONFIG_ID={config_id} "
                       f"WHERE PROFILE_ID={profile_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def customer_id_pid_equals(oracle, connection, cursor, profile_id, customer_id):
        """UPDATE SDP.VPN_PROFILES SET CUSTOMER_ID=1 WHERE PROFILE_ID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_id: 1
        :param customer_id: 1
        :type profile_id: int
        :type customer_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_PROFILES SET CUSTOMER_ID={customer_id} WHERE PROFILE_ID={profile_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def profile_name_pid_equals(oracle, connection, cursor, profile_id, profile_name):
        """UPDATE SDP.VPN_PROFILES SET PROFILE_NAME='Test' WHERE PROFILE_ID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_id: 1
        :param profile_name: 'Test'
        :type profile_id: int
        :type profile_name: str
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_PROFILES SET PROFILE_NAME='{profile_name}' WHERE PROFILE_ID={profile_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def profile_type_pid_equals(oracle, connection, cursor, profile_id, profile_type):
        """UPDATE SDP.VPN_PROFILES SET PROFILE_TYPE='static' WHERE PROFILE_ID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_id: 1
        :param profile_type: 'static'
        :type profile_id: int
        :type profile_type: str
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_PROFILES SET PROFILE_TYPE='{profile_type}' WHERE PROFILE_ID={profile_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def config_id_pid_equals(oracle, connection, cursor, profile_id, config_id):
        """UPDATE SDP.VPN_PROFILES SET CONFIG_ID=1 WHERE PROFILE_ID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_id: 1
        :param config_id: 1
        :type profile_id: int
        :type config_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_PROFILES SET CONFIG_ID={config_id} WHERE PROFILE_ID={profile_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID={profile_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class Dictionary:
    """UPDATE helpers for SDP.VPN_DICT; each returns before/after snapshots."""
    @staticmethod
    def all(oracle, connection, cursor, profile_id, customer_id, attribute_name, attribute_value):
        """UPDATE SDP.VPN_DICT SET CUSTOMER_ID=1,ATTRIBUTE_VALUE=''
        WHERE PROFILE_ID=1 AND ATTRIBUTE_NAME=''
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_id: 1
        :param customer_id: 1
        :param attribute_name: ''
        :param attribute_value: ''
        :type profile_id: int
        :type customer_id: int
        :type attribute_name: str
        :type attribute_value: str
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        # BUG FIX: the before/after snapshots originally selected from
        # SDP.VPN_PROFILES, but the UPDATE targets SDP.VPN_DICT; snapshot the
        # table actually being modified.
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_DICT "
                       f"SET CUSTOMER_ID={customer_id},"
                       f"ATTRIBUTE_VALUE='{attribute_value}' "
                       f"WHERE PROFILE_ID={profile_id} AND ATTRIBUTE_NAME='{attribute_name}'")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def customer_id_pid_equals(oracle, connection, cursor, profile_id, customer_id):
        """UPDATE SDP.VPN_DICT SET CUSTOMER_ID=1 WHERE PROFILE_ID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_id: 1
        :param customer_id: 1
        :type profile_id: int
        :type customer_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        # BUG FIX: snapshot SDP.VPN_DICT (the updated table), not SDP.VPN_PROFILES.
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_DICT SET CUSTOMER_ID={customer_id} WHERE PROFILE_ID={profile_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def name_and_value_pid_equals(oracle, connection, cursor, profile_id, attribute_name, attribute_value):
        """UPDATE SDP.VPN_DICT SET ATTRIBUTE_VALUE=''
        WHERE PROFILE_ID=1 and ATTRIBUTE_NAME=''
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_id: 1
        :param attribute_name: ''
        :param attribute_value: ''
        :type profile_id: int
        :type attribute_name: str
        :type attribute_value: str
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        # BUG FIX: snapshot SDP.VPN_DICT (the updated table), not SDP.VPN_PROFILES.
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_DICT "
                       f"SET ATTRIBUTE_VALUE='{attribute_value}' "
                       f"WHERE PROFILE_ID={profile_id} and ATTRIBUTE_NAME='{attribute_name}'")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID={profile_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class Attributes:
    """UPDATE helpers for SDP.VPN_ATTRIBUTES."""
    @staticmethod
    def all(oracle, connection, cursor, att_id, value, profile_id, config_id):
        """UPDATE SDP.VPN_ATTRIBUTES SET VALUE='172.24.202.111' WHERE CONFIG_ID=1 AND ATT_ID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param att_id: 1
        :param value: '172.24.202.111'
        :param profile_id: 1
        :param config_id: 1
        :type att_id: int
        :type value: str
        :type profile_id: int
        :type config_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        # NOTE: profile_id is only used for the before/after snapshots; the
        # UPDATE itself filters on CONFIG_ID and ATT_ID.
        cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE PROFILE_ID={profile_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_ATTRIBUTES SET VALUE='{value}' "
                       f"WHERE CONFIG_ID={config_id} AND ATT_ID={att_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_ATTRIBUTES WHERE PROFILE_ID={profile_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class Users:
    """UPDATE helpers for SDP.VPN_USERS; each returns before/after snapshots."""
    @staticmethod
    def all(oracle, connection, cursor, password, customer_id, config_id, profile_id, username):
        """UPDATE SDP.VPN_USERS SET PASSWORD='12345qwerty',CUSTOMER_ID=1,CONFIG_ID=1,PROFILE_ID=1
        WHERE USER_NAME='375291234567'
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param password: '12345qwerty'
        :param customer_id: 1
        :param config_id: 1
        :param profile_id: 1
        :param username: '375291234567'
        :type password: str
        :type customer_id: int
        :type config_id: int
        :type profile_id: int
        :type username: str
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'")
        old = oracle.make_dictionary_results(cursor)
        # USER_NAME is the lookup key here; use username() to rename a user.
        cursor.execute(f"UPDATE SDP.VPN_USERS "
                       f"SET PASSWORD='{password}',"
                       f"CUSTOMER_ID={customer_id},"
                       f"CONFIG_ID={config_id},"
                       f"PROFILE_ID={profile_id} "
                       f"WHERE USER_NAME='{username}'")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def username(oracle, connection, cursor, old_username, new_username):
        """UPDATE SDP.VPN_USERS SET USER_NAME='375291111111' WHERE USER_NAME='375291234567'
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param old_username: '375291234567'
        :param new_username: '375291111111'
        :type old_username: str
        :type new_username: str
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{old_username}'")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_USERS SET USER_NAME='{new_username}' WHERE USER_NAME='{old_username}'")
        connection.commit()
        # The "new" snapshot is taken under the NEW name after the rename.
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{new_username}'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def password_username_equals(oracle, connection, cursor, password, username):
        """UPDATE SDP.VPN_USERS SET PASSWORD='12345qwerty' WHERE USER_NAME='375291234567'
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param password: '12345qwerty'
        :param username: '375291234567'
        :type password: str
        :type username: str
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_USERS SET PASSWORD='{password}' WHERE USER_NAME='{username}'")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def customer_id_username_equals(oracle, connection, cursor, customer_id, username):
        """UPDATE SDP.VPN_USERS SET CUSTOMER_ID=1 WHERE USER_NAME='375291234567'
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param customer_id: 1
        :param username: '375291234567'
        :type customer_id: int
        :type username: str
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_USERS SET CUSTOMER_ID={customer_id} WHERE USER_NAME='{username}'")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def config_id_username_equals(oracle, connection, cursor, config_id, username):
        """UPDATE SDP.VPN_USERS SET CONFIG_ID=1 WHERE USER_NAME='375291234567'
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param config_id: 1
        :param username: '375291234567'
        :type config_id: int
        :type username: str
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_USERS SET CONFIG_ID={config_id} WHERE USER_NAME='{username}'")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def profile_id_username_equals(oracle, connection, cursor, profile_id, username):
        """UPDATE SDP.VPN_USERS SET PROFILE_ID=1 WHERE USER_NAME='375291234567'
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param profile_id: 1
        :param username: '375291234567'
        :type profile_id: int
        :type username: str
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_USERS SET PROFILE_ID={profile_id} WHERE USER_NAME='{username}'")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_USERS WHERE USER_NAME='{username}'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class Contexts:
    """UPDATE helpers for SDP.VPN_CONTEXTS; each returns before/after snapshots."""
    @staticmethod
    def all(oracle, connection, cursor, context, vrf_count, is_full, context_id):
        """UPDATE SDP.VPN_CONTEXTS
        SET CONTEXT='Gi-3',VRF_COUNT=254,IS_FULL=0
        WHERE CONTEXT_ID=3
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param context: 'Gi-3'
        :param vrf_count: 250
        :param is_full: 1
        :param context_id: 1
        :type context: str
        :type vrf_count: int
        :type is_full: int
        :type context_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_CONTEXTS SET CONTEXT='{context}',VRF_COUNT={vrf_count},IS_FULL={is_full} "
                       f"WHERE CONTEXT_ID={context_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def vrf_count_context_id_equals(oracle, connection, cursor, vrf_count, context_id):
        """UPDATE SDP.VPN_CONTEXTS SET VRF_COUNT=250 WHERE CONTEXT_ID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param context_id: 1
        :param vrf_count: 250
        :type context_id: int
        :type vrf_count: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_CONTEXTS SET VRF_COUNT={vrf_count} WHERE CONTEXT_ID={context_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def vrf_count_context_equals(oracle, connection, cursor, vrf_count, context):
        """UPDATE SDP.VPN_CONTEXTS SET VRF_COUNT=250 WHERE CONTEXT='Gi-3'
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param context: 'Gi-3'
        :param vrf_count: 250
        :type context: str
        :type vrf_count: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT='{context}'")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_CONTEXTS SET VRF_COUNT={vrf_count} WHERE CONTEXT='{context}'")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT='{context}'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def vrf_count_context_like(oracle, connection, cursor, vrf_count, context):
        """UPDATE SDP.VPN_CONTEXTS SET VRF_COUNT=250 WHERE CONTEXT LIKE '%Gi-3%'
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param context: 'Gi-3'
        :param vrf_count: 250
        :type context: str
        :type vrf_count: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        # LIKE variant: may touch several rows matching the substring.
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT LIKE '%{context}%'")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_CONTEXTS SET VRF_COUNT={vrf_count} WHERE CONTEXT LIKE '%{context}%'")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT LIKE '%{context}%'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def is_full_context_id_equals(oracle, connection, cursor, is_full, context_id):
        """UPDATE SDP.VPN_CONTEXTS SET IS_FULL=1 WHERE CONTEXT_ID=1
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param is_full: 1
        :param context_id: 1
        :type is_full: int
        :type context_id: int
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_CONTEXTS SET IS_FULL={is_full} WHERE CONTEXT_ID={context_id}")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID={context_id}")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def is_full_context_equals(oracle, connection, cursor, is_full, context):
        """UPDATE SDP.VPN_CONTEXTS SET IS_FULL=1 WHERE CONTEXT='Gi-3'
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param is_full: 1
        :param context: 'Gi-3'
        :type is_full: int
        :type context: str
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT='{context}'")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_CONTEXTS SET IS_FULL={is_full} WHERE CONTEXT='{context}'")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT='{context}'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
    @staticmethod
    def is_full_context_like(oracle, connection, cursor, is_full, context):
        """UPDATE SDP.VPN_CONTEXTS SET IS_FULL=1 WHERE CONTEXT LIKE '%Gi-3%'
        Parameters:
        :param oracle: object oracle
        :param connection: object connection
        :param cursor: object cursor
        :param is_full: 1
        :param context: 'Gi-3'
        :type is_full: int
        :type context: str
        Returns:
        :return: {"old": [Select before update], "new": [Select after update]}
        :rtype: dict"""
        # LIKE variant: may touch several rows matching the substring.
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT LIKE '%{context}%'")
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"UPDATE SDP.VPN_CONTEXTS SET IS_FULL={is_full} WHERE CONTEXT LIKE '%{context}%'")
        connection.commit()
        cursor.execute(f"SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT LIKE '%{context}%'")
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class VRFs:
    """UPDATE helpers for SDP.VPN_VRFS.

    Not implemented yet; placeholder kept so the Update section mirrors the
    table classes of the insert section."""
    pass
class Delete:
    @staticmethod
    def delete_by_query(connection, cursor, query=''):
        """Execute a manually supplied SQL statement, commit, and return its text.
        Example: DELETE FROM SDP.VPN_CUSTOMERS WHERE CID=1
        :param connection: object connection
        :param cursor: object cursor
        :param query: SQL text to execute
            # NOTE(review): the empty-string default will raise a database
            # error if execute('') runs — always pass a real statement.
        :type query: str
        :return: the executed query text
        :rtype: str"""
        cursor.execute(query)
        connection.commit()
        return query
class Customers:
    """DELETE helpers for SDP.VPN_CUSTOMERS.

    Every method selects the matching rows before deleting and again after,
    returning {"old": [...], "new": [...]} (or {"error": "..."} when no row
    matched for the lookup-based variants). All caller-supplied values are
    passed as bind variables to prevent SQL injection."""

    @staticmethod
    def cid_equals(oracle, connection, cursor, cid):
        """DELETE FROM SDP.VPN_CUSTOMERS WHERE CID=:cid

        :param cid: customer id, e.g. 1
        :return: {"old": [rows before delete], "new": [rows after delete]}
        :rtype: dict"""
        sel = "SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID = :cid"
        cursor.execute(sel, {"cid": cid})
        old = oracle.make_dictionary_results(cursor)
        cursor.execute("DELETE FROM SDP.VPN_CUSTOMERS WHERE CID = :cid", {"cid": cid})
        connection.commit()
        cursor.execute(sel, {"cid": cid})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def name_equals(oracle, connection, cursor, name):
        """DELETE FROM SDP.VPN_CUSTOMERS WHERE NAME=:name (the delete itself
        targets the matched row's CID).

        :return: {"old": ..., "new": ...} or {"error": "..."} when no row matches
        :rtype: dict"""
        cursor.execute("SELECT * FROM SDP.VPN_CUSTOMERS WHERE NAME = :name", {"name": name})
        old = oracle.make_dictionary_results(cursor)
        try:
            cid = old[0]['CID']
        except IndexError as e:
            # BUG FIX: the original printed the error and then crashed with an
            # uncaught IndexError on the follow-up select; return the error
            # payload the docstring promised (as unp_equals already did).
            return {"error": f"Error: {e} : Row with NAME={name} is not exist"}
        cursor.execute("DELETE FROM SDP.VPN_CUSTOMERS WHERE CID = :cid", {"cid": cid})
        connection.commit()
        cursor.execute("SELECT * FROM SDP.VPN_CUSTOMERS WHERE CID = :cid", {"cid": cid})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def name_like(oracle, connection, cursor, name):
        """DELETE FROM SDP.VPN_CUSTOMERS WHERE NAME LIKE '%...%' (the delete
        itself targets the first matched row's CID).

        :return: {"old": ..., "new": ...} or {"error": "..."} when no row matches
        :rtype: dict"""
        sel = "SELECT * FROM SDP.VPN_CUSTOMERS WHERE NAME LIKE '%' || :name || '%'"
        cursor.execute(sel, {"name": name})
        old = oracle.make_dictionary_results(cursor)
        try:
            cid = old[0]['CID']
        except IndexError as e:
            # BUG FIX: return an error dict instead of printing and crashing.
            return {"error": f"Error: {e} : Row with NAME LIKE {name} is not exist"}
        cursor.execute("DELETE FROM SDP.VPN_CUSTOMERS WHERE CID = :cid", {"cid": cid})
        connection.commit()
        cursor.execute(sel, {"name": name})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def unp_equals(oracle, connection, cursor, unp):
        """DELETE FROM SDP.VPN_CUSTOMERS WHERE UNP=:unp (the delete itself
        targets the matched row's CID).

        :param unp: e.g. 123456789
        :return: {"old": ..., "new": ...} or {"error": "..."} when no row matches
        :rtype: dict"""
        sel = "SELECT * FROM SDP.VPN_CUSTOMERS WHERE UNP = :unp"
        cursor.execute(sel, {"unp": unp})
        old = oracle.make_dictionary_results(cursor)
        try:
            cid = old[0]['CID']
        except IndexError as e:
            return {"error": f"Error: {e} : Row with UNP={unp} is not exist"}
        cursor.execute("DELETE FROM SDP.VPN_CUSTOMERS WHERE CID = :cid", {"cid": cid})
        connection.commit()
        cursor.execute(sel, {"unp": unp})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class Profiles:
    """DELETE helpers for SDP.VPN_PROFILES.

    Every method selects the matching rows before deleting and again after,
    returning {"old": [...], "new": [...]} (or {"error": "..."} when no row
    matched for the name-based variants). All caller-supplied values are
    passed as bind variables to prevent SQL injection."""

    @staticmethod
    def _delete_where(oracle, connection, cursor, column, value):
        """Select rows where *column* = *value*, delete them, re-select.

        *column* is always one of this class's hard-coded column names, never
        user input, so interpolating it is safe; the value goes in as a bind."""
        sel = f"SELECT * FROM SDP.VPN_PROFILES WHERE {column} = :v"
        cursor.execute(sel, {"v": value})
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_PROFILES WHERE {column} = :v", {"v": value})
        connection.commit()
        cursor.execute(sel, {"v": value})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def cid_equals(oracle, connection, cursor, customer_id):
        """DELETE FROM SDP.VPN_PROFILES WHERE CUSTOMER_ID=:customer_id

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return Profiles._delete_where(oracle, connection, cursor, 'CUSTOMER_ID', customer_id)

    @staticmethod
    def pid_equals(oracle, connection, cursor, profile_id):
        """DELETE FROM SDP.VPN_PROFILES WHERE PROFILE_ID=:profile_id

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return Profiles._delete_where(oracle, connection, cursor, 'PROFILE_ID', profile_id)

    @staticmethod
    def profile_name_equals(oracle, connection, cursor, profile_name):
        """DELETE FROM SDP.VPN_PROFILES WHERE PROFILE_NAME=:profile_name (the
        delete itself targets the matched row's PROFILE_ID).

        :return: {"old": ..., "new": ...} or {"error": "..."} when no row matches"""
        cursor.execute("SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_NAME = :n",
                       {"n": profile_name})
        old = oracle.make_dictionary_results(cursor)
        try:
            pid = old[0]['PROFILE_ID']
        except IndexError as e:
            # BUG FIX: the original message said 'LIKE' in the equals variant.
            return {"error": f"Error: {e} : Row with PROFILE_NAME={profile_name} is not exist"}
        cursor.execute("DELETE FROM SDP.VPN_PROFILES WHERE PROFILE_ID = :p", {"p": pid})
        connection.commit()
        cursor.execute("SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID = :p", {"p": pid})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def profile_name_like(oracle, connection, cursor, profile_name):
        """DELETE FROM SDP.VPN_PROFILES WHERE PROFILE_NAME LIKE '%...%' (the
        delete itself targets the first matched row's PROFILE_ID).

        :return: {"old": ..., "new": ...} or {"error": "..."} when no row matches"""
        cursor.execute(
            "SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_NAME LIKE '%' || :n || '%'",
            {"n": profile_name})
        old = oracle.make_dictionary_results(cursor)
        try:
            pid = old[0]['PROFILE_ID']
        except IndexError as e:
            return {"error": f"Error: {e} : Row with PROFILE_NAME LIKE '{profile_name}' is not exist"}
        cursor.execute("DELETE FROM SDP.VPN_PROFILES WHERE PROFILE_ID = :p", {"p": pid})
        connection.commit()
        cursor.execute("SELECT * FROM SDP.VPN_PROFILES WHERE PROFILE_ID = :p", {"p": pid})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def config_id_equals(oracle, connection, cursor, config_id):
        """DELETE FROM SDP.VPN_PROFILES WHERE CONFIG_ID=:config_id

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return Profiles._delete_where(oracle, connection, cursor, 'CONFIG_ID', config_id)
class Dictionary:
    """DELETE helpers for SDP.VPN_DICT.

    Each method selects the matching rows before deleting and again after,
    returning {"old": [...], "new": [...]}. Caller-supplied values are passed
    as bind variables to prevent SQL injection."""

    @staticmethod
    def cid_equals(oracle, connection, cursor, customer_id):
        """DELETE FROM SDP.VPN_DICT WHERE CUSTOMER_ID=:customer_id

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        sel = "SELECT * FROM SDP.VPN_DICT WHERE CUSTOMER_ID = :v"
        cursor.execute(sel, {"v": customer_id})
        old = oracle.make_dictionary_results(cursor)
        cursor.execute("DELETE FROM SDP.VPN_DICT WHERE CUSTOMER_ID = :v", {"v": customer_id})
        connection.commit()
        cursor.execute(sel, {"v": customer_id})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def pid_equals(oracle, connection, cursor, profile_id):
        """DELETE FROM SDP.VPN_DICT WHERE PROFILE_ID=:profile_id

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        sel = "SELECT * FROM SDP.VPN_DICT WHERE PROFILE_ID = :v"
        cursor.execute(sel, {"v": profile_id})
        old = oracle.make_dictionary_results(cursor)
        cursor.execute("DELETE FROM SDP.VPN_DICT WHERE PROFILE_ID = :v", {"v": profile_id})
        connection.commit()
        cursor.execute(sel, {"v": profile_id})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class Attributes:
    """DELETE helpers for SDP.VPN_ATTRIBUTES."""

    @staticmethod
    def config_id_equals(oracle, connection, cursor, config_id):
        """DELETE FROM SDP.VPN_ATTRIBUTES WHERE CONFIG_ID=:config_id

        Selects the matching rows before deleting and again after; the value
        is passed as a bind variable to prevent SQL injection.

        :param config_id: e.g. 1
        :return: {"old": [rows before delete], "new": [rows after delete]}
        :rtype: dict"""
        sel = "SELECT * FROM SDP.VPN_ATTRIBUTES WHERE CONFIG_ID = :v"
        cursor.execute(sel, {"v": config_id})
        old = oracle.make_dictionary_results(cursor)
        cursor.execute("DELETE FROM SDP.VPN_ATTRIBUTES WHERE CONFIG_ID = :v", {"v": config_id})
        connection.commit()
        cursor.execute(sel, {"v": config_id})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class Users:
    """DELETE helpers for SDP.VPN_USERS.

    Every method selects the matching rows before deleting and again after,
    returning {"old": [...], "new": [...]} (or {"error": "..."} for the LIKE
    variant when no row matched). All caller-supplied values are passed as
    bind variables to prevent SQL injection."""

    @staticmethod
    def _delete_where(oracle, connection, cursor, column, value):
        """Select rows where *column* = *value*, delete them, re-select.

        *column* is always one of this class's hard-coded column names, never
        user input, so interpolating it is safe; the value goes in as a bind."""
        sel = f"SELECT * FROM SDP.VPN_USERS WHERE {column} = :v"
        cursor.execute(sel, {"v": value})
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_USERS WHERE {column} = :v", {"v": value})
        connection.commit()
        cursor.execute(sel, {"v": value})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def username_equals(oracle, connection, cursor, username):
        """DELETE FROM SDP.VPN_USERS WHERE USER_NAME=:username

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return Users._delete_where(oracle, connection, cursor, 'USER_NAME', username)

    @staticmethod
    def customer_id_equals(oracle, connection, cursor, customer_id):
        """DELETE FROM SDP.VPN_USERS WHERE CUSTOMER_ID=:customer_id

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return Users._delete_where(oracle, connection, cursor, 'CUSTOMER_ID', customer_id)

    @staticmethod
    def profile_id_equals(oracle, connection, cursor, profile_id):
        """DELETE FROM SDP.VPN_USERS WHERE PROFILE_ID=:profile_id

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return Users._delete_where(oracle, connection, cursor, 'PROFILE_ID', profile_id)

    @staticmethod
    def config_id_equals(oracle, connection, cursor, config_id):
        """DELETE FROM SDP.VPN_USERS WHERE CONFIG_ID=:config_id

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return Users._delete_where(oracle, connection, cursor, 'CONFIG_ID', config_id)

    @staticmethod
    def password_equals(oracle, connection, cursor, password):
        """DELETE FROM SDP.VPN_USERS WHERE PASSWORD=:password

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return Users._delete_where(oracle, connection, cursor, 'PASSWORD', password)

    @staticmethod
    def password_like(oracle, connection, cursor, password):
        """DELETE FROM SDP.VPN_USERS WHERE PASSWORD LIKE '%...%' (the delete
        itself targets the first matched row's exact PASSWORD).

        :return: {"old": ..., "new": ...} or {"error": "..."} when no row matches"""
        sel = "SELECT * FROM SDP.VPN_USERS WHERE PASSWORD LIKE '%' || :p || '%'"
        cursor.execute(sel, {"p": password})
        old = oracle.make_dictionary_results(cursor)
        try:
            exact = old[0]['PASSWORD']
        except IndexError as e:
            return {"error": f"Error: {e} : Row with PASSWORD LIKE '{password}' is not exist"}
        cursor.execute("DELETE FROM SDP.VPN_USERS WHERE PASSWORD = :p", {"p": exact})
        connection.commit()
        cursor.execute(sel, {"p": password})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class Contexts:
    """DELETE helpers for SDP.VPN_CONTEXTS.

    Every method selects the matching rows before deleting and again after,
    returning {"old": [...], "new": [...]} (or {"error": "..."} for the LIKE
    variant when no row matched). All caller-supplied values are passed as
    bind variables to prevent SQL injection."""

    @staticmethod
    def _delete_where(oracle, connection, cursor, column, value):
        """Select rows where *column* = *value*, delete them, re-select.

        *column* is always one of this class's hard-coded column names, never
        user input, so interpolating it is safe; the value goes in as a bind."""
        sel = f"SELECT * FROM SDP.VPN_CONTEXTS WHERE {column} = :v"
        cursor.execute(sel, {"v": value})
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_CONTEXTS WHERE {column} = :v", {"v": value})
        connection.commit()
        cursor.execute(sel, {"v": value})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def context_id_equals(oracle, connection, cursor, context_id):
        """DELETE FROM SDP.VPN_CONTEXTS WHERE CONTEXT_ID=:context_id

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return Contexts._delete_where(oracle, connection, cursor, 'CONTEXT_ID', context_id)

    @staticmethod
    def context_equals(oracle, connection, cursor, context):
        """DELETE FROM SDP.VPN_CONTEXTS WHERE CONTEXT=:context

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return Contexts._delete_where(oracle, connection, cursor, 'CONTEXT', context)

    @staticmethod
    def context_like(oracle, connection, cursor, context):
        """DELETE FROM SDP.VPN_CONTEXTS WHERE CONTEXT LIKE '%...%' (the delete
        itself targets the first matched row's exact CONTEXT; the re-select is
        by that exact CONTEXT as well, matching the original behaviour).

        :return: {"old": ..., "new": ...} or {"error": "..."} when no row matches"""
        cursor.execute("SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT LIKE '%' || :c || '%'",
                       {"c": context})
        old = oracle.make_dictionary_results(cursor)
        try:
            exact = old[0]['CONTEXT']
        except IndexError as e:
            return {"error": f"Error: {e} : Row with CONTEXT LIKE '{context}' is not exist"}
        cursor.execute("DELETE FROM SDP.VPN_CONTEXTS WHERE CONTEXT = :c", {"c": exact})
        connection.commit()
        cursor.execute("SELECT * FROM SDP.VPN_CONTEXTS WHERE CONTEXT = :c", {"c": exact})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
class VRFs:
    """DELETE helpers for SDP.VPN_VRFS.

    Every method selects the matching rows before deleting and again after,
    returning {"old": [...], "new": [...]} (or {"error": "..."} for the LIKE
    variants when no row matched). All caller-supplied values are passed as
    bind variables to prevent SQL injection."""

    @staticmethod
    def _delete_where(oracle, connection, cursor, column, value):
        """Select rows where *column* = *value*, delete them, re-select.

        *column* is always one of this class's hard-coded column names, never
        user input, so interpolating it is safe; the value goes in as a bind."""
        sel = f"SELECT * FROM SDP.VPN_VRFS WHERE {column} = :v"
        cursor.execute(sel, {"v": value})
        old = oracle.make_dictionary_results(cursor)
        cursor.execute(f"DELETE FROM SDP.VPN_VRFS WHERE {column} = :v", {"v": value})
        connection.commit()
        cursor.execute(sel, {"v": value})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def context_id_equals(oracle, connection, cursor, context_id):
        """DELETE FROM SDP.VPN_VRFS WHERE CONTEXT_ID=:context_id

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return VRFs._delete_where(oracle, connection, cursor, 'CONTEXT_ID', context_id)

    @staticmethod
    def rt_vrf_equals(oracle, connection, cursor, rt_vrf):
        """DELETE FROM SDP.VPN_VRFS WHERE RT_VRF=:rt_vrf

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return VRFs._delete_where(oracle, connection, cursor, 'RT_VRF', rt_vrf)

    @staticmethod
    def vrf_name_equals(oracle, connection, cursor, vrf_name):
        """DELETE FROM SDP.VPN_VRFS WHERE VRF_NAME=:vrf_name

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return VRFs._delete_where(oracle, connection, cursor, 'VRF_NAME', vrf_name)

    @staticmethod
    def vrf_name_like(oracle, connection, cursor, vrf_name):
        """DELETE FROM SDP.VPN_VRFS WHERE VRF_NAME LIKE '%...%' (the delete
        itself targets the first matched row's exact VRF_NAME).

        :return: {"old": ..., "new": ...} or {"error": "..."} when no row matches"""
        sel = "SELECT * FROM SDP.VPN_VRFS WHERE VRF_NAME LIKE '%' || :n || '%'"
        cursor.execute(sel, {"n": vrf_name})
        old = oracle.make_dictionary_results(cursor)
        try:
            exact = old[0]['VRF_NAME']
        except IndexError as e:
            return {"error": f"Error: {e} : Row with VRF_NAME LIKE {vrf_name} is not exist"}
        cursor.execute("DELETE FROM SDP.VPN_VRFS WHERE VRF_NAME = :n", {"n": exact})
        connection.commit()
        cursor.execute(sel, {"n": vrf_name})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}

    @staticmethod
    def rd_equals(oracle, connection, cursor, rd):
        """DELETE FROM SDP.VPN_VRFS WHERE RD=:rd

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return VRFs._delete_where(oracle, connection, cursor, 'RD', rd)

    @staticmethod
    def rt_equals(oracle, connection, cursor, rt):
        """DELETE FROM SDP.VPN_VRFS WHERE RT=:rt (rt example: '10000, 10001')

        :return: {"old": [rows before delete], "new": [rows after delete]}"""
        return VRFs._delete_where(oracle, connection, cursor, 'RT', rt)

    @staticmethod
    def rt_like(oracle, connection, cursor, rt):
        """DELETE FROM SDP.VPN_VRFS WHERE RT LIKE '%...%' (the delete itself
        targets the first matched row's exact RT).

        :return: {"old": ..., "new": ...} or {"error": "..."} when no row matches"""
        sel = "SELECT * FROM SDP.VPN_VRFS WHERE RT LIKE '%' || :r || '%'"
        cursor.execute(sel, {"r": rt})
        old = oracle.make_dictionary_results(cursor)
        try:
            exact = old[0]['RT']
        except IndexError as e:
            return {"error": f"Error: {e} : Row with RT LIKE {rt} is not exist"}
        cursor.execute("DELETE FROM SDP.VPN_VRFS WHERE RT = :r", {"r": exact})
        connection.commit()
        cursor.execute(sel, {"r": rt})
        new = oracle.make_dictionary_results(cursor)
        return {"old": old, "new": new}
# ora = OracleDB()
# con, cur = ora.connect()
# print(Select().VRFs().all(ora, cur))
# print(Insert().VRFs().all(ora, con, cur, context_id=1, rt_vrf=10000, vrf_name='10000_test', rd=10000, rt='10000, 10001'))
# print(Delete().VRFs().rt_vrf_equals(ora, con, cur, '10000'))
# ora.close(con, cur)
|
import decimal
import uuid
import base64
import requests
import json
from datetime import datetime
from flask import request, current_app, Response
from flask_restplus import Resource, reqparse
from werkzeug.datastructures import FileStorage
from werkzeug import exceptions
from sqlalchemy.exc import DBAPIError
from ..models.mine_expected_document import MineExpectedDocument
from ....mines.mine.models.mine import Mine
from ...expected.models.mine_expected_document import MineExpectedDocument
from ...expected.models.mine_expected_document_xref import MineExpectedDocumentXref
from ...mines.models.mine_document import MineDocument
from app.extensions import api, db
from ....utils.access_decorators import requires_any_of, MINE_CREATE, MINESPACE_PROPONENT
from ....utils.resources_mixins import UserMixin, ErrorMixin
class ExpectedDocumentUploadResource(Resource, UserMixin, ErrorMixin):
    """REST resource tying uploaded files to a mine's expected documents.

    POST proxies the raw upload to the document manager service; PUT links an
    uploaded (or already existing) mine document to the expected document;
    DELETE unlinks a mine document from it.
    """

    parser = reqparse.RequestParser()
    parser.add_argument('mine_document_guid', type=str)
    parser.add_argument('document_manager_guid', type=str)
    parser.add_argument('filename', type=str)

    @api.doc(
        params={
            'expected_document_guid':
            'Required: The guid of the expected document that this upload will be satisfying.'
        })
    @requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
    def post(self, expected_document_guid):
        """Forward the upload request to the document manager and relay its
        response (status, body and raw headers) back to the client."""
        if not expected_document_guid:
            return self.create_error_payload(400, 'Expected Document GUID is required'), 400
        expected_document = MineExpectedDocument.find_by_exp_document_guid(expected_document_guid)
        if not expected_document:
            return self.create_error_payload(404, 'Expected Document not found'), 404
        metadata = self._parse_request_metadata()
        if not metadata or not metadata.get('filename'):
            return self.create_error_payload(400,
                                             'Filename not found in request metadata header'), 400
        folder, pretty_folder = self._parse_upload_folders(expected_document)
        data = {
            'folder': folder,
            'pretty_folder': pretty_folder,
            'filename': metadata.get('filename')
        }
        # BUG FIX: nested single quotes inside a single-quoted f-string are a
        # SyntaxError on Python < 3.12 -- use double quotes for the config key.
        document_manager_URL = f'{current_app.config["DOCUMENT_MANAGER_URL"]}/document-manager'
        resp = requests.post(
            url=document_manager_URL,
            # Forward every request header except Host, which must name the
            # document manager service rather than this one.
            headers={key: value
                     for (key, value) in request.headers if key != 'Host'},
            data=data,
            cookies=request.cookies,
        )
        # Relay the upstream response verbatim so upload headers survive.
        response = Response(resp.content, resp.status_code, resp.raw.headers.items())
        return response

    @requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
    def put(self, expected_document_guid):
        """Attach a mine document to the expected document.

        Accepts either an existing ``mine_document_guid`` or a
        ``document_manager_guid`` plus ``filename`` for a new upload.
        """
        if not expected_document_guid:
            return self.create_error_payload(400, 'Expected Document GUID is required'), 400
        expected_document = MineExpectedDocument.find_by_exp_document_guid(expected_document_guid)
        if not expected_document:
            return self.create_error_payload(404, 'Expected Document not found'), 404
        data = self.parser.parse_args()
        if data.get('mine_document_guid'):
            # Associating an existing mine document.
            mine_doc = MineDocument.find_by_mine_document_guid(data.get('mine_document_guid'))
            if not mine_doc:
                return self.create_error_payload(404, 'Mine Document not found'), 404
            expected_document.mine_documents.append(mine_doc)
            db.session.commit()
        elif data.get('document_manager_guid'):
            # Register and associate a brand-new file upload.
            filename = data.get('filename')
            if not filename:
                return self.create_error_payload(400,
                                                 'Must supply filename for new file upload'), 400
            mine_doc = MineDocument(
                mine_guid=expected_document.mine_guid,
                document_manager_guid=data.get('document_manager_guid'),
                document_name=filename,
                **self.get_create_update_dict())
            expected_document.mine_documents.append(mine_doc)
            db.session.commit()
        else:
            # BUG FIX: corrected the misspelled 'GIUD'/'Docuemnt' in the
            # client-visible error message.
            return self.create_error_payload(
                400, 'Must specify either Mine Document GUID or Document Manager GUID'), 400
        return expected_document.json()

    @requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
    def delete(self, expected_document_guid=None, mine_document_guid=None):
        """Detach *mine_document_guid* from *expected_document_guid*."""
        if expected_document_guid is None or mine_document_guid is None:
            return self.create_error_payload(
                400, 'Must provide a expected document guid and a mine document guid'), 400
        expected_document = MineExpectedDocument.find_by_exp_document_guid(expected_document_guid)
        mine_document = MineDocument.find_by_mine_document_guid(mine_document_guid)
        if expected_document is None or mine_document is None:
            return self.create_error_payload(
                404, 'Either the Expected Document or the Mine Document was not found'), 404
        expected_document.mine_documents.remove(mine_document)
        expected_document.save()
        # BUG FIX: 'succesfully' -> 'successfully' in the client-visible message.
        return {'status': 200, 'message': 'The document was removed successfully'}

    def _parse_upload_folders(self, expected_document):
        """Build the storage folder (keyed by mine guid) and the display
        folder (keyed by mine number) for the document's category."""
        mine = Mine.find_by_mine_guid(str(expected_document.mine_guid))
        document_category = expected_document.required_document.req_document_category
        if not document_category:
            document_category = 'documents'
        folder = f'mines/{str(mine.mine_guid)}/{document_category}'
        pretty_folder = f'mines/{mine.mine_no}/{document_category}'
        return folder, pretty_folder

    def _parse_request_metadata(self):
        """Decode the tus-style 'Upload-Metadata' header into a dict.

        The header is a comma-separated list of '<key> <base64 value>' pairs;
        returns {} when the header is absent.
        """
        request_metadata = request.headers.get("Upload-Metadata")
        metadata = {}
        if not request_metadata:
            return metadata
        for key_value in request_metadata.split(","):
            (key, value) = key_value.split(" ")
            metadata[key] = base64.b64decode(value).decode("utf-8")
        return metadata
| import decimal
import uuid
import base64
import requests
import json
from datetime import datetime
from flask import request, current_app, Response
from flask_restplus import Resource, reqparse
from werkzeug.datastructures import FileStorage
from werkzeug import exceptions
from sqlalchemy.exc import DBAPIError
from ..models.mine_expected_document import MineExpectedDocument
from ....mines.mine.models.mine import Mine
from ...expected.models.mine_expected_document import MineExpectedDocument
from ...expected.models.mine_expected_document_xref import MineExpectedDocumentXref
from ...mines.models.mine_document import MineDocument
from app.extensions import api, db
from ....utils.access_decorators import requires_any_of, MINE_CREATE, MINESPACE_PROPONENT
from ....utils.resources_mixins import UserMixin, ErrorMixin
class ExpectedDocumentUploadResource(Resource, UserMixin, ErrorMixin):
    """REST resource tying uploaded files to a mine's expected documents.

    POST proxies the raw upload to the document manager service; PUT links an
    uploaded (or already existing) mine document to the expected document;
    DELETE unlinks a mine document from it.
    """

    parser = reqparse.RequestParser()
    parser.add_argument('mine_document_guid', type=str)
    parser.add_argument('document_manager_guid', type=str)
    parser.add_argument('filename', type=str)

    @api.doc(
        params={
            'expected_document_guid':
            'Required: The guid of the expected document that this upload will be satisfying.'
        })
    @requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
    def post(self, expected_document_guid):
        """Forward the upload request to the document manager and relay its
        response (status, body and raw headers) back to the client."""
        if not expected_document_guid:
            return self.create_error_payload(400, 'Expected Document GUID is required'), 400
        expected_document = MineExpectedDocument.find_by_exp_document_guid(expected_document_guid)
        if not expected_document:
            return self.create_error_payload(404, 'Expected Document not found'), 404
        metadata = self._parse_request_metadata()
        if not metadata or not metadata.get('filename'):
            return self.create_error_payload(400,
                                             'Filename not found in request metadata header'), 400
        folder, pretty_folder = self._parse_upload_folders(expected_document)
        data = {
            'folder': folder,
            'pretty_folder': pretty_folder,
            'filename': metadata.get('filename')
        }
        document_manager_URL = f'{current_app.config["DOCUMENT_MANAGER_URL"]}/document-manager'
        resp = requests.post(
            url=document_manager_URL,
            # Forward every request header except Host, which must name the
            # document manager service rather than this one.
            headers={key: value
                     for (key, value) in request.headers if key != 'Host'},
            data=data,
            cookies=request.cookies,
        )
        # Relay the upstream response verbatim so upload headers survive.
        response = Response(resp.content, resp.status_code, resp.raw.headers.items())
        return response

    @requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
    def put(self, expected_document_guid):
        """Attach a mine document to the expected document.

        Accepts either an existing ``mine_document_guid`` or a
        ``document_manager_guid`` plus ``filename`` for a new upload.
        """
        if not expected_document_guid:
            return self.create_error_payload(400, 'Expected Document GUID is required'), 400
        expected_document = MineExpectedDocument.find_by_exp_document_guid(expected_document_guid)
        if not expected_document:
            return self.create_error_payload(404, 'Expected Document not found'), 404
        data = self.parser.parse_args()
        if data.get('mine_document_guid'):
            # Associating an existing mine document.
            mine_doc = MineDocument.find_by_mine_document_guid(data.get('mine_document_guid'))
            if not mine_doc:
                return self.create_error_payload(404, 'Mine Document not found'), 404
            expected_document.mine_documents.append(mine_doc)
            db.session.commit()
        elif data.get('document_manager_guid'):
            # Register and associate a brand-new file upload.
            filename = data.get('filename')
            if not filename:
                return self.create_error_payload(400,
                                                 'Must supply filename for new file upload'), 400
            mine_doc = MineDocument(
                mine_guid=expected_document.mine_guid,
                document_manager_guid=data.get('document_manager_guid'),
                document_name=filename,
                **self.get_create_update_dict())
            expected_document.mine_documents.append(mine_doc)
            db.session.commit()
        else:
            # BUG FIX: corrected the misspelled 'GIUD'/'Docuemnt' in the
            # client-visible error message.
            return self.create_error_payload(
                400, 'Must specify either Mine Document GUID or Document Manager GUID'), 400
        return expected_document.json()

    @requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
    def delete(self, expected_document_guid=None, mine_document_guid=None):
        """Detach *mine_document_guid* from *expected_document_guid*."""
        if expected_document_guid is None or mine_document_guid is None:
            return self.create_error_payload(
                400, 'Must provide a expected document guid and a mine document guid'), 400
        expected_document = MineExpectedDocument.find_by_exp_document_guid(expected_document_guid)
        mine_document = MineDocument.find_by_mine_document_guid(mine_document_guid)
        if expected_document is None or mine_document is None:
            return self.create_error_payload(
                404, 'Either the Expected Document or the Mine Document was not found'), 404
        expected_document.mine_documents.remove(mine_document)
        expected_document.save()
        # BUG FIX: 'succesfully' -> 'successfully' in the client-visible message.
        return {'status': 200, 'message': 'The document was removed successfully'}

    def _parse_upload_folders(self, expected_document):
        """Build the storage folder (keyed by mine guid) and the display
        folder (keyed by mine number) for the document's category."""
        mine = Mine.find_by_mine_guid(str(expected_document.mine_guid))
        document_category = expected_document.required_document.req_document_category
        if not document_category:
            document_category = 'documents'
        folder = f'mines/{str(mine.mine_guid)}/{document_category}'
        pretty_folder = f'mines/{mine.mine_no}/{document_category}'
        return folder, pretty_folder

    def _parse_request_metadata(self):
        """Decode the tus-style 'Upload-Metadata' header into a dict.

        The header is a comma-separated list of '<key> <base64 value>' pairs;
        returns {} when the header is absent.
        """
        request_metadata = request.headers.get("Upload-Metadata")
        metadata = {}
        if not request_metadata:
            return metadata
        for key_value in request_metadata.split(","):
            (key, value) = key_value.split(" ")
            metadata[key] = base64.b64decode(value).decode("utf-8")
        return metadata
import tkinter as tk
from tkinter import messagebox
from threading import Thread
from time import sleep
import constants.all as c
from components.semaphore import Semaphore, SemaphoreKind
from components.record import Record, RecordKind
from components.openLogButton import OpenLogButton
from modules.finder import read
from modules.parser import parse
from modules.webclient import WebClient
from models.type import Type
from modules.searchclient import find_similar_elements as find_similar, wait_until_visible as wait_until, escape_send
from modules.webclient import is_returned_http_error as returned_error
class App(tk.Frame):
    """Main application frame.

    Loads automation sequences from the library folder on a background
    thread, executes them through the web client, and shows progress in a
    two-pane tkinter layout (counters on top, past/running/next records
    below).
    """

    def __init__(self, root, *args, **kwargs):
        tk.Frame.__init__(self, root, *args, **kwargs)
        self.sequences = []
        self.run_count = 0
        # Widgets
        self.top = tk.Frame(root, bg=c.FRAME_BG_COLOR)
        self.top.place(relx=0.02, rely=0.02, relwidth=0.98, relheight=0.50)
        self.all_nr = Semaphore(self.top, SemaphoreKind.ALL)
        self.success_nr = Semaphore(self.top, SemaphoreKind.SUCCESS)
        self.err_nr = Semaphore(self.top, SemaphoreKind.ERRORS)
        self.bottom = tk.Frame(root, bg=c.FRAME_BG_COLOR)
        self.bottom.place(relx=0.02, rely=0.52, relwidth=0.98, relheight=0.44)
        self.past = Record(self.bottom, RecordKind.PAST)
        self.running = Record(self.bottom, RecordKind.RUNNING)
        self.next = Record(self.bottom, RecordKind.NEXT)
        # Start loading only after every widget exists: get_data() ends up
        # touching self.all_nr and the Record widgets, so starting the thread
        # at the top of __init__ (as the code previously did) raced widget
        # construction.
        Thread(target=self.get_data).start()

    def get_data(self):
        """Read and parse settings/sequences from the library folder, then run them."""
        data = read()
        self.sequences = parse(data)
        WebClient()
        self.all_nr.update(fix=len(self.sequences))
        self.begin()

    def begin(self):
        """Execute every sequence in order, skipping sections whose earlier clicks failed."""
        for seq in self.sequences:
            # if any click on a previous section of the same file failed, skip this section
            # NOTE(review): this compares s.type against c.CLICK while perform()
            # compares against Type.CLICK - confirm both constants agree.
            click_failed_before = any(
                s.file_id == seq.file_id and s.section_id < seq.section_id
                and not s.success and s.type == c.CLICK
                for s in self.sequences)
            if click_failed_before:
                print('skip for failed click: ', seq.desc)
            else:
                # search all occurrences, then execute them all
                print('begin: ', seq.attribute_value, seq.auto_find)
                if seq.auto_find:
                    # sequence is split into several sequences
                    self.create_similar(seq)
                else:
                    self.perform(seq)
                sleep(seq.wait)
        self.create_report()
        print('Finished')

    def perform(self, seq):
        """Execute a single click/input sequence and record the outcome on *seq*."""
        print("perform... ", seq.desc, seq.type, seq.attribute_id, seq.attribute_value)
        seq.invoked = True
        try:
            element = wait_until(seq)
            if seq.type == Type.CLICK:
                element.click()
                seq.success = True
                # if the browser console logged an error, flag the click as failed
                for log in c.DRIVER.get_log('browser'):
                    if log['level'] == 'SEVERE':
                        # double quotes inside the f-string: same-quote nesting
                        # is a SyntaxError before Python 3.12
                        er = f'{seq.attribute_id}: {seq.attribute_value}, err: {log["message"]}'
                        seq.error = er
                        print('Err:', seq.error)
                        seq.success = False
                        seq.failed = True
                        break
                print('success: ', seq.success, ' - failed: ', seq.failed)
            elif seq.type == Type.INPUT:
                if len(seq.insert_text) > 0:
                    seq.success = True
                    element.send_keys(seq.insert_text)
                else:
                    seq.error = f'Unknown type of in then sequence : {seq.type}'
                    seq.failed = True
            else:
                print("seq.type: ", seq.type)
                # self.err_nr.update(add=1)
                seq.error = f'Internal application error: Unknown sequence type: {seq.type}'
                seq.failed = True
        except Exception:
            # (previously `except (ValueError, Exception)` - ValueError is
            # already covered by Exception)
            err = f"Unable to locate an element with {seq.attribute_id} expression {seq.attribute_value}."
            returned_err = returned_error()
            if returned_err[0]:
                err = f'{returned_err[1]}'
            print('HTML Error: ', err)
            seq.error = err
            seq.failed = True
        self.update_records()

    # Create sub events - Automatic recognition based on similar id inside xpath
    def create_similar(self, s):
        """Expand *s* into similar sequences discovered on the page, then run them."""
        # Find similar items by ID and remove the same to avoid duplicates
        sim = find_similar(s)
        # list of attribute values already known
        existing_attribute_values = [x.attribute_value for x in self.sequences]
        # add non-existing sequences and sort them
        similar = list(filter(lambda x: x.attribute_value not in existing_attribute_values, sim))
        similar.sort(key=lambda x: x.attribute_value)
        # extend list after current sequence
        ind = self.sequences.index(s)
        self.sequences[ind + 1:1] = similar
        # update score
        self.all_nr.update(fix=len(self.sequences))
        self.recursive_perform()

    def recursive_perform(self):
        """Perform not-yet-invoked auto_find sequences, discovering new similar
        elements as it goes and restarting until none remain."""
        # recursion only for elements which weren't invoked and are marked with auto_find
        undone = [x for x in self.sequences if not x.invoked and x.auto_find]
        # avoid duplications
        existing_attribute_values = [x.attribute_value for x in self.sequences]
        for u in undone:
            # click and find other similar
            self.perform(u)
            similar = find_similar(u)
            similar.sort(key=lambda x: x.attribute_value)
            for i, s in enumerate(similar):
                if s.attribute_value not in existing_attribute_values:
                    ind = self.sequences.index(u)
                    # if not existing add it to sequences after previous
                    self.sequences.insert(ind + i, s)
            # recheck for invoked after adding new elements
            not_invoked = list(filter(lambda x: not x.invoked and x.auto_find, self.sequences))
            if len(not_invoked) > 0:
                # check for modal pop up and close it if necessary:
                escape_send()
                # restart
                self.recursive_perform()
                break
            else:
                break

    def update_records(self):
        """Refresh the top counters and the past/running/next record widgets."""
        nr_of_seq = len(self.sequences)
        if nr_of_seq == 0:
            messagebox.showerror('Error', 'No Sequences found!')
            return
        invoked = list(filter(lambda x: x.invoked, self.sequences))
        counter = len(invoked) + 1
        success = list(filter(lambda x: x.success and not x.failed, self.sequences))
        failed = list(filter(lambda x: x.failed, self.sequences))
        # update counters on top
        self.all_nr.update(fix=nr_of_seq)
        self.success_nr.update(fix=len(success))
        self.err_nr.update(fix=len(failed))
        # update records on bottom
        # current record
        if len(invoked) < 1:
            self.running.clear()
            return
        txt = invoked[-1].desc
        if self.sequences[-1].wait > 0:
            txt += f' waiting {self.sequences[-1].wait}s'
        self.running.update(txt)
        # next record
        following = counter + 1
        if following < nr_of_seq:
            self.next.update(self.sequences[following].desc)
        else:
            self.next.clear()
        # previous record
        if len(success) > 0:
            if self.sequences[-1].error:
                self.past.update(self.sequences[-1].desc, err=True)
            else:
                self.past.update(self.sequences[-1].desc)
        else:
            self.past.clear()

    def create_report(self):
        """Replace the bottom pane with a final pass/fail summary and a log button."""
        for widgets in self.bottom.winfo_children():
            widgets.destroy()
        finished_lbl = tk.Label(self.bottom, justify='left', anchor="w",
                                bg=c.FRAME_BG_COLOR, font=c.END_MESSAGE_FONT)
        failed = list(filter(lambda x: x.failed, self.sequences))
        col = c.SCORE_COLOR
        message = ':) Good job! No errors found.'
        if len(failed) > 0:
            col = c.ERROR_COLOR
            message = ':( Errors found.'
        OpenLogButton(self.bottom, failed)
        finished_lbl.config(text=message, fg=col)
        finished_lbl.place(relx=0.02, rely=0.25, relwidth=0.85, relheight=0.25)
| import tkinter as tk
from tkinter import messagebox
from threading import Thread
from time import sleep
import constants.all as c
from components.semaphore import Semaphore, SemaphoreKind
from components.record import Record, RecordKind
from components.openLogButton import OpenLogButton
from modules.finder import read
from modules.parser import parse
from modules.webclient import WebClient
from models.type import Type
from modules.searchclient import find_similar_elements as find_similar, wait_until_visible as wait_until, escape_send
from modules.webclient import is_returned_http_error as returned_error
class App(tk.Frame):
    """Main application frame: loads automation sequences on a background
    thread, executes them through the web client, and displays progress
    (counters on top, past/running/next records below)."""

    def __init__(self, root, *args, **kwargs):
        tk.Frame.__init__(self, root, *args, **kwargs)
        # NOTE(review): this worker may reach begin() before the widgets below
        # exist - presumably read()/parse() take long enough; confirm.
        Thread(target=self.get_data).start()
        self.sequences = []
        self.run_count = 0
        # Widgets
        self.top = tk.Frame(root, bg=c.FRAME_BG_COLOR)
        self.top.place(relx=0.02, rely=0.02, relwidth=0.98, relheight=0.50)
        self.all_nr = Semaphore(self.top, SemaphoreKind.ALL)
        self.success_nr = Semaphore(self.top, SemaphoreKind.SUCCESS)
        self.err_nr = Semaphore(self.top, SemaphoreKind.ERRORS)
        self.bottom = tk.Frame(root, bg=c.FRAME_BG_COLOR)
        self.bottom.place(relx=0.02, rely=0.52, relwidth=0.98, relheight=0.44)
        self.past = Record(self.bottom, RecordKind.PAST)
        self.running = Record(self.bottom, RecordKind.RUNNING)
        self.next = Record(self.bottom, RecordKind.NEXT)

    def get_data(self):
        """Load and parse sequences from the library folder, then start the run."""
        # imports settings and sequences from the library folder and parses them into objects
        data = read()
        self.sequences = parse(data)
        WebClient()
        self.all_nr.update(fix=len(self.sequences))
        self.begin()

    def begin(self):
        """Execute every sequence in order; sections after a failed click are skipped."""
        for seq in self.sequences:
            # if any click on previous elements in this file failed it will skip next sections
            failed_clicks = filter(lambda
                s: s.file_id == seq.file_id and s.section_id < seq.section_id and not s.success and s.type == c.CLICK,
                self.sequences)
            if len(list(failed_clicks)) > 0:
                print('skip for failed click: ', seq.desc)
            else:
                # search all occurrences in then execute them all
                print('begin: ', seq.attribute_value, seq.auto_find)
                if seq.auto_find:
                    # sequence split to several sequences
                    self.create_similar(seq)
                else:
                    self.perform(seq)
                sleep(seq.wait)
        self.create_report()
        print('Finished')

    def perform(self, seq):
        """Execute a single click/input sequence and record its outcome on *seq*."""
        print("perform... ", seq.desc, seq.type, seq.attribute_id, seq.attribute_value)
        seq.invoked = True
        try:
            element = wait_until(seq)
            if seq.type == Type.CLICK:
                element.click()
                seq.success = True
                # if console shows error
                for log in c.DRIVER.get_log('browser'):
                    if log['level'] == 'SEVERE':
                        er = f'{seq.attribute_id}: {seq.attribute_value}, err: {log["message"]}'
                        seq.error = er
                        print('Err:', seq.error)
                        seq.success = False
                        seq.failed = True
                        break
                print('success: ', seq.success, ' - failed: ', seq.failed)
            elif seq.type == Type.INPUT:
                if len(seq.insert_text) > 0:
                    seq.success = True
                    element.send_keys(seq.insert_text)
                else:
                    seq.error = f'Unknown type of in then sequence : {seq.type}'
                    seq.failed = True
            else:
                print("seq.type: ", seq.type)
                # self.err_nr.update(add=1)
                seq.error = f'Internal application error: Unknown sequence type: {seq.type}'
                seq.failed = True
        # NOTE(review): ValueError is redundant here - Exception already covers it
        except (ValueError, Exception) as e:
            err = f"Unable to locate an element with {seq.attribute_id} expression {seq.attribute_value}."
            returned_err = returned_error()
            if returned_err[0]:
                err = f'{returned_err[1]}'
            print('HTML Error: ', err )
            seq.error = err
            seq.failed = True
        self.update_records()

    # Create sub events - Automatic recognition based on similar id inside xpath
    def create_similar(self, s):
        """Expand *s* into similar sequences found on the page, then run them."""
        # Find similar items by ID and remove the same to avoid duplicates
        sim = find_similar(s)
        # list of attributes values
        existing_attribute_values = [x.attribute_value for x in self.sequences]
        # add non-existing sequences and sort them
        similar = list(filter(lambda x: x.attribute_value not in existing_attribute_values, sim))
        similar.sort(key=lambda x: x.attribute_value)
        # extend list after current sequence
        ind = self.sequences.index(s)
        self.sequences[ind + 1:1] = similar
        # update score
        self.all_nr.update(fix=len(self.sequences))
        self.recursive_perform()

    def recursive_perform(self):
        """Perform not-yet-invoked auto_find sequences, discovering new similar
        elements and restarting until none remain."""
        # recursion only for elements which weren't invoked and are marked with auto_find
        undone = [x for x in self.sequences if not x.invoked and x.auto_find]
        # avoid duplications
        existing_attribute_values = [x.attribute_value for x in self.sequences]
        for u in undone:
            # click and find other similar
            self.perform(u)
            similar = find_similar(u)
            similar.sort(key=lambda x: x.attribute_value)
            for i, s in enumerate(similar):
                if s.attribute_value not in existing_attribute_values:
                    ind = self.sequences.index(u)
                    # if not existing add it to sequences after previous
                    self.sequences.insert(ind+i, s)
            # recheck for invoked after adding new elements
            not_invoked = list(filter(lambda x: not x.invoked and x.auto_find, self.sequences))
            if len(not_invoked) > 0:
                # check for modal pop up and close it if necessary:
                escape_send()
                # restart
                self.recursive_perform()
                break
            else:
                break

    def update_records(self):
        """Refresh the top counters and the past/running/next record widgets."""
        nr_of_seq = len(self.sequences)
        if nr_of_seq == 0:
            messagebox.showerror('Error', 'No Sequences found!')
            return
        invoked = list(filter(lambda x: x.invoked, self.sequences))
        counter = len(invoked) + 1
        success = list(filter(lambda x: x.success and not x.failed, self.sequences))
        failed = list(filter(lambda x: x.failed, self.sequences))
        # update counters on top
        self.all_nr.update(fix=nr_of_seq)
        self.success_nr.update(fix=len(success))
        self.err_nr.update(fix=len(failed))
        # update records on bottom
        # current record
        if len(invoked) < 1:
            self.running.clear()
            return
        txt = invoked[-1].desc
        if self.sequences[-1].wait > 0:
            txt += f' waiting {self.sequences[-1].wait}s'
        self.running.update(txt)
        # next record
        following = counter + 1
        if following < nr_of_seq:
            self.next.update(self.sequences[following].desc)
        else:
            self.next.clear()
        # previous record
        if len(success) > 0:
            if self.sequences[-1].error:
                self.past.update(self.sequences[-1].desc, err=True)
            else:
                self.past.update(self.sequences[-1].desc)
        else:
            self.past.clear()

    def create_report(self):
        """Replace the bottom pane with a final pass/fail summary and a log button."""
        for widgets in self.bottom.winfo_children():
            widgets.destroy()
        finished_lbl = tk.Label(self.bottom, justify='left', anchor="w",
                                bg=c.FRAME_BG_COLOR, font=c.END_MESSAGE_FONT)
        failed = list(filter(lambda x: x.failed, self.sequences))
        col = c.SCORE_COLOR
        message = ':) Good job! No errors found.'
        if len(failed) > 0:
            col = c.ERROR_COLOR
            message = ':( Errors found.'
        OpenLogButton(self.bottom, failed)
        finished_lbl.config(text=message, fg=col)
        finished_lbl.place(relx=0.02, rely=0.25, relwidth=0.85, relheight=0.25)
|
#!/usr/bin/env python3
import concurrent.futures
import json
import os
import subprocess
import sys
import tempfile
import yaml
import model.kubernetes
import kube.ctx
from ci.util import (
Failure,
info,
which,
)
own_dir = os.path.abspath(os.path.dirname(__file__))
repo_dir = os.path.abspath(os.path.join(own_dir, os.pardir))
sys.path.insert(0, os.path.join(repo_dir, 'test'))
import pubip_remedy_test as pubip_test # noqa
# TODO: failed_vm_test fails on newer Azure, and it's not clear how to fix it since it was based on
# some weird Azure behavior that was meanwhile patched.
# import failed_vm_test as vm_test # noqa
import cleanup as test_cleanup # noqa
# Chart under <repo>/charts/ that is deployed for the test run; also used as release name.
HELM_CHART_NAME = 'remedy-controller-azure'
# Namespace the chart is installed into and the tests run against.
HELM_CHART_DEPLOYMENT_NAMESPACE = 'default'
# Reapply attempts mirrored into the chart values for the (currently disabled) failed-vm test.
VM_TEST_REQUIRED_ATTEMPTS = 4
# Directory with the testmachinery-provided kubeconfigs (required env var).
KUBECONFIG_DIR = os.environ['TM_KUBECONFIG_PATH']
# NOTE(review): appears unused in this module - confirm before removing.
CONCOURSE_HELM_CHART_REPO = "https://concourse-charts.storage.googleapis.com/"
# Shared kubernetes context, used to create namespaces during deployment.
kube_ctx = kube.ctx.Ctx()
def main():
    """Deploy the remedy-controller chart into the test shoot, run the
    public-IP remedy integration test, and always clean up afterwards.

    Exits with status 1 when the test fails. Requires TM_KUBECONFIG_PATH and
    the Azure credential environment variables to be set.
    """
    kubeconfig_path = os.path.join(KUBECONFIG_DIR, 'shoot.config')
    os.environ['KUBECONFIG'] = kubeconfig_path
    test_credentials = credentials_from_environ()
    with open(kubeconfig_path, 'r') as f:
        kubeconfig = yaml.safe_load(f.read())
    kubernetes_config = model.kubernetes.KubernetesConfig(
        '',
        {'kubeconfig': kubeconfig},  # MUST be positional
    )
    # vm failer expects the credentials at one special location. TODO: Remove this once its adjusted
    expected_dir = os.path.join(repo_dir, 'dev')
    expected_file_path = os.path.join(expected_dir, 'credentials.yaml')
    # exist_ok: a bare os.mkdir() crashed on re-runs when 'dev' already existed
    os.makedirs(expected_dir, exist_ok=True)
    with open(expected_file_path, mode='w') as f:
        yaml.safe_dump(test_credentials, f)
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as credentials_file:
        yaml.safe_dump(test_credentials, credentials_file)
        credentials_path = os.path.abspath(credentials_file.name)
    if not (version := os.environ.get('TEST_VERSION')):
        # fallback to local file if env-var is not set
        with open(os.path.join(repo_dir, 'VERSION')) as version_file:
            version = version_file.read()
    chart_dir = os.path.join(repo_dir, 'charts', HELM_CHART_NAME)
    values = create_helm_values(chart_dir, version, credentials_path)
    print(f'Deploying Helm chart for version {version}')
    execute_helm_deployment(
        kubernetes_config,
        HELM_CHART_DEPLOYMENT_NAMESPACE,
        chart_dir,
        HELM_CHART_NAME,
        values,
    )
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        pubip_future = executor.submit(
            pubip_test.run_test,
            path_to_credentials_file=credentials_path,
            path_to_kubeconfig=kubeconfig_path,
            test_namespace=HELM_CHART_DEPLOYMENT_NAMESPACE,
        )
        # failed_vm_future = executor.submit(
        #     vm_test.run_test,
        #     path_to_credentials_file=credentials_path,
        #     path_to_kubeconfig=kubeconfig_path,
        #     required_attempts=VM_TEST_REQUIRED_ATTEMPTS,
        #     test_namespace=HELM_CHART_DEPLOYMENT_NAMESPACE,
        #     check_interval=10,
        #     run_duration=360,
        # )
        pubip_test_ok = False
        # vm_test_ok = False
        try:
            pubip_test_ok = pubip_future.result()
            # vm_test_ok = failed_vm_future.result()
        finally:
            # always tear down the deployment and test resources, even on failure
            uninstall_helm_deployment(
                kubernetes_config,
                HELM_CHART_DEPLOYMENT_NAMESPACE,
                HELM_CHART_NAME,
            )
            test_cleanup.cleanup(
                path_to_credentials_file=credentials_path,
                path_to_kubeconfig=kubeconfig_path,
                test_namespace=HELM_CHART_DEPLOYMENT_NAMESPACE,
            )
    if not pubip_test_ok:  # or not vm_test_ok:
        # sys.exit instead of the site-provided exit() builtin
        sys.exit(1)
def credentials_from_environ():
    """Collect the Azure credentials for the test shoot from environment variables.

    Returns a dict in the shape expected by the controller's cloud-provider
    config. Raises KeyError if any required variable is missing.
    """
    return {
        'aadClientId': os.environ['CLIENT_ID'],
        'aadClientSecret': os.environ['CLIENT_SECRET'],
        'tenantId': os.environ['TENANT_ID'],
        'subscriptionId': os.environ['SUBSCRIPTION_ID'],
        # single quotes inside the f-string: same-quote nesting is a
        # SyntaxError before Python 3.12
        'resourceGroup': f"shoot--it--{os.environ['SHOOT_NAME']}",
        'location': os.environ['REGION'],
    }
def create_helm_values(chart_dir, version, path_to_credentials_file):
    """Load the chart's default values.yaml and override it for the test run.

    :param chart_dir: directory of the Helm chart (must contain values.yaml)
    :param version: image tag to deploy
    :param path_to_credentials_file: YAML file with the Azure credentials
    :return: the values dict ready to be passed to helm
    """
    # (previously wrapped in a pointless single-argument os.path.join)
    with open(path_to_credentials_file) as credentials_file:
        credentials = yaml.safe_load(credentials_file)
    with open(os.path.join(chart_dir, 'values.yaml')) as values_file:
        values = yaml.safe_load(values_file)
    values['image']['tag'] = version
    values['cloudProviderConfig'] = json.dumps(credentials)
    # lower default values in order to speed up failed-vm-test
    values['config']['azure']['failedVMRemedy']['requeueInterval'] = '30s'
    values['config']['azure']['failedVMRemedy']['maxReapplyAttempts'] = VM_TEST_REQUIRED_ATTEMPTS
    # set the node selector so that the remedy-controller _wont_ run on the nodes that
    # will be failed
    values['nodeSelector'] = {'worker.garden.sapcloud.io/group': 'test-nodes'}
    return values
def uninstall_helm_deployment(
    kubernetes_config,
    namespace: str,
    release_name: str,
):
    """Run `helm uninstall <release_name>` against the cluster in *kubernetes_config*."""
    helm_executable = ensure_helm_setup()
    KUBECONFIG_FILE_NAME = "kubecfg"
    # prepare subprocess args; the kubeconfig is referenced via a relative path below
    subprocess_args = [
        helm_executable,
        "uninstall",
        release_name,
        "--namespace",
        namespace,
    ]
    helm_env = os.environ.copy()
    helm_env['KUBECONFIG'] = KUBECONFIG_FILE_NAME
    # create temp dir containing all previously referenced files
    with tempfile.TemporaryDirectory() as temp_dir:
        with open(os.path.join(temp_dir, KUBECONFIG_FILE_NAME), 'w') as f:
            yaml.dump(kubernetes_config.kubeconfig(), f)
        # run helm from inside the temporary directory so that the prepared file paths work
        subprocess.run(subprocess_args, check=True, cwd=temp_dir, env=helm_env)
def ensure_helm_setup():
    """Ensure up-to-date helm installation. Return the path to the found Helm executable"""
    # Images ship both helm v3 and v2; prefer 'helm3', fall back to plain 'helm'
    # so local execution stays convenient.
    try:
        return which('helm3')
    except Failure:
        info("No executable 'helm3' found in path. Falling back to 'helm'")
        return which('helm')
# Stuff used for yaml formatting, when dumping a dictionary
class LiteralStr(str):
    """Marker str subclass: values of this type are dumped with the YAML
    block style indicator | (see literal_str_representer)."""
def literal_str_representer(dumper, data):
    """Represent *data* as a YAML literal block scalar ('|' style indicator)."""
    scalar_tag = 'tag:yaml.org,2002:str'
    return dumper.represent_scalar(scalar_tag, data, style='|')
def execute_helm_deployment(
    kubernetes_config,
    namespace: str,
    chart_name: str,
    release_name: str,
    *values: dict,
    chart_version: str=None,
):
    """Install or upgrade a Helm release with the given values dicts.

    Each dict in *values* is dumped to its own temporary values file and
    passed to `helm upgrade --install --force`.
    """
    yaml.add_representer(LiteralStr, literal_str_representer)
    helm_executable = ensure_helm_setup()
    # create namespace if absent
    namespace_helper = kube_ctx.namespace_helper()
    if not namespace_helper.get_namespace(namespace):
        namespace_helper.create_namespace(namespace)
    KUBECONFIG_FILE_NAME = "kubecfg"
    # prepare subprocess args using relative file paths for the values files
    subprocess_args = [
        helm_executable,
        "upgrade",
        release_name,
        chart_name,
        "--install",
        "--force",
        "--namespace",
        namespace,
    ]
    if chart_version:
        subprocess_args += ["--version", chart_version]
    # one "--values valueN" pair per supplied values dict; files are written below
    for idx, _ in enumerate(values):
        subprocess_args.append("--values")
        subprocess_args.append("value" + str(idx))
    helm_env = os.environ.copy()
    helm_env['KUBECONFIG'] = KUBECONFIG_FILE_NAME
    # create temp dir containing all previously referenced files
    with tempfile.TemporaryDirectory() as temp_dir:
        for idx, value in enumerate(values):
            with open(os.path.join(temp_dir, "value" + str(idx)), 'w') as f:
                yaml.dump(value, f)
        with open(os.path.join(temp_dir, KUBECONFIG_FILE_NAME), 'w') as f:
            yaml.dump(kubernetes_config.kubeconfig(), f)
        # run helm from inside the temporary directory so that the prepared file paths work
        subprocess.run(subprocess_args, check=True, cwd=temp_dir, env=helm_env)
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
import concurrent.futures
import json
import os
import subprocess
import sys
import tempfile
import yaml
import model.kubernetes
import kube.ctx
from ci.util import (
Failure,
info,
which,
)
own_dir = os.path.abspath(os.path.dirname(__file__))
repo_dir = os.path.abspath(os.path.join(own_dir, os.pardir))
sys.path.insert(0, os.path.join(repo_dir, 'test'))
import pubip_remedy_test as pubip_test # noqa
# TODO: failed_vm_test fails on newer Azure, and it's not clear how to fix it since it was based on
# some weird Azure behavior that was meanwhile patched.
# import failed_vm_test as vm_test # noqa
import cleanup as test_cleanup # noqa
# Chart under <repo>/charts/ that is deployed for the test run; also used as release name.
HELM_CHART_NAME = 'remedy-controller-azure'
# Namespace the chart is installed into and the tests run against.
HELM_CHART_DEPLOYMENT_NAMESPACE = 'default'
# Reapply attempts mirrored into the chart values for the (currently disabled) failed-vm test.
VM_TEST_REQUIRED_ATTEMPTS = 4
# Directory with the testmachinery-provided kubeconfigs (required env var).
KUBECONFIG_DIR = os.environ['TM_KUBECONFIG_PATH']
# NOTE(review): appears unused in this module - confirm before removing.
CONCOURSE_HELM_CHART_REPO = "https://concourse-charts.storage.googleapis.com/"
# Shared kubernetes context, used to create namespaces during deployment.
kube_ctx = kube.ctx.Ctx()
def main():
    """Deploy the remedy-controller chart into the test shoot, run the
    public-IP remedy integration test, and always clean up afterwards.

    Exits with status 1 when the test fails. Requires TM_KUBECONFIG_PATH and
    the Azure credential environment variables to be set.
    """
    kubeconfig_path = os.path.join(KUBECONFIG_DIR, 'shoot.config')
    os.environ['KUBECONFIG'] = kubeconfig_path
    test_credentials = credentials_from_environ()
    with open(kubeconfig_path, 'r') as f:
        kubeconfig = yaml.safe_load(f.read())
    kubernetes_config = model.kubernetes.KubernetesConfig(
        '',
        {'kubeconfig': kubeconfig},  # MUST be positional
    )
    # vm failer expects the credentials at one special location. TODO: Remove this once its adjusted
    expected_dir = os.path.join(repo_dir, 'dev')
    expected_file_path = os.path.join(expected_dir, 'credentials.yaml')
    # NOTE(review): os.mkdir raises FileExistsError when 'dev' already exists -
    # consider os.makedirs(..., exist_ok=True) for re-runs.
    os.mkdir(expected_dir)
    with open(expected_file_path, mode='w') as f:
        yaml.safe_dump(test_credentials, f)
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as credentials_file:
        yaml.safe_dump(test_credentials, credentials_file)
        credentials_path = os.path.abspath(credentials_file.name)
    if not (version := os.environ.get('TEST_VERSION')):
        # fallback to local file if env-var is not set
        with open(os.path.join(repo_dir, 'VERSION')) as version_file:
            version = version_file.read()
    chart_dir = os.path.join(repo_dir, 'charts', HELM_CHART_NAME)
    values = create_helm_values(chart_dir, version, credentials_path)
    print(f'Deploying Helm chart for version {version}')
    execute_helm_deployment(
        kubernetes_config,
        HELM_CHART_DEPLOYMENT_NAMESPACE,
        chart_dir,
        HELM_CHART_NAME,
        values,
    )
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        pubip_future = executor.submit(
            pubip_test.run_test,
            path_to_credentials_file=credentials_path,
            path_to_kubeconfig=kubeconfig_path,
            test_namespace=HELM_CHART_DEPLOYMENT_NAMESPACE,
        )
        # failed_vm_future = executor.submit(
        #     vm_test.run_test,
        #     path_to_credentials_file=credentials_path,
        #     path_to_kubeconfig=kubeconfig_path,
        #     required_attempts=VM_TEST_REQUIRED_ATTEMPTS,
        #     test_namespace=HELM_CHART_DEPLOYMENT_NAMESPACE,
        #     check_interval=10,
        #     run_duration=360,
        # )
        pubip_test_ok = False
        # vm_test_ok = False
        try:
            pubip_test_ok = pubip_future.result()
            # vm_test_ok = failed_vm_future.result()
        finally:
            # always tear down the deployment and test resources, even on failure
            uninstall_helm_deployment(
                kubernetes_config,
                HELM_CHART_DEPLOYMENT_NAMESPACE,
                HELM_CHART_NAME,
            )
            test_cleanup.cleanup(
                path_to_credentials_file=credentials_path,
                path_to_kubeconfig=kubeconfig_path,
                test_namespace=HELM_CHART_DEPLOYMENT_NAMESPACE,
            )
    if not pubip_test_ok:  # or not vm_test_ok:
        exit(1)
def credentials_from_environ():
    """Build the Azure credentials mapping for the test shoot from the
    process environment (raises KeyError when a variable is missing)."""
    env = os.environ
    return {
        'aadClientId': env['CLIENT_ID'],
        'aadClientSecret': env['CLIENT_SECRET'],
        'tenantId': env['TENANT_ID'],
        'subscriptionId': env['SUBSCRIPTION_ID'],
        'resourceGroup': f'shoot--it--{env["SHOOT_NAME"]}',
        'location': env['REGION'],
    }
def create_helm_values(chart_dir, version, path_to_credentials_file):
    """Load the chart's default values.yaml and override it for the test run.

    :param chart_dir: directory of the Helm chart (must contain values.yaml)
    :param version: image tag to deploy
    :param path_to_credentials_file: YAML file with the Azure credentials
    :return: the values dict ready to be passed to helm
    """
    # NOTE(review): os.path.join with a single argument is a no-op
    with open(os.path.join(path_to_credentials_file)) as credentials_file:
        credentials = yaml.safe_load(credentials_file)
    with open(os.path.join(chart_dir, 'values.yaml')) as values_file:
        values = yaml.safe_load(values_file)
    values['image']['tag'] = version
    values['cloudProviderConfig'] = json.dumps(credentials)
    # lower default values in order to speed up failed-vm-test
    values['config']['azure']['failedVMRemedy']['requeueInterval'] = '30s'
    values['config']['azure']['failedVMRemedy']['maxReapplyAttempts'] = VM_TEST_REQUIRED_ATTEMPTS
    # set the node selector so that the remedy-controller _wont_ run on the nodes that
    # will be failed
    values['nodeSelector'] = {'worker.garden.sapcloud.io/group': 'test-nodes'}
    return values
def uninstall_helm_deployment(
    kubernetes_config,
    namespace: str,
    release_name: str,
):
    """Run `helm uninstall <release_name>` against the cluster in *kubernetes_config*."""
    helm_executable = ensure_helm_setup()
    KUBECONFIG_FILE_NAME = "kubecfg"
    # prepare subprocess args; the kubeconfig is referenced via a relative path below
    subprocess_args = [
        helm_executable,
        "uninstall",
        release_name,
        "--namespace",
        namespace,
    ]
    helm_env = os.environ.copy()
    helm_env['KUBECONFIG'] = KUBECONFIG_FILE_NAME
    # create temp dir containing all previously referenced files
    with tempfile.TemporaryDirectory() as temp_dir:
        with open(os.path.join(temp_dir, KUBECONFIG_FILE_NAME), 'w') as f:
            yaml.dump(kubernetes_config.kubeconfig(), f)
        # run helm from inside the temporary directory so that the prepared file paths work
        subprocess.run(subprocess_args, check=True, cwd=temp_dir, env=helm_env)
def ensure_helm_setup():
    """Ensure up-to-date helm installation. Return the path to the found Helm executable"""
    # we currently have both helmV3 and helmV2 in our images. To keep it convenient for local
    # execution, try both
    try:
        helm_executable = which('helm3')
    except Failure:
        # 'which' raises Failure when the binary is absent; fall back to plain 'helm'
        info("No executable 'helm3' found in path. Falling back to 'helm'")
        helm_executable = which('helm')
    return helm_executable
# Stuff used for yaml formatting, when dumping a dictionary
class LiteralStr(str):
    """Marker str subclass: values of this type are dumped with the YAML
    block style indicator | (see literal_str_representer)."""
def literal_str_representer(dumper, data):
    """Used to create yaml block style indicator"""
    # Emit the string with YAML's literal block style ('|') instead of plain style.
    return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
def execute_helm_deployment(
    kubernetes_config,
    namespace: str,
    chart_name: str,
    release_name: str,
    *values: dict,
    chart_version: str=None,
):
    """Install or upgrade a Helm release with the given values dicts.

    Each dict in *values* is dumped to its own temporary values file and
    passed to `helm upgrade --install --force`.
    """
    yaml.add_representer(LiteralStr, literal_str_representer)
    helm_executable = ensure_helm_setup()
    # create namespace if absent
    namespace_helper = kube_ctx.namespace_helper()
    if not namespace_helper.get_namespace(namespace):
        namespace_helper.create_namespace(namespace)
    KUBECONFIG_FILE_NAME = "kubecfg"
    # prepare subprocess args using relative file paths for the values files
    subprocess_args = [
        helm_executable,
        "upgrade",
        release_name,
        chart_name,
        "--install",
        "--force",
        "--namespace",
        namespace,
    ]
    if chart_version:
        subprocess_args += ["--version", chart_version]
    # one "--values valueN" pair per supplied values dict; files are written below
    for idx, _ in enumerate(values):
        subprocess_args.append("--values")
        subprocess_args.append("value" + str(idx))
    helm_env = os.environ.copy()
    helm_env['KUBECONFIG'] = KUBECONFIG_FILE_NAME
    # create temp dir containing all previously referenced files
    with tempfile.TemporaryDirectory() as temp_dir:
        for idx, value in enumerate(values):
            with open(os.path.join(temp_dir, "value" + str(idx)), 'w') as f:
                yaml.dump(value, f)
        with open(os.path.join(temp_dir, KUBECONFIG_FILE_NAME), 'w') as f:
            yaml.dump(kubernetes_config.kubeconfig(), f)
        # run helm from inside the temporary directory so that the prepared file paths work
        subprocess.run(subprocess_args, check=True, cwd=temp_dir, env=helm_env)
if __name__ == '__main__':
main()
|
import os
import torch
import pandas as pd
from PIL import Image
import numpy as np
from wilds.datasets.wilds_dataset import WILDSDataset
from wilds.common.grouper import CombinatorialGrouper
from wilds.common.metrics.all_metrics import Accuracy
class WaterbirdsDataset(WILDSDataset):
    """
    The Waterbirds dataset.
    This dataset is not part of the official WILDS benchmark.
    We provide it for convenience and to facilitate comparisons to previous work.
    Supported `split_scheme`:
        'official'
    Input (x):
        Images of birds against various backgrounds that have already been cropped and centered.
    Label (y):
        y is binary. It is 1 if the bird is a waterbird (e.g., duck), and 0 if it is a landbird.
    Metadata:
        Each image is annotated with whether the background is a land or water background.
    Original publication:
        @inproceedings{sagawa2019distributionally,
          title = {Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization},
          author = {Sagawa, Shiori and Koh, Pang Wei and Hashimoto, Tatsunori B and Liang, Percy},
          booktitle = {International Conference on Learning Representations},
          year = {2019}
        }
    The dataset was constructed from the CUB-200-2011 dataset and the Places dataset:
        @techreport{WahCUB_200_2011,
          Title = {{The Caltech-UCSD Birds-200-2011 Dataset}},
          Author = {Wah, C. and Branson, S. and Welinder, P. and Perona, P. and Belongie, S.},
          Year = {2011}
          Institution = {California Institute of Technology},
          Number = {CNS-TR-2011-001}
        }
        @article{zhou2017places,
          title = {Places: A 10 million Image Database for Scene Recognition},
          author = {Zhou, Bolei and Lapedriza, Agata and Khosla, Aditya and Oliva, Aude and Torralba, Antonio},
          journal ={IEEE Transactions on Pattern Analysis and Machine Intelligence},
          year = {2017},
          publisher = {IEEE}
        }
    License:
        The use of this dataset is restricted to non-commercial research and educational purposes.
    """
    _dataset_name = 'waterbirds'
    _versions_dict = {
        '1.0': {
            'download_url': 'https://worksheets.codalab.org/rest/bundles/0x505056d5cdea4e4eaa0e242cbfe2daa4/contents/blob/',
            'compressed_size': None}}

    def __init__(self, version=None, root_dir='data', download=False, split_scheme='official'):
        self._version = version
        self._data_dir = self.initialize_data_dir(root_dir, download)
        if not os.path.exists(self.data_dir):
            raise ValueError(
                f'{self.data_dir} does not exist yet. Please generate the dataset first.')
        # Read in metadata
        # Note: metadata_df is one-indexed.
        metadata_df = pd.read_csv(
            os.path.join(self.data_dir, 'metadata.csv'))
        # Get the y values
        self._y_array = torch.LongTensor(metadata_df['y'].values)
        self._y_size = 1
        self._n_classes = 2
        # metadata columns: (background place, label)
        self._metadata_array = torch.stack(
            (torch.LongTensor(metadata_df['place'].values), self._y_array),
            dim=1
        )
        self._metadata_fields = ['background', 'y']
        self._metadata_map = {
            'background': [' land', 'water'], # Padding for str formatting
            'y': [' landbird', 'waterbird']
        }
        # Extract filenames
        self._input_array = metadata_df['img_filename'].values
        self._original_resolution = (224, 224)
        # Extract splits
        self._split_scheme = split_scheme
        if self._split_scheme != 'official':
            raise ValueError(f'Split scheme {self._split_scheme} not recognized')
        self._split_array = metadata_df['split'].values
        # Evaluation is grouped by (background, label) combinations.
        self._eval_grouper = CombinatorialGrouper(
            dataset=self,
            groupby_fields=(['background', 'y']))
        super().__init__(root_dir, download, split_scheme)

    def get_input(self, idx):
        """
        Returns x for a given idx.
        """
        img_filename = os.path.join(
            self.data_dir,
            self._input_array[idx])
        x = Image.open(img_filename).convert('RGB')
        return x

    def eval(self, y_pred, y_true, metadata, prediction_fn=None):
        """
        Computes all evaluation metrics.
        Args:
            - y_pred (Tensor): Predictions from a model. By default, they are predicted labels (LongTensor).
                               But they can also be other model outputs such that prediction_fn(y_pred)
                               are predicted labels.
            - y_true (LongTensor): Ground-truth labels
            - metadata (Tensor): Metadata
            - prediction_fn (function): A function that turns y_pred into predicted labels
        Output:
            - results (dictionary): Dictionary of evaluation metrics
            - results_str (str): String summarizing the evaluation metrics
        """
        metric = Accuracy(prediction_fn=prediction_fn)
        results, results_str = self.standard_group_eval(
            metric,
            self._eval_grouper,
            y_pred, y_true, metadata)
        # For Waterbirds, the validation and test sets are constructed to be more balanced
        # compared to the training set.
        # To compute the actual average accuracy over the empirical (training) distribution,
        # we therefore weight each group according to its frequency in the training set.
        results['adj_acc_avg'] = (
            (results['acc_y:landbird_background:land'] * 3498
             + results['acc_y:landbird_background:water'] * 184
             + results['acc_y:waterbird_background:land'] * 56
             + results['acc_y:waterbird_background:water'] * 1057) /
            (3498 + 184 + 56 + 1057))
        del results['acc_avg']
        # single quotes inside the f-string: same-quote nesting is a
        # SyntaxError before Python 3.12
        results_str = f"Adjusted average acc: {results['adj_acc_avg']:.3f}\n" + '\n'.join(results_str.split('\n')[1:])
        return results, results_str
import os
import torch
import pandas as pd
from PIL import Image
import numpy as np
from wilds.datasets.wilds_dataset import WILDSDataset
from wilds.common.grouper import CombinatorialGrouper
from wilds.common.metrics.all_metrics import Accuracy
class WaterbirdsDataset(WILDSDataset):
"""
The Waterbirds dataset.
This dataset is not part of the official WILDS benchmark.
We provide it for convenience and to facilitate comparisons to previous work.
Supported `split_scheme`:
'official'
Input (x):
Images of birds against various backgrounds that have already been cropped and centered.
Label (y):
y is binary. It is 1 if the bird is a waterbird (e.g., duck), and 0 if it is a landbird.
Metadata:
Each image is annotated with whether the background is a land or water background.
Original publication:
@inproceedings{sagawa2019distributionally,
title = {Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization},
author = {Sagawa, Shiori and Koh, Pang Wei and Hashimoto, Tatsunori B and Liang, Percy},
booktitle = {International Conference on Learning Representations},
year = {2019}
}
The dataset was constructed from the CUB-200-2011 dataset and the Places dataset:
@techreport{WahCUB_200_2011,
Title = {{The Caltech-UCSD Birds-200-2011 Dataset}},
Author = {Wah, C. and Branson, S. and Welinder, P. and Perona, P. and Belongie, S.},
Year = {2011}
Institution = {California Institute of Technology},
Number = {CNS-TR-2011-001}
}
@article{zhou2017places,
title = {Places: A 10 million Image Database for Scene Recognition},
author = {Zhou, Bolei and Lapedriza, Agata and Khosla, Aditya and Oliva, Aude and Torralba, Antonio},
journal ={IEEE Transactions on Pattern Analysis and Machine Intelligence},
year = {2017},
publisher = {IEEE}
}
License:
The use of this dataset is restricted to non-commercial research and educational purposes.
"""
_dataset_name = 'waterbirds'
_versions_dict = {
'1.0': {
'download_url': 'https://worksheets.codalab.org/rest/bundles/0x505056d5cdea4e4eaa0e242cbfe2daa4/contents/blob/',
'compressed_size': None}}
def __init__(self, version=None, root_dir='data', download=False, split_scheme='official'):
self._version = version
self._data_dir = self.initialize_data_dir(root_dir, download)
if not os.path.exists(self.data_dir):
raise ValueError(
f'{self.data_dir} does not exist yet. Please generate the dataset first.')
# Read in metadata
# Note: metadata_df is one-indexed.
metadata_df = pd.read_csv(
os.path.join(self.data_dir, 'metadata.csv'))
# Get the y values
self._y_array = torch.LongTensor(metadata_df['y'].values)
self._y_size = 1
self._n_classes = 2
self._metadata_array = torch.stack(
(torch.LongTensor(metadata_df['place'].values), self._y_array),
dim=1
)
self._metadata_fields = ['background', 'y']
self._metadata_map = {
'background': [' land', 'water'], # Padding for str formatting
'y': [' landbird', 'waterbird']
}
# Extract filenames
self._input_array = metadata_df['img_filename'].values
self._original_resolution = (224, 224)
# Extract splits
self._split_scheme = split_scheme
if self._split_scheme != 'official':
raise ValueError(f'Split scheme {self._split_scheme} not recognized')
self._split_array = metadata_df['split'].values
self._eval_grouper = CombinatorialGrouper(
dataset=self,
groupby_fields=(['background', 'y']))
super().__init__(root_dir, download, split_scheme)
def get_input(self, idx):
"""
Returns x for a given idx.
"""
img_filename = os.path.join(
self.data_dir,
self._input_array[idx])
x = Image.open(img_filename).convert('RGB')
return x
def eval(self, y_pred, y_true, metadata, prediction_fn=None):
"""
Computes all evaluation metrics.
Args:
- y_pred (Tensor): Predictions from a model. By default, they are predicted labels (LongTensor).
But they can also be other model outputs such that prediction_fn(y_pred)
are predicted labels.
- y_true (LongTensor): Ground-truth labels
- metadata (Tensor): Metadata
- prediction_fn (function): A function that turns y_pred into predicted labels
Output:
- results (dictionary): Dictionary of evaluation metrics
- results_str (str): String summarizing the evaluation metrics
"""
metric = Accuracy(prediction_fn=prediction_fn)
results, results_str = self.standard_group_eval(
metric,
self._eval_grouper,
y_pred, y_true, metadata)
# For Waterbirds, the validation and test sets are constructed to be more balanced
# compared to the training set.
# To compute the actual average accuracy over the empirical (training) distribution,
# we therefore weight each groups according to their frequency in the training set.
results['adj_acc_avg'] = (
(results['acc_y:landbird_background:land'] * 3498
+ results['acc_y:landbird_background:water'] * 184
+ results['acc_y:waterbird_background:land'] * 56
+ results['acc_y:waterbird_background:water'] * 1057) /
(3498 + 184 + 56 + 1057))
del results['acc_avg']
results_str = f"Adjusted average acc: {results['adj_acc_avg']:.3f}\n" + '\n'.join(results_str.split('\n')[1:])
return results, results_str
from typing import Dict, Optional, List, Set, Tuple, Union
import pytest
import torch
from allennlp.common import Params
from allennlp.common.from_params import FromParams, takes_arg, remove_optional, create_kwargs
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import DatasetReader, Tokenizer
from allennlp.models import Model
from allennlp.models.archival import load_archive
from allennlp.common.checks import ConfigurationError
class MyClass(FromParams):
def __init__(self, my_int: int, my_bool: bool = False) -> None:
self.my_int = my_int
self.my_bool = my_bool
class TestFromParams(AllenNlpTestCase):
def test_takes_arg(self):
def bare_function(some_input: int) -> int:
return some_input + 1
assert takes_arg(bare_function, "some_input")
assert not takes_arg(bare_function, "some_other_input")
class SomeClass:
total = 0
def __init__(self, constructor_param: str) -> None:
self.constructor_param = constructor_param
def check_param(self, check: str) -> bool:
return self.constructor_param == check
@classmethod
def set_total(cls, new_total: int) -> None:
cls.total = new_total
assert takes_arg(SomeClass, "self")
assert takes_arg(SomeClass, "constructor_param")
assert not takes_arg(SomeClass, "check")
assert takes_arg(SomeClass.check_param, "check")
assert not takes_arg(SomeClass.check_param, "other_check")
assert takes_arg(SomeClass.set_total, "new_total")
assert not takes_arg(SomeClass.set_total, "total")
def test_remove_optional(self):
optional_type = Optional[Dict[str, str]]
bare_type = remove_optional(optional_type) # type: ignore
bare_bare_type = remove_optional(bare_type)
assert bare_type == Dict[str, str]
assert bare_bare_type == Dict[str, str]
assert remove_optional(Optional[str]) == str
assert remove_optional(str) == str
def test_from_params(self):
my_class = MyClass.from_params(Params({"my_int": 10}), my_bool=True)
assert isinstance(my_class, MyClass)
assert my_class.my_int == 10
assert my_class.my_bool
def test_create_kwargs(self):
kwargs = create_kwargs(MyClass, MyClass, Params({"my_int": 5}), my_bool=True, my_float=4.4)
# my_float should not be included because it's not a param of the MyClass constructor
assert kwargs == {"my_int": 5, "my_bool": True}
def test_extras(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int, name: str) -> None:
self.size = size
self.name = name
@A.register("c")
class C(A):
def __init__(self, size: int, name: str) -> None:
self.size = size
self.name = name
# custom from params
@classmethod
def from_params(cls, params: Params, size: int, **extras) -> "C": # type: ignore
name = params.pop("name")
return cls(size=size, name=name)
# Check that extras get passed, even though A doesn't need them.
params = Params({"type": "b", "size": 10})
b = A.from_params(params, name="extra")
assert b.name == "extra"
assert b.size == 10
# Check that extra extras don't get passed.
params = Params({"type": "b", "size": 10})
b = A.from_params(params, name="extra", unwanted=True)
assert b.name == "extra"
assert b.size == 10
# Now the same with a custom from_params.
params = Params({"type": "c", "name": "extra_c"})
c = A.from_params(params, size=20)
assert c.name == "extra_c"
assert c.size == 20
# Check that extra extras don't get passed.
params = Params({"type": "c", "name": "extra_c"})
c = A.from_params(params, size=20, unwanted=True)
assert c.name == "extra_c"
assert c.size == 20
def test_extras_for_custom_classes(self):
from allennlp.common.registrable import Registrable
class BaseClass(Registrable):
pass
class BaseClass2(Registrable):
pass
@BaseClass.register("A")
class A(BaseClass):
def __init__(self, a: int, b: int, val: str) -> None:
self.a = a
self.b = b
self.val = val
def __hash__(self):
return self.b
def __eq__(self, other):
return self.b == other.b
@classmethod
def from_params(cls, params: Params, a: int, **extras) -> "A": # type: ignore
# A custom from params
b = params.pop_int("b")
val = params.pop("val", "C")
params.assert_empty(cls.__name__)
return cls(a=a, b=b, val=val)
@BaseClass2.register("B")
class B(BaseClass2):
def __init__(self, c: int, b: int) -> None:
self.c = c
self.b = b
@classmethod
def from_params(cls, params: Params, c: int, **extras) -> "B": # type: ignore
b = params.pop_int("b")
params.assert_empty(cls.__name__)
return cls(c=c, b=b)
@BaseClass.register("E")
class E(BaseClass):
def __init__(self, m: int, n: int) -> None:
self.m = m
self.n = n
@classmethod
def from_params(cls, params: Params, **extras2) -> "E": # type: ignore
m = params.pop_int("m")
params.assert_empty(cls.__name__)
n = extras2["n"]
return cls(m=m, n=n)
class C:
pass
@BaseClass.register("D")
class D(BaseClass):
def __init__(
self,
arg1: List[BaseClass],
arg2: Tuple[BaseClass, BaseClass2],
arg3: Dict[str, BaseClass],
arg4: Set[BaseClass],
arg5: List[BaseClass],
) -> None:
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
self.arg4 = arg4
self.arg5 = arg5
vals = [1, 2, 3]
params = Params(
{
"type": "D",
"arg1": [
{"type": "A", "b": vals[0]},
{"type": "A", "b": vals[1]},
{"type": "A", "b": vals[2]},
],
"arg2": [{"type": "A", "b": vals[0]}, {"type": "B", "b": vals[0]}],
"arg3": {
"class_1": {"type": "A", "b": vals[0]},
"class_2": {"type": "A", "b": vals[1]},
},
"arg4": [
{"type": "A", "b": vals[0], "val": "M"},
{"type": "A", "b": vals[1], "val": "N"},
{"type": "A", "b": vals[1], "val": "N"},
],
"arg5": [{"type": "E", "m": 9}],
}
)
extra = C()
tval1 = 5
tval2 = 6
d = BaseClass.from_params(params=params, extra=extra, a=tval1, c=tval2, n=10)
# Tests for List # Parameters
assert len(d.arg1) == len(vals)
assert isinstance(d.arg1, list)
assert isinstance(d.arg1[0], A)
assert all([x.b == y for x, y in zip(d.arg1, vals)])
assert all([x.a == tval1 for x in d.arg1])
# Tests for Tuple
assert isinstance(d.arg2, tuple)
assert isinstance(d.arg2[0], A)
assert isinstance(d.arg2[1], B)
assert d.arg2[0].a == tval1
assert d.arg2[1].c == tval2
assert d.arg2[0].b == d.arg2[1].b == vals[0]
# Tests for Dict
assert isinstance(d.arg3, dict)
assert isinstance(d.arg3["class_1"], A)
assert d.arg3["class_1"].a == d.arg3["class_2"].a == tval1
assert d.arg3["class_1"].b == vals[0]
assert d.arg3["class_2"].b == vals[1]
# Tests for Set
assert isinstance(d.arg4, set)
assert len(d.arg4) == 2
assert any(x.val == "M" for x in d.arg4)
assert any(x.val == "N" for x in d.arg4)
# Tests for custom extras parameters
assert isinstance(d.arg5, list)
assert isinstance(d.arg5[0], E)
assert d.arg5[0].m == 9
assert d.arg5[0].n == 10
def test_no_constructor(self):
params = Params({"type": "just_spaces"})
Tokenizer.from_params(params)
def test_union(self):
class A(FromParams):
def __init__(self, a: Union[int, List[int]]) -> None:
self.a = a
class B(FromParams):
def __init__(self, b: Union[A, List[A]]) -> None:
# Really you would want to be sure that `self.b` has a consistent type, but for
# this test we'll ignore that.
self.b = b
class C(FromParams):
def __init__(self, c: Union[A, B, Dict[str, A]]) -> None:
# Really you would want to be sure that `self.c` has a consistent type, but for
# this test we'll ignore that.
self.c = c
params = Params({"a": 3})
a = A.from_params(params)
assert a.a == 3
params = Params({"a": [3, 4, 5]})
a = A.from_params(params)
assert a.a == [3, 4, 5]
params = Params({"b": {"a": 3}})
b = B.from_params(params)
assert isinstance(b.b, A)
assert b.b.a == 3
params = Params({"b": [{"a": 3}, {"a": [4, 5]}]})
b = B.from_params(params)
assert isinstance(b.b, list)
assert b.b[0].a == 3
assert b.b[1].a == [4, 5]
# This is a contrived, ugly example (why would you want to duplicate names in a nested
# structure like this??), but it demonstrates a potential bug when dealing with mutatable
# parameters. If you're not careful about keeping the parameters un-mutated in two
# separate places, you'll end up with a B, or with a dict that's missing the 'b' key.
params = Params({"c": {"a": {"a": 3}, "b": {"a": [4, 5]}}})
c = C.from_params(params)
assert isinstance(c.c, dict)
assert c.c["a"].a == 3
assert c.c["b"].a == [4, 5]
def test_dict(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: Dict[str, A]) -> None:
self.items = items
params = Params(
{
"type": "d",
"items": {"first": {"type": "b", "size": 1}, "second": {"type": "b", "size": 2}},
}
)
d = C.from_params(params)
assert isinstance(d.items, dict)
assert len(d.items) == 2
assert all(isinstance(key, str) for key in d.items.keys())
assert all(isinstance(value, B) for value in d.items.values())
assert d.items["first"].size == 1
assert d.items["second"].size == 2
def test_dict_not_params(self):
class A(FromParams):
def __init__(self, counts: Dict[str, int]) -> None:
self.counts = counts
params = Params({"counts": {"a": 10, "b": 20}})
a = A.from_params(params)
assert isinstance(a.counts, dict)
assert not isinstance(a.counts, Params)
def test_list(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: List[A]) -> None:
self.items = items
params = Params(
{"type": "d", "items": [{"type": "b", "size": 1}, {"type": "b", "size": 2}]}
)
d = C.from_params(params)
assert isinstance(d.items, list)
assert len(d.items) == 2
assert all(isinstance(item, B) for item in d.items)
assert d.items[0].size == 1
assert d.items[1].size == 2
def test_tuple(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, name: str) -> None:
self.name = name
class E(Registrable):
pass
@E.register("f")
class F(E):
def __init__(self, items: Tuple[A, C]) -> None:
self.items = items
params = Params(
{"type": "f", "items": [{"type": "b", "size": 1}, {"type": "d", "name": "item2"}]}
)
f = E.from_params(params)
assert isinstance(f.items, tuple)
assert len(f.items) == 2
assert isinstance(f.items[0], B)
assert isinstance(f.items[1], D)
assert f.items[0].size == 1
assert f.items[1].name == "item2"
def test_set(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
def __init__(self, name: str) -> None:
self.name = name
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
@A.register("b")
class B(A):
pass
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: Set[A]) -> None:
self.items = items
params = Params(
{
"type": "d",
"items": [
{"type": "b", "name": "item1"},
{"type": "b", "name": "item2"},
{"type": "b", "name": "item2"},
],
}
)
d = C.from_params(params)
assert isinstance(d.items, set)
assert len(d.items) == 2
assert all(isinstance(item, B) for item in d.items)
assert any(item.name == "item1" for item in d.items)
assert any(item.name == "item2" for item in d.items)
def test_transferring_of_modules(self):
model_archive = str(
self.FIXTURES_ROOT / "decomposable_attention" / "serialization" / "model.tar.gz"
)
trained_model = load_archive(model_archive).model
config_file = str(self.FIXTURES_ROOT / "decomposable_attention" / "experiment.json")
model_params = Params.from_file(config_file).pop("model").as_dict(quiet=True)
# Override only text_field_embedder (freeze) and attend_feedforward params (tunable)
model_params["text_field_embedder"] = {
"_pretrained": {
"archive_file": model_archive,
"module_path": "_text_field_embedder",
"freeze": True,
}
}
model_params["attend_feedforward"] = {
"_pretrained": {
"archive_file": model_archive,
"module_path": "_attend_feedforward._module",
"freeze": False,
}
}
transfer_model = Model.from_params(vocab=trained_model.vocab, params=Params(model_params))
# TextFieldEmbedder and AttendFeedforward parameters should be transferred
for trained_parameter, transfer_parameter in zip(
trained_model._text_field_embedder.parameters(),
transfer_model._text_field_embedder.parameters(),
):
assert torch.all(trained_parameter == transfer_parameter)
for trained_parameter, transfer_parameter in zip(
trained_model._attend_feedforward.parameters(),
transfer_model._attend_feedforward.parameters(),
):
assert torch.all(trained_parameter == transfer_parameter)
# Any other module's parameters shouldn't be same (eg. compare_feedforward)
for trained_parameter, transfer_parameter in zip(
trained_model._compare_feedforward.parameters(),
transfer_model._compare_feedforward.parameters(),
):
assert torch.all(trained_parameter != transfer_parameter)
# TextFieldEmbedder should have requires_grad Off
for parameter in transfer_model._text_field_embedder.parameters():
assert not parameter.requires_grad
# # AttendFeedforward should have requires_grad On
for parameter in transfer_model._attend_feedforward.parameters():
assert parameter.requires_grad
def test_transferring_of_modules_ensures_type_consistency(self):
model_archive = str(
self.FIXTURES_ROOT / "decomposable_attention" / "serialization" / "model.tar.gz"
)
trained_model = load_archive(model_archive).model
config_file = str(self.FIXTURES_ROOT / "decomposable_attention" / "experiment.json")
model_params = Params.from_file(config_file).pop("model").as_dict(quiet=True)
# Override only text_field_embedder and make it load AttendFeedForward
model_params["text_field_embedder"] = {
"_pretrained": {
"archive_file": model_archive,
"module_path": "_attend_feedforward._module",
}
}
with pytest.raises(ConfigurationError):
Model.from_params(vocab=trained_model.vocab, params=Params(model_params))
def test_kwargs_are_passed_to_superclass(self):
params = Params(
{"type": "text_classification_json", "lazy": True, "cache_directory": "tmp"}
)
reader = DatasetReader.from_params(params)
assert reader.lazy is True
assert str(reader._cache_directory) == "tmp"
from typing import Dict, Optional, List, Set, Tuple, Union
import pytest
import torch
from allennlp.common import Params
from allennlp.common.from_params import FromParams, takes_arg, remove_optional, create_kwargs
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import DatasetReader, Tokenizer
from allennlp.models import Model
from allennlp.models.archival import load_archive
from allennlp.common.checks import ConfigurationError
class MyClass(FromParams):
def __init__(self, my_int: int, my_bool: bool = False) -> None:
self.my_int = my_int
self.my_bool = my_bool
class TestFromParams(AllenNlpTestCase):
def test_takes_arg(self):
def bare_function(some_input: int) -> int:
return some_input + 1
assert takes_arg(bare_function, "some_input")
assert not takes_arg(bare_function, "some_other_input")
class SomeClass:
total = 0
def __init__(self, constructor_param: str) -> None:
self.constructor_param = constructor_param
def check_param(self, check: str) -> bool:
return self.constructor_param == check
@classmethod
def set_total(cls, new_total: int) -> None:
cls.total = new_total
assert takes_arg(SomeClass, "self")
assert takes_arg(SomeClass, "constructor_param")
assert not takes_arg(SomeClass, "check")
assert takes_arg(SomeClass.check_param, "check")
assert not takes_arg(SomeClass.check_param, "other_check")
assert takes_arg(SomeClass.set_total, "new_total")
assert not takes_arg(SomeClass.set_total, "total")
def test_remove_optional(self):
optional_type = Optional[Dict[str, str]]
bare_type = remove_optional(optional_type) # type: ignore
bare_bare_type = remove_optional(bare_type)
assert bare_type == Dict[str, str]
assert bare_bare_type == Dict[str, str]
assert remove_optional(Optional[str]) == str
assert remove_optional(str) == str
def test_from_params(self):
my_class = MyClass.from_params(Params({"my_int": 10}), my_bool=True)
assert isinstance(my_class, MyClass)
assert my_class.my_int == 10
assert my_class.my_bool
def test_create_kwargs(self):
kwargs = create_kwargs(MyClass, MyClass, Params({"my_int": 5}), my_bool=True, my_float=4.4)
# my_float should not be included because it's not a param of the MyClass constructor
assert kwargs == {"my_int": 5, "my_bool": True}
def test_extras(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int, name: str) -> None:
self.size = size
self.name = name
@A.register("c")
class C(A):
def __init__(self, size: int, name: str) -> None:
self.size = size
self.name = name
# custom from params
@classmethod
def from_params(cls, params: Params, size: int, **extras) -> "C": # type: ignore
name = params.pop("name")
return cls(size=size, name=name)
# Check that extras get passed, even though A doesn't need them.
params = Params({"type": "b", "size": 10})
b = A.from_params(params, name="extra")
assert b.name == "extra"
assert b.size == 10
# Check that extra extras don't get passed.
params = Params({"type": "b", "size": 10})
b = A.from_params(params, name="extra", unwanted=True)
assert b.name == "extra"
assert b.size == 10
# Now the same with a custom from_params.
params = Params({"type": "c", "name": "extra_c"})
c = A.from_params(params, size=20)
assert c.name == "extra_c"
assert c.size == 20
# Check that extra extras don't get passed.
params = Params({"type": "c", "name": "extra_c"})
c = A.from_params(params, size=20, unwanted=True)
assert c.name == "extra_c"
assert c.size == 20
def test_extras_for_custom_classes(self):
from allennlp.common.registrable import Registrable
class BaseClass(Registrable):
pass
class BaseClass2(Registrable):
pass
@BaseClass.register("A")
class A(BaseClass):
def __init__(self, a: int, b: int, val: str) -> None:
self.a = a
self.b = b
self.val = val
def __hash__(self):
return self.b
def __eq__(self, other):
return self.b == other.b
@classmethod
def from_params(cls, params: Params, a: int, **extras) -> "A": # type: ignore
# A custom from params
b = params.pop_int("b")
val = params.pop("val", "C")
params.assert_empty(cls.__name__)
return cls(a=a, b=b, val=val)
@BaseClass2.register("B")
class B(BaseClass2):
def __init__(self, c: int, b: int) -> None:
self.c = c
self.b = b
@classmethod
def from_params(cls, params: Params, c: int, **extras) -> "B": # type: ignore
b = params.pop_int("b")
params.assert_empty(cls.__name__)
return cls(c=c, b=b)
@BaseClass.register("E")
class E(BaseClass):
def __init__(self, m: int, n: int) -> None:
self.m = m
self.n = n
@classmethod
def from_params(cls, params: Params, **extras2) -> "E": # type: ignore
m = params.pop_int("m")
params.assert_empty(cls.__name__)
n = extras2["n"]
return cls(m=m, n=n)
class C:
pass
@BaseClass.register("D")
class D(BaseClass):
def __init__(
self,
arg1: List[BaseClass],
arg2: Tuple[BaseClass, BaseClass2],
arg3: Dict[str, BaseClass],
arg4: Set[BaseClass],
arg5: List[BaseClass],
) -> None:
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
self.arg4 = arg4
self.arg5 = arg5
vals = [1, 2, 3]
params = Params(
{
"type": "D",
"arg1": [
{"type": "A", "b": vals[0]},
{"type": "A", "b": vals[1]},
{"type": "A", "b": vals[2]},
],
"arg2": [{"type": "A", "b": vals[0]}, {"type": "B", "b": vals[0]}],
"arg3": {
"class_1": {"type": "A", "b": vals[0]},
"class_2": {"type": "A", "b": vals[1]},
},
"arg4": [
{"type": "A", "b": vals[0], "val": "M"},
{"type": "A", "b": vals[1], "val": "N"},
{"type": "A", "b": vals[1], "val": "N"},
],
"arg5": [{"type": "E", "m": 9}],
}
)
extra = C()
tval1 = 5
tval2 = 6
d = BaseClass.from_params(params=params, extra=extra, a=tval1, c=tval2, n=10)
# Tests for List # Parameters
assert len(d.arg1) == len(vals)
assert isinstance(d.arg1, list)
assert isinstance(d.arg1[0], A)
assert all([x.b == y for x, y in zip(d.arg1, vals)])
assert all([x.a == tval1 for x in d.arg1])
# Tests for Tuple
assert isinstance(d.arg2, tuple)
assert isinstance(d.arg2[0], A)
assert isinstance(d.arg2[1], B)
assert d.arg2[0].a == tval1
assert d.arg2[1].c == tval2
assert d.arg2[0].b == d.arg2[1].b == vals[0]
# Tests for Dict
assert isinstance(d.arg3, dict)
assert isinstance(d.arg3["class_1"], A)
assert d.arg3["class_1"].a == d.arg3["class_2"].a == tval1
assert d.arg3["class_1"].b == vals[0]
assert d.arg3["class_2"].b == vals[1]
# Tests for Set
assert isinstance(d.arg4, set)
assert len(d.arg4) == 2
assert any(x.val == "M" for x in d.arg4)
assert any(x.val == "N" for x in d.arg4)
# Tests for custom extras parameters
assert isinstance(d.arg5, list)
assert isinstance(d.arg5[0], E)
assert d.arg5[0].m == 9
assert d.arg5[0].n == 10
def test_no_constructor(self):
params = Params({"type": "just_spaces"})
Tokenizer.from_params(params)
def test_union(self):
class A(FromParams):
def __init__(self, a: Union[int, List[int]]) -> None:
self.a = a
class B(FromParams):
def __init__(self, b: Union[A, List[A]]) -> None:
# Really you would want to be sure that `self.b` has a consistent type, but for
# this test we'll ignore that.
self.b = b
class C(FromParams):
def __init__(self, c: Union[A, B, Dict[str, A]]) -> None:
# Really you would want to be sure that `self.c` has a consistent type, but for
# this test we'll ignore that.
self.c = c
params = Params({"a": 3})
a = A.from_params(params)
assert a.a == 3
params = Params({"a": [3, 4, 5]})
a = A.from_params(params)
assert a.a == [3, 4, 5]
params = Params({"b": {"a": 3}})
b = B.from_params(params)
assert isinstance(b.b, A)
assert b.b.a == 3
params = Params({"b": [{"a": 3}, {"a": [4, 5]}]})
b = B.from_params(params)
assert isinstance(b.b, list)
assert b.b[0].a == 3
assert b.b[1].a == [4, 5]
# This is a contrived, ugly example (why would you want to duplicate names in a nested
# structure like this??), but it demonstrates a potential bug when dealing with mutatable
# parameters. If you're not careful about keeping the parameters un-mutated in two
# separate places, you'll end up with a B, or with a dict that's missing the 'b' key.
params = Params({"c": {"a": {"a": 3}, "b": {"a": [4, 5]}}})
c = C.from_params(params)
assert isinstance(c.c, dict)
assert c.c["a"].a == 3
assert c.c["b"].a == [4, 5]
def test_dict(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: Dict[str, A]) -> None:
self.items = items
params = Params(
{
"type": "d",
"items": {"first": {"type": "b", "size": 1}, "second": {"type": "b", "size": 2}},
}
)
d = C.from_params(params)
assert isinstance(d.items, dict)
assert len(d.items) == 2
assert all(isinstance(key, str) for key in d.items.keys())
assert all(isinstance(value, B) for value in d.items.values())
assert d.items["first"].size == 1
assert d.items["second"].size == 2
def test_dict_not_params(self):
class A(FromParams):
def __init__(self, counts: Dict[str, int]) -> None:
self.counts = counts
params = Params({"counts": {"a": 10, "b": 20}})
a = A.from_params(params)
assert isinstance(a.counts, dict)
assert not isinstance(a.counts, Params)
def test_list(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: List[A]) -> None:
self.items = items
params = Params(
{"type": "d", "items": [{"type": "b", "size": 1}, {"type": "b", "size": 2}]}
)
d = C.from_params(params)
assert isinstance(d.items, list)
assert len(d.items) == 2
assert all(isinstance(item, B) for item in d.items)
assert d.items[0].size == 1
assert d.items[1].size == 2
def test_tuple(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
pass
@A.register("b")
class B(A):
def __init__(self, size: int) -> None:
self.size = size
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, name: str) -> None:
self.name = name
class E(Registrable):
pass
@E.register("f")
class F(E):
def __init__(self, items: Tuple[A, C]) -> None:
self.items = items
params = Params(
{"type": "f", "items": [{"type": "b", "size": 1}, {"type": "d", "name": "item2"}]}
)
f = E.from_params(params)
assert isinstance(f.items, tuple)
assert len(f.items) == 2
assert isinstance(f.items[0], B)
assert isinstance(f.items[1], D)
assert f.items[0].size == 1
assert f.items[1].name == "item2"
def test_set(self):
from allennlp.common.registrable import Registrable
class A(Registrable):
def __init__(self, name: str) -> None:
self.name = name
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
@A.register("b")
class B(A):
pass
class C(Registrable):
pass
@C.register("d")
class D(C):
def __init__(self, items: Set[A]) -> None:
self.items = items
params = Params(
{
"type": "d",
"items": [
{"type": "b", "name": "item1"},
{"type": "b", "name": "item2"},
{"type": "b", "name": "item2"},
],
}
)
d = C.from_params(params)
assert isinstance(d.items, set)
assert len(d.items) == 2
assert all(isinstance(item, B) for item in d.items)
assert any(item.name == "item1" for item in d.items)
assert any(item.name == "item2" for item in d.items)
    def test_transferring_of_modules(self):
        """`_pretrained` config blocks load module weights from an archive and honor the `freeze` flag."""
        model_archive = str(
            self.FIXTURES_ROOT / "decomposable_attention" / "serialization" / "model.tar.gz"
        )
        trained_model = load_archive(model_archive).model
        config_file = str(self.FIXTURES_ROOT / "decomposable_attention" / "experiment.json")
        model_params = Params.from_file(config_file).pop("model").as_dict(quiet=True)
        # Override only text_field_embedder (freeze) and attend_feedforward params (tunable)
        model_params["text_field_embedder"] = {
            "_pretrained": {
                "archive_file": model_archive,
                "module_path": "_text_field_embedder",
                "freeze": True,
            }
        }
        model_params["attend_feedforward"] = {
            "_pretrained": {
                "archive_file": model_archive,
                "module_path": "_attend_feedforward._module",
                "freeze": False,
            }
        }
        transfer_model = Model.from_params(vocab=trained_model.vocab, params=Params(model_params))
        # TextFieldEmbedder and AttendFeedforward parameters should be transferred
        for trained_parameter, transfer_parameter in zip(
            trained_model._text_field_embedder.parameters(),
            transfer_model._text_field_embedder.parameters(),
        ):
            assert torch.all(trained_parameter == transfer_parameter)
        for trained_parameter, transfer_parameter in zip(
            trained_model._attend_feedforward.parameters(),
            transfer_model._attend_feedforward.parameters(),
        ):
            assert torch.all(trained_parameter == transfer_parameter)
        # Any other module's parameters shouldn't be same (eg. compare_feedforward)
        for trained_parameter, transfer_parameter in zip(
            trained_model._compare_feedforward.parameters(),
            transfer_model._compare_feedforward.parameters(),
        ):
            assert torch.all(trained_parameter != transfer_parameter)
        # TextFieldEmbedder should have requires_grad Off
        for parameter in transfer_model._text_field_embedder.parameters():
            assert not parameter.requires_grad
        # # AttendFeedforward should have requires_grad On
        for parameter in transfer_model._attend_feedforward.parameters():
            assert parameter.requires_grad
    def test_transferring_of_modules_ensures_type_consistency(self):
        """Loading a `_pretrained` module of the wrong type must raise ConfigurationError."""
        model_archive = str(
            self.FIXTURES_ROOT / "decomposable_attention" / "serialization" / "model.tar.gz"
        )
        trained_model = load_archive(model_archive).model
        config_file = str(self.FIXTURES_ROOT / "decomposable_attention" / "experiment.json")
        model_params = Params.from_file(config_file).pop("model").as_dict(quiet=True)
        # Override only text_field_embedder and make it load AttendFeedForward
        model_params["text_field_embedder"] = {
            "_pretrained": {
                "archive_file": model_archive,
                "module_path": "_attend_feedforward._module",
            }
        }
        with pytest.raises(ConfigurationError):
            Model.from_params(vocab=trained_model.vocab, params=Params(model_params))
    def test_kwargs_are_passed_to_superclass(self):
        """Extra params not consumed by the subclass constructor flow through to the base DatasetReader."""
        params = Params(
            {"type": "text_classification_json", "lazy": True, "cache_directory": "tmp"}
        )
        reader = DatasetReader.from_params(params)
        # `lazy` and `cache_directory` are base-class kwargs, not subclass ones.
        assert reader.lazy is True
        assert str(reader._cache_directory) == "tmp"
|
import json
import logging
from pathlib import Path
from typing import List
import aiofiles
from bot.bot import Friendo
from bot.settings import MEME_PASSWORD, MEME_USERNAME
MEME_DIR = Path.cwd() / 'bot' / 'meme_api' / 'json' / 'meme_list.json'
log = logging.getLogger(__name__)
class Meme:
    """Pulls meme templates from the imgflip API and captions them into memes."""

    def __init__(self, bot: Friendo) -> None:
        self.bot = bot
        self.gen_meme_url = "https://api.imgflip.com/caption_image"
        self.get_all_memes_url = "https://api.imgflip.com/get_memes"
        # Refresh the on-disk template cache first, then load it for fast lookups.
        self.bot.loop.run_until_complete(self.get_all_memes())
        with open(MEME_DIR, "r") as m:
            self.meme_dict = json.load(m)["data"]["memes"]
        self.user_name = MEME_USERNAME
        self.password = MEME_PASSWORD

    async def generate_meme(self, *, name: str, text: str = None) -> str:
        """Caption the template called *name* with *text* and return the image URL.

        Returns an error string when *text* has more entries than the template
        has boxes, and ``None`` when no template matches or the API call fails
        (callers should be prepared for a ``None`` result).
        """
        data = {"username": self.user_name, "password": self.password}
        if text:
            for meme in self.meme_dict:
                if meme["name"].lower() == name.lower():
                    data["template_id"] = meme["id"]
                    if len(text) <= meme["box_count"]:
                        for count, each in enumerate(text):
                            data[f"boxes[{count}][text]"] = each
                    else:
                        # Inner quotes must differ from the f-string's own quotes:
                        # nested double quotes here are a SyntaxError before Python 3.12.
                        return f"Too many text boxes for {meme['name']} with count {meme['box_count']}"
        async with self.bot.session.post(self.gen_meme_url, data=data) as resp:
            if resp.status == 200:
                _json = await resp.json()
                return _json["data"]["url"]

    async def get_all_memes(self) -> None:
        """Download the current template list from imgflip and cache it at MEME_DIR."""
        async with self.bot.session.get(self.get_all_memes_url) as resp:
            if resp.status == 200:
                log.info("updating meme list...")
                _json = await resp.json()
                async with aiofiles.open(MEME_DIR, "w+") as f:
                    await f.write(json.dumps(_json))
            else:
                log.info("Failed to update meme list, aborting...")

    def search_meme_list(self, search_words: List[str]) -> str:
        """Return up to ten "Name/Text Boxes" lines for templates matching *search_words*.

        Matching is a lazy substring check against each word of every template
        name; returns ``None`` when nothing matches (despite the ``str`` hint).
        """
        final_dict = {}
        for meme in self.meme_dict:
            name = meme["name"]
            for each in meme["name"].split(" "):
                # Check if any word in the search words matches in a meme name, lazy search
                # (assumes search_words are already lowercased -- TODO confirm at the caller)
                if any(word in each.lower() for word in search_words):
                    final_dict[name] = meme["box_count"]
        if len(final_dict) > 0:
            return "\n".join([f"Name: {x}, Text Boxes: {final_dict[x]}" for x in final_dict.keys()][:10])
| import json
import logging
from pathlib import Path
from typing import List
import aiofiles
from bot.bot import Friendo
from bot.settings import MEME_PASSWORD, MEME_USERNAME
MEME_DIR = Path.cwd() / 'bot' / 'meme_api' / 'json' / 'meme_list.json'
log = logging.getLogger(__name__)
class Meme:
    """Pulls meme templates from the imgflip API and captions them into memes."""
    def __init__(self, bot: Friendo) -> None:
        self.bot = bot
        self.gen_meme_url = "https://api.imgflip.com/caption_image"
        self.get_all_memes_url = "https://api.imgflip.com/get_memes"
        # Refresh the on-disk template cache first, then load it for fast lookups.
        self.bot.loop.run_until_complete(self.get_all_memes())
        with open(MEME_DIR, "r") as m:
            self.meme_dict = json.load(m)["data"]["memes"]
        self.user_name = MEME_USERNAME
        self.password = MEME_PASSWORD
    async def generate_meme(self, *, name: str, text: str = None) -> str:
        """Caption the template called *name* with *text* and return the image URL.

        Returns an error string when *text* has more entries than the template
        has boxes, and ``None`` when no template matches or the API call fails.
        """
        data = {"username": self.user_name, "password": self.password}
        if text:
            for meme in self.meme_dict:
                if meme["name"].lower() == name.lower():
                    data["template_id"] = meme["id"]
                    if len(text) <= meme["box_count"]:
                        for count, each in enumerate(text):
                            data[f"boxes[{count}][text]"] = each
                    else:
                        return f"Too many text boxes for {meme['name']} with count {meme['box_count']}"
        async with self.bot.session.post(self.gen_meme_url, data=data) as resp:
            if resp.status == 200:
                _json = await resp.json()
                return _json["data"]["url"]
    async def get_all_memes(self) -> None:
        """Download the current template list from imgflip and cache it at MEME_DIR."""
        async with self.bot.session.get(self.get_all_memes_url) as resp:
            if resp.status == 200:
                log.info("updating meme list...")
                _json = await resp.json()
                async with aiofiles.open(MEME_DIR, "w+") as f:
                    await f.write(json.dumps(_json))
            else:
                log.info("Failed to update meme list, aborting...")
    def search_meme_list(self, search_words: List[str]) -> str:
        """Return up to ten "Name/Text Boxes" lines for templates matching *search_words*.

        Returns ``None`` when nothing matches (despite the ``str`` hint).
        """
        final_dict = {}
        for meme in self.meme_dict:
            name = meme["name"]
            for each in meme["name"].split(" "):
                # Check if any word in the search words matches in a meme name, lazy search
                # (assumes search_words are already lowercased -- TODO confirm at the caller)
                if any(word in each.lower() for word in search_words):
                    final_dict[name] = meme["box_count"]
        if len(final_dict) > 0:
            return "\n".join([f"Name: {x}, Text Boxes: {final_dict[x]}" for x in final_dict.keys()][:10])
|
"""
Provides linkedin api-related code
"""
import random
import logging
from time import sleep
from urllib.parse import urlencode
import json
from linkedin_api.utils.helpers import get_id_from_urn
from linkedin_api.client import Client
logger = logging.getLogger(__name__)
def default_evade():
    """Best-effort rate-limit evasion: pause for a random 2-5 second interval."""
    delay = random.randint(2, 5)
    sleep(delay)
class Linkedin(object):
"""
Class for accessing Linkedin API.
"""
_MAX_UPDATE_COUNT = 100 # max seems to be 100
_MAX_SEARCH_COUNT = 49 # max seems to be 49
_MAX_REPEATED_REQUESTS = (
200 # VERY conservative max requests count to avoid rate-limit
)
    def __init__(
        self,
        username,
        password,
        *,
        authenticate=True,
        refresh_cookies=False,
        debug=False,
        proxies={},  # NOTE(review): mutable default; not mutated here, but confirm Client never mutates it
    ):
        """Create the underlying HTTP client and (by default) log in immediately.

        [username]/[password] - Linkedin credentials
        [authenticate] - when False, skip the login request
        [refresh_cookies]/[debug]/[proxies] - passed through to Client
        """
        self.client = Client(
            refresh_cookies=refresh_cookies, debug=debug, proxies=proxies
        )
        # Global side effect: reconfigures the root logger level for the whole process.
        logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
        self.logger = logger
        if authenticate:
            self.client.authenticate(username, password)
def _fetch(self, uri, evade=default_evade, **kwargs):
"""
GET request to Linkedin API
"""
evade()
url = f"{self.client.API_BASE_URL}{uri}"
return self.client.session.get(url, **kwargs)
def _post(self, uri, evade=default_evade, **kwargs):
"""
POST request to Linkedin API
"""
evade()
url = f"{self.client.API_BASE_URL}{uri}"
return self.client.session.post(url, **kwargs)
def search(self, params, limit=None, results=[]):
"""
Do a search.
"""
count = (
limit
if limit and limit <= Linkedin._MAX_SEARCH_COUNT
else Linkedin._MAX_SEARCH_COUNT
)
default_params = {
"count": str(count),
"filters": "List()",
"origin": "GLOBAL_SEARCH_HEADER",
"q": "all",
"start": len(results),
"queryContext": "List(spellCorrectionEnabled->true,relatedSearchesEnabled->true,kcardTypes->PROFILE|COMPANY)",
}
default_params.update(params)
res = self._fetch(
f"/search/blended?{urlencode(default_params, safe="(),")}",
headers={"accept": "application/vnd.linkedin.normalized+json+2.1"},
)
data = res.json()
new_elements = []
for i in range(len(data["data"]["elements"])):
new_elements.extend(data["data"]["elements"][i]["elements"])
# not entirely sure what extendedElements generally refers to - keyword search gives back a single job?
# new_elements.extend(data["data"]["elements"][i]["extendedElements"])
results.extend(new_elements)
results = results[
:limit
] # always trim results, no matter what the request returns
# recursive base case
if (
limit is not None
and (
len(results) >= limit # if our results exceed set limit
or len(results) / count >= Linkedin._MAX_REPEATED_REQUESTS
)
) or len(new_elements) == 0:
return results
self.logger.debug(f"results grew to {len(results)}")
return self.search(params, results=results, limit=limit)
def search_people(
self,
keywords=None,
connection_of=None,
network_depth=None,
current_company=None,
past_companies=None,
nonprofit_interests=None,
profile_languages=None,
regions=None,
industries=None,
schools=None,
title=None,
include_private_profiles=False, # profiles without a public id, "Linkedin Member"
limit=None,
):
"""
Do a people search.
"""
filters = ["resultType->PEOPLE"]
if connection_of:
filters.append(f"connectionOf->{connection_of}")
if network_depth:
filters.append(f"network->{network_depth}")
if regions:
filters.append(f'geoRegion->{'|'.join(regions)}')
if industries:
filters.append(f'industry->{'|'.join(industries)}')
if current_company:
filters.append(f'currentCompany->{'|'.join(current_company)}')
if past_companies:
filters.append(f'pastCompany->{'|'.join(past_companies)}')
if profile_languages:
filters.append(f'profileLanguage->{'|'.join(profile_languages)}')
if nonprofit_interests:
filters.append(f'nonprofitInterest->{'|'.join(nonprofit_interests)}')
if schools:
filters.append(f'schools->{'|'.join(schools)}')
if title:
filters.append(f"title->{title}")
params = {"filters": "List({})".format(",".join(filters))}
if keywords:
params["keywords"] = keywords
data = self.search(params, limit=limit)
results = []
for item in data:
if "publicIdentifier" not in item:
continue
results.append(
{
"urn_id": get_id_from_urn(item.get("targetUrn")),
"distance": item.get("memberDistance", {}).get("value"),
"public_id": item.get("publicIdentifier"),
}
)
return results
def search_companies(self, keywords=None, limit=None):
"""
Do a company search.
"""
filters = ["resultType->COMPANIES"]
params = {
"filters": "List({})".format(",".join(filters)),
"queryContext": "List(spellCorrectionEnabled->true)",
}
if keywords:
params["keywords"] = keywords
data = self.search(params, limit=limit)
results = []
for item in data:
if item.get("type") != "COMPANY":
continue
results.append(
{
"urn": item.get("targetUrn"),
"urn_id": get_id_from_urn(item.get("targetUrn")),
"name": item.get("title", {}).get("text"),
"headline": item.get("headline", {}).get("text"),
"subline": item.get("subline", {}).get("text"),
}
)
return results
def get_profile_contact_info(self, public_id=None, urn_id=None):
"""
Return data for a single profile.
[public_id] - public identifier i.e. tom-quirk-1928345
[urn_id] - id provided by the related URN
"""
res = self._fetch(
f"/identity/profiles/{public_id or urn_id}/profileContactInfo"
)
data = res.json()
contact_info = {
"email_address": data.get("emailAddress"),
"websites": [],
"twitter": data.get("twitterHandles"),
"birthdate": data.get("birthDateOn"),
"ims": data.get("ims"),
"phone_numbers": data.get("phoneNumbers", []),
}
websites = data.get("websites", [])
for item in websites:
if "com.linkedin.voyager.identity.profile.StandardWebsite" in item["type"]:
item["label"] = item["type"][
"com.linkedin.voyager.identity.profile.StandardWebsite"
]["category"]
elif "" in item["type"]:
item["label"] = item["type"][
"com.linkedin.voyager.identity.profile.CustomWebsite"
]["label"]
del item["type"]
contact_info["websites"] = websites
return contact_info
def get_profile_skills(self, public_id=None, urn_id=None):
"""
Return the skills of a profile.
[public_id] - public identifier i.e. tom-quirk-1928345
[urn_id] - id provided by the related URN
"""
params = {"count": 100, "start": 0}
res = self._fetch(
f"/identity/profiles/{public_id or urn_id}/skills", params=params
)
data = res.json()
skills = data.get("elements", [])
for item in skills:
del item["entityUrn"]
return skills
    def get_profile(self, public_id=None, urn_id=None):
        """
        Return data for a single profile, reshaped into a flat dict.

        [public_id] - public identifier i.e. tom-quirk-1928345
        [urn_id] - id provided by the related URN

        Returns {} on an error payload.  NOTE(review): the massaging below uses
        bare `del` on several keys, so it assumes the Voyager payload always
        carries them -- a schema change would raise KeyError.
        """
        # NOTE this still works for now, but will probably eventually have to be converted to
        # https://www.linkedin.com/voyager/api/identity/profiles/ACoAAAKT9JQBsH7LwKaE9Myay9WcX8OVGuDq9Uw
        res = self._fetch(f"/identity/profiles/{public_id or urn_id}/profileView")
        data = res.json()
        if data and "status" in data and data["status"] != 200:
            self.logger.info("request failed: {}".format(data["message"]))
            return {}
        # massage [profile] data
        profile = data["profile"]
        if "miniProfile" in profile:
            if "picture" in profile["miniProfile"]:
                profile["displayPictureUrl"] = profile["miniProfile"]["picture"][
                    "com.linkedin.common.VectorImage"
                ]["rootUrl"]
            profile["profile_id"] = get_id_from_urn(profile["miniProfile"]["entityUrn"])
            del profile["miniProfile"]
        del profile["defaultLocale"]
        del profile["supportedLocales"]
        del profile["versionTag"]
        del profile["showEducationOnProfileTopCard"]
        # massage [experience] data
        experience = data["positionView"]["elements"]
        for item in experience:
            if "company" in item and "miniCompany" in item["company"]:
                if "logo" in item["company"]["miniCompany"]:
                    logo = item["company"]["miniCompany"]["logo"].get(
                        "com.linkedin.common.VectorImage"
                    )
                    if logo:
                        item["companyLogoUrl"] = logo["rootUrl"]
                del item["company"]["miniCompany"]
        profile["experience"] = experience
        # massage [skills] data
        # skills = [item["name"] for item in data["skillView"]["elements"]]
        # profile["skills"] = skills
        profile["skills"] = self.get_profile_skills(public_id=public_id, urn_id=urn_id)
        # massage [education] data
        education = data["educationView"]["elements"]
        for item in education:
            if "school" in item:
                if "logo" in item["school"]:
                    item["school"]["logoUrl"] = item["school"]["logo"][
                        "com.linkedin.common.VectorImage"
                    ]["rootUrl"]
                    del item["school"]["logo"]
        profile["education"] = education
        # massage [languages] data
        languages = data["languageView"]["elements"]
        for item in languages:
            del item["entityUrn"]
        profile["languages"] = languages
        # massage [publications] data
        publications = data["publicationView"]["elements"]
        for item in publications:
            del item["entityUrn"]
            for author in item.get("authors", []):
                del author["entityUrn"]
        profile["publications"] = publications
        # massage [certifications] data
        certifications = data["certificationView"]["elements"]
        for item in certifications:
            del item["entityUrn"]
        profile["certifications"] = certifications
        # massage [volunteer] data
        volunteer = data["volunteerExperienceView"]["elements"]
        for item in volunteer:
            del item["entityUrn"]
        profile["volunteer"] = volunteer
        # massage [honors] data
        honors = data["honorView"]["elements"]
        for item in honors:
            del item["entityUrn"]
        profile["honors"] = honors
        return profile
    def get_profile_connections(self, urn_id):
        """
        Return a list of profile ids connected to profile of given [urn_id].

        Thin wrapper: a first-degree ("F") people search scoped to that profile.
        """
        return self.search_people(connection_of=urn_id, network_depth="F")
def get_company_updates(
self, public_id=None, urn_id=None, max_results=None, results=[]
):
""""
Return a list of company posts
[public_id] - public identifier ie - microsoft
[urn_id] - id provided by the related URN
"""
params = {
"companyUniversalName": {public_id or urn_id},
"q": "companyFeedByUniversalName",
"moduleKey": "member-share",
"count": Linkedin._MAX_UPDATE_COUNT,
"start": len(results),
}
res = self._fetch(f"/feed/updates", params=params)
data = res.json()
if (
len(data["elements"]) == 0
or (max_results is not None and len(results) >= max_results)
or (
max_results is not None
and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS
)
):
return results
results.extend(data["elements"])
self.logger.debug(f"results grew: {len(results)}")
return self.get_company_updates(
public_id=public_id, urn_id=urn_id, results=results, max_results=max_results
)
def get_profile_updates(
self, public_id=None, urn_id=None, max_results=None, results=[]
):
""""
Return a list of profile posts
[public_id] - public identifier i.e. tom-quirk-1928345
[urn_id] - id provided by the related URN
"""
params = {
"profileId": {public_id or urn_id},
"q": "memberShareFeed",
"moduleKey": "member-share",
"count": Linkedin._MAX_UPDATE_COUNT,
"start": len(results),
}
res = self._fetch(f"/feed/updates", params=params)
data = res.json()
if (
len(data["elements"]) == 0
or (max_results is not None and len(results) >= max_results)
or (
max_results is not None
and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS
)
):
return results
results.extend(data["elements"])
self.logger.debug(f"results grew: {len(results)}")
return self.get_profile_updates(
public_id=public_id, urn_id=urn_id, results=results, max_results=max_results
)
    def get_current_profile_views(self):
        """
        Get profile view statistics, including chart data.

        Returns the raw "numViews" integer dug out of the wvmpCards payload.
        NOTE(review): assumes the first element/insight card is the summary
        card -- a payload reordering would break this.
        """
        res = self._fetch(f"/identity/wvmpCards")
        data = res.json()
        return data["elements"][0]["value"][
            "com.linkedin.voyager.identity.me.wvmpOverview.WvmpViewersCard"
        ]["insightCards"][0]["value"][
            "com.linkedin.voyager.identity.me.wvmpOverview.WvmpSummaryInsightCard"
        ][
            "numViews"
        ]
    def get_school(self, public_id):
        """
        Return data for a single school, or {} on an error payload.

        [public_id] - public identifier i.e. uq
        """
        params = {
            "decorationId": "com.linkedin.voyager.deco.organization.web.WebFullCompanyMain-12",
            "q": "universalName",
            "universalName": public_id,
        }
        res = self._fetch(f"/organization/companies?{urlencode(params)}")
        data = res.json()
        if data and "status" in data and data["status"] != 200:
            self.logger.info("request failed: {}".format(data))
            return {}
        # Schools share the companies endpoint; the first element is the match.
        school = data["elements"][0]
        return school
def get_company(self, public_id):
"""
Return data for a single company.
[public_id] - public identifier i.e. univeristy-of-queensland
"""
params = {
"decorationId": "com.linkedin.voyager.deco.organization.web.WebFullCompanyMain-12",
"q": "universalName",
"universalName": public_id,
}
res = self._fetch(f"/organization/companies", params=params)
data = res.json()
if data and "status" in data and data["status"] != 200:
self.logger.info("request failed: {}".format(data["message"]))
return {}
company = data["elements"][0]
return company
    def get_conversation_details(self, profile_urn_id):
        """
        Return the conversation (or "message thread") details for a given [profile_urn_id],
        with the conversation's own id added under "id".
        """
        # passing `params` doesn't work properly, think it's to do with List().
        # Might be a bug in `requests`?
        res = self._fetch(
            f"/messaging/conversations?\
keyVersion=LEGACY_INBOX&q=participants&recipients=List({profile_urn_id})"
        )
        data = res.json()
        # NOTE(review): assumes at least one conversation exists with this
        # profile -- an empty "elements" list would raise IndexError.
        item = data["elements"][0]
        item["id"] = get_id_from_urn(item["entityUrn"])
        return item
def get_conversations(self):
"""
Return list of conversations the user is in.
"""
params = {"keyVersion": "LEGACY_INBOX"}
res = self._fetch(f"/messaging/conversations", params=params)
return res.json()
def get_conversation(self, conversation_urn_id):
"""
Return the full conversation at a given [conversation_urn_id]
"""
res = self._fetch(f"/messaging/conversations/{conversation_urn_id}/events")
return res.json()
def send_message(self, conversation_urn_id=None, recipients=[], message_body=None):
"""
Send a message to a given conversation. If error, return true.
Recipients: List of profile urn id's
"""
params = {"action": "create"}
if not (conversation_urn_id or recipients) and not message_body:
return True
message_event = {
"eventCreate": {
"value": {
"com.linkedin.voyager.messaging.create.MessageCreate": {
"body": message_body,
"attachments": [],
"attributedBody": {"text": message_body, "attributes": []},
"mediaAttachments": [],
}
}
}
}
if conversation_urn_id and not recipients:
res = self._post(
f"/messaging/conversations/{conversation_urn_id}/events",
params=params,
data=json.dumps(message_event),
)
elif recipients and not conversation_urn_id:
message_event["recipients"] = recipients
message_event["subtype"] = "MEMBER_TO_MEMBER"
payload = {
"keyVersion": "LEGACY_INBOX",
"conversationCreate": message_event,
}
res = self._post(
f"/messaging/conversations", params=params, data=json.dumps(payload)
)
return res.status_code != 201
def mark_conversation_as_seen(self, conversation_urn_id):
"""
Send seen to a given conversation. If error, return True.
"""
payload = json.dumps({"patch": {"$set": {"read": True}}})
res = self._post(
f"/messaging/conversations/{conversation_urn_id}", data=payload
)
return res.status_code != 200
def get_user_profile(self):
""""
Return current user profile
"""
sleep(
random.randint(0, 1)
) # sleep a random duration to try and evade suspention
res = self._fetch(f"/me")
data = res.json()
return data
def get_invitations(self, start=0, limit=3):
"""
Return list of new invites
"""
params = {
"start": start,
"count": limit,
"includeInsights": True,
"q": "receivedInvitation",
}
res = self._fetch(
f"{self.client.API_BASE_URL}/relationships/invitationViews", params=params
)
if res.status_code != 200:
return []
response_payload = res.json()
return [element["invitation"] for element in response_payload["elements"]]
def reply_invitation(
self, invitation_entity_urn, invitation_shared_secret, action="accept"
):
"""
Reply to an invite, the default is to accept the invitation.
@Param: invitation_entity_urn: str
@Param: invitation_shared_secret: str
@Param: action: "accept" or "ignore"
Returns True if sucess, False otherwise
"""
invitation_id = get_id_from_urn(invitation_entity_urn)
params = {"action": action}
payload = json.dumps(
{
"invitationId": invitation_id,
"invitationSharedSecret": invitation_shared_secret,
"isGenericInvitation": False,
}
)
res = self._post(
f"{self.client.API_BASE_URL}/relationships/invitations/{invitation_id}",
params=params,
data=payload,
)
return res.status_code == 200
# def add_connection(self, profile_urn_id):
# payload = {
# "emberEntityName": "growth/invitation/norm-invitation",
# "invitee": {
# "com.linkedin.voyager.growth.invitation.InviteeProfile": {
# "profileId": profile_urn_id
# }
# },
# }
# print(payload)
# res = self._post(
# "/growth/normInvitations",
# data=payload,
# headers={"accept": "application/vnd.linkedin.normalized+json+2.1"},
# )
# return res.status_code != 201
    def remove_connection(self, public_profile_id):
        """Disconnect from a profile. Returns True on error (non-200 response)."""
        res = self._post(
            f"/identity/profiles/{public_profile_id}/profileActions?action=disconnect",
            headers={"accept": "application/vnd.linkedin.normalized+json+2.1"},
        )
        return res.status_code != 200
# TODO doesn't work
# def view_profile(self, public_profile_id):
# res = self._fetch(
# f"/identity/profiles/{public_profile_id}/profileView",
# headers={"accept": "application/vnd.linkedin.normalized+json+2.1"},
# )
# return res.status_code != 200
def get_profile_privacy_settings(self, public_profile_id):
res = self._fetch(
f"/identity/profiles/{public_profile_id}/privacySettings",
headers={"accept": "application/vnd.linkedin.normalized+json+2.1"},
)
if res.status_code != 200:
return {}
data = res.json()
return data.get("data", {})
def get_profile_member_badges(self, public_profile_id):
res = self._fetch(
f"/identity/profiles/{public_profile_id}/memberBadges",
headers={"accept": "application/vnd.linkedin.normalized+json+2.1"},
)
if res.status_code != 200:
return {}
data = res.json()
return data.get("data", {})
def get_profile_network_info(self, public_profile_id):
res = self._fetch(
f"/identity/profiles/{public_profile_id}/networkinfo",
headers={"accept": "application/vnd.linkedin.normalized+json+2.1"},
)
if res.status_code != 200:
return {}
data = res.json()
return data.get("data", {})
| """
Provides linkedin api-related code
"""
import random
import logging
from time import sleep
from urllib.parse import urlencode
import json
from linkedin_api.utils.helpers import get_id_from_urn
from linkedin_api.client import Client
logger = logging.getLogger(__name__)
def default_evade():
    """
    A catch-all method to try and evade suspension from Linkedin.
    Currently, just delays the request by a random (bounded) time.
    """
    sleep(random.randint(2, 5))  # sleep a random duration to try and evade suspension
class Linkedin(object):
"""
Class for accessing Linkedin API.
"""
_MAX_UPDATE_COUNT = 100 # max seems to be 100
_MAX_SEARCH_COUNT = 49 # max seems to be 49
_MAX_REPEATED_REQUESTS = (
200 # VERY conservative max requests count to avoid rate-limit
)
    def __init__(
        self,
        username,
        password,
        *,
        authenticate=True,
        refresh_cookies=False,
        debug=False,
        proxies={},  # NOTE(review): mutable default; not mutated here, but confirm Client never mutates it
    ):
        """Create the underlying HTTP client and (by default) log in immediately.

        [username]/[password] - Linkedin credentials
        [authenticate] - when False, skip the login request
        [refresh_cookies]/[debug]/[proxies] - passed through to Client
        """
        self.client = Client(
            refresh_cookies=refresh_cookies, debug=debug, proxies=proxies
        )
        # Global side effect: reconfigures the root logger level for the whole process.
        logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
        self.logger = logger
        if authenticate:
            self.client.authenticate(username, password)
    def _fetch(self, uri, evade=default_evade, **kwargs):
        """
        GET request to Linkedin API.

        [uri] - path relative to client.API_BASE_URL (the base is prepended here)
        [evade] - zero-arg callable run before the request (rate-limit evasion)
        """
        evade()
        url = f"{self.client.API_BASE_URL}{uri}"
        return self.client.session.get(url, **kwargs)
    def _post(self, uri, evade=default_evade, **kwargs):
        """
        POST request to Linkedin API.

        [uri] - path relative to client.API_BASE_URL (the base is prepended here)
        [evade] - zero-arg callable run before the request (rate-limit evasion)
        """
        evade()
        url = f"{self.client.API_BASE_URL}{uri}"
        return self.client.session.post(url, **kwargs)
def search(self, params, limit=None, results=[]):
"""
Do a search.
"""
count = (
limit
if limit and limit <= Linkedin._MAX_SEARCH_COUNT
else Linkedin._MAX_SEARCH_COUNT
)
default_params = {
"count": str(count),
"filters": "List()",
"origin": "GLOBAL_SEARCH_HEADER",
"q": "all",
"start": len(results),
"queryContext": "List(spellCorrectionEnabled->true,relatedSearchesEnabled->true,kcardTypes->PROFILE|COMPANY)",
}
default_params.update(params)
res = self._fetch(
f"/search/blended?{urlencode(default_params, safe='(),')}",
headers={"accept": "application/vnd.linkedin.normalized+json+2.1"},
)
data = res.json()
new_elements = []
for i in range(len(data["data"]["elements"])):
new_elements.extend(data["data"]["elements"][i]["elements"])
# not entirely sure what extendedElements generally refers to - keyword search gives back a single job?
# new_elements.extend(data["data"]["elements"][i]["extendedElements"])
results.extend(new_elements)
results = results[
:limit
] # always trim results, no matter what the request returns
# recursive base case
if (
limit is not None
and (
len(results) >= limit # if our results exceed set limit
or len(results) / count >= Linkedin._MAX_REPEATED_REQUESTS
)
) or len(new_elements) == 0:
return results
self.logger.debug(f"results grew to {len(results)}")
return self.search(params, results=results, limit=limit)
    def search_people(
        self,
        keywords=None,
        connection_of=None,
        network_depth=None,
        current_company=None,
        past_companies=None,
        nonprofit_interests=None,
        profile_languages=None,
        regions=None,
        industries=None,
        schools=None,
        title=None,
        include_private_profiles=False,  # profiles without a public id, "Linkedin Member"
        limit=None,
    ):
        """
        Do a people search and return dicts with urn_id / distance / public_id.

        List-valued filters are encoded as '|'-joined values.  Results without
        a publicIdentifier are always dropped.
        """
        filters = ["resultType->PEOPLE"]
        if connection_of:
            filters.append(f"connectionOf->{connection_of}")
        if network_depth:
            filters.append(f"network->{network_depth}")
        if regions:
            filters.append(f'geoRegion->{"|".join(regions)}')
        if industries:
            filters.append(f'industry->{"|".join(industries)}')
        if current_company:
            filters.append(f'currentCompany->{"|".join(current_company)}')
        if past_companies:
            filters.append(f'pastCompany->{"|".join(past_companies)}')
        if profile_languages:
            filters.append(f'profileLanguage->{"|".join(profile_languages)}')
        if nonprofit_interests:
            filters.append(f'nonprofitInterest->{"|".join(nonprofit_interests)}')
        if schools:
            filters.append(f'schools->{"|".join(schools)}')
        if title:
            filters.append(f"title->{title}")
        params = {"filters": "List({})".format(",".join(filters))}
        if keywords:
            params["keywords"] = keywords
        data = self.search(params, limit=limit)
        results = []
        for item in data:
            if "publicIdentifier" not in item:
                continue  # private "Linkedin Member" result
            results.append(
                {
                    "urn_id": get_id_from_urn(item.get("targetUrn")),
                    "distance": item.get("memberDistance", {}).get("value"),
                    "public_id": item.get("publicIdentifier"),
                }
            )
        return results
    def search_companies(self, keywords=None, limit=None):
        """
        Do a company search; return simplified dicts (urn, urn_id, name, headline, subline).
        """
        filters = ["resultType->COMPANIES"]
        params = {
            "filters": "List({})".format(",".join(filters)),
            "queryContext": "List(spellCorrectionEnabled->true)",
        }
        if keywords:
            params["keywords"] = keywords
        data = self.search(params, limit=limit)
        results = []
        for item in data:
            if item.get("type") != "COMPANY":
                continue  # blended search can interleave non-company hits
            results.append(
                {
                    "urn": item.get("targetUrn"),
                    "urn_id": get_id_from_urn(item.get("targetUrn")),
                    "name": item.get("title", {}).get("text"),
                    "headline": item.get("headline", {}).get("text"),
                    "subline": item.get("subline", {}).get("text"),
                }
            )
        return results
def get_profile_contact_info(self, public_id=None, urn_id=None):
"""
Return data for a single profile.
[public_id] - public identifier i.e. tom-quirk-1928345
[urn_id] - id provided by the related URN
"""
res = self._fetch(
f"/identity/profiles/{public_id or urn_id}/profileContactInfo"
)
data = res.json()
contact_info = {
"email_address": data.get("emailAddress"),
"websites": [],
"twitter": data.get("twitterHandles"),
"birthdate": data.get("birthDateOn"),
"ims": data.get("ims"),
"phone_numbers": data.get("phoneNumbers", []),
}
websites = data.get("websites", [])
for item in websites:
if "com.linkedin.voyager.identity.profile.StandardWebsite" in item["type"]:
item["label"] = item["type"][
"com.linkedin.voyager.identity.profile.StandardWebsite"
]["category"]
elif "" in item["type"]:
item["label"] = item["type"][
"com.linkedin.voyager.identity.profile.CustomWebsite"
]["label"]
del item["type"]
contact_info["websites"] = websites
return contact_info
    def get_profile_skills(self, public_id=None, urn_id=None):
        """
        Return the skills of a profile, with each entry's entityUrn removed.

        [public_id] - public identifier i.e. tom-quirk-1928345
        [urn_id] - id provided by the related URN
        """
        # Fetch up to 100 skills in one page.
        params = {"count": 100, "start": 0}
        res = self._fetch(
            f"/identity/profiles/{public_id or urn_id}/skills", params=params
        )
        data = res.json()
        skills = data.get("elements", [])
        for item in skills:
            del item["entityUrn"]
        return skills
def get_profile(self, public_id=None, urn_id=None):
    """
    Return data for a single profile.

    [public_id] - public identifier i.e. tom-quirk-1928345
    [urn_id] - id provided by the related URN

    Fetches the raw `profileView` payload and reshapes it in place:
    flattens the mini-profile, strips internal bookkeeping keys, and
    attaches experience/skills/education/etc. sub-lists to the profile.
    """
    # NOTE this still works for now, but will probably eventually have to be converted to
    # https://www.linkedin.com/voyager/api/identity/profiles/ACoAAAKT9JQBsH7LwKaE9Myay9WcX8OVGuDq9Uw
    res = self._fetch(f"/identity/profiles/{public_id or urn_id}/profileView")
    data = res.json()
    # The API reports errors in-band via a "status" field.
    if data and "status" in data and data["status"] != 200:
        self.logger.info("request failed: {}".format(data["message"]))
        return {}
    # massage [profile] data: hoist the picture URL and profile id out of the
    # nested miniProfile, then drop internal keys callers don't need.
    profile = data["profile"]
    if "miniProfile" in profile:
        if "picture" in profile["miniProfile"]:
            profile["displayPictureUrl"] = profile["miniProfile"]["picture"][
                "com.linkedin.common.VectorImage"
            ]["rootUrl"]
        profile["profile_id"] = get_id_from_urn(profile["miniProfile"]["entityUrn"])
        del profile["miniProfile"]
    del profile["defaultLocale"]
    del profile["supportedLocales"]
    del profile["versionTag"]
    del profile["showEducationOnProfileTopCard"]
    # massage [experience] data: replace each nested miniCompany with a flat
    # companyLogoUrl (when a vector image is present).
    experience = data["positionView"]["elements"]
    for item in experience:
        if "company" in item and "miniCompany" in item["company"]:
            if "logo" in item["company"]["miniCompany"]:
                logo = item["company"]["miniCompany"]["logo"].get(
                    "com.linkedin.common.VectorImage"
                )
                if logo:
                    item["companyLogoUrl"] = logo["rootUrl"]
            del item["company"]["miniCompany"]
    profile["experience"] = experience
    # massage [skills] data — fetched via a separate endpoint instead of the
    # (commented-out) skillView embedded in this payload.
    # skills = [item["name"] for item in data["skillView"]["elements"]]
    # profile["skills"] = skills
    profile["skills"] = self.get_profile_skills(public_id=public_id, urn_id=urn_id)
    # massage [education] data: flatten each school logo to a plain URL.
    education = data["educationView"]["elements"]
    for item in education:
        if "school" in item:
            if "logo" in item["school"]:
                item["school"]["logoUrl"] = item["school"]["logo"][
                    "com.linkedin.common.VectorImage"
                ]["rootUrl"]
                del item["school"]["logo"]
    profile["education"] = education
    # massage [languages] data
    languages = data["languageView"]["elements"]
    for item in languages:
        del item["entityUrn"]
    profile["languages"] = languages
    # massage [publications] data (authors also carry URNs to strip)
    publications = data["publicationView"]["elements"]
    for item in publications:
        del item["entityUrn"]
        for author in item.get("authors", []):
            del author["entityUrn"]
    profile["publications"] = publications
    # massage [certifications] data
    certifications = data["certificationView"]["elements"]
    for item in certifications:
        del item["entityUrn"]
    profile["certifications"] = certifications
    # massage [volunteer] data
    volunteer = data["volunteerExperienceView"]["elements"]
    for item in volunteer:
        del item["entityUrn"]
    profile["volunteer"] = volunteer
    # massage [honors] data
    honors = data["honorView"]["elements"]
    for item in honors:
        del item["entityUrn"]
    profile["honors"] = honors
    return profile
def get_profile_connections(self, urn_id):
    """List the profiles connected (1st degree) to the profile with [urn_id]."""
    return self.search_people(network_depth="F", connection_of=urn_id)
def get_company_updates(
    self, public_id=None, urn_id=None, max_results=None, results=None
):
    """
    Return a list of company posts, accumulated across paged requests.

    [public_id] - public identifier ie - microsoft
    [urn_id] - id provided by the related URN
    [max_results] - stop once at least this many posts are collected
    [results] - internal accumulator used by the recursion
    """
    # BUG FIX: the old `results=[]` default was a mutable default argument,
    # shared between calls — every invocation kept growing the same list.
    if results is None:
        results = []
    params = {
        # NOTE(review): set literal preserved from the original; requests
        # serializes its single element, but it looks unintentional — confirm.
        "companyUniversalName": {public_id or urn_id},
        "q": "companyFeedByUniversalName",
        "moduleKey": "member-share",
        "count": Linkedin._MAX_UPDATE_COUNT,
        "start": len(results),
    }
    res = self._fetch(f"/feed/updates", params=params)
    data = res.json()
    # Stop when the feed is exhausted, the caller's cap is met, or we have
    # issued suspiciously many repeated requests relative to max_results.
    if (
        len(data["elements"]) == 0
        or (max_results is not None and len(results) >= max_results)
        or (
            max_results is not None
            and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS
        )
    ):
        return results
    results.extend(data["elements"])
    self.logger.debug(f"results grew: {len(results)}")
    return self.get_company_updates(
        public_id=public_id, urn_id=urn_id, results=results, max_results=max_results
    )
def get_profile_updates(
    self, public_id=None, urn_id=None, max_results=None, results=None
):
    """
    Return a list of profile posts, accumulated across paged requests.

    [public_id] - public identifier i.e. tom-quirk-1928345
    [urn_id] - id provided by the related URN
    [max_results] - stop once at least this many posts are collected
    [results] - internal accumulator used by the recursion
    """
    # BUG FIX: avoid the shared mutable default argument `results=[]`.
    if results is None:
        results = []
    params = {
        # NOTE(review): set literal preserved from the original; requests
        # serializes its single element, but it looks unintentional — confirm.
        "profileId": {public_id or urn_id},
        "q": "memberShareFeed",
        "moduleKey": "member-share",
        "count": Linkedin._MAX_UPDATE_COUNT,
        "start": len(results),
    }
    res = self._fetch(f"/feed/updates", params=params)
    data = res.json()
    # Same stop conditions as get_company_updates.
    if (
        len(data["elements"]) == 0
        or (max_results is not None and len(results) >= max_results)
        or (
            max_results is not None
            and len(results) / max_results >= Linkedin._MAX_REPEATED_REQUESTS
        )
    ):
        return results
    results.extend(data["elements"])
    self.logger.debug(f"results grew: {len(results)}")
    return self.get_profile_updates(
        public_id=public_id, urn_id=urn_id, results=results, max_results=max_results
    )
def get_current_profile_views(self):
    """
    Get profile view statistics from the "who viewed my profile" dashboard.
    """
    data = self._fetch(f"/identity/wvmpCards").json()
    # Walk the nested polymorphic payload in named steps.
    viewers_card = data["elements"][0]["value"][
        "com.linkedin.voyager.identity.me.wvmpOverview.WvmpViewersCard"
    ]
    summary_card = viewers_card["insightCards"][0]["value"][
        "com.linkedin.voyager.identity.me.wvmpOverview.WvmpSummaryInsightCard"
    ]
    return summary_card["numViews"]
def get_school(self, public_id):
    """
    Return data for a single school.

    [public_id] - public identifier i.e. uq
    """
    params = {
        "decorationId": "com.linkedin.voyager.deco.organization.web.WebFullCompanyMain-12",
        "q": "universalName",
        "universalName": public_id,
    }
    # Consistency with get_company: let the HTTP client encode the query
    # string instead of hand-building it with urlencode (same resulting URL).
    res = self._fetch("/organization/companies", params=params)
    data = res.json()
    if data and "status" in data and data["status"] != 200:
        self.logger.info("request failed: {}".format(data))
        return {}
    school = data["elements"][0]
    return school
def get_company(self, public_id):
    """
    Return data for a single company.

    [public_id] - public identifier i.e. univeristy-of-queensland
    """
    res = self._fetch(
        f"/organization/companies",
        params={
            "decorationId": "com.linkedin.voyager.deco.organization.web.WebFullCompanyMain-12",
            "q": "universalName",
            "universalName": public_id,
        },
    )
    data = res.json()
    # In-band error reporting via the "status" field.
    if data and "status" in data and data["status"] != 200:
        self.logger.info("request failed: {}".format(data["message"]))
        return {}
    return data["elements"][0]
def get_conversation_details(self, profile_urn_id):
    """
    Return the conversation (or "message thread") details with the profile
    identified by [profile_urn_id].
    """
    # The query string is built by hand: passing `params` doesn't work
    # properly with the List() syntax — might be a bug in `requests`.
    res = self._fetch(
        "/messaging/conversations?"
        f"keyVersion=LEGACY_INBOX&q=participants&recipients=List({profile_urn_id})"
    )
    item = res.json()["elements"][0]
    item["id"] = get_id_from_urn(item["entityUrn"])
    return item
def get_conversations(self):
    """
    Return the list of conversations the user is in.
    """
    res = self._fetch(
        f"/messaging/conversations", params={"keyVersion": "LEGACY_INBOX"}
    )
    return res.json()
def get_conversation(self, conversation_urn_id):
    """
    Return the full conversation (all events) at [conversation_urn_id].
    """
    return self._fetch(
        f"/messaging/conversations/{conversation_urn_id}/events"
    ).json()
def send_message(self, conversation_urn_id=None, recipients=None, message_body=None):
    """
    Send a message. Returns True on error (including bad arguments).

    Provide exactly one target: [conversation_urn_id] for an existing
    thread, or [recipients] (list of profile urn ids) for a new one.
    """
    # BUG FIX: avoid the mutable default argument `recipients=[]`.
    recipients = recipients or []
    params = {"action": "create"}
    if not (conversation_urn_id or recipients) and not message_body:
        return True
    message_event = {
        "eventCreate": {
            "value": {
                "com.linkedin.voyager.messaging.create.MessageCreate": {
                    "body": message_body,
                    "attachments": [],
                    "attributedBody": {"text": message_body, "attributes": []},
                    "mediaAttachments": [],
                }
            }
        }
    }
    if conversation_urn_id and not recipients:
        res = self._post(
            f"/messaging/conversations/{conversation_urn_id}/events",
            params=params,
            data=json.dumps(message_event),
        )
    elif recipients and not conversation_urn_id:
        message_event["recipients"] = recipients
        message_event["subtype"] = "MEMBER_TO_MEMBER"
        payload = {
            "keyVersion": "LEGACY_INBOX",
            "conversationCreate": message_event,
        }
        res = self._post(
            f"/messaging/conversations", params=params, data=json.dumps(payload)
        )
    else:
        # BUG FIX: both (or neither) targets supplied with a message body —
        # the original fell through and crashed on an unbound `res`.
        return True
    return res.status_code != 201
def mark_conversation_as_seen(self, conversation_urn_id):
    """
    Mark the given conversation as read. Returns True on error.
    """
    res = self._post(
        f"/messaging/conversations/{conversation_urn_id}",
        data=json.dumps({"patch": {"$set": {"read": True}}}),
    )
    return res.status_code != 200
def get_user_profile(self):
    """
    Return the current (authenticated) user's profile.
    """
    # Sleep a random short duration to try and evade suspension.
    sleep(random.randint(0, 1))
    return self._fetch(f"/me").json()
def get_invitations(self, start=0, limit=3):
    """
    Return the list of pending received invitations ([] on failure).
    """
    res = self._fetch(
        f"{self.client.API_BASE_URL}/relationships/invitationViews",
        params={
            "start": start,
            "count": limit,
            "includeInsights": True,
            "q": "receivedInvitation",
        },
    )
    if res.status_code != 200:
        return []
    return [element["invitation"] for element in res.json()["elements"]]
def reply_invitation(
    self, invitation_entity_urn, invitation_shared_secret, action="accept"
):
    """
    Reply to an invite; accepts by default.

    @Param: invitation_entity_urn: str
    @Param: invitation_shared_secret: str
    @Param: action: "accept" or "ignore"
    Returns True if sucess, False otherwise
    """
    invitation_id = get_id_from_urn(invitation_entity_urn)
    body = {
        "invitationId": invitation_id,
        "invitationSharedSecret": invitation_shared_secret,
        "isGenericInvitation": False,
    }
    res = self._post(
        f"{self.client.API_BASE_URL}/relationships/invitations/{invitation_id}",
        params={"action": action},
        data=json.dumps(body),
    )
    return res.status_code == 200
# def add_connection(self, profile_urn_id):
# payload = {
# "emberEntityName": "growth/invitation/norm-invitation",
# "invitee": {
# "com.linkedin.voyager.growth.invitation.InviteeProfile": {
# "profileId": profile_urn_id
# }
# },
# }
# print(payload)
# res = self._post(
# "/growth/normInvitations",
# data=payload,
# headers={"accept": "application/vnd.linkedin.normalized+json+2.1"},
# )
# return res.status_code != 201
def remove_connection(self, public_profile_id):
    """Remove a connection. Returns True on error (non-200 status)."""
    endpoint = f"/identity/profiles/{public_profile_id}/profileActions?action=disconnect"
    res = self._post(
        endpoint,
        headers={"accept": "application/vnd.linkedin.normalized+json+2.1"},
    )
    return res.status_code != 200
# TODO doesn't work
# def view_profile(self, public_profile_id):
# res = self._fetch(
# f"/identity/profiles/{public_profile_id}/profileView",
# headers={"accept": "application/vnd.linkedin.normalized+json+2.1"},
# )
# return res.status_code != 200
def get_profile_privacy_settings(self, public_profile_id):
    """Return the privacy settings of a profile ({} on failure)."""
    res = self._fetch(
        f"/identity/profiles/{public_profile_id}/privacySettings",
        headers={"accept": "application/vnd.linkedin.normalized+json+2.1"},
    )
    if res.status_code != 200:
        return {}
    return res.json().get("data", {})
def get_profile_member_badges(self, public_profile_id):
    """Return the member badges of a profile ({} on failure)."""
    res = self._fetch(
        f"/identity/profiles/{public_profile_id}/memberBadges",
        headers={"accept": "application/vnd.linkedin.normalized+json+2.1"},
    )
    if res.status_code != 200:
        return {}
    return res.json().get("data", {})
def get_profile_network_info(self, public_profile_id):
    """Return the network info of a profile ({} on failure)."""
    res = self._fetch(
        f"/identity/profiles/{public_profile_id}/networkinfo",
        headers={"accept": "application/vnd.linkedin.normalized+json+2.1"},
    )
    if res.status_code != 200:
        return {}
    return res.json().get("data", {})
|
import uuid
import time
import ipaddress
from .. import context
from lyrebird import utils
from lyrebird import application
from lyrebird.log import get_logger
from lyrebird.mock.blueprints.apis.bandwidth import config
from urllib.parse import urlparse, unquote
from .http_data_helper import DataHelper
from .http_header_helper import HeadersHelper
logger = get_logger()
class HandlerContext:
    """
    Per-request handler context.

    Holds the request/response pair, timing data, and flow metadata for one
    request as it passes through the mock server, and publishes flow events
    on the event bus. (Docstring translated from the original Chinese.)
    """
    MOCK_PATH_PREFIX = '/mock'

    def __init__(self, request):
        self.id = str(uuid.uuid4())
        self.request = request
        self.response = None
        # Timestamps of the four proxy hops (client->lyrebird, lyrebird->server
        # and the two responses back).
        self.client_req_time = None
        self.client_resp_time = None
        self.server_req_time = None
        self.server_resp_time = None
        # Flow record shared with the cache and the event bus.
        self.flow = dict(
            id=self.id,
            size=0,
            duration=0,
            start_time=time.time(),
            request={},
            response={}
        )
        self.client_address = None
        self.is_request_edited = False
        self.is_response_edited = False
        self.response_source = ''
        self.is_proxiable = True
        self.response_chunk_size = 2048
        self._parse_request()

    def _parse_request(self):
        """Populate self.flow['request'] from the incoming request."""
        # Read stream
        self.request.get_data()
        # parse path; fall back to the proxy headers when the URL path
        # carries no origin host.
        request_info = self._read_origin_request_info_from_url()
        if not request_info['host']:
            request_info_from_header = self._read_origin_request_info_from_header()
            if len(request_info_from_header) > 0:
                request_info = request_info_from_header
        headers = HeadersHelper.origin2flow(self.request)
        _request = dict(
            headers=headers,
            method=self.request.method,
            query=self.request.args,
            timestamp=round(time.time(), 3)
        )
        _request.update(request_info)
        # handle request data
        if self.request.method in ['POST', 'PUT']:
            DataHelper.origin2flow(self.request, output=_request)
        if self.request.headers.get('Lyrebird-Client-Address'):
            self.client_address = self.request.headers.get('Lyrebird-Client-Address')
        else:
            self.client_address = self.request.remote_addr
        self.flow['client_address'] = self.client_address
        self.flow['request'] = _request
        context.application.cache.add(self.flow)
        # BUG FIX: use double quotes inside the f-string expression — reusing
        # the outer quote type is a SyntaxError before Python 3.12 (PEP 701).
        logger.debug(f'[On client request] {self.flow["request"]["url"]}')

    def _read_origin_request_info_from_url(self):
        """Extract the origin request info embedded in the mock URL path."""
        url_prefix = '/'+self.request.blueprint+'/'
        raw_url = self.request.path[len(url_prefix):]
        if self.request.query_string:
            raw_url += '?' + self.request.query_string.decode()
        parsed_path = urlparse(raw_url)
        # urllib.unquote : fix bug - url contains ',' will be auto encoded by flask, that cause proxy not work.
        # e.g /1.2,3 -> 1.2%2C3
        _request = dict(
            url=raw_url,
            scheme=parsed_path.scheme,
            host=parsed_path.hostname,
            port=parsed_path.port if parsed_path.port else '80',
            path=unquote(parsed_path.path)
        )
        return _request

    def _read_origin_request_info_from_header(self):
        """Extract origin request info from the configured proxy headers.

        Returns {} when no host header is present.
        """
        proxy_headers = application.config['mock.proxy_headers']
        scheme = self.request.headers.get(proxy_headers['scheme'], default='http')
        host = self.request.headers.get(proxy_headers['host'])
        port = self.request.headers.get(proxy_headers['port'], default='80')
        if not host:
            return {}
        scheme = scheme.strip()
        host = host.strip()
        port = port.strip()
        # if host is IP address then full_host=host:port
        # else if is a domain the full_host=host
        full_host = host
        try:
            ipaddress.ip_address(host)
            full_host = host + ':' + port
        except Exception:
            pass
        return dict(
            url=scheme+'://'+full_host+self.request.full_path[len(self.MOCK_PATH_PREFIX):],
            scheme=scheme,
            host=host,
            port=port,
            path=self.request.path[len(self.MOCK_PATH_PREFIX):]
        )

    def set_request_edited(self):
        self.is_request_edited = True

    def set_response_edited(self):
        self.is_response_edited = True

    def set_response_source_mock(self):
        self.response_source = 'mock'

    def set_response_source_proxy(self):
        self.response_source = 'proxy'

    def get_request_body(self):
        """Return the outbound request body (edited flow data wins)."""
        if self.is_request_edited:
            self.flow['request']['headers'] = HeadersHelper.flow2origin(self.flow['request'])
            _data = DataHelper.flow2origin(self.flow['request'])
        else:
            _data = self.request.data or self.request.form or None
        return _data

    def get_request_headers(self):
        """Return the outbound request headers, filtering unproxyable ones."""
        if self.is_request_edited:
            self.flow['request']['headers'] = HeadersHelper.flow2origin(self.flow['request'])
        headers = {}
        unproxy_headers = application.config.get('proxy.ignored_headers', {})
        for name, value in self.flow['request']['headers'].items():
            if not value or name in ['Cache-Control', 'Host']:
                continue
            if name in unproxy_headers and unproxy_headers[name] in value:
                continue
            headers[name] = value
        return headers

    def get_response_generator(self):
        """Return a body generator: edited bytes or the upstream stream."""
        if self.is_response_edited:
            self.flow['response']['headers'] = HeadersHelper.flow2origin(self.flow['response'])
            _generator = self._generator_bytes()
        else:
            _generator = self._generator_stream()
        return _generator

    def _generator_bytes(self):
        """Yield the edited response in chunks, throttled by the bandwidth config."""
        def generator():
            try:
                _resp_data = DataHelper.flow2origin(self.flow['response']) or ''
                length = len(_resp_data)
                size = self.response_chunk_size
                bandwidth = config.bandwidth
                if bandwidth > 0:
                    sleep_time = self.response_chunk_size / (bandwidth * 1024)
                else:
                    sleep_time = 0
                for i in range(int(length/size) + 1):
                    time.sleep(sleep_time)
                    self.server_resp_time = time.time()
                    yield _resp_data[ i * size : (i+1) * size ]
            finally:
                self.update_client_resp_time()
        return generator

    def _generator_stream(self):
        """Stream the upstream response through, buffering a copy for the flow."""
        def generator():
            upstream = self.response
            try:
                bandwidth = config.bandwidth
                if bandwidth > 0:
                    sleep_time = self.response_chunk_size / (bandwidth * 1024)
                else:
                    sleep_time = 0
                buffer = []
                for item in upstream.response:
                    buffer.append(item)
                    time.sleep(sleep_time)
                    self.server_resp_time = time.time()
                    yield item
            finally:
                self.response.data = b''.join(buffer)
                DataHelper.origin2flow(self.response, output=self.flow['response'])
                self.update_client_resp_time()
                upstream.close()
        return generator

    def update_response_headers_code2flow(self, output_key='response'):
        """Copy status code and headers of self.response into the flow."""
        self.flow[output_key] = {
            'code': self.response.status_code,
            'timestamp': round(time.time(), 3)
        }
        HeadersHelper.origin2flow(self.response, output=self.flow[output_key])

    def update_response_data2flow(self, output_key='response'):
        """Copy the body of self.response into the flow."""
        DataHelper.origin2flow(self.response, output=self.flow[output_key])

    def update_client_req_time(self):
        """Record the client request time and publish the flow.request event."""
        self.client_req_time = time.time()
        # Event bus: client request event (enabled).
        method = self.flow['request']['method']
        url = self.flow['request']['url']
        _flow_client_req = {}
        for key, value in self.flow.items():
            _flow_client_req[key] = value
        context.application.event_bus.publish(
            'flow.request',
            dict(
                flow=_flow_client_req,
                message=f"URL: {url}\nMethod: {method}\n"
            )
        )

    def update_client_resp_time(self):
        """Record timing/size, publish the flow event, save in record mode."""
        self.client_resp_time = time.time()
        # Event bus: client response event (enabled).
        resp_data = self.flow['response'].get('data', '')
        if isinstance(resp_data, str):
            self.flow['size'] = len(resp_data.encode())
        else:
            self.flow['size'] = len(resp_data)
        self.flow['duration'] = self.server_resp_time - self.client_req_time
        method = self.flow['request']['method']
        url = self.flow['request']['url']
        code = self.flow['response']['code']
        duration = utils.convert_time(self.flow['duration'])
        size = utils.convert_size(self.flow['size'])
        # Import decoder for decoding the requested content
        decode_flow = {}
        application.encoders_decoders.decoder_handler(self.flow, output=decode_flow)
        context.application.event_bus.publish(
            'flow',
            dict(
                flow=decode_flow,
                message=f"URL: {url}\nMethod: {method}\nStatusCode: {code}\nDuration: {duration}\nSize: {size}"
            )
        )
        if context.application.work_mode == context.Mode.RECORD:
            dm = context.application.data_manager
            dm.save_data(self.flow)

    def update_server_req_time(self):
        """Record the time the request was forwarded to the origin server."""
        self.server_req_time = time.time()
        # Event bus: server request forwarded event (currently unused).
        # context.application.event_bus.publish('flow',
        # dict(name='server.request',
        # time=self.server_req_time,
        # id=self.id,
        # flow=self.flow))

    def update_server_resp_time(self):
        """Record the time the origin server responded."""
        self.server_resp_time = time.time()
        # Event bus: server response event (currently unused).
        # context.application.event_bus.publish('flow',
        # dict(name='server.response',
        # time=self.server_resp_time,
        # id=self.id,
        # flow=self.flow))

    def add_flow_action(self, action):
        """Append an action record to the flow (creating the list on first use)."""
        if self.flow.get('action'):
            self.flow['action'].append(action)
        else:
            self.flow['action'] = [action]
| import uuid
import time
import ipaddress
from .. import context
from lyrebird import utils
from lyrebird import application
from lyrebird.log import get_logger
from lyrebird.mock.blueprints.apis.bandwidth import config
from urllib.parse import urlparse, unquote
from .http_data_helper import DataHelper
from .http_header_helper import HeadersHelper
logger = get_logger()
class HandlerContext:
    """
    Per-request handler context.

    Holds the request/response pair, timing data, and flow metadata for one
    request as it passes through the mock server, and publishes flow events
    on the event bus. (Docstring translated from the original Chinese.)
    """
    MOCK_PATH_PREFIX = '/mock'

    def __init__(self, request):
        self.id = str(uuid.uuid4())
        self.request = request
        self.response = None
        # Timestamps of the four proxy hops (client->lyrebird, lyrebird->server
        # and the two responses back).
        self.client_req_time = None
        self.client_resp_time = None
        self.server_req_time = None
        self.server_resp_time = None
        # Flow record shared with the cache and the event bus.
        self.flow = dict(
            id=self.id,
            size=0,
            duration=0,
            start_time=time.time(),
            request={},
            response={}
        )
        self.client_address = None
        self.is_request_edited = False
        self.is_response_edited = False
        self.response_source = ''
        self.is_proxiable = True
        self.response_chunk_size = 2048
        self._parse_request()

    def _parse_request(self):
        """Populate self.flow['request'] from the incoming request."""
        # Read stream
        self.request.get_data()
        # parse path; fall back to the proxy headers when the URL path
        # carries no origin host.
        request_info = self._read_origin_request_info_from_url()
        if not request_info['host']:
            request_info_from_header = self._read_origin_request_info_from_header()
            if len(request_info_from_header) > 0:
                request_info = request_info_from_header
        headers = HeadersHelper.origin2flow(self.request)
        _request = dict(
            headers=headers,
            method=self.request.method,
            query=self.request.args,
            timestamp=round(time.time(), 3)
        )
        _request.update(request_info)
        # handle request data
        if self.request.method in ['POST', 'PUT']:
            DataHelper.origin2flow(self.request, output=_request)
        if self.request.headers.get('Lyrebird-Client-Address'):
            self.client_address = self.request.headers.get('Lyrebird-Client-Address')
        else:
            self.client_address = self.request.remote_addr
        self.flow['client_address'] = self.client_address
        self.flow['request'] = _request
        context.application.cache.add(self.flow)
        logger.debug(f'[On client request] {self.flow["request"]["url"]}')

    def _read_origin_request_info_from_url(self):
        """Extract the origin request info embedded in the mock URL path."""
        url_prefix = '/'+self.request.blueprint+'/'
        raw_url = self.request.path[len(url_prefix):]
        if self.request.query_string:
            raw_url += '?' + self.request.query_string.decode()
        parsed_path = urlparse(raw_url)
        # urllib.unquote : fix bug - url contains ',' will be auto encoded by flask, that cause proxy not work.
        # e.g /1.2,3 -> 1.2%2C3
        _request = dict(
            url=raw_url,
            scheme=parsed_path.scheme,
            host=parsed_path.hostname,
            port=parsed_path.port if parsed_path.port else '80',
            path=unquote(parsed_path.path)
        )
        return _request

    def _read_origin_request_info_from_header(self):
        """Extract origin request info from the configured proxy headers.

        Returns {} when no host header is present.
        """
        proxy_headers = application.config['mock.proxy_headers']
        scheme = self.request.headers.get(proxy_headers['scheme'], default='http')
        host = self.request.headers.get(proxy_headers['host'])
        port = self.request.headers.get(proxy_headers['port'], default='80')
        if not host:
            return {}
        scheme = scheme.strip()
        host = host.strip()
        port = port.strip()
        # if host is IP address then full_host=host:port
        # else if is a domain the full_host=host
        full_host = host
        try:
            ipaddress.ip_address(host)
            full_host = host + ':' + port
        except Exception:
            pass
        return dict(
            url=scheme+'://'+full_host+self.request.full_path[len(self.MOCK_PATH_PREFIX):],
            scheme=scheme,
            host=host,
            port=port,
            path=self.request.path[len(self.MOCK_PATH_PREFIX):]
        )

    def set_request_edited(self):
        self.is_request_edited = True

    def set_response_edited(self):
        self.is_response_edited = True

    def set_response_source_mock(self):
        self.response_source = 'mock'

    def set_response_source_proxy(self):
        self.response_source = 'proxy'

    def get_request_body(self):
        """Return the outbound request body (edited flow data wins)."""
        if self.is_request_edited:
            self.flow['request']['headers'] = HeadersHelper.flow2origin(self.flow['request'])
            _data = DataHelper.flow2origin(self.flow['request'])
        else:
            _data = self.request.data or self.request.form or None
        return _data

    def get_request_headers(self):
        """Return the outbound request headers, filtering unproxyable ones."""
        if self.is_request_edited:
            self.flow['request']['headers'] = HeadersHelper.flow2origin(self.flow['request'])
        headers = {}
        unproxy_headers = application.config.get('proxy.ignored_headers', {})
        for name, value in self.flow['request']['headers'].items():
            if not value or name in ['Cache-Control', 'Host']:
                continue
            if name in unproxy_headers and unproxy_headers[name] in value:
                continue
            headers[name] = value
        return headers

    def get_response_generator(self):
        """Return a body generator: edited bytes or the upstream stream."""
        if self.is_response_edited:
            self.flow['response']['headers'] = HeadersHelper.flow2origin(self.flow['response'])
            _generator = self._generator_bytes()
        else:
            _generator = self._generator_stream()
        return _generator

    def _generator_bytes(self):
        """Yield the edited response in chunks, throttled by the bandwidth config."""
        def generator():
            try:
                _resp_data = DataHelper.flow2origin(self.flow['response']) or ''
                length = len(_resp_data)
                size = self.response_chunk_size
                bandwidth = config.bandwidth
                if bandwidth > 0:
                    sleep_time = self.response_chunk_size / (bandwidth * 1024)
                else:
                    sleep_time = 0
                for i in range(int(length/size) + 1):
                    time.sleep(sleep_time)
                    self.server_resp_time = time.time()
                    yield _resp_data[ i * size : (i+1) * size ]
            finally:
                self.update_client_resp_time()
        return generator

    def _generator_stream(self):
        """Stream the upstream response through, buffering a copy for the flow."""
        def generator():
            upstream = self.response
            try:
                bandwidth = config.bandwidth
                if bandwidth > 0:
                    sleep_time = self.response_chunk_size / (bandwidth * 1024)
                else:
                    sleep_time = 0
                buffer = []
                for item in upstream.response:
                    buffer.append(item)
                    time.sleep(sleep_time)
                    self.server_resp_time = time.time()
                    yield item
            finally:
                self.response.data = b''.join(buffer)
                DataHelper.origin2flow(self.response, output=self.flow['response'])
                self.update_client_resp_time()
                upstream.close()
        return generator

    def update_response_headers_code2flow(self, output_key='response'):
        """Copy status code and headers of self.response into the flow."""
        self.flow[output_key] = {
            'code': self.response.status_code,
            'timestamp': round(time.time(), 3)
        }
        HeadersHelper.origin2flow(self.response, output=self.flow[output_key])

    def update_response_data2flow(self, output_key='response'):
        """Copy the body of self.response into the flow."""
        DataHelper.origin2flow(self.response, output=self.flow[output_key])

    def update_client_req_time(self):
        """Record the client request time and publish the flow.request event."""
        self.client_req_time = time.time()
        # Event bus: client request event (enabled).
        method = self.flow['request']['method']
        url = self.flow['request']['url']
        _flow_client_req = {}
        for key, value in self.flow.items():
            _flow_client_req[key] = value
        context.application.event_bus.publish(
            'flow.request',
            dict(
                flow=_flow_client_req,
                message=f"URL: {url}\nMethod: {method}\n"
            )
        )

    def update_client_resp_time(self):
        """Record timing/size, publish the flow event, save in record mode."""
        self.client_resp_time = time.time()
        # Event bus: client response event (enabled).
        resp_data = self.flow['response'].get('data', '')
        if isinstance(resp_data, str):
            self.flow['size'] = len(resp_data.encode())
        else:
            self.flow['size'] = len(resp_data)
        self.flow['duration'] = self.server_resp_time - self.client_req_time
        method = self.flow['request']['method']
        url = self.flow['request']['url']
        code = self.flow['response']['code']
        duration = utils.convert_time(self.flow['duration'])
        size = utils.convert_size(self.flow['size'])
        # Import decoder for decoding the requested content
        decode_flow = {}
        application.encoders_decoders.decoder_handler(self.flow, output=decode_flow)
        context.application.event_bus.publish(
            'flow',
            dict(
                flow=decode_flow,
                message=f"URL: {url}\nMethod: {method}\nStatusCode: {code}\nDuration: {duration}\nSize: {size}"
            )
        )
        if context.application.work_mode == context.Mode.RECORD:
            dm = context.application.data_manager
            dm.save_data(self.flow)

    def update_server_req_time(self):
        """Record the time the request was forwarded to the origin server."""
        self.server_req_time = time.time()
        # Event bus: server request forwarded event (currently unused).
        # context.application.event_bus.publish('flow',
        # dict(name='server.request',
        # time=self.server_req_time,
        # id=self.id,
        # flow=self.flow))

    def update_server_resp_time(self):
        """Record the time the origin server responded."""
        self.server_resp_time = time.time()
        # Event bus: server response event (currently unused).
        # context.application.event_bus.publish('flow',
        # dict(name='server.response',
        # time=self.server_resp_time,
        # id=self.id,
        # flow=self.flow))

    def add_flow_action(self, action):
        """Append an action record to the flow (creating the list on first use)."""
        if self.flow.get('action'):
            self.flow['action'].append(action)
        else:
            self.flow['action'] = [action]
|
#!/usr/bin/env python3
"""Rename video files in the CWD to "<name> - <season>xNN.<ext>" episode style."""
from glob import glob
from os import getcwd
from shutil import move
from sys import argv

print(argv)
if len(argv) < 4:
    # BUG FIX: include the script name — argv[0] is the script itself,
    # so three additional user arguments are required.
    print(f"Usage: {argv[0]} <name> <season> <extension>")
    quit()
name = argv[1]
season = argv[2]
ext = argv[3]

files = sorted(glob(f"{getcwd()}/*.{ext}"))
to_move = []
for index, file_path in enumerate(files):
    file = file_path.replace(f"{getcwd()}/", "")
    # Zero-pad the episode number with a format spec; the original nested the
    # outer quote inside the f-string (`rjust(2, "0")`), which is a
    # SyntaxError before Python 3.12 (PEP 701).
    new_file = f"{name} - {season}x{index + 1:02d}.{ext}"
    new_file_path = f"{getcwd()}/{new_file}"
    print(f"{file} ==> {new_file}")
    to_move.append([file_path, new_file_path])

# Confirm before touching the filesystem.
do_process = str.lower(input("Proceed? [y/N] "))
if do_process == "y":
    for movable in to_move:
        move(movable[0], movable[1])
else:
    quit()
#!/usr/bin/env python3
"""Bulk-rename files in the CWD to "<name> - <season>xNN.<ext>" episode style."""
from glob import glob
from os import getcwd
from shutil import move
from sys import argv

print(argv)
if len(argv) < 4:
    print(f"Usage: <name> <season> <extension>")
    quit()

name, season, ext = argv[1], argv[2], argv[3]
cwd = getcwd()

# Build the full rename plan first, printing each mapping for review.
to_move = []
for index, file_path in enumerate(sorted(glob(f"{cwd}/*.{ext}"))):
    file = file_path.replace(f"{cwd}/", "")
    new_file = f"{name} - {season}x{str(index + 1).rjust(2, '0')}.{ext}"
    new_file_path = f"{cwd}/{new_file}"
    print(f"{file} ==> {new_file}")
    to_move.append([file_path, new_file_path])

# Only touch the filesystem after explicit confirmation.
do_process = str.lower(input("Proceed? [y/N] "))
if do_process == "y":
    for src, dst in to_move:
        move(src, dst)
else:
    quit()
import asyncio
from typing import Callable, List
from playwright.async_api import Page, TimeoutError
from tqdm import tqdm
from tiktokpy.client import Client
from tiktokpy.utils import unique_dicts_by_key
from tiktokpy.utils.client import catch_response_and_store, catch_response_info
from tiktokpy.utils.logger import logger
# CSS selectors for TikTok's web UI (data-e2e attributes where available).
FEED_LIST_ITEM = 'div[data-e2e="recommend-list-item-container"]'
USER_FEED_LIST = 'div[data-e2e="user-post-item-list"]'
USER_FEED_ITEM = f"{USER_FEED_LIST} > div"
USER_FEED_LAST_ITEM = f"{USER_FEED_ITEM}:last-child"
FOLLOW_BUTTON = 'button[data-e2e="follow-button"]'
UNFOLLOW_BUTTON = 'div[class*="DivFollowIcon"]'
MAIN_WRAPPER = "div[class*=DivThreeColumnContainer],main[class*=MainDetailWrapper]"
ERROR_TITLE = "main div[class*=ErrorContainer] p"
# Template selector — presumably formatted with a profile path elsewhere; TODO confirm.
SEARCH_USERNAME = 'a[href="/{}"]'
class User:
def __init__(self, client: Client):
    # Shared tiktokpy browser client used to open pages and navigate.
    self.client = client
async def like(self, username: str, video_id: str):
    """Like @username's video [video_id]; no-op if it is already liked."""
    page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
    logger.debug(f"👥 Like video id {video_id} of @{username}")

    # Capture the digg API response so we can confirm the like succeeded.
    digg_queue: asyncio.Queue = asyncio.Queue(maxsize=1)

    def _on_response(res):
        return asyncio.create_task(
            catch_response_info(res, digg_queue, "/commit/item/digg"),
        )

    page.on("response", _on_response)
    logger.info(f"🧭 Going to @{username}'s video {video_id} page for like")
    await self.client.goto(
        f"/@{username}/video/{video_id}",
        page=page,
        wait_until="networkidle",
    )
    like_selector = f'{FEED_LIST_ITEM}:first-child span[data-e2e="like-icon"]'
    already_liked = await page.query_selector(f"{like_selector} > div > svg")
    if already_liked:
        logger.info(f"😏 @{username}'s video {video_id} already liked")
        await page.close()
        return
    await page.click(like_selector)
    digg_info = await digg_queue.get()
    if digg_info["status_code"] == 0:
        logger.info(f"👍 @{username}'s video {video_id} liked")
    else:
        logger.warning(f"⚠️ @{username}'s video {video_id} probably not liked")
    await page.close()
async def unlike(self, username: str, video_id: str):
    """Unlike @username's video [video_id]; no-op if it is not liked."""
    page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
    logger.debug(f"👥 Unlike video id {video_id} of @{username}")

    # Capture the digg API response so we can confirm the unlike succeeded.
    digg_queue: asyncio.Queue = asyncio.Queue(maxsize=1)

    def _on_response(res):
        return asyncio.create_task(
            catch_response_info(res, digg_queue, "/commit/item/digg"),
        )

    page.on("response", _on_response)
    logger.info(f"🧭 Going to @{username}'s video {video_id} page for unlike")
    await self.client.goto(
        f"/@{username}/video/{video_id}",
        page=page,
        wait_until="networkidle",
    )
    like_selector = f'{FEED_LIST_ITEM}:first-child span[data-e2e="like-icon"]'
    already_unliked = not await page.query_selector(f"{like_selector} > div > svg")
    if already_unliked:
        logger.info(f"😏 @{username}'s video {video_id} already unliked")
        await page.close()
        return
    await page.click(like_selector)
    digg_info = await digg_queue.get()
    if digg_info["status_code"] == 0:
        logger.info(f"👎 @{username}'s video {video_id} unliked")
    else:
        logger.warning(f"⚠️ @{username}'s video {video_id} probably not unliked")
    await page.close()
async def follow(self, username: str):
    """Follow ``username``.

    Opens the profile page, checks the follow button label, clicks only when
    the account is not yet followed, and confirms via the intercepted
    ``/commit/follow/user`` API response.
    """
    page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
    logger.debug(f"👥 Follow {username}")
    follow_info_queue: asyncio.Queue = asyncio.Queue(maxsize=1)
    page.on(
        "response",
        lambda res: asyncio.create_task(
            catch_response_info(res, follow_info_queue, "/commit/follow/user"),
        ),
    )
    logger.info(f"🧭 Going to {username}'s page for following")
    # BUG FIX: nested double quotes inside a double-quoted f-string
    # (lstrip("@")) are a SyntaxError before Python 3.12 — use single quotes.
    await self.client.goto(
        f"/@{username.lstrip('@')}",
        page=page,
        wait_until="networkidle",
    )
    follow_title: str = await page.eval_on_selector(
        FOLLOW_BUTTON,
        expression="element => element.textContent",
    )
    if follow_title.lower() != "follow":
        logger.info(f"😏 {username} already followed")
        await page.close()
        return
    await page.click(FOLLOW_BUTTON)
    follow_info = await follow_info_queue.get()
    if follow_info["status_code"] == 0:
        logger.info(f"➕ {username} followed")
    else:
        logger.warning(f"⚠️ {username} probably not followed")
    await page.close()
async def unfollow(self, username: str):
    """Unfollow ``username``.

    Opens the profile, verifies the button shows "Following", clicks the
    unfollow control, and confirms via the intercepted
    ``/commit/follow/user`` API response.
    """
    page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
    logger.debug(f"👥 Unfollow {username}")
    unfollow_info_queue: asyncio.Queue = asyncio.Queue(maxsize=1)
    page.on(
        "response",
        lambda res: asyncio.create_task(
            catch_response_info(res, unfollow_info_queue, "/commit/follow/user"),
        ),
    )
    logger.info(f"🧭 Going to {username}'s page for unfollowing")
    # BUG FIX: nested double quotes inside a double-quoted f-string
    # (lstrip("@")) are a SyntaxError before Python 3.12 — use single quotes.
    await self.client.goto(
        f"/@{username.lstrip('@')}",
        page=page,
        wait_until="networkidle",
    )
    follow_title: str = await page.eval_on_selector(
        FOLLOW_BUTTON,
        expression="element => element.textContent",
    )
    if follow_title.lower() != "following":
        logger.info(f"😏 {username} already unfollowed")
        # BUG FIX: this early return leaked the page; close it like every
        # sibling method does.
        await page.close()
        return
    await page.click(UNFOLLOW_BUTTON)
    unfollow_info = await unfollow_info_queue.get()
    if unfollow_info["status_code"] == 0:
        logger.info(f"➖ {username} unfollowed")
    else:
        logger.warning(f"⚠️ {username} probably not unfollowed")
    await page.close()
async def feed(self, username: str, amount: int) -> List[dict]:
    """Collect up to ``amount`` unique feed items from ``username``'s profile.

    Finds the profile through the search page, intercepts feed API responses
    while paginating, and returns de-duplicated item dicts. Returns an empty
    list when the user cannot be found, the navigation times out, or the
    page shows an error message.
    """
    page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
    logger.debug(f"📨 Request {username} feed")
    # BUG FIX: nested double quotes inside a double-quoted f-string
    # (lstrip("@")) are a SyntaxError before Python 3.12 — use single quotes.
    _ = await self.client.goto(
        f"/search/user?q={username.lstrip('@')}",
        page=page,
        wait_until="networkidle",
    )
    username_selector = SEARCH_USERNAME.format(username)
    is_found_user = await page.query_selector(username_selector)
    if not is_found_user:
        logger.error(f'❗️ User "{username}" not found')
        # BUG FIX: every early return below used to leak the page.
        await page.close()
        return []
    result: List[dict] = []
    page.on(
        "response",
        lambda res: asyncio.create_task(catch_response_and_store(res, result)),
    )
    try:
        await page.click(username_selector)
        await page.wait_for_selector(MAIN_WRAPPER)
        await page.wait_for_load_state(state="networkidle")
    except TimeoutError:
        logger.error(f'❗️ Unexpected error. Timeout on searching user "{username}"...')
        await page.close()
        return []
    logger.debug(f"📭 Got {username} feed")
    error = await page.query_selector(ERROR_TITLE)
    if error:
        logger.info(f'😭 Error message on page: "{await error.text_content()}"')
        await page.close()
        return []
    await page.wait_for_selector(USER_FEED_LIST, state="visible")
    await self._paginate_feed_list(
        page=page,
        username=username,
        result=result,
        amount=amount,
    )
    await page.close()
    return unique_dicts_by_key(result, "id")[:amount]
async def _paginate_feed_list(
    self,
    page: Page,
    username: str,
    result: List[dict],
    amount: int,
):
    """Scroll the profile feed until ``amount`` unique videos are collected.

    Gives up (with a warning) after ``max_attempts`` consecutive scrolls that
    yield no new unique items — e.g. when the remaining videos are private.
    """
    result_unique_amount: Callable = lambda: len(unique_dicts_by_key(result, "id"))
    pbar = tqdm(total=amount, desc=f"📈 Getting {username} feed")
    pbar.n = min(result_unique_amount(), amount)
    pbar.refresh()
    attempts = 0
    max_attempts = 3
    last_result = result_unique_amount()
    # BUG FIX: the loop condition used to be captured in two booleans computed
    # once before the loop, so it never reflected progress — scrolling kept
    # going after ``amount`` was reached and only the attempts-exhausted break
    # could stop it. Re-evaluate "do we have enough yet?" every iteration.
    while result_unique_amount() < amount:
        logger.debug("🖱 Trying to scroll to last video item")
        await page.evaluate(
            f"""
            document.querySelector('{USER_FEED_LAST_ITEM}')
                .scrollIntoView();
            """,
        )
        await page.wait_for_timeout(1_000)
        elements = await page.query_selector_all(USER_FEED_ITEM)
        logger.debug(f"🔎 Found {len(elements)} items on page by selector {USER_FEED_ITEM}")
        pbar.n = min(result_unique_amount(), amount)
        pbar.refresh()
        # Count consecutive scrolls that produced no new unique items.
        if last_result == result_unique_amount():
            attempts += 1
        else:
            attempts = 0
        if attempts > max_attempts:
            pbar.clear()
            pbar.total = result_unique_amount()
            logger.info(
                f"⚠️ After {max_attempts} attempts found {result_unique_amount()} videos. "
                f"Probably some videos are private",
            )
            break
        last_result = result_unique_amount()
        await page.wait_for_timeout(5_000)
    pbar.close()
| import asyncio
from typing import Callable, List
from playwright.async_api import Page, TimeoutError
from tqdm import tqdm
from tiktokpy.client import Client
from tiktokpy.utils import unique_dicts_by_key
from tiktokpy.utils.client import catch_response_and_store, catch_response_info
from tiktokpy.utils.logger import logger
FEED_LIST_ITEM = 'div[data-e2e="recommend-list-item-container"]'
USER_FEED_LIST = 'div[data-e2e="user-post-item-list"]'
USER_FEED_ITEM = f"{USER_FEED_LIST} > div"
USER_FEED_LAST_ITEM = f"{USER_FEED_ITEM}:last-child"
FOLLOW_BUTTON = 'button[data-e2e="follow-button"]'
UNFOLLOW_BUTTON = 'div[class*="DivFollowIcon"]'
MAIN_WRAPPER = "div[class*=DivThreeColumnContainer],main[class*=MainDetailWrapper]"
ERROR_TITLE = "main div[class*=ErrorContainer] p"
SEARCH_USERNAME = 'a[href="/{}"]'
class User:
    """High-level user actions (like/follow/feed) driven through a browser client."""

    def __init__(self, client: Client):
        # Shared browser client used to open a fresh page for every action.
        self.client = client
async def like(self, username: str, video_id: str):
    """Put a like on ``username``'s video ``video_id``, skipping if already liked."""
    page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
    logger.debug(f"👥 Like video id {video_id} of @{username}")
    # The digg API response confirms whether the click actually registered.
    digg_queue: asyncio.Queue = asyncio.Queue(maxsize=1)
    page.on(
        "response",
        lambda res: asyncio.create_task(
            catch_response_info(res, digg_queue, "/commit/item/digg"),
        ),
    )
    logger.info(f"🧭 Going to @{username}'s video {video_id} page for like")
    await self.client.goto(
        f"/@{username}/video/{video_id}",
        page=page,
        wait_until="networkidle",
    )
    like_selector = f'{FEED_LIST_ITEM}:first-child span[data-e2e="like-icon"]'
    # The filled <svg> under the icon is only present for a liked video.
    if await page.query_selector(f"{like_selector} > div > svg"):
        logger.info(f"😏 @{username}'s video {video_id} already liked")
        await page.close()
        return
    await page.click(like_selector)
    outcome = await digg_queue.get()
    if outcome["status_code"] == 0:
        logger.info(f"👍 @{username}'s video {video_id} liked")
    else:
        logger.warning(f"⚠️ @{username}'s video {video_id} probably not liked")
    await page.close()
async def unlike(self, username: str, video_id: str):
    """Remove a like from ``username``'s video ``video_id`` if it is currently liked."""
    page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
    logger.debug(f"👥 Unlike video id {video_id} of @{username}")
    response_queue: asyncio.Queue = asyncio.Queue(maxsize=1)
    page.on(
        "response",
        lambda res: asyncio.create_task(
            catch_response_info(res, response_queue, "/commit/item/digg"),
        ),
    )
    logger.info(f"🧭 Going to @{username}'s video {video_id} page for unlike")
    await self.client.goto(
        f"/@{username}/video/{video_id}",
        page=page,
        wait_until="networkidle",
    )
    like_selector = f'{FEED_LIST_ITEM}:first-child span[data-e2e="like-icon"]'
    liked_marker = await page.query_selector(f"{like_selector} > div > svg")
    if not liked_marker:
        # No filled svg => nothing to unlike.
        logger.info(f"😏 @{username}'s video {video_id} already unliked")
        await page.close()
        return
    await page.click(like_selector)
    verdict = await response_queue.get()
    if verdict["status_code"] == 0:
        logger.info(f"👎 @{username}'s video {video_id} unliked")
    else:
        logger.warning(f"⚠️ @{username}'s video {video_id} probably not unliked")
    await page.close()
async def follow(self, username: str):
    """Follow ``username`` unless the profile button already shows a followed state."""
    page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
    logger.debug(f"👥 Follow {username}")
    confirm_queue: asyncio.Queue = asyncio.Queue(maxsize=1)
    page.on(
        "response",
        lambda res: asyncio.create_task(
            catch_response_info(res, confirm_queue, "/commit/follow/user"),
        ),
    )
    logger.info(f"🧭 Going to {username}'s page for following")
    await self.client.goto(
        f"/@{username.lstrip('@')}",
        page=page,
        wait_until="networkidle",
    )
    button_text: str = await page.eval_on_selector(
        FOLLOW_BUTTON,
        expression="element => element.textContent",
    )
    # Any label other than "Follow" means the account is already followed.
    if button_text.lower() != "follow":
        logger.info(f"😏 {username} already followed")
        await page.close()
        return
    await page.click(FOLLOW_BUTTON)
    outcome = await confirm_queue.get()
    if outcome["status_code"] == 0:
        logger.info(f"➕ {username} followed")
    else:
        logger.warning(f"⚠️ {username} probably not followed")
    await page.close()
async def unfollow(self, username: str):
    """Unfollow ``username``.

    Opens the profile, verifies the button shows "Following", clicks the
    unfollow control, and confirms via the intercepted
    ``/commit/follow/user`` API response.
    """
    page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
    logger.debug(f"👥 Unfollow {username}")
    unfollow_info_queue: asyncio.Queue = asyncio.Queue(maxsize=1)
    page.on(
        "response",
        lambda res: asyncio.create_task(
            catch_response_info(res, unfollow_info_queue, "/commit/follow/user"),
        ),
    )
    logger.info(f"🧭 Going to {username}'s page for unfollowing")
    await self.client.goto(
        f"/@{username.lstrip('@')}",
        page=page,
        wait_until="networkidle",
    )
    follow_title: str = await page.eval_on_selector(
        FOLLOW_BUTTON,
        expression="element => element.textContent",
    )
    if follow_title.lower() != "following":
        logger.info(f"😏 {username} already unfollowed")
        # BUG FIX: this early return leaked the page; close it like every
        # sibling method does.
        await page.close()
        return
    await page.click(UNFOLLOW_BUTTON)
    unfollow_info = await unfollow_info_queue.get()
    if unfollow_info["status_code"] == 0:
        logger.info(f"➖ {username} unfollowed")
    else:
        logger.warning(f"⚠️ {username} probably not unfollowed")
    await page.close()
async def feed(self, username: str, amount: int) -> List[dict]:
    """Collect up to ``amount`` unique feed items from ``username``'s profile.

    Finds the profile through the search page, intercepts feed API responses
    while paginating, and returns de-duplicated item dicts. Returns an empty
    list when the user cannot be found, the navigation times out, or the
    page shows an error message.
    """
    page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
    logger.debug(f"📨 Request {username} feed")
    _ = await self.client.goto(
        f"/search/user?q={username.lstrip('@')}",
        page=page,
        wait_until="networkidle",
    )
    username_selector = SEARCH_USERNAME.format(username)
    is_found_user = await page.query_selector(username_selector)
    if not is_found_user:
        logger.error(f'❗️ User "{username}" not found')
        # BUG FIX: every early return below used to leak the page.
        await page.close()
        return []
    result: List[dict] = []
    page.on(
        "response",
        lambda res: asyncio.create_task(catch_response_and_store(res, result)),
    )
    try:
        await page.click(username_selector)
        await page.wait_for_selector(MAIN_WRAPPER)
        await page.wait_for_load_state(state="networkidle")
    except TimeoutError:
        logger.error(f'❗️ Unexpected error. Timeout on searching user "{username}"...')
        await page.close()
        return []
    logger.debug(f"📭 Got {username} feed")
    error = await page.query_selector(ERROR_TITLE)
    if error:
        logger.info(f'😭 Error message on page: "{await error.text_content()}"')
        await page.close()
        return []
    await page.wait_for_selector(USER_FEED_LIST, state="visible")
    await self._paginate_feed_list(
        page=page,
        username=username,
        result=result,
        amount=amount,
    )
    await page.close()
    return unique_dicts_by_key(result, "id")[:amount]
async def _paginate_feed_list(
    self,
    page: Page,
    username: str,
    result: List[dict],
    amount: int,
):
    """Scroll the profile feed until ``amount`` unique videos are collected.

    Gives up (with a warning) after ``max_attempts`` consecutive scrolls that
    yield no new unique items — e.g. when the remaining videos are private.
    """
    result_unique_amount: Callable = lambda: len(unique_dicts_by_key(result, "id"))
    pbar = tqdm(total=amount, desc=f"📈 Getting {username} feed")
    pbar.n = min(result_unique_amount(), amount)
    pbar.refresh()
    attempts = 0
    max_attempts = 3
    last_result = result_unique_amount()
    # BUG FIX: the loop condition used to be captured in two booleans computed
    # once before the loop, so it never reflected progress — scrolling kept
    # going after ``amount`` was reached and only the attempts-exhausted break
    # could stop it. Re-evaluate "do we have enough yet?" every iteration.
    while result_unique_amount() < amount:
        logger.debug("🖱 Trying to scroll to last video item")
        await page.evaluate(
            f"""
            document.querySelector('{USER_FEED_LAST_ITEM}')
                .scrollIntoView();
            """,
        )
        await page.wait_for_timeout(1_000)
        elements = await page.query_selector_all(USER_FEED_ITEM)
        logger.debug(f"🔎 Found {len(elements)} items on page by selector {USER_FEED_ITEM}")
        pbar.n = min(result_unique_amount(), amount)
        pbar.refresh()
        # Count consecutive scrolls that produced no new unique items.
        if last_result == result_unique_amount():
            attempts += 1
        else:
            attempts = 0
        if attempts > max_attempts:
            pbar.clear()
            pbar.total = result_unique_amount()
            logger.info(
                f"⚠️ After {max_attempts} attempts found {result_unique_amount()} videos. "
                f"Probably some videos are private",
            )
            break
        last_result = result_unique_amount()
        await page.wait_for_timeout(5_000)
    pbar.close()
|
# Copyright (c) 2011-2021, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import logging
from typing import Any, Dict, Optional, cast
import c2cwsgiutils.health_check
import pyramid.config
import pyramid.request
import requests
from c2cgeoportal_geoportal.lib.checker import build_url
LOG = logging.getLogger(__name__)
def init(config: pyramid.config.Configurator, health_check: c2cwsgiutils.health_check.HealthCheck) -> None:
    """
    Initialize the check collector.

    Registers one custom c2cwsgiutils health check per configured host; each
    check proxies the remote instance's ``health_check`` endpoint and returns
    its JSON payload.
    """
    global_settings = config.get_settings()
    # Feature is opt-in: do nothing unless a check_collector section exists.
    if "check_collector" not in global_settings:
        return
    settings = global_settings["check_collector"]
    c2c_base = global_settings.get("c2c.base_path", "")
    max_level = settings["max_level"]
    for host in settings["hosts"]:

        class Check:
            # ``host`` is passed to __init__ explicitly so the loop variable
            # is not captured late by the closure.
            def __init__(self, host: Dict[str, Any]):
                self.host = host

            def __call__(self, request: pyramid.request.Request) -> Optional[Dict[str, Any]]:
                params = request.params
                display = self.host["display"]
                # Run either for all hosts or only the one selected via ?host=...
                if "host" not in params or display == params["host"]:
                    # BUG FIX: nested double quotes inside a double-quoted
                    # f-string are a SyntaxError before Python 3.12 — use
                    # single quotes for the inner subscripts/arguments.
                    url_headers = build_url(
                        "check_collector",
                        f"{self.host['url'].rstrip('/')}/{c2c_base.strip('/')}/health_check",
                        request,
                    )
                    # ``url_headers`` presumably carries the url=/headers=
                    # kwargs for requests.get — confirm against build_url.
                    r = requests.get(
                        params={"max_level": str(self.host.get("max_level", max_level))},
                        timeout=120,
                        **url_headers,  # type: ignore
                    )
                    r.raise_for_status()
                    return cast(Dict[str, Any], r.json())
                return None

        health_check.add_custom_check(
            name="check_collector_" + host["display"], check_cb=Check(host), level=settings["level"]
        )
| # Copyright (c) 2011-2021, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import logging
from typing import Any, Dict, Optional, cast
import c2cwsgiutils.health_check
import pyramid.config
import pyramid.request
import requests
from c2cgeoportal_geoportal.lib.checker import build_url
LOG = logging.getLogger(__name__)
def init(config: pyramid.config.Configurator, health_check: c2cwsgiutils.health_check.HealthCheck) -> None:
"""
Initialize the check collector.

Register one custom health check per configured host with the
c2cwsgiutils health-check framework; each check proxies the remote
instance's health_check endpoint and returns its JSON payload.
"""
global_settings = config.get_settings()
# Feature is opt-in: do nothing unless a check_collector section exists.
if "check_collector" not in global_settings:
return
settings = global_settings["check_collector"]
c2c_base = global_settings.get("c2c.base_path", "")
max_level = settings["max_level"]
for host in settings["hosts"]:
# One Check class per host; ``host`` is passed to __init__ explicitly so
# the loop variable is not captured late by the closure.
class Check:
def __init__(self, host: Dict[str, Any]):
self.host = host
def __call__(self, request: pyramid.request.Request) -> Optional[Dict[str, Any]]:
params = request.params
display = self.host["display"]
# Run either for all hosts or only the one selected via ?host=...
if "host" not in params or display == params["host"]:
url_headers = build_url(
"check_collector",
f"{self.host['url'].rstrip('/')}/{c2c_base.strip('/')}/health_check",
request,
)
# NOTE(review): ``url_headers`` presumably carries the url=/headers=
# kwargs for requests.get — confirm against build_url.
r = requests.get(
params={"max_level": str(self.host.get("max_level", max_level))},
timeout=120,
**url_headers,  # type: ignore
)
r.raise_for_status()
return cast(Dict[str, Any], r.json())
# Filtered out by the ?host= parameter: report nothing for this host.
return None
health_check.add_custom_check(
name="check_collector_" + host["display"], check_cb=Check(host), level=settings["level"]
)
|
import json as _json
import multiprocessing as _mp
import os as _os
from collections import Callable
from functools import partial as _partial
from os import sep as _sep
import numpy as _np
import torch as _torch
import torch.utils.data as _data
from torch.utils.data import DataLoader as _DataLoader, Dataset as _Dataset
from torch.utils.data._utils.collate import default_collate as _default_collate
import dad_torch.data.datautils as _du
import dad_torch.utils as _etutils
from dad_torch.utils.logger import *
from .datautils import UnPaddedDDPSampler
def _job(total, func, i, f):
print(f"Working on: [ {i}/{total} ]", end='\r')
return func(f)
def multiRun(nproc: int, data_list: list, func: Callable) -> list:
    """Map ``func`` over ``data_list`` with a pool of ``nproc`` worker processes."""
    indexed = [[ix, item] for ix, item in enumerate(data_list, 1)]
    with _mp.Pool(processes=nproc) as pool:
        return list(pool.starmap(_partial(_job, len(indexed), func), indexed))
def safe_collate(batch):
    r"""Collate a batch after dropping falsy entries (samples whose file load failed)."""
    valid = [sample for sample in batch if sample]
    return _default_collate(valid)
def num_workers(args, loader_args, distributed=False):
    """Workers per process: the configured count, ceil-split across GPUs when distributed."""
    workers = loader_args['num_workers']
    if distributed:
        gpus = args['num_gpus']
        workers = (workers + gpus - 1) // gpus
    return workers
def batch_size(args, loader_args, distributed=False):
    """Batch size per process; floor-divides by GPU count (mutating ``loader_args``) when distributed."""
    if distributed:
        # Intentionally updates loader_args in place, matching caller expectations.
        loader_args['batch_size'] = loader_args['batch_size'] // args['num_gpus']
    return loader_args['batch_size']
def _seed_worker(worker_id):
seed = (int(_torch.initial_seed()) + worker_id) % (2 ** 32 - 1)
_np.random.seed(seed)
def _et_data_job_func(mode, file, dataspec, args, dataset_cls):
test_dataset = dataset_cls(mode=mode, **args)
test_dataset.add(files=[file], verbose=False, **dataspec)
return test_dataset
def _et_data_job(mode, arg, dspec, cls, total, func, verbose, i, file):
if verbose:
print(f"Working on: [ {i} / {total} ]", end='\r')
return func(mode, file, dspec, arg, cls)
class DTDataHandle:
    """Creates datasets and DataLoaders from split files; arguments are frozen at init."""

    def __init__(self, args=None, dataloader_args=None, **kw):
        # Freeze both maps so later code cannot mutate configuration silently.
        self.args = _etutils.FrozenDict(args)
        self.dataloader_args = _etutils.FrozenDict(dataloader_args)
def get_dataset(self, handle_key, files, dataspec: dict, dataset_cls=None):
    """Instantiate ``dataset_cls`` for phase ``handle_key`` and index ``files`` into it."""
    ds = dataset_cls(mode=handle_key, limit=self.args['load_limit'], **self.args)
    ds.add(files=files, verbose=self.args['verbose'], **dataspec)
    return ds
def get_train_dataset(self, split_file, dataspec: dict, dataset_cls=None):
    r"""Load the train data from the current fold/split (or a preconfigured dataset)."""
    preset = self.dataloader_args.get('train', {}).get('dataset')
    if dataset_cls is None or preset:
        return preset
    with open(dataspec['split_dir'] + _sep + split_file) as fh:
        split = _json.loads(fh.read())
    return self.get_dataset('train', split.get('train', []),
                            dataspec, dataset_cls=dataset_cls)
def get_validation_dataset(self, split_file, dataspec: dict, dataset_cls=None):
    r"""Load the validation data from the current fold/split; None when empty."""
    preset = self.dataloader_args.get('validation', {}).get('dataset')
    if dataset_cls is None or preset:
        return preset
    with open(dataspec['split_dir'] + _sep + split_file) as fh:
        split = _json.loads(fh.read())
    ds = self.get_dataset('validation', split.get('validation', []),
                          dataspec, dataset_cls=dataset_cls)
    if ds and len(ds) > 0:
        return ds
    return None
def get_test_dataset(self, split_file, dataspec: dict, dataset_cls=None):
    """Load the test data; in sparse mode each file becomes its own dataset."""
    preset = self.dataloader_args.get('test', {}).get('dataset')
    if dataset_cls is None or preset:
        return preset
    with open(dataspec['split_dir'] + _sep + split_file) as fh:
        _files = _json.loads(fh.read()).get('test', [])[:self.args['load_limit']]
    if self.args['load_sparse'] and len(_files) > 1:
        datasets = DTDataHandle.multi_load('test', _files, dataspec, self.args, dataset_cls)
        success(f'\n{len(datasets)} sparse dataset loaded.', self.args['verbose'])
    else:
        datasets = self.get_dataset('test', _files, dataspec, dataset_cls=dataset_cls)
    if len(datasets) > 0 and sum([len(t) for t in datasets if t]) > 0:
        return datasets
    return None
def get_loader(self, handle_key='', distributed=False, use_unpadded_sampler=False, **kw):
# Build a torch DataLoader for the dataset registered under ``handle_key``;
# returns None when no dataset is configured for that key.
# Settings precedence (later wins): self.args < dataloader_args[handle_key] < kw.
args = {**self.args}
args['distributed'] = distributed
args['use_unpadded_sampler'] = use_unpadded_sampler
args.update(self.dataloader_args.get(handle_key, {}))
args.update(**kw)
if args.get('dataset') is None:
return None
# Template of DataLoader kwargs; only these keys are forwarded to DataLoader,
# anything else in ``args`` is ignored.
loader_args = {
'dataset': None,
'batch_size': 1,
'sampler': None,
'shuffle': False,
'batch_sampler': None,
'num_workers': 0,
'pin_memory': False,
'drop_last': False,
'timeout': 0,
'worker_init_fn': _seed_worker if args.get('seed_all') else None
}
# Overlay user-supplied values onto the template.
for k in loader_args.keys():
loader_args[k] = args.get(k, loader_args.get(k))
if args['distributed']:
sampler_args = {
'num_replicas': args.get('replicas'),
'rank': args.get('rank'),
'shuffle': args.get('shuffle'),
'seed': args.get('seed')
}
# Only install a DDP sampler when the caller did not supply one.
if loader_args.get('sampler') is None:
loader_args['shuffle'] = False # Shuffle is mutually exclusive with sampler
if args['use_unpadded_sampler']:
loader_args['sampler'] = UnPaddedDDPSampler(loader_args['dataset'], **sampler_args)
else:
loader_args['sampler'] = _data.distributed.DistributedSampler(loader_args['dataset'],
**sampler_args)
# Workers and batch size are re-scaled per process under DDP.
loader_args['num_workers'] = num_workers(args, loader_args, True)
loader_args['batch_size'] = batch_size(args, loader_args, True)
return _DataLoader(collate_fn=safe_collate, **loader_args)
def create_splits(self, dataspec, out_dir):
    """Create split files for ``dataspec`` when needed, else report the existing ones."""
    if _du.should_create_splits_(out_dir, dataspec, self.args):
        _du.default_data_splitter_(dspec=dataspec, args=self.args)
        # BUG FIX: nested double quotes inside a double-quoted f-string are a
        # SyntaxError before Python 3.12 — subscript with single quotes.
        info(f"{len(_os.listdir(dataspec['split_dir']))} split(s) created in '{dataspec['split_dir']}' directory.",
             self.args['verbose'])
    else:
        splits_len = len(_os.listdir(dataspec['split_dir']))
        # BUG FIX: the original message mixed " and ' delimiters
        # (`from "{...}' directory`), a SyntaxError on every Python version;
        # restore the intended single-quoted directory name.
        info(f"{splits_len} split(s) loaded from '{dataspec['split_dir']}' directory.",
             self.args['verbose'] and splits_len > 0)
def init_dataspec_(self, dataspec: dict):
    """Prefix every ``*_dir`` entry with ``dataset_dir`` and normalize separators in place."""
    for key in dataspec:
        if '_dir' not in key:
            continue
        path = _os.path.join(self.args['dataset_dir'], dataspec[key])
        path = path.replace(f"{_sep}{_sep}", _sep)
        if path.endswith(_sep):
            path = path[:-1]
        dataspec[key] = path
@staticmethod
def multi_load(mode, files, dataspec, args, dataset_cls, func=_et_data_job_func) -> list:
    r"""Load one dataset object per file through a worker pool.

    Note: only works with dad_torch's default args (``from dad_torch import args``).
    """
    indexed = [[ix, f] for ix, f in enumerate(files, 1)]
    workers = min(num_workers(args, args, args['use_ddp']), len(indexed))
    job = _partial(_et_data_job, mode, args, dataspec, dataset_cls, len(indexed), func, args['verbose'])
    with _mp.Pool(processes=max(1, workers)) as pool:
        loaded = list(pool.starmap(job, indexed))
    # Drop worker datasets that ended up empty.
    return [d for d in loaded if len(d) >= 1]
class DTDataset(_Dataset):
    """Base dataset built on a list of ``[dataspec_name, file]`` index entries."""

    def __init__(self, mode='init', limit=None, **kw):
        self.mode = mode      # phase this dataset serves (train/validation/test)
        self.limit = limit    # max number of files to index (debugging aid)
        self.indices = []
        self.data = {}
        self.args = _etutils.FrozenDict(kw)
        self.dataspecs = _etutils.FrozenDict({})
def load_index(self, dataset_name, file):
    r"""Register the indices contributed by one file.

    Some datasets map a single image to many indices (e.g. U-Net patches);
    the default is one entry per file.
    """
    self.indices.append([dataset_name, file])
def _load_indices(self, dataspec_name, files, verbose=True):
    r"""Index ``files`` (capped at ``self.limit``) to prepare minibatches.

    Several files are loaded through a worker pool and merged into this
    instance; a single file is indexed inline.
    """
    capped = files[:self.limit]
    if len(capped) > 1:
        loaded = DTDataHandle.multi_load(
            self.mode, capped, self.dataspecs[dataspec_name], self.args, self.__class__
        )
        self.gather(loaded)
    else:
        for file in capped:
            self.load_index(dataspec_name, file)
    success(f'\n{dataspec_name}, {self.mode}, {len(self)} indices Loaded.', verbose)
def gather(self, dataset_objs):
    """Merge the list/dict/set attributes of worker datasets into this instance."""
    for ds in dataset_objs:
        for name, value in vars(ds).items():
            if isinstance(value, _etutils.FrozenDict):
                continue  # frozen configuration is shared, never merged
            mine = getattr(self, name)
            if isinstance(value, list):
                mine.extend(value)
            elif isinstance(value, dict):
                mine.update(**value)
            elif isinstance(value, set):
                # NOTE(review): set.union returns a NEW set, so set attributes
                # are not actually merged here — behavior preserved as-is.
                mine.union(value)
def __getitem__(self, index):
    r"""Load one example; minibatch assembly is handled by the DataLoader.

    Subclasses only implement the single-file logic here.
    """
    raise NotImplementedError('Must be implemented by child class.')
def __len__(self):
    """Number of indexed examples."""
    return len(self.indices)
def transforms(self, **kw):
    """Hook for augmentation pipelines; the base implementation provides none."""
    return None
def add(self, files, verbose=True, **kw):
    r"""Register a dataspec (keyed by ``kw['name']``) and index its files."""
    spec_name = kw['name']
    self.dataspecs[spec_name] = kw
    self._load_indices(dataspec_name=spec_name, files=files, verbose=verbose)
| import json as _json
import multiprocessing as _mp
import os as _os
from collections import Callable
from functools import partial as _partial
from os import sep as _sep
import numpy as _np
import torch as _torch
import torch.utils.data as _data
from torch.utils.data import DataLoader as _DataLoader, Dataset as _Dataset
from torch.utils.data._utils.collate import default_collate as _default_collate
import dad_torch.data.datautils as _du
import dad_torch.utils as _etutils
from dad_torch.utils.logger import *
from .datautils import UnPaddedDDPSampler
def _job(total, func, i, f):
print(f"Working on: [ {i}/{total} ]", end='\r')
return func(f)
def multiRun(nproc: int, data_list: list, func: Callable) -> list:
    """Fan ``func`` out over ``data_list`` using ``nproc`` worker processes."""
    jobs = []
    for position, item in enumerate(data_list, 1):
        jobs.append([position, item])
    with _mp.Pool(processes=nproc) as pool:
        mapped = pool.starmap(_partial(_job, len(jobs), func), jobs)
    return list(mapped)
def safe_collate(batch):
    r"""Drop falsy samples (failed file loads) before collating the batch."""
    kept = [entry for entry in batch if entry]
    return _default_collate(kept)
def num_workers(args, loader_args, distributed=False):
    """Per-process worker count; ceil-divided across GPUs in distributed mode."""
    total = loader_args['num_workers']
    if distributed:
        # Negated floor division == ceiling division for non-negative counts.
        return -(-total // args['num_gpus'])
    return total
def batch_size(args, loader_args, distributed=False):
    """Per-process batch size; floor-divides (in place) by GPU count when distributed."""
    if distributed:
        loader_args['batch_size'] //= args['num_gpus']
    return loader_args['batch_size']
def _seed_worker(worker_id):
seed = (int(_torch.initial_seed()) + worker_id) % (2 ** 32 - 1)
_np.random.seed(seed)
def _et_data_job_func(mode, file, dataspec, args, dataset_cls):
test_dataset = dataset_cls(mode=mode, **args)
test_dataset.add(files=[file], verbose=False, **dataspec)
return test_dataset
def _et_data_job(mode, arg, dspec, cls, total, func, verbose, i, file):
if verbose:
print(f"Working on: [ {i} / {total} ]", end='\r')
return func(mode, file, dspec, arg, cls)
class DTDataHandle:
    """Builds datasets and DataLoaders from split files; configuration frozen at init."""

    def __init__(self, args=None, dataloader_args=None, **kw):
        # Freeze both argument maps so they cannot be mutated after construction.
        self.args = _etutils.FrozenDict(args)
        self.dataloader_args = _etutils.FrozenDict(dataloader_args)
def get_dataset(self, handle_key, files, dataspec: dict, dataset_cls=None):
    """Create a ``dataset_cls`` for phase ``handle_key`` populated with ``files``."""
    new_dataset = dataset_cls(mode=handle_key, limit=self.args['load_limit'], **self.args)
    new_dataset.add(files=files, verbose=self.args['verbose'], **dataspec)
    return new_dataset
def get_train_dataset(self, split_file, dataspec: dict, dataset_cls=None):
    r"""Load the train data from the current fold/split (or a preconfigured dataset)."""
    existing = self.dataloader_args.get('train', {}).get('dataset')
    if dataset_cls is None or existing:
        return existing
    split_path = dataspec['split_dir'] + _sep + split_file
    with open(split_path) as handle:
        split = _json.loads(handle.read())
    return self.get_dataset('train', split.get('train', []),
                            dataspec, dataset_cls=dataset_cls)
def get_validation_dataset(self, split_file, dataspec: dict, dataset_cls=None):
    r"""Load the validation data from the current fold/split; None when empty."""
    existing = self.dataloader_args.get('validation', {}).get('dataset')
    if dataset_cls is None or existing:
        return existing
    with open(dataspec['split_dir'] + _sep + split_file) as handle:
        split = _json.loads(handle.read())
    dataset = self.get_dataset('validation', split.get('validation', []),
                               dataspec, dataset_cls=dataset_cls)
    return dataset if dataset and len(dataset) > 0 else None
def get_test_dataset(self, split_file, dataspec: dict, dataset_cls=None):
    """Load the test data; in sparse mode every file becomes its own dataset."""
    existing = self.dataloader_args.get('test', {}).get('dataset')
    if dataset_cls is None or existing:
        return existing
    with open(dataspec['split_dir'] + _sep + split_file) as handle:
        _files = _json.loads(handle.read()).get('test', [])[:self.args['load_limit']]
    if self.args['load_sparse'] and len(_files) > 1:
        datasets = DTDataHandle.multi_load('test', _files, dataspec, self.args, dataset_cls)
        success(f'\n{len(datasets)} sparse dataset loaded.', self.args['verbose'])
    else:
        datasets = self.get_dataset('test', _files, dataspec, dataset_cls=dataset_cls)
    if len(datasets) > 0 and sum([len(t) for t in datasets if t]) > 0:
        return datasets
    return None
def get_loader(self, handle_key='', distributed=False, use_unpadded_sampler=False, **kw):
# Build a torch DataLoader for the dataset registered under ``handle_key``;
# returns None when no dataset is configured for that key.
# Settings precedence (later wins): self.args < dataloader_args[handle_key] < kw.
args = {**self.args}
args['distributed'] = distributed
args['use_unpadded_sampler'] = use_unpadded_sampler
args.update(self.dataloader_args.get(handle_key, {}))
args.update(**kw)
if args.get('dataset') is None:
return None
# Template of DataLoader kwargs; only these keys are forwarded to DataLoader,
# anything else in ``args`` is ignored.
loader_args = {
'dataset': None,
'batch_size': 1,
'sampler': None,
'shuffle': False,
'batch_sampler': None,
'num_workers': 0,
'pin_memory': False,
'drop_last': False,
'timeout': 0,
'worker_init_fn': _seed_worker if args.get('seed_all') else None
}
# Overlay user-supplied values onto the template.
for k in loader_args.keys():
loader_args[k] = args.get(k, loader_args.get(k))
if args['distributed']:
sampler_args = {
'num_replicas': args.get('replicas'),
'rank': args.get('rank'),
'shuffle': args.get('shuffle'),
'seed': args.get('seed')
}
# Only install a DDP sampler when the caller did not supply one.
if loader_args.get('sampler') is None:
loader_args['shuffle'] = False # Shuffle is mutually exclusive with sampler
if args['use_unpadded_sampler']:
loader_args['sampler'] = UnPaddedDDPSampler(loader_args['dataset'], **sampler_args)
else:
loader_args['sampler'] = _data.distributed.DistributedSampler(loader_args['dataset'],
**sampler_args)
# Workers and batch size are re-scaled per process under DDP.
loader_args['num_workers'] = num_workers(args, loader_args, True)
loader_args['batch_size'] = batch_size(args, loader_args, True)
return _DataLoader(collate_fn=safe_collate, **loader_args)
def create_splits(self, dataspec, out_dir):
    """Create data splits for *dataspec* unless split files already exist.

    Delegates the decision and the actual splitting to the data utils; only
    logs how many split files were created or reused.
    """
    if _du.should_create_splits_(out_dir, dataspec, self.args):
        _du.default_data_splitter_(dspec=dataspec, args=self.args)
        info(f"{len(_os.listdir(dataspec['split_dir']))} split(s) created in '{dataspec['split_dir']}' directory.",
             self.args['verbose'])
    else:
        splits_len = len(_os.listdir(dataspec['split_dir']))
        # Stay quiet when the directory is empty (nothing meaningful to report).
        info(f"{splits_len} split(s) loaded from '{dataspec['split_dir']}' directory.",
             self.args['verbose'] and splits_len > 0)
def init_dataspec_(self, dataspec: dict):
    """Rewrite every '*_dir' entry of *dataspec* as a cleaned path under args['dataset_dir'].

    Doubled separators are collapsed and one trailing separator is removed.
    Mutates *dataspec* in place; non-directory keys are left untouched.
    """
    for key in dataspec:
        if '_dir' not in key:
            continue
        full_path = _os.path.join(self.args['dataset_dir'], dataspec[key])
        full_path = full_path.replace(f"{_sep}{_sep}", _sep)
        if full_path.endswith(_sep):
            full_path = full_path[:-1]
        dataspec[key] = full_path
@staticmethod
def multi_load(mode, files, dataspec, args, dataset_cls, func=_et_data_job_func) -> list:
    r"""Load one dataset object per file using a multiprocessing pool.

    Note: only works with dad_torch's default args (``from dad_torch import args``).

    :param mode: phase tag ('train'/'test'/...) forwarded to the job function.
    :param files: file names to load; each becomes its own dataset object.
    :param func: per-file job function run inside the worker processes.
    :return: dataset objects that loaded at least one item (empty ones dropped).
    """
    # Pair each file with its 1-based position (used for progress reporting).
    _files = []
    for ix, f in enumerate(files, 1):
        _files.append([ix, f])
    # Never spawn more workers than files; always at least one process.
    nw = min(num_workers(args, args, args['use_ddp']), len(_files))
    with _mp.Pool(processes=max(1, nw)) as pool:
        dataset_list = list(
            pool.starmap(
                _partial(_et_data_job, mode, args, dataspec, dataset_cls, len(_files), func, args['verbose']),
                _files)
        )
    return [_d for _d in dataset_list if len(_d) >= 1]
class DTDataset(_Dataset):
    """Base dataset: holds (dataspec_name, file) index pairs plus per-spec config.

    Subclasses implement __getitem__ (and usually override load_index) to turn
    an index entry into a training sample.
    """

    def __init__(self, mode='init', limit=None, **kw):
        self.mode = mode  # phase tag: 'train'/'test'/'init'/...
        self.limit = limit  # max number of files to index (None = no limit)
        self.indices = []  # list of [dataspec_name, file] pairs
        self.data = {}
        self.args = _etutils.FrozenDict(kw)
        self.dataspecs = _etutils.FrozenDict({})

    def load_index(self, dataset_name, file):
        r"""
        Logic to load indices of a single file.
        -Sometimes one image can have multiple indices like U-net where we have to get multiple patches of images.
        """
        self.indices.append([dataset_name, file])

    def _load_indices(self, dataspec_name, files, verbose=True):
        r"""
        Load the indices/names of *files* in order to prepare minibatches.
        Only loads up to ``self.limit`` files so debugging stays easy
        (default is unlimited; -lim/--load-lim argument).
        """
        _files = files[:self.limit]
        if len(_files) > 1:
            # Parallel path: build one dataset object per file, then merge them.
            dataset_objs = DTDataHandle.multi_load(
                self.mode, _files, self.dataspecs[dataspec_name], self.args, self.__class__
            )
            self.gather(dataset_objs)
        else:
            for f in _files:
                self.load_index(dataspec_name, f)
        success(f'\n{dataspec_name}, {self.mode}, {len(self)} indices Loaded.', verbose)

    def gather(self, dataset_objs):
        """Merge list/dict/set attributes of *dataset_objs* into self; FrozenDicts are skipped."""
        for d in dataset_objs:
            attributes = vars(d)
            for k, v in attributes.items():
                if isinstance(v, _etutils.FrozenDict):
                    continue
                if isinstance(v, list):
                    self.__getattribute__(f"{k}").extend(v)
                elif isinstance(attributes[f"{k}"], dict):
                    self.__getattribute__(f"{k}").update(**v)
                elif isinstance(attributes[f"{k}"], set):
                    # NOTE(review): set.union returns a new set, which is discarded
                    # here — set attributes are likely never merged. Confirm whether
                    # .update(v) (in-place) was intended.
                    self.__getattribute__(f"{k}").union(v)

    def __getitem__(self, index):
        r"""
        Logic to load one file and send to model. The mini-batch generation will be handled by Dataloader.
        Here we just need to write logic to deal with single file.
        """
        raise NotImplementedError('Must be implemented by child class.')

    def __len__(self):
        # Number of indexed entries, not number of source files.
        return len(self.indices)

    def transforms(self, **kw):
        # Hook for subclasses; no transforms by default.
        return None

    def add(self, files, verbose=True, **kw):
        r"""An extra layer for added flexibility: register a dataspec (kw must contain 'name') and index its files."""
        self.dataspecs[kw['name']] = kw
        self._load_indices(dataspec_name=kw['name'], files=files, verbose=verbose)
|
from os import path
from tkinter.filedialog import askdirectory, askopenfile
from tkinter.ttk import Progressbar
from tkinter import Menu, messagebox
from tor_handler import TorHandler
from toplevel_window_manager import ToplevelManager
from video_quality_selector_manager import VideoQualitySelector
import threading
import youtube_dl
import tkinter as tk
import re
import logging
# Log everything (DEBUG and up) to a file with millisecond timestamps.
logging.basicConfig(
    filename="logs.log",
    level=logging.DEBUG,
    format="%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)

# Widget handles; None until the GUI is built in init_tkinter_root().
root = None
TB_URL = None
TB_DESTINATION_PATH = None
BTN_START_DOWNLOAD = None
BTN_SELECT_DIR = None
BTN_DOWNLOAD_FROM_TXT = None
RIGHT_CLICK_MENU = None
PROXY_BUTTON = None
TOPLEVEL_WINDOW = None
CONVERSION_MODE_BTN = None
TOR_HANDLER = None

# Mutable application state.
USING_PROXY = False
TOR_PROXY_CHECKED = -1  # click counter; TOR connectivity re-tested every 5th click
CONVERSION_MODE = "mp3"  # "mp3" or "mp4" (toggled by toggle_download_mode)

USERAGENTS_FILEPATH = "./useragents.txt"
CURRENT_SCRIPT_PATH = path.abspath(path.dirname(__file__))
# (sic: constant name and message typos are kept — referenced throughout the file)
UNEXPCTED_ERR_MSG = "Unexpected error occured. Please check logs for more info."
threads = []  # download worker threads; pruned in exit_handler
# this regex matches youtube urls with optional 'www.' behind 'youtube'
# alternative complicated regex: ^((?:https?:)?\/\/)?((?:www|m)\.)?((?:youtube\.com|youtu.be))(\/(?:[\w\-]+\?v=|embed\/|v\/)?)([\w\-]+)(\S+)?$
YOUTUBE_URL_REGEX = re.compile("^(https?\:\/\/)?(www\.)?(youtube\.com|youtu\.?be)\/.+$")
YOUTUBE_PLAYLIST_URL_REGEX = re.compile(
    "^(?:https?:\/\/)?(?:www\.)?youtu\.?be(?:\.com)?.*?(?:v|list)=(.*?)(?:&|$)|^(?:https?:\/\/)?(?:www\.)?youtu\.?be(?:\.com)?(?:(?!=).)*\/(.*)$"
)
################################# PROGRESS BAR ##################################################################
# def create_toplevel_tk_window(label_text=None):
# global TOPLEVEL_WINDOW
# newWindow = tk.Toplevel()
# newWindow.title("Downloading...")
# newWindow.geometry("275x125")
# if label_text:
# label = tk.Label(master=newWindow, text=label_text, wraplength=newWindow.winfo_width())
# label.pack(padx=0,pady=0)
# TOPLEVEL_WINDOW = newWindow
# def show_progress(data):
# global TOPLEVEL_WINDOW
# try:
# # creating progress bar
# progress_bar = Progressbar(TOPLEVEL_WINDOW, length=250, s='black.Horizontal.TProgressbar')
# progress_bar['value'] = 0
# progress_bar.pack(padx=5, pady=25)
# if data['status'] == 'finished':
# progress_bar['value'] = 100
# if TOPLEVEL_WINDOW:
# TOPLEVEL_WINDOW.destroy()
# TOPLEVEL_WINDOW = None
# if data['status'] == 'downloading':
# p = data['_percent_str']
# p = p.replace('%', '')
# progress_bar['value'] = float(p)
# except Exception:
# show_error_message(UNEXPCTED_ERR_MSG)
# logging.exception(UNEXPCTED_ERR_MSG)
# if TOPLEVEL_WINDOW:
# TOPLEVEL_WINDOW.destroy()
# TOPLEVEL_WINDOW = None
###################################################################################################
##################################### UTILITIES #########################
def read_youtube_urls():
    """Let the user pick a txt file and return the valid YouTube URLs it contains.

    Expected file format: one URL per line. Blank lines are ignored; lines
    that do not match YOUTUBE_URL_REGEX trigger an error popup and are skipped.

    :return: list of cleaned YouTube URLs (empty when the dialog is cancelled).
    """
    urls = []
    picked_file = askopenfile(mode="r", filetypes=[("Text file", "*.txt")])
    if picked_file is None:
        return urls
    while True:
        raw_line = picked_file.readline()
        if not raw_line:  # EOF
            break
        candidate = raw_line.strip().rstrip("\n").strip("\r").strip("\t")
        if not candidate:  # blank line
            continue
        if YOUTUBE_URL_REGEX.findall(candidate):
            urls.append(candidate)
        else:
            show_error_message(
                f'"{candidate}" IS NOT A VALID YOUTUBE URL. SKIPPED.'
            )
    return urls
def select_download_dir():
    """Open a directory picker and write the chosen path into the destination entry.

    The entry is temporarily re-enabled for the update and disabled again so
    the user cannot type into it directly.
    """
    global TB_DESTINATION_PATH
    chosen_dir = askdirectory()
    if not (TB_DESTINATION_PATH and chosen_dir):
        return
    TB_DESTINATION_PATH["state"] = tk.NORMAL
    TB_DESTINATION_PATH.delete(0, tk.END)
    TB_DESTINATION_PATH.insert(0, chosen_dir)
    TB_DESTINATION_PATH["state"] = tk.DISABLED
###########################################################################
########################### THREADS ###################################
def convert_multiple_youtube_to_mp3():
    """Run the multi-URL conversion on a background thread so the UI stays responsive."""
    worker = threading.Thread(target=start_convert_multiple_youtube_to_mp3, args=())
    worker.start()
    threads.append(worker)
def convert_video_to_mp3():
    """Run the single-URL download on a background thread so the UI stays responsive."""
    download_worker = threading.Thread(target=start_download, args=())
    download_worker.start()
    threads.append(download_worker)
#######################################################################
################################## PROXY STUFF $##########################
# def get_random_ua():
# # if file can be loaded in memory use: random.choice(open("useragents.txt").readlines())
# # Waterman's "Reservoir Algorithm" to get 1 line from file randomly in memory efficient way
# with open('useragents.txt') as f:
# line = next(f)
# for num, aline in enumerate(f, 2):
# if random.randrange(num):
# continue
# line = aline
# return line
def get_proxy():
    """Return the TOR handler's socks5 proxy URL."""
    # TODO: get random proxy if tor is not working
    return TOR_HANDLER.socks5_url
##################################################################################
##################### YOUTUBE-DL YOUTUBE TO MP3 CONVERSION FOR GETTING VIDEO INFO AND OPTIONS THAT YOUTUBE-DL NEEDS ############
def get_available_formats(vids_info):
    """
    Returns list of tuples (format_id, description) of mp4 video formats
    (excluding audio-only and DASH formats; best audio is chosen by default).
    Args:
        vids_info (dict): the youtube info for the video to be downloaded
    """
    # Single videos without a 'formats' list are treated as one format entry.
    formats = vids_info.get("formats", [vids_info])
    available_formats_list = []
    for f in formats:
        if (
            "audio" not in f["format"]
            and f["ext"] == "mp4"
            and "DASH" not in f["format"]
        ):
            # Inner quotes differ from the outer f-string quotes so this also
            # parses on Python < 3.12 (quote reuse needs PEP 701 / 3.12+).
            f_str = f"{f['ext']} - {f['format']}"
            f_id = f["format_id"]
            available_formats_list.append((f_id, f_str))
    return available_formats_list
def get_vid_info(vid_url):
    """Fetch youtube_dl's metadata dict for *vid_url* without downloading anything."""
    with youtube_dl.YoutubeDL() as ydl:
        info = ydl.extract_info(url=vid_url, download=False)
    return info
def get_video_options(
    vid_dest: str,
    conversion_mode: str,
    video_quality_id: str = None
    # progress_bar = True
):
    """Assemble the youtube_dl options dict for one download.

    :param vid_dest: destination directory for the output file.
    :param conversion_mode: "mp3" (extract audio) or anything else for video.
    :param video_quality_id: optional format_id to pair with best audio.
    :return: options dict ready for youtube_dl.YoutubeDL(...).
    """
    global USING_PROXY
    out_template = path.join(vid_dest, "%(title)s.%(ext)s")
    if conversion_mode == "mp3":
        options = {
            "format": "bestaudio/best",
            "outtmpl": out_template,
            "keepvideo": False,
            "quiet": True,
            # 'prefer_ffmpeg': True, # --> optional
            "postprocessors": [
                {
                    "key": "FFmpegExtractAudio",
                    "preferredcodec": "mp3",
                    "preferredquality": "192",
                }
            ],
        }
    else:
        # No explicit quality -> best audio with <=480p video.
        # NOTE: when youtube_dl cannot merge audio into the chosen mp4 format,
        # it falls back to mkv with that video quality and best audio.
        if video_quality_id:
            chosen_format = f"{video_quality_id}+bestaudio"
        else:
            chosen_format = "bestvideo[height<=480]+bestaudio/best[height<=480]"
        options = {
            "format": chosen_format,
            "outtmpl": out_template,
            "quiet": True,
        }
    if USING_PROXY:
        proxy = get_proxy()
        if proxy:
            options["proxy"] = proxy
            options["nocheckcertificate"] = True
    # if progress_bar:
    #     options['progress_hooks'] = [show_progress]
    return options
################################################################################################################################
########################################## HANDLING ERROR MESSAGES AND CHECK FOR YOUTUBE URL VALIDITY #####################
def show_info_message(msg, title="Information"):
    """Pop a modal information dialog."""
    messagebox.showinfo(title, msg)
def show_error_message(msg, title="Error"):
    """Pop a modal error dialog."""
    messagebox.showerror(title, msg)
def url_check(url):
    """Validate a YouTube URL; pops an error dialog and returns False when invalid."""
    if url == "":
        show_error_message("Youtube URL not provided!")
        return False
    if url is None:
        show_error_message("Unknown Youtube URL!")
        return False
    if not YOUTUBE_URL_REGEX.findall(url):
        show_error_message("Please provide a valid Youtube URL!")
        return False
    return True
##############################################################################################
###################################### HANDLING SELECTION QUALITY OF VIDEO ###################
def select_video_quality(vids_info: dict) -> str:
    """Ask the user to pick one of the available mp4 formats via a dialog.

    Args:
        vids_info (dict): info about video to download
    Returns:
        format_id: the selected format id, otherwise empty string '' is returned
    """
    global root
    available_formats = get_available_formats(vids_info)
    return VideoQualitySelector(root, available_formats, vids_info["title"]).show()
##############################################################################################
########################################## BUTTONS TOGGLES ###################################
def toggle_download_btns_state():
    """Flip both download buttons between NORMAL and DISABLED (if they exist)."""
    global BTN_START_DOWNLOAD, BTN_DOWNLOAD_FROM_TXT
    for button in (BTN_START_DOWNLOAD, BTN_DOWNLOAD_FROM_TXT):
        if not button:
            continue
        if button["state"] == tk.NORMAL:
            button["state"] = tk.DISABLED
        else:
            button["state"] = tk.NORMAL
##############################################################################################
##################################### HANDLE SINGLE URL DOWNLOAD AND MULTIPLE URLS DOWNLOADS LOGIC ###############
def start_convert_multiple_youtube_to_mp3():
    """Download every URL from a user-picked txt file using the current CONVERSION_MODE.

    Runs on a worker thread. Download buttons are disabled while running, a
    toplevel window is shown per video, and any failure pops an error dialog
    and is logged.
    """
    global CONVERSION_MODE
    try:
        vids_dest = get_download_destination_path()
        urls_to_download = read_youtube_urls()
        # only continue when there are urls to download
        if not urls_to_download:
            return
        # disable both download btn and btn of download from txt file
        toggle_download_btns_state()
        vids_info = [get_vid_info(yt_url) for yt_url in urls_to_download]
        vids_options = get_video_options(vids_dest, CONVERSION_MODE)
        # start downloading and converting the given youtube videos to mp3
        with youtube_dl.YoutubeDL(vids_options) as ydl:
            for vid_info in vids_info:
                # Inner quotes must differ from the outer f-string quotes so
                # this also parses on Python < 3.12 (PEP 701 quote reuse).
                with ToplevelManager(label_text=f'Downloading {vid_info["title"]} ...'):
                    ydl.download([vid_info["webpage_url"]])
        toggle_download_btns_state()
        show_info_message(
            f"MP3 files downloaded successfully!",
            "THE MP3 FILES HAVE BEEN DOWNLOADED SUCCESSFULLY!",
        )
    except Exception:
        show_error_message(UNEXPCTED_ERR_MSG)
        logging.exception(UNEXPCTED_ERR_MSG)
        # re-enable the buttons after a failure too
        toggle_download_btns_state()
def start_download():
    """Download the URL in the textbox (single video or playlist) per CONVERSION_MODE.

    Runs on a worker thread. Validates the URL, disables the download buttons
    for the duration, pops an info dialog on success and an error dialog
    (plus log entry) on failure.
    """
    global CONVERSION_MODE
    try:
        vid_url = get_url_from_textbox()
        vid_dest = get_download_destination_path()
        if url_check(vid_url) is False:
            return
        toggle_download_btns_state()
        vids_info = get_vid_info(vid_url)
        # A playlist link yields an info dict with 'entries'; otherwise single video.
        if "entries" in vids_info:
            list_vids_options = []  # per-video options for a video playlist
            vids_options = None  # shared options for an mp3 playlist
            if CONVERSION_MODE == "mp3":
                vids_options = get_video_options(
                    vid_dest,
                    CONVERSION_MODE
                    # progress_bar=False
                )
            else:
                for vid in vids_info["entries"]:
                    selected_video_format = select_video_quality(vid)
                    # if no video format has been chosen, then just abort download
                    if not selected_video_format:
                        toggle_download_btns_state()
                        return
                    vid_opt = get_video_options(
                        vid_dest,
                        CONVERSION_MODE,
                        video_quality_id=selected_video_format
                        # progress_bar=False
                    )
                    list_vids_options.append(vid_opt)
            if list_vids_options:
                for vid_opt in list_vids_options:
                    with youtube_dl.YoutubeDL(vid_opt) as ydl:
                        ydl.download([vids_info["webpage_url"]])
            else:
                with youtube_dl.YoutubeDL(vids_options) as ydl:
                    ydl.download([vids_info["webpage_url"]])
        else:
            # Inner f-string quotes differ from the outer ones so this also
            # parses on Python < 3.12 (PEP 701 quote reuse).
            with ToplevelManager(label_text=f"Downloading {vids_info['title']} ..."):
                if CONVERSION_MODE == "mp3":
                    vids_options = get_video_options(vid_dest, CONVERSION_MODE)
                else:
                    selected_video_format = select_video_quality(vids_info)
                    # if no video format has been chosen, then just abort download
                    if not selected_video_format:
                        toggle_download_btns_state()
                        return
                    vids_options = get_video_options(
                        vid_dest,
                        CONVERSION_MODE,
                        video_quality_id=selected_video_format,
                    )
                with youtube_dl.YoutubeDL(vids_options) as ydl:
                    ydl.download([vids_info["webpage_url"]])
        toggle_download_btns_state()
        if "entries" in vids_info:
            show_info_message(
                f'Playlist {vids_info["title"]} downloaded successfully!',
                "PLAYLIST DOWNLOADED SUCCESSFULLY!",
            )
        else:
            show_info_message(
                f'MP3 file {vids_info["title"]} downloaded successfully!',
                "THE MP3 FILE HAS BEEN DOWNLOADED SUCCESSFULLY!",
            )
    except Exception:
        show_error_message(UNEXPCTED_ERR_MSG)
        logging.exception(UNEXPCTED_ERR_MSG)
        # re-enable the buttons after a failure too
        toggle_download_btns_state()
def handle_proxy_btn():
    """Toggle TOR proxy usage via the proxy button.

    The button text is the state indicator. Enabling re-runs the (slow) TOR
    connectivity test only on every 5th click; disabling is immediate.
    """
    global PROXY_BUTTON, USING_PROXY, TOR_PROXY_CHECKED
    if PROXY_BUTTON:
        if PROXY_BUTTON.config("text")[-1] == "Currently NOT using proxy":
            TOR_PROXY_CHECKED += 1
            can_connect_to_tor = False
            if (
                TOR_PROXY_CHECKED % 5 == 0
            ):  # check TOR connection after every 5 clicks on the button
                try:
                    (
                        can_connect_to_tor,
                        ip,
                        tor_ip,
                    ) = TOR_HANDLER.test_tor_proxy_connection()
                except Exception:
                    show_error_message(UNEXPCTED_ERR_MSG)
                    logging.error(UNEXPCTED_ERR_MSG)
                    return
            if can_connect_to_tor:
                show_info_message(
                    f"Testing TOR Proxy\nYour IP:\n{ip}\nTor IP:\n{tor_ip}\nTor IP working correctly!"
                )
                PROXY_BUTTON.config(text="Currently using TOR proxy")
                USING_PROXY = True
            else:
                # NOTE(review): this branch is also reached on clicks where the
                # %5 test is skipped (can_connect_to_tor stays False) — confirm
                # the message is intended in that case.
                show_info_message(
                    "Your IP and Tor IP are the same: check whether you are running tor from commandline"
                )
        else:
            PROXY_BUTTON.config(text="Currently NOT using proxy")
            USING_PROXY = False
def toggle_download_mode():
    """Flip CONVERSION_MODE between mp3 and mp4; the button text tracks the state."""
    global CONVERSION_MODE_BTN, CONVERSION_MODE
    if not CONVERSION_MODE_BTN:
        return
    if CONVERSION_MODE_BTN.config("text")[-1] == "Current conversion mode: mp3":
        CONVERSION_MODE = "mp4"
        CONVERSION_MODE_BTN.config(text="Current conversion mode: mp4")
    else:
        CONVERSION_MODE = "mp3"
        CONVERSION_MODE_BTN.config(text="Current conversion mode: mp3")
##########################################################################################
###################################### WIDGETS CREATION (Buttons and Textboxes) #####################
def create_root_buttons():
    """Create and pack the five main buttons on the root window.

    Button texts double as state indicators (see handle_proxy_btn and
    toggle_download_mode).
    """
    global root, BTN_START_DOWNLOAD, BTN_SELECT_DIR, BTN_DOWNLOAD_FROM_TXT, PROXY_BUTTON, CONVERSION_MODE_BTN
    PROXY_BUTTON = tk.Button(
        master=root, text="Currently NOT using proxy", command=handle_proxy_btn
    )
    CONVERSION_MODE_BTN = tk.Button(
        master=root, text="Current conversion mode: mp3", command=toggle_download_mode
    )
    BTN_START_DOWNLOAD = tk.Button(
        master=root,
        text="Start download",
        width=25,
        height=5,
        command=convert_video_to_mp3,
    )
    BTN_SELECT_DIR = tk.Button(
        master=root,
        text="Select download directory",
        width=25,
        height=5,
        command=select_download_dir,
    )
    BTN_DOWNLOAD_FROM_TXT = tk.Button(
        master=root,
        text="Convert multiple youtube videos",
        width=25,
        height=5,
        command=convert_multiple_youtube_to_mp3,
    )
    BTN_START_DOWNLOAD.pack(pady=5)
    BTN_SELECT_DIR.pack(pady=5)
    BTN_DOWNLOAD_FROM_TXT.pack(pady=5)
    PROXY_BUTTON.pack(pady=5)
    CONVERSION_MODE_BTN.pack(pady=5)
def create_root_textboxes():
    """Create the URL entry and the (read-only) destination-path entry."""
    global TB_URL, TB_DESTINATION_PATH
    # create url label and textbox
    url_label = tk.Label(text="Youtube Video URL (required)")
    TB_URL = tk.Entry(width=80)
    url_label.pack()
    TB_URL.pack()
    # create destination label and textbox
    destination_label = tk.Label(
        text="Destination path (where to download the video/mp3)."
    )
    TB_DESTINATION_PATH = tk.Entry(state=tk.NORMAL, width=80)
    # insert current directory for the user for convenience; the entry is then
    # DISABLED so it can only change via select_download_dir()
    TB_DESTINATION_PATH.insert(0, CURRENT_SCRIPT_PATH)
    TB_DESTINATION_PATH["state"] = tk.DISABLED
    destination_label.pack()
    TB_DESTINATION_PATH.pack()
###############################################################################################
########################################## GETTERS ##########################################
def get_url_from_textbox():
    """Return the URL entry's contents with surrounding whitespace removed."""
    return TB_URL.get().strip()
def get_download_destination_path():
    """Return the chosen download directory.

    Falls back to CURRENT_SCRIPT_PATH when the destination textbox is empty.
    """
    dest = TB_DESTINATION_PATH.get().strip()
    # if destination textbox is left empty, then just default to current directory of the script
    if not dest:
        return CURRENT_SCRIPT_PATH
    # Return the stripped value; the previous version re-read the textbox
    # unstripped, leaking surrounding whitespace into the path.
    return dest
##############################################################################################
########################################## SHOW RIGHT CLICK MENU ###############################
def right_click_menu():
    """Create the Cut/Copy/Paste context menu and bind it to right click (<Button-3>)."""
    global root, RIGHT_CLICK_MENU
    if root:
        RIGHT_CLICK_MENU = Menu(root, tearoff=0)
        # Each action is forwarded to whichever widget currently has focus.
        RIGHT_CLICK_MENU.add_command(
            label="Cut", command=lambda: root.focus_get().event_generate("<<Cut>>")
        )
        RIGHT_CLICK_MENU.add_command(
            label="Copy", command=lambda: root.focus_get().event_generate("<<Copy>>")
        )
        RIGHT_CLICK_MENU.add_command(
            label="Paste", command=lambda: root.focus_get().event_generate("<<Paste>>")
        )
        root.bind("<Button-3>", right_click_handler)
def right_click_handler(event):
    """Pop up the context menu at the cursor, always releasing the grab afterwards."""
    global RIGHT_CLICK_MENU
    try:
        RIGHT_CLICK_MENU.tk_popup(event.x_root, event.y_root)
    finally:
        RIGHT_CLICK_MENU.grab_release()
##############################################################################################
#################################### HANDLE CLOSING OF TKINTER WINDOW ######################
def exit_handler():
    """Window-close handler: drop finished worker threads; close only when none remain."""
    global threads, root
    for worker in threads:
        worker.handled = not worker.is_alive()
    threads = [worker for worker in threads if not worker.handled]
    if not threads:
        root.destroy()
##############################################################################################
########################################## MAIN GUI ##########################################
def init_tkinter_root(size):
    """Build the root window with all widgets and enter the Tk main loop.

    :param size: geometry string such as '575x475'.
    """
    global root
    root = tk.Tk()
    # Wait for worker threads before actually closing the window.
    root.protocol("WM_DELETE_WINDOW", exit_handler)
    root.wm_iconbitmap("logo.ico")
    root.title("Youtube to MP3")
    root.geometry(size)
    root.minsize(400, 350)
    root.maxsize(1000, 600)
    root.grid_rowconfigure(0, weight=1)
    root.grid_columnconfigure(0, weight=1)
    # Add widgets
    create_root_textboxes()
    create_root_buttons()
    right_click_menu()
    root.mainloop()
def main(size_width=575, size_height=475):
    """Program entry point: construct the TOR handler, then start the GUI (blocks)."""
    global TOR_HANDLER
    TOR_HANDLER = TorHandler()  # init tor handler object
    init_tkinter_root(f"{size_width}x{size_height}")
# Launch the GUI only when run as a script (not on import).
if __name__ == "__main__":
    main()
| from os import path
from tkinter.filedialog import askdirectory, askopenfile
from tkinter.ttk import Progressbar
from tkinter import Menu, messagebox
from tor_handler import TorHandler
from toplevel_window_manager import ToplevelManager
from video_quality_selector_manager import VideoQualitySelector
import threading
import youtube_dl
import tkinter as tk
import re
import logging
logging.basicConfig(
filename="logs.log",
level=logging.DEBUG,
format="%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
root = None
TB_URL = None
TB_DESTINATION_PATH = None
BTN_START_DOWNLOAD = None
BTN_SELECT_DIR = None
BTN_DOWNLOAD_FROM_TXT = None
RIGHT_CLICK_MENU = None
PROXY_BUTTON = None
TOPLEVEL_WINDOW = None
CONVERSION_MODE_BTN = None
TOR_HANDLER = None
USING_PROXY = False
TOR_PROXY_CHECKED = -1
CONVERSION_MODE = "mp3"
USERAGENTS_FILEPATH = "./useragents.txt"
CURRENT_SCRIPT_PATH = path.abspath(path.dirname(__file__))
UNEXPCTED_ERR_MSG = "Unexpected error occured. Please check logs for more info."
threads = []
# this regex matches youtube urls with optional 'www.' behind 'youtube'
# alternative complicated regex: ^((?:https?:)?\/\/)?((?:www|m)\.)?((?:youtube\.com|youtu.be))(\/(?:[\w\-]+\?v=|embed\/|v\/)?)([\w\-]+)(\S+)?$
YOUTUBE_URL_REGEX = re.compile("^(https?\:\/\/)?(www\.)?(youtube\.com|youtu\.?be)\/.+$")
YOUTUBE_PLAYLIST_URL_REGEX = re.compile(
"^(?:https?:\/\/)?(?:www\.)?youtu\.?be(?:\.com)?.*?(?:v|list)=(.*?)(?:&|$)|^(?:https?:\/\/)?(?:www\.)?youtu\.?be(?:\.com)?(?:(?!=).)*\/(.*)$"
)
################################# PROGRESS BAR ##################################################################
# def create_toplevel_tk_window(label_text=None):
# global TOPLEVEL_WINDOW
# newWindow = tk.Toplevel()
# newWindow.title("Downloading...")
# newWindow.geometry("275x125")
# if label_text:
# label = tk.Label(master=newWindow, text=label_text, wraplength=newWindow.winfo_width())
# label.pack(padx=0,pady=0)
# TOPLEVEL_WINDOW = newWindow
# def show_progress(data):
# global TOPLEVEL_WINDOW
# try:
# # creating progress bar
# progress_bar = Progressbar(TOPLEVEL_WINDOW, length=250, s='black.Horizontal.TProgressbar')
# progress_bar['value'] = 0
# progress_bar.pack(padx=5, pady=25)
# if data['status'] == 'finished':
# progress_bar['value'] = 100
# if TOPLEVEL_WINDOW:
# TOPLEVEL_WINDOW.destroy()
# TOPLEVEL_WINDOW = None
# if data['status'] == 'downloading':
# p = data['_percent_str']
# p = p.replace('%', '')
# progress_bar['value'] = float(p)
# except Exception:
# show_error_message(UNEXPCTED_ERR_MSG)
# logging.exception(UNEXPCTED_ERR_MSG)
# if TOPLEVEL_WINDOW:
# TOPLEVEL_WINDOW.destroy()
# TOPLEVEL_WINDOW = None
###################################################################################################
##################################### UTILITIES #########################
def read_youtube_urls():
"""
Required format that the txt file containing the youtube urls must have:
url_1
url_2
.
.
.
url_n
:param filepath:
:return:
"""
yt_urls = []
file_to_read = askopenfile(mode="r", filetypes=[("Text file", "*.txt")])
if file_to_read is not None:
while True:
curr_url = file_to_read.readline()
cleaned_curr_url = curr_url.strip().rstrip("\n").strip("\r").strip("\t")
if not curr_url:
break
if not cleaned_curr_url:
continue
if YOUTUBE_URL_REGEX.findall(cleaned_curr_url):
yt_urls.append(cleaned_curr_url)
else:
show_error_message(
f'"{cleaned_curr_url}" IS NOT A VALID YOUTUBE URL. SKIPPED.'
)
return yt_urls
def select_download_dir():
global TB_DESTINATION_PATH
download_dir = askdirectory()
if TB_DESTINATION_PATH and download_dir:
TB_DESTINATION_PATH["state"] = tk.NORMAL
TB_DESTINATION_PATH.delete(0, tk.END)
TB_DESTINATION_PATH.insert(0, download_dir)
TB_DESTINATION_PATH["state"] = tk.DISABLED
###########################################################################
########################### THREADS ###################################
def convert_multiple_youtube_to_mp3():
t = threading.Thread(target=start_convert_multiple_youtube_to_mp3, args=())
t.start()
threads.append(t)
def convert_video_to_mp3():
t_d = threading.Thread(target=start_download, args=())
t_d.start()
threads.append(t_d)
#######################################################################
################################## PROXY STUFF $##########################
# def get_random_ua():
# # if file can be loaded in memory use: random.choice(open("useragents.txt").readlines())
# # Waterman's "Reservoir Algorithm" to get 1 line from file randomly in memory efficient way
# with open('useragents.txt') as f:
# line = next(f)
# for num, aline in enumerate(f, 2):
# if random.randrange(num):
# continue
# line = aline
# return line
def get_proxy():
# TODO: get random proxy if tor is not working
return TOR_HANDLER.socks5_url
##################################################################################
##################### YOUTUBE-DL YOUTUBE TO MP3 CONVERSION FOR GETTING VIDEO INFO AND OPTIONS THAT YOUTUBE-DL NEEDS ############
def get_available_formats(vids_info):
"""
Returns list of tuples of mp4 video formats in string representation and corresponding format_id
(excluding audio formats as the best is always chosen by default)
Args:
vids_info (list): the youtube info from the given video that needs to be downloaded
"""
formats = vids_info.get("formats", [vids_info])
available_formats_list = []
for f in formats:
if (
"audio" not in f["format"]
and f["ext"] == "mp4"
and "DASH" not in f["format"]
):
f_str = f"{f['ext']} - {f['format']}"
f_id = f["format_id"]
available_formats_list.append((f_id, f_str))
return available_formats_list
def get_vid_info(vid_url):
with youtube_dl.YoutubeDL() as ydl:
vid_info = ydl.extract_info(url=vid_url, download=False)
return vid_info
def get_video_options(
vid_dest: str,
conversion_mode: str,
video_quality_id: str = None
# progress_bar = True
):
global USING_PROXY
vid_name = "%(title)s.%(ext)s"
if conversion_mode == "mp3":
youtube_dl_options = {
"format": "bestaudio/best",
"outtmpl": path.join(vid_dest, vid_name),
"keepvideo": False,
"quiet": True,
# 'prefer_ffmpeg': True, # --> optional
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
"preferredquality": "192",
}
],
}
else:
# if no format specified, youtube_dl will download best audio with 480p video quality
# NOTE: if youtube_dl cant combine audio with specified mp4 format, it will convert it to mkv format instead
# with given vid quality and best audio
if not video_quality_id:
f = "bestvideo[height<=480]+bestaudio/best[height<=480]"
else:
f = f"{video_quality_id}+bestaudio"
youtube_dl_options = {
"format": f,
"outtmpl": path.join(vid_dest, vid_name),
"quiet": True,
}
if USING_PROXY:
proxy = get_proxy()
if proxy:
youtube_dl_options["proxy"] = proxy
youtube_dl_options["nocheckcertificate"] = True
# if progress_bar:
# youtube_dl_options['progress_hooks'] = [show_progress]
return youtube_dl_options
################################################################################################################################
########################################## HANDLING ERROR MESSAGES AND CHECK FOR YOUTUBE URL VALIDITY #####################
def show_info_message(msg, title="Information"):
messagebox.showinfo(title, msg)
def show_error_message(msg, title="Error"):
messagebox.showerror(title, msg)
def url_check(url):
if url == "":
show_error_message("Youtube URL not provided!")
return False
elif url is None:
show_error_message("Unknown Youtube URL!")
return False
elif not YOUTUBE_URL_REGEX.findall(url):
show_error_message("Please provide a valid Youtube URL!")
return False
else:
return True
##############################################################################################
###################################### HANDLING SELECTION QUALITY OF VIDEO ###################
def select_video_quality(vids_info: list) -> str:
"""Returns the format id of the selected format from the available formats
Args:
vids_info (dict): info about video to download
Returns:
format_id: the selected format id, otherwise empty string '' is returned
"""
global root
available_formats = get_available_formats(vids_info)
return VideoQualitySelector(root, available_formats, vids_info["title"]).show()
##############################################################################################
########################################## BUTTONS TOGGLES ###################################
def toggle_download_btns_state():
global BTN_START_DOWNLOAD, BTN_DOWNLOAD_FROM_TXT
if BTN_START_DOWNLOAD:
if BTN_START_DOWNLOAD["state"] == tk.NORMAL:
BTN_START_DOWNLOAD["state"] = tk.DISABLED
else:
BTN_START_DOWNLOAD["state"] = tk.NORMAL
if BTN_DOWNLOAD_FROM_TXT:
if BTN_DOWNLOAD_FROM_TXT["state"] == tk.NORMAL:
BTN_DOWNLOAD_FROM_TXT["state"] = tk.DISABLED
else:
BTN_DOWNLOAD_FROM_TXT["state"] = tk.NORMAL
##############################################################################################
##################################### HANDLE SINGLE URL DOWNLOAD AND MULTIPLE URLS DOWNLOADS LOGIC ###############
def start_convert_multiple_youtube_to_mp3():
global CONVERSION_MODE
try:
vids_dest = get_download_destination_path()
urls_to_download = read_youtube_urls()
# only continue when there are urls to download
if not urls_to_download:
return
# disable both download btn and btn of download from txt file
toggle_download_btns_state()
vids_info = []
for yt_url in urls_to_download:
vids_info.append(get_vid_info(yt_url))
vids_options = get_video_options(vids_dest, CONVERSION_MODE)
# start downloading and converting the given youtube videos to mp3
with youtube_dl.YoutubeDL(vids_options) as ydl:
for vid_info in vids_info:
# create toplevel window to show download progress for each download
with ToplevelManager(label_text=f'Downloading {vid_info["title"]} ...'):
# create_toplevel_tk_window(vid_info['title'])
ydl.download([vid_info["webpage_url"]])
toggle_download_btns_state()
show_info_message(
f"MP3 files downloaded successfully!",
"THE MP3 FILES HAVE BEEN DOWNLOADED SUCCESSFULLY!",
)
except Exception as e:
show_error_message(UNEXPCTED_ERR_MSG)
logging.exception(UNEXPCTED_ERR_MSG)
toggle_download_btns_state()
def start_download():
    """Download the URL in the URL textbox as mp3 or mp4 (single video or playlist).

    Reads the current conversion mode; for mp4 the user is asked to pick a
    quality per video. Download buttons are disabled while the download runs.
    """
    global CONVERSION_MODE
    try:
        vid_url = get_url_from_textbox()
        vid_dest = get_download_destination_path()
        if url_check(vid_url) is False:
            return
        toggle_download_btns_state()
        vids_info = get_vid_info(vid_url)
        # if link consists of multiple videos (playlist) then vids_info contains 'entries'
        # otherwise there is 1 video
        if "entries" in vids_info:
            list_vids_options = [] # in case playlist of vids need to be downloaded
            vids_options = None # in case playlist of mp3 need to be downloaded
            if CONVERSION_MODE == "mp3":
                vids_options = get_video_options(
                    vid_dest,
                    CONVERSION_MODE
                    # progress_bar=False
                )
            else:
                list_selected_video_format = []
                for idx, vid in enumerate(vids_info["entries"]):
                    selected_video_format = select_video_quality(vid)
                    # if not video format has been chosen, then just abort download
                    if not selected_video_format:
                        toggle_download_btns_state()
                        return
                    vid_opt = get_video_options(
                        vid_dest,
                        CONVERSION_MODE,
                        video_quality_id=selected_video_format
                        # progress_bar=False
                    )
                    list_vids_options.append(vid_opt)
            # NOTE(review): in the mp4-playlist branch below each per-video
            # option set downloads vids_info["webpage_url"] (the playlist URL),
            # not the individual entry's URL -- verify this is intended.
            if list_vids_options:
                for vid_opt in list_vids_options:
                    with youtube_dl.YoutubeDL(vid_opt) as ydl:
                        ydl.download([vids_info["webpage_url"]])
            else:
                with youtube_dl.YoutubeDL(vids_options) as ydl:
                    ydl.download([vids_info["webpage_url"]])
        else:
            # Single video: show a progress toplevel for the whole download.
            with ToplevelManager(label_text=f"Downloading {vids_info['title']} ..."):
                if CONVERSION_MODE == "mp3":
                    vids_options = get_video_options(vid_dest, CONVERSION_MODE)
                else:
                    selected_video_format = select_video_quality(vids_info)
                    # if not video format has been chosen, then just abort download
                    if not selected_video_format:
                        toggle_download_btns_state()
                        return
                    vids_options = get_video_options(
                        vid_dest,
                        CONVERSION_MODE,
                        video_quality_id=selected_video_format,
                    )
                # create_toplevel_tk_window(vids_info['title'])
                with youtube_dl.YoutubeDL(vids_options) as ydl:
                    ydl.download([vids_info["webpage_url"]])
        toggle_download_btns_state()
        if "entries" in vids_info:
            show_info_message(
                f'Playlist {vids_info["title"]} downloaded successfully!',
                "PLAYLIST DOWNLOADED SUCCESSFULLY!",
            )
        else:
            show_info_message(
                f'MP3 file {vids_info["title"]} downloaded successfully!',
                "THE MP3 FILE HAS BEEN DOWNLOADED SUCCESSFULLY!",
            )
    except Exception as e:
        show_error_message(UNEXPCTED_ERR_MSG)
        logging.exception(UNEXPCTED_ERR_MSG)
        toggle_download_btns_state()
def handle_proxy_btn():
    """Toggle TOR proxy usage when the proxy button is clicked.

    NOTE(review): the actual TOR connectivity test only runs on every 5th
    click (TOR_PROXY_CHECKED % 5 == 0); on other clicks can_connect_to_tor
    stays False, so the 'same IP' message is shown without testing, and
    `ip`/`tor_ip` are only bound when the test ran -- confirm this gating
    is intended.
    """
    global PROXY_BUTTON, USING_PROXY, TOR_PROXY_CHECKED
    if PROXY_BUTTON:
        if PROXY_BUTTON.config("text")[-1] == "Currently NOT using proxy":
            TOR_PROXY_CHECKED += 1
            can_connect_to_tor = False
            if (
                TOR_PROXY_CHECKED % 5 == 0
            ): # check TOR connection after every 5 clicks on the button
                try:
                    (
                        can_connect_to_tor,
                        ip,
                        tor_ip,
                    ) = TOR_HANDLER.test_tor_proxy_connection()
                except Exception:
                    show_error_message(UNEXPCTED_ERR_MSG)
                    logging.error(UNEXPCTED_ERR_MSG)
                    return
            if can_connect_to_tor:
                show_info_message(
                    f"Testing TOR Proxy\nYour IP:\n{ip}\nTor IP:\n{tor_ip}\nTor IP working correctly!"
                )
                PROXY_BUTTON.config(text="Currently using TOR proxy")
                USING_PROXY = True
            else:
                show_info_message(
                    "Your IP and Tor IP are the same: check whether you are running tor from commandline"
                )
        else:
            # Button currently says proxy is on: turn it off.
            PROXY_BUTTON.config(text="Currently NOT using proxy")
            USING_PROXY = False
def toggle_download_mode():
    """Switch the global conversion mode (and its button label) between mp3 and mp4."""
    global CONVERSION_MODE_BTN, CONVERSION_MODE
    if not CONVERSION_MODE_BTN:
        return
    currently_mp3 = (
        CONVERSION_MODE_BTN.config("text")[-1] == "Current conversion mode: mp3"
    )
    if currently_mp3:
        CONVERSION_MODE_BTN.config(text="Current conversion mode: mp4")
        CONVERSION_MODE = "mp4"
    else:
        CONVERSION_MODE_BTN.config(text="Current conversion mode: mp3")
        CONVERSION_MODE = "mp3"
##########################################################################################
###################################### WIDGETS CREATION (Buttons and Textboxes) #####################
def create_root_buttons():
    """Create the root window's five buttons and pack them in display order."""
    global root, BTN_START_DOWNLOAD, BTN_SELECT_DIR, BTN_DOWNLOAD_FROM_TXT, PROXY_BUTTON, CONVERSION_MODE_BTN
    # Widgets are created in the same order as before (Tk focus traversal
    # follows creation order).
    PROXY_BUTTON = tk.Button(
        master=root, text="Currently NOT using proxy", command=handle_proxy_btn
    )
    CONVERSION_MODE_BTN = tk.Button(
        master=root, text="Current conversion mode: mp3", command=toggle_download_mode
    )
    BTN_START_DOWNLOAD = tk.Button(
        master=root,
        text="Start download",
        width=25,
        height=5,
        command=convert_video_to_mp3,
    )
    BTN_SELECT_DIR = tk.Button(
        master=root,
        text="Select download directory",
        width=25,
        height=5,
        command=select_download_dir,
    )
    BTN_DOWNLOAD_FROM_TXT = tk.Button(
        master=root,
        text="Convert multiple youtube videos",
        width=25,
        height=5,
        command=convert_multiple_youtube_to_mp3,
    )
    # Pack with uniform vertical padding.
    for button in (
        BTN_START_DOWNLOAD,
        BTN_SELECT_DIR,
        BTN_DOWNLOAD_FROM_TXT,
        PROXY_BUTTON,
        CONVERSION_MODE_BTN,
    ):
        button.pack(pady=5)
def create_root_textboxes():
    """Create the URL entry and the (pre-filled, disabled) destination entry."""
    global TB_URL, TB_DESTINATION_PATH
    # URL entry: the only mandatory user input.
    lbl_url = tk.Label(text="Youtube Video URL (required)")
    TB_URL = tk.Entry(width=80)
    lbl_url.pack()
    TB_URL.pack()
    # Destination entry: pre-filled with the script's directory, then
    # disabled so it is only changed via the directory picker button.
    lbl_dest = tk.Label(
        text="Destination path (where to download the video/mp3)."
    )
    TB_DESTINATION_PATH = tk.Entry(state=tk.NORMAL, width=80)
    TB_DESTINATION_PATH.insert(0, CURRENT_SCRIPT_PATH)
    TB_DESTINATION_PATH["state"] = tk.DISABLED
    lbl_dest.pack()
    TB_DESTINATION_PATH.pack()
###############################################################################################
########################################## GETTERS ##########################################
def get_url_from_textbox():
    """Return the URL typed by the user, stripped of surrounding whitespace."""
    raw_url = TB_URL.get()
    return raw_url.strip()
def get_download_destination_path():
    """Return the download directory chosen by the user.

    Falls back to the script's own directory when the destination textbox is
    empty or whitespace-only.

    Fix: the original stripped the textbox value for the emptiness check but
    then returned the raw, unstripped contents; now the stripped value is
    returned consistently.
    """
    dest = TB_DESTINATION_PATH.get().strip()
    # Entry.get() returns a str, so an emptiness check is sufficient.
    if not dest:
        return CURRENT_SCRIPT_PATH
    return dest
##############################################################################################
########################################## SHOW RIGHT CLICK MENU ###############################
def right_click_menu():
    """Build the cut/copy/paste context menu and bind it to right-click."""
    global root, RIGHT_CLICK_MENU
    if not root:
        return
    RIGHT_CLICK_MENU = Menu(root, tearoff=0)
    # Each entry forwards the corresponding virtual event to whichever
    # widget currently has keyboard focus.
    for label, event in (
        ("Cut", "<<Cut>>"),
        ("Copy", "<<Copy>>"),
        ("Paste", "<<Paste>>"),
    ):
        RIGHT_CLICK_MENU.add_command(
            label=label,
            command=lambda ev=event: root.focus_get().event_generate(ev),
        )
    root.bind("<Button-3>", right_click_handler)
def right_click_handler(event):
    """Pop up the shared context menu at the pointer location."""
    menu = RIGHT_CLICK_MENU
    try:
        menu.tk_popup(event.x_root, event.y_root)
    finally:
        # Always release the grab, even if showing the menu failed.
        menu.grab_release()
##############################################################################################
#################################### HANDLE CLOSING OF TKINTER WINDOW ######################
def exit_handler():
    """Handle the window-close button: quit only when no worker threads remain.

    Finished threads are marked handled and pruned from the global list;
    the window is destroyed once the list is empty.
    NOTE(review): when threads are still alive the close request is silently
    ignored -- presumably the user must close again later; confirm this is
    the intended UX.
    """
    global threads, root
    for t in threads:
        if not t.is_alive():
            t.handled = True
        else:
            t.handled = False
    # Keep only the threads that are still running.
    threads = [t for t in threads if not t.handled]
    if not threads:
        root.destroy()
##############################################################################################
########################################## MAIN GUI ##########################################
def init_tkinter_root(size):
    """Create, populate, and run the main Tk window.

    Parameters
    ----------
    size : str
        Initial geometry string, e.g. ``"575x475"``.
    """
    global root
    root = tk.Tk()
    # Route the close button through exit_handler so the app only quits
    # when no worker threads remain.
    root.protocol("WM_DELETE_WINDOW", exit_handler)
    root.wm_iconbitmap("logo.ico")
    root.title("Youtube to MP3")
    root.geometry(size)
    root.minsize(400, 350)
    root.maxsize(1000, 600)
    root.grid_rowconfigure(0, weight=1)
    root.grid_columnconfigure(0, weight=1)
    # Add widgets
    create_root_textboxes()
    create_root_buttons()
    right_click_menu()
    root.mainloop()
def main(size_width=575, size_height=475):
    """Initialise the TOR handler and launch the GUI at the given window size."""
    global TOR_HANDLER
    # The TOR handler must exist before any proxy-button clicks.
    TOR_HANDLER = TorHandler()
    geometry = f"{size_width}x{size_height}"
    init_tkinter_root(geometry)
# Script entry point: build the GUI and start the Tk main loop.
if __name__ == "__main__":
    main()
|
import base64
import binascii
import codecs
import secrets
import discord
from discord.ext import commands
class Encryption(commands.Cog):
    """Discord cog exposing encode/decode commands for common text encodings.

    Each codec has a matching pair of subcommands under the ``encode`` and
    ``decode`` command groups; results are sent back to the invoking channel.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.group()
    async def encode(self, ctx):
        """ All encode methods """
        if ctx.invoked_subcommand is None:
            await ctx.send(embed=discord.Embed(description="Need a subcommand , sending help for more information",color=discord.Color.dark_gold()))
            await ctx.send_help("encode")

    @commands.group()
    async def decode(self, ctx):
        """ All decode methods """
        if ctx.invoked_subcommand is None:
            await ctx.send(embed=discord.Embed(description="Need a subcommand , sending help for more information",color=discord.Color.dark_gold()))
            await ctx.send_help("decode")

    async def encryptout(self, ctx, convert, txtinput):
        """Send the conversion result back to the channel, labelled `convert`.

        `txtinput` may be bytes (decoded as UTF-8) or already a str.
        Discord rejects messages over 2000 characters, so long results are
        refused up front.
        """
        if len(txtinput) > 1900:
            return await ctx.send(
                f"The result was too long, sorry **{ctx.author.name}**"
            )
        try:
            # BUGFIX: the f-string nested double quotes ("UTF-8") inside a
            # double-quoted f-string -- a SyntaxError on Python < 3.12.
            await ctx.send(f"📑 **{convert}**```fix\n{txtinput.decode('UTF-8')}```")
        except AttributeError:
            # `txtinput` was already a str (e.g. rot13 output).
            await ctx.send(f"📑 **{convert}**```fix\n{txtinput}```")

    @encode.command(name="base32", aliases=["b32"])
    async def encode_base32(self, ctx, *, txtinput: commands.clean_content):
        """ Encode in base32 """
        await self.encryptout(
            ctx, "Text -> base32", base64.b32encode(txtinput.encode("UTF-8"))
        )

    @decode.command(name="base32", aliases=["b32"])
    async def decode_base32(self, ctx, *, txtinput: str):
        """ Decode in base32 """
        try:
            await self.encryptout(
                ctx, "base32 -> Text", base64.b32decode(txtinput.encode("UTF-8"))
            )
        except Exception:
            await ctx.send("Invalid base32...")

    @encode.command(name="base64", aliases=["b64"])
    async def encode_base64(self, ctx, *, txtinput: commands.clean_content):
        """ Encode in base64 """
        await self.encryptout(
            ctx, "Text -> base64", base64.urlsafe_b64encode(txtinput.encode("UTF-8"))
        )

    @decode.command(name="base64", aliases=["b64"])
    async def decode_base64(self, ctx, *, txtinput: str):
        """ Decode in base64 """
        try:
            await self.encryptout(
                ctx,
                "base64 -> Text",
                base64.urlsafe_b64decode(txtinput.encode("UTF-8")),
            )
        except Exception:
            await ctx.send("Invalid base64...")

    @encode.command(name="rot13", aliases=["r13"])
    async def encode_rot13(self, ctx, *, txtinput: commands.clean_content):
        """ Encode in rot13 """
        # rot13 is its own inverse, so codecs.decode works for encoding too.
        await self.encryptout(ctx, "Text -> rot13", codecs.decode(txtinput, "rot_13"))

    @decode.command(name="rot13", aliases=["r13"])
    async def decode_rot13(self, ctx, *, txtinput: str):
        """ Decode in rot13 """
        try:
            await self.encryptout(
                ctx, "rot13 -> Text", codecs.decode(txtinput, "rot_13")
            )
        except Exception:
            await ctx.send("Invalid rot13...")

    @encode.command(name="hex")
    async def encode_hex(self, ctx, *, txtinput: commands.clean_content):
        """ Encode in hex """
        await self.encryptout(
            ctx, "Text -> hex", binascii.hexlify(txtinput.encode("UTF-8"))
        )

    @decode.command(name="hex")
    async def decode_hex(self, ctx, *, txtinput: str):
        """ Decode in hex """
        try:
            await self.encryptout(
                ctx, "hex -> Text", binascii.unhexlify(txtinput.encode("UTF-8"))
            )
        except Exception:
            await ctx.send("Invalid hex...")

    @encode.command(name="base85", aliases=["b85"])
    async def encode_base85(self, ctx, *, txtinput: commands.clean_content):
        """ Encode in base85 """
        await self.encryptout(
            ctx, "Text -> base85", base64.b85encode(txtinput.encode("UTF-8"))
        )

    @decode.command(name="base85", aliases=["b85"])
    async def decode_base85(self, ctx, *, txtinput: str):
        """ Decode in base85 """
        try:
            await self.encryptout(
                ctx, "base85 -> Text", base64.b85decode(txtinput.encode("UTF-8"))
            )
        except Exception:
            await ctx.send("Invalid base85...")

    @encode.command(name="ascii85", aliases=["a85"])
    async def encode_ascii85(self, ctx, *, txtinput: commands.clean_content):
        """ Encode in ASCII85 """
        await self.encryptout(
            ctx, "Text -> ASCII85", base64.a85encode(txtinput.encode("UTF-8"))
        )

    @decode.command(name="ascii85", aliases=["a85"])
    async def decode_ascii85(self, ctx, *, txtinput: str):
        """ Decode in ASCII85 """
        try:
            await self.encryptout(
                ctx, "ASCII85 -> Text", base64.a85decode(txtinput.encode("UTF-8"))
            )
        except Exception:
            await ctx.send("Invalid ASCII85...")

    @commands.command()
    async def password(self, ctx):
        """ Generates a random password string for you """
        # When invoked from a guild channel, tell the user to check DMs;
        # the password itself is always sent privately.
        if hasattr(ctx, "guild") and ctx.guild is not None:
            await ctx.send(
                f"Sending you a private message with your random generated password **{ctx.author.mention}**"
            )
        await ctx.author.send(
            f"🎁 **Here is your password:**\n{secrets.token_urlsafe(20)}"
        )
def setup(bot):
bot.add_cog(Encryption(bot)) | import base64
import binascii
import codecs
import secrets
import discord
from discord.ext import commands
class Encryption(commands.Cog):
    """Discord cog exposing encode/decode commands for common text encodings."""
    def __init__(self, bot):
        self.bot = bot
    @commands.group()
    async def encode(self, ctx):
        """ All encode methods """
        if ctx.invoked_subcommand is None:
            await ctx.send(embed=discord.Embed(description="Need a subcommand , sending help for more information",color=discord.Color.dark_gold()))
            await ctx.send_help("encode")
    @commands.group()
    async def decode(self, ctx):
        """ All decode methods """
        if ctx.invoked_subcommand is None:
            await ctx.send(embed=discord.Embed(description="Need a subcommand , sending help for more information",color=discord.Color.dark_gold()))
            await ctx.send_help("decode")
    async def encryptout(self, ctx, convert, txtinput):
        """Send the conversion result (bytes or str) back, labelled `convert`.

        Discord rejects messages over 2000 characters, so long results
        are refused up front.
        """
        if len(txtinput) > 1900:
            return await ctx.send(
                f"The result was too long, sorry **{ctx.author.name}**"
            )
        try:
            await ctx.send(f"📑 **{convert}**```fix\n{txtinput.decode('UTF-8')}```")
        except AttributeError:
            # `txtinput` was already a str (e.g. rot13 output).
            await ctx.send(f"📑 **{convert}**```fix\n{txtinput}```")
    @encode.command(name="base32", aliases=["b32"])
    async def encode_base32(self, ctx, *, txtinput: commands.clean_content):
        """ Encode in base32 """
        await self.encryptout(
            ctx, "Text -> base32", base64.b32encode(txtinput.encode("UTF-8"))
        )
    @decode.command(name="base32", aliases=["b32"])
    async def decode_base32(self, ctx, *, txtinput: str):
        """ Decode in base32 """
        try:
            await self.encryptout(
                ctx, "base32 -> Text", base64.b32decode(txtinput.encode("UTF-8"))
            )
        except Exception:
            await ctx.send("Invalid base32...")
    @encode.command(name="base64", aliases=["b64"])
    async def encode_base64(self, ctx, *, txtinput: commands.clean_content):
        """ Encode in base64 """
        await self.encryptout(
            ctx, "Text -> base64", base64.urlsafe_b64encode(txtinput.encode("UTF-8"))
        )
    @decode.command(name="base64", aliases=["b64"])
    async def decode_base64(self, ctx, *, txtinput: str):
        """ Decode in base64 """
        try:
            await self.encryptout(
                ctx,
                "base64 -> Text",
                base64.urlsafe_b64decode(txtinput.encode("UTF-8")),
            )
        except Exception:
            await ctx.send("Invalid base64...")
    @encode.command(name="rot13", aliases=["r13"])
    async def encode_rot13(self, ctx, *, txtinput: commands.clean_content):
        """ Encode in rot13 """
        # rot13 is its own inverse, so codecs.decode works for encoding too.
        await self.encryptout(ctx, "Text -> rot13", codecs.decode(txtinput, "rot_13"))
    @decode.command(name="rot13", aliases=["r13"])
    async def decode_rot13(self, ctx, *, txtinput: str):
        """ Decode in rot13 """
        try:
            await self.encryptout(
                ctx, "rot13 -> Text", codecs.decode(txtinput, "rot_13")
            )
        except Exception:
            await ctx.send("Invalid rot13...")
    @encode.command(name="hex")
    async def encode_hex(self, ctx, *, txtinput: commands.clean_content):
        """ Encode in hex """
        await self.encryptout(
            ctx, "Text -> hex", binascii.hexlify(txtinput.encode("UTF-8"))
        )
    @decode.command(name="hex")
    async def decode_hex(self, ctx, *, txtinput: str):
        """ Decode in hex """
        try:
            await self.encryptout(
                ctx, "hex -> Text", binascii.unhexlify(txtinput.encode("UTF-8"))
            )
        except Exception:
            await ctx.send("Invalid hex...")
    @encode.command(name="base85", aliases=["b85"])
    async def encode_base85(self, ctx, *, txtinput: commands.clean_content):
        """ Encode in base85 """
        await self.encryptout(
            ctx, "Text -> base85", base64.b85encode(txtinput.encode("UTF-8"))
        )
    @decode.command(name="base85", aliases=["b85"])
    async def decode_base85(self, ctx, *, txtinput: str):
        """ Decode in base85 """
        try:
            await self.encryptout(
                ctx, "base85 -> Text", base64.b85decode(txtinput.encode("UTF-8"))
            )
        except Exception:
            await ctx.send("Invalid base85...")
    @encode.command(name="ascii85", aliases=["a85"])
    async def encode_ascii85(self, ctx, *, txtinput: commands.clean_content):
        """ Encode in ASCII85 """
        await self.encryptout(
            ctx, "Text -> ASCII85", base64.a85encode(txtinput.encode("UTF-8"))
        )
    @decode.command(name="ascii85", aliases=["a85"])
    async def decode_ascii85(self, ctx, *, txtinput: str):
        """ Decode in ASCII85 """
        try:
            await self.encryptout(
                ctx, "ASCII85 -> Text", base64.a85decode(txtinput.encode("UTF-8"))
            )
        except Exception:
            await ctx.send("Invalid ASCII85...")
    @commands.command()
    async def password(self, ctx):
        """ Generates a random password string for you """
        # The notice goes to the channel; the password is always DM'd.
        if hasattr(ctx, "guild") and ctx.guild is not None:
            await ctx.send(
                f"Sending you a private message with your random generated password **{ctx.author.mention}**"
            )
        await ctx.author.send(
            f"🎁 **Here is your password:**\n{secrets.token_urlsafe(20)}"
        )
# discord.py extension hook: register the Encryption cog with the bot.
def setup(bot):
    bot.add_cog(Encryption(bot))
"""A container for specifying and manipulating a graph with distinct inputs and outputs."""
import time
import warnings
from collections import OrderedDict
from typing import Any, Dict, List, NoReturn, Optional, Tuple, Union
import aesara
from aesara.configdefaults import config
from aesara.graph.basic import Apply, Constant, Variable, applys_between
from aesara.graph.basic import as_string as graph_as_string
from aesara.graph.basic import clone_get_equiv, graph_inputs, io_toposort, vars_between
from aesara.graph.toolbox import AlreadyThere, Feature, ReplaceValidate
from aesara.graph.utils import MetaObject, TestValueError, get_variable_trace_string
from aesara.misc.ordered_set import OrderedSet
class InconsistencyError(Exception):
    """Raised by `FunctionGraph` listeners when the graph's state is invalid."""
class MissingInputError(Exception):
    """Raised when a symbolic input needed to compute the outputs is missing."""

    def __init__(self, *args, **kwargs):
        # An optional ``variable`` keyword appends that variable's trace to
        # the message, to help locate the offending input.
        if kwargs:
            assert list(kwargs.keys()) == ["variable"]
            trace = get_variable_trace_string(kwargs["variable"])
            if trace:
                args = args + (trace,)
        # Join on newlines so multi-part messages print correctly.
        super().__init__("\n".join(args))
class FunctionGraph(MetaObject):
"""
A `FunctionGraph` represents a subgraph bound by a set of input variables and
a set of output variables, ie a subgraph that specifies an Aesara function.
The inputs list should contain all the inputs on which the outputs depend.
``Variable``s of type ``Constant`` are not counted as inputs.
The `FunctionGraph` supports the replace operation which allows to replace
a variable in the subgraph by another, e.g. replace ``(x + x).out`` by
``(2 * x).out``. This is the basis for optimization in Aesara.
This class is also responsible for verifying that a graph is valid
(ie, all the dtypes and broadcast patterns are compatible with the
way the ``Variable``s are used) and for tracking the ``Variable``s with
a ``clients`` field that specifies which ``Apply`` nodes use the ``Variable``.
The ``clients`` field combined with the ``Variable.owner`` field and the
``Apply`` nodes' ``Apply.inputs`` field allows the graph to be traversed in
both directions.
It can also be extended with new features using
``FunctionGraph.attach_feature(<Feature instance>)``.
See ``Feature`` for event types and documentation.
Extra features allow the `FunctionGraph` to verify new properties of
a graph as it is optimized.
Historically, the `FunctionGraph` was called an ``Env``. Keep this in mind
while reading out-of-date documentation, e-mail support threads, etc.
The constructor creates a `FunctionGraph` which operates on the subgraph
bound by the inputs and outputs sets.
This class keeps a pointer to the inputs and outputs, and also modifies
them.
"""
    def __init__(
        self,
        inputs: Optional[List[Variable]] = None,
        outputs: Optional[List[Variable]] = None,
        features: Optional[List[Feature]] = None,
        clone: bool = True,
        update_mapping: Optional[Dict[Variable, Variable]] = None,
        memo: Optional[Dict[Variable, Variable]] = None,
        copy_inputs: bool = True,
        copy_orphans: bool = True,
    ):
        """
        Create a `FunctionGraph` which operates on the subgraph between the
        `inputs` and `outputs`.
        Parameters
        ----------
        inputs
            Input variables of the graph.
        outputs
            Output variables of the graph.
        clone
            If ``True``, the graph will be cloned.
        features
            A list of features to be added to the `FunctionGraph`.
        update_mapping
            Mapping between the `inputs` with updates and the `outputs`
            corresponding to their updates.
        memo
            See ``clone_get_equiv``.
        copy_inputs
            See ``clone_get_equiv``.
        copy_orphans
            See ``clone_get_equiv``.
        """
        if outputs is None:
            raise ValueError("No outputs specified")
        if inputs is None:
            # Derive inputs from the output graph; constants are orphans,
            # not inputs.
            inputs = [i for i in graph_inputs(outputs) if not isinstance(i, Constant)]
        if clone:
            memo = clone_get_equiv(
                inputs,
                outputs,
                copy_inputs=copy_inputs,
                copy_orphans=copy_orphans,
                memo=memo,
            )
            # Work with the cloned variables from here on.
            outputs = [memo[o] for o in outputs]
            inputs = [memo[i] for i in inputs]
        # Bookkeeping for time spent inside feature callbacks.
        self.execute_callbacks_time = 0
        self.execute_callbacks_times = {}
        if features is None:
            features = []
        self._features = []
        # All apply nodes in the subgraph defined by inputs and
        # outputs are cached in this field
        self.apply_nodes = set()
        # Ditto for variable nodes.
        # It must contain all fgraph.inputs and all apply_nodes
        # outputs even if they aren't used in the graph.
        self.variables = set()
        self.outputs = list(outputs)
        # Maps each variable to the list of (node, index) pairs that use it.
        self.clients = {}
        for f in features:
            self.attach_feature(f)
        self.attach_feature(ReplaceValidate())
        self.inputs = []
        for in_var in inputs:
            if in_var.owner is not None:
                raise ValueError(
                    "One of the provided inputs is the output of "
                    "an already existing node. "
                    "If that is okay, either discard that "
                    "input's owner or use graph.clone."
                )
            self.add_input(in_var, check=False)
        for output in outputs:
            self.import_var(output, reason="init")
        # Record each output position as a client of its variable.
        for i, output in enumerate(outputs):
            self.clients[output].append(("output", i))
        self.profile = None
        self.update_mapping = update_mapping
def add_input(self, var: Variable, check: bool = True) -> NoReturn:
"""Add a new variable as an input to this `FunctionGraph`.
Parameters
----------
var : aesara.graph.basic.Variable
"""
if check and var in self.inputs:
return
self.inputs.append(var)
self.setup_var(var)
self.variables.add(var)
def setup_var(self, var: Variable) -> NoReturn:
"""Set up a variable so it belongs to this `FunctionGraph`.
Parameters
----------
var : aesara.graph.basic.Variable
"""
self.clients.setdefault(var, [])
def setup_node(self, node: Apply) -> NoReturn:
"""Set up node so it belongs to this `FunctionGraph`.
Parameters
----------
node : aesara.graph.basic.Apply
"""
if node.op.view_map and not all(
isinstance(view, (list, tuple)) for view in node.op.view_map.values()
):
raise Exception(
f"Op '{node.op}' have a bad view map '{node.op.view_map}',"
" the values must be tuples or lists."
)
if node.op.destroy_map and not all(
isinstance(destroy, (list, tuple))
for destroy in node.op.destroy_map.values()
):
raise Exception(
f"Op '{node.op}' have a bad destroy map '{node.op.destroy_map}',"
" the values must be tuples or lists."
)
def disown(self) -> NoReturn:
"""Clear internal variables."""
for f in self._features:
self.remove_feature(f)
self.clients = {}
self.apply_nodes = set()
self.variables = set()
self.inputs = None
self.outputs = None
self.profile = None
self.update_mapping = None
    def get_clients(self, var: Variable) -> List[Tuple[Apply, int]]:
        """Return a list of all the `(node, i)` pairs such that `node.inputs[i]` is `var`."""
        # Raises KeyError if `var` was never registered in this graph.
        return self.clients[var]
    def add_client(self, var: Variable, new_client: Tuple[Apply, int]) -> NoReturn:
        """Update the clients of `var` with `new_clients`.
        Parameters
        ----------
        var : Variable.
        new_client : (Apply, int)
            A `(node, i)` pair such that `node.inputs[i]` is `var`.
        """
        # `var` must already have a client list (see `setup_var`).
        self.clients[var].append(new_client)
    def remove_client(
        self, var: Variable, client_to_remove: Tuple[Apply, int], reason: str = None
    ) -> NoReturn:
        """Recursively removes clients of a variable.
        This is the main method to remove variables or `Apply` nodes from
        a `FunctionGraph`.
        This will remove `var` from the `FunctionGraph` if it doesn't have any
        clients remaining. If it has an owner and all the outputs of the owner
        have no clients, it will also be removed.
        Parameters
        ----------
        var : Variable
            The clients of `var` that will be removed.
        client_to_remove : pair of (Apply, int)
            A `(node, i)` pair such that `node.inputs[i]` will no longer be
            `var` in this `FunctionGraph`.
        """
        # Process removals iteratively with an explicit stack to avoid deep
        # recursion on large graphs.
        removal_stack = [(var, client_to_remove)]
        while removal_stack:
            var, client_to_remove = removal_stack.pop()
            try:
                var_clients = self.clients[var]
                var_clients.remove(client_to_remove)
            except ValueError:
                # In this case, the original `var` could've been removed from
                # the current `var`'s client list before this call.
                # There's nothing inherently wrong with that, so we continue as
                # if it were removed here.
                var_clients = None
            if var_clients:
                continue
            # Now, `var` has no more clients, so check if we need to remove it
            # and its `Apply` node
            if not var.owner:
                # The `var` is a `Constant` or an input without a client, so we
                # remove it
                self.variables.remove(var)
            else:
                apply_node = var.owner
                if not any(
                    output for output in apply_node.outputs if self.clients[output]
                ):
                    # The `Apply` node is not used and is not an output, so we
                    # remove it and its outputs
                    if not hasattr(apply_node.tag, "removed_by"):
                        apply_node.tag.removed_by = []
                    apply_node.tag.removed_by.append(str(reason))
                    self.apply_nodes.remove(apply_node)
                    self.variables.difference_update(apply_node.outputs)
                    self.execute_callbacks("on_prune", apply_node, reason)
                    # Queue the removed node's inputs so their client lists
                    # are updated (and possibly pruned) as well.
                    for i, in_var in enumerate(apply_node.inputs):
                        removal_stack.append((in_var, (apply_node, i)))
    def import_var(
        self, var: Variable, reason: str = None, import_missing: bool = False
    ) -> NoReturn:
        """Import variables into this `FunctionGraph`.
        This will also import the `variable`'s `Apply` node.
        Parameters:
        ----------
        variable : aesara.graph.basic.Variable
            The variable to be imported.
        reason : str
            The name of the optimization or operation in progress.
        import_missing : bool
            Add missing inputs instead of raising an exception.
        """
        # Imports the owners of the variables
        if var.owner and var.owner not in self.apply_nodes:
            self.import_node(var.owner, reason=reason, import_missing=import_missing)
        elif (
            var.owner is None
            and not isinstance(var, Constant)
            and var not in self.inputs
        ):
            # Ownerless non-constant that is not yet an input: it is either
            # a NaN placeholder or a genuinely missing input.
            from aesara.graph.null_type import NullType
            if isinstance(var.type, NullType):
                raise TypeError(
                    f"Computation graph contains a NaN. {var.type.why_null}"
                )
            if import_missing:
                self.add_input(var)
            else:
                raise MissingInputError(f"Undeclared input: {var}", variable=var)
        self.setup_var(var)
        self.variables.add(var)
    def import_node(
        self,
        apply_node: Apply,
        check: bool = True,
        reason: str = None,
        import_missing: bool = False,
    ) -> NoReturn:
        """Recursively import everything between an `Apply` node and the `FunctionGraph`'s outputs.
        Parameters:
        ----------
        apply_node : aesara.graph.basic.Apply
            The node to be imported.
        check : bool
            Check that the inputs for the imported nodes are also present in
            the `FunctionGraph`.
        reason : str
            The name of the optimization or operation in progress.
        import_missing : bool
            Add missing inputs instead of raising an exception.
        """
        # We import the nodes in topological order. We only are interested in
        # new nodes, so we use all variables we know of as if they were the
        # input set. (The functions in the graph module only use the input set
        # to know where to stop going down.)
        new_nodes = io_toposort(self.variables, apply_node.outputs)
        if check:
            # Validate every new node's inputs before mutating any state,
            # so a failed import leaves the graph untouched.
            for node in new_nodes:
                for var in node.inputs:
                    if (
                        var.owner is None
                        and not isinstance(var, Constant)
                        and var not in self.inputs
                    ):
                        if import_missing:
                            self.add_input(var)
                        else:
                            error_msg = (
                                f"Input {node.inputs.index(var)} ({var})"
                                " of the graph (indices start "
                                f"from 0), used to compute {node}, was not "
                                "provided and not given a value. Use the "
                                "Aesara flag exception_verbosity='high', "
                                "for more information on this error."
                            )
                            raise MissingInputError(error_msg, variable=var)
        for node in new_nodes:
            assert node not in self.apply_nodes
            self.setup_node(node)
            self.apply_nodes.add(node)
            # Tag the node with the reason it entered the graph (debug aid).
            if not hasattr(node.tag, "imported_by"):
                node.tag.imported_by = []
            node.tag.imported_by.append(str(reason))
            for output in node.outputs:
                self.setup_var(output)
                self.variables.add(output)
            for i, input in enumerate(node.inputs):
                if input not in self.variables:
                    self.setup_var(input)
                    self.variables.add(input)
                self.add_client(input, (node, i))
            self.execute_callbacks("on_import", node, reason)
    def change_input(
        self,
        node: Apply,
        i: int,
        new_var: Variable,
        reason: str = None,
        import_missing: bool = False,
    ) -> NoReturn:
        """Change ``node.inputs[i]`` to `new_var`.
        ``new_var.type == old_var.type`` must be ``True``, where ``old_var`` is the
        current value of ``node.inputs[i]`` which we want to replace.
        For each feature that has an `on_change_input` method, this method calls:
        ``feature.on_change_input(function_graph, node, i, old_var, new_var, reason)``
        Parameters
        ----------
        node : aesara.graph.basic.Apply or str
            The node for which an input is to be changed. If the value is
            the string ``"output"`` then the ``self.outputs`` will be used
            instead of ``node.inputs``.
        i : int
            The index in `node.inputs` that we want to change.
        new_var : aesara.graph.basic.Variable
            The new variable to take the place of ``node.inputs[i]``.
        import_missing : bool
            Add missing inputs instead of raising an exception.
        """
        # TODO: ERROR HANDLING FOR LISTENERS (should it complete the change or revert it?)
        if node == "output":
            r = self.outputs[i]
            if not r.type == new_var.type:
                raise TypeError(
                    "The type of the replacement must be the"
                    " same as the type of the original Variable.",
                    r,
                    new_var,
                )
            self.outputs[i] = new_var
        else:
            r = node.inputs[i]
            if not r.type == new_var.type:
                raise TypeError(
                    "The type of the replacement must be the"
                    " same as the type of the original Variable.",
                    r,
                    new_var,
                )
            node.inputs[i] = new_var
        if r is new_var:
            # No-op replacement: nothing to rewire.
            return
        self.import_var(new_var, reason=reason, import_missing=import_missing)
        self.add_client(new_var, (node, i))
        self.remove_client(r, (node, i), reason=reason)
        # Precondition: the substitution is semantically valid However it may
        # introduce cycles to the graph, in which case the transaction will be
        # reverted later.
        self.execute_callbacks("on_change_input", node, i, r, new_var, reason=reason)
    def replace(
        self,
        var: Variable,
        new_var: Variable,
        reason: str = None,
        verbose: bool = None,
        import_missing: bool = False,
    ) -> NoReturn:
        """Replace a variable in the `FunctionGraph`.
        This is the main interface to manipulate the subgraph in `FunctionGraph`.
        For every node that uses `var` as input, makes it use `new_var` instead.
        Parameters:
        ----------
        var : aesara.graph.basic.Variable
            The variable to be replaced.
        new_var : aesara.graph.basic.Variable
            The variable to replace `var`.
        reason : str
            The name of the optimization or operation in progress.
        verbose : bool
            Print `reason`, `var`, and `new_var`.
        import_missing : bool
            Import missing variables.
        """
        if verbose is None:
            verbose = config.optimizer_verbose
        if verbose:
            print(reason, var, new_var)
        # Coerce the replacement to the replaced variable's type if possible.
        new_var = var.type.filter_variable(new_var, allow_convert=True)
        if var not in self.variables:
            # TODO: Raise an actual exception here.
            # Old comment:
            # this variable isn't in the graph... don't raise an
            # exception here, just return silently because it makes it
            # easier to implement some optimizations for
            # multiple-output ops
            # raise ValueError()
            warnings.warn(
                f"Variable {var} cannot be replaced; it isn't in the FunctionGraph"
            )
            return
        if config.compute_test_value != "off":
            # Sanity-check that the replacement's test value has the same
            # shape as the original's (when both have test values).
            try:
                tval = aesara.graph.op.get_test_value(var)
                new_tval = aesara.graph.op.get_test_value(new_var)
            except TestValueError:
                pass
            else:
                tval_shape = getattr(tval, "shape", None)
                new_tval_shape = getattr(new_tval, "shape", None)
                if tval_shape != new_tval_shape:
                    raise AssertionError(
                        "The replacement variable has a test value with "
                        "a shape different from the original variable's "
                        f"test value. Original: {tval_shape}, new: {new_tval_shape}"
                    )
        # Iterate over a snapshot: `change_input` mutates the client list.
        for node, i in list(self.clients[var]):
            assert (node == "output" and self.outputs[i] is var) or (
                node.inputs[i] is var
            )
            self.change_input(
                node, i, new_var, reason=reason, import_missing=import_missing
            )
def replace_all(self, pairs: List[Tuple[Variable, Variable]], **kwargs) -> NoReturn:
    """Apply `self.replace` to every ``(old, new)`` pair in `pairs`, forwarding `kwargs`."""
    for old_var, replacement in pairs:
        self.replace(old_var, replacement, **kwargs)
def attach_feature(self, feature: Feature) -> NoReturn:
    """Attach a `Feature` to this `FunctionGraph` and run its `on_attach` callback."""
    # The exact same feature object is attached at most once.
    if feature in self._features:
        return
    # Give the feature a chance to refuse attachment: `on_attach` raises
    # `AlreadyThere` when an equivalent feature is already installed.
    on_attach = getattr(feature, "on_attach", None)
    if on_attach is not None:
        try:
            on_attach(self)
        except AlreadyThere:
            return
    # Start the per-feature callback timer at zero.
    self.execute_callbacks_times.setdefault(feature, 0)
    self._features.append(feature)
def remove_feature(self, feature: Feature) -> NoReturn:
    """Detach `feature` from the graph, invoking ``feature.on_detach`` if defined."""
    try:
        self._features.remove(feature)
    except ValueError:
        # The feature was never attached; nothing to detach.
        return
    on_detach = getattr(feature, "on_detach", None)
    if on_detach is not None:
        on_detach(self)
def execute_callbacks(self, name: str, *args, **kwargs) -> NoReturn:
    """Call ``getattr(feature, name)(self, *args, **kwargs)`` on every feature defining it.

    Per-feature and total callback times are accumulated for profiling.
    """
    start = time.time()
    for feature in self._features:
        try:
            callback = getattr(feature, name)
        except AttributeError:
            # The feature does not implement this event; skip it.
            continue
        cb_start = time.time()
        callback(self, *args, **kwargs)
        self.execute_callbacks_times[feature] += time.time() - cb_start
    self.execute_callbacks_time += time.time() - start
def collect_callbacks(self, name: str, *args) -> Dict[Feature, Any]:
    """Return ``{feature: getattr(feature, name)(*args)}`` for each feature defining `name`."""
    results = {}
    for feature in self._features:
        try:
            callback = getattr(feature, name)
        except AttributeError:
            # Feature doesn't implement this callback; omit it.
            continue
        results[feature] = callback(*args)
    return results
def toposort(self) -> List[Apply]:
    """Return the graph's `Apply` nodes in a dependency-respecting order.

    The order satisfies both the data dependencies of the graph and any
    extra constraints supplied by features that implement `orderings`.
    """
    # With fewer than two nodes there is nothing to sort.  This happens a
    # lot in practice (e.g. the OpWiseCLinker produces one-node graphs).
    if len(self.apply_nodes) < 2:
        return list(self.apply_nodes)
    return io_toposort(self.inputs, self.outputs, self.orderings())
def orderings(self) -> Dict[Apply, List[Apply]]:
    """Return `dict` `d` s.t. `d[node]` is a list of nodes that must be evaluated before `node` itself can be evaluated.
    This is used primarily by the destroy_handler feature to ensure that
    the clients of any destroyed inputs have already computed their
    outputs.
    Notes
    -----
    This only calls the `orderings()` function on all features. It does not
    take care of computing the dependencies by itself.
    """
    assert isinstance(self._features, list)
    all_orderings = []
    for feature in self._features:
        if hasattr(feature, "orderings"):
            orderings = feature.orderings(self)
            # Require a deterministic mapping so that `toposort` results
            # are reproducible across runs.
            if not isinstance(orderings, OrderedDict):
                raise TypeError(
                    "Non-deterministic return value from "
                    + str(feature.orderings)
                    + ". Nondeterministic object is "
                    + str(orderings)
                )
            if len(orderings) > 0:
                all_orderings.append(orderings)
                for node, prereqs in orderings.items():
                    # The prerequisite collections must also iterate
                    # deterministically (list or OrderedSet).
                    if not isinstance(prereqs, (list, OrderedSet)):
                        raise TypeError(
                            "prereqs must be a type with a "
                            "deterministic iteration order, or toposort "
                            " will be non-deterministic."
                        )
    if len(all_orderings) == 1:
        # If there is only 1 ordering, we reuse it directly.
        return all_orderings[0].copy()
    else:
        # If there is more than 1 ordering, combine them.
        ords = OrderedDict()
        for orderings in all_orderings:
            for node, prereqs in orderings.items():
                ords.setdefault(node, []).extend(prereqs)
        return ords
def check_integrity(self) -> NoReturn:
    """
    Call this for a diagnosis if things go awry.

    Verifies that the cached node/variable sets and the client lists are
    consistent with what is actually reachable between the graph's inputs
    and outputs, raising `Exception` on the first inconsistency found.
    """
    # The apply nodes reachable between inputs and outputs must match the
    # cached `apply_nodes` set exactly.
    nodes = set(applys_between(self.inputs, self.outputs))
    if self.apply_nodes != nodes:
        missing = nodes.difference(self.apply_nodes)
        excess = self.apply_nodes.difference(nodes)
        raise Exception(
            "The nodes are inappropriately cached. missing, in excess: ",
            missing,
            excess,
        )
    # Every input slot of every node must be registered in that
    # variable's client list.
    for node in nodes:
        for i, variable in enumerate(node.inputs):
            clients = self.clients[variable]
            if (node, i) not in clients:
                raise Exception(
                    f"Inconsistent clients list {(node, i)} in {clients}"
                )
    # The cached variable set must match the reachable variables.
    variables = set(vars_between(self.inputs, self.outputs))
    if set(self.variables) != variables:
        missing = variables.difference(self.variables)
        excess = self.variables.difference(variables)
        raise Exception(
            "The variables are inappropriately cached. missing, in excess: ",
            missing,
            excess,
        )
    for variable in variables:
        # Ownerless, non-constant variables must be declared inputs.
        if (
            variable.owner is None
            and variable not in self.inputs
            and not isinstance(variable, Constant)
        ):
            raise Exception(f"Undeclared input: {variable}")
        # Each client entry must point back at this exact variable,
        # whether the client is an output slot or a node's input slot.
        for node, i in self.clients[variable]:
            if node == "output":
                if self.outputs[i] is not variable:
                    raise Exception(
                        f"Inconsistent clients list: {variable}, {self.outputs[i]}"
                    )
                continue
            if node not in nodes:
                raise Exception(
                    f"Client not in FunctionGraph: {variable}, {(node, i)}"
                )
            if node.inputs[i] is not variable:
                raise Exception(
                    f"Inconsistent clients list: {variable}, {node.inputs[i]}"
                )
def __repr__(self):
    """Return a string like ``FunctionGraph(<inputs> -> <outputs>)``.

    Bug fix: the f-string expression previously reused the f-string's own
    double-quote delimiter (``{", ".join(...)}``), which is a SyntaxError
    on Python < 3.12 (quote reuse was only allowed by PEP 701).  Use
    single quotes inside the expression instead, matching the rest of the
    file.
    """
    return f"FunctionGraph({', '.join(graph_as_string(self.inputs, self.outputs))})"
def clone(self, check_integrity=True) -> "FunctionGraph":
    """Return a clone of this graph; the old-to-new equivalence dict is discarded."""
    cloned_graph, _equiv = self.clone_get_equiv(check_integrity)
    return cloned_graph
def clone_get_equiv(
    self, check_integrity: bool = True, attach_feature: bool = True
) -> Union["FunctionGraph", Dict[Variable, Variable]]:
    """Clone the graph and get a dict that maps old nodes to new ones
    Parameters:
    check_integrity: bool
        Whether to check integrity. Default is True.
    attach_feature: bool
        Whether to attach feature of origin graph to cloned graph.
        Default is True.
    Returns:
    e: FunctionGraph
        Cloned fgraph. Every node in cloned graph is cloned.
    equiv: dict
        A dict that map old node to new node.
    """
    # Map every original input/output (and intermediate node) to a clone.
    equiv = clone_get_equiv(self.inputs, self.outputs)
    if check_integrity:
        self.check_integrity()
    # Build the new graph from the already-cloned endpoints; clone=False
    # prevents a second round of cloning inside the constructor.
    e = FunctionGraph(
        [equiv[i] for i in self.inputs],
        [equiv[o] for o in self.outputs],
        clone=False,
    )
    if check_integrity:
        e.check_integrity()
    if attach_feature:
        # Re-attach this graph's features to the clone.
        for feature in self._features:
            e.attach_feature(feature)
    return e, equiv
def __getstate__(self):
    """Return a picklable state dict.

    Attributes injected by features (listed in each feature's
    ``pickle_rm_attr``) and the callback-timing table are stripped,
    since both can hold unpicklable objects (e.g. lambdas, decorated
    optimizers).
    """
    state = self.__dict__.copy()
    for feature in self._features:
        for attr_name in getattr(feature, "pickle_rm_attr", []):
            del state[attr_name]
    # The timing table references optimizers that aren't picklable.
    state.pop("execute_callbacks_times", None)
    return state
def __setstate__(self, dct):
    """Restore pickled state and let features re-install their hooks."""
    self.__dict__.update(dct)
    for feature in self._features:
        unpickle = getattr(feature, "unpickle", None)
        if unpickle is not None:
            unpickle(self)
def __contains__(self, item: Union[Variable, Apply]) -> bool:
    """Whether `item` (a variable or an apply node) belongs to this graph."""
    if isinstance(item, Variable):
        return item in self.variables
    if isinstance(item, Apply):
        return item in self.apply_nodes
    raise TypeError()
"""A container for specifying and manipulating a graph with distinct inputs and outputs."""
import time
import warnings
from collections import OrderedDict
from typing import Any, Dict, List, NoReturn, Optional, Tuple, Union
import aesara
from aesara.configdefaults import config
from aesara.graph.basic import Apply, Constant, Variable, applys_between
from aesara.graph.basic import as_string as graph_as_string
from aesara.graph.basic import clone_get_equiv, graph_inputs, io_toposort, vars_between
from aesara.graph.toolbox import AlreadyThere, Feature, ReplaceValidate
from aesara.graph.utils import MetaObject, TestValueError, get_variable_trace_string
from aesara.misc.ordered_set import OrderedSet
class InconsistencyError(Exception):
    """Raised by `FunctionGraph` listeners when the graph's state is invalid."""
class MissingInputError(Exception):
    """Raised when a symbolic input needed to compute the outputs is missing."""

    def __init__(self, *args, **kwargs):
        # The only supported keyword is ``variable``; when given, that
        # variable's trace string (if non-empty) is appended as an extra
        # message line.
        if kwargs:
            assert list(kwargs.keys()) == ["variable"]
            trace_msg = get_variable_trace_string(kwargs["variable"])
            if trace_msg:
                args = args + (trace_msg,)
        # Join on newlines so multi-part messages print on separate lines.
        super().__init__("\n".join(args))
class FunctionGraph(MetaObject):
"""
A `FunctionGraph` represents a subgraph bound by a set of input variables and
a set of output variables, ie a subgraph that specifies an Aesara function.
The inputs list should contain all the inputs on which the outputs depend.
``Variable``s of type ``Constant`` are not counted as inputs.
The `FunctionGraph` supports the replace operation which allows to replace
a variable in the subgraph by another, e.g. replace ``(x + x).out`` by
``(2 * x).out``. This is the basis for optimization in Aesara.
This class is also responsible for verifying that a graph is valid
(ie, all the dtypes and broadcast patterns are compatible with the
way the ``Variable``s are used) and for tracking the ``Variable``s with
a ``clients`` field that specifies which ``Apply`` nodes use the ``Variable``.
The ``clients`` field combined with the ``Variable.owner`` field and the
``Apply`` nodes' ``Apply.inputs`` field allows the graph to be traversed in
both directions.
It can also be extended with new features using
``FunctionGraph.attach_feature(<Feature instance>)``.
See ``Feature`` for event types and documentation.
Extra features allow the `FunctionGraph` to verify new properties of
a graph as it is optimized.
Historically, the `FunctionGraph` was called an ``Env``. Keep this in mind
while reading out-of-date documentation, e-mail support threads, etc.
The constructor creates a `FunctionGraph` which operates on the subgraph
bound by the inputs and outputs sets.
This class keeps a pointer to the inputs and outputs, and also modifies
them.
"""
def __init__(
    self,
    inputs: Optional[List[Variable]] = None,
    outputs: Optional[List[Variable]] = None,
    features: Optional[List[Feature]] = None,
    clone: bool = True,
    update_mapping: Optional[Dict[Variable, Variable]] = None,
    memo: Optional[Dict[Variable, Variable]] = None,
    copy_inputs: bool = True,
    copy_orphans: bool = True,
):
    """
    Create a `FunctionGraph` which operates on the subgraph between the
    `inputs` and `outputs`.
    Parameters
    ----------
    inputs
        Input variables of the graph.
    outputs
        Output variables of the graph.
    clone
        If ``True``, the graph will be cloned.
    features
        A list of features to be added to the `FunctionGraph`.
    update_mapping
        Mapping between the `inputs` with updates and the `outputs`
        corresponding to their updates.
    memo
        See ``clone_get_equiv``.
    copy_inputs
        See ``clone_get_equiv``.
    copy_orphans
        See ``clone_get_equiv``.
    """
    if outputs is None:
        raise ValueError("No outputs specified")
    # When inputs aren't given, derive them from the outputs' ownerless,
    # non-constant ancestors.
    if inputs is None:
        inputs = [i for i in graph_inputs(outputs) if not isinstance(i, Constant)]
    if clone:
        # Clone the whole subgraph so this FunctionGraph owns its nodes.
        memo = clone_get_equiv(
            inputs,
            outputs,
            copy_inputs=copy_inputs,
            copy_orphans=copy_orphans,
            memo=memo,
        )
        outputs = [memo[o] for o in outputs]
        inputs = [memo[i] for i in inputs]
    # Profiling accumulators for feature callbacks.
    self.execute_callbacks_time = 0
    self.execute_callbacks_times = {}
    if features is None:
        features = []
    self._features = []
    # All apply nodes in the subgraph defined by inputs and
    # outputs are cached in this field
    self.apply_nodes = set()
    # Ditto for variable nodes.
    # It must contain all fgraph.inputs and all apply_nodes
    # outputs even if they aren't used in the graph.
    self.variables = set()
    self.outputs = list(outputs)
    self.clients = {}
    for f in features:
        self.attach_feature(f)
    # Validated replacement support is always available.
    self.attach_feature(ReplaceValidate())
    self.inputs = []
    for in_var in inputs:
        # Inputs must be graph roots; an owned variable cannot be an input.
        if in_var.owner is not None:
            raise ValueError(
                "One of the provided inputs is the output of "
                "an already existing node. "
                "If that is okay, either discard that "
                "input's owner or use graph.clone."
            )
        self.add_input(in_var, check=False)
    # Importing each output pulls in every reachable node/variable.
    for output in outputs:
        self.import_var(output, reason="init")
    # Register each output position as a client of its variable.
    for i, output in enumerate(outputs):
        self.clients[output].append(("output", i))
    self.profile = None
    self.update_mapping = update_mapping
def add_input(self, var: Variable, check: bool = True) -> NoReturn:
    """Register `var` as an input of this `FunctionGraph`.

    Parameters
    ----------
    var : aesara.graph.basic.Variable
        The variable to append to ``self.inputs``.
    check : bool
        When ``True``, silently skip variables that are already inputs.
    """
    already_an_input = check and var in self.inputs
    if not already_an_input:
        self.inputs.append(var)
        self.setup_var(var)
        self.variables.add(var)
def setup_var(self, var: Variable) -> NoReturn:
    """Initialize graph bookkeeping for `var` (an empty client list), idempotently.

    Parameters
    ----------
    var : aesara.graph.basic.Variable
    """
    if var not in self.clients:
        self.clients[var] = []
def setup_node(self, node: Apply) -> NoReturn:
    """Validate that `node` may be added to this `FunctionGraph`.

    Rejects nodes whose op's ``view_map``/``destroy_map`` values are not
    list/tuple containers.

    Parameters
    ----------
    node : aesara.graph.basic.Apply
    """
    has_bad_view_map = node.op.view_map and not all(
        isinstance(view, (list, tuple)) for view in node.op.view_map.values()
    )
    if has_bad_view_map:
        raise Exception(
            f"Op '{node.op}' have a bad view map '{node.op.view_map}',"
            " the values must be tuples or lists."
        )
    has_bad_destroy_map = node.op.destroy_map and not all(
        isinstance(destroy, (list, tuple))
        for destroy in node.op.destroy_map.values()
    )
    if has_bad_destroy_map:
        raise Exception(
            f"Op '{node.op}' have a bad destroy map '{node.op.destroy_map}',"
            " the values must be tuples or lists."
        )
def disown(self) -> NoReturn:
    """Detach every feature and clear all internal graph state.

    After this call the `FunctionGraph` no longer references its nodes,
    variables, inputs, or outputs.
    """
    # Bug fix: iterate over a copy. `remove_feature` removes the feature
    # from `self._features`, and removing from a list while iterating it
    # skips every other element, leaving some features attached.
    for feature in list(self._features):
        self.remove_feature(feature)
    self.clients = {}
    self.apply_nodes = set()
    self.variables = set()
    self.inputs = None
    self.outputs = None
    self.profile = None
    self.update_mapping = None
def get_clients(self, var: Variable) -> List[Tuple[Apply, int]]:
    """Return a list of all the `(node, i)` pairs such that `node.inputs[i]` is `var`."""
    # Client lists are maintained by `add_client`/`remove_client`.
    return self.clients[var]
def add_client(self, var: Variable, new_client: Tuple[Apply, int]) -> NoReturn:
    """Update the clients of `var` with `new_clients`.
    Parameters
    ----------
    var : Variable.
    new_client : (Apply, int)
        A `(node, i)` pair such that `node.inputs[i]` is `var`.
    """
    # Duplicates are allowed: a node may use the same variable at several
    # input positions, so each (node, i) pair is appended unconditionally.
    self.clients[var].append(new_client)
def remove_client(
    self, var: Variable, client_to_remove: Tuple[Apply, int], reason: str = None
) -> NoReturn:
    """Recursively removes clients of a variable.
    This is the main method to remove variables or `Apply` nodes from
    a `FunctionGraph`.
    This will remove `var` from the `FunctionGraph` if it doesn't have any
    clients remaining. If it has an owner and all the outputs of the owner
    have no clients, it will also be removed.
    Parameters
    ----------
    var : Variable
        The clients of `var` that will be removed.
    client_to_remove : pair of (Apply, int)
        A `(node, i)` pair such that `node.inputs[i]` will no longer be
        `var` in this `FunctionGraph`.
    """
    # Explicit work stack instead of recursion, so deep graphs don't hit
    # the recursion limit.
    removal_stack = [(var, client_to_remove)]
    while removal_stack:
        var, client_to_remove = removal_stack.pop()
        try:
            var_clients = self.clients[var]
            var_clients.remove(client_to_remove)
        except ValueError:
            # In this case, the original `var` could've been removed from
            # the current `var`'s client list before this call.
            # There's nothing inherently wrong with that, so we continue as
            # if it were removed here.
            var_clients = None
        if var_clients:
            # Other clients remain: the variable stays in the graph.
            continue
        # Now, `var` has no more clients, so check if we need to remove it
        # and its `Apply` node
        if not var.owner:
            # The `var` is a `Constant` or an input without a client, so we
            # remove it
            self.variables.remove(var)
        else:
            apply_node = var.owner
            if not any(
                output for output in apply_node.outputs if self.clients[output]
            ):
                # The `Apply` node is not used and is not an output, so we
                # remove it and its outputs
                if not hasattr(apply_node.tag, "removed_by"):
                    apply_node.tag.removed_by = []
                # Record why the node was pruned, for debugging.
                apply_node.tag.removed_by.append(str(reason))
                self.apply_nodes.remove(apply_node)
                self.variables.difference_update(apply_node.outputs)
                self.execute_callbacks("on_prune", apply_node, reason)
                # The removed node was itself a client of each of its
                # inputs; queue those client removals too.
                for i, in_var in enumerate(apply_node.inputs):
                    removal_stack.append((in_var, (apply_node, i)))
def import_var(
    self, var: Variable, reason: str = None, import_missing: bool = False
) -> NoReturn:
    """Import variables into this `FunctionGraph`.
    This will also import the `variable`'s `Apply` node.
    Parameters:
    ----------
    variable : aesara.graph.basic.Variable
        The variable to be imported.
    reason : str
        The name of the optimization or operation in progress.
    import_missing : bool
        Add missing inputs instead of raising an exception.
    """
    # Imports the owners of the variables
    if var.owner and var.owner not in self.apply_nodes:
        self.import_node(var.owner, reason=reason, import_missing=import_missing)
    elif (
        var.owner is None
        and not isinstance(var, Constant)
        and var not in self.inputs
    ):
        # An ownerless, non-constant variable must be a declared input.
        from aesara.graph.null_type import NullType
        if isinstance(var.type, NullType):
            raise TypeError(
                f"Computation graph contains a NaN. {var.type.why_null}"
            )
        if import_missing:
            self.add_input(var)
        else:
            raise MissingInputError(f"Undeclared input: {var}", variable=var)
    self.setup_var(var)
    self.variables.add(var)
def import_node(
    self,
    apply_node: Apply,
    check: bool = True,
    reason: str = None,
    import_missing: bool = False,
) -> NoReturn:
    """Recursively import everything between an `Apply` node and the `FunctionGraph`'s outputs.
    Parameters:
    ----------
    apply_node : aesara.graph.basic.Apply
        The node to be imported.
    check : bool
        Check that the inputs for the imported nodes are also present in
        the `FunctionGraph`.
    reason : str
        The name of the optimization or operation in progress.
    import_missing : bool
        Add missing inputs instead of raising an exception.
    """
    # We import the nodes in topological order. We only are interested in
    # new nodes, so we use all variables we know of as if they were the
    # input set. (The functions in the graph module only use the input set
    # to know where to stop going down.)
    new_nodes = io_toposort(self.variables, apply_node.outputs)
    if check:
        # First pass: validate (or add) missing inputs before mutating
        # any graph state, so a failed import leaves the graph untouched.
        for node in new_nodes:
            for var in node.inputs:
                if (
                    var.owner is None
                    and not isinstance(var, Constant)
                    and var not in self.inputs
                ):
                    if import_missing:
                        self.add_input(var)
                    else:
                        error_msg = (
                            f"Input {node.inputs.index(var)} ({var})"
                            " of the graph (indices start "
                            f"from 0), used to compute {node}, was not "
                            "provided and not given a value. Use the "
                            "Aesara flag exception_verbosity='high', "
                            "for more information on this error."
                        )
                        raise MissingInputError(error_msg, variable=var)
    # Second pass: register each new node, its outputs, and its inputs.
    for node in new_nodes:
        assert node not in self.apply_nodes
        self.setup_node(node)
        self.apply_nodes.add(node)
        if not hasattr(node.tag, "imported_by"):
            node.tag.imported_by = []
        # Record why the node entered the graph, for debugging.
        node.tag.imported_by.append(str(reason))
        for output in node.outputs:
            self.setup_var(output)
            self.variables.add(output)
        for i, input in enumerate(node.inputs):
            if input not in self.variables:
                self.setup_var(input)
                self.variables.add(input)
            self.add_client(input, (node, i))
        self.execute_callbacks("on_import", node, reason)
def change_input(
    self,
    node: Apply,
    i: int,
    new_var: Variable,
    reason: str = None,
    import_missing: bool = False,
) -> NoReturn:
    """Change ``node.inputs[i]`` to `new_var`.
    ``new_var.type == old_var.type`` must be ``True``, where ``old_var`` is the
    current value of ``node.inputs[i]`` which we want to replace.
    For each feature that has an `on_change_input` method, this method calls:
    ``feature.on_change_input(function_graph, node, i, old_var, new_var, reason)``
    Parameters
    ----------
    node : aesara.graph.basic.Apply or str
        The node for which an input is to be changed. If the value is
        the string ``"output"`` then the ``self.outputs`` will be used
        instead of ``node.inputs``.
    i : int
        The index in `node.inputs` that we want to change.
    new_var : aesara.graph.basic.Variable
        The new variable to take the place of ``node.inputs[i]``.
    import_missing : bool
        Add missing inputs instead of raising an exception.
    """
    # TODO: ERROR HANDLING FOR LISTENERS (should it complete the change or revert it?)
    # The two branches previously duplicated the type check and the slot
    # assignment; select the target list once instead.
    target = self.outputs if node == "output" else node.inputs
    r = target[i]
    if not r.type == new_var.type:
        raise TypeError(
            "The type of the replacement must be the"
            " same as the type of the original Variable.",
            r,
            new_var,
        )
    target[i] = new_var
    if r is new_var:
        # Replacing a variable with itself is a no-op.
        return
    self.import_var(new_var, reason=reason, import_missing=import_missing)
    self.add_client(new_var, (node, i))
    self.remove_client(r, (node, i), reason=reason)
    # Precondition: the substitution is semantically valid However it may
    # introduce cycles to the graph, in which case the transaction will be
    # reverted later.
    self.execute_callbacks("on_change_input", node, i, r, new_var, reason=reason)
def replace(
    self,
    var: Variable,
    new_var: Variable,
    reason: str = None,
    verbose: bool = None,
    import_missing: bool = False,
) -> NoReturn:
    """Replace a variable in the `FunctionGraph`.
    This is the main interface to manipulate the subgraph in `FunctionGraph`.
    For every node that uses `var` as input, makes it use `new_var` instead.
    Parameters:
    ----------
    var : aesara.graph.basic.Variable
        The variable to be replaced.
    new_var : aesara.graph.basic.Variable
        The variable to replace `var`.
    reason : str
        The name of the optimization or operation in progress.
    verbose : bool
        Print `reason`, `var`, and `new_var`.
    import_missing : bool
        Import missing variables.
    """
    if verbose is None:
        verbose = config.optimizer_verbose
    if verbose:
        print(reason, var, new_var)
    # Coerce `new_var` to `var`'s type; this may wrap `new_var` in a
    # conversion, and it raises if the types are incompatible.
    new_var = var.type.filter_variable(new_var, allow_convert=True)
    if var not in self.variables:
        # TODO: Raise an actual exception here.
        # Old comment:
        # this variable isn't in the graph... don't raise an
        # exception here, just return silently because it makes it
        # easier to implement some optimizations for
        # multiple-output ops
        # raise ValueError()
        warnings.warn(
            f"Variable {var} cannot be replaced; it isn't in the FunctionGraph"
        )
        return
    if config.compute_test_value != "off":
        # When test values are enabled, require the replacement's test
        # value to have the same shape as the original's.
        try:
            tval = aesara.graph.op.get_test_value(var)
            new_tval = aesara.graph.op.get_test_value(new_var)
        except TestValueError:
            # At least one variable has no test value; nothing to compare.
            pass
        else:
            tval_shape = getattr(tval, "shape", None)
            new_tval_shape = getattr(new_tval, "shape", None)
            if tval_shape != new_tval_shape:
                raise AssertionError(
                    "The replacement variable has a test value with "
                    "a shape different from the original variable's "
                    f"test value. Original: {tval_shape}, new: {new_tval_shape}"
                )
    # Iterate over a snapshot of the client list: `change_input` mutates it.
    for node, i in list(self.clients[var]):
        assert (node == "output" and self.outputs[i] is var) or (
            node.inputs[i] is var
        )
        self.change_input(
            node, i, new_var, reason=reason, import_missing=import_missing
        )
def replace_all(self, pairs: List[Tuple[Variable, Variable]], **kwargs) -> NoReturn:
    """Apply `self.replace` to each ``(old, new)`` pair in `pairs`, forwarding `kwargs`."""
    for old_var, replacement in pairs:
        self.replace(old_var, replacement, **kwargs)
def attach_feature(self, feature: Feature) -> NoReturn:
    """Attach a `Feature` to this `FunctionGraph` and trigger its `on_attach` callback."""
    # The exact same feature object is attached at most once.
    if feature in self._features:
        return
    # `on_attach` may raise `AlreadyThere` when a functionally equivalent
    # feature is already installed; in that case attachment is skipped.
    on_attach = getattr(feature, "on_attach", None)
    if on_attach is not None:
        try:
            on_attach(self)
        except AlreadyThere:
            return
    # Start the per-feature callback timer at zero.
    self.execute_callbacks_times.setdefault(feature, 0)
    self._features.append(feature)
def remove_feature(self, feature: Feature) -> NoReturn:
    """Detach `feature` from the graph, calling its ``on_detach`` hook if present."""
    try:
        self._features.remove(feature)
    except ValueError:
        # The feature was never attached; nothing to do.
        return
    on_detach = getattr(feature, "on_detach", None)
    if on_detach is not None:
        on_detach(self)
def execute_callbacks(self, name: str, *args, **kwargs) -> NoReturn:
    """Invoke ``getattr(feature, name)(self, *args, **kwargs)`` on every feature defining it.

    Per-feature and total callback times are accumulated for profiling.
    """
    start = time.time()
    for feature in self._features:
        try:
            callback = getattr(feature, name)
        except AttributeError:
            # The feature does not implement this event; skip it.
            continue
        cb_start = time.time()
        callback(self, *args, **kwargs)
        self.execute_callbacks_times[feature] += time.time() - cb_start
    self.execute_callbacks_time += time.time() - start
def collect_callbacks(self, name: str, *args) -> Dict[Feature, Any]:
    """Return ``{feature: getattr(feature, name)(*args)}`` over features defining `name`."""
    results = {}
    for feature in self._features:
        try:
            callback = getattr(feature, name)
        except AttributeError:
            # Feature doesn't implement this callback; omit it.
            continue
        results[feature] = callback(*args)
    return results
def toposort(self) -> List[Apply]:
    """Return the graph's `Apply` nodes in a dependency-respecting order.

    The order satisfies the data dependencies of the graph plus any extra
    constraints contributed by features that implement `orderings`.
    """
    # With fewer than two nodes there is nothing to sort.  This happens a
    # lot in practice (e.g. the OpWiseCLinker produces one-node graphs).
    if len(self.apply_nodes) < 2:
        return list(self.apply_nodes)
    return io_toposort(self.inputs, self.outputs, self.orderings())
def orderings(self) -> Dict[Apply, List[Apply]]:
    """Return `dict` `d` s.t. `d[node]` is a list of nodes that must be evaluated before `node` itself can be evaluated.
    This is used primarily by the destroy_handler feature to ensure that
    the clients of any destroyed inputs have already computed their
    outputs.
    Notes
    -----
    This only calls the `orderings()` function on all features. It does not
    take care of computing the dependencies by itself.
    """
    assert isinstance(self._features, list)
    all_orderings = []
    for feature in self._features:
        if hasattr(feature, "orderings"):
            orderings = feature.orderings(self)
            # Require a deterministic mapping so that `toposort` results
            # are reproducible across runs.
            if not isinstance(orderings, OrderedDict):
                raise TypeError(
                    "Non-deterministic return value from "
                    + str(feature.orderings)
                    + ". Nondeterministic object is "
                    + str(orderings)
                )
            if len(orderings) > 0:
                all_orderings.append(orderings)
                for node, prereqs in orderings.items():
                    # The prerequisite collections must also iterate
                    # deterministically (list or OrderedSet).
                    if not isinstance(prereqs, (list, OrderedSet)):
                        raise TypeError(
                            "prereqs must be a type with a "
                            "deterministic iteration order, or toposort "
                            " will be non-deterministic."
                        )
    if len(all_orderings) == 1:
        # If there is only 1 ordering, we reuse it directly.
        return all_orderings[0].copy()
    else:
        # If there is more than 1 ordering, combine them.
        ords = OrderedDict()
        for orderings in all_orderings:
            for node, prereqs in orderings.items():
                ords.setdefault(node, []).extend(prereqs)
        return ords
def check_integrity(self) -> NoReturn:
    """
    Call this for a diagnosis if things go awry.

    Verifies that the cached node/variable sets and the client lists are
    consistent with what is actually reachable between the graph's inputs
    and outputs, raising `Exception` on the first inconsistency found.
    """
    # The apply nodes reachable between inputs and outputs must match the
    # cached `apply_nodes` set exactly.
    nodes = set(applys_between(self.inputs, self.outputs))
    if self.apply_nodes != nodes:
        missing = nodes.difference(self.apply_nodes)
        excess = self.apply_nodes.difference(nodes)
        raise Exception(
            "The nodes are inappropriately cached. missing, in excess: ",
            missing,
            excess,
        )
    # Every input slot of every node must be registered in that
    # variable's client list.
    for node in nodes:
        for i, variable in enumerate(node.inputs):
            clients = self.clients[variable]
            if (node, i) not in clients:
                raise Exception(
                    f"Inconsistent clients list {(node, i)} in {clients}"
                )
    # The cached variable set must match the reachable variables.
    variables = set(vars_between(self.inputs, self.outputs))
    if set(self.variables) != variables:
        missing = variables.difference(self.variables)
        excess = self.variables.difference(variables)
        raise Exception(
            "The variables are inappropriately cached. missing, in excess: ",
            missing,
            excess,
        )
    for variable in variables:
        # Ownerless, non-constant variables must be declared inputs.
        if (
            variable.owner is None
            and variable not in self.inputs
            and not isinstance(variable, Constant)
        ):
            raise Exception(f"Undeclared input: {variable}")
        # Each client entry must point back at this exact variable,
        # whether the client is an output slot or a node's input slot.
        for node, i in self.clients[variable]:
            if node == "output":
                if self.outputs[i] is not variable:
                    raise Exception(
                        f"Inconsistent clients list: {variable}, {self.outputs[i]}"
                    )
                continue
            if node not in nodes:
                raise Exception(
                    f"Client not in FunctionGraph: {variable}, {(node, i)}"
                )
            if node.inputs[i] is not variable:
                raise Exception(
                    f"Inconsistent clients list: {variable}, {node.inputs[i]}"
                )
def __repr__(self):
    """Return a string like ``FunctionGraph(<inputs> -> <outputs>)``."""
    body = ", ".join(graph_as_string(self.inputs, self.outputs))
    return f"FunctionGraph({body})"
def clone(self, check_integrity=True) -> "FunctionGraph":
    """Return a clone of this graph; the old-to-new equivalence dict is discarded."""
    cloned_graph, _equiv = self.clone_get_equiv(check_integrity)
    return cloned_graph
def clone_get_equiv(
    self, check_integrity: bool = True, attach_feature: bool = True
) -> Union["FunctionGraph", Dict[Variable, Variable]]:
    """Clone the graph and get a dict that maps old nodes to new ones
    Parameters:
    check_integrity: bool
        Whether to check integrity. Default is True.
    attach_feature: bool
        Whether to attach feature of origin graph to cloned graph.
        Default is True.
    Returns:
    e: FunctionGraph
        Cloned fgraph. Every node in cloned graph is cloned.
    equiv: dict
        A dict that map old node to new node.
    """
    # Map every original input/output (and intermediate node) to a clone.
    equiv = clone_get_equiv(self.inputs, self.outputs)
    if check_integrity:
        self.check_integrity()
    # Build the new graph from the already-cloned endpoints; clone=False
    # prevents a second round of cloning inside the constructor.
    e = FunctionGraph(
        [equiv[i] for i in self.inputs],
        [equiv[o] for o in self.outputs],
        clone=False,
    )
    if check_integrity:
        e.check_integrity()
    if attach_feature:
        # Re-attach this graph's features to the clone.
        for feature in self._features:
            e.attach_feature(feature)
    return e, equiv
def __getstate__(self):
    """Return a picklable state dict.

    Attributes injected by features (listed in each feature's
    ``pickle_rm_attr``) and the callback-timing table are stripped,
    since both can hold unpicklable objects (e.g. lambdas, decorated
    optimizers).
    """
    state = self.__dict__.copy()
    for feature in self._features:
        for attr_name in getattr(feature, "pickle_rm_attr", []):
            del state[attr_name]
    # The timing table references optimizers that aren't picklable.
    state.pop("execute_callbacks_times", None)
    return state
def __setstate__(self, dct):
    """Restore pickled state and let features re-install their hooks."""
    self.__dict__.update(dct)
    for feature in self._features:
        unpickle = getattr(feature, "unpickle", None)
        if unpickle is not None:
            unpickle(self)
def __contains__(self, item: Union[Variable, Apply]) -> bool:
    """Whether `item` (a variable or an apply node) belongs to this graph."""
    if isinstance(item, Variable):
        return item in self.variables
    if isinstance(item, Apply):
        return item in self.apply_nodes
    raise TypeError()
|
from __future__ import annotations
import json
from typing import Union
from anyio import TASK_STATUS_IGNORED
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.trace import async_get_trace, async_list_traces
from homeassistant.components.websocket_api import async_register_command
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.json import ExtendedJSONEncoder
from .base import ReactTask
from ..base import ReactBase
from ..enums import ReactStage
from ..const import (
DOMAIN,
)
async def async_setup_task(react: ReactBase) -> TASK_STATUS_IGNORED:
    """Create and return the websocket-API setup task for this React instance."""
    task = Task(react=react)
    return task
class Task(ReactTask):
    """Setup the React websocket API."""

    # Run during React's SETUP startup stage.
    stages = [ReactStage.SETUP]

    async def async_execute(self) -> None:
        """Execute the task: register every React websocket command handler."""
        async_register_command(self.react.hass, react_status)
        async_register_command(self.react.hass, react_subscribe)
        async_register_command(self.react.hass, react_get_traces)
        async_register_command(self.react.hass, react_get_trace)
@websocket_api.websocket_command(
    {
        vol.Required("type"): "react/trace/list",
        vol.Required("workflow_id"): cv.string,
    }
)
@websocket_api.require_admin
@websocket_api.async_response
async def react_get_traces(hass, connection, msg):
    """Send the list of stored traces for the requested workflow.

    Traces are looked up under the item key ``<DOMAIN>.<workflow_id>``;
    returns silently when no workflow id was supplied.
    """
    workflow_id = msg.get("workflow_id")
    if workflow_id is None:
        return

    # Fix: the subscript inside the f-string reused the outer double quotes
    # (f"{DOMAIN}.{msg["workflow_id"]}"), a SyntaxError before Python 3.12.
    # The trailing `if "workflow_id" in msg else None` was also dead code:
    # the early return above already guarantees the key is present.
    key = f"{DOMAIN}.{workflow_id}"

    traces = await async_list_traces(hass, DOMAIN, key)

    connection.send_result(msg["id"], traces)
@websocket_api.require_admin
@websocket_api.websocket_command(
    {
        vol.Required("type"): "react/trace/get",
        vol.Required("workflow_id"): str,
        vol.Required("run_id"): str,
    }
)
@websocket_api.async_response
async def react_get_trace(hass, connection, msg):
    """Send a single stored trace for a workflow run, or a not-found error.

    The result is serialized with ExtendedJSONEncoder because traces may
    contain values the default websocket serializer cannot handle.
    """
    # Fix: the subscript inside the f-string reused the outer double quotes
    # (f"{DOMAIN}.{msg["workflow_id"]}"), a SyntaxError before Python 3.12.
    key = f"{DOMAIN}.{msg['workflow_id']}"
    run_id = msg["run_id"]

    try:
        requested_trace = await async_get_trace(hass, key, run_id)
    except KeyError:
        connection.send_error(
            msg["id"], websocket_api.ERR_NOT_FOUND, "The trace could not be found"
        )
        return

    message = websocket_api.messages.result_message(msg["id"], requested_trace)

    connection.send_message(
        json.dumps(message, cls=ExtendedJSONEncoder, allow_nan=False)
    )
@websocket_api.websocket_command({vol.Required("type"): "react/status"})
@websocket_api.require_admin
@websocket_api.async_response
async def react_status(hass, connection, msg):
    """Report the current status of the React integration to the client."""
    react: ReactBase = hass.data.get(DOMAIN)

    status = {
        "startup": react.status.startup,
        "background_task": False,
        # "lovelace_mode": react.core.lovelace_mode,
        "reloading_data": react.status.reloading_data,
        "upgrading_all": react.status.upgrading_all,
        "disabled": react.system.disabled,
        "disabled_reason": react.system.disabled_reason,
        # "has_pending_tasks": react.queue.has_pending_tasks,
        "stage": react.stage,
    }

    connection.send_message(websocket_api.result_message(msg["id"], status))
@websocket_api.websocket_command(
    {
        vol.Required("type"): "react/subscribe",
        vol.Required("signal"): str,
    }
)
@websocket_api.require_admin
@websocket_api.async_response
async def react_subscribe(
    hass: HomeAssistant,
    connection: websocket_api.ActiveConnection,
    msg: dict,
) -> None:
    """Handle websocket subscriptions."""

    @callback
    def forward_messages(data: Union[dict, None] = None):
        """Forward dispatcher events to the websocket client."""
        connection.send_message(websocket_api.event_message(msg["id"], data))

    # Register the dispatcher listener and remember its unsubscribe callback
    # so the connection can tear it down when the client unsubscribes.
    unsubscribe = async_dispatcher_connect(hass, msg["signal"], forward_messages)
    connection.subscriptions[msg["id"]] = unsubscribe

    connection.send_message(websocket_api.result_message(msg["id"]))
| from __future__ import annotations
import json
from typing import Union
from anyio import TASK_STATUS_IGNORED
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.trace import async_get_trace, async_list_traces
from homeassistant.components.websocket_api import async_register_command
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.json import ExtendedJSONEncoder
from .base import ReactTask
from ..base import ReactBase
from ..enums import ReactStage
from ..const import (
DOMAIN,
)
async def async_setup_task(react: ReactBase) -> TASK_STATUS_IGNORED:
    """Build the task that wires up React's websocket API."""
    return Task(react=react)
class Task(ReactTask):
    """Setup the React websocket API."""

    # Executed during the SETUP stage of React's startup sequence.
    stages = [ReactStage.SETUP]

    async def async_execute(self) -> None:
        """Execute the task: register the four React websocket commands."""
        async_register_command(self.react.hass, react_status)
        async_register_command(self.react.hass, react_subscribe)
        async_register_command(self.react.hass, react_get_traces)
        async_register_command(self.react.hass, react_get_trace)
@websocket_api.websocket_command(
    {
        vol.Required("type"): "react/trace/list",
        vol.Required("workflow_id"): cv.string,
    }
)
@websocket_api.require_admin
@websocket_api.async_response
async def react_get_traces(hass, connection, msg):
    """Send the list of stored traces for the requested workflow."""
    react: ReactBase = hass.data.get(DOMAIN)

    workflow_id = msg.get("workflow_id")
    if workflow_id is None:
        return

    if "workflow_id" in msg:
        key = f"{DOMAIN}.{msg['workflow_id']}"
    else:
        key = None

    traces = await async_list_traces(hass, DOMAIN, key)
    connection.send_result(msg["id"], traces)
@websocket_api.require_admin
@websocket_api.websocket_command(
    {
        vol.Required("type"): "react/trace/get",
        vol.Required("workflow_id"): str,
        vol.Required("run_id"): str,
    }
)
@websocket_api.async_response
async def react_get_trace(hass, connection, msg):
    """Send one stored trace for a workflow run, or a not-found error."""
    trace_key = f"{DOMAIN}.{msg['workflow_id']}"
    run_id = msg["run_id"]

    try:
        requested_trace = await async_get_trace(hass, trace_key, run_id)
    except KeyError:
        connection.send_error(
            msg["id"], websocket_api.ERR_NOT_FOUND, "The trace could not be found"
        )
        return

    # Serialize with ExtendedJSONEncoder: traces may hold values the default
    # websocket serializer cannot represent.
    payload = websocket_api.messages.result_message(msg["id"], requested_trace)
    connection.send_message(
        json.dumps(payload, cls=ExtendedJSONEncoder, allow_nan=False)
    )
@websocket_api.websocket_command({vol.Required("type"): "react/status"})
@websocket_api.require_admin
@websocket_api.async_response
async def react_status(hass, connection, msg):
    """Answer a status query with a snapshot of React's runtime state."""
    react: ReactBase = hass.data.get(DOMAIN)

    snapshot = {
        "startup": react.status.startup,
        "background_task": False,
        # "lovelace_mode": react.core.lovelace_mode,
        "reloading_data": react.status.reloading_data,
        "upgrading_all": react.status.upgrading_all,
        "disabled": react.system.disabled,
        "disabled_reason": react.system.disabled_reason,
        # "has_pending_tasks": react.queue.has_pending_tasks,
        "stage": react.stage,
    }

    reply = websocket_api.result_message(msg["id"], snapshot)
    connection.send_message(reply)
@websocket_api.websocket_command(
    {
        vol.Required("type"): "react/subscribe",
        vol.Required("signal"): str,
    }
)
@websocket_api.require_admin
@websocket_api.async_response
async def react_subscribe(
    hass: HomeAssistant,
    connection: websocket_api.ActiveConnection,
    msg: dict,
) -> None:
    """Handle websocket subscriptions."""
    message_id = msg["id"]

    @callback
    def forward_messages(data: Union[dict, None] = None):
        """Relay a dispatcher event to the subscribed websocket client."""
        connection.send_message(websocket_api.event_message(message_id, data))

    connection.subscriptions[message_id] = async_dispatcher_connect(
        hass,
        msg["signal"],
        forward_messages,
    )

    connection.send_message(websocket_api.result_message(message_id))
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016-2019 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
import warnings
import numpy as np
import librosa
from librosa.util import valid_audio
from librosa.util.exceptions import ParameterError
from pyannote.core import SlidingWindow, SlidingWindowFeature
from soundfile import SoundFile
import soundfile as sf
def get_audio_duration(current_file):
    """Return audio file duration

    Parameters
    ----------
    current_file : dict
        Dictionary given by pyannote.database.

    Returns
    -------
    duration : float
        Audio file duration, in seconds.
    """
    with SoundFile(current_file["audio"], "r") as sound_file:
        # frames / samplerate = total duration in seconds (true division)
        return sound_file.frames / sound_file.samplerate
def get_audio_sample_rate(current_file):
    """Return audio file sampling rate

    Parameters
    ----------
    current_file : dict
        Dictionary given by pyannote.database.

    Returns
    -------
    sample_rate : int
        Sampling rate, in Hz.
    """
    with SoundFile(current_file["audio"], "r") as sound_file:
        return sound_file.samplerate
def read_audio(current_file, sample_rate=None, mono=True):
    """Read audio file

    Parameters
    ----------
    current_file : dict
        Dictionary given by pyannote.database.
    sample_rate: int, optional
        Target sampling rate. Defaults to using native sampling rate.
    mono : int, optional
        Convert multi-channel to mono. Defaults to True.

    Returns
    -------
    y : (n_samples, n_channels) np.array
        Audio samples.
    sample_rate : int
        Sampling rate.

    Notes
    -----
    In case `current_file` contains a `channel` key, data of this (1-indexed)
    channel will be returned.
    """
    y, file_sample_rate = sf.read(
        current_file["audio"], dtype="float32", always_2d=True
    )

    # keep only the requested (1-indexed) channel, if any
    channel = current_file.get("channel", None)
    if channel is not None:
        y = y[:, channel - 1 : channel]

    # average channels down to a single one when mono output is requested
    if mono and y.shape[1] > 1:
        y = np.mean(y, axis=1, keepdims=True)

    needs_resampling = (sample_rate is not None) and (file_sample_rate != sample_rate)
    if needs_resampling:
        if y.shape[1] == 1:
            # librosa expects mono audio to be of shape (n,), but we have (n, 1).
            y = librosa.core.resample(y[:, 0], file_sample_rate, sample_rate)[:, None]
        else:
            y = librosa.core.resample(y.T, file_sample_rate, sample_rate).T
    else:
        # report the rate the samples are actually at
        sample_rate = file_sample_rate

    return y, sample_rate
class RawAudio:
    """Raw audio with on-the-fly data augmentation

    Parameters
    ----------
    sample_rate: int, optional
        Target sampling rate. Defaults to using native sampling rate.
    mono : int, optional
        Convert multi-channel to mono. Defaults to True.
    augmentation : `pyannote.audio.augmentation.Augmentation`, optional
        Data augmentation.
    """

    def __init__(self, sample_rate=None, mono=True, augmentation=None):
        super().__init__()
        self.sample_rate = sample_rate
        self.mono = mono
        self.augmentation = augmentation

        if sample_rate is not None:
            # One frame per sample, centered on the sample.
            self.sliding_window_ = SlidingWindow(
                start=-0.5 / sample_rate,
                duration=1.0 / sample_rate,
                step=1.0 / sample_rate,
            )

    @property
    def dimension(self):
        """Feature dimension (waveform samples are scalar)."""
        return 1

    @property
    def sliding_window(self):
        """Sample-to-time mapping (only set when `sample_rate` was given)."""
        return self.sliding_window_

    def get_features(self, y, sample_rate):
        """Convert to mono, resample, augment, and validate a waveform.

        Parameters
        ----------
        y : (n_samples, n_channels) np.ndarray
            Raw waveform.
        sample_rate : int
            Sampling rate of `y`.

        Returns
        -------
        y : (n_samples, n_channels) np.ndarray
            Processed waveform.

        Raises
        ------
        ValueError
            When the (possibly augmented) waveform is not valid audio.
        """
        # convert to mono
        if self.mono:
            y = np.mean(y, axis=1, keepdims=True)

        # resample if sample rates mismatch
        if (self.sample_rate is not None) and (self.sample_rate != sample_rate):
            if y.shape[1] == 1:
                # librosa expects mono audio to be of shape (n,), but we have (n, 1).
                y = librosa.core.resample(y[:, 0], sample_rate, self.sample_rate)[:, None]
            else:
                y = librosa.core.resample(y.T, sample_rate, self.sample_rate).T
            sample_rate = self.sample_rate

        # augment data
        if self.augmentation is not None:
            y = self.augmentation(y, sample_rate)

        # TODO: how time consuming is this thing (needs profiling...)
        try:
            valid = valid_audio(y[:, 0], mono=True)
        except ParameterError as e:
            # Fix: plain string (the original used a pointless f-prefix with no
            # placeholder) and chain the librosa error so the cause is kept.
            msg = "Something went wrong when augmenting waveform."
            raise ValueError(msg) from e

        return y

    def __call__(self, current_file, return_sr=False):
        """Obtain waveform

        Parameters
        ----------
        current_file : dict
            `pyannote.database` files.
        return_sr : `bool`, optional
            Return sample rate. Defaults to False

        Returns
        -------
        waveform : `pyannote.core.SlidingWindowFeature`
            Waveform
        sample_rate : `int`
            Only when `return_sr` is set to True
        """
        if "waveform" in current_file:

            if self.sample_rate is None:
                msg = (
                    "`RawAudio` needs to be instantiated with an actual "
                    "`sample_rate` if one wants to use precomputed "
                    "waveform."
                )
                raise ValueError(msg)
            sample_rate = self.sample_rate

            y = current_file["waveform"]
            if len(y.shape) != 2:
                msg = (
                    "Precomputed waveform should be provided as a "
                    "(n_samples, n_channels) `np.ndarray`."
                )
                raise ValueError(msg)

        else:
            y, sample_rate = sf.read(
                current_file["audio"], dtype="float32", always_2d=True
            )

        # extract specific channel if requested
        channel = current_file.get("channel", None)
        if channel is not None:
            y = y[:, channel - 1 : channel]

        y = self.get_features(y, sample_rate)

        sliding_window = SlidingWindow(
            start=-0.5 / sample_rate, duration=1.0 / sample_rate, step=1.0 / sample_rate
        )

        if return_sr:
            return (
                SlidingWindowFeature(y, sliding_window),
                sample_rate if self.sample_rate is None else self.sample_rate,
            )

        return SlidingWindowFeature(y, sliding_window)

    def get_context_duration(self):
        """Extra context (in seconds) needed on each side of a crop: none."""
        return 0.0

    def crop(self, current_file, segment, mode="center", fixed=None):
        """Fast version of self(current_file).crop(segment, **kwargs)

        Parameters
        ----------
        current_file : dict
            `pyannote.database` file.
        segment : `pyannote.core.Segment`
            Segment from which to extract features.
        mode : {'loose', 'strict', 'center'}, optional
            In 'strict' mode, only frames fully included in 'segment' are
            returned. In 'loose' mode, any intersecting frames are returned. In
            'center' mode, first and last frames are chosen to be the ones
            whose centers are the closest to 'focus' start and end times.
            Defaults to 'center'.
        fixed : float, optional
            Overrides `Segment` 'focus' duration and ensures that the number of
            returned frames is fixed (which might otherwise not be the case
            because of rounding errors). Has no effect in 'strict' or 'loose'
            modes.

        Returns
        -------
        waveform : (n_samples, n_channels) numpy array
            Waveform

        See also
        --------
        `pyannote.core.SlidingWindowFeature.crop`
        """
        if self.sample_rate is None:
            msg = (
                "`RawAudio` needs to be instantiated with an actual "
                "`sample_rate` if one wants to use `crop`."
            )
            raise ValueError(msg)

        # find the start and end positions of the required segment
        ((start, end),) = self.sliding_window_.crop(
            segment, mode=mode, fixed=fixed, return_ranges=True
        )

        # this is expected number of samples.
        # this will be useful later in case of on-the-fly resampling
        n_samples = end - start

        if "waveform" in current_file:
            y = current_file["waveform"]
            if len(y.shape) != 2:
                msg = (
                    "Precomputed waveform should be provided as a "
                    "(n_samples, n_channels) `np.ndarray`."
                )
                raise ValueError(msg)
            sample_rate = self.sample_rate
            data = y[start:end]

        else:
            # read file with SoundFile, which supports various formats
            # including NIST sphere
            with SoundFile(current_file["audio"], "r") as audio_file:
                sample_rate = audio_file.samplerate

                # if the sample rates are mismatched,
                # recompute the start and end in native samples
                if sample_rate != self.sample_rate:
                    sliding_window = SlidingWindow(
                        start=-0.5 / sample_rate,
                        duration=1.0 / sample_rate,
                        step=1.0 / sample_rate,
                    )
                    ((start, end),) = sliding_window.crop(
                        segment, mode=mode, fixed=fixed, return_ranges=True
                    )

                try:
                    audio_file.seek(start)
                    data = audio_file.read(end - start, dtype="float32", always_2d=True)
                except RuntimeError:
                    # Fix: the f-string subscript reused the outer double quotes
                    # ({current_file["audio"]}), a SyntaxError before Python 3.12.
                    msg = (
                        f"SoundFile failed to seek-and-read in "
                        f"{current_file['audio']}: loading the whole file..."
                    )
                    warnings.warn(msg)
                    return self(current_file).crop(segment, mode=mode, fixed=fixed)

        # extract specific channel if requested
        channel = current_file.get("channel", None)
        if channel is not None:
            data = data[:, channel - 1 : channel]

        return self.get_features(data, sample_rate)
# # THIS SCRIPT CAN BE USED TO CRASH-TEST THE ON-THE-FLY RESAMPLING
# import numpy as np
# from pyannote.audio.features import RawAudio
# from pyannote.core import Segment
# from pyannote.audio.features.utils import get_audio_duration
# from tqdm import tqdm
#
# TEST_FILE = '/Users/bredin/Corpora/etape/BFMTV_BFMStory_2010-09-03_175900.wav'
# current_file = {'audio': TEST_FILE}
# duration = get_audio_duration(current_file)
#
# for sample_rate in [8000, 16000, 44100, 48000]:
# raw_audio = RawAudio(sample_rate=sample_rate)
# for i in tqdm(range(1000), desc=f'{sample_rate:d}Hz'):
# start = np.random.rand() * (duration - 1.)
# data = raw_audio.crop(current_file, Segment(start, start + 1), fixed=1.)
# assert len(data) == sample_rate
| #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016-2019 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
import warnings
import numpy as np
import librosa
from librosa.util import valid_audio
from librosa.util.exceptions import ParameterError
from pyannote.core import SlidingWindow, SlidingWindowFeature
from soundfile import SoundFile
import soundfile as sf
def get_audio_duration(current_file):
    """Return audio file duration

    Parameters
    ----------
    current_file : dict
        Dictionary given by pyannote.database.

    Returns
    -------
    duration : float
        Audio file duration, in seconds.
    """
    with SoundFile(current_file["audio"], "r") as audio:
        frames, rate = audio.frames, audio.samplerate
    return frames / rate
def get_audio_sample_rate(current_file):
    """Return audio file sampling rate

    Parameters
    ----------
    current_file : dict
        Dictionary given by pyannote.database.

    Returns
    -------
    sample_rate : int
        Sampling rate, in Hz.
    """
    with SoundFile(current_file["audio"], "r") as audio:
        rate = audio.samplerate
    return rate
def read_audio(current_file, sample_rate=None, mono=True):
    """Read audio file

    Parameters
    ----------
    current_file : dict
        Dictionary given by pyannote.database.
    sample_rate: int, optional
        Target sampling rate. Defaults to using native sampling rate.
    mono : int, optional
        Convert multi-channel to mono. Defaults to True.

    Returns
    -------
    y : (n_samples, n_channels) np.array
        Audio samples.
    sample_rate : int
        Sampling rate.

    Notes
    -----
    In case `current_file` contains a `channel` key, data of this (1-indexed)
    channel will be returned.
    """
    samples, native_rate = sf.read(
        current_file["audio"], dtype="float32", always_2d=True
    )

    # keep only the requested (1-indexed) channel, if any
    channel = current_file.get("channel", None)
    if channel is not None:
        samples = samples[:, channel - 1 : channel]

    # collapse channels to mono by averaging
    if mono and samples.shape[1] > 1:
        samples = np.mean(samples, axis=1, keepdims=True)

    if (sample_rate is not None) and (native_rate != sample_rate):
        if samples.shape[1] == 1:
            # librosa expects mono audio to be of shape (n,), but we have (n, 1).
            samples = librosa.core.resample(samples[:, 0], native_rate, sample_rate)[:, None]
        else:
            samples = librosa.core.resample(samples.T, native_rate, sample_rate).T
    else:
        sample_rate = native_rate

    return samples, sample_rate
class RawAudio:
    """Raw audio with on-the-fly data augmentation

    Parameters
    ----------
    sample_rate: int, optional
        Target sampling rate. Defaults to using native sampling rate.
    mono : int, optional
        Convert multi-channel to mono. Defaults to True.
    augmentation : `pyannote.audio.augmentation.Augmentation`, optional
        Data augmentation.
    """

    def __init__(self, sample_rate=None, mono=True, augmentation=None):
        super().__init__()
        self.sample_rate = sample_rate
        self.mono = mono
        self.augmentation = augmentation

        if sample_rate is not None:
            # One frame per sample, centered on the sample.
            self.sliding_window_ = SlidingWindow(
                start=-0.5 / sample_rate,
                duration=1.0 / sample_rate,
                step=1.0 / sample_rate,
            )

    @property
    def dimension(self):
        """Feature dimension (waveform samples are scalar)."""
        return 1

    @property
    def sliding_window(self):
        """Sample-to-time mapping (only set when `sample_rate` was given)."""
        return self.sliding_window_

    def get_features(self, y, sample_rate):
        """Convert to mono, resample, augment, and validate waveform `y`.

        Raises ValueError when the (possibly augmented) waveform is not
        valid audio according to librosa.
        """
        # convert to mono
        if self.mono:
            y = np.mean(y, axis=1, keepdims=True)

        # resample if sample rates mismatch
        if (self.sample_rate is not None) and (self.sample_rate != sample_rate):
            if y.shape[1] == 1:
                # librosa expects mono audio to be of shape (n,), but we have (n, 1).
                y = librosa.core.resample(y[:, 0], sample_rate, self.sample_rate)[:, None]
            else:
                y = librosa.core.resample(y.T, sample_rate, self.sample_rate).T
            sample_rate = self.sample_rate

        # augment data
        if self.augmentation is not None:
            y = self.augmentation(y, sample_rate)

        # TODO: how time consuming is this thing (needs profiling...)
        try:
            valid = valid_audio(y[:, 0], mono=True)
        except ParameterError as e:
            msg = f"Something went wrong when augmenting waveform."
            raise ValueError(msg)

        return y

    def __call__(self, current_file, return_sr=False):
        """Obtain waveform

        Parameters
        ----------
        current_file : dict
            `pyannote.database` files.
        return_sr : `bool`, optional
            Return sample rate. Defaults to False

        Returns
        -------
        waveform : `pyannote.core.SlidingWindowFeature`
            Waveform
        sample_rate : `int`
            Only when `return_sr` is set to True
        """
        if "waveform" in current_file:

            # a precomputed waveform is only meaningful when we know its rate
            if self.sample_rate is None:
                msg = (
                    "`RawAudio` needs to be instantiated with an actual "
                    "`sample_rate` if one wants to use precomputed "
                    "waveform."
                )
                raise ValueError(msg)
            sample_rate = self.sample_rate

            y = current_file["waveform"]
            if len(y.shape) != 2:
                msg = (
                    f"Precomputed waveform should be provided as a "
                    f"(n_samples, n_channels) `np.ndarray`."
                )
                raise ValueError(msg)

        else:
            y, sample_rate = sf.read(
                current_file["audio"], dtype="float32", always_2d=True
            )

        # extract specific channel if requested
        channel = current_file.get("channel", None)
        if channel is not None:
            y = y[:, channel - 1 : channel]

        y = self.get_features(y, sample_rate)

        # one frame per sample, centered on the sample
        sliding_window = SlidingWindow(
            start=-0.5 / sample_rate, duration=1.0 / sample_rate, step=1.0 / sample_rate
        )

        if return_sr:
            return (
                SlidingWindowFeature(y, sliding_window),
                sample_rate if self.sample_rate is None else self.sample_rate,
            )

        return SlidingWindowFeature(y, sliding_window)

    def get_context_duration(self):
        """Extra context (in seconds) needed on each side of a crop: none."""
        return 0.0

    def crop(self, current_file, segment, mode="center", fixed=None):
        """Fast version of self(current_file).crop(segment, **kwargs)

        Parameters
        ----------
        current_file : dict
            `pyannote.database` file.
        segment : `pyannote.core.Segment`
            Segment from which to extract features.
        mode : {'loose', 'strict', 'center'}, optional
            In 'strict' mode, only frames fully included in 'segment' are
            returned. In 'loose' mode, any intersecting frames are returned. In
            'center' mode, first and last frames are chosen to be the ones
            whose centers are the closest to 'focus' start and end times.
            Defaults to 'center'.
        fixed : float, optional
            Overrides `Segment` 'focus' duration and ensures that the number of
            returned frames is fixed (which might otherwise not be the case
            because of rounding errors). Has no effect in 'strict' or 'loose'
            modes.

        Returns
        -------
        waveform : (n_samples, n_channels) numpy array
            Waveform

        See also
        --------
        `pyannote.core.SlidingWindowFeature.crop`
        """
        if self.sample_rate is None:
            msg = (
                "`RawAudio` needs to be instantiated with an actual "
                "`sample_rate` if one wants to use `crop`."
            )
            raise ValueError(msg)

        # find the start and end positions of the required segment
        ((start, end),) = self.sliding_window_.crop(
            segment, mode=mode, fixed=fixed, return_ranges=True
        )

        # this is expected number of samples.
        # this will be useful later in case of on-the-fly resampling
        n_samples = end - start

        if "waveform" in current_file:
            y = current_file["waveform"]
            if len(y.shape) != 2:
                msg = (
                    f"Precomputed waveform should be provided as a "
                    f"(n_samples, n_channels) `np.ndarray`."
                )
                raise ValueError(msg)
            sample_rate = self.sample_rate
            data = y[start:end]

        else:
            # read file with SoundFile, which supports various formats
            # including NIST sphere
            with SoundFile(current_file["audio"], "r") as audio_file:
                sample_rate = audio_file.samplerate

                # if the sample rates are mismatched,
                # recompute the start and end in native samples
                if sample_rate != self.sample_rate:
                    sliding_window = SlidingWindow(
                        start=-0.5 / sample_rate,
                        duration=1.0 / sample_rate,
                        step=1.0 / sample_rate,
                    )
                    ((start, end),) = sliding_window.crop(
                        segment, mode=mode, fixed=fixed, return_ranges=True
                    )

                try:
                    audio_file.seek(start)
                    data = audio_file.read(end - start, dtype="float32", always_2d=True)
                except RuntimeError as e:
                    # fall back to decoding the whole file when seeking fails
                    msg = (
                        f"SoundFile failed to seek-and-read in "
                        f"{current_file['audio']}: loading the whole file..."
                    )
                    warnings.warn(msg)
                    return self(current_file).crop(segment, mode=mode, fixed=fixed)

        # extract specific channel if requested
        channel = current_file.get("channel", None)
        if channel is not None:
            data = data[:, channel - 1 : channel]

        return self.get_features(data, sample_rate)
# # THIS SCRIPT CAN BE USED TO CRASH-TEST THE ON-THE-FLY RESAMPLING
# import numpy as np
# from pyannote.audio.features import RawAudio
# from pyannote.core import Segment
# from pyannote.audio.features.utils import get_audio_duration
# from tqdm import tqdm
#
# TEST_FILE = '/Users/bredin/Corpora/etape/BFMTV_BFMStory_2010-09-03_175900.wav'
# current_file = {'audio': TEST_FILE}
# duration = get_audio_duration(current_file)
#
# for sample_rate in [8000, 16000, 44100, 48000]:
# raw_audio = RawAudio(sample_rate=sample_rate)
# for i in tqdm(range(1000), desc=f'{sample_rate:d}Hz'):
# start = np.random.rand() * (duration - 1.)
# data = raw_audio.crop(current_file, Segment(start, start + 1), fixed=1.)
# assert len(data) == sample_rate
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import posixpath
import threading
from desktop.conf import DEFAULT_USER
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.rest.http_client import HttpClient
from desktop.lib.rest.resource import Resource
from hadoop import cluster
from hadoop.yarn.resource_manager_api import get_resource_manager
LOG = logging.getLogger(__name__)
_API_VERSION = 'v1'
_JSON_CONTENT_TYPE = 'application/json'
API_CACHE = None
API_CACHE_LOCK = threading.Lock()
def get_mapreduce_api(username):
  """Return the process-wide MapreduceApi singleton, bound to `username`.

  The instance is created lazily on first use (double-checked locking) and
  shared across threads afterwards; only the per-thread user is updated on
  subsequent calls.
  """
  global API_CACHE
  if API_CACHE is None:
    # Fix: use the lock as a context manager instead of manual
    # acquire()/try/finally/release().
    with API_CACHE_LOCK:
      if API_CACHE is None:
        yarn_cluster = cluster.get_cluster_conf_for_job_submission()
        if yarn_cluster is None:
          # NOTE(review): `_` (gettext) is not imported in this chunk -- verify
          # it is provided at module level in the full file.
          raise PopupException(_('No Resource Manager are available.'))
        API_CACHE = MapreduceApi(username, yarn_cluster.PROXY_API_URL.get(), yarn_cluster.SECURITY_ENABLED.get(), yarn_cluster.SSL_CERT_CA_VERIFY.get())

  API_CACHE.setuser(username) # Set the correct user

  return API_CACHE
class MapreduceApi(object):
  """Thin REST client for the YARN MapReduce application master, reached
  through the Resource Manager proxy at ``<mr_url>/proxy/<app_id>/ws/v1/...``.

  The effective user is stored per-thread so one shared instance can serve
  many request threads (see `get_mapreduce_api`).
  """

  def __init__(self, username, mr_url, security_enabled=False, ssl_cert_ca_verify=False):
    self._user = username
    self._url = posixpath.join(mr_url, 'proxy')
    self._client = HttpClient(self._url, logger=LOG)
    self._root = Resource(self._client)
    self._security_enabled = security_enabled
    self._thread_local = threading.local() # To store user info

    if self._security_enabled:
      self._client.set_kerberos_auth()

    self._client.set_verify(ssl_cert_ca_verify)

  def __str__(self):
    return "MapreduceApi at %s" % (self._url,)

  def _get_params(self):
    # Common query parameters: impersonate the requesting user via `doAs`
    # when it differs from the default, and on non-Kerberized clusters
    # identify the caller with `user.name`.
    params = {}
    if self.username != DEFAULT_USER.get(): # We impersonate if needed
      params['doAs'] = self.username
      if not self._security_enabled:
        params['user.name'] = DEFAULT_USER.get()
    return params

  @property
  def url(self):
    # Base proxy URL used for all requests.
    return self._url

  @property
  def username(self):
    # Per-thread effective user; falls back to the configured default.
    try:
      return self._thread_local.user
    except AttributeError:
      return DEFAULT_USER.get()

  def setuser(self, user):
    # Set the per-thread effective user; returns the construction-time user
    # (NOT the previous thread-local value -- presumably intentional, verify).
    curr = self._user
    self._thread_local.user = user
    return curr

  def job(self, user, job_id):
    # NOTE(review): the `user` parameter is unused; kept for interface
    # compatibility with callers.
    app_id = job_id.replace('job', 'application')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s' % {'app_id': app_id, 'job_id': job_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})

  def counters(self, job_id):
    app_id = job_id.replace('job', 'application')
    response = self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/counters' % {'app_id': app_id, 'job_id': job_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})
    # If it hits the job history server, it will return HTML.
    # Simply return None in this case because there isn't much data there.
    # NOTE(review): `basestring` is Python 2 only -- this module predates a
    # Python 3 port; would raise NameError on Python 3.
    if isinstance(response, basestring):
      return None
    else:
      return response

  def tasks(self, job_id):
    app_id = job_id.replace('job', 'application')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/tasks' % {'app_id': app_id, 'job_id': job_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})

  def job_attempts(self, job_id):
    app_id = job_id.replace('job', 'application')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/jobattempts' % {'app_id': app_id, 'job_id': job_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})

  def conf(self, job_id):
    app_id = job_id.replace('job', 'application')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/conf' % {'app_id': app_id, 'job_id': job_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})

  def task(self, job_id, task_id):
    app_id = job_id.replace('job', 'application')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/tasks/%(task_id)s' % {'app_id': app_id, 'job_id': job_id, 'task_id': task_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})

  def task_counters(self, job_id, task_id):
    app_id = job_id.replace('job', 'application')
    # normalize in case an application id was passed in
    job_id = job_id.replace('application', 'job')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/tasks/%(task_id)s/counters' % {'app_id': app_id, 'job_id': job_id, 'task_id': task_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})

  def task_attempts(self, job_id, task_id):
    app_id = job_id.replace('job', 'application')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/tasks/%(task_id)s/attempts' % {'app_id': app_id, 'job_id': job_id, 'task_id': task_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})

  def task_attempt(self, job_id, task_id, attempt_id):
    app_id = job_id.replace('job', 'application')
    # normalize in case an application id was passed in
    job_id = job_id.replace('application', 'job')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/tasks/%(task_id)s/attempts/%(attempt_id)s' % {'app_id': app_id, 'job_id': job_id, 'task_id': task_id, 'attempt_id': attempt_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})

  def kill(self, job_id):
    app_id = job_id.replace('job', 'application')
    get_resource_manager(self._user).kill(app_id) # We need to call the RM
| #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import posixpath
import threading
from desktop.conf import DEFAULT_USER
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.rest.http_client import HttpClient
from desktop.lib.rest.resource import Resource
from hadoop import cluster
from hadoop.yarn.resource_manager_api import get_resource_manager
LOG = logging.getLogger(__name__)
# REST API version and accept type used by every MR proxy call below.
_API_VERSION = 'v1'
_JSON_CONTENT_TYPE = 'application/json'
# Process-wide MapreduceApi singleton, lazily built under API_CACHE_LOCK.
API_CACHE = None
API_CACHE_LOCK = threading.Lock()
def get_mapreduce_api(username):
  """Return the shared MapreduceApi singleton, bound to `username`.

  Uses double-checked locking: the order (check, acquire, re-check,
  construct) is what makes the lazy initialisation thread-safe, so the
  statement sequence must not be reordered.
  """
  global API_CACHE
  if API_CACHE is None:
    API_CACHE_LOCK.acquire()
    try:
      if API_CACHE is None:
        yarn_cluster = cluster.get_cluster_conf_for_job_submission()
        if yarn_cluster is None:
          # NOTE(review): `_` is not imported in this file -- presumably a
          # gettext alias installed elsewhere; confirm, otherwise this raise
          # path itself raises NameError.
          raise PopupException(_('No Resource Manager are available.'))
        API_CACHE = MapreduceApi(username, yarn_cluster.PROXY_API_URL.get(), yarn_cluster.SECURITY_ENABLED.get(), yarn_cluster.SSL_CERT_CA_VERIFY.get())
    finally:
      API_CACHE_LOCK.release()
  API_CACHE.setuser(username)  # Set the correct user
  return API_CACHE
class MapreduceApi(object):
  """Thin REST client for the YARN MapReduce application-master proxy.

  The proxy addresses applications by application id, so most methods
  derive `app_id` from the `job_id` they receive. Uses `basestring`,
  i.e. this module is Python 2 code.
  """
  def __init__(self, username, mr_url, security_enabled=False, ssl_cert_ca_verify=False):
    self._user = username
    self._url = posixpath.join(mr_url, 'proxy')
    self._client = HttpClient(self._url, logger=LOG)
    self._root = Resource(self._client)
    self._security_enabled = security_enabled
    self._thread_local = threading.local() # To store user info
    if self._security_enabled:
      self._client.set_kerberos_auth()
    self._client.set_verify(ssl_cert_ca_verify)
  def __str__(self):
    return "MapreduceApi at %s" % (self._url,)
  def _get_params(self):
    # Common query parameters: doAs impersonation, plus user.name when
    # security is off.
    params = {}
    if self.username != DEFAULT_USER.get(): # We impersonate if needed
      params['doAs'] = self.username
      if not self._security_enabled:
        params['user.name'] = DEFAULT_USER.get()
    return params
  @property
  def url(self):
    return self._url
  @property
  def username(self):
    # Thread-local user set by setuser(); falls back to the default user.
    try:
      return self._thread_local.user
    except AttributeError:
      return DEFAULT_USER.get()
  def setuser(self, user):
    # NOTE(review): returns self._user (the construction-time user), not the
    # previous thread-local value -- confirm this is intentional.
    curr = self._user
    self._thread_local.user = user
    return curr
  def job(self, user, job_id):
    # `user` is unused here; kept for interface compatibility with callers.
    app_id = job_id.replace('job', 'application')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s' % {'app_id': app_id, 'job_id': job_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})
  def counters(self, job_id):
    app_id = job_id.replace('job', 'application')
    response = self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/counters' % {'app_id': app_id, 'job_id': job_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})
    # If it hits the job history server, it will return HTML.
    # Simply return None in this case because there isn't much data there.
    if isinstance(response, basestring):
      return None
    else:
      return response
  def tasks(self, job_id):
    app_id = job_id.replace('job', 'application')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/tasks' % {'app_id': app_id, 'job_id': job_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})
  def job_attempts(self, job_id):
    app_id = job_id.replace('job', 'application')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/jobattempts' % {'app_id': app_id, 'job_id': job_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})
  def conf(self, job_id):
    app_id = job_id.replace('job', 'application')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/conf' % {'app_id': app_id, 'job_id': job_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})
  def task(self, job_id, task_id):
    app_id = job_id.replace('job', 'application')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/tasks/%(task_id)s' % {'app_id': app_id, 'job_id': job_id, 'task_id': task_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})
  def task_counters(self, job_id, task_id):
    app_id = job_id.replace('job', 'application')
    # NOTE(review): only task_counters and task_attempt normalise job_id back
    # from an application id; sibling methods do not -- confirm callers always
    # pass proper job ids to the others.
    job_id = job_id.replace('application', 'job')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/tasks/%(task_id)s/counters' % {'app_id': app_id, 'job_id': job_id, 'task_id': task_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})
  def task_attempts(self, job_id, task_id):
    app_id = job_id.replace('job', 'application')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/tasks/%(task_id)s/attempts' % {'app_id': app_id, 'job_id': job_id, 'task_id': task_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})
  def task_attempt(self, job_id, task_id, attempt_id):
    app_id = job_id.replace('job', 'application')
    job_id = job_id.replace('application', 'job')
    return self._root.get('%(app_id)s/ws/%(version)s/mapreduce/jobs/%(job_id)s/tasks/%(task_id)s/attempts/%(attempt_id)s' % {'app_id': app_id, 'job_id': job_id, 'task_id': task_id, 'attempt_id': attempt_id, 'version': _API_VERSION}, params=self._get_params(), headers={'Accept': _JSON_CONTENT_TYPE})
  def kill(self, job_id):
    app_id = job_id.replace('job', 'application')
    get_resource_manager(self._user).kill(app_id) # We need to call the RM
|
import argparse
import binascii
import collections
import re
import sys
import webbrowser
import wcwidth
from .main import available_fonts_for_codepoint
from .preview_server import FontPreviewServer
# Captures a double-quoted name, ignoring a leading dot.
__RE_GET_NAME__ = re.compile(r'"\.?([^"]+)"')
# Matches the U+XXXX / U+XXXXXX codepoint input syntax.
__RE_UNICODE_HEX__ = re.compile(r'^U\+[0-9a-fA-F]{4,6}$')
def parser_arg():
    """Build the command-line parser and parse ``sys.argv``."""
    from . import __version__

    ap = argparse.ArgumentParser(
        prog="which_fonts_support",
        description='Find which fonts support specified character',
        epilog='Github: https://github.com/7sDream/which_fonts_support',
    )
    ap.add_argument(
        'char', default='',
        help=(
            'the character, if you want to check character not in BMP, '
            'use U+XXXX or U+XXXXXX format.'
        ),
    )
    ap.add_argument('--version', action='version', version=__version__)
    ap.add_argument(
        '-f', '--fc-list', type=str, default='fc-list', metavar='PATH',
        help='provide custom fc-list executable file path',
    )
    ap.add_argument(
        '-v', '--verbose', action='store_true',
        help='show each style full name',
    )
    ap.add_argument(
        '-p', '--preview', action='store_true',
        help='show font preview for the char in browser',
    )
    return ap.parse_args()
def get_char_codepoint(c):
    """Describe a single character: decimal codepoint, 6-digit lowercase
    hex, and the hex of its UTF-8 byte sequence."""
    assert len(c) == 1
    cp = ord(c)
    return {
        'decimal': cp,
        'hex': format(cp, '06x'),
        'utf8': c.encode('utf8').hex(),
    }
def cli():
    """Entry point: list (and optionally preview) fonts supporting one char.

    Bug fix: the per-family style-count f-string nested single quotes
    inside a single-quoted f-string, which is a SyntaxError before
    Python 3.12 (PEP 701). Inner double quotes are valid on every
    supported version; output is unchanged.
    """
    args = parser_arg()
    if __RE_UNICODE_HEX__.match(args.char):
        args.char = chr(int(args.char[2:], 16))
    if len(args.char) == 0:
        args.char = input('Input one character: ')
    if len(args.char) != 1:
        sys.stderr.write('Please provide ONE character')
        exit(1)
    cp = get_char_codepoint(args.char)
    codepoint = cp['decimal']
    codepoint_hex_str = cp['hex']
    codepoint_utf8_seq = cp['utf8']
    # Map fullname -> family; later duplicates from fc-list win.
    fullname_to_family_map = {
        fullname: family for family, fullname in available_fonts_for_codepoint(codepoint, args.fc_list)
    }
    family_to_fullname_list_map = collections.defaultdict(list)
    for fullname, family in fullname_to_family_map.items():
        family_to_fullname_list_map[family].append(fullname)
    if len(fullname_to_family_map) == 0:
        print("No fonts support this character")
        exit(0)
    family_style_counts = collections.Counter(fullname_to_family_map.values())
    families = sorted(family_style_counts)
    # wcswidth accounts for double-width (e.g. CJK) family names when aligning.
    max_width = max(map(wcwidth.wcswidth, families)) if not args.verbose else 0
    print(
        f'Font(s) support the char [ {args.char} ]' +
        f'({codepoint}, U+{codepoint_hex_str}, {codepoint_utf8_seq}):'
    )
    font_preview_server = FontPreviewServer(args.char)
    for family in families:
        style_count = family_style_counts[family]
        print(
            family,
            ' ' * (max_width - wcwidth.wcswidth(family))
            if not args.verbose else '',
            f' with {style_count} style{"s" if style_count > 1 else ""}'
            if not args.verbose else '',
            sep=''
        )
        if style_count > 1 and args.verbose:
            for fullname in sorted(family_to_fullname_list_map[family]):
                print(' ' * 4, fullname, sep='')
        font_preview_server.add_font(family)
    if args.preview:
        font_preview_server.start()
        print('-' * 80)
        print("Opening your browser for preview...")
        webbrowser.open("http://localhost:" + str(font_preview_server.port) + '/')
        input("Press Enter when you finish preview...")
        font_preview_server.stop()
if __name__ == '__main__':
    # Allow running the module directly as a script.
    cli()
| import argparse
import binascii
import collections
import re
import sys
import webbrowser
import wcwidth
from .main import available_fonts_for_codepoint
from .preview_server import FontPreviewServer
# Captures a double-quoted name, ignoring a leading dot.
__RE_GET_NAME__ = re.compile(r'"\.?([^"]+)"')
# Matches the U+XXXX / U+XXXXXX codepoint input syntax.
__RE_UNICODE_HEX__ = re.compile(r'^U\+[0-9a-fA-F]{4,6}$')
def parser_arg():
    """Build the command-line parser and parse ``sys.argv``."""
    from . import __version__
    parser = argparse.ArgumentParser(
        prog="which_fonts_support",
        description='Find which fonts support specified character',
        epilog='Github: https://github.com/7sDream/which_fonts_support',
    )
    parser.add_argument(
        'char', default='',
        help='the character, if you want to check character not in BMP, ' +
        'use U+XXXX or U+XXXXXX format.'
    )
    parser.add_argument('--version', action='version', version=__version__)
    parser.add_argument(
        '-f', '--fc-list', type=str, default='fc-list', metavar='PATH',
        help='provide custom fc-list executable file path',
    )
    parser.add_argument(
        '-v', '--verbose', action='store_true',
        help='show each style full name',
    )
    parser.add_argument(
        '-p', '--preview', action='store_true',
        help='show font preview for the char in browser',
    )
    return parser.parse_args()
def get_char_codepoint(c):
    """Describe a single character: decimal codepoint, 6-digit lowercase
    hex, and the hex of its UTF-8 byte sequence."""
    assert len(c) == 1
    cp = ord(c)
    return {
        'decimal': cp,
        'hex': format(cp, '06x'),
        'utf8': c.encode('utf8').hex(),
    }
def cli():
    """Entry point: list (and optionally preview) fonts supporting one char."""
    args = parser_arg()
    if __RE_UNICODE_HEX__.match(args.char):
        args.char = chr(int(args.char[2:], 16))
    if len(args.char) == 0:
        args.char = input('Input one character: ')
    if len(args.char) != 1:
        sys.stderr.write('Please provide ONE character')
        exit(1)
    cp = get_char_codepoint(args.char)
    codepoint = cp['decimal']
    codepoint_hex_str = cp['hex']
    codepoint_utf8_seq = cp['utf8']
    # Map fullname -> family; later duplicates from fc-list win.
    fullname_to_family_map = {
        fullname: family for family, fullname in available_fonts_for_codepoint(codepoint, args.fc_list)
    }
    family_to_fullname_list_map = collections.defaultdict(list)
    for fullname, family in fullname_to_family_map.items():
        family_to_fullname_list_map[family].append(fullname)
    if len(fullname_to_family_map) == 0:
        print("No fonts support this character")
        exit(0)
    family_style_counts = collections.Counter(fullname_to_family_map.values())
    families = sorted(family_style_counts)
    # wcswidth accounts for double-width (e.g. CJK) family names when aligning.
    max_width = max(map(wcwidth.wcswidth, families)) if not args.verbose else 0
    print(
        f'Font(s) support the char [ {args.char} ]' +
        f'({codepoint}, U+{codepoint_hex_str}, {codepoint_utf8_seq}):'
    )
    font_preview_server = FontPreviewServer(args.char)
    for family in families:
        style_count = family_style_counts[family]
        print(
            family,
            ' ' * (max_width - wcwidth.wcswidth(family))
            if not args.verbose else '',
            f' with {style_count} style{"s" if style_count > 1 else ""}'
            if not args.verbose else '',
            sep=''
        )
        if style_count > 1 and args.verbose:
            for fullname in sorted(family_to_fullname_list_map[family]):
                print(' ' * 4, fullname, sep='')
        font_preview_server.add_font(family)
    if args.preview:
        font_preview_server.start()
        print('-' * 80)
        print("Opening your browser for preview...")
        webbrowser.open("http://localhost:" + str(font_preview_server.port) + '/')
        input("Press Enter when you finish preview...")
        font_preview_server.stop()
if __name__ == '__main__':
    # Allow running the module directly as a script.
    cli()
|
import logging
import uuid
from datetime import datetime
import architect
from django.contrib.auth.models import AnonymousUser, User
from django.db import models
from django.utils.functional import cached_property
from dimagi.utils.web import get_ip
from corehq.apps.domain.utils import get_domain_from_url
from corehq.util.models import ForeignValue, NullJsonField, foreign_value_init
log = logging.getLogger(__name__)  # module-level logger shared by the audit helpers
def make_uuid():
    """Return a random UUID4 as a 32-char lowercase hex string (no dashes)."""
    fresh = uuid.uuid4()
    return fresh.hex
def getdate():
    """Current UTC wall-clock time as a *naive* datetime.

    NOTE(review): datetime.utcnow() is deprecated in Python 3.12; switching
    to datetime.now(timezone.utc) would return an aware value and change
    stored data, so the call is kept as-is.
    """
    now = datetime.utcnow()
    return now
# request.META keys (WSGI-style names) copied verbatim into
# NavigationEventAudit.headers by audit_view().
STANDARD_HEADER_KEYS = [
    'X_FORWARDED_FOR',
    'X_FORWARDED_HOST',
    'X_FORWARDED_SERVER',
    'VIA',
    'HTTP_REFERER',
    'REQUEST_METHOD',
    'QUERY_STRING',
    'HTTP_ACCEPT_CHARSET',
    'HTTP_CONNECTION',
    'HTTP_COOKIE',
    'SERVER_NAME',
    'SERVER_PORT',
    'HTTP_ACCEPT',
    'REMOTE_ADDR',
    'HTTP_ACCEPT_LANGUAGE',
    'CONTENT_TYPE',
    'HTTP_ACCEPT_ENCODING',
    # settings.AUDIT_TRACE_ID_HEADER (django-ified) will be added here
]
class UserAgent(models.Model):
    # Deduplicated user-agent strings, referenced via AuditEvent.user_agent_fk.
    value = models.CharField(max_length=255, db_index=True, unique=True)
class HttpAccept(models.Model):
    # Deduplicated HTTP Accept header values, referenced via AccessAudit.http_accept_fk.
    value = models.CharField(max_length=255, db_index=True, unique=True)
class ViewName(models.Model):
    # Deduplicated dotted view names, referenced via NavigationEventAudit.view_fk.
    value = models.CharField(max_length=255, db_index=True, unique=True)
class AuditEvent(models.Model):
    """Abstract base for audit records: who, where, and when."""
    id = models.BigAutoField(primary_key=True)
    user = models.CharField(max_length=255, null=True, blank=True)
    domain = models.CharField(max_length=126, null=True, blank=True)
    event_date = models.DateTimeField(default=getdate, db_index=True)
    path = models.CharField(max_length=255, blank=True, default='')
    ip_address = models.CharField(max_length=45, blank=True, default='')
    session_key = models.CharField(max_length=255, blank=True, null=True)
    user_agent_fk = models.ForeignKey(
        UserAgent, null=True, db_index=False, on_delete=models.PROTECT)
    user_agent = ForeignValue(user_agent_fk, truncate=True)
    @property
    def doc_type(self):
        # Concrete subclass name, used in __str__.
        return type(self).__name__
    @property
    def description(self):
        # Subclasses must provide a human-readable summary.
        raise NotImplementedError("abstract property")
    class Meta:
        abstract = True
        # NOTE(review): index_together is deprecated in newer Django in
        # favour of Meta.indexes -- confirm the project's Django version.
        index_together = [
            ("user", "event_date"),
            ("domain", "event_date"),
        ]
    def __str__(self):
        return "[%s] %s" % (self.doc_type, self.description)
    @classmethod
    def create_audit(cls, request, user):
        """Build (but do not save) an audit record populated from `request`."""
        audit = cls()
        audit.domain = get_domain(request)
        audit.path = request.path[:255]
        audit.ip_address = get_ip(request)
        audit.session_key = request.session.session_key
        audit.user_agent = request.META.get('HTTP_USER_AGENT')
        # Normalise the `user` argument: None for anonymous/missing,
        # username for User objects, otherwise the value as given.
        if isinstance(user, AnonymousUser):
            audit.user = None
        elif user is None:
            audit.user = None
        elif isinstance(user, User):
            audit.user = user.username
        else:
            audit.user = user
        return audit
@architect.install('partition', type='range', subtype='date', constraint='month', column='event_date')
@foreign_value_init
class NavigationEventAudit(AuditEvent):
    """
    Audit event to track happenings within the system, ie, view access
    """
    params = models.CharField(max_length=512, blank=True, default='')
    view_fk = models.ForeignKey(
        ViewName, null=True, db_index=False, on_delete=models.PROTECT)
    view = ForeignValue(view_fk, truncate=True)
    view_kwargs = NullJsonField(default=dict)
    headers = NullJsonField(default=dict)
    status_code = models.SmallIntegerField(default=0)
    @property
    def description(self):
        return self.user or ""
    @cached_property
    def request_path(self):
        # Path plus the recorded query string (may end in '?' if no params).
        return f"{self.path}?{self.params}"
    @classmethod
    def audit_view(cls, request, user, view_func, view_kwargs):
        """Build (but do not save) an audit record for a view access.

        Best-effort: any failure is logged and swallowed, returning None,
        so auditing never breaks request handling.
        """
        try:
            audit = cls.create_audit(request, user)
            if request.GET:
                audit.params = request.META.get("QUERY_STRING", "")[:512]
            audit.view = "%s.%s" % (view_func.__module__, view_func.__name__)
            for k in STANDARD_HEADER_KEYS:
                header_item = request.META.get(k, None)
                if header_item is not None:
                    audit.headers[k] = header_item
            # it's a bit verbose to go to that extreme, TODO: need to have
            # targeted fields in the META, but due to server differences, it's
            # hard to make it universal.
            audit.view_kwargs = view_kwargs
            return audit
        except Exception:
            log.exception("NavigationEventAudit.audit_view error")
# Single-char DB codes for AccessAudit.access_type.
ACCESS_LOGIN = 'i'
ACCESS_LOGOUT = 'o'
ACCESS_FAILED = 'f'
# Code -> human-readable label (also used as model field choices).
ACCESS_CHOICES = {
    ACCESS_LOGIN: "Login",
    ACCESS_LOGOUT: "Logout",
    ACCESS_FAILED: "Login failed",
}
@architect.install('partition', type='range', subtype='date', constraint='month', column='event_date')
@foreign_value_init
class AccessAudit(AuditEvent):
    """Audit record for login / logout / failed-login events.

    Bug fix: ``description`` nested double quotes inside a double-quoted
    f-string, a SyntaxError before Python 3.12 (PEP 701). Single quotes
    inside keep it valid on all supported versions; output is unchanged.
    """
    access_type = models.CharField(max_length=1, choices=ACCESS_CHOICES.items())
    http_accept_fk = models.ForeignKey(
        HttpAccept, null=True, db_index=False, on_delete=models.PROTECT)
    http_accept = ForeignValue(http_accept_fk, truncate=True)
    trace_id = models.CharField(max_length=127, null=True, blank=True)
    # Optional (django-ified) settings.AUDIT_TRACE_ID_HEADER set by AuditcareConfig
    trace_id_header = None
    @property
    def description(self):
        # e.g. "Login: someuser"; empty user (anonymous) renders as "Login: "
        return f"{ACCESS_CHOICES[self.access_type]}: {self.user or ''}"
    @classmethod
    def create_audit(cls, request, user, access_type):
        '''Creates an instance of a Access log.'''
        audit = super().create_audit(request, user)
        audit.http_accept = request.META.get('HTTP_ACCEPT')
        audit.access_type = access_type
        if cls.trace_id_header is not None:
            audit.trace_id = request.META.get(cls.trace_id_header)
        return audit
    @classmethod
    def audit_login(cls, request, user, *args, **kwargs):
        """Record a successful login."""
        audit = cls.create_audit(request, user, ACCESS_LOGIN)
        audit.save()
    @classmethod
    def audit_login_failed(cls, request, username, *args, **kwargs):
        """Record a failed login attempt for `username`."""
        audit = cls.create_audit(request, username, ACCESS_FAILED)
        audit.save()
    @classmethod
    def audit_logout(cls, request, user):
        """Record a logout."""
        audit = cls.create_audit(request, user, ACCESS_LOGOUT)
        audit.save()
def audit_login(sender, *, request, user, **kwargs):
    # Django user_logged_in signal receiver.
    AccessAudit.audit_login(request, user)  # success
def audit_logout(sender, *, request, user, **kwargs):
    # Django user_logged_out signal receiver.
    AccessAudit.audit_logout(request, user)
def audit_login_failed(sender, *, request, credentials, **kwargs):
    # Django user_login_failed signal receiver.
    AccessAudit.audit_login_failed(request, credentials["username"])
def get_domain(request):
    """Best-effort domain for `request`: the URL-derived value wins,
    falling back to request.domain; mismatches are logged."""
    from_url = get_domain_from_url(request.path)
    from_attr = getattr(request, "domain", None)
    if not from_attr:
        return from_url
    if not from_url:
        return from_attr
    if from_url != from_attr:
        log.error("domain mismatch for request %s: %r != %r",
                  request.path, from_url, from_attr)
    return from_url
| import logging
import uuid
from datetime import datetime
import architect
from django.contrib.auth.models import AnonymousUser, User
from django.db import models
from django.utils.functional import cached_property
from dimagi.utils.web import get_ip
from corehq.apps.domain.utils import get_domain_from_url
from corehq.util.models import ForeignValue, NullJsonField, foreign_value_init
log = logging.getLogger(__name__)  # module-level logger shared by the audit helpers
def make_uuid():
    """Return a random UUID4 as a 32-char lowercase hex string (no dashes)."""
    fresh = uuid.uuid4()
    return fresh.hex
def getdate():
    """Current UTC wall-clock time as a naive datetime."""
    now = datetime.utcnow()
    return now
# request.META keys (WSGI-style names) copied verbatim into
# NavigationEventAudit.headers by audit_view().
STANDARD_HEADER_KEYS = [
    'X_FORWARDED_FOR',
    'X_FORWARDED_HOST',
    'X_FORWARDED_SERVER',
    'VIA',
    'HTTP_REFERER',
    'REQUEST_METHOD',
    'QUERY_STRING',
    'HTTP_ACCEPT_CHARSET',
    'HTTP_CONNECTION',
    'HTTP_COOKIE',
    'SERVER_NAME',
    'SERVER_PORT',
    'HTTP_ACCEPT',
    'REMOTE_ADDR',
    'HTTP_ACCEPT_LANGUAGE',
    'CONTENT_TYPE',
    'HTTP_ACCEPT_ENCODING',
    # settings.AUDIT_TRACE_ID_HEADER (django-ified) will be added here
]
class UserAgent(models.Model):
    # Deduplicated user-agent strings, referenced via AuditEvent.user_agent_fk.
    value = models.CharField(max_length=255, db_index=True, unique=True)
class HttpAccept(models.Model):
    # Deduplicated HTTP Accept header values, referenced via AccessAudit.http_accept_fk.
    value = models.CharField(max_length=255, db_index=True, unique=True)
class ViewName(models.Model):
    # Deduplicated dotted view names, referenced via NavigationEventAudit.view_fk.
    value = models.CharField(max_length=255, db_index=True, unique=True)
class AuditEvent(models.Model):
    """Abstract base for audit records: who, where, and when."""
    id = models.BigAutoField(primary_key=True)
    user = models.CharField(max_length=255, null=True, blank=True)
    domain = models.CharField(max_length=126, null=True, blank=True)
    event_date = models.DateTimeField(default=getdate, db_index=True)
    path = models.CharField(max_length=255, blank=True, default='')
    ip_address = models.CharField(max_length=45, blank=True, default='')
    session_key = models.CharField(max_length=255, blank=True, null=True)
    user_agent_fk = models.ForeignKey(
        UserAgent, null=True, db_index=False, on_delete=models.PROTECT)
    user_agent = ForeignValue(user_agent_fk, truncate=True)
    @property
    def doc_type(self):
        # Concrete subclass name, used in __str__.
        return type(self).__name__
    @property
    def description(self):
        # Subclasses must provide a human-readable summary.
        raise NotImplementedError("abstract property")
    class Meta:
        abstract = True
        index_together = [
            ("user", "event_date"),
            ("domain", "event_date"),
        ]
    def __str__(self):
        return "[%s] %s" % (self.doc_type, self.description)
    @classmethod
    def create_audit(cls, request, user):
        """Build (but do not save) an audit record populated from `request`."""
        audit = cls()
        audit.domain = get_domain(request)
        audit.path = request.path[:255]
        audit.ip_address = get_ip(request)
        audit.session_key = request.session.session_key
        audit.user_agent = request.META.get('HTTP_USER_AGENT')
        # Normalise `user`: None for anonymous/missing, username for User objects.
        if isinstance(user, AnonymousUser):
            audit.user = None
        elif user is None:
            audit.user = None
        elif isinstance(user, User):
            audit.user = user.username
        else:
            audit.user = user
        return audit
@architect.install('partition', type='range', subtype='date', constraint='month', column='event_date')
@foreign_value_init
class NavigationEventAudit(AuditEvent):
    """
    Audit event to track happenings within the system, ie, view access
    """
    params = models.CharField(max_length=512, blank=True, default='')
    view_fk = models.ForeignKey(
        ViewName, null=True, db_index=False, on_delete=models.PROTECT)
    view = ForeignValue(view_fk, truncate=True)
    view_kwargs = NullJsonField(default=dict)
    headers = NullJsonField(default=dict)
    status_code = models.SmallIntegerField(default=0)
    @property
    def description(self):
        return self.user or ""
    @cached_property
    def request_path(self):
        # Path plus the recorded query string (may end in '?' if no params).
        return f"{self.path}?{self.params}"
    @classmethod
    def audit_view(cls, request, user, view_func, view_kwargs):
        """Best-effort audit of a view access; failures are logged, not raised."""
        try:
            audit = cls.create_audit(request, user)
            if request.GET:
                audit.params = request.META.get("QUERY_STRING", "")[:512]
            audit.view = "%s.%s" % (view_func.__module__, view_func.__name__)
            for k in STANDARD_HEADER_KEYS:
                header_item = request.META.get(k, None)
                if header_item is not None:
                    audit.headers[k] = header_item
            # it's a bit verbose to go to that extreme, TODO: need to have
            # targeted fields in the META, but due to server differences, it's
            # hard to make it universal.
            audit.view_kwargs = view_kwargs
            return audit
        except Exception:
            log.exception("NavigationEventAudit.audit_view error")
# Single-char DB codes for AccessAudit.access_type.
ACCESS_LOGIN = 'i'
ACCESS_LOGOUT = 'o'
ACCESS_FAILED = 'f'
# Code -> human-readable label (also used as model field choices).
ACCESS_CHOICES = {
    ACCESS_LOGIN: "Login",
    ACCESS_LOGOUT: "Logout",
    ACCESS_FAILED: "Login failed",
}
@architect.install('partition', type='range', subtype='date', constraint='month', column='event_date')
@foreign_value_init
class AccessAudit(AuditEvent):
    """Audit record for login / logout / failed-login events."""
    access_type = models.CharField(max_length=1, choices=ACCESS_CHOICES.items())
    http_accept_fk = models.ForeignKey(
        HttpAccept, null=True, db_index=False, on_delete=models.PROTECT)
    http_accept = ForeignValue(http_accept_fk, truncate=True)
    trace_id = models.CharField(max_length=127, null=True, blank=True)
    # Optional (django-ified) settings.AUDIT_TRACE_ID_HEADER set by AuditcareConfig
    trace_id_header = None
    @property
    def description(self):
        # e.g. "Login: someuser"; empty user (anonymous) renders as "Login: "
        return f"{ACCESS_CHOICES[self.access_type]}: {self.user or ''}"
    @classmethod
    def create_audit(cls, request, user, access_type):
        '''Creates an instance of a Access log.'''
        audit = super().create_audit(request, user)
        audit.http_accept = request.META.get('HTTP_ACCEPT')
        audit.access_type = access_type
        if cls.trace_id_header is not None:
            audit.trace_id = request.META.get(cls.trace_id_header)
        return audit
    @classmethod
    def audit_login(cls, request, user, *args, **kwargs):
        # Record a successful login.
        audit = cls.create_audit(request, user, ACCESS_LOGIN)
        audit.save()
    @classmethod
    def audit_login_failed(cls, request, username, *args, **kwargs):
        # Record a failed login attempt for `username`.
        audit = cls.create_audit(request, username, ACCESS_FAILED)
        audit.save()
    @classmethod
    def audit_logout(cls, request, user):
        # Record a logout.
        audit = cls.create_audit(request, user, ACCESS_LOGOUT)
        audit.save()
def audit_login(sender, *, request, user, **kwargs):
    # Django user_logged_in signal receiver.
    AccessAudit.audit_login(request, user)  # success
def audit_logout(sender, *, request, user, **kwargs):
    # Django user_logged_out signal receiver.
    AccessAudit.audit_logout(request, user)
def audit_login_failed(sender, *, request, credentials, **kwargs):
    # Django user_login_failed signal receiver.
    AccessAudit.audit_login_failed(request, credentials["username"])
def get_domain(request):
    """Best-effort domain for `request`: the URL-derived value wins,
    falling back to request.domain; mismatches are logged."""
    from_url = get_domain_from_url(request.path)
    from_attr = getattr(request, "domain", None)
    if not from_attr:
        return from_url
    if not from_url:
        return from_attr
    if from_url != from_attr:
        log.error("domain mismatch for request %s: %r != %r",
                  request.path, from_url, from_attr)
    return from_url
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import re
from setuptools import setup, find_packages
from distutils.core import Command
class DepsCommand(Command):
    """A custom distutils command to print selective dependency groups.

    # show available dependency groups:
    python setup.py -q deps

    # print dependency list for specified groups
    python setup.py -q deps --dep-groups=core,vision

    # see all options:
    python setup.py -q deps --help

    Bug fix: the invalid-groups message nested double quotes inside a
    double-quoted f-string, a SyntaxError before Python 3.12 (PEP 701);
    single quotes inside are valid on every supported version.
    """
    description = 'show dependency groups and their packages'
    user_options = [
        # format: (long option, short option, description).
        ('dep-groups=', None, 'comma separated dependency groups'),
        ('dep-quote', None, 'quote each dependency'),
        ('dep-conda', None, 'adjust output for conda'),
    ]
    def initialize_options(self):
        """Set default values for options."""
        self.dep_groups = ''
        self.dep_quote = False
        self.dep_conda = False
    def finalize_options(self):
        """Post-process options."""
        pass
    def parse(self):
        """Split --dep-groups on commas into a list ('' -> [])."""
        arg = self.dep_groups.strip()
        return re.split(r' *, *', arg) if len(arg) else []
    def run(self):
        """Run command."""
        wanted_groups = self.parse()
        deps = []
        invalid_groups = []
        for grp in wanted_groups:
            if grp in dep_groups: deps.extend(dep_groups[grp])
            else: invalid_groups.append(grp)
        if invalid_groups or not wanted_groups:
            print("Available dependency groups:", ", ".join(sorted(dep_groups.keys())))
            if invalid_groups:
                print(f"Error: Invalid group name(s): {', '.join(invalid_groups)}")
                exit(1)
        else:
            # prepare for shell word splitting (no whitespace in items)
            # str.replace over re.sub(..., 0): same result, and the positional
            # `count` argument to re.sub is deprecated since Python 3.13.
            deps = [x.replace(" ", "") for x in sorted(set(deps))]
            if self.dep_conda:
                for i in range(len(deps)):
                    # strip pip-specific syntax
                    deps[i] = re.sub(r';.*', '', deps[i])
                    # rename mismatching package names
                    deps[i] = re.sub(r'^torch>', 'pytorch>', deps[i])
            if self.dep_quote:
                # for manual copy-n-paste (assuming no " in vars)
                print(" ".join(map(lambda x: f'"{x}"', deps)))
            else:
                # if fed directly to `pip install` via backticks/$() don't quote
                print(" ".join(deps))
# note: version is maintained inside fastai/version.py
exec(open('fastai/version.py').read())  # defines __version__ (trusted, repo-local file)
with open('README.md') as readme_file: readme = readme_file.read()  # used as long_description
# helper functions to make it easier to list dependencies not as a python list, but vertically w/ optional built-in comments to why a certain version of the dependency is listed
def cleanup(x):
    """Strip surrounding whitespace and any trailing '# ...' comment."""
    stripped = x.strip()
    return re.sub(r' *#.*', '', stripped)
def to_list(buffer):
    """Return one cleaned, non-empty entry per line of `buffer`."""
    cleaned = (cleanup(line) for line in buffer.splitlines())
    return [entry for entry in cleaned if entry]
### normal dependencies ###
#
# these get resolved and installed via either of these two:
#
# pip install fastai
# pip install -e .
#
# IMPORTANT: when updating these, please make sure to sync conda/meta.yaml
dep_groups = {
'core': to_list("""
bottleneck # performance-improvement for numpy
dataclasses ; python_version<'3.7'
fastprogress>=0.2.1
beautifulsoup4
matplotlib
numexpr # performance-improvement for numpy
numpy>=1.15
nvidia-ml-py3
pandas
packaging
Pillow
pyyaml
pynvx>=1.0.0 ; platform_system=="Darwin" # only pypi at the moment
requests
scipy
torch>=1.0.0
"""),
'text': to_list("""
spacy>=2.0.18
"""),
'vision': to_list("""
torchvision
"""),
}
requirements = [y for x in dep_groups.values() for y in x]
### developer dependencies ###
#
# anything else that's not required by a user to run the library, but
# either is an enhancement or a developer-build requirement goes here.
#
# the [dev] feature is documented here:
# https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies
#
# these, including the normal dependencies, get installed with:
#
# pip install "fastai[dev]"
#
# or via an editable install:
#
# pip install -e ".[dev]"
#
# some of the listed modules appear in test_requirements as well, as explained below.
#
dev_requirements = { 'dev' : to_list("""
coverage # make coverage
distro
ipython
jupyter
jupyter_contrib_nbextensions
nbconvert>=5.4
nbdime # help with nb diff/merge
nbformat
notebook>=5.7.0
pip>=9.0.1
pipreqs>=0.4.9
pytest>=4.4.0
pytest-xdist # make test-fast (faster parallel testing)
responses # for requests testing
traitlets
wheel>=0.30.0
""") }
### setup dependencies ###
# need at least setuptools>=36.2 to support syntax:
# dataclasses ; python_version<'3.7'
setup_requirements = to_list("""
pytest-runner
setuptools>=36.2
""")
# notes:
#
# * these deps will be installed locally under .eggs/ and will not be
# visible to pytest unless it's invoked via `python setup test`.
# Therefore it's the best to install them explicitly with:
# pip install -e .[dev]
#
### test dependencies ###
test_requirements = to_list("""
pytest
""")
# list of classifiers: https://pypi.org/pypi?%3Aaction=list_classifiers
# cmdclass wires the custom `deps` command; the rest is standard metadata.
setup(
    cmdclass = { 'deps': DepsCommand },
    name = 'fastai',
    version = __version__,
    packages = find_packages(),
    include_package_data = True,
    install_requires = requirements,
    setup_requires = setup_requirements,
    extras_require = dev_requirements,
    tests_require = test_requirements,
    python_requires  = '>=3.6',
    test_suite = 'tests',
    description = "fastai makes deep learning with PyTorch faster, more accurate, and easier",
    long_description = readme,
    long_description_content_type = 'text/markdown',
    keywords = 'fastai, deep learning, machine learning',
    license = "Apache Software License 2.0",
    url = 'https://github.com/fastai/fastai',
    author = "Jeremy Howard",
    author_email = 'info@fast.ai',
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    zip_safe = False,
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import re
from setuptools import setup, find_packages
from distutils.core import Command
class DepsCommand(Command):
    """A custom distutils command to print selective dependency groups.

    # show available dependency groups:
    python setup.py -q deps

    # print dependency list for specified groups
    python setup.py -q deps --dep-groups=core,vision

    # see all options:
    python setup.py -q deps --help
    """
    description = 'show dependency groups and their packages'
    user_options = [
        # format: (long option, short option, description).
        ('dep-groups=', None, 'comma separated dependency groups'),
        ('dep-quote', None, 'quote each dependency'),
        ('dep-conda', None, 'adjust output for conda'),
    ]
    def initialize_options(self):
        """Set default values for options."""
        self.dep_groups = ''
        self.dep_quote = False
        self.dep_conda = False
    def finalize_options(self):
        """Post-process options."""
        pass
    def parse(self):
        # Split --dep-groups on commas ('' -> []).
        arg = self.dep_groups.strip()
        return re.split(r' *, *', arg) if len(arg) else []
    def run(self):
        """Run command."""
        wanted_groups = self.parse()
        deps = []
        invalid_groups = []
        for grp in wanted_groups:
            if grp in dep_groups: deps.extend(dep_groups[grp])
            else: invalid_groups.append(grp)
        if invalid_groups or not wanted_groups:
            print("Available dependency groups:", ", ".join(sorted(dep_groups.keys())))
            if invalid_groups:
                print(f"Error: Invalid group name(s): {', '.join(invalid_groups)}")
                exit(1)
        else:
            # prepare for shell word splitting (no whitespace in items)
            # NOTE(review): re.sub's positional count argument is deprecated
            # in Python 3.13 -- prefer count=0 keyword or str.replace.
            deps = [re.sub(" ", "", x, 0) for x in sorted(set(deps))]
            if self.dep_conda:
                for i in range(len(deps)):
                    # strip pip-specific syntax
                    deps[i] = re.sub(r';.*', '', deps[i])
                    # rename mismatching package names
                    deps[i] = re.sub(r'^torch>', 'pytorch>', deps[i])
            if self.dep_quote:
                # for manual copy-n-paste (assuming no " in vars)
                print(" ".join(map(lambda x: f'"{x}"', deps)))
            else:
                # if fed directly to `pip install` via backticks/$() don't quote
                print(" ".join(deps))
# note: version is maintained inside fastai/version.py
exec(open('fastai/version.py').read())  # defines __version__ in this namespace for setup() below
with open('README.md') as readme_file: readme = readme_file.read()  # used as the PyPI long description
# Helpers that let dependencies be listed vertically inside triple-quoted
# strings, with optional trailing "# ..." comments explaining version pins.
def cleanup(x):
    """Strip surrounding whitespace and any trailing '# ...' comment from *x*."""
    return re.sub(r' *#.*', '', x.strip())
def to_list(buffer):
    """Split *buffer* into lines, clean each, and drop the empty results."""
    return [cleaned for cleaned in (cleanup(line) for line in buffer.splitlines()) if cleaned]
### normal dependencies ###
#
# these get resolved and installed via either of these two:
#
# pip install fastai
# pip install -e .
#
# IMPORTANT: when updating these, please make sure to sync conda/meta.yaml
dep_groups = {
'core': to_list("""
bottleneck # performance-improvement for numpy
dataclasses ; python_version<'3.7'
fastprogress>=0.2.1
beautifulsoup4
matplotlib
numexpr # performance-improvement for numpy
numpy>=1.15
nvidia-ml-py3
pandas
packaging
Pillow
pyyaml
pynvx>=1.0.0 ; platform_system=="Darwin" # only pypi at the moment
requests
scipy
torch>=1.0.0
"""),
'text': to_list("""
spacy>=2.0.18
"""),
'vision': to_list("""
torchvision
"""),
}
requirements = [y for x in dep_groups.values() for y in x]
### developer dependencies ###
#
# anything else that's not required by a user to run the library, but
# either is an enhancement or a developer-build requirement goes here.
#
# the [dev] feature is documented here:
# https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies
#
# these, including the normal dependencies, get installed with:
#
# pip install "fastai[dev]"
#
# or via an editable install:
#
# pip install -e ".[dev]"
#
# some of the listed modules appear in test_requirements as well, as explained below.
#
dev_requirements = { 'dev' : to_list("""
coverage # make coverage
distro
ipython
jupyter
jupyter_contrib_nbextensions
nbconvert>=5.4
nbdime # help with nb diff/merge
nbformat
notebook>=5.7.0
pip>=9.0.1
pipreqs>=0.4.9
pytest>=4.4.0
pytest-xdist # make test-fast (faster parallel testing)
responses # for requests testing
traitlets
wheel>=0.30.0
""") }
### setup dependencies ###
# need at least setuptools>=36.2 to support syntax:
# dataclasses ; python_version<'3.7'
setup_requirements = to_list("""
pytest-runner
setuptools>=36.2
""")
# notes:
#
# * these deps will be installed locally under .eggs/ and will not be
# visible to pytest unless it's invoked via `python setup test`.
# Therefore it's the best to install them explicitly with:
# pip install -e .[dev]
#
### test dependencies ###
test_requirements = to_list("""
pytest
""")
# list of classifiers: https://pypi.org/pypi?%3Aaction=list_classifiers
setup(
    cmdclass = { 'deps': DepsCommand },  # enables the custom `python setup.py deps` command above
    name = 'fastai',
    version = __version__,  # injected by exec'ing fastai/version.py earlier in this file
    packages = find_packages(),
    include_package_data = True,
    install_requires = requirements,
    setup_requires = setup_requirements,
    extras_require = dev_requirements,
    tests_require = test_requirements,
    python_requires = '>=3.6',
    test_suite = 'tests',
    description = "fastai makes deep learning with PyTorch faster, more accurate, and easier",
    long_description = readme,  # contents of README.md, read above
    long_description_content_type = 'text/markdown',
    keywords = 'fastai, deep learning, machine learning',
    license = "Apache Software License 2.0",
    url = 'https://github.com/fastai/fastai',
    author = "Jeremy Howard",
    author_email = 'info@fast.ai',
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    zip_safe = False,
)
|
import re
import yaml
from . import gcloud
from .cluster_config import ClusterConfig
# Spark/HDFS/Dataproc properties applied to every cluster; individual entries
# can be overridden with --properties.
DEFAULT_PROPERTIES = {
    "spark:spark.task.maxFailures": "20",
    "spark:spark.driver.extraJavaOptions": "-Xss4M",
    "spark:spark.executor.extraJavaOptions": "-Xss4M",
    'spark:spark.speculation': 'true',
    "hdfs:dfs.replication": "1",
    'dataproc:dataproc.logging.stackdriver.enable': 'false',
    'dataproc:dataproc.monitoring.stackdriver.enable': 'false'
}
# leader (master) machine type to memory map (GB), used for setting
# spark.driver.memory property
MACHINE_MEM = {
    'n1-standard-1': 3.75,
    'n1-standard-2': 7.5,
    'n1-standard-4': 15,
    'n1-standard-8': 30,
    'n1-standard-16': 60,
    'n1-standard-32': 120,
    'n1-standard-64': 240,
    'n1-highmem-2': 13,
    'n1-highmem-4': 26,
    'n1-highmem-8': 52,
    'n1-highmem-16': 104,
    'n1-highmem-32': 208,
    'n1-highmem-64': 416,
    'n1-highcpu-2': 1.8,
    'n1-highcpu-4': 3.6,
    'n1-highcpu-8': 7.2,
    'n1-highcpu-16': 14.4,
    'n1-highcpu-32': 28.8,
    'n1-highcpu-64': 57.6,
    'n2-standard-2': 8,
    'n2-standard-4': 16,
    'n2-standard-8': 32,
    'n2-standard-16': 64,
    'n2-standard-32': 128,
    'n2-standard-48': 192,
    'n2-standard-64': 256,
    'n2-standard-80': 320,
    'n2-highmem-2': 16,
    'n2-highmem-4': 32,
    'n2-highmem-8': 64,
    'n2-highmem-16': 128,
    'n2-highmem-32': 256,
    'n2-highmem-48': 384,
    'n2-highmem-64': 512,
    'n2-highmem-80': 640,
    'n2-highcpu-2': 2,
    'n2-highcpu-4': 4,
    'n2-highcpu-8': 8,
    'n2-highcpu-16': 16,
    'n2-highcpu-32': 32,
    'n2-highcpu-48': 48,
    'n2-highcpu-64': 64,
    'n2-highcpu-80': 80,
    'n2d-standard-2': 8,
    'n2d-standard-4': 16,
    'n2d-standard-8': 32,
    'n2d-standard-16': 64,
    'n2d-standard-32': 128,
    'n2d-standard-48': 192,
    'n2d-standard-64': 256,
    'n2d-standard-80': 320,
    'n2d-standard-96': 384,
    'n2d-standard-128': 512,
    'n2d-standard-224': 896,
    'n2d-highmem-2': 16,
    'n2d-highmem-4': 32,
    'n2d-highmem-8': 64,
    'n2d-highmem-16': 128,
    'n2d-highmem-32': 256,
    'n2d-highmem-48': 384,
    'n2d-highmem-64': 512,
    'n2d-highmem-80': 640,
    'n2d-highmem-96': 786,  # NOTE(review): GCP documents n2d-highmem-96 at 768 GB — confirm
    'n2d-highcpu-2': 2,
    'n2d-highcpu-4': 4,
    'n2d-highcpu-8': 8,
    'n2d-highcpu-16': 16,
    'n2d-highcpu-32': 32,
    'n2d-highcpu-48': 48,
    'n2d-highcpu-64': 64,
    'n2d-highcpu-80': 80,
    'n2d-highcpu-96': 96,
    'n2d-highcpu-128': 128,
    'n2d-highcpu-224': 224,
    'e2-standard-2': 8,
    'e2-standard-4': 16,
    'e2-standard-8': 32,
    'e2-standard-16': 64,
    'e2-highmem-2': 16,
    'e2-highmem-4': 32,
    'e2-highmem-8': 64,
    'e2-highmem-16': 128,
    'e2-highcpu-2': 2,
    'e2-highcpu-4': 4,
    'e2-highcpu-8': 8,
    'e2-highcpu-16': 16,
    'm1-ultramem-40': 961,
    'm1-ultramem-80': 1922,
    'm1-ultramem-160': 3844,
    'm1-megamem-96': 1433,
    # fixed: GCP's machine types are m2-ultramem-208 / m2-ultramem-416
    # (5888 GB / 11776 GB); the previous keys '2084'/'4164' do not exist.
    'm2-ultramem-208': 5888,
    'm2-ultramem-416': 11776,
    'c2-standard-4': 16,
    'c2-standard-8': 32,
    'c2-standard-16': 64,
    'c2-standard-30': 120,
    'c2-standard-60': 240,
}
# Compute region -> VEP data replicate bucket suffix; VEP is only offered in
# regions that have a local replicate (to avoid egress charges).
REGION_TO_REPLICATE_MAPPING = {
    'us-central1': 'us',
    'us-east1': 'us',
    'us-east4': 'us',
    'us-west1': 'us',
    'us-west2': 'us',
    'us-west3': 'us',
    # Europe != EU
    'europe-north1': 'eu',
    'europe-west1': 'eu',
    'europe-west2': 'uk',
    'europe-west3': 'eu',
    'europe-west4': 'eu',
    'australia-southeast1': 'aus-sydney'
}
# Requester-pays buckets that back the Hail annotation database.
ANNOTATION_DB_BUCKETS = ["hail-datasets-us", "hail-datasets-eu"]
# Dataproc image version new clusters are created with.
IMAGE_VERSION = '2.0.29-debian10'
def init_parser(parser):
    """Register every `hailctl dataproc start` command-line flag on *parser*."""
    parser.add_argument('name', type=str, help='Cluster name.')
    # arguments with default parameters
    parser.add_argument('--master-machine-type', '--master', '-m', default='n1-highmem-8', type=str,
                        help='Master machine type (default: %(default)s).')
    parser.add_argument('--master-memory-fraction', default=0.8, type=float,
                        help='Fraction of master memory allocated to the JVM. '
                             'Use a smaller value to reserve more memory '
                             'for Python. (default: %(default)s)')
    parser.add_argument('--master-boot-disk-size', default=100, type=int,
                        help='Disk size of master machine, in GB (default: %(default)s).')
    parser.add_argument('--num-master-local-ssds', default=0, type=int,
                        help='Number of local SSDs to attach to the master machine (default: %(default)s).')
    parser.add_argument('--num-secondary-workers', '--num-preemptible-workers', '--n-pre-workers', '-p', default=0, type=int,
                        help='Number of secondary (preemptible) worker machines (default: %(default)s).')
    parser.add_argument('--num-worker-local-ssds', default=0, type=int,
                        help='Number of local SSDs to attach to each worker machine (default: %(default)s).')
    parser.add_argument('--num-workers', '--n-workers', '-w', default=2, type=int,
                        help='Number of worker machines (default: %(default)s).')
    parser.add_argument('--secondary-worker-boot-disk-size', '--preemptible-worker-boot-disk-size', default=40, type=int,
                        help='Disk size of secondary (preemptible) worker machines, in GB (default: %(default)s).')
    parser.add_argument('--worker-boot-disk-size', default=40, type=int,
                        help='Disk size of worker machines, in GB (default: %(default)s).')
    parser.add_argument('--worker-machine-type', '--worker',
                        help='Worker machine type (default: n1-standard-8, or n1-highmem-8 with --vep).')
    parser.add_argument('--region',
                        help='Compute region for the cluster.')
    parser.add_argument('--zone',
                        help='Compute zone for the cluster.')
    parser.add_argument('--properties',
                        help='Additional configuration properties for the cluster')
    parser.add_argument('--metadata',
                        help='Comma-separated list of metadata to add: KEY1=VALUE1,KEY2=VALUE2...')
    parser.add_argument('--packages', '--pkgs',
                        help='Comma-separated list of Python packages to be installed on the master node.')
    parser.add_argument('--project', help='Google Cloud project to start cluster (defaults to currently set project).')
    parser.add_argument('--configuration',
                        help='Google Cloud configuration to start cluster (defaults to currently set configuration).')
    parser.add_argument('--max-idle', type=str, help='If specified, maximum idle time before shutdown (e.g. 60m).')
    # --expiration-time and --max-age are mutually exclusive ways to bound cluster lifetime
    max_age_group = parser.add_mutually_exclusive_group()
    max_age_group.add_argument('--expiration-time', type=str, help='If specified, time at which cluster is shutdown (e.g. 2020-01-01T00:00:00Z).')
    max_age_group.add_argument('--max-age', type=str, help='If specified, maximum age before shutdown (e.g. 60m).')
    parser.add_argument('--bucket', type=str,
                        help='The Google Cloud Storage bucket to use for cluster staging (just the bucket name, no gs:// prefix).')
    parser.add_argument('--network', type=str, help='the network for all nodes in this cluster')
    parser.add_argument('--service-account', type=str, help='The Google Service Account to use for cluster creation (default to the Compute Engine service account).')
    # NOTE(review): "mastern" typo below is in user-visible help text; fixing it changes runtime output
    parser.add_argument('--master-tags', type=str, help='comma-separated list of instance tags to apply to the mastern node')
    parser.add_argument('--scopes', help='Specifies access scopes for the node instances')
    parser.add_argument('--wheel', help='Non-default Hail installation. Warning: experimental.')
    # initialization action flags
    parser.add_argument('--init', default='', help='Comma-separated list of init scripts to run.')
    parser.add_argument('--init_timeout', default='20m',
                        help='Flag to specify a timeout period for the initialization action')
    parser.add_argument('--vep',
                        help='Install VEP for the specified reference genome.',
                        required=False,
                        choices=['GRCh37', 'GRCh38'])
    parser.add_argument('--dry-run', action='store_true', help="Print gcloud dataproc command, but don't run it.")
    # memory partitioning flags (consumed by main() below)
    parser.add_argument('--no-off-heap-memory', action='store_true',
                        help="If true, don't partition JVM memory between hail heap and JVM heap")
    parser.add_argument('--big-executors', action='store_true',
                        help="If true, double memory allocated per executor, using half the cores of the cluster with an extra large memory allotment per core.")
    parser.add_argument('--off-heap-memory-fraction', type=float, default=0.6,
                        help="Minimum fraction of worker memory dedicated to off-heap Hail values.")
    parser.add_argument('--off-heap-memory-hard-limit', action='store_true',
                        help="If true, limit off-heap allocations to the dedicated fraction")
    parser.add_argument('--yarn-memory-fraction', type=float,
                        help="Fraction of machine memory to allocate to the yarn container scheduler.",
                        default=0.95)
    # requester pays
    parser.add_argument('--requester-pays-allow-all',
                        help="Allow reading from all requester-pays buckets.",
                        action='store_true',
                        required=False)
    parser.add_argument('--requester-pays-allow-buckets',
                        help="Comma-separated list of requester-pays buckets to allow reading from.")
    parser.add_argument('--requester-pays-allow-annotation-db',
                        action='store_true',
                        help="Allows reading from any of the requester-pays buckets that hold data for the annotation database.")
    parser.add_argument('--debug-mode',
                        action='store_true',
                        help="Enable debug features on created cluster (heap dump on out-of-memory error)")
async def main(args, pass_through_args):
    """Assemble and run the `gcloud dataproc clusters create` command for *args*.

    Resolves the cluster configuration (properties, metadata, init actions,
    machine shapes, YARN/Spark memory partitioning, requester-pays settings),
    prints the resulting gcloud command, and — unless --dry-run was given —
    starts the cluster and optionally tags its master node.

    Fixes vs previous revision:
    - the "Supported regions" f-string reused double quotes inside its
      replacement field, a SyntaxError on Python < 3.12 — now single-quoted;
    - the mutually-exclusive-flags error message had an unbalanced quote.
    """
    import pkg_resources  # pylint: disable=import-outside-toplevel
    conf = ClusterConfig()
    conf.extend_flag('image-version', IMAGE_VERSION)
    if not pkg_resources.resource_exists('hailtop.hailctl', "deploy.yaml"):
        raise RuntimeError("package has no 'deploy.yaml' file")
    deploy_metadata = yaml.safe_load(
        pkg_resources.resource_stream('hailtop.hailctl', "deploy.yaml"))['dataproc']
    conf.extend_flag('properties', DEFAULT_PROPERTIES)
    if args.properties:
        conf.parse_and_extend('properties', args.properties)
    if args.debug_mode:
        # heap dumps + unsuppressed stack traces to ease OOM debugging
        conf.extend_flag('properties', {
            "spark:spark.driver.extraJavaOptions": "-Xss4M -XX:+HeapDumpOnOutOfMemoryError -XX:-OmitStackTraceInFastThrow",
            "spark:spark.executor.extraJavaOptions": "-Xss4M -XX:+HeapDumpOnOutOfMemoryError -XX:-OmitStackTraceInFastThrow",
        })
    # default to highmem machines if using VEP
    if not args.worker_machine_type:
        args.worker_machine_type = 'n1-highmem-8' if args.vep else 'n1-standard-8'
    # default initialization script to start up cluster with
    conf.extend_flag('initialization-actions',
                     [deploy_metadata['init_notebook.py']])
    # requester pays support
    if args.requester_pays_allow_all or args.requester_pays_allow_buckets or args.requester_pays_allow_annotation_db:
        if args.requester_pays_allow_all and args.requester_pays_allow_buckets:
            # fixed: message previously had an unterminated quote around the second flag name
            raise RuntimeError("Cannot specify both 'requester_pays_allow_all' and 'requester_pays_allow_buckets'")
        if args.requester_pays_allow_all:
            requester_pays_mode = "AUTO"
        else:
            requester_pays_mode = "CUSTOM"
            requester_pays_bucket_sources = []
            if args.requester_pays_allow_buckets:
                requester_pays_bucket_sources.append(args.requester_pays_allow_buckets)
            if args.requester_pays_allow_annotation_db:
                requester_pays_bucket_sources.extend(ANNOTATION_DB_BUCKETS)
            conf.extend_flag("properties", {"spark:spark.hadoop.fs.gs.requester.pays.buckets": ",".join(requester_pays_bucket_sources)})
        # Need to pick requester pays project.
        requester_pays_project = args.project if args.project else gcloud.get_config("project")
        conf.extend_flag("properties", {"spark:spark.hadoop.fs.gs.requester.pays.mode": requester_pays_mode,
                                        "spark:spark.hadoop.fs.gs.requester.pays.project.id": requester_pays_project})
    # gcloud version 277 and onwards requires you to specify a region. Let's just require it for all hailctl users for consistency.
    if args.region:
        project_region = args.region
    else:
        project_region = gcloud.get_config("dataproc/region")
    if not project_region:
        raise RuntimeError("Could not determine dataproc region. Use --region argument to hailctl, or use `gcloud config set dataproc/region <my-region>` to set a default.")
    # add VEP init script
    if args.vep:
        # VEP is too expensive if you have to pay egress charges. We must choose the right replicate.
        replicate = REGION_TO_REPLICATE_MAPPING.get(project_region)
        if replicate is None:
            # fixed: the last line reused double quotes inside the f-string
            # replacement field — a SyntaxError before Python 3.12 (PEP 701)
            raise RuntimeError(f"The --vep argument is not currently provided in your region.\n"
                               f"  Please contact the Hail team on https://discuss.hail.is for support.\n"
                               f"  Your region: {project_region}\n"
                               f"  Supported regions: {', '.join(REGION_TO_REPLICATE_MAPPING.keys())}")
        print(f"Pulling VEP data from bucket in {replicate}.")
        conf.extend_flag('metadata', {"VEP_REPLICATE": replicate})
        vep_config_path = "/vep_data/vep-gcloud.json"
        conf.extend_flag('metadata', {"VEP_CONFIG_PATH": vep_config_path, "VEP_CONFIG_URI": f"file://{vep_config_path}"})
        conf.extend_flag('initialization-actions', [deploy_metadata[f'vep-{args.vep}.sh']])
    # add custom init scripts
    if args.init:
        conf.extend_flag('initialization-actions', args.init.split(','))
    if args.metadata:
        conf.parse_and_extend('metadata', args.metadata)
    # pick the wheel to install: user-supplied or the one shipped with this release
    wheel = args.wheel or deploy_metadata['wheel']
    conf.extend_flag('metadata', {'WHEEL': wheel})
    # if Python packages requested, add metadata variable
    packages = deploy_metadata['pip_dependencies'].strip('|').split('|||')
    metadata_pkgs = conf.flags['metadata'].get('PKGS')
    split_regex = r'[|,]'
    if metadata_pkgs:
        packages.extend(re.split(split_regex, metadata_pkgs))
    if args.packages:
        packages.extend(re.split(split_regex, args.packages))
    conf.extend_flag('metadata', {'PKGS': '|'.join(set(packages))})

    def disk_size(size):
        # VEP needs extra disk for its cache; enforce a 200GB floor
        if args.vep:
            size = max(size, 200)
        return str(size) + 'GB'

    conf.extend_flag('properties',
                     {"spark:spark.driver.memory": "{driver_memory}g".format(
                         driver_memory=str(int(MACHINE_MEM[args.master_machine_type] * args.master_memory_fraction)))})
    conf.flags['master-machine-type'] = args.master_machine_type
    conf.flags['master-boot-disk-size'] = '{}GB'.format(args.master_boot_disk_size)
    conf.flags['num-master-local-ssds'] = args.num_master_local_ssds
    conf.flags['num-secondary-workers'] = args.num_secondary_workers
    conf.flags['num-worker-local-ssds'] = args.num_worker_local_ssds
    conf.flags['num-workers'] = args.num_workers
    conf.flags['secondary-worker-boot-disk-size'] = disk_size(args.secondary_worker_boot_disk_size)
    conf.flags['worker-boot-disk-size'] = disk_size(args.worker_boot_disk_size)
    conf.flags['worker-machine-type'] = args.worker_machine_type
    # NOTE(review): args.big_executors is accepted by init_parser but never read
    # here — confirm whether the doubling behavior was meant to be implemented.
    if not args.no_off_heap_memory:
        worker_memory = MACHINE_MEM[args.worker_machine_type]
        # A Google support engineer recommended the strategy of passing the YARN
        # config params, and the default value of 95% of machine memory to give to YARN.
        # yarn.nodemanager.resource.memory-mb - total memory per machine
        # yarn.scheduler.maximum-allocation-mb - max memory to allocate to each container
        available_memory_fraction = args.yarn_memory_fraction
        available_memory_mb = int(worker_memory * available_memory_fraction * 1024)
        cores_per_machine = int(args.worker_machine_type.split('-')[-1])
        executor_cores = min(cores_per_machine, 4)
        available_memory_per_core_mb = available_memory_mb // cores_per_machine
        memory_per_executor_mb = int(available_memory_per_core_mb * executor_cores)
        off_heap_mb = int(memory_per_executor_mb * args.off_heap_memory_fraction)
        on_heap_mb = memory_per_executor_mb - off_heap_mb
        if args.off_heap_memory_hard_limit:
            off_heap_memory_per_core = off_heap_mb // executor_cores
        else:
            off_heap_memory_per_core = available_memory_per_core_mb
        print(f"hailctl dataproc: Creating a cluster with workers of machine type {args.worker_machine_type}.\n"
              f"  Allocating {memory_per_executor_mb} MB of memory per executor ({executor_cores} cores),\n"
              f"  with at least {off_heap_mb} MB for Hail off-heap values and {on_heap_mb} MB for the JVM."
              f"  Using a maximum Hail memory reservation of {off_heap_memory_per_core} MB per core.")
        conf.extend_flag('properties',
                         {
                             'yarn:yarn.nodemanager.resource.memory-mb': f'{available_memory_mb}',
                             'yarn:yarn.scheduler.maximum-allocation-mb': f'{executor_cores * available_memory_per_core_mb}',
                             'spark:spark.executor.cores': f'{executor_cores}',
                             'spark:spark.executor.memory': f'{on_heap_mb}m',
                             'spark:spark.executor.memoryOverhead': f'{off_heap_mb}m',
                             'spark:spark.memory.storageFraction': '0.2',
                             'spark:spark.executorEnv.HAIL_WORKER_OFF_HEAP_MEMORY_PER_CORE_MB': str(
                                 off_heap_memory_per_core),
                         }
                         )
    if args.region:
        conf.flags['region'] = args.region
    if args.zone:
        conf.flags['zone'] = args.zone
    conf.flags['initialization-action-timeout'] = args.init_timeout
    if args.network:
        conf.flags['network'] = args.network
    if args.configuration:
        conf.flags['configuration'] = args.configuration
    if args.project:
        conf.flags['project'] = args.project
    if args.bucket:
        conf.flags['bucket'] = args.bucket
    if args.scopes:
        conf.flags['scopes'] = args.scopes
    account = gcloud.get_config("account")
    if account:
        # label values must match [0-9a-z_-] and fit in 63 characters
        conf.flags['labels'] = 'creator=' + re.sub(r'[^0-9a-z_\-]', '_', account.lower())[:63]
    # rewrite metadata and properties to escape them
    conf.flags['metadata'] = '^|||^' + '|||'.join(f'{k}={v}' for k, v in conf.flags['metadata'].items())
    conf.flags['properties'] = '^|||^' + '|||'.join(f'{k}={v}' for k, v in conf.flags['properties'].items())
    # command to start cluster
    cmd = conf.get_command(args.name)
    # NOTE(review): args.beta is not defined by init_parser in this module —
    # presumably added by a shared parent parser; confirm before refactoring.
    if args.beta:
        cmd.insert(1, 'beta')
    if args.max_idle:
        cmd.append('--max-idle={}'.format(args.max_idle))
    if args.max_age:
        cmd.append('--max-age={}'.format(args.max_age))
    if args.expiration_time:
        # NOTE(review): gcloud spells this flag --expiration-time (hyphen);
        # confirm the underscore form actually works before changing it.
        cmd.append('--expiration_time={}'.format(args.expiration_time))
    if args.service_account:
        cmd.append('--service-account={}'.format(args.service_account))
    cmd.extend(pass_through_args)
    # print underlying gcloud command
    print(' '.join(cmd[:5]) + ' \\\n    ' + ' \\\n    '.join(cmd[5:]))
    # spin up cluster
    if not args.dry_run:
        print("Starting cluster '{}'...".format(args.name))
        gcloud.run(cmd[1:])
    # apply instance tags to the master node, if requested
    if args.master_tags:
        add_tags_command = ['compute', 'instances', 'add-tags', args.name + '-m', '--tags', args.master_tags]
        if args.project:
            add_tags_command.append(f"--project={args.project}")
        if args.zone:
            add_tags_command.append(f"--zone={args.zone}")
        print('gcloud ' + ' '.join(add_tags_command))
        if not args.dry_run:
            gcloud.run(add_tags_command)
| import re
import yaml
from . import gcloud
from .cluster_config import ClusterConfig
# Spark/HDFS/Dataproc properties applied to every cluster; individual entries
# can be overridden with --properties.
DEFAULT_PROPERTIES = {
    "spark:spark.task.maxFailures": "20",
    "spark:spark.driver.extraJavaOptions": "-Xss4M",
    "spark:spark.executor.extraJavaOptions": "-Xss4M",
    'spark:spark.speculation': 'true',
    "hdfs:dfs.replication": "1",
    'dataproc:dataproc.logging.stackdriver.enable': 'false',
    'dataproc:dataproc.monitoring.stackdriver.enable': 'false'
}
# leader (master) machine type to memory map (GB), used for setting
# spark.driver.memory property
MACHINE_MEM = {
    'n1-standard-1': 3.75,
    'n1-standard-2': 7.5,
    'n1-standard-4': 15,
    'n1-standard-8': 30,
    'n1-standard-16': 60,
    'n1-standard-32': 120,
    'n1-standard-64': 240,
    'n1-highmem-2': 13,
    'n1-highmem-4': 26,
    'n1-highmem-8': 52,
    'n1-highmem-16': 104,
    'n1-highmem-32': 208,
    'n1-highmem-64': 416,
    'n1-highcpu-2': 1.8,
    'n1-highcpu-4': 3.6,
    'n1-highcpu-8': 7.2,
    'n1-highcpu-16': 14.4,
    'n1-highcpu-32': 28.8,
    'n1-highcpu-64': 57.6,
    'n2-standard-2': 8,
    'n2-standard-4': 16,
    'n2-standard-8': 32,
    'n2-standard-16': 64,
    'n2-standard-32': 128,
    'n2-standard-48': 192,
    'n2-standard-64': 256,
    'n2-standard-80': 320,
    'n2-highmem-2': 16,
    'n2-highmem-4': 32,
    'n2-highmem-8': 64,
    'n2-highmem-16': 128,
    'n2-highmem-32': 256,
    'n2-highmem-48': 384,
    'n2-highmem-64': 512,
    'n2-highmem-80': 640,
    'n2-highcpu-2': 2,
    'n2-highcpu-4': 4,
    'n2-highcpu-8': 8,
    'n2-highcpu-16': 16,
    'n2-highcpu-32': 32,
    'n2-highcpu-48': 48,
    'n2-highcpu-64': 64,
    'n2-highcpu-80': 80,
    'n2d-standard-2': 8,
    'n2d-standard-4': 16,
    'n2d-standard-8': 32,
    'n2d-standard-16': 64,
    'n2d-standard-32': 128,
    'n2d-standard-48': 192,
    'n2d-standard-64': 256,
    'n2d-standard-80': 320,
    'n2d-standard-96': 384,
    'n2d-standard-128': 512,
    'n2d-standard-224': 896,
    'n2d-highmem-2': 16,
    'n2d-highmem-4': 32,
    'n2d-highmem-8': 64,
    'n2d-highmem-16': 128,
    'n2d-highmem-32': 256,
    'n2d-highmem-48': 384,
    'n2d-highmem-64': 512,
    'n2d-highmem-80': 640,
    'n2d-highmem-96': 786,  # NOTE(review): GCP documents n2d-highmem-96 at 768 GB — confirm
    'n2d-highcpu-2': 2,
    'n2d-highcpu-4': 4,
    'n2d-highcpu-8': 8,
    'n2d-highcpu-16': 16,
    'n2d-highcpu-32': 32,
    'n2d-highcpu-48': 48,
    'n2d-highcpu-64': 64,
    'n2d-highcpu-80': 80,
    'n2d-highcpu-96': 96,
    'n2d-highcpu-128': 128,
    'n2d-highcpu-224': 224,
    'e2-standard-2': 8,
    'e2-standard-4': 16,
    'e2-standard-8': 32,
    'e2-standard-16': 64,
    'e2-highmem-2': 16,
    'e2-highmem-4': 32,
    'e2-highmem-8': 64,
    'e2-highmem-16': 128,
    'e2-highcpu-2': 2,
    'e2-highcpu-4': 4,
    'e2-highcpu-8': 8,
    'e2-highcpu-16': 16,
    'm1-ultramem-40': 961,
    'm1-ultramem-80': 1922,
    'm1-ultramem-160': 3844,
    'm1-megamem-96': 1433,
    # fixed: GCP's machine types are m2-ultramem-208 / m2-ultramem-416
    # (5888 GB / 11776 GB); the previous keys '2084'/'4164' do not exist.
    'm2-ultramem-208': 5888,
    'm2-ultramem-416': 11776,
    'c2-standard-4': 16,
    'c2-standard-8': 32,
    'c2-standard-16': 64,
    'c2-standard-30': 120,
    'c2-standard-60': 240,
}
# Compute region -> VEP data replicate bucket suffix; VEP is only offered in
# regions that have a local replicate (to avoid egress charges).
REGION_TO_REPLICATE_MAPPING = {
    'us-central1': 'us',
    'us-east1': 'us',
    'us-east4': 'us',
    'us-west1': 'us',
    'us-west2': 'us',
    'us-west3': 'us',
    # Europe != EU
    'europe-north1': 'eu',
    'europe-west1': 'eu',
    'europe-west2': 'uk',
    'europe-west3': 'eu',
    'europe-west4': 'eu',
    'australia-southeast1': 'aus-sydney'
}
# Requester-pays buckets that back the Hail annotation database.
ANNOTATION_DB_BUCKETS = ["hail-datasets-us", "hail-datasets-eu"]
# Dataproc image version new clusters are created with.
IMAGE_VERSION = '2.0.29-debian10'
def init_parser(parser):
    """Register every `hailctl dataproc start` command-line flag on *parser*."""
    parser.add_argument('name', type=str, help='Cluster name.')
    # arguments with default parameters
    parser.add_argument('--master-machine-type', '--master', '-m', default='n1-highmem-8', type=str,
                        help='Master machine type (default: %(default)s).')
    parser.add_argument('--master-memory-fraction', default=0.8, type=float,
                        help='Fraction of master memory allocated to the JVM. '
                             'Use a smaller value to reserve more memory '
                             'for Python. (default: %(default)s)')
    parser.add_argument('--master-boot-disk-size', default=100, type=int,
                        help='Disk size of master machine, in GB (default: %(default)s).')
    parser.add_argument('--num-master-local-ssds', default=0, type=int,
                        help='Number of local SSDs to attach to the master machine (default: %(default)s).')
    parser.add_argument('--num-secondary-workers', '--num-preemptible-workers', '--n-pre-workers', '-p', default=0, type=int,
                        help='Number of secondary (preemptible) worker machines (default: %(default)s).')
    parser.add_argument('--num-worker-local-ssds', default=0, type=int,
                        help='Number of local SSDs to attach to each worker machine (default: %(default)s).')
    parser.add_argument('--num-workers', '--n-workers', '-w', default=2, type=int,
                        help='Number of worker machines (default: %(default)s).')
    parser.add_argument('--secondary-worker-boot-disk-size', '--preemptible-worker-boot-disk-size', default=40, type=int,
                        help='Disk size of secondary (preemptible) worker machines, in GB (default: %(default)s).')
    parser.add_argument('--worker-boot-disk-size', default=40, type=int,
                        help='Disk size of worker machines, in GB (default: %(default)s).')
    parser.add_argument('--worker-machine-type', '--worker',
                        help='Worker machine type (default: n1-standard-8, or n1-highmem-8 with --vep).')
    parser.add_argument('--region',
                        help='Compute region for the cluster.')
    parser.add_argument('--zone',
                        help='Compute zone for the cluster.')
    parser.add_argument('--properties',
                        help='Additional configuration properties for the cluster')
    parser.add_argument('--metadata',
                        help='Comma-separated list of metadata to add: KEY1=VALUE1,KEY2=VALUE2...')
    parser.add_argument('--packages', '--pkgs',
                        help='Comma-separated list of Python packages to be installed on the master node.')
    parser.add_argument('--project', help='Google Cloud project to start cluster (defaults to currently set project).')
    parser.add_argument('--configuration',
                        help='Google Cloud configuration to start cluster (defaults to currently set configuration).')
    parser.add_argument('--max-idle', type=str, help='If specified, maximum idle time before shutdown (e.g. 60m).')
    # --expiration-time and --max-age are mutually exclusive ways to bound cluster lifetime
    max_age_group = parser.add_mutually_exclusive_group()
    max_age_group.add_argument('--expiration-time', type=str, help='If specified, time at which cluster is shutdown (e.g. 2020-01-01T00:00:00Z).')
    max_age_group.add_argument('--max-age', type=str, help='If specified, maximum age before shutdown (e.g. 60m).')
    parser.add_argument('--bucket', type=str,
                        help='The Google Cloud Storage bucket to use for cluster staging (just the bucket name, no gs:// prefix).')
    parser.add_argument('--network', type=str, help='the network for all nodes in this cluster')
    parser.add_argument('--service-account', type=str, help='The Google Service Account to use for cluster creation (default to the Compute Engine service account).')
    # NOTE(review): "mastern" typo below is in user-visible help text; fixing it changes runtime output
    parser.add_argument('--master-tags', type=str, help='comma-separated list of instance tags to apply to the mastern node')
    parser.add_argument('--scopes', help='Specifies access scopes for the node instances')
    parser.add_argument('--wheel', help='Non-default Hail installation. Warning: experimental.')
    # initialization action flags
    parser.add_argument('--init', default='', help='Comma-separated list of init scripts to run.')
    parser.add_argument('--init_timeout', default='20m',
                        help='Flag to specify a timeout period for the initialization action')
    parser.add_argument('--vep',
                        help='Install VEP for the specified reference genome.',
                        required=False,
                        choices=['GRCh37', 'GRCh38'])
    parser.add_argument('--dry-run', action='store_true', help="Print gcloud dataproc command, but don't run it.")
    # memory partitioning flags (consumed by main())
    parser.add_argument('--no-off-heap-memory', action='store_true',
                        help="If true, don't partition JVM memory between hail heap and JVM heap")
    parser.add_argument('--big-executors', action='store_true',
                        help="If true, double memory allocated per executor, using half the cores of the cluster with an extra large memory allotment per core.")
    parser.add_argument('--off-heap-memory-fraction', type=float, default=0.6,
                        help="Minimum fraction of worker memory dedicated to off-heap Hail values.")
    parser.add_argument('--off-heap-memory-hard-limit', action='store_true',
                        help="If true, limit off-heap allocations to the dedicated fraction")
    parser.add_argument('--yarn-memory-fraction', type=float,
                        help="Fraction of machine memory to allocate to the yarn container scheduler.",
                        default=0.95)
    # requester pays
    parser.add_argument('--requester-pays-allow-all',
                        help="Allow reading from all requester-pays buckets.",
                        action='store_true',
                        required=False)
    parser.add_argument('--requester-pays-allow-buckets',
                        help="Comma-separated list of requester-pays buckets to allow reading from.")
    parser.add_argument('--requester-pays-allow-annotation-db',
                        action='store_true',
                        help="Allows reading from any of the requester-pays buckets that hold data for the annotation database.")
    parser.add_argument('--debug-mode',
                        action='store_true',
                        help="Enable debug features on created cluster (heap dump on out-of-memory error)")
async def main(args, pass_through_args):
    """Assemble and run the ``gcloud dataproc clusters create`` command.

    Builds a ClusterConfig from the parsed CLI arguments (Spark/YARN memory
    properties, init actions, VEP data, requester-pays settings, ...), prints
    the resulting gcloud command, and executes it unless --dry-run was given.

    :param args: parsed argparse namespace for the ``create`` subcommand
    :param pass_through_args: extra flags forwarded verbatim to gcloud
    """
    import pkg_resources  # pylint: disable=import-outside-toplevel

    conf = ClusterConfig()
    conf.extend_flag('image-version', IMAGE_VERSION)

    # deploy.yaml pins the wheel and init-script locations for this release
    if not pkg_resources.resource_exists('hailtop.hailctl', "deploy.yaml"):
        raise RuntimeError("package has no 'deploy.yaml' file")
    deploy_metadata = yaml.safe_load(
        pkg_resources.resource_stream('hailtop.hailctl', "deploy.yaml"))['dataproc']

    conf.extend_flag('properties', DEFAULT_PROPERTIES)
    if args.properties:
        conf.parse_and_extend('properties', args.properties)

    if args.debug_mode:
        # keep heap dumps and full stack traces for post-mortem debugging
        conf.extend_flag('properties', {
            "spark:spark.driver.extraJavaOptions": "-Xss4M -XX:+HeapDumpOnOutOfMemoryError -XX:-OmitStackTraceInFastThrow",
            "spark:spark.executor.extraJavaOptions": "-Xss4M -XX:+HeapDumpOnOutOfMemoryError -XX:-OmitStackTraceInFastThrow",
        })

    # default to highmem machines if using VEP
    if not args.worker_machine_type:
        args.worker_machine_type = 'n1-highmem-8' if args.vep else 'n1-standard-8'

    # default initialization script to start up cluster with
    conf.extend_flag('initialization-actions',
                     [deploy_metadata['init_notebook.py']])

    # requester pays support
    if args.requester_pays_allow_all or args.requester_pays_allow_buckets or args.requester_pays_allow_annotation_db:
        if args.requester_pays_allow_all and args.requester_pays_allow_buckets:
            raise RuntimeError("Cannot specify both 'requester_pays_allow_all' and 'requester_pays_allow_buckets")
        if args.requester_pays_allow_all:
            requester_pays_mode = "AUTO"
        else:
            # CUSTOM mode limits requester-pays reads to an explicit bucket list
            requester_pays_mode = "CUSTOM"
            requester_pays_bucket_sources = []
            if args.requester_pays_allow_buckets:
                requester_pays_bucket_sources.append(args.requester_pays_allow_buckets)
            if args.requester_pays_allow_annotation_db:
                requester_pays_bucket_sources.extend(ANNOTATION_DB_BUCKETS)
            conf.extend_flag("properties", {"spark:spark.hadoop.fs.gs.requester.pays.buckets": ",".join(requester_pays_bucket_sources)})

        # Need to pick requester pays project.
        requester_pays_project = args.project if args.project else gcloud.get_config("project")

        conf.extend_flag("properties", {"spark:spark.hadoop.fs.gs.requester.pays.mode": requester_pays_mode,
                                        "spark:spark.hadoop.fs.gs.requester.pays.project.id": requester_pays_project})

    # gcloud version 277 and onwards requires you to specify a region. Let's just require it for all hailctl users for consistency.
    if args.region:
        project_region = args.region
    else:
        project_region = gcloud.get_config("dataproc/region")

    if not project_region:
        raise RuntimeError("Could not determine dataproc region. Use --region argument to hailctl, or use `gcloud config set dataproc/region <my-region>` to set a default.")

    # add VEP init script
    if args.vep:
        # VEP is too expensive if you have to pay egress charges. We must choose the right replicate.
        replicate = REGION_TO_REPLICATE_MAPPING.get(project_region)
        if replicate is None:
            raise RuntimeError(f"The --vep argument is not currently provided in your region.\n"
                               f" Please contact the Hail team on https://discuss.hail.is for support.\n"
                               f" Your region: {project_region}\n"
                               f" Supported regions: {', '.join(REGION_TO_REPLICATE_MAPPING.keys())}")
        print(f"Pulling VEP data from bucket in {replicate}.")
        conf.extend_flag('metadata', {"VEP_REPLICATE": replicate})
        vep_config_path = "/vep_data/vep-gcloud.json"
        conf.extend_flag('metadata', {"VEP_CONFIG_PATH": vep_config_path, "VEP_CONFIG_URI": f"file://{vep_config_path}"})
        conf.extend_flag('initialization-actions', [deploy_metadata[f'vep-{args.vep}.sh']])
    # add custom init scripts
    if args.init:
        conf.extend_flag('initialization-actions', args.init.split(','))

    if args.metadata:
        conf.parse_and_extend('metadata', args.metadata)
    wheel = args.wheel or deploy_metadata['wheel']
    conf.extend_flag('metadata', {'WHEEL': wheel})

    # if Python packages requested, add metadata variable
    packages = deploy_metadata['pip_dependencies'].strip('|').split('|||')
    metadata_pkgs = conf.flags['metadata'].get('PKGS')
    # user-supplied package lists may be separated by '|' or ','
    split_regex = r'[|,]'
    if metadata_pkgs:
        packages.extend(re.split(split_regex, metadata_pkgs))
    if args.packages:
        packages.extend(re.split(split_regex, args.packages))
    conf.extend_flag('metadata', {'PKGS': '|'.join(set(packages))})

    def disk_size(size):
        # VEP's cached data needs a larger boot disk
        if args.vep:
            size = max(size, 200)
        return str(size) + 'GB'

    conf.extend_flag('properties',
                     {"spark:spark.driver.memory": "{driver_memory}g".format(
                         driver_memory=str(int(MACHINE_MEM[args.master_machine_type] * args.master_memory_fraction)))})
    conf.flags['master-machine-type'] = args.master_machine_type
    conf.flags['master-boot-disk-size'] = '{}GB'.format(args.master_boot_disk_size)
    conf.flags['num-master-local-ssds'] = args.num_master_local_ssds
    conf.flags['num-secondary-workers'] = args.num_secondary_workers
    conf.flags['num-worker-local-ssds'] = args.num_worker_local_ssds
    conf.flags['num-workers'] = args.num_workers
    conf.flags['secondary-worker-boot-disk-size'] = disk_size(args.secondary_worker_boot_disk_size)
    conf.flags['worker-boot-disk-size'] = disk_size(args.worker_boot_disk_size)
    conf.flags['worker-machine-type'] = args.worker_machine_type

    if not args.no_off_heap_memory:
        worker_memory = MACHINE_MEM[args.worker_machine_type]

        # A Google support engineer recommended the strategy of passing the YARN
        # config params, and the default value of 95% of machine memory to give to YARN.
        # yarn.nodemanager.resource.memory-mb - total memory per machine
        # yarn.scheduler.maximum-allocation-mb - max memory to allocate to each container
        available_memory_fraction = args.yarn_memory_fraction
        available_memory_mb = int(worker_memory * available_memory_fraction * 1024)
        cores_per_machine = int(args.worker_machine_type.split('-')[-1])
        executor_cores = min(cores_per_machine, 4)
        available_memory_per_core_mb = available_memory_mb // cores_per_machine
        memory_per_executor_mb = int(available_memory_per_core_mb * executor_cores)

        # split each executor's budget between the JVM heap and Hail off-heap values
        off_heap_mb = int(memory_per_executor_mb * args.off_heap_memory_fraction)
        on_heap_mb = memory_per_executor_mb - off_heap_mb
        if args.off_heap_memory_hard_limit:
            off_heap_memory_per_core = off_heap_mb // executor_cores
        else:
            off_heap_memory_per_core = available_memory_per_core_mb

        print(f"hailctl dataproc: Creating a cluster with workers of machine type {args.worker_machine_type}.\n"
              f" Allocating {memory_per_executor_mb} MB of memory per executor ({executor_cores} cores),\n"
              f" with at least {off_heap_mb} MB for Hail off-heap values and {on_heap_mb} MB for the JVM."
              f" Using a maximum Hail memory reservation of {off_heap_memory_per_core} MB per core.")

        conf.extend_flag('properties',
                         {
                             'yarn:yarn.nodemanager.resource.memory-mb': f'{available_memory_mb}',
                             'yarn:yarn.scheduler.maximum-allocation-mb': f'{executor_cores * available_memory_per_core_mb}',
                             'spark:spark.executor.cores': f'{executor_cores}',
                             'spark:spark.executor.memory': f'{on_heap_mb}m',
                             'spark:spark.executor.memoryOverhead': f'{off_heap_mb}m',
                             'spark:spark.memory.storageFraction': '0.2',
                             'spark:spark.executorEnv.HAIL_WORKER_OFF_HEAP_MEMORY_PER_CORE_MB': str(
                                 off_heap_memory_per_core),
                         }
                         )

    if args.region:
        conf.flags['region'] = args.region
    if args.zone:
        conf.flags['zone'] = args.zone
    conf.flags['initialization-action-timeout'] = args.init_timeout
    if args.network:
        conf.flags['network'] = args.network
    if args.configuration:
        conf.flags['configuration'] = args.configuration
    if args.project:
        conf.flags['project'] = args.project
    if args.bucket:
        conf.flags['bucket'] = args.bucket
    if args.scopes:
        conf.flags['scopes'] = args.scopes

    account = gcloud.get_config("account")
    if account:
        # label the cluster with a gcloud-safe version of the creator's account
        conf.flags['labels'] = 'creator=' + re.sub(r'[^0-9a-z_\-]', '_', account.lower())[:63]

    # rewrite metadata and properties to escape them
    conf.flags['metadata'] = '^|||^' + '|||'.join(f'{k}={v}' for k, v in conf.flags['metadata'].items())
    conf.flags['properties'] = '^|||^' + '|||'.join(f'{k}={v}' for k, v in conf.flags['properties'].items())

    # command to start cluster
    cmd = conf.get_command(args.name)

    if args.beta:
        cmd.insert(1, 'beta')
    if args.max_idle:
        cmd.append('--max-idle={}'.format(args.max_idle))
    if args.max_age:
        cmd.append('--max-age={}'.format(args.max_age))
    if args.expiration_time:
        cmd.append('--expiration_time={}'.format(args.expiration_time))
    if args.service_account:
        cmd.append('--service-account={}'.format(args.service_account))

    cmd.extend(pass_through_args)

    # print underlying gcloud command
    print(' '.join(cmd[:5]) + ' \\\n ' + ' \\\n '.join(cmd[5:]))

    # spin up cluster
    if not args.dry_run:
        print("Starting cluster '{}'...".format(args.name))
        gcloud.run(cmd[1:])

    if args.master_tags:
        add_tags_command = ['compute', 'instances', 'add-tags', args.name + '-m', '--tags', args.master_tags]
        if args.project:
            add_tags_command.append(f"--project={args.project}")
        if args.zone:
            add_tags_command.append(f"--zone={args.zone}")
        print('gcloud ' + ' '.join(add_tags_command))
        if not args.dry_run:
            gcloud.run(add_tags_command)
|
import datetime
import re
from django.http import HttpResponse
from django.utils.dateparse import parse_datetime
from openpyxl import Workbook
from security.models import credit_sources, disbursement_methods
from security.templatetags.security import (
currency, format_card_number, format_sort_code,
format_resolution, format_disbursement_resolution,
list_prison_names,
)
from security.utils import EmailSet, NameSet
class ObjectListXlsxResponse(HttpResponse):
    """HTTP response that serves *object_list* serialised as an XLSX attachment."""

    def __init__(self, object_list, object_type, attachment_name='export.xlsx', **kwargs):
        if 'content_type' not in kwargs:
            kwargs['content_type'] = (
                'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
            )
        super().__init__(**kwargs)
        self['Content-Disposition'] = 'attachment; filename="%s"' % attachment_name
        serialiser = ObjectListSerialiser.serialiser_for(object_type)
        serialiser.make_workbook(object_list).save(self)
class ObjectListSerialiser:
    """Base class mapping API records to spreadsheet rows; one subclass per object type."""

    serialisers = {}
    headers = []

    def __init_subclass__(cls, object_type):
        # each subclass registers itself for the object type it exports
        cls.serialisers[object_type] = cls

    @classmethod
    def serialiser_for(cls, object_type):
        """Instantiate the serialiser registered for *object_type*."""
        try:
            return cls.serialisers[object_type]()
        except KeyError:
            raise NotImplementedError(f'Cannot export {object_type}')

    def make_workbook(self, object_list):
        """Build a write-only workbook with a header row plus one row per record."""
        wb = Workbook(write_only=True)
        sheet = wb.create_sheet()
        sheet.append(self.headers)
        for record in object_list:
            serialised = self.serialise(record)
            sheet.append([escape_formulae(serialised.get(field)) for field in self.headers])
        return wb

    def serialise(self, record):
        """Return a mapping of header name -> cell value for one record."""
        raise NotImplementedError
class CreditListSerialiser(ObjectListSerialiser, object_type='credits'):
    """Serialises credit (money-in) records for XLSX export."""

    headers = [
        'Internal ID',
        'Date started', 'Date received', 'Date credited',
        'Amount',
        'Prisoner number', 'Prisoner name', 'Prison',
        'Sender name', 'Payment method',
        'Bank transfer sort code', 'Bank transfer account', 'Bank transfer roll number',
        'Debit card number', 'Debit card expiry', 'Debit card billing address',
        'Sender email', 'Sender IP address',
        'Status',
        'NOMIS transaction',
    ]

    def serialise(self, record):
        """Map one credit API record onto the export columns."""
        return {
            'Internal ID': record['id'],
            'Date started': record['started_at'],
            'Date received': (
                record['received_at'].strftime('%Y-%m-%d')
                if record['source'] == 'bank_transfer' else record['received_at']
            ),
            'Date credited': record['credited_at'],
            'Amount': currency(record['amount']),
            'Prisoner number': record['prisoner_number'],
            'Prisoner name': record['prisoner_name'],
            'Prison': record['prison_name'],
            'Sender name': record['sender_name'],
            'Payment method': str(credit_sources.get(record['source'], record['source'])),
            'Bank transfer sort code': (
                format_sort_code(record['sender_sort_code']) if record['sender_sort_code'] else None
            ),
            'Bank transfer account': record['sender_account_number'],
            'Bank transfer roll number': record['sender_roll_number'],
            'Debit card number': (
                # BUG FIX: the inner subscripts reused the f-string's own quote
                # character, which is a SyntaxError before Python 3.12 (PEP 701);
                # use double quotes inside the single-quoted f-string.
                f'{record["card_number_first_digits"] or "******"}******{record["card_number_last_digits"]}'
                if record['card_number_last_digits']
                else None
            ),
            'Debit card expiry': record['card_expiry_date'],
            'Debit card billing address': credit_address_for_export(record['billing_address']),
            'Sender email': record['sender_email'],
            'Sender IP address': record['ip_address'],
            'Status': str(format_resolution(record['resolution'])),
            'NOMIS transaction': record['nomis_transaction_id'],
        }
class DisbursementListSerialiser(ObjectListSerialiser, object_type='disbursements'):
    """Serialises disbursement (money-out) records for XLSX export."""

    headers = [
        'Internal ID',
        'Date entered', 'Date confirmed', 'Date sent',
        'Amount',
        'Prisoner number', 'Prisoner name', 'Prison',
        'Recipient name', 'Payment method',
        'Bank transfer sort code', 'Bank transfer account', 'Bank transfer roll number',
        'Recipient address', 'Recipient email',
        'Status',
        'NOMIS transaction', 'SOP invoice number',
    ]

    def serialise(self, record):
        """Map one disbursement API record onto the export columns."""
        # latest timestamp per action wins because later log entries overwrite
        last_action_dates = {
            log_item['action']: parse_datetime(log_item['created'])
            for log_item in record['log_set']
        }
        return {
            'Internal ID': record['id'],
            'Date entered': record['created'],
            'Date confirmed': last_action_dates.get('confirmed', ''),
            'Date sent': last_action_dates.get('sent', ''),
            'Amount': currency(record['amount']),
            'Prisoner number': record['prisoner_number'],
            'Prisoner name': record['prisoner_name'],
            'Prison': record['prison_name'],
            # BUG FIX: the inner subscripts reused the f-string's own quote
            # character, which is a SyntaxError before Python 3.12 (PEP 701).
            'Recipient name': f'{record["recipient_first_name"]} {record["recipient_last_name"]}'.strip(),
            'Payment method': str(disbursement_methods.get(record['method'], record['method'])),
            'Bank transfer sort code': (
                format_sort_code(record['sort_code']) if record['sort_code'] else ''
            ),
            'Bank transfer account': record['account_number'],
            'Bank transfer roll number': record['roll_number'],
            'Recipient address': disbursement_address_for_export(record),
            'Recipient email': record['recipient_email'],
            'Status': str(format_disbursement_resolution(record['resolution'])),
            'NOMIS transaction': record['nomis_transaction_id'],
            'SOP invoice number': record['invoice_number'],
        }
class SenderListSerialiser(ObjectListSerialiser, object_type='senders'):
    """Serialises payment-source (sender) records for XLSX export."""

    headers = [
        'Sender name', 'Payment method',
        'Credits sent', 'Total amount sent',
        'Prisoners sent to', 'Prisons sent to',
        'Bank transfer sort code', 'Bank transfer account', 'Bank transfer roll number',
        'Debit card number', 'Debit card expiry', 'Debit card postcode',
        'Other cardholder names', 'Cardholder emails',
    ]

    def serialise(self, record):
        """Serialise a sender: common totals plus method-specific columns."""
        base = {
            'Credits sent': record['credit_count'],
            'Total amount sent': currency(record['credit_total']),
            'Prisoners sent to': record['prisoner_count'],
            'Prisons sent to': record['prison_count'],
        }
        bank_transfers = record.get('bank_transfer_details')
        if bank_transfers:
            bank_transfer = bank_transfers[0]
            base.update({
                'Sender name': bank_transfer['sender_name'],
                'Payment method': 'Bank transfer',
                'Bank transfer sort code': format_sort_code(bank_transfer['sender_sort_code']),
                'Bank transfer account': bank_transfer['sender_account_number'],
                'Bank transfer roll number': bank_transfer['sender_roll_number'],
            })
            return base
        debit_cards = record.get('debit_card_details')
        if debit_cards:
            debit_card = debit_cards[0]
            cardholder_names = debit_card['cardholder_names']
            sender_name = cardholder_names[0] if cardholder_names else 'Unknown'
            other_sender_names = NameSet(cardholder_names)
            if sender_name in other_sender_names:
                other_sender_names.remove(sender_name)
            base.update({
                'Sender name': sender_name,
                'Payment method': 'Debit card',
                'Debit card number': format_card_number(debit_card),
                'Debit card expiry': debit_card['card_expiry_date'],
                'Debit card postcode': debit_card['postcode'] or 'Unknown',
                'Other cardholder names': ', '.join(other_sender_names),
                'Cardholder emails': ', '.join(EmailSet(debit_card['sender_emails'])),
            })
            return base
        base.update({'Sender name': '(Unknown)', 'Payment method': '(Unknown)'})
        return base
class PrisonerListSerialiser(ObjectListSerialiser, object_type='prisoners'):
    """Serialises prisoner records for XLSX export."""

    headers = [
        'Prisoner number',
        'Prisoner name',
        'Date of birth',
        'Credits received',
        'Total amount received',
        'Payment sources',
        'Disbursements sent',
        'Total amount sent',
        'Recipients',
        'Current prison',
        'All known prisons',
        'Names given by senders',
    ]

    def serialise(self, record):
        """Map one prisoner API record onto the export columns."""
        current = record['current_prison']
        return {
            'Prisoner number': record['prisoner_number'],
            'Prisoner name': record['prisoner_name'],
            'Date of birth': record['prisoner_dob'],
            'Credits received': record['credit_count'],
            'Total amount received': currency(record['credit_total']),
            'Payment sources': record['sender_count'],
            'Disbursements sent': record['disbursement_count'],
            'Total amount sent': currency(record['disbursement_total']),
            'Recipients': record['recipient_count'],
            'Current prison': current['name'] if current else 'Not in a public prison',
            'All known prisons': list_prison_names(record['prisons']),
            'Names given by senders': ', '.join(NameSet(record['provided_names'])),
        }
def escape_formulae(value):
    """
    Escapes formulae (strings that start with =) to prevent
    spreadsheet software vulnerabilities being exploited
    :param value: the value being added to a CSV cell
    """
    if isinstance(value, str):
        return "'" + value if value.startswith('=') else value
    # datetime must be tested before date since datetime subclasses date
    if isinstance(value, datetime.datetime):
        return value.strftime('%Y-%m-%d %H:%M:%S')
    if isinstance(value, datetime.date):
        return value.strftime('%Y-%m-%d')
    return value
def credit_address_for_export(address):
    """Collapse a billing-address dict into a single comma-separated line."""
    if not address:
        return ''
    parts = []
    for key in ('line1', 'line2', 'city', 'postcode', 'country'):
        value = address.get(key)
        if value:
            # normalise runs of whitespace inside each address component
            parts.append(re.sub(r'\s+', ' ', value).strip())
    return ', '.join(parts)
def disbursement_address_for_export(disbursement):
    """Collapse a disbursement's address fields into one comma-separated line."""
    parts = (
        re.sub(r'\s+', ' ', disbursement[key]).strip()
        for key in ('address_line1', 'address_line2', 'city', 'postcode', 'country')
        if disbursement.get(key)
    )
    return ', '.join(parts)
| import datetime
import re
from django.http import HttpResponse
from django.utils.dateparse import parse_datetime
from openpyxl import Workbook
from security.models import credit_sources, disbursement_methods
from security.templatetags.security import (
currency, format_card_number, format_sort_code,
format_resolution, format_disbursement_resolution,
list_prison_names,
)
from security.utils import EmailSet, NameSet
class ObjectListXlsxResponse(HttpResponse):
    """HttpResponse serving the serialised object list as an XLSX download."""

    def __init__(self, object_list, object_type, attachment_name='export.xlsx', **kwargs):
        kwargs.setdefault(
            'content_type',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )
        super().__init__(**kwargs)
        self['Content-Disposition'] = 'attachment; filename="%s"' % attachment_name
        # look up the serialiser for this object type, build the workbook,
        # and write it straight into the response body
        ObjectListSerialiser.serialiser_for(object_type).make_workbook(object_list).save(self)
class ObjectListSerialiser:
    """Registry and base class for object-type-specific XLSX serialisers."""

    serialisers = {}
    headers = []

    def __init_subclass__(cls, object_type):
        # subclasses self-register under the object type they handle
        cls.serialisers[object_type] = cls

    @classmethod
    def serialiser_for(cls, object_type):
        """Return a new serialiser instance for *object_type*."""
        try:
            return cls.serialisers[object_type]()
        except KeyError:
            raise NotImplementedError(f'Cannot export {object_type}')

    def make_workbook(self, object_list):
        """Create a write-only workbook: header row, then one row per record."""
        workbook = Workbook(write_only=True)
        worksheet = workbook.create_sheet()
        worksheet.append(self.headers)
        for record in object_list:
            serialised = self.serialise(record)
            row = [escape_formulae(serialised.get(column)) for column in self.headers]
            worksheet.append(row)
        return workbook

    def serialise(self, record):
        """Subclasses return a header-name -> cell-value mapping for *record*."""
        raise NotImplementedError
class CreditListSerialiser(ObjectListSerialiser, object_type='credits'):
    """Serialises credit (money-in) records for XLSX export."""

    headers = [
        'Internal ID',
        'Date started', 'Date received', 'Date credited',
        'Amount',
        'Prisoner number', 'Prisoner name', 'Prison',
        'Sender name', 'Payment method',
        'Bank transfer sort code', 'Bank transfer account', 'Bank transfer roll number',
        'Debit card number', 'Debit card expiry', 'Debit card billing address',
        'Sender email', 'Sender IP address',
        'Status',
        'NOMIS transaction',
    ]

    def serialise(self, record):
        """Map one credit API record onto the export columns."""
        received_at = record['received_at']
        if record['source'] == 'bank_transfer':
            received_at = received_at.strftime('%Y-%m-%d')
        sort_code = record['sender_sort_code']
        card_last = record['card_number_last_digits']
        card_number = None
        if card_last:
            card_number = f'{record["card_number_first_digits"] or "******"}******{card_last}'
        return {
            'Internal ID': record['id'],
            'Date started': record['started_at'],
            'Date received': received_at,
            'Date credited': record['credited_at'],
            'Amount': currency(record['amount']),
            'Prisoner number': record['prisoner_number'],
            'Prisoner name': record['prisoner_name'],
            'Prison': record['prison_name'],
            'Sender name': record['sender_name'],
            'Payment method': str(credit_sources.get(record['source'], record['source'])),
            'Bank transfer sort code': format_sort_code(sort_code) if sort_code else None,
            'Bank transfer account': record['sender_account_number'],
            'Bank transfer roll number': record['sender_roll_number'],
            'Debit card number': card_number,
            'Debit card expiry': record['card_expiry_date'],
            'Debit card billing address': credit_address_for_export(record['billing_address']),
            'Sender email': record['sender_email'],
            'Sender IP address': record['ip_address'],
            'Status': str(format_resolution(record['resolution'])),
            'NOMIS transaction': record['nomis_transaction_id'],
        }
class DisbursementListSerialiser(ObjectListSerialiser, object_type='disbursements'):
    """Serialises disbursement (money-out) records for XLSX export."""

    headers = [
        'Internal ID',
        'Date entered', 'Date confirmed', 'Date sent',
        'Amount',
        'Prisoner number', 'Prisoner name', 'Prison',
        'Recipient name', 'Payment method',
        'Bank transfer sort code', 'Bank transfer account', 'Bank transfer roll number',
        'Recipient address', 'Recipient email',
        'Status',
        'NOMIS transaction', 'SOP invoice number',
    ]

    def serialise(self, record):
        """Map one disbursement API record onto the export columns."""
        # latest timestamp per action wins because later log entries overwrite
        last_action_dates = {}
        for log_item in record['log_set']:
            last_action_dates[log_item['action']] = parse_datetime(log_item['created'])
        sort_code = record['sort_code']
        recipient_name = f'{record["recipient_first_name"]} {record["recipient_last_name"]}'.strip()
        return {
            'Internal ID': record['id'],
            'Date entered': record['created'],
            'Date confirmed': last_action_dates.get('confirmed', ''),
            'Date sent': last_action_dates.get('sent', ''),
            'Amount': currency(record['amount']),
            'Prisoner number': record['prisoner_number'],
            'Prisoner name': record['prisoner_name'],
            'Prison': record['prison_name'],
            'Recipient name': recipient_name,
            'Payment method': str(disbursement_methods.get(record['method'], record['method'])),
            'Bank transfer sort code': format_sort_code(sort_code) if sort_code else '',
            'Bank transfer account': record['account_number'],
            'Bank transfer roll number': record['roll_number'],
            'Recipient address': disbursement_address_for_export(record),
            'Recipient email': record['recipient_email'],
            'Status': str(format_disbursement_resolution(record['resolution'])),
            'NOMIS transaction': record['nomis_transaction_id'],
            'SOP invoice number': record['invoice_number'],
        }
class SenderListSerialiser(ObjectListSerialiser, object_type='senders'):
    """Serialises payment-source (sender) records for XLSX export."""

    headers = [
        'Sender name', 'Payment method',
        'Credits sent', 'Total amount sent',
        'Prisoners sent to', 'Prisons sent to',
        'Bank transfer sort code', 'Bank transfer account', 'Bank transfer roll number',
        'Debit card number', 'Debit card expiry', 'Debit card postcode',
        'Other cardholder names', 'Cardholder emails',
    ]

    def serialise(self, record):
        """Serialise a sender: common totals plus payment-method-specific columns."""
        common = {
            'Credits sent': record['credit_count'],
            'Total amount sent': currency(record['credit_total']),
            'Prisoners sent to': record['prisoner_count'],
            'Prisons sent to': record['prison_count'],
        }
        transfers = record.get('bank_transfer_details')
        if transfers:
            transfer = transfers[0]
            return {
                **common,
                'Sender name': transfer['sender_name'],
                'Payment method': 'Bank transfer',
                'Bank transfer sort code': format_sort_code(transfer['sender_sort_code']),
                'Bank transfer account': transfer['sender_account_number'],
                'Bank transfer roll number': transfer['sender_roll_number'],
            }
        cards = record.get('debit_card_details')
        if cards:
            card = cards[0]
            names = card['cardholder_names']
            sender_name = names[0] if names else 'Unknown'
            other_names = NameSet(names)
            if sender_name in other_names:
                other_names.remove(sender_name)
            return {
                **common,
                'Sender name': sender_name,
                'Payment method': 'Debit card',
                'Debit card number': format_card_number(card),
                'Debit card expiry': card['card_expiry_date'],
                'Debit card postcode': card['postcode'] or 'Unknown',
                'Other cardholder names': ', '.join(other_names),
                'Cardholder emails': ', '.join(EmailSet(card['sender_emails'])),
            }
        return {
            **common,
            'Sender name': '(Unknown)',
            'Payment method': '(Unknown)',
        }
class PrisonerListSerialiser(ObjectListSerialiser, object_type='prisoners'):
    """Serialises prisoner records for XLSX export."""

    headers = [
        'Prisoner number',
        'Prisoner name',
        'Date of birth',
        'Credits received',
        'Total amount received',
        'Payment sources',
        'Disbursements sent',
        'Total amount sent',
        'Recipients',
        'Current prison',
        'All known prisons',
        'Names given by senders',
    ]

    def serialise(self, record):
        """Map one prisoner API record onto the export columns."""
        prison = record['current_prison']
        return {
            'Prisoner number': record['prisoner_number'],
            'Prisoner name': record['prisoner_name'],
            'Date of birth': record['prisoner_dob'],
            'Credits received': record['credit_count'],
            'Total amount received': currency(record['credit_total']),
            'Payment sources': record['sender_count'],
            'Disbursements sent': record['disbursement_count'],
            'Total amount sent': currency(record['disbursement_total']),
            'Recipients': record['recipient_count'],
            'Current prison': prison['name'] if prison else 'Not in a public prison',
            'All known prisons': list_prison_names(record['prisons']),
            'Names given by senders': ', '.join(NameSet(record['provided_names'])),
        }
def escape_formulae(value):
    """
    Escapes formulae (strings that start with =) to prevent
    spreadsheet software vulnerabilities being exploited
    :param value: the value being added to a CSV cell
    """
    # datetime must be tested before date since datetime subclasses date
    if isinstance(value, datetime.datetime):
        return value.strftime('%Y-%m-%d %H:%M:%S')
    if isinstance(value, datetime.date):
        return value.strftime('%Y-%m-%d')
    if isinstance(value, str) and value.startswith('='):
        return "'" + value
    return value
def credit_address_for_export(address):
    """Collapse a billing-address dict into one comma-separated line."""
    if not address:
        return ''
    collapse = re.compile(r'\s+')
    fields = ('line1', 'line2', 'city', 'postcode', 'country')
    return ', '.join(
        collapse.sub(' ', address[field]).strip()
        for field in fields if address.get(field)
    )
def disbursement_address_for_export(disbursement):
    """Collapse a disbursement's address columns into one comma-separated line."""
    parts = []
    for field in ('address_line1', 'address_line2', 'city', 'postcode', 'country'):
        value = disbursement.get(field)
        if value:
            # normalise internal whitespace within each component
            parts.append(re.sub(r'\s+', ' ', value).strip())
    return ', '.join(parts)
|
"""
Evacuation planning based on network flow algorithms.
"""
from __future__ import annotations
from collections import deque
from itertools import groupby
from sys import stderr
from typing import Dict, List, Tuple, NamedTuple
import networkx as nx
from .level import Level
from .graph.reservation_graph import ReservationGraph, ReservationNode, Reservation
class _NodeAdder:
def __init__(self, g: nx.Graph):
self.next_id = 0
self.g = g
def add(self, label=None) -> int:
curr_id = self.next_id
self.g.add_node(curr_id)
if label:
self.g.nodes[curr_id]["label"] = label
self.next_id += 1
return curr_id
def node_clones(self, g: nx.Graph, label=None) -> Dict[int, int]:
return {n: self.add(label=f"{n}-{label}") for n in g.node}
class NodeInfo(NamedTuple):
    """Describes one node of the time-expanded graph."""
    id: int    # original level-graph node this expanded node is a copy of
    t: int     # time step this copy belongs to
    type: int  # IN or OUT half of the vertex


# Each level vertex is split into an IN and an OUT copy per time step, so
# vertex capacity can be modelled as the capacity of the IN->OUT edge.
IN = 0
OUT = 1
def get_info(expansion_records: List[Tuple[Dict[int, int], Dict[int, int]]]) -> Dict[int, NodeInfo]:
    """Invert the per-timestep (in, out) clone maps into expanded-node -> NodeInfo."""
    res: Dict[int, NodeInfo] = {}
    for t, (ins, outs) in enumerate(expansion_records):
        for original, expanded in ins.items():
            res[expanded] = NodeInfo(original, t, IN)
        for original, expanded in outs.items():
            res[expanded] = NodeInfo(original, t, OUT)
    return res
def expand(lvl: Level, time: int) -> Tuple[nx.DiGraph, List[Tuple[Dict[int, int], Dict[int, int]]]]:
    """Time-expand the graph underlying the given level"""
    exp_g = nx.DiGraph()
    adder = _NodeAdder(exp_g)
    source = adder.add("src")
    sink = adder.add("sink")
    inputs = adder.node_clones(lvl.g, "0i")
    node_id_records = []
    outputs: Dict[int, int] = {}
    # source feeds each agent's starting vertex with one unit of capacity
    for agent in lvl.scenario.agents:
        exp_g.add_edge(source, inputs[agent.origin], capacity=1)
    for t in range(0, time):
        # the in->out edge within a step limits each vertex to one agent
        outputs = adder.node_clones(lvl.g, f"{t}o")
        for k in inputs:
            exp_g.add_edge(inputs[k], outputs[k], capacity=1)
        node_id_records.append((inputs, outputs))
        if t < time - 1:
            # link step t's outputs to step t+1's inputs: wait in place ...
            inputs = adder.node_clones(lvl.g, f"{t+1}i")
            for k in inputs:
                exp_g.add_edge(outputs[k], inputs[k], capacity=1)
            # ... or traverse a level edge in either direction
            for edge in lvl.g.edges:
                exp_g.add_edge(outputs[edge[0]], inputs[edge[1]], capacity=1)
                exp_g.add_edge(outputs[edge[1]], inputs[edge[0]], capacity=1)
        else:
            # final step: only safe vertices drain into the sink
            for k in outputs:
                if lvl.is_safe(k):
                    exp_g.add_edge(outputs[k], sink, capacity=1)
    return (exp_g, node_id_records)
def follow_path(start: int, flow_dict: Dict[int, Dict[int, int]], info: Dict[int, NodeInfo]) -> List[int]:
    """Follow a path of a single agent starting at node `start` through the graph flow"""
    path: List[int] = []
    node = start
    # the sink has no entry in `info`, which terminates the walk
    while node in info:
        node_info = info[node]
        if node_info.type == OUT:
            # only OUT copies correspond to occupying a level vertex
            path.append(node_info.id)
        for successor, flow in flow_dict[node].items():
            if flow > 0:
                node = successor
                break
    return path
def reconstruct(lvl: Level, flow_dict: Dict[int, Dict[int, int]], info: Dict[int, NodeInfo]) -> List[List[int]]:
    """Reconstruct agent paths from the given flow and node information"""
    # One fresh list per agent: `[[]] * n` would make every entry alias the
    # same list object, a latent bug if any caller mutated a path in place.
    paths: List[List[int]] = [[] for _ in lvl.scenario.agents]
    start_flows = flow_dict[0]
    agent_starts = {agent.origin: i for i, agent in enumerate(lvl.scenario.agents)}
    for n, flow in start_flows.items():
        if flow > 0:
            # node 0 is the source; a positive flow marks an agent's first move
            agent = agent_starts[info[n].id]
            paths[agent] = follow_path(n, flow_dict, info)
    return paths
def extend(path: List[int], t: int) -> List[int]:
    """Pad *path* to length *t* by repeating its final vertex (no-op if already longer)."""
    padding = [path[-1]] * (t - len(path))
    return path + padding
def annotate_with_flow(g: nx.DiGraph, flow_dict: Dict[int, Dict[int, int]]):
    """Record each edge's flow value as a "flow" attribute on the graph's edges."""
    for u, flows in flow_dict.items():
        for v, flow in flows.items():
            g.edges[(u, v)]["flow"] = flow
def drawable_graph(g: nx.DiGraph) -> nx.DiGraph:
    """Build a copy of *g* keyed by node labels, for visualisation/debugging."""
    drawable = nx.DiGraph()
    for u, v in g.edges:
        # BUG FIX: `Graph.node` was removed in networkx 2.x;
        # node attributes live on the `Graph.nodes` view.
        u_label = g.nodes[u]["label"]
        v_label = g.nodes[v]["label"]
        drawable.add_edge(u_label, v_label)
    return drawable
def flow_at_time(lvl: Level, t: int):
    """Find the maximum flow in a graph expanded for t"""
    expanded, node_ids = expand(lvl, t)
    # node 0 is the source, node 1 the sink (see expand)
    value, flows = nx.maximum_flow(expanded, 0, 1)
    return value, flows, node_ids
class FlowAgent():
    """One agent replaying a flow-derived path while reserving vertex-time slots.

    All agents share the `agents` list and the reservation graph;
    `postprocess_iteration` calls `step()` once per agent per time step.
    """

    def __init__(self, level: Level, reservations: ReservationGraph, agents: List[FlowAgent], id: int, original_path: List[int], debug: bool):
        self.level = level
        self.reservations = reservations
        # shared, mutable list of all agents (used for deadlock resolution)
        self.agents = agents
        self.id = id
        self.debug = debug
        # vertices still to visit, in order
        self.queue = deque(original_path)
        # vertices visited so far (one entry per elapsed time step)
        self.path: List[int] = []

    def step(self):
        """Advance one time step: move to the next queued vertex if its slot is free."""
        if self.done():
            return self._stay()
        rn = ReservationNode(self.queue[0], len(self.path) + 1)
        if self.reservable(rn):
            self.path.append(self.queue.popleft())
            # reserve the slot we move into and the following one
            self.reservations.reserve(Reservation(rn, self.id, 1))
            self.reservations.reserve(Reservation(rn.incremented_t(), self.id, 0))
        else:
            self._handle_block(rn)

    def _stay(self):
        # wait in place for one step, reserving the current vertex's slots
        pos = self.path[-1]
        self.path.append(pos)
        rn = ReservationNode(pos, len(self.path))
        self.reservations.reserve(Reservation(rn, self.id, 0))
        self.reservations.reserve(Reservation(rn.incremented_t(), self.id, 0))

    def _handle_block(self, rn):
        """Resolve a blocked move: swap with a deadlocked or parked blocker, else wait."""
        reservation = self.reservations.get(rn)
        a = self.agents[reservation.agent]
        # head-on deadlock: each agent's next vertex is the other's current one
        deadlocked = len(a.queue) > 0 and a.queue[0] == self.path[-1] and self.queue[0] == a.path[-1]
        if deadlocked:
            # exchange remaining routes, then wait a step
            a.queue, self.queue = self.queue, a.queue
            return self._stay()
        if not a.done() or rn.pos() != a.path[-1]:
            # blocker is still moving, or isn't actually parked on our target
            return self._stay()
        # blocker is finished and parked on our target: hand it our remaining
        # route and take its resting vertex as our new goal
        if self.debug:
            print(f"Swapping {self.id} and {a.id} (blocked at {a.path[-1]}, t={len(self.path) + 1})", file=stderr)
        self.queue.popleft()
        a.queue = self.queue
        self.queue = deque([a.path[-1]])
        self._stay()

    def done(self):
        # no vertices left to visit
        return len(self.queue) == 0

    def reservable(self, node: ReservationNode) -> bool:
        # a slot is usable if unreserved or already reserved by this agent
        reservation = self.reservations.get(node)
        return reservation is None or reservation.agent == self.id
def postprocess_iteration(lvl: Level, paths: List[List[int]], debug: bool) -> List[List[int]]:
    """Makes agents trying to move into occupied vertices wait for another turn and resolve deadlocks.

    :param lvl: The level whose graph underlies the reservations.
    :param paths: One vertex path per agent.
    :param debug: Whether to print diagnostics to stderr.
    """
    reservations = ReservationGraph(lvl.g)
    agents: List[FlowAgent] = []
    for i, path in enumerate(paths):
        agents.append(FlowAgent(lvl, reservations, agents, i, path, debug))
    # (Removed a dead `i = 0` counter that was never read.)
    # Step all agents in lockstep until every queue is exhausted.
    while any(map(lambda a: not a.done(), agents)):
        for agent in agents:
            agent.step()
    return [agent.path for agent in agents]
def postprocess_paths(lvl: Level, paths: List[List[int]], debug: bool) -> List[List[int]]:
    """Repeatedly postprocess deduplicated paths to get valid MAE paths shorter than from a single postprocess_iteration

    :param lvl: The level being solved.
    :param paths: One vertex path per agent.
    :param debug: Whether to print diagnostics to stderr.
    """
    i = 1
    while True:
        if debug:
            print(f"Postprocessing iteration {i}", file=stderr)
        # Collapse runs of consecutive duplicate vertices before re-simulating.
        dedup = [list(map(lambda t: t[0], groupby(path))) for path in paths]
        new_paths = postprocess_iteration(lvl, dedup, debug)
        if debug:
            for j, path in enumerate(new_paths):
                # Fixed: reusing double quotes inside the f-string was a syntax
                # error on Python < 3.12.
                print(f"{j}: {' '.join(map(str, path))}", file=stderr)
        if new_paths == paths:
            break
        paths = new_paths
        i += 1
    return paths
class Solution(NamedTuple):
    """A max-flow result for a candidate makespan ``t``."""
    flow: int  # value of the maximum flow (number of evacuated agents)
    flow_dict: Dict  # per-edge flow assignment of the expanded graph
    node_ids: List[Tuple[Dict[int, int], Dict[int, int]]]  # per-timestep (input, output) id maps
    t: int  # the makespan this solution was computed for
def evacuation_paths(lvl: Level, debug) -> List[List[int]]:
    """Return the evacuation plan for a flow-based evacuation with the shortest makespan"""
    best_sol = Solution(0, {}, [], 0)
    highest_wrong = 0
    # Phase 1: galloping search — grow t until the max flow carries every agent.
    t = len(lvl.scenario.agents)
    while True:
        if debug:
            print(f"Trying {t} as makespan", file=stderr)
        flow_val, flow_dict, node_ids = flow_at_time(lvl, t)
        if flow_val == len(lvl.scenario.agents):
            best_sol = Solution(flow_val, flow_dict, node_ids, t)
            break
        else:
            highest_wrong = t
            t += t // 2
    # Phase 2: binary search between the largest failing t and the best known t.
    while True:
        if debug:
            print("Range:", highest_wrong, best_sol.t, file=stderr)
        t = highest_wrong + (best_sol.t - highest_wrong) // 2
        flow_val, flow_dict, node_ids = flow_at_time(lvl, t)
        if debug:
            print(f"t={t} maxflow={flow_val}", file=stderr)
        if flow_val == len(lvl.scenario.agents):
            best_sol = Solution(flow_val, flow_dict, node_ids, t)
            # Stop when the bracket [highest_wrong, best_sol.t] is tight.
            if t == highest_wrong + 1:
                break
        else:
            highest_wrong = t
            if t == best_sol.t - 1:
                break
    return reconstruct(lvl, best_sol.flow_dict, get_info(best_sol.node_ids))
def plan_evacuation(lvl: Level, postprocess=False, debug=True) -> List[List[int]]:
    """Plan the evacuation; either postprocess the flow paths into valid MAE
    paths, or pad every path to the length of the longest one."""
    paths = evacuation_paths(lvl, debug)
    if postprocess:
        return postprocess_paths(lvl, paths, debug)
    longest = len(max(paths, key=len))
    return [extend(p, longest) for p in paths]
"""
Evacuation planning based on network flow algorithms.
"""
from __future__ import annotations
from collections import deque
from itertools import groupby
from sys import stderr
from typing import Dict, List, Tuple, NamedTuple
import networkx as nx
from .level import Level
from .graph.reservation_graph import ReservationGraph, ReservationNode, Reservation
class _NodeAdder:
    """Hands out sequential integer ids for nodes added to a graph."""

    def __init__(self, g: nx.Graph):
        self.next_id = 0
        self.g = g

    def add(self, label=None) -> int:
        """Add a fresh node to the graph, optionally labelled, and return its id."""
        curr_id = self.next_id
        self.g.add_node(curr_id)
        if label:
            self.g.nodes[curr_id]["label"] = label
        self.next_id += 1
        return curr_id

    def node_clones(self, g: nx.Graph, label=None) -> Dict[int, int]:
        """Clone every node of *g* into our graph; return original-id -> new-id.

        Fixed: ``Graph.node`` was removed in networkx 2.4; iterate ``g.nodes``.
        """
        return {n: self.add(label=f"{n}-{label}") for n in g.nodes}
class NodeInfo(NamedTuple):
    """Maps an expanded-graph node back to its original vertex, timestep and side."""
    id: int  # original vertex id
    t: int  # timestep of this expanded copy
    type: int  # IN or OUT
# Side markers for expanded nodes: each original vertex gets an IN and an OUT
# copy per timestep, joined by a unit-capacity edge.
IN = 0
OUT = 1
def get_info(expansion_records: List[Tuple[Dict[int, int], Dict[int, int]]]) -> Dict[int, NodeInfo]:
    """Invert expansion records into a map from expanded-node id to its NodeInfo."""
    info: Dict[int, NodeInfo] = {}
    for t, (in_map, out_map) in enumerate(expansion_records):
        for original, expanded in in_map.items():
            info[expanded] = NodeInfo(original, t, IN)
        for original, expanded in out_map.items():
            info[expanded] = NodeInfo(original, t, OUT)
    return info
def expand(lvl: Level, time: int) -> Tuple[nx.DiGraph, List[Tuple[Dict[int, int], Dict[int, int]]]]:
    """Time-expand the graph underlying the given level

    Node 0 is the source (wired to every agent's origin at t=0) and node 1 the
    sink (wired from every safe vertex at the final timestep).  Each original
    vertex becomes an input/output pair per timestep; the unit-capacity
    input->output edge enforces single occupancy.  Returns the expanded graph
    and, per timestep, the (input, output) original-id -> expanded-id maps.
    """
    exp_g = nx.DiGraph()
    adder = _NodeAdder(exp_g)
    source = adder.add("src")
    sink = adder.add("sink")
    inputs = adder.node_clones(lvl.g, "0i")
    node_id_records = []
    outputs: Dict[int, int] = {}
    for agent in lvl.scenario.agents:
        exp_g.add_edge(source, inputs[agent.origin], capacity=1)
    for t in range(0, time):
        outputs = adder.node_clones(lvl.g, f"{t}o")
        for k in inputs:
            exp_g.add_edge(inputs[k], outputs[k], capacity=1)
        node_id_records.append((inputs, outputs))
        if t < time - 1:
            # Connect this timestep's outputs to the next timestep's inputs:
            # waiting in place, or moving along any edge (in either direction).
            inputs = adder.node_clones(lvl.g, f"{t+1}i")
            for k in inputs:
                exp_g.add_edge(outputs[k], inputs[k], capacity=1)
            for edge in lvl.g.edges:
                exp_g.add_edge(outputs[edge[0]], inputs[edge[1]], capacity=1)
                exp_g.add_edge(outputs[edge[1]], inputs[edge[0]], capacity=1)
        else:
            # Final timestep: only safe vertices may deliver flow to the sink.
            for k in outputs:
                if lvl.is_safe(k):
                    exp_g.add_edge(outputs[k], sink, capacity=1)
    return (exp_g, node_id_records)
def follow_path(start: int, flow_dict: Dict[int, Dict[int, int]], info: Dict[int, NodeInfo]) -> List[int]:
    """Follow a path of a single agent starting at node `start` through the graph flow"""
    path: List[int] = []
    node = start
    while node in info:
        # Only OUT copies correspond to the agent actually occupying a vertex.
        if info[node].type == OUT:
            path.append(info[node].id)
        # Step to the successor carrying positive flow.
        for successor, amount in flow_dict[node].items():
            if amount > 0:
                node = successor
                break
    return path
def reconstruct(lvl: Level, flow_dict: Dict[int, Dict[int, int]], info: Dict[int, NodeInfo]) -> List[List[int]]:
    """Reconstruct agent paths from the given flow and node information

    Fixed: ``[[]] * n`` creates n references to one shared list; use a
    comprehension so each agent's default path is an independent empty list.
    """
    paths: List[List[int]] = [[] for _ in range(len(lvl.scenario.agents))]
    start_flows = flow_dict[0]
    agent_starts = {agent.origin: i for i, agent in enumerate(lvl.scenario.agents)}
    for n in start_flows:
        if start_flows[n] > 0:
            agent = agent_starts[info[n].id]
            paths[agent] = follow_path(n, flow_dict, info)
    return paths
def extend(path: List[int], t: int) -> List[int]:
    """Pad *path* to length *t* by repeating its last vertex (no-op when already long enough)."""
    padding = [path[-1]] * (t - len(path))
    return path + padding
def annotate_with_flow(g: nx.DiGraph, flow_dict: Dict[int, Dict[int, int]]):
    """Store each edge's flow value as a "flow" attribute on *g*."""
    for u, outflows in flow_dict.items():
        for v, amount in outflows.items():
            g.edges[(u, v)]["flow"] = amount
def drawable_graph(g: nx.DiGraph) -> nx.DiGraph:
    """Return a copy of *g* whose nodes are the "label" attributes of g's nodes.

    Fixed: ``Graph.node`` was removed in networkx 2.4; use ``Graph.nodes``.
    """
    drawable = nx.DiGraph()
    for u, v in g.edges:
        u_label = g.nodes[u]["label"]
        v_label = g.nodes[v]["label"]
        drawable.add_edge(u_label, v_label)
    return drawable
def flow_at_time(lvl: Level, t: int):
    """Find the maximum flow in a graph expanded for t.

    Returns ``(flow_value, flow_dict, node_id_records)``; node 0 is the source
    and node 1 the sink of the expanded graph.
    """
    expanded, records = expand(lvl, t)
    value, assignment = nx.maximum_flow(expanded, 0, 1)
    return value, assignment, records
class FlowAgent():
    """An agent replaying a precomputed flow path over a shared reservation graph.

    All agents share `reservations` and the `agents` list; `step()` advances one
    timestep, and head-on conflicts are resolved by swapping the agents' queues.
    """
    def __init__(self, level: Level, reservations: ReservationGraph, agents: List[FlowAgent], id: int, original_path: List[int], debug: bool):
        self.level = level
        self.reservations = reservations  # shared reservation table for collision avoidance
        self.agents = agents  # shared list of all agents, indexed by agent id
        self.id = id
        self.debug = debug
        self.queue = deque(original_path)  # vertices still to be visited
        self.path: List[int] = []  # vertices visited so far, one per timestep
    def step(self):
        """Advance one timestep: move onto the next queued vertex if reservable, else handle the block."""
        if self.done():
            return self._stay()
        # The node we would occupy at the next timestep.
        rn = ReservationNode(self.queue[0], len(self.path) + 1)
        if self.reservable(rn):
            self.path.append(self.queue.popleft())
            # Reserve the target for the next step (priority 1) and the step after (priority 0).
            self.reservations.reserve(Reservation(rn, self.id, 1))
            self.reservations.reserve(Reservation(rn.incremented_t(), self.id, 0))
        else:
            self._handle_block(rn)
    def _stay(self):
        """Wait in place for one timestep, re-reserving the current position."""
        pos = self.path[-1]
        self.path.append(pos)
        rn = ReservationNode(pos, len(self.path))
        self.reservations.reserve(Reservation(rn, self.id, 0))
        self.reservations.reserve(Reservation(rn.incremented_t(), self.id, 0))
    def _handle_block(self, rn):
        """Resolve a blocked move: swap routes on a deadlock, take over a finished
        blocker's resting place, or simply wait."""
        reservation = self.reservations.get(rn)
        a = self.agents[reservation.agent]
        # Deadlock: the two agents want to walk through each other.
        deadlocked = len(a.queue) > 0 and a.queue[0] == self.path[-1] and self.queue[0] == a.path[-1]
        if deadlocked:
            a.queue, self.queue = self.queue, a.queue
            return self._stay()
        # If the blocker is still moving, or isn't parked on our target vertex, just wait.
        if not a.done() or rn.pos() != a.path[-1]:
            return self._stay()
        if self.debug:
            print(f"Swapping {self.id} and {a.id} (blocked at {a.path[-1]}, t={len(self.path) + 1})", file=stderr)
        # The blocker finished on our next vertex: hand it the rest of our route
        # and make its resting place our new (single-vertex) destination.
        self.queue.popleft()
        a.queue = self.queue
        self.queue = deque([a.path[-1]])
        self._stay()
    def done(self):
        """Whether this agent has no more queued vertices to visit."""
        return len(self.queue) == 0
    def reservable(self, node: ReservationNode) -> bool:
        """Whether `node` is free or already reserved by this very agent."""
        reservation = self.reservations.get(node)
        return reservation is None or reservation.agent == self.id
def postprocess_iteration(lvl: Level, paths: List[List[int]], debug: bool) -> List[List[int]]:
    """Makes agents trying to move into occupied vertices wait for another turn and resolve deadlocks.

    :param lvl: The level whose graph underlies the reservations.
    :param paths: One vertex path per agent.
    :param debug: Whether to print diagnostics to stderr.
    """
    reservations = ReservationGraph(lvl.g)
    agents: List[FlowAgent] = []
    for i, path in enumerate(paths):
        agents.append(FlowAgent(lvl, reservations, agents, i, path, debug))
    # (Removed a dead `i = 0` counter that was never read.)
    # Step all agents in lockstep until every queue is exhausted.
    while any(map(lambda a: not a.done(), agents)):
        for agent in agents:
            agent.step()
    return [agent.path for agent in agents]
def postprocess_paths(lvl: Level, paths: List[List[int]], debug: bool) -> List[List[int]]:
    """Repeatedly postprocess deduplicated paths to get valid MAE paths shorter than from a single postprocess_iteration"""
    iteration = 1
    while True:
        if debug:
            print(f"Postprocessing iteration {iteration}", file=stderr)
        # Collapse runs of consecutive duplicate vertices before re-simulating.
        deduplicated = [[vertex for vertex, _ in groupby(path)] for path in paths]
        result = postprocess_iteration(lvl, deduplicated, debug)
        if debug:
            for idx, path in enumerate(result):
                print(f"{idx}: {' '.join(map(str, path))}", file=stderr)
        if result == paths:
            return paths
        paths = result
        iteration += 1
class Solution(NamedTuple):
    """A max-flow result for a candidate makespan ``t``."""
    flow: int  # value of the maximum flow (number of evacuated agents)
    flow_dict: Dict  # per-edge flow assignment of the expanded graph
    node_ids: List[Tuple[Dict[int, int], Dict[int, int]]]  # per-timestep (input, output) id maps
    t: int  # the makespan this solution was computed for
def evacuation_paths(lvl: Level, debug) -> List[List[int]]:
    """Return the evacuation plan for a flow-based evacuation with the shortest makespan"""
    best_sol = Solution(0, {}, [], 0)
    highest_wrong = 0
    # Phase 1: galloping search — grow t until the max flow carries every agent.
    t = len(lvl.scenario.agents)
    while True:
        if debug:
            print(f"Trying {t} as makespan", file=stderr)
        flow_val, flow_dict, node_ids = flow_at_time(lvl, t)
        if flow_val == len(lvl.scenario.agents):
            best_sol = Solution(flow_val, flow_dict, node_ids, t)
            break
        else:
            highest_wrong = t
            t += t // 2
    # Phase 2: binary search between the largest failing t and the best known t.
    while True:
        if debug:
            print("Range:", highest_wrong, best_sol.t, file=stderr)
        t = highest_wrong + (best_sol.t - highest_wrong) // 2
        flow_val, flow_dict, node_ids = flow_at_time(lvl, t)
        if debug:
            print(f"t={t} maxflow={flow_val}", file=stderr)
        if flow_val == len(lvl.scenario.agents):
            best_sol = Solution(flow_val, flow_dict, node_ids, t)
            # Stop when the bracket [highest_wrong, best_sol.t] is tight.
            if t == highest_wrong + 1:
                break
        else:
            highest_wrong = t
            if t == best_sol.t - 1:
                break
    return reconstruct(lvl, best_sol.flow_dict, get_info(best_sol.node_ids))
def plan_evacuation(lvl: Level, postprocess=False, debug=True) -> List[List[int]]:
    """Plan the evacuation; either postprocess the flow paths into valid MAE
    paths, or pad every path to the length of the longest one."""
    paths = evacuation_paths(lvl, debug)
    if postprocess:
        return postprocess_paths(lvl, paths, debug)
    longest = len(max(paths, key=len))
    return [extend(p, longest) for p in paths]
import discord
from discord.ext import commands, menus
from mysqldb import the_database, the_django_database
from .player import Player, Skill
from .enums import QuestEnum
from extra.menu import ConfirmSkill, SwitchTribePages
from extra import utils
import os
import asyncio
from datetime import datetime
from typing import List, Union, Dict, Any, Optional, Callable
from random import choice
bots_and_commands_channel_id = int(os.getenv('BOTS_AND_COMMANDS_CHANNEL_ID', 123))
approve_thumbnail_channel_id = int(os.getenv('APPROVE_THUMBNAIL_CHANNEL_ID', 123))
class Munk(Player):
emoji = '<:Munk:839498018712715284>'
    def __init__(self, client) -> None:
        # Bot client reference, used to fetch channels/users in skill actions.
        self.client = client
@commands.Cog.listener(name='on_raw_reaction_add')
async def on_raw_reaction_add_munk(self, payload) -> None:
""" Checks reactions related to skill actions. """
# Checks if it wasn't a bot's reaction
if not payload.guild_id:
return
# Checks whether it's a valid member and not a bot
if not payload.member or payload.member.bot:
return
if payload.channel_id != approve_thumbnail_channel_id:
return
skill_action = await self.get_skill_action_by_message_id_and_skill_type(message_id=payload.message_id, skill_type='thumbnail_request')
if skill_action is not None:
emoji = str(payload.emoji)
# Checks whether it's a steal
if emoji == '✅':
await self.delete_skill_action_by_message_id(payload.message_id)
channel = self.client.get_channel(skill_action[5])
message = await channel.fetch_message(skill_action[4])
if message:
tribe = await self.get_tribe_info_by_user_id(user_id=skill_action[0])
message_embed = discord.Embed(
title="Thumbnail Approved!",
description=f"**<@{payload.user_id}>, approved your tribe `{tribe["name"]}`'s thumbnail/logo, <@{skill_action[0]}>!**",
color=discord.Color.green(),
url=tribe['link']
)
message_embed.set_image(url=skill_action[8])
await self.bots_txt.send(content=f"<@{skill_action[0]}>", embed=message_embed)
await message.delete()
await self.update_tribe_thumbnail(user_id=skill_action[0], tribe_name=tribe['name'], link=skill_action[8])
elif emoji == '❌':
await self.delete_skill_action_by_message_id(payload.message_id)
channel = self.client.get_channel(skill_action[5])
message = await channel.fetch_message(skill_action[4])
if message:
tribe = await self.get_tribe_info_by_user_id(user_id=skill_action[0])
message_embed = discord.Embed(
title="Thumbnail Refused!",
description=f"**<@{payload.user_id}>, refused your tribe `{tribe["name"]}`'s thumbnail/logo, <@{skill_action[0]}>!**",
color=discord.Color.red(),
url=tribe['link']
)
message_embed.set_image(url=skill_action[8])
await self.bots_txt.send(content=f"<@{skill_action[0]}>", embed=message_embed)
await message.delete()
    @commands.command()
    @Player.poisoned()
    @Player.skill_on_cooldown()
    @Player.skills_locked()
    @Player.user_is_class('munk')
    @Player.skill_mark()
    async def munk(self, ctx, target: discord.Member = None) -> None:
        """ Converts a user into a real Munk.
        :param target: The person you want to convert to a Munk. """
        if ctx.channel.id != bots_and_commands_channel_id:
            return await ctx.send(f"**{ctx.author.mention}, you can only use this command in {self.bots_txt.mention}!**")
        attacker = ctx.author
        attacker_fx = await self.get_user_effects(attacker)
        if 'knocked_out' in attacker_fx:
            return await ctx.send(f"**{attacker.mention}, you can't use your skill, because you are knocked-out!**")
        # Validate the target: must exist, be human, not the attacker, not already a Munk.
        if not target:
            return await ctx.send(f"**Please, choose a member to use the `Munk` skill, {attacker.mention}!**")
        if target.bot:
            return await ctx.send(f"**You cannot convert a bot into a `Munk`, {attacker.mention}!**")
        if attacker.id == target.id:
            return await ctx.send(f"**You cannot convert yourself, since you are already a `Munk`, {attacker.mention}!**")
        target_fx = await self.get_user_effects(target)
        if 'munk' in target_fx:
            return await ctx.send(f"**{target.mention} is already a `Munk`, {attacker.mention}!**")
        target_sloth_profile = await self.get_sloth_profile(target.id)
        if not target_sloth_profile:
            return await ctx.send(f"**You cannot convert someone who doesn't have an account, {attacker.mention}!**")
        if target_sloth_profile[1] == 'default':
            return await ctx.send(f"**You cannot convert someone who has a `default` Sloth class, {attacker.mention}!**")
        if 'protected' in target_fx:
            return await ctx.send(f"**{attacker.mention}, you cannot convert {target.mention} into a `Munk`, because they are protected against attacks!**")
        confirmed = await ConfirmSkill(f"**{attacker.mention}, are you sure you want to convert {target.mention} into a `Munk`?**").prompt(ctx)
        if not confirmed:
            return await ctx.send("**Not converting them, then!**")
        # When invoked through the Mirror skill, reuse the mirrored skill action
        # instead of checking/consuming this class's own cooldown.
        if ctx.invoked_with == 'mirror':
            mirrored_skill = await self.get_skill_action_by_user_id_and_skill_type(user_id=attacker.id, skill_type='mirror')
            if not mirrored_skill:
                return await ctx.send(f"**Something went wrong with this, {attacker.mention}!**")
        else:
            _, exists = await Player.skill_on_cooldown(skill=Skill.ONE).predicate(ctx)
        try:
            # The conversion itself is just a nickname suffix.
            await target.edit(nick=f"{target.display_name} Munk")
            current_timestamp = await utils.get_timestamp()
            if ctx.invoked_with != 'mirror':
                # Start (or restart) this skill's cooldown.
                if exists:
                    await self.update_user_skill_ts(attacker.id, Skill.ONE, current_timestamp)
                else:
                    await self.insert_user_skill_cooldown(attacker.id, Skill.ONE, current_timestamp)
            # Updates user's skills used counter
            await self.update_user_skills_used(user_id=attacker.id)
            munk_embed = await self.get_munk_embed(
                channel=ctx.channel, perpetrator_id=attacker.id, target_id=target.id)
            msg = await ctx.send(embed=munk_embed)
        except Exception as e:
            print(e)
            return await ctx.send(f"**Something went wrong and your `Munk` skill failed, {attacker.mention}!**")
        else:
            await msg.edit(content=f"<@{target.id}>")
            # Reflect the attack back if the target carries the reflect effect.
            if 'reflect' in target_fx and 'munk' not in attacker_fx:
                await self.reflect_attack(ctx, attacker, target, 'munk')
async def get_munk_embed(self, channel, perpetrator_id: int, target_id: int) -> discord.Embed:
""" Makes an embedded message for a munk action.
:param channel: The context channel.
:param perpetrator_id: The ID of the perpetrator of the munk skill.
:param target_id: The ID of the target member that is gonna be protected. """
timestamp = await utils.get_timestamp()
munk_embed = discord.Embed(
title="A Munk Convertion has been delightfully performed!",
description=f"🐿️ <@{perpetrator_id}> converted <@{target_id}> into a `Munk`! 🐿️",
color = discord.Color.green(),
timestamp=datetime.fromtimestamp(timestamp)
)
munk_embed.set_thumbnail(url="https://thelanguagesloth.com/media/sloth_classes/Munk.png")
munk_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)
return munk_embed
async def get_join_tribe_embed(self, channel, inviter: discord.Member, target: discord.Member, tribe: Dict[str, Union[int, str]]) -> discord.Embed:
""" Makes an embedded message for a tribe joining.
:param channel: The context channel.
:param inviter: The inviter.
:param target_id: The target member that is gonna be invited to a tribe.
:param tribe: The tribe and its information. """
timestamp = await utils.get_timestamp()
join_tribe_embed = discord.Embed(
title="Someone just joined a Tribe!",
description=f"🏕️ {target.mention} just joined `{tribe["name"]}`! 🏕️",
color=discord.Color.green(),
timestamp=datetime.fromtimestamp(timestamp),
url=tribe['link']
)
join_tribe_embed.set_author(name=inviter, icon_url=inviter.display_avatar)
if tribe['thumbnail']:
join_tribe_embed.set_thumbnail(url=tribe['thumbnail'])
join_tribe_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)
return join_tribe_embed
async def get_tribe_info_by_name(self, name: str) -> Dict[str, Union[str, int]]:
""" Gets information about a specific tribe.
:param name: The name of the tribe. """
mycursor, db = await the_database()
await mycursor.execute("SELECT * FROM UserTribe WHERE tribe_name = %s", (name,))
tribe = await mycursor.fetchone()
await mycursor.close()
tribe_info = {
'owner_id': None,
'name': None,
'description': None,
'two_emojis': None,
'thumbnail': None,
'form': None,
'link': None
}
if tribe:
tribe_info = {
'owner_id': tribe[0],
'name': tribe[1],
'description': tribe[2],
'two_emojis': tribe[3],
'thumbnail': tribe[4],
'form': tribe[5],
'link': f"https://thelanguagesloth.com/tribes/{tribe[6]}/"
}
return tribe_info
async def get_tribe_info_by_user_id(self, user_id: int) -> Dict[str, Union[str, int]]:
""" Gets information about a specific tribe.
:param user_id: The ID of the user owner of the tribe. """
mycursor, db = await the_database()
await mycursor.execute("SELECT * FROM UserTribe WHERE user_id = %s", (user_id,))
tribe = await mycursor.fetchone()
await mycursor.close()
tribe_info = {
'owner_id': None,
'name': None,
'description': None,
'two_emojis': None,
'thumbnail': None,
'form': None,
'link': None
}
if tribe:
tribe_info = {
'owner_id': tribe[0],
'name': tribe[1],
'description': tribe[2],
'two_emojis': tribe[3],
'thumbnail': tribe[4],
'form': tribe[5],
'link': f"https://thelanguagesloth.com/tribes/{tribe[6]}/"
}
return tribe_info
async def get_tribe_member(self, user_id: int) -> List[Union[str, int]]:
""" Gets a Tribe Member.
:param user_id: The ID of the tribe member to get. """
mycursor, db = await the_database()
await mycursor.execute("SELECT * FROM TribeMember WHERE member_id = %s", (user_id,))
tribe_member = await mycursor.fetchone()
await mycursor.close()
return tribe_member
async def get_tribe_members(self, tribe_owner_id: int = None, tribe_name: str = None) -> List[List[Union[int, str]]]:
""" Gets a list of IDs of members of a particular tribe.
:param tribe_owner_id: The ID of the owner of the tribe (Optional).
:param tribe_name: The name of the tribe. (Optional).
Ps: At least one of the parameters has to be provided. """
mycursor, _ = await the_database()
tribe_members: List[int] = []
if tribe_owner_id:
await mycursor.execute("SELECT tribe_name FROM UserTribe WHERE user_id = %s", (tribe_owner_id,))
tribe = await mycursor.fetchone()
await mycursor.execute("SELECT member_id, tribe_role FROM TribeMember WHERE tribe_name = %s", (tribe[0],))
tribe_members = await mycursor.fetchall()
elif tribe_name:
await mycursor.execute("SELECT member_id, tribe_role FROM TribeMember WHERE tribe_name = %s", (tribe_name,))
tribe_members = await mycursor.fetchall()
await mycursor.close()
return tribe_members
@commands.group(aliases=['tb'])
@Player.poisoned()
@Player.kidnapped()
async def tribe(self, ctx) -> None:
""" Command for managing and interacting with a tribe.
(Use this without a subcommand to see all subcommands available) """
if ctx.invoked_subcommand:
return
cmd = self.client.get_command('tribe')
prefix = self.client.command_prefix
subcommands = [f"{prefix}{c.qualified_name}" for c in cmd.commands
]
subcommands = '\n'.join(subcommands)
items_embed = discord.Embed(
title="__Subcommads__:",
description=f"```apache\n{subcommands}```",
color=ctx.author.color,
timestamp=ctx.message.created_at
)
await ctx.send(embed=items_embed)
@tribe.command(aliases=['request_logo', 'ask_thumbnail', 'ask_logo'])
@commands.cooldown(1, 3600, commands.BucketType.user)
async def request_thumbnail(self, ctx, image_url: str = None) -> None:
""" Request a thumbnail for your tribe.
:param image_url: The URL link of the thumbnail image. """
requester = ctx.author
if ctx.channel.id != bots_and_commands_channel_id:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**{ctx.author.mention}, you can only use this command in {self.bots_txt.mention}!**")
if not image_url:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"You need to inform an image URL, {requester.mention}!**")
if not image_url.startswith('https://'):
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"You need to inform an image URL that has HTTPS in it, {requester.mention}!**")
if len(image_url) > 200:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"You need to inform an image URL within 200 characters, {requester.mention}!**")
user_tribe = await self.get_tribe_info_by_user_id(user_id=requester.id)
if not user_tribe['name']:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**You don't even have a tribe, you cannot request it, {requester.mention}!**")
confirm = await ConfirmSkill(content=requester.mention,
msg=f"**Are you sure you want to request [this]({image_url}) to be `{user_tribe["name"]}`'s thumbnail/logo?**").prompt(ctx)
if confirm:
# Sends message to a moderation-clearance room
room = self.client.get_channel(approve_thumbnail_channel_id)
request_embed = discord.Embed(
title="__Thumbnail Request__",
description=f"{requester.mention} is requesting the image below to be their tribe"s (`{user_tribe["name"]}`) thumbnail/logo.",
color=requester.color,
timestamp=ctx.message.created_at
)
request_embed.set_image(url=image_url)
request_msg = await room.send(embed=request_embed)
# Don't need to store it, since it is forever
current_timestamp = await utils.get_timestamp()
await self.insert_skill_action(
user_id=requester.id, skill_type="thumbnail_request", skill_timestamp=current_timestamp,
target_id=requester.id, channel_id=room.id, message_id=request_msg.id,
content=image_url
)
await request_msg.add_reaction('✅')
await request_msg.add_reaction('❌')
await ctx.send(f"**Request sent, {ctx.author.mention}!**")
else:
ctx.command.reset_cooldown(ctx)
await ctx.send(f"**Not doing requesting it, then, {requester.mention}!**")
    @tribe.command(aliases=['inv'])
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def invite(self, ctx, member: discord.Member = None) -> None:
        """ Invites a user to your tribe.
        :param member: The member to invite. """
        inviter = ctx.author
        if ctx.channel.id != bots_and_commands_channel_id:
            return await ctx.send(f"**{inviter.mention}, you can only use this command in {self.bots_txt.mention}!**")
        # Only the tribe owner may invite (member_id must equal the tribe's owner id).
        tribe_member = await self.get_tribe_member(inviter.id)
        if not tribe_member or tribe_member[0] != tribe_member[2]:
            return await ctx.send(f"**You don't have a tribe, {inviter.mention}**!")
        if not member:
            return await ctx.send(f"**Please, inform a member to invite to your tribe, {inviter.mention}!**")
        if inviter.id == member.id:
            return await ctx.send(f"**You cannot invite yourself into your own tribe, {inviter.mention}!**")
        confirm = await ConfirmSkill(f"Are you sure you want to invite, {member.mention} to `{tribe_member[1]}`?").prompt(ctx)
        if not confirm:
            return await ctx.send("**Not inviting them, then!**")
        # Checks whether user is already in a tribe.
        sloth_profile = await self.get_sloth_profile(member.id)
        if not sloth_profile:
            return await ctx.send(f"**You cannot invite someone that doesn't have an account, {inviter.mention}!**")
        if sloth_profile[1] == 'default':
            return await ctx.send(f"**You cannot invite someone that doesn't have a Sloth Class, {inviter.mention}!**")
        if sloth_profile[3]:
            return await ctx.send(f"**You cannot invite someone that is already in a tribe, {inviter.mention}!**")
        # NOTE(review): this does NOT copy ctx — `custom_ctx` aliases it, so
        # ctx.author is mutated to the invitee for the prompt below; confirm
        # nothing later in this handler relies on ctx.author being the inviter.
        custom_ctx = ctx
        custom_ctx.author = member
        invite = await ConfirmSkill(content=f"{member.mention}", msg=f"{inviter.mention} invited you to join their tribe called `{tribe_member[1]}`, do you wanna join?").prompt(custom_ctx)
        if invite:
            user_tribe = await self.get_tribe_info_by_user_id(inviter.id)
            try:
                await self.insert_tribe_member(owner_id=inviter.id, tribe_name=tribe_member[1], user_id=member.id)
                await self.update_someones_tribe(user_id=member.id, tribe_name=tribe_member[1])
                try:
                    # Best-effort: nickname edits can fail (e.g. missing permissions).
                    await self.update_tribe_name(member=member, two_emojis=user_tribe['two_emojis'], joining=True)
                except:
                    pass
            except Exception as e:
                print(e)
                await ctx.send(f"**Something went wrong with it, {member.mention}, {inviter.mention}!**")
            else:
                join_tribe_embed = await self.get_join_tribe_embed(
                    channel=ctx.channel, inviter=inviter, target=member, tribe=user_tribe)
                await ctx.send(embed=join_tribe_embed)
        else:
            await ctx.send(f"**{member.mention} refused your invitation to join `{tribe_member[1]}`, {inviter.mention}!**")
@tribe.command(aliases=['view', 'display', 'show'])
@commands.cooldown(1, 10, commands.BucketType.user)
async def see(self, ctx, *, name: str = None) -> None:
""" Shows some information about a tribe.
If not provided a tribe name, it will check the one the user is in.
:param name: The tribe name. """
member = ctx.author
tribe = None
if name:
tribe = await self.get_tribe_info_by_name(name)
else:
sloth_profile = await self.get_sloth_profile(member.id)
if not sloth_profile or not sloth_profile[3]:
return await ctx.send(
f"**You didn't provide any tribe name and you're not in a tribe either, {member.mention}!**")
tribe = await self.get_tribe_info_by_name(sloth_profile[3])
if not tribe['name']:
return await ctx.send(f"**No tribes with that name were found, {member.mention}!**")
# Gets all tribe members
tribe_members = await self.get_tribe_members(tribe_name=tribe['name'])
all_members = list(map(lambda mid: f"<@{mid[0]}> ({mid[1]})", tribe_members))
# Additional data:
additional = {
'tribe': tribe,
'change_embed': self._make_tribe_embed
}
pages = menus.MenuPages(source=SwitchTribePages(all_members, **additional), clear_reactions_after=True)
await pages.start(ctx)
async def _make_tribe_embed(self, ctx: commands.Context, tribe: Dict[str, Union[str, int]], entries: int, offset: int, lentries: int) -> discord.Embed:
tribe_owner = self.client.get_user(tribe['owner_id'])
tribe_embed = discord.Embed(
title=f"{tribe["name"]} ({tribe["two_emojis"]})",
description=tribe['description'],
timestamp=ctx.message.created_at,
color=ctx.author.color,
url=tribe['link']
)
if tribe['thumbnail']:
tribe_embed.set_thumbnail(url=tribe['thumbnail'])
if tribe_owner:
tribe_embed.set_author(name=f"Owner: {tribe_owner}", icon_url=tribe_owner.display_avatar, url=tribe_owner.display_avatar)
tribe_embed.add_field(name="__Members:__", value=', '.join(entries), inline=False)
for i, v in enumerate(entries, start=offset):
tribe_embed.set_footer(text=f"({i} of {lentries})")
return tribe_embed
@tribe.command(aliases=['kick', 'expel', 'kick_out', 'can_i_show_you_the_door?'])
@commands.cooldown(1, 10, commands.BucketType.user)
async def kickout(self, ctx, member: Union[discord.Member, discord.User] = None) -> None:
""" Exepels someone from your tribe.
:param member: The member to expel. """
expeller = ctx.author
if ctx.channel.id != bots_and_commands_channel_id:
return await ctx.send(f"**{expeller.mention}, you can only use this command in {self.bots_txt.mention}!**")
user_tribe = await self.get_tribe_info_by_user_id(user_id=expeller.id)
if not user_tribe['name']:
return await ctx.send(f"**You don't have a tribe, {expeller.mention}**!")
if not member:
return await ctx.send(f"**Please, inform a member to kick from your tribe, {expeller.mention}!**")
if expeller.id == member.id:
return await ctx.send(f"**You cannot kick yourself out of your own tribe, {expeller.mention}!**")
member_fx = await self.get_user_effects(member)
if 'kidnapped' in member_fx:
return await ctx.send(f"**You cannot kick someone from your tribe who is kidnapped, {expeller.mention}!**")
confirm = await ConfirmSkill(f"Are you sure you want to kick, {member.mention} from `{user_tribe["name"]}`?").prompt(ctx)
if not confirm:
return await ctx.send("**Not kicking them, then!**")
# Checks whether user is already in a tribe.
sloth_profile = await self.get_sloth_profile(member.id)
if not sloth_profile:
return await ctx.send(f"**You cannot kick out someone that doesn't even have an account, {expeller.mention}!**")
if sloth_profile[3] != user_tribe['name']:
return await ctx.send(f"**You cannot kick out someone that is not in your tribe, {expeller.mention}!**")
try:
# await self.update_someones_tribe(user_id=member.id, tribe_name=None)
await self.delete_tribe_member(user_id=member.id)
try:
await self.update_tribe_name(member=member, two_emojis=user_tribe['two_emojis'], joining=False)
except:
pass
except Exception as e:
print(e)
await ctx.send(f"**Something went wrong with it, {expeller.mention}!**")
else:
await ctx.send(f"**You successfully kicked {member.mention} out of `{user_tribe["name"]}`, {expeller.mention}!**")
@tribe.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def leave(self, ctx) -> None:
""" Leaves the tribe the user is in. """
member = ctx.author
if ctx.channel.id != bots_and_commands_channel_id:
return await ctx.send(f"**{member.mention}, you can only use this command in {self.bots_txt.mention}!**")
tribe_member = await self.get_tribe_member(user_id=member.id)
if not tribe_member[1]:
return await ctx.send(f"**You are not in a tribe, {member.mention}**!")
if member.id == tribe_member[0]:
return await ctx.send(f"**You cannot leave your own tribe, {member.mention}!**")
user_tribe = await self.get_tribe_info_by_name(tribe_member[1])
confirm = await ConfirmSkill(f"Are you sure you want to leave `{user_tribe["name"]}`, {member.mention}?").prompt(ctx)
if not confirm:
return await ctx.send("**Not leaving it, then!**")
# Updates user tribe status and nickname
try:
await self.delete_tribe_member(member.id)
try:
await self.update_tribe_name(member=member, two_emojis=user_tribe['two_emojis'], joining=False)
except Exception as ee:
print(ee)
pass
except Exception as e:
print(e)
await ctx.send(f"**Something went wrong with it, {member.mention}!**")
else:
await ctx.send(f"**You successfully left `{user_tribe["name"]}`, {member.mention}!**")
    async def update_someones_tribe(self, user_id: int, tribe_name: str = None) -> None:
        """ Updates someone's tribe status.
        :param user_id: The ID of the user who's gonna be updated.
        :param tribe_name: The name of the tribe the user is gonna be set to. (default = None) """
        mycursor, db = await the_database()
        # NOTE(review): tribe_user_id is set to the member's own id here, even when
        # tribe_name is None (i.e. leaving) — confirm this matches the schema's intent.
        await mycursor.execute("UPDATE SlothProfile SET tribe = %s, tribe_user_id = %s WHERE user_id = %s", (tribe_name, user_id, user_id))
        await db.commit()
        await mycursor.close()
async def update_tribe_thumbnail(self, user_id: int, tribe_name: str, link: str = None) -> None:
    """ Updates a tribe's thumbnail link in both the website and the bot databases.

    :param user_id: The ID of the tribe's owner.
    :param tribe_name: The name of the tribe.
    :param link: The link that the tribe's thumbnail will be set to. """

    values = (link, user_id, tribe_name)

    # Website (Django) database first.
    django_cursor, django_db = await the_django_database()
    await django_cursor.execute("""
    UPDATE tribe_tribe SET tribe_thumbnail = %s
    WHERE owner_id = %s AND tribe_name = %s""", values)
    await django_db.commit()
    await django_cursor.close()

    # Then the bot's own database.
    bot_cursor, bot_db = await the_database()
    await bot_cursor.execute("""
    UPDATE UserTribe SET tribe_thumbnail = %s
    WHERE user_id = %s AND tribe_name = %s""", values)
    await bot_db.commit()
    await bot_cursor.close()
async def update_tribe_name(self, member: discord.Member, two_emojis: str, joining: bool) -> None:
    """ Adds or removes a tribe's two-emoji identifier in a member's nickname.

    :param member: The member whose nickname is gonna be updated.
    :param two_emojis: The two-emoji combination identifier.
    :param joining: Whether the user is joining the tribe. """

    current_name = member.display_name

    if not joining:
        # Drop the last occurrence of the identifier and re-join what's left.
        pieces = [piece.strip() for piece in current_name.rsplit(two_emojis, 1)]
        new_nick = ' '.join(pieces)
        # Only touch the nickname if the identifier was actually present.
        if new_nick != current_name:
            await member.edit(nick=new_nick)
        return

    # Joining: keep a trailing 'Munk' suffix after the identifier, if present.
    if current_name.endswith('Munk'):
        await member.edit(nick=f"{current_name.strip()[:-4]} {two_emojis} Munk".strip())
    else:
        await member.edit(nick=f"{current_name.strip()} {two_emojis}".strip())
async def check_tribe_creations(self) -> None:
    """ Consumes pending 'tribe_creation' skill actions.

    For each pending creation: deletes the skill action from the database and
    tags the creator's nickname with the tribe's two-emoji identifier
    (presumably column 6 of the skill-action row — TODO confirm schema). """

    creations = await self.get_skill_actions_by_skill_type('tribe_creation')
    guild = self.client.get_guild(int(os.getenv('SERVER_ID', 123)))
    for creation in creations:
        try:
            # Removes skill action from the database
            await self.delete_skill_action_by_target_id_and_skill_type(target_id=creation[0], skill_type='tribe_creation')
            member = discord.utils.get(guild.members, id=creation[0])
            try:
                # Best-effort nickname update; may fail on permissions or if
                # the member has left the guild.
                await self.update_tribe_name(member=member, two_emojis=creation[6], joining=True)
            except:
                pass
        except:
            # Deliberate best-effort: one bad row must not stop the sweep.
            pass
@commands.command()
@Player.poisoned()
@Player.skills_used(requirement=5)
@Player.skill_on_cooldown(skill=Skill.TWO)
@Player.skills_locked()
@Player.user_is_class('munk')
@Player.skill_mark()
async def create_tribe(self, ctx) -> None:
    """ Guides you into the creation of a tribe,
    which is a custom group for people to join and do something. """

    member = ctx.author

    # Tribes are created on the website, not inside Discord.
    link = 'https://thelanguagesloth.com/tribes'

    tribe_embed = discord.Embed(
        title="__Tribe Creation__",
        description=f"In order to create your tribe, access our website by clicking [here]({link}) or in the button below!",
        color=member.color,
        timestamp=ctx.message.created_at,
        url=link
    )
    tribe_embed.set_author(name=member, url=member.display_avatar, icon_url=member.display_avatar)
    tribe_embed.set_thumbnail(url=member.display_avatar)
    tribe_embed.set_footer(text=member.guild.name, icon_url=member.guild.icon.url)

    # style=5 is a link-style button: it only opens the website URL.
    view = discord.ui.View()
    view.add_item(discord.ui.Button(style=5, label="Create Tribe", url=link, emoji="🏕️"))

    await ctx.send(embed=tribe_embed, view=view)
@commands.command(aliases=['add_tribe_role', 'createtriberole', 'addtriberole'])
@Player.poisoned()
@Player.skills_used(requirement=20)
@Player.skill_on_cooldown(skill=Skill.THREE, seconds=36000)
@Player.skills_locked()
@Player.user_is_class('munk')
@Player.skill_mark()
async def create_tribe_role(self, ctx, role_name: str = None) -> None:
    """ Creates a tribe role.

    With different roles and positions in your tribe, you
    can better administrate and know what each person should do
    or their purpose inside your tribe.

    :param role_name: The name of the tribe role. (MAX = 30 Chars)

    * Cooldown: 1 day

    Ps: It is not an actual server role. """

    perpetrator = ctx.author

    # Do the magic here.
    if ctx.channel.id != self.bots_txt.id:
        return await ctx.send(f"**{perpetrator.mention}, you can only use this command in {self.bots_txt.mention}!**")

    perpetrator_fx = await self.get_user_effects(perpetrator)
    if 'knocked_out' in perpetrator_fx:
        return await ctx.send(f"**{perpetrator.mention}, you can't use this skill, because you are knocked-out!**")

    user_tribe = await self.get_tribe_info_by_user_id(perpetrator.id)
    if not user_tribe['name']:
        return await ctx.send(f"**You don't have a tribe, {perpetrator.mention}**!")

    # Role-name validation: required, <= 30 chars, not a reserved name.
    if not role_name:
        return await ctx.send(f"**Please, inform a Tribe Role name, {perpetrator.mention}!**")
    if len(role_name) > 30:
        # Fixed typo in the user-facing message: "infom" -> "inform".
        return await ctx.send(f"**Please, inform a Tribe Role name under or equal to 30 characters, {perpetrator.mention}!**")
    if role_name.lower() in ['owner', 'member']:
        return await ctx.send(f"**You cannot use this as your Tribe Role's name, {perpetrator.mention}!**")

    tribe_roles = await self.get_tribe_roles(perpetrator.id)
    if role_name.lower() in [trole[2].lower() for trole in tribe_roles]:
        return await ctx.send(f"**You already have a Tribe Role with that name, {perpetrator.mention}!**")

    confirm = await ConfirmSkill(f"**Are you sure you want to create a Tribe Role named `{role_name}`, {perpetrator.mention}?**").prompt(ctx)
    if not confirm:
        return await ctx.send(f"**Not making it, then, {perpetrator.mention}!**")

    # Whether a cooldown row already exists decides between UPDATE/INSERT below.
    _, exists = await Player.skill_on_cooldown(skill=Skill.THREE, seconds=36000).predicate(ctx)

    try:
        current_timestamp = await utils.get_timestamp()
        await self.insert_tribe_role(perpetrator.id, user_tribe['name'], role_name)
        if exists:
            await self.update_user_skill_ts(perpetrator.id, Skill.THREE, current_timestamp)
        else:
            await self.insert_user_skill_cooldown(perpetrator.id, Skill.THREE, current_timestamp)
        # Updates user's skills used counter
        await self.update_user_skills_used(user_id=perpetrator.id)
    except Exception as e:
        print(e)
        return await ctx.send(f"**Something went wrong with your skill and it failed, {perpetrator.mention}!**")
    else:
        tribe_role_embed = await self.get_tribe_role_embed(
            channel=ctx.channel, owner_id=perpetrator.id, tribe_info=user_tribe, role_name=role_name)
        await ctx.send(embed=tribe_role_embed)
@tribe.command(aliases=['remove_role', 'deleterole', 'removerole'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def delete_role(self, ctx, role_name: str = None) -> None:
    """ Deletes a specific role from the member's tribe.

    :param role_name: The name of the role to delete. """

    author = ctx.author

    # Validate the requested role name first.
    if not role_name:
        return await ctx.send(f"**Please, inform a Tribe Role name, {author.mention}!**")
    if len(role_name) > 30:
        return await ctx.send(f"**Tribe Role names have a limit of 30 characters, {author.mention}!**")

    author_tribe = await self.get_tribe_info_by_user_id(user_id=author.id)
    if not author_tribe['name']:
        return await ctx.send(f"**You don't have a tribe, {author.mention}**!")

    target_role = await self.get_tribe_role(author.id, role_name)
    if not target_role:
        return await ctx.send(f"**You don't have a Tribe Role with that name, {author.mention}!**")

    confirm = await ConfirmSkill(f"**Are you sure you want to delete your tribe's `{target_role[2]}` role, {author.mention}?**").prompt(ctx)
    if not confirm:
        return await ctx.send(f"**Not doing it then, {author.mention}!**")

    try:
        await self.delete_tribe_role(author.id, author_tribe['name'], role_name)
    except Exception as e:
        print(e)
        await ctx.send(f"**Something went wrong with it, {author.mention}!**")
    else:
        await ctx.send(f"**Successfully deleted the `{role_name}` role from your tribe, {author.mention}!**")
@tribe.command(aliases=['remove_roles', 'deleteroles', 'removeroles'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def delete_roles(self, ctx) -> None:
    """ Deletes all Tribe Roles from the member's tribe. """

    member = ctx.author

    user_tribe = await self.get_tribe_info_by_user_id(user_id=member.id)
    if not user_tribe['name']:
        return await ctx.send(f"**You don't have a tribe, {member.mention}**!")

    tribe_roles = await self.get_tribe_roles(member.id)
    if not tribe_roles:
        # Fixed grammar in the user-facing message: "You don't any" -> "You don't have any".
        return await ctx.send(f"**You don't have any Tribe Roles, {member.mention}!**")

    confirm = await ConfirmSkill(f"**Are you sure you want to delete your tribe's roles, {member.mention}?**").prompt(ctx)
    if not confirm:
        return await ctx.send(f"**Not doing it then, {member.mention}!**")

    try:
        await self.delete_tribe_roles(member.id, user_tribe['name'])
    except Exception as e:
        print(e)
        await ctx.send(f"**Something went wrong with it, {member.mention}!**")
    else:
        await ctx.send(f"**Successfully deleted all roles from your tribe, {member.mention}!**")
@tribe.command(aliases=['give_role', 'giverole'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def promote(self, ctx, member: discord.Member = None, role_name: str = None) -> None:
    """ Promotes a Tribe Member to a given Tribe Role.

    :param member: The Tribe Member to promote.
    :param role_name: The Tribe Role to promote the member to. """

    requester = ctx.author

    # Argument validation.
    if not member:
        return await ctx.send(f"**Please, inform a Tribe Member to promote, {requester.mention}!**")
    if requester.id == member.id:
        return await ctx.send(f"**You cannot promote yourself, {requester.mention}!**")
    if not role_name:
        return await ctx.send(f"**Please, inform a Tribe Role name, {requester.mention}!**")
    if len(role_name) > 30:
        return await ctx.send(f"**Tribe Role names have a limit of 30 characters, {requester.mention}!**")

    requester_tribe = await self.get_tribe_info_by_user_id(user_id=requester.id)
    if not requester_tribe['name']:
        return await ctx.send(f"**You don't have a tribe, {requester.mention}**!")

    # The target must belong to the requester's own tribe and not already
    # hold the requested role.
    target_membership = await self.get_tribe_member(member.id)
    if not target_membership:
        return await ctx.send(f"**{member.mention} is not even in a tribe, {requester.mention}!**")
    if target_membership[1] != requester_tribe['name']:
        return await ctx.send(f"**{member.mention} is not even from your tribe, {requester.mention}!**")
    if str(target_membership[3]).lower() == role_name.lower():
        return await ctx.send(f"**{member.mention} already has this Tribe Role, {requester.mention}!**")

    promotion_role = await self.get_tribe_role(requester.id, role_name)
    if not promotion_role:
        return await ctx.send(f"**You don't have a Tribe Role with that name, {requester.mention}!**")

    confirm = await ConfirmSkill(f"**Are you sure you want to promote {member.mention} to `{promotion_role[2]}`, {requester.mention}?**").prompt(ctx)
    if not confirm:
        return await ctx.send(f"**Not doing it then, {requester.mention}!**")

    try:
        await self.update_user_tribe_role(member.id, promotion_role[2])
    except Exception as e:
        print(e)
        await ctx.send(f"**Something went wrong with it, {requester.mention}!**")
    else:
        await ctx.send(f"**Successfully promoted {member.mention} to `{promotion_role[2]}`, {requester.mention}!**")
@tribe.command(aliases=['take_role', 'takerole'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def demote(self, ctx, member: discord.Member = None) -> None:
    """ Demotes a Tribe Member from their current Tribe Role back to `Member`.

    :param member: The Tribe Member to demote. """

    owner = ctx.author

    if not member:
        # Fixed copy-paste in the message: this is the demote command, not promote.
        return await ctx.send(f"**Please, inform a Tribe Member to demote, {owner.mention}!**")
    if owner.id == member.id:
        return await ctx.send(f"**You cannot demote yourself, {owner.mention}!**")

    user_tribe = await self.get_tribe_info_by_user_id(user_id=owner.id)
    if not user_tribe['name']:
        return await ctx.send(f"**You don't have a tribe, {owner.mention}**!")

    # The target must be in the owner's tribe and hold a non-default role.
    tribe_member = await self.get_tribe_member(member.id)
    if not tribe_member:
        return await ctx.send(f"**{member.mention} is not even in a tribe, {owner.mention}!**")
    if tribe_member[1] != user_tribe['name']:
        return await ctx.send(f"**{member.mention} is not even from your tribe, {owner.mention}!**")
    if tribe_member[3] == 'Member':
        return await ctx.send(f"**{member.mention} already has the default Tribe Role, {owner.mention}!**")

    tribe_role = await self.get_tribe_role(owner.id, tribe_member[3])
    if not tribe_role:
        return await ctx.send(f"**You don't have a Tribe Role with that name, {owner.mention}!**")

    confirm = await ConfirmSkill(f"**Are you sure you want to demote {member.mention} from `{tribe_role[2]}` to `Member`, {owner.mention}?**").prompt(ctx)
    if not confirm:
        return await ctx.send(f"**Not doing it then, {owner.mention}!**")

    try:
        # No role name -> resets the member to the default role.
        await self.update_user_tribe_role(member.id)
    except Exception as e:
        print(e)
        await ctx.send(f"**Something went wrong with it, {owner.mention}!**")
    else:
        # Fixed grammar: "Successfully demote" -> "Successfully demoted".
        await ctx.send(f"**Successfully demoted {member.mention} from `{tribe_role[2]}` to `Member`, {owner.mention}!**")
@tribe.command()
@commands.cooldown(1, 5, commands.BucketType.user)
async def roles(self, ctx, tribe_name: Optional[str] = None) -> None:
    """ Shows the Tribe Roles of a given tribe.

    :param tribe_name: The name of the tribe to show the roles. [Optional]

    PS: If a tribe name is not provided, it will fetch the tribe the user is in. """

    member = ctx.author

    tribe = None
    if tribe_name:
        tribe = await self.get_tribe_info_by_name(tribe_name)
    else:
        sloth_profile = await self.get_sloth_profile(member.id)
        if not sloth_profile or not sloth_profile[3]:
            return await ctx.send(
                f"**You didn't provide any tribe name and you're not in a tribe either, {member.mention}!**")
        tribe = await self.get_tribe_info_by_name(sloth_profile[3])

    if not tribe['name']:
        return await ctx.send(f"**No tribe with that name was found, {member.mention}**!")

    # Fetch roles of the tribe that was looked up (keyed by the tribe owner's
    # ID), not of the command caller — the old code passed member.id, which
    # showed the caller's own roles when querying another tribe by name.
    roles = await self.get_tribe_roles(tribe['owner_id'])
    if not roles:
        return await ctx.send(f"**This tribe doesn't have any intern roles, {member.mention}!**")

    embed = discord.Embed(
        # Single-quoted key: double quotes inside this double-quoted f-string
        # would be a SyntaxError on Python < 3.12.
        title=f"__{tribe['name']}'s Roles__:",
        description=', '.join([r[2] for r in roles]),
        color=member.color,
        timestamp=ctx.message.created_at,
        url=tribe['link']
    )
    embed.set_author(name=member.display_name, url=member.display_avatar, icon_url=member.display_avatar)
    if tribe['thumbnail']:
        embed.set_thumbnail(url=tribe['thumbnail'])
    embed.set_footer(text=ctx.guild.name, icon_url=ctx.guild.icon.url)

    await ctx.send(embed=embed)
async def get_tribe_role(self, owner_id: int, role_name: str) -> List[Union[int, str]]:
    """ Fetches one Tribe Role by its name (case-insensitive match).

    :param owner_id: The ID of the owner of that tribe.
    :param role_name: The name of the role. """

    cursor, _ = await the_database()
    sql = "SELECT * FROM TribeRole WHERE owner_id = %s AND LOWER(role_name) = LOWER(%s)"
    await cursor.execute(sql, (owner_id, role_name))
    role_row = await cursor.fetchone()
    await cursor.close()
    return role_row
async def get_tribe_roles(self, owner_id: int) -> List[List[Union[int, str]]]:
    """ Fetches every Tribe Role belonging to a tribe owner's tribe.

    :param owner_id: The ID of the owner of that tribe. """

    cursor, _ = await the_database()
    await cursor.execute("SELECT * FROM TribeRole WHERE owner_id = %s", (owner_id,))
    role_rows = await cursor.fetchall()
    await cursor.close()
    return role_rows
async def insert_tribe_role(self, owner_id: int, tribe_name: str, role_name: str) -> None:
    """ Registers a new Tribe Role in the database.

    :param owner_id: The ID of the owner of that tribe.
    :param tribe_name: The name of the tribe.
    :param role_name: The name of the role. """

    cursor, conn = await the_database()
    await cursor.execute("""
    INSERT INTO TribeRole (owner_id, tribe_name, role_name) VALUES (%s, %s, %s)
    """, (owner_id, tribe_name, role_name))
    await conn.commit()
    await cursor.close()
async def delete_tribe_role(self, owner_id: int, tribe_name: str, role_name: str) -> None:
    """ Removes a Tribe Role and resets its holders back to the default role.

    :param owner_id: The ID of the owner of that tribe.
    :param tribe_name: The name of the tribe.
    :param role_name: The name of the role. """

    cursor, conn = await the_database()
    # Drop the role itself (name match is case-insensitive).
    await cursor.execute("DELETE FROM TribeRole WHERE owner_id = %s AND LOWER(role_name) = LOWER(%s)", (owner_id, role_name))
    # Any member that held this role falls back to the column default.
    await cursor.execute("""
    UPDATE TribeMember SET tribe_role = DEFAULT(tribe_role) WHERE tribe_name = %s AND LOWER(tribe_role) = LOWER(%s)
    """, (tribe_name, role_name))
    await conn.commit()
    await cursor.close()
async def delete_tribe_roles(self, owner_id: int, tribe_name: str) -> None:
    """ Wipes every Tribe Role of a tribe and resets its members' roles.

    :param owner_id: The ID of the owner of that tribe.
    :param tribe_name: The name of the tribe. """

    cursor, conn = await the_database()
    # Remove every role owned by this tribe owner.
    await cursor.execute("DELETE FROM TribeRole WHERE owner_id = %s", (owner_id,))
    # Everyone except the owner goes back to the default role.
    await cursor.execute("""
    UPDATE TribeMember SET tribe_role = DEFAULT(tribe_role)
    WHERE tribe_name = %s AND tribe_role <> 'Owner'
    """, (tribe_name,))
    await conn.commit()
    await cursor.close()
async def insert_tribe_member(self, owner_id: int, tribe_name: str, user_id: int, tribe_role: str = 'Member') -> None:
    """ Registers a user as a member of a tribe.

    :param owner_id: The ID of the owner of the tribe the user is joining.
    :param tribe_name: The tribe name.
    :param user_id: The ID of the user.
    :param tribe_role: The initial role they're gonna have in the tribe. """

    cursor, conn = await the_database()
    await cursor.execute("""
    INSERT INTO TribeMember (owner_id, tribe_name, member_id, tribe_role)
    VALUES (%s, %s, %s, %s)""", (owner_id, tribe_name, user_id, tribe_role))
    await conn.commit()
    await cursor.close()
async def delete_tribe_member(self, user_id: int) -> None:
    """ Removes a user from whatever tribe they currently belong to.

    :param user_id: The ID of the tribe member. """

    cursor, conn = await the_database()
    await cursor.execute("DELETE FROM TribeMember WHERE member_id = %s", (user_id,))
    await conn.commit()
    await cursor.close()
async def get_tribe_role_embed(self, channel: discord.TextChannel, owner_id: int, tribe_info: Dict[str, Union[str, int]], role_name: str) -> discord.Embed:
    """ Makes an embedded message for a Tribe Role creation.

    :param channel: The context channel.
    :param owner_id: The owner of the tribe.
    :param tribe_info: The tribe info.
    :param role_name: The role created for that tribe. """

    current_ts = await utils.get_timestamp()
    tribe_role_embed = discord.Embed(
        title="__A Tribe Role has been Created__",
        # Single-quoted dict key: reusing double quotes inside this
        # double-quoted f-string is a SyntaxError on Python < 3.12.
        description=f"<@{owner_id}> has just created a Tribe Role named `{role_name}` for their tribe named `{tribe_info['name']}`.",
        color=discord.Color.green(),
        timestamp=datetime.fromtimestamp(current_ts)
    )
    if tribe_info['thumbnail']:
        tribe_role_embed.set_thumbnail(url=tribe_info['thumbnail'])
    tribe_role_embed.set_image(url='https://media1.tenor.com/images/5327c87ecb310a382e891a0ed209357f/tenor.gif?itemid=18799194')
    tribe_role_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)

    return tribe_role_embed
async def update_user_tribe_owner(self, old_owner_id: int, new_owner_id: int) -> None:
    """ Transfers tribe ownership from one user to another in both databases.

    Re-keys the UserTribe row to the new owner, swaps the two users' tribe
    roles in TribeMember, and updates the website (Django) tribe table.

    :param old_owner_id: The old Tribe owner's ID.
    :param new_owner_id: The new Tribe owner's ID. """

    mycursor1, db1 = await the_database()
    # Point the tribe record itself at the new owner.
    await mycursor1.execute("UPDATE UserTribe SET user_id = %s WHERE user_id = %s", (new_owner_id, old_owner_id))
    # Swap the two members' tribe roles in a single statement: OG snapshots
    # the new owner's current row and T the old owner's, then each of the two
    # rows gets the other's role (so the new owner becomes e.g. 'Owner' and
    # the old owner inherits the new owner's previous role).
    await mycursor1.execute("""
    UPDATE TribeMember as GL, (
    SELECT owner_id, member_id, tribe_role
    FROM TribeMember
    WHERE member_id = %s
    ) OG, (
    SELECT owner_id, member_id, tribe_role
    FROM TribeMember
    WHERE member_id = %s
    ) T
    SET GL.tribe_role = (
    CASE
    WHEN GL.member_id = %s THEN T.tribe_role
    WHEN GL.member_id = %s THEN OG.tribe_role
    END
    )
    WHERE GL.member_id in (%s, %s);
    """, (new_owner_id, old_owner_id, new_owner_id, old_owner_id, new_owner_id, old_owner_id))
    await db1.commit()
    await mycursor1.close()

    # Mirror the ownership change on the website (Django) database.
    mycursor2, db2 = await the_django_database()
    await mycursor2.execute("UPDATE tribe_tribe SET owner_id = %s WHERE owner_id = %s", (new_owner_id, old_owner_id))
    await db2.commit()
    await mycursor2.close()
async def update_user_tribe_role(self, user_id: int, role_name: Optional[str] = None) -> None:
    """ Updates a Tribe Member's role, resetting to the default when omitted.

    :param user_id: The Tribe Member's ID.
    :param role_name: The name of the role. [Optional][Default='Member'] """

    cursor, conn = await the_database()
    if role_name:
        await cursor.execute("UPDATE TribeMember SET tribe_role = %s WHERE member_id = %s", (role_name, user_id))
    else:
        # No name given: fall back to the column's default value.
        await cursor.execute("UPDATE TribeMember SET tribe_role = DEFAULT(tribe_role) WHERE member_id = %s", (user_id,))
    await conn.commit()
    await cursor.close()
@tribe.command(aliases=['to', 'transfer'])
@commands.cooldown(1, 60, commands.BucketType.user)
async def transfer_ownership(self, ctx, *, member: discord.Member = None) -> None:
    """ Transfers the ownership of your tribe to someone else.

    :param member: The tribe member to transfer the tribe to. """

    author = ctx.author

    if not member:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**Please, inform a member, {author.mention}!**")

    user_tribe = await self.get_tribe_info_by_user_id(author.id)
    if not user_tribe['name']:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**You don't have a tribe, {author.mention}**!")

    if user_tribe['owner_id'] == member.id:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**You can't transfer the tribe to yourself, {author.mention}!**")

    # The new owner must already be a member of this same tribe.
    tribe_member = await self.get_tribe_member(member.id)
    if not tribe_member:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**{member.mention} is not even in a tribe, you can't transfer the tribe to them, {author.mention}!**")
    if tribe_member[0] != user_tribe['owner_id']:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**{member.mention} is in a different tribe, you can't transfer the tribe to them, {author.mention}!**")

    # Single-quoted dict keys inside the f-strings below: reusing double
    # quotes there is a SyntaxError on Python < 3.12.
    confirm = await ConfirmSkill(
        f"**Are you sure you want to transfer your ownership of `{user_tribe['name']}` to {member.mention}, {author.mention}?**"
    ).prompt(ctx)
    if not confirm:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**Not doing it, then, {author.mention}!**")

    await self.update_user_tribe_owner(author.id, member.id)
    await ctx.send(f"**Successfully transferred ownership of `{user_tribe['name']}` from {author.mention} to {member.mention}!**")
@tribe.command(aliases=["fto", "ftransfer", "force_transfer"])
@commands.cooldown(1, 60, commands.BucketType.user)
@commands.has_permissions(administrator=True)
async def force_transfer_ownership(self, ctx, tribe_name: str = None, member: discord.Member = None) -> None:
""" (ADMIN) Force-transfers the ownership of a Tribe to another user.
:param tribe_name: The name of the tribe from which to transfer ownership.
:param member: The member to transfer the Tribe to. """
author = ctx.author
if not member:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**Please, inform a member to transfer the tribe to, {author.mention}!**")
if not tribe_name:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**Please, inform the name of the tribe, {author.mention}!**")
user_tribe = await self.get_tribe_info_by_name(tribe_name)
if not user_tribe['name']:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**No tribes with that name were found, {author.mention}!**")
if user_tribe['owner_id'] == member.id:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**You can't transfer the tribe to the same user, {author.mention}!**")
tribe_member = await self.get_tribe_member(member.id)
if not tribe_member:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**{member.mention} is not even in a tribe, you can't transfer the tribe to them, {author.mention}!**")
if tribe_member[0] != user_tribe['owner_id']:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**{member.mention} is in a different tribe, you can't transfer the tribe to them, {author.mention}!**")
confirm = await ConfirmSkill(
f"**Are you sure you want to transfer ownership of `{user_tribe["name"]}` from <@{user_tribe["owner_id"]}> to {member.mention}, {author.mention}?**"
).prompt(ctx)
if not confirm:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**Not doing it, then, {author.mention}!**")
try:
await self.update_user_tribe_owner(user_tribe['owner_id'], member.id)
except:
await ctx.send(f"**Something went wrong with it, {author.mention}!**")
else:
await ctx.send(f"**Successfully transferred ownership of `{user_tribe["name"]}` from <@{user_tribe["owner_id"]}> to {member.mention}!**")
@commands.command(aliases=['get_mission', 'gq', 'gm'])
@Player.poisoned()
@Player.skills_used(requirement=50)
@Player.skill_on_cooldown(skill=Skill.FOUR, seconds=172800)
@Player.skills_locked()
@Player.user_is_class('munk')
@Player.skill_mark()
@Player.not_ready()
async def get_quest(self, ctx) -> None:
    """ Gets a Quest for you and your Tribe to complete, and if so,
    the involved people will get rewarded.

    • Delay = 2 days
    • Cost = Free """

    perpetrator = ctx.author

    # Do the magic here.
    if ctx.channel.id != self.bots_txt.id:
        return await ctx.send(f"**{perpetrator.mention}, you can only use this command in {self.bots_txt.mention}!**")

    perpetrator_fx = await self.get_user_effects(perpetrator)
    if 'knocked_out' in perpetrator_fx:
        return await ctx.send(f"**{perpetrator.mention}, you can't use this skill, because you are knocked-out!**")

    tribe_member = await self.get_tribe_member(perpetrator.id)
    if not tribe_member:
        return await ctx.send(f"**You are not in a tribe, {perpetrator.mention}**!")

    # tribe_member[0] is the tribe owner's ID.
    user_tribe = await self.get_tribe_info_by_user_id(tribe_member[0])

    # Checks whether there's already a max number of 1 open quests in that tribe
    if await self.get_skill_action_by_user_id_and_skill_type(user_id=perpetrator.id, skill_type="quest"):
        return await ctx.send(f"**You cannot have more than 1 on-going Quest at a time, {perpetrator.mention}!**")

    random_quest = await self.generate_random_quest()
    # Whether a cooldown row already exists decides between UPDATE and INSERT below.
    _, exists = await Player.skill_on_cooldown(skill=Skill.FOUR, seconds=172800).predicate(ctx)

    try:
        current_timestamp = await utils.get_timestamp()
        # The quest's enum value rides in the generic 'price' column and its
        # text in 'content' of the skill-action row.
        await self.insert_skill_action(
            user_id=perpetrator.id, skill_type="quest", skill_timestamp=current_timestamp,
            target_id=perpetrator.id, channel_id=ctx.channel.id, price=random_quest["enum_value"], content=random_quest["message"]
        )
        if exists:
            await self.update_user_skill_ts(perpetrator.id, Skill.FOUR, current_timestamp)
        else:
            await self.insert_user_skill_cooldown(perpetrator.id, Skill.FOUR, current_timestamp)
        # Updates user's skills used counter
        await self.update_user_skills_used(user_id=perpetrator.id)
    except Exception as e:
        print(e)
        return await ctx.send(f"**Something went wrong with your skill and it failed, {perpetrator.mention}!**")
    else:
        tribe_quest_embed = await self.get_tribe_quest_embed(channel=ctx.channel, user_id=perpetrator.id, quest=random_quest, tribe=user_tribe)
        await ctx.send(embed=tribe_quest_embed)
async def generate_random_quest(self) -> Any:
    """ Picks a random tribe quest.

    Returns a dict with a human-readable 'message' and its 1-based
    'enum_value' (matching a QuestEnum member). """

    quest_messages = [
        "Complete 5 `TheLanguageJungle` games.",
        "Rep someone and get repped back.",
        "Win a coinflip betting 50 leaves.",
        "Get a 15+ score in the `Flags` game.",
        "Spend 4 hours in a Voice Channel in a single day.",
        "Buy any item from the SlothShop, if you have all items you need to get ripped-off first.",
        "Ping DNK 3 times in a row and try to evade a BAN!!!!",
    ]
    quests: List[Dict[str, Union[str, int]]] = [
        {"message": message, "enum_value": index}
        for index, message in enumerate(quest_messages, start=1)
    ]
    return choice(quests)
async def get_tribe_quest_embed(self,
    channel: Union[discord.TextChannel, discord.Thread], user_id: int, quest: Dict[str, Union[str, int]], tribe: Dict[str, Union[str, int]]
) -> discord.Embed:
    """ Makes an embedded message for a newly started Tribe Quest.

    :param channel: The context channel.
    :param user_id: The ID of the member who started the quest.
    :param quest: The quest info ('message' and 'enum_value').
    :param tribe: The tribe info. """

    current_ts = await utils.get_timestamp()
    tribe_quest_embed = discord.Embed(
        title="__A New Quest has been Started__",
        # Single-quoted dict key: reusing double quotes inside this
        # double-quoted f-string is a SyntaxError on Python < 3.12.
        description=f"<@{user_id}> has just started a Quest for their Tribe named `{tribe['name']}`!",
        color=discord.Color.green(),
        timestamp=datetime.fromtimestamp(current_ts)
    )
    tribe_quest_embed.add_field(name="__Quest__:", value=quest["message"])
    if tribe["thumbnail"]:
        tribe_quest_embed.set_thumbnail(url=tribe["thumbnail"])
    tribe_quest_embed.set_image(url='https://c.tenor.com/MJ8Dxo58AJAAAAAC/muggers-quest.gif')
    tribe_quest_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)

    return tribe_quest_embed
async def complete_quest(self, user_id: int) -> None:
    """ Completes an on-going quest for a member.

    :param user_id: The ID of the user who's completing the quest. """

    # Gets Quest
    quest = await self.get_skill_action_by_user_id_and_skill_type(user_id=user_id, skill_type="quest")
    if not quest:
        return

    # Deletes Quest
    await self.delete_skill_action_by_user_id_and_skill_type(user_id=user_id, skill_type='quest')

    # Gets enum value
    # quest[7] is the row's 'price' column, holding the 1-based enum value;
    # _member_names_ is 0-based, hence the -1.
    enum_name = QuestEnum.__dict__['_member_names_'][quest[7]-1]
    # NOTE(review): QuestEnum[name] yields the enum member itself; presumably
    # the member (or its value) is awaitable-callable — confirm that calling
    # and awaiting it below behaves as intended.
    function: Callable = QuestEnum.__getitem__(name=enum_name)

    # Runs attached method if there's any
    if function:
        await function()
@tribe.command(aliases=["mission", "task", "chore", "quests"])
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.has_permissions(administrator=True)
async def quest(self, ctx) -> None:
""" Shows all Quests that the tribe you are in has. """
member = ctx.author
tribe_member = await self.get_tribe_member(member.id)
if not tribe_member:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**You're not even in a tribe, {member.mention}!**")
user_tribe = await self.get_tribe_info_by_user_id(tribe_member[0])
tribe_members = await self.get_tribe_members(tribe_member[0], tribe_member[1])
quests: List[List[Union[str, int]]] = []
for tribe_member in tribe_members:
quest = await self.get_skill_action_by_user_id_and_skill_type(user_id=tribe_member[0], skill_type="quest")
if quest:
quests.append(quest)
if not quests:
return await ctx.send(f"**No quests found in your tribe, {member.mention}!**")
quests_text: str = ''.join(list(map(lambda q: f"```• {q[8]} ({q[7]});```", quests)))
embed: discord.Embed = discord.Embed(
title="__Tribe Quests__",
description=f"Showing all `{len(quests)}` quests from this tribe:\n{quests_text}",
color=member.color,
timestamp=ctx.message.created_at
)
embed.set_footer(text=f"Requested by: {member}", icon_url=member.display_avatar)
if user_tribe["thumbnail"]:
embed.set_thumbnail(url=user_tribe["thumbnail"])
await ctx.send(embed=embed) | import discord
from discord.ext import commands, menus
from mysqldb import the_database, the_django_database
from .player import Player, Skill
from .enums import QuestEnum
from extra.menu import ConfirmSkill, SwitchTribePages
from extra import utils
import os
import asyncio
from datetime import datetime
from typing import List, Union, Dict, Any, Optional, Callable
from random import choice
# Channel IDs pulled from the environment; 123 is a placeholder fallback for
# local/dev runs where the variables are unset.
bots_and_commands_channel_id = int(os.getenv('BOTS_AND_COMMANDS_CHANNEL_ID', 123))
approve_thumbnail_channel_id = int(os.getenv('APPROVE_THUMBNAIL_CHANNEL_ID', 123))
class Munk(Player):
emoji = '<:Munk:839498018712715284>'
def __init__(self, client) -> None:
    """ Cog init method.

    :param client: The bot client this cog is attached to. """

    self.client = client
@commands.Cog.listener(name='on_raw_reaction_add')
async def on_raw_reaction_add_munk(self, payload) -> None:
    """ Checks reactions related to skill actions.

    Handles ✅/❌ reactions on pending tribe thumbnail requests posted in the
    approval channel. """

    # Ignore DM reactions; only guild reactions matter here.
    if not payload.guild_id:
        return

    # Checks whether it's a valid member and not a bot
    if not payload.member or payload.member.bot:
        return

    # Only reactions inside the thumbnail-approval channel are handled.
    if payload.channel_id != approve_thumbnail_channel_id:
        return

    skill_action = await self.get_skill_action_by_message_id_and_skill_type(message_id=payload.message_id, skill_type='thumbnail_request')
    if skill_action is not None:

        emoji = str(payload.emoji)

        # ✅ approves the pending tribe thumbnail request.
        if emoji == '✅':

            # Consume the request first so it can't be acted on twice.
            await self.delete_skill_action_by_message_id(payload.message_id)
            # Row layout (presumed): [0]=requester id, [4]=request message id,
            # [5]=channel id, [8]=thumbnail URL — TODO confirm against schema.
            channel = self.client.get_channel(skill_action[5])
            message = await channel.fetch_message(skill_action[4])
            if message:
                tribe = await self.get_tribe_info_by_user_id(user_id=skill_action[0])
                message_embed = discord.Embed(
                    title="Thumbnail Approved!",
                    description=f"**<@{payload.user_id}>, approved your tribe `{tribe['name']}`'s thumbnail/logo, <@{skill_action[0]}>!**",
                    color=discord.Color.green(),
                    url=tribe['link']
                )
                message_embed.set_image(url=skill_action[8])
                await self.bots_txt.send(content=f"<@{skill_action[0]}>", embed=message_embed)
                await message.delete()
                # Only an approval writes the new thumbnail to the tribe.
                await self.update_tribe_thumbnail(user_id=skill_action[0], tribe_name=tribe['name'], link=skill_action[8])

        # ❌ refuses the request; the user is notified but nothing is saved.
        elif emoji == '❌':

            await self.delete_skill_action_by_message_id(payload.message_id)
            channel = self.client.get_channel(skill_action[5])
            message = await channel.fetch_message(skill_action[4])
            if message:
                tribe = await self.get_tribe_info_by_user_id(user_id=skill_action[0])
                message_embed = discord.Embed(
                    title="Thumbnail Refused!",
                    description=f"**<@{payload.user_id}>, refused your tribe `{tribe['name']}`'s thumbnail/logo, <@{skill_action[0]}>!**",
                    color=discord.Color.red(),
                    url=tribe['link']
                )
                message_embed.set_image(url=skill_action[8])
                await self.bots_txt.send(content=f"<@{skill_action[0]}>", embed=message_embed)
                await message.delete()
@commands.command()
@Player.poisoned()
@Player.skill_on_cooldown()
@Player.skills_locked()
@Player.user_is_class('munk')
@Player.skill_mark()
async def munk(self, ctx, target: discord.Member = None) -> None:
    """ Converts a user into a real Munk.
    :param target: The person you want to convert to a Munk. """
    if ctx.channel.id != bots_and_commands_channel_id:
        return await ctx.send(f"**{ctx.author.mention}, you can only use this command in {self.bots_txt.mention}!**")
    attacker = ctx.author
    attacker_fx = await self.get_user_effects(attacker)
    # --- Validation gates: each failure replies and aborts the skill. ---
    if 'knocked_out' in attacker_fx:
        return await ctx.send(f"**{attacker.mention}, you can't use your skill, because you are knocked-out!**")
    if not target:
        return await ctx.send(f"**Please, choose a member to use the `Munk` skill, {attacker.mention}!**")
    if target.bot:
        return await ctx.send(f"**You cannot convert a bot into a `Munk`, {attacker.mention}!**")
    if attacker.id == target.id:
        return await ctx.send(f"**You cannot convert yourself, since you are already a `Munk`, {attacker.mention}!**")
    target_fx = await self.get_user_effects(target)
    if 'munk' in target_fx:
        return await ctx.send(f"**{target.mention} is already a `Munk`, {attacker.mention}!**")
    target_sloth_profile = await self.get_sloth_profile(target.id)
    if not target_sloth_profile:
        return await ctx.send(f"**You cannot convert someone who doesn't have an account, {attacker.mention}!**")
    if target_sloth_profile[1] == 'default':
        return await ctx.send(f"**You cannot convert someone who has a `default` Sloth class, {attacker.mention}!**")
    if 'protected' in target_fx:
        return await ctx.send(f"**{attacker.mention}, you cannot convert {target.mention} into a `Munk`, because they are protected against attacks!**")
    confirmed = await ConfirmSkill(f"**{attacker.mention}, are you sure you want to convert {target.mention} into a `Munk`?**").prompt(ctx)
    if not confirmed:
        return await ctx.send("**Not converting them, then!**")
    # When invoked through the `mirror` alias, the attacker must hold an
    # active 'mirror' skill action instead of spending the Munk cooldown.
    if ctx.invoked_with == 'mirror':
        mirrored_skill = await self.get_skill_action_by_user_id_and_skill_type(user_id=attacker.id, skill_type='mirror')
        if not mirrored_skill:
            return await ctx.send(f"**Something went wrong with this, {attacker.mention}!**")
    else:
        # Re-runs the cooldown predicate only to learn whether a cooldown
        # row already exists (decides update vs. insert below).
        _, exists = await Player.skill_on_cooldown(skill=Skill.ONE).predicate(ctx)
    try:
        # NOTE(review): appending " Munk" can push the nick past Discord's
        # 32-char limit and make this edit raise — confirm that's acceptable.
        await target.edit(nick=f"{target.display_name} Munk")
        current_timestamp = await utils.get_timestamp()
        if ctx.invoked_with != 'mirror':
            # `exists` is only bound on the non-mirror path, matching this guard.
            if exists:
                await self.update_user_skill_ts(attacker.id, Skill.ONE, current_timestamp)
            else:
                await self.insert_user_skill_cooldown(attacker.id, Skill.ONE, current_timestamp)
        # Updates user's skills used counter
        await self.update_user_skills_used(user_id=attacker.id)
        munk_embed = await self.get_munk_embed(
            channel=ctx.channel, perpetrator_id=attacker.id, target_id=target.id)
        msg = await ctx.send(embed=munk_embed)
    except Exception as e:
        print(e)
        return await ctx.send(f"**Something went wrong and your `Munk` skill failed, {attacker.mention}!**")
    else:
        await msg.edit(content=f"<@{target.id}>")
        # Targets with Reflect bounce the munk back, unless the attacker
        # is already a Munk.
        if 'reflect' in target_fx and 'munk' not in attacker_fx:
            await self.reflect_attack(ctx, attacker, target, 'munk')
async def get_munk_embed(self, channel, perpetrator_id: int, target_id: int) -> discord.Embed:
    """ Builds the announcement embed for a Munk conversion.
    :param channel: The context channel (used for the guild footer).
    :param perpetrator_id: The ID of the member who performed the skill.
    :param target_id: The ID of the member who got converted. """
    now = await utils.get_timestamp()
    embed = discord.Embed(
        title="A Munk Convertion has been delightfully performed!",
        description=f"🐿️ <@{perpetrator_id}> converted <@{target_id}> into a `Munk`! 🐿️",
        color=discord.Color.green(),
        timestamp=datetime.fromtimestamp(now),
    )
    embed.set_thumbnail(url="https://thelanguagesloth.com/media/sloth_classes/Munk.png")
    embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)
    return embed
async def get_join_tribe_embed(self, channel, inviter: discord.Member, target: discord.Member, tribe: Dict[str, Union[int, str]]) -> discord.Embed:
    """ Builds the announcement embed for a member joining a tribe.
    :param channel: The context channel (used for the guild footer).
    :param inviter: The member who sent the invite.
    :param target: The member who joined the tribe.
    :param tribe: The tribe info dict (name, link, thumbnail, ...). """
    now = await utils.get_timestamp()
    embed = discord.Embed(
        title="Someone just joined a Tribe!",
        description=f"🏕️ {target.mention} just joined `{tribe['name']}`! 🏕️",
        color=discord.Color.green(),
        timestamp=datetime.fromtimestamp(now),
        url=tribe['link'],
    )
    embed.set_author(name=inviter, icon_url=inviter.display_avatar)
    # Only set a thumbnail when the tribe actually has one.
    if tribe['thumbnail']:
        embed.set_thumbnail(url=tribe['thumbnail'])
    embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)
    return embed
async def get_tribe_info_by_name(self, name: str) -> Dict[str, Union[str, int]]:
    """ Gets information about a specific tribe.
    :param name: The name of the tribe.
    :return: A dict with the tribe's fields; all values are None when
    no tribe with that name exists. """
    cursor, _ = await the_database()
    await cursor.execute("SELECT * FROM UserTribe WHERE tribe_name = %s", (name,))
    row = await cursor.fetchone()
    await cursor.close()
    if not row:
        # Not found: same keys, every value None.
        return dict.fromkeys(
            ('owner_id', 'name', 'description', 'two_emojis', 'thumbnail', 'form', 'link'))
    return {
        'owner_id': row[0],
        'name': row[1],
        'description': row[2],
        'two_emojis': row[3],
        'thumbnail': row[4],
        'form': row[5],
        'link': f"https://thelanguagesloth.com/tribes/{row[6]}/",
    }
async def get_tribe_info_by_user_id(self, user_id: int) -> Dict[str, Union[str, int]]:
    """ Gets information about a specific tribe.
    :param user_id: The ID of the user owner of the tribe.
    :return: A dict with the tribe's fields; all values are None when
    the user owns no tribe. """
    cursor, _ = await the_database()
    await cursor.execute("SELECT * FROM UserTribe WHERE user_id = %s", (user_id,))
    row = await cursor.fetchone()
    await cursor.close()
    if not row:
        # Not found: same keys, every value None.
        return dict.fromkeys(
            ('owner_id', 'name', 'description', 'two_emojis', 'thumbnail', 'form', 'link'))
    return {
        'owner_id': row[0],
        'name': row[1],
        'description': row[2],
        'two_emojis': row[3],
        'thumbnail': row[4],
        'form': row[5],
        'link': f"https://thelanguagesloth.com/tribes/{row[6]}/",
    }
async def get_tribe_member(self, user_id: int) -> List[Union[str, int]]:
    """ Gets a Tribe Member row, or None when the user is in no tribe.
    :param user_id: The ID of the tribe member to get. """
    cursor, _ = await the_database()
    await cursor.execute("SELECT * FROM TribeMember WHERE member_id = %s", (user_id,))
    row = await cursor.fetchone()
    await cursor.close()
    return row
async def get_tribe_members(self, tribe_owner_id: int = None, tribe_name: str = None) -> List[List[Union[int, str]]]:
    """ Gets the (member_id, tribe_role) rows of a particular tribe.
    :param tribe_owner_id: The ID of the owner of the tribe. (Optional)
    :param tribe_name: The name of the tribe. (Optional)
    Ps: At least one of the parameters has to be provided. """
    cursor, _ = await the_database()
    members: List[List[Union[int, str]]] = []
    if tribe_owner_id:
        # Resolve the tribe's name from its owner first.
        await cursor.execute("SELECT tribe_name FROM UserTribe WHERE user_id = %s", (tribe_owner_id,))
        tribe = await cursor.fetchone()
        await cursor.execute("SELECT member_id, tribe_role FROM TribeMember WHERE tribe_name = %s", (tribe[0],))
        members = await cursor.fetchall()
    elif tribe_name:
        await cursor.execute("SELECT member_id, tribe_role FROM TribeMember WHERE tribe_name = %s", (tribe_name,))
        members = await cursor.fetchall()
    await cursor.close()
    return members
@commands.group(aliases=['tb'])
@Player.poisoned()
@Player.kidnapped()
async def tribe(self, ctx) -> None:
    """ Command for managing and interacting with a tribe.
    (Use this without a subcommand to see all subcommands available) """
    # A subcommand was invoked; it handles the interaction itself.
    if ctx.invoked_subcommand:
        return
    # No subcommand given: list every available subcommand in an embed.
    cmd = self.client.get_command('tribe')
    prefix = self.client.command_prefix
    subcommands = '\n'.join(f"{prefix}{c.qualified_name}" for c in cmd.commands)
    items_embed = discord.Embed(
        # Fixed user-facing typo: was "__Subcommads__:".
        title="__Subcommands__:",
        description=f"```apache\n{subcommands}```",
        color=ctx.author.color,
        timestamp=ctx.message.created_at
    )
    await ctx.send(embed=items_embed)
@tribe.command(aliases=['request_logo', 'ask_thumbnail', 'ask_logo'])
@commands.cooldown(1, 3600, commands.BucketType.user)
async def request_thumbnail(self, ctx, image_url: str = None) -> None:
    """ Request a thumbnail for your tribe.
    The image is posted in a moderation channel, where staff approve
    or refuse it via reactions.
    :param image_url: The URL link of the thumbnail image. """
    requester = ctx.author
    if ctx.channel.id != bots_and_commands_channel_id:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**{ctx.author.mention}, you can only use this command in {self.bots_txt.mention}!**")
    # --- Input validation; every early exit also refunds the cooldown. ---
    # Fixed: these three messages were missing the opening `**`, producing
    # broken bold markdown in Discord.
    if not image_url:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**You need to inform an image URL, {requester.mention}!**")
    if not image_url.startswith('https://'):
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**You need to inform an image URL that has HTTPS in it, {requester.mention}!**")
    if len(image_url) > 200:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**You need to inform an image URL within 200 characters, {requester.mention}!**")
    user_tribe = await self.get_tribe_info_by_user_id(user_id=requester.id)
    if not user_tribe['name']:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**You don't even have a tribe, you cannot request it, {requester.mention}!**")
    confirm = await ConfirmSkill(content=requester.mention,
        msg=f"**Are you sure you want to request [this]({image_url}) to be `{user_tribe['name']}`'s thumbnail/logo?**").prompt(ctx)
    if confirm:
        # Sends message to a moderation-clearance room
        room = self.client.get_channel(approve_thumbnail_channel_id)
        request_embed = discord.Embed(
            title="__Thumbnail Request__",
            description=f"{requester.mention} is requesting the image below to be their tribe's (`{user_tribe['name']}`) thumbnail/logo.",
            color=requester.color,
            timestamp=ctx.message.created_at
        )
        request_embed.set_image(url=image_url)
        request_msg = await room.send(embed=request_embed)
        # Don't need to store it, since it is forever
        current_timestamp = await utils.get_timestamp()
        # Persist the pending request so the reaction listener can resolve it.
        await self.insert_skill_action(
            user_id=requester.id, skill_type="thumbnail_request", skill_timestamp=current_timestamp,
            target_id=requester.id, channel_id=room.id, message_id=request_msg.id,
            content=image_url
        )
        await request_msg.add_reaction('✅')
        await request_msg.add_reaction('❌')
        await ctx.send(f"**Request sent, {ctx.author.mention}!**")
    else:
        ctx.command.reset_cooldown(ctx)
        # Fixed grammar: was "Not doing requesting it, then".
        await ctx.send(f"**Not requesting it, then, {requester.mention}!**")
@tribe.command(aliases=['inv'])
@commands.cooldown(1, 10, commands.BucketType.user)
async def invite(self, ctx, member: discord.Member = None) -> None:
    """ Invites a user to your tribe.
    :param member: The member to invite. """
    inviter = ctx.author
    if ctx.channel.id != bots_and_commands_channel_id:
        return await ctx.send(f"**{inviter.mention}, you can only use this command in {self.bots_txt.mention}!**")
    tribe_member = await self.get_tribe_member(inviter.id)
    # Only the tribe owner may invite.
    # NOTE(review): assumes TribeMember columns [0]=owner id, [1]=tribe name,
    # [2]=member id — confirm against the table definition.
    if not tribe_member or tribe_member[0] != tribe_member[2]:
        return await ctx.send(f"**You don't have a tribe, {inviter.mention}**!")
    if not member:
        return await ctx.send(f"**Please, inform a member to invite to your tribe, {inviter.mention}!**")
    if inviter.id == member.id:
        return await ctx.send(f"**You cannot invite yourself into your own tribe, {inviter.mention}!**")
    confirm = await ConfirmSkill(f"Are you sure you want to invite, {member.mention} to `{tribe_member[1]}`?").prompt(ctx)
    if not confirm:
        return await ctx.send("**Not inviting them, then!**")
    # Checks whether user is already in a tribe.
    sloth_profile = await self.get_sloth_profile(member.id)
    if not sloth_profile:
        return await ctx.send(f"**You cannot invite someone that doesn't have an account, {inviter.mention}!**")
    if sloth_profile[1] == 'default':
        return await ctx.send(f"**You cannot invite someone that doesn't have a Sloth Class, {inviter.mention}!**")
    if sloth_profile[3]:
        return await ctx.send(f"**You cannot invite someone that is already in a tribe, {inviter.mention}!**")
    # NOTE(review): this does NOT copy the context — `custom_ctx` is an alias,
    # so `ctx.author` is rebound to the invitee from here on. The code below
    # only reads `inviter`/`member`, so it happens to work, but a real copy
    # (e.g. copy.copy(ctx)) would be safer — confirm intent.
    custom_ctx = ctx
    custom_ctx.author = member
    # The invitee answers the confirmation prompt (hence the author swap above).
    invite = await ConfirmSkill(content=f"{member.mention}", msg=f"{inviter.mention} invited you to join their tribe called `{tribe_member[1]}`, do you wanna join?").prompt(custom_ctx)
    if invite:
        user_tribe = await self.get_tribe_info_by_user_id(inviter.id)
        try:
            await self.insert_tribe_member(owner_id=inviter.id, tribe_name=tribe_member[1], user_id=member.id)
            await self.update_someones_tribe(user_id=member.id, tribe_name=tribe_member[1])
            try:
                # Best-effort: tag the new member's nickname with the tribe emojis.
                await self.update_tribe_name(member=member, two_emojis=user_tribe['two_emojis'], joining=True)
            except:
                pass
        except Exception as e:
            print(e)
            await ctx.send(f"**Something went wrong with it, {member.mention}, {inviter.mention}!**")
        else:
            join_tribe_embed = await self.get_join_tribe_embed(
                channel=ctx.channel, inviter=inviter, target=member, tribe=user_tribe)
            await ctx.send(embed=join_tribe_embed)
    else:
        await ctx.send(f"**{member.mention} refused your invitation to join `{tribe_member[1]}`, {inviter.mention}!**")
@tribe.command(aliases=['view', 'display', 'show'])
@commands.cooldown(1, 10, commands.BucketType.user)
async def see(self, ctx, *, name: str = None) -> None:
    """ Shows some information about a tribe.
    If not provided a tribe name, it will check the one the user is in.
    :param name: The tribe name. """
    member = ctx.author
    if name:
        tribe = await self.get_tribe_info_by_name(name)
    else:
        # No name given: fall back to the tribe stored on the author's profile.
        sloth_profile = await self.get_sloth_profile(member.id)
        if not sloth_profile or not sloth_profile[3]:
            return await ctx.send(
                f"**You didn't provide any tribe name and you're not in a tribe either, {member.mention}!**")
        tribe = await self.get_tribe_info_by_name(sloth_profile[3])
    if not tribe['name']:
        return await ctx.send(f"**No tribes with that name were found, {member.mention}!**")
    # Fetch every member of the tribe and render them as mentions with roles.
    tribe_members = await self.get_tribe_members(tribe_name=tribe['name'])
    all_members = [f"<@{row[0]}> ({row[1]})" for row in tribe_members]
    pages = menus.MenuPages(
        source=SwitchTribePages(
            all_members, tribe=tribe, change_embed=self._make_tribe_embed),
        clear_reactions_after=True)
    await pages.start(ctx)
async def _make_tribe_embed(self, ctx: commands.Context, tribe: Dict[str, Union[str, int]], entries: List[str], offset: int, lentries: int) -> discord.Embed:
    """ Builds one page of the paginated tribe-members embed.
    :param ctx: The command context.
    :param tribe: The tribe info dict (name, description, link, ...).
    :param entries: The member strings shown on this page.
    :param offset: The index of the first entry of this page.
    :param lentries: The total number of entries across all pages. """
    tribe_owner = self.client.get_user(tribe['owner_id'])
    tribe_embed = discord.Embed(
        title=f"{tribe['name']} ({tribe['two_emojis']})",
        description=tribe['description'],
        timestamp=ctx.message.created_at,
        color=ctx.author.color,
        url=tribe['link']
    )
    if tribe['thumbnail']:
        tribe_embed.set_thumbnail(url=tribe['thumbnail'])
    # The owner may not be in the cache, in which case no author line is shown.
    if tribe_owner:
        tribe_embed.set_author(name=f"Owner: {tribe_owner}", icon_url=tribe_owner.display_avatar, url=tribe_owner.display_avatar)
    tribe_embed.add_field(name="__Members:__", value=', '.join(entries), inline=False)
    # Fixed: the original looped over all entries, overwriting the footer each
    # time — only the last assignment survived. Set it once instead.
    if entries:
        tribe_embed.set_footer(text=f"({offset + len(entries) - 1} of {lentries})")
    return tribe_embed
@tribe.command(aliases=['kick', 'expel', 'kick_out', 'can_i_show_you_the_door?'])
@commands.cooldown(1, 10, commands.BucketType.user)
async def kickout(self, ctx, member: Union[discord.Member, discord.User] = None) -> None:
    """ Expels someone from your tribe.
    :param member: The member to expel. """
    expeller = ctx.author
    if ctx.channel.id != bots_and_commands_channel_id:
        return await ctx.send(f"**{expeller.mention}, you can only use this command in {self.bots_txt.mention}!**")
    # Only a tribe owner can kick (lookup is by owner id).
    user_tribe = await self.get_tribe_info_by_user_id(user_id=expeller.id)
    if not user_tribe['name']:
        return await ctx.send(f"**You don't have a tribe, {expeller.mention}**!")
    if not member:
        return await ctx.send(f"**Please, inform a member to kick from your tribe, {expeller.mention}!**")
    if expeller.id == member.id:
        return await ctx.send(f"**You cannot kick yourself out of your own tribe, {expeller.mention}!**")
    member_fx = await self.get_user_effects(member)
    if 'kidnapped' in member_fx:
        return await ctx.send(f"**You cannot kick someone from your tribe who is kidnapped, {expeller.mention}!**")
    confirm = await ConfirmSkill(f"Are you sure you want to kick, {member.mention} from `{user_tribe['name']}`?").prompt(ctx)
    if not confirm:
        return await ctx.send("**Not kicking them, then!**")
    # Checks whether user is already in a tribe.
    sloth_profile = await self.get_sloth_profile(member.id)
    if not sloth_profile:
        return await ctx.send(f"**You cannot kick out someone that doesn't even have an account, {expeller.mention}!**")
    if sloth_profile[3] != user_tribe['name']:
        return await ctx.send(f"**You cannot kick out someone that is not in your tribe, {expeller.mention}!**")
    try:
        # await self.update_someones_tribe(user_id=member.id, tribe_name=None)
        await self.delete_tribe_member(user_id=member.id)
        try:
            # Best-effort: strip the tribe emojis from the member's nickname.
            await self.update_tribe_name(member=member, two_emojis=user_tribe['two_emojis'], joining=False)
        except:
            pass
    except Exception as e:
        print(e)
        await ctx.send(f"**Something went wrong with it, {expeller.mention}!**")
    else:
        await ctx.send(f"**You successfully kicked {member.mention} out of `{user_tribe['name']}`, {expeller.mention}!**")
@tribe.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def leave(self, ctx) -> None:
    """ Leaves the tribe the user is in. """
    member = ctx.author
    if ctx.channel.id != bots_and_commands_channel_id:
        return await ctx.send(f"**{member.mention}, you can only use this command in {self.bots_txt.mention}!**")
    tribe_member = await self.get_tribe_member(user_id=member.id)
    # Fixed: get_tribe_member returns None for users in no tribe; the
    # original indexed into it unguarded and raised a TypeError.
    if not tribe_member or not tribe_member[1]:
        return await ctx.send(f"**You are not in a tribe, {member.mention}**!")
    # The owner cannot leave their own tribe.
    if member.id == tribe_member[0]:
        return await ctx.send(f"**You cannot leave your own tribe, {member.mention}!**")
    user_tribe = await self.get_tribe_info_by_name(tribe_member[1])
    confirm = await ConfirmSkill(f"Are you sure you want to leave `{user_tribe['name']}`, {member.mention}?").prompt(ctx)
    if not confirm:
        return await ctx.send("**Not leaving it, then!**")
    # Updates user tribe status and nickname
    try:
        await self.delete_tribe_member(member.id)
        try:
            # Best-effort: strip the tribe emojis from the nickname; failures
            # (e.g. missing permissions) don't undo the membership removal.
            await self.update_tribe_name(member=member, two_emojis=user_tribe['two_emojis'], joining=False)
        except Exception as ee:
            print(ee)
    except Exception as e:
        print(e)
        await ctx.send(f"**Something went wrong with it, {member.mention}!**")
    else:
        await ctx.send(f"**You successfully left `{user_tribe['name']}`, {member.mention}!**")
async def update_someones_tribe(self, user_id: int, tribe_name: str = None) -> None:
    """ Updates someone's tribe status.
    :param user_id: The ID of the user who's gonna be updated.
    :param tribe_name: The name of the tribe the user is gonna be set to. (default = None) """
    mycursor, db = await the_database()
    # NOTE(review): tribe_user_id is set to the member's OWN id, not the tribe
    # owner's — confirm that is the intended meaning of that column.
    await mycursor.execute("UPDATE SlothProfile SET tribe = %s, tribe_user_id = %s WHERE user_id = %s", (tribe_name, user_id, user_id))
    await db.commit()
    await mycursor.close()
async def update_tribe_thumbnail(self, user_id: int, tribe_name: str, link: str = None) -> None:
    """ Updates someone's tribe thumbnail link.
    The thumbnail is stored in two places: the website's Django database
    and the bot's own database; both are updated, in that order.
    :param user_id: The ID of the tribe's owner.
    :param tribe_name: The name of the tribe.
    :param link: The link that the tribe's thumbnail will be set to. """
    targets = (
        (the_django_database, """
        UPDATE tribe_tribe SET tribe_thumbnail = %s
        WHERE owner_id = %s AND tribe_name = %s"""),
        (the_database, """
        UPDATE UserTribe SET tribe_thumbnail = %s
        WHERE user_id = %s AND tribe_name = %s"""),
    )
    for get_db, sql in targets:
        cursor, db = await get_db()
        await cursor.execute(sql, (link, user_id, tribe_name))
        await db.commit()
        await cursor.close()
async def update_tribe_name(self, member: discord.Member, two_emojis: str, joining: bool) -> None:
    """ Updates someone's nickname so it has their tribe's two-emoji combination identifier.
    :param member: The member whose nickname is gonna be updated.
    :param two_emojis: The two-emoji combination identifier.
    :param joining: Whether the user is joining the tribe. """
    dname = member.display_name
    if joining:
        # Checks whether member is Munked; if so, the emojis go between the
        # base name and the trailing "Munk" tag.
        if dname.endswith('Munk'):
            # Fixed: the original kept the space left behind after slicing off
            # "Munk", producing a double space in the new nickname.
            base = dname.strip()[:-4].strip()
            await member.edit(nick=f"{base} {two_emojis} Munk".strip())
        else:
            await member.edit(nick=f"{dname.strip()} {two_emojis}".strip())
    else:
        # Leaving: remove the last occurrence of the emoji tag and re-join
        # the surrounding parts with a single space.
        nick = ' '.join(map(lambda p: p.strip(), dname.rsplit(two_emojis, 1)))
        # Only hit the API when the nickname actually changed.
        if nick != dname:
            await member.edit(nick=nick)
async def check_tribe_creations(self) -> None:
    """ Consumes pending 'tribe_creation' skill actions, tagging each
    creator's nickname with their tribe's two-emoji identifier.
    (Docstring corrected: this handles tribe creations, not steals.) """
    creations = await self.get_skill_actions_by_skill_type('tribe_creation')
    guild = self.client.get_guild(int(os.getenv('SERVER_ID', 123)))
    for creation in creations:
        try:
            # Removes skill action from the database
            await self.delete_skill_action_by_target_id_and_skill_type(target_id=creation[0], skill_type='tribe_creation')
            member = discord.utils.get(guild.members, id=creation[0])
            # NOTE(review): creation[6] appears to hold the two-emoji tag —
            # confirm against the skill-action column order.
            try:
                await self.update_tribe_name(member=member, two_emojis=creation[6], joining=True)
            except:
                # Nick edit may fail (permissions / member left); best-effort.
                pass
        except:
            # One bad entry shouldn't stop the whole sweep.
            pass
@commands.command()
@Player.poisoned()
@Player.skills_used(requirement=5)
@Player.skill_on_cooldown(skill=Skill.TWO)
@Player.skills_locked()
@Player.user_is_class('munk')
@Player.skill_mark()
async def create_tribe(self, ctx) -> None:
    """ Guides you into the creation of a tribe,
    which is a custom group for people to join and do something. """
    author = ctx.author
    link = 'https://thelanguagesloth.com/tribes'
    # The actual creation happens on the website; the bot just points there.
    embed = discord.Embed(
        title="__Tribe Creation__",
        description=f"In order to create your tribe, access our website by clicking [here]({link}) or in the button below!",
        color=author.color,
        timestamp=ctx.message.created_at,
        url=link,
    )
    embed.set_author(name=author, url=author.display_avatar, icon_url=author.display_avatar)
    embed.set_thumbnail(url=author.display_avatar)
    embed.set_footer(text=author.guild.name, icon_url=author.guild.icon.url)
    # A link button (style 5) straight to the tribe-creation page.
    view = discord.ui.View()
    view.add_item(discord.ui.Button(style=5, label="Create Tribe", url=link, emoji="🏕️"))
    await ctx.send(embed=embed, view=view)
@commands.command(aliases=['add_tribe_role', 'createtriberole', 'addtriberole'])
@Player.poisoned()
@Player.skills_used(requirement=20)
@Player.skill_on_cooldown(skill=Skill.THREE, seconds=36000)
@Player.skills_locked()
@Player.user_is_class('munk')
@Player.skill_mark()
async def create_tribe_role(self, ctx, role_name: str = None) -> None:
    """ Creates a tribe role.
    With different roles and positions in your tribe, you
    can better administrate and know what each person should do
    or their purpose inside your tribe.
    :param role_name: The name of the tribe role. (MAX = 30 Chars)
    * Cooldown: 1 day
    Ps: It is not an actual server role. """
    perpetrator = ctx.author
    if ctx.channel.id != self.bots_txt.id:
        return await ctx.send(f"**{perpetrator.mention}, you can only use this command in {self.bots_txt.mention}!**")
    perpetrator_fx = await self.get_user_effects(perpetrator)
    if 'knocked_out' in perpetrator_fx:
        return await ctx.send(f"**{perpetrator.mention}, you can't use this skill, because you are knocked-out!**")
    # --- Validation: must own a tribe and provide a legal, unique name. ---
    user_tribe = await self.get_tribe_info_by_user_id(perpetrator.id)
    if not user_tribe['name']:
        return await ctx.send(f"**You don't have a tribe, {perpetrator.mention}**!")
    if not role_name:
        return await ctx.send(f"**Please, inform a Tribe Role name, {perpetrator.mention}!**")
    if len(role_name) > 30:
        # Fixed user-facing typo: was "infom".
        return await ctx.send(f"**Please, inform a Tribe Role name under or equal to 30 characters, {perpetrator.mention}!**")
    if role_name.lower() in ['owner', 'member']:
        return await ctx.send(f"**You cannot use this as your Tribe Role's name, {perpetrator.mention}!**")
    tribe_roles = await self.get_tribe_roles(perpetrator.id)
    if role_name.lower() in [trole[2].lower() for trole in tribe_roles]:
        return await ctx.send(f"**You already have a Tribe Role with that name, {perpetrator.mention}!**")
    confirm = await ConfirmSkill(f"**Are you sure you want to create a Tribe Role named `{role_name}`, {perpetrator.mention}?**").prompt(ctx)
    if not confirm:
        return await ctx.send(f"**Not making it, then, {perpetrator.mention}!**")
    # Re-runs the cooldown predicate only to learn whether a cooldown row
    # already exists (decides update vs. insert below).
    _, exists = await Player.skill_on_cooldown(skill=Skill.THREE, seconds=36000).predicate(ctx)
    try:
        current_timestamp = await utils.get_timestamp()
        await self.insert_tribe_role(perpetrator.id, user_tribe['name'], role_name)
        if exists:
            await self.update_user_skill_ts(perpetrator.id, Skill.THREE, current_timestamp)
        else:
            await self.insert_user_skill_cooldown(perpetrator.id, Skill.THREE, current_timestamp)
        # Updates user's skills used counter
        await self.update_user_skills_used(user_id=perpetrator.id)
    except Exception as e:
        print(e)
        return await ctx.send(f"**Something went wrong with your skill and it failed, {perpetrator.mention}!**")
    else:
        tribe_role_embed = await self.get_tribe_role_embed(
            channel=ctx.channel, owner_id=perpetrator.id, tribe_info=user_tribe, role_name=role_name)
        await ctx.send(embed=tribe_role_embed)
@tribe.command(aliases=['remove_role', 'deleterole', 'removerole'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def delete_role(self, ctx, role_name: str = None) -> None:
    """ Deletes a specific role from the member's tribe.
    :param role_name: The name of the role to delete. """
    author = ctx.author
    # --- Validation: name provided, within limits, tribe and role exist. ---
    if not role_name:
        return await ctx.send(f"**Please, inform a Tribe Role name, {author.mention}!**")
    if len(role_name) > 30:
        return await ctx.send(f"**Tribe Role names have a limit of 30 characters, {author.mention}!**")
    user_tribe = await self.get_tribe_info_by_user_id(user_id=author.id)
    if not user_tribe['name']:
        return await ctx.send(f"**You don't have a tribe, {author.mention}**!")
    tribe_role = await self.get_tribe_role(author.id, role_name)
    if not tribe_role:
        return await ctx.send(f"**You don't have a Tribe Role with that name, {author.mention}!**")
    confirm = await ConfirmSkill(f"**Are you sure you want to delete your tribe's `{tribe_role[2]}` role, {author.mention}?**").prompt(ctx)
    if not confirm:
        return await ctx.send(f"**Not doing it then, {author.mention}!**")
    try:
        await self.delete_tribe_role(author.id, user_tribe['name'], role_name)
    except Exception as e:
        print(e)
        await ctx.send(f"**Something went wrong with it, {author.mention}!**")
    else:
        await ctx.send(f"**Successfully deleted the `{role_name}` role from your tribe, {author.mention}!**")
@tribe.command(aliases=['remove_roles', 'deleteroles', 'removeroles'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def delete_roles(self, ctx) -> None:
    """ Deletes all Tribe Roles from the member's tribe. """
    member = ctx.author
    user_tribe = await self.get_tribe_info_by_user_id(user_id=member.id)
    if not user_tribe['name']:
        return await ctx.send(f"**You don't have a tribe, {member.mention}**!")
    tribe_roles = await self.get_tribe_roles(member.id)
    if not tribe_roles:
        # Fixed grammar: was "You don't any Tribe Roles".
        return await ctx.send(f"**You don't have any Tribe Roles, {member.mention}!**")
    confirm = await ConfirmSkill(f"**Are you sure you want to delete your tribe's roles, {member.mention}?**").prompt(ctx)
    if not confirm:
        return await ctx.send(f"**Not doing it then, {member.mention}!**")
    try:
        await self.delete_tribe_roles(member.id, user_tribe['name'])
    except Exception as e:
        print(e)
        await ctx.send(f"**Something went wrong with it, {member.mention}!**")
    else:
        await ctx.send(f"**Successfully deleted all roles from your tribe, {member.mention}!**")
@tribe.command(aliases=['give_role', 'giverole'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def promote(self, ctx, member: discord.Member = None, role_name: str = None) -> None:
    """ Promotes a Tribe Member to a given Tribe Role.
    :param member: The Tribe Member to promote.
    :param role_name: The Tribe Role to promote the member to. """
    author = ctx.author
    # --- Validation: arguments, ownership, membership and role existence. ---
    if not member:
        return await ctx.send(f"**Please, inform a Tribe Member to promote, {author.mention}!**")
    if author.id == member.id:
        return await ctx.send(f"**You cannot promote yourself, {author.mention}!**")
    if not role_name:
        return await ctx.send(f"**Please, inform a Tribe Role name, {author.mention}!**")
    if len(role_name) > 30:
        return await ctx.send(f"**Tribe Role names have a limit of 30 characters, {author.mention}!**")
    user_tribe = await self.get_tribe_info_by_user_id(user_id=author.id)
    if not user_tribe['name']:
        return await ctx.send(f"**You don't have a tribe, {author.mention}**!")
    tribe_member = await self.get_tribe_member(member.id)
    if not tribe_member:
        return await ctx.send(f"**{member.mention} is not even in a tribe, {author.mention}!**")
    if tribe_member[1] != user_tribe['name']:
        return await ctx.send(f"**{member.mention} is not even from your tribe, {author.mention}!**")
    # Case-insensitive comparison against the member's current role.
    if str(tribe_member[3]).lower() == role_name.lower():
        return await ctx.send(f"**{member.mention} already has this Tribe Role, {author.mention}!**")
    tribe_role = await self.get_tribe_role(author.id, role_name)
    if not tribe_role:
        return await ctx.send(f"**You don't have a Tribe Role with that name, {author.mention}!**")
    confirm = await ConfirmSkill(f"**Are you sure you want to promote {member.mention} to `{tribe_role[2]}`, {author.mention}?**").prompt(ctx)
    if not confirm:
        return await ctx.send(f"**Not doing it then, {author.mention}!**")
    try:
        await self.update_user_tribe_role(member.id, tribe_role[2])
    except Exception as e:
        print(e)
        await ctx.send(f"**Something went wrong with it, {author.mention}!**")
    else:
        await ctx.send(f"**Successfully promoted {member.mention} to `{tribe_role[2]}`, {author.mention}!**")
@tribe.command(aliases=['take_role', 'takerole'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def demote(self, ctx, member: discord.Member = None) -> None:
    """ Demotes a Tribe Member from their current Tribe Role.
    :param member: The Tribe Member to demote. """
    owner = ctx.author
    if not member:
        # Fixed copy-paste: this message said "promote" in the demote command.
        return await ctx.send(f"**Please, inform a Tribe Member to demote, {owner.mention}!**")
    if owner.id == member.id:
        return await ctx.send(f"**You cannot demote yourself, {owner.mention}!**")
    user_tribe = await self.get_tribe_info_by_user_id(user_id=owner.id)
    if not user_tribe['name']:
        return await ctx.send(f"**You don't have a tribe, {owner.mention}**!")
    tribe_member = await self.get_tribe_member(member.id)
    if not tribe_member:
        return await ctx.send(f"**{member.mention} is not even in a tribe, {owner.mention}!**")
    if tribe_member[1] != user_tribe['name']:
        return await ctx.send(f"**{member.mention} is not even from your tribe, {owner.mention}!**")
    # 'Member' is the default role — nothing to demote from.
    if tribe_member[3] == 'Member':
        return await ctx.send(f"**{member.mention} already has the default Tribe Role, {owner.mention}!**")
    tribe_role = await self.get_tribe_role(owner.id, tribe_member[3])
    if not tribe_role:
        return await ctx.send(f"**You don't have a Tribe Role with that name, {owner.mention}!**")
    confirm = await ConfirmSkill(f"**Are you sure you want to demote {member.mention} from `{tribe_role[2]}` to `Member`, {owner.mention}?**").prompt(ctx)
    if not confirm:
        return await ctx.send(f"**Not doing it then, {owner.mention}!**")
    try:
        # No role argument: resets the member back to the default role.
        await self.update_user_tribe_role(member.id)
    except Exception as e:
        print(e)
        await ctx.send(f"**Something went wrong with it, {owner.mention}!**")
    else:
        # Fixed grammar: was "Successfully demote".
        await ctx.send(f"**Successfully demoted {member.mention} from `{tribe_role[2]}` to `Member`, {owner.mention}!**")
@tribe.command()
@commands.cooldown(1, 5, commands.BucketType.user)
async def roles(self, ctx, tribe_name: Optional[str] = None) -> None:
    """ Shows the Tribe Roles of a given tribe.
    :param tribe_name: The name of the tribe to show the roles. [Optional]
    PS: If a tribe name is not provided, it will fetch the tribe the user is in. """
    member = ctx.author
    if tribe_name:
        tribe = await self.get_tribe_info_by_name(tribe_name)
    else:
        # No name given: fall back to the tribe stored on the author's profile.
        sloth_profile = await self.get_sloth_profile(member.id)
        if not sloth_profile or not sloth_profile[3]:
            return await ctx.send(
                f"**You didn't provide any tribe name and you're not in a tribe either, {member.mention}!**")
        tribe = await self.get_tribe_info_by_name(sloth_profile[3])
    if not tribe['name']:
        return await ctx.send(f"**No tribe with that name was found, {member.mention}**!")
    # Fixed: roles were fetched with the invoking member's ID, so viewing
    # another tribe by name showed the invoker's roles (or none). Fetch by
    # the displayed tribe's owner instead.
    roles = await self.get_tribe_roles(tribe['owner_id'])
    if not roles:
        return await ctx.send(f"**This tribe doesn't have any intern roles, {member.mention}!**")
    embed = discord.Embed(
        title=f"__{tribe['name']}'s Roles__:",
        description=', '.join([r[2] for r in roles]),
        color=member.color,
        timestamp=ctx.message.created_at,
        url=tribe['link']
    )
    embed.set_author(name=member.display_name, url=member.display_avatar, icon_url=member.display_avatar)
    if tribe['thumbnail']:
        embed.set_thumbnail(url=tribe['thumbnail'])
    embed.set_footer(text=ctx.guild.name, icon_url=ctx.guild.icon.url)
    await ctx.send(embed=embed)
async def get_tribe_role(self, owner_id: int, role_name: str) -> List[Union[int, str]]:
    """ Fetches a single Tribe Role, matching the role name case-insensitively.
    :param owner_id: The ID of the owner of that tribe.
    :param role_name: The name of the role. """

    cursor, _ = await the_database()
    await cursor.execute(
        "SELECT * FROM TribeRole WHERE owner_id = %s AND LOWER(role_name) = LOWER(%s)",
        (owner_id, role_name))
    row = await cursor.fetchone()
    await cursor.close()
    return row
async def get_tribe_roles(self, owner_id: int) -> List[List[Union[int, str]]]:
    """ Fetches every Tribe Role belonging to the given tribe owner's tribe.
    :param owner_id: The ID of the owner of that tribe. """

    cursor, _ = await the_database()
    await cursor.execute("SELECT * FROM TribeRole WHERE owner_id = %s", (owner_id,))
    rows = await cursor.fetchall()
    await cursor.close()
    return rows
async def insert_tribe_role(self, owner_id: int, tribe_name: str, role_name: str) -> None:
    """ Stores a new Tribe Role in the database.
    :param owner_id: The ID of the owner of that tribe.
    :param tribe_name: The name of the tribe.
    :param role_name: The name of the role. """

    cursor, conn = await the_database()
    await cursor.execute("""
    INSERT INTO TribeRole (owner_id, tribe_name, role_name) VALUES (%s, %s, %s)
    """, (owner_id, tribe_name, role_name))
    await conn.commit()
    await cursor.close()
async def delete_tribe_role(self, owner_id: int, tribe_name: str, role_name: str) -> None:
    """ Removes a Tribe Role and resets members who held it to the default role.
    :param owner_id: The ID of the owner of that tribe.
    :param tribe_name: The name of the tribe.
    :param role_name: The name of the role. """

    cursor, conn = await the_database()
    # drop the role itself (name match is case-insensitive)
    await cursor.execute("DELETE FROM TribeRole WHERE owner_id = %s AND LOWER(role_name) = LOWER(%s)", (owner_id, role_name))
    # demote everyone holding the deleted role back to the column default
    await cursor.execute("""
    UPDATE TribeMember SET tribe_role = DEFAULT(tribe_role) WHERE tribe_name = %s AND LOWER(tribe_role) = LOWER(%s)
    """, (tribe_name, role_name))
    await conn.commit()
    await cursor.close()
async def delete_tribe_roles(self, owner_id: int, tribe_name: str) -> None:
    """ Wipes every Tribe Role of a tribe and demotes all non-owner members.
    :param owner_id: The ID of the owner of that tribe.
    :param tribe_name: The name of the tribe. """

    cursor, conn = await the_database()
    # remove every role registered for this tribe's owner
    await cursor.execute("DELETE FROM TribeRole WHERE owner_id = %s", (owner_id,))
    # reset all members to the default role, leaving the owner untouched
    await cursor.execute("""
    UPDATE TribeMember SET tribe_role = DEFAULT(tribe_role)
    WHERE tribe_name = %s AND tribe_role <> 'Owner'
    """, (tribe_name,))
    await conn.commit()
    await cursor.close()
async def insert_tribe_member(self, owner_id: int, tribe_name: str, user_id: int, tribe_role: str = 'Member') -> None:
    """ Registers a user as a member of a tribe.
    :param owner_id: The ID of the owner of the tribe the user is joining.
    :param tribe_name: The tribe name.
    :param user_id: The ID of the user.
    :param tribe_role: The initial role they're gonna have in the tribe. """

    cursor, conn = await the_database()
    await cursor.execute("""
    INSERT INTO TribeMember (owner_id, tribe_name, member_id, tribe_role)
    VALUES (%s, %s, %s, %s)""", (owner_id, tribe_name, user_id, tribe_role))
    await conn.commit()
    await cursor.close()
async def delete_tribe_member(self, user_id: int) -> None:
    """ Removes a user from whichever tribe they are a member of.
    :param user_id: The ID of the tribe member. """

    cursor, conn = await the_database()
    await cursor.execute("DELETE FROM TribeMember WHERE member_id = %s", (user_id,))
    await conn.commit()
    await cursor.close()
async def get_tribe_role_embed(self, channel: discord.TextChannel, owner_id: int, tribe_info: Dict[str, Union[str, int]], role_name: str) -> discord.Embed:
    """ Builds the announcement embed for a newly created Tribe Role.
    :param channel: The context channel.
    :param owner_id: The owner of the tribe.
    :param tribe_info: The tribe info.
    :param role_name: The role created for that tribe. """

    now = await utils.get_timestamp()
    embed = discord.Embed(
        title="__A Tribe Role has been Created__",
        description=f"<@{owner_id}> has just created a Tribe Role named `{role_name}` for their tribe named `{tribe_info['name']}`.",
        color=discord.Color.green(),
        timestamp=datetime.fromtimestamp(now)
    )
    if tribe_info['thumbnail']:
        embed.set_thumbnail(url=tribe_info['thumbnail'])
    embed.set_image(url='https://media1.tenor.com/images/5327c87ecb310a382e891a0ed209357f/tenor.gif?itemid=18799194')
    embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)
    return embed
async def update_user_tribe_owner(self, old_owner_id: int, new_owner_id: int) -> None:
    """ Updates the user's Tribe Role.

    Transfers tribe ownership in both the bot database and the Django website
    database, and swaps the two users' tribe roles with each other.

    :param old_owner_id: The old Tribe owner's ID.
    :param new_owner_id: The new Tribe owner's ID. """

    mycursor1, db1 = await the_database()
    # re-key the tribe record to the new owner
    await mycursor1.execute("UPDATE UserTribe SET user_id = %s WHERE user_id = %s", (new_owner_id, old_owner_id))
    # Swap the two users' tribe roles in a single statement:
    # OG selects the new owner's current row, T selects the old owner's row
    # (parameter order: new, old), and the CASE gives each user the other's
    # previous role. Snapshotting both rows first avoids reading a value that
    # the same UPDATE has already overwritten.
    await mycursor1.execute("""
    UPDATE TribeMember as GL, (
        SELECT owner_id, member_id, tribe_role
        FROM TribeMember
        WHERE member_id = %s
    ) OG, (
        SELECT owner_id, member_id, tribe_role
        FROM TribeMember
        WHERE member_id = %s
    ) T
    SET GL.tribe_role = (
        CASE
            WHEN GL.member_id = %s THEN T.tribe_role
            WHEN GL.member_id = %s THEN OG.tribe_role
        END
    )
    WHERE GL.member_id in (%s, %s);
    """, (new_owner_id, old_owner_id, new_owner_id, old_owner_id, new_owner_id, old_owner_id))
    await db1.commit()
    await mycursor1.close()

    # mirror the ownership change into the website (Django) database
    mycursor2, db2 = await the_django_database()
    await mycursor2.execute("UPDATE tribe_tribe SET owner_id = %s WHERE owner_id = %s", (new_owner_id, old_owner_id))
    await db2.commit()
    await mycursor2.close()
async def update_user_tribe_role(self, user_id: int, role_name: Optional[str] = None) -> None:
    """ Sets a tribe member's role, or resets it to the column default when no
    role name is given.
    :param user_id: The Tribe Member's ID.
    :param role_name: The name of the role. [Optional][Default='Member'] """

    cursor, conn = await the_database()
    if role_name:
        await cursor.execute("UPDATE TribeMember SET tribe_role = %s WHERE member_id = %s", (role_name, user_id))
    else:
        await cursor.execute("UPDATE TribeMember SET tribe_role = DEFAULT(tribe_role) WHERE member_id = %s", (user_id,))
    await conn.commit()
    await cursor.close()
@tribe.command(aliases=['to', 'transfer'])
@commands.cooldown(1, 60, commands.BucketType.user)
async def transfer_ownership(self, ctx, *, member: discord.Member = None) -> None:
    """ Transfers the ownership of your tribe to someone else. """

    author = ctx.author

    async def abort(message: str):
        # every early exit resets the 60s cooldown before replying
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(message)

    if not member:
        return await abort(f"**Please, inform a member, {author.mention}!**")

    user_tribe = await self.get_tribe_info_by_user_id(author.id)
    if not user_tribe['name']:
        return await abort(f"**You don't have a tribe, {author.mention}**!")

    if user_tribe['owner_id'] == member.id:
        return await abort(f"**You can't transfer the tribe to yourself, {author.mention}!**")

    tribe_member = await self.get_tribe_member(member.id)
    if not tribe_member:
        return await abort(f"**{member.mention} is not even in a tribe, you can't transfer the tribe to them, {author.mention}!**")

    if tribe_member[0] != user_tribe['owner_id']:
        return await abort(f"**{member.mention} is in a different tribe, you can't transfer the tribe to them, {author.mention}!**")

    confirm = await ConfirmSkill(
        f"**Are you sure you want to transfer your ownership of `{user_tribe['name']}` to {member.mention}, {author.mention}?**"
    ).prompt(ctx)
    if not confirm:
        return await abort(f"**Not doing it, then, {author.mention}!**")

    await self.update_user_tribe_owner(author.id, member.id)
    await ctx.send(f"**Successfully transferred ownership of `{user_tribe['name']}` from {author.mention} to {member.mention}!**")
@tribe.command(aliases=["fto", "ftransfer", "force_transfer"])
@commands.cooldown(1, 60, commands.BucketType.user)
@commands.has_permissions(administrator=True)
async def force_transfer_ownership(self, ctx, tribe_name: str = None, member: discord.Member = None) -> None:
    """ (ADMIN) Force-transfers the ownership of a Tribe to another user.
    :param tribe_name: The name of the tribe from which to transfer ownership.
    :param member: The member to transfer the Tribe to. """

    author = ctx.author

    if not member:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**Please, inform a member to transfer the tribe to, {author.mention}!**")

    if not tribe_name:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**Please, inform the name of the tribe, {author.mention}!**")

    user_tribe = await self.get_tribe_info_by_name(tribe_name)
    if not user_tribe['name']:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**No tribes with that name were found, {author.mention}!**")

    if user_tribe['owner_id'] == member.id:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**You can't transfer the tribe to the same user, {author.mention}!**")

    tribe_member = await self.get_tribe_member(member.id)
    if not tribe_member:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**{member.mention} is not even in a tribe, you can't transfer the tribe to them, {author.mention}!**")

    if tribe_member[0] != user_tribe['owner_id']:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**{member.mention} is in a different tribe, you can't transfer the tribe to them, {author.mention}!**")

    confirm = await ConfirmSkill(
        f"**Are you sure you want to transfer ownership of `{user_tribe['name']}` from <@{user_tribe['owner_id']}> to {member.mention}, {author.mention}?**"
    ).prompt(ctx)
    if not confirm:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**Not doing it, then, {author.mention}!**")

    try:
        await self.update_user_tribe_owner(user_tribe['owner_id'], member.id)
    # BUGFIX: this was a bare `except:`, which also swallowed SystemExit,
    # KeyboardInterrupt and asyncio.CancelledError and silently discarded the
    # actual error. Narrowed to Exception and logged, matching the sibling
    # commands' error-handling style.
    except Exception as e:
        print(e)
        await ctx.send(f"**Something went wrong with it, {author.mention}!**")
    else:
        await ctx.send(f"**Successfully transferred ownership of `{user_tribe['name']}` from <@{user_tribe['owner_id']}> to {member.mention}!**")
@commands.command(aliases=['get_mission', 'gq', 'gm'])
@Player.poisoned()
@Player.skills_used(requirement=50)
@Player.skill_on_cooldown(skill=Skill.FOUR, seconds=172800)
@Player.skills_locked()
@Player.user_is_class('munk')
@Player.skill_mark()
@Player.not_ready()
async def get_quest(self, ctx) -> None:
    """ Gets a Quest for you and your Tribe to complete, and if so,
    the involved people will get rewarded.

    • Delay = 2 days
    • Cost = Free """

    perpetrator = ctx.author
    # Do the magic here.
    # this skill may only be used in the designated bots channel
    if ctx.channel.id != self.bots_txt.id:
        return await ctx.send(f"**{perpetrator.mention}, you can only use this command in {self.bots_txt.mention}!**")

    perpetrator_fx = await self.get_user_effects(perpetrator)

    if 'knocked_out' in perpetrator_fx:
        return await ctx.send(f"**{perpetrator.mention}, you can't use this skill, because you are knocked-out!**")

    tribe_member = await self.get_tribe_member(perpetrator.id)
    if not tribe_member:
        return await ctx.send(f"**You are not in a tribe, {perpetrator.mention}**!")

    # tribe_member[0] is the tribe owner's ID
    user_tribe = await self.get_tribe_info_by_user_id(tribe_member[0])

    # Checks whether there's already a max number of 1 open quests in that tribe
    if await self.get_skill_action_by_user_id_and_skill_type(user_id=perpetrator.id, skill_type="quest"):
        return await ctx.send(f"**You cannot have more than 1 on-going Quest at a time, {perpetrator.mention}!**")

    random_quest = await self.generate_random_quest()
    # re-run the cooldown predicate manually to learn whether a cooldown row
    # already exists for this user/skill (decides INSERT vs UPDATE below)
    _, exists = await Player.skill_on_cooldown(skill=Skill.FOUR, seconds=172800).predicate(ctx)

    try:
        current_timestamp = await utils.get_timestamp()
        # the quest's enum value rides in the generic `price` column and the
        # human-readable text in `content`
        await self.insert_skill_action(
            user_id=perpetrator.id, skill_type="quest", skill_timestamp=current_timestamp,
            target_id=perpetrator.id, channel_id=ctx.channel.id, price=random_quest["enum_value"], content=random_quest["message"]
        )
        if exists:
            await self.update_user_skill_ts(perpetrator.id, Skill.FOUR, current_timestamp)
        else:
            await self.insert_user_skill_cooldown(perpetrator.id, Skill.FOUR, current_timestamp)
        # Updates user's skills used counter
        await self.update_user_skills_used(user_id=perpetrator.id)
    except Exception as e:
        print(e)
        return await ctx.send(f"**Something went wrong with your skill and it failed, {perpetrator.mention}!**")
    else:
        tribe_quest_embed = await self.get_tribe_quest_embed(channel=ctx.channel, user_id=perpetrator.id, quest=random_quest, tribe=user_tribe)
        await ctx.send(embed=tribe_quest_embed)
async def generate_random_quest(self) -> Any:
    """ Picks one quest definition at random.

    Each quest is a dict with a human-readable 'message' and a 1-based
    'enum_value' matching the message's position in the list. """

    messages = [
        "Complete 5 `TheLanguageJungle` games.",
        "Rep someone and get repped back.",
        "Win a coinflip betting 50 leaves.",
        "Get a 15+ score in the `Flags` game.",
        "Spend 4 hours in a Voice Channel in a single day.",
        "Buy any item from the SlothShop, if you have all items you need to get ripped-off first.",
        "Ping DNK 3 times in a row and try to evade a BAN!!!!",
    ]
    quests: List[Dict[str, Union[str, int]]] = [
        {"message": message, "enum_value": number}
        for number, message in enumerate(messages, start=1)
    ]
    return choice(quests)
async def get_tribe_quest_embed(self,
    channel: Union[discord.TextChannel, discord.Thread], user_id: int, quest: Dict[str, Union[str, int]], tribe: Dict[str, Union[str, int]]
) -> discord.Embed:
    """ Builds the announcement embed for a freshly started Tribe Quest.
    :param channel: The context channel (used for the guild footer).
    :param user_id: The ID of the member who started the quest.
    :param quest: The quest dict ('message' / 'enum_value').
    :param tribe: The tribe info of the quest starter's tribe. """

    now = await utils.get_timestamp()
    embed = discord.Embed(
        title="__A New Quest has been Started__",
        description=f"<@{user_id}> has just started a Quest for their Tribe named `{tribe['name']}`!",
        color=discord.Color.green(),
        timestamp=datetime.fromtimestamp(now)
    )
    embed.add_field(name="__Quest__:", value=quest["message"])
    if tribe["thumbnail"]:
        embed.set_thumbnail(url=tribe["thumbnail"])
    embed.set_image(url='https://c.tenor.com/MJ8Dxo58AJAAAAAC/muggers-quest.gif')
    embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url)
    return embed
async def complete_quest(self, user_id: int) -> None:
    """ Completes an on-going quest for a member.
    :param user_id: The ID of the user who's completing the quest. """

    # Gets Quest
    quest = await self.get_skill_action_by_user_id_and_skill_type(user_id=user_id, skill_type="quest")
    if not quest:
        return

    # Deletes Quest
    await self.delete_skill_action_by_user_id_and_skill_type(user_id=user_id, skill_type='quest')

    # Gets enum value: quest[7] holds the quest's 1-based enum value (stored in
    # the skill action's `price` column), which indexes QuestEnum's members in
    # declaration order.
    enum_name = QuestEnum.__dict__['_member_names_'][quest[7]-1]
    # NOTE(review): this is effectively QuestEnum[enum_name] — presumably the
    # member's value is an awaitable callback; verify against QuestEnum's definition.
    function: Callable = QuestEnum.__getitem__(name=enum_name)

    # Runs attached method if there's any
    if function:
        await function()
@tribe.command(aliases=["mission", "task", "chore", "quests"])
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.has_permissions(administrator=True)
async def quest(self, ctx) -> None:
    """ Shows all Quests that the tribe you are in has. """

    member = ctx.author

    tribe_member = await self.get_tribe_member(member.id)
    if not tribe_member:
        ctx.command.reset_cooldown(ctx)
        return await ctx.send(f"**You're not even in a tribe, {member.mention}!**")

    user_tribe = await self.get_tribe_info_by_user_id(tribe_member[0])
    tribe_members = await self.get_tribe_members(tribe_member[0], tribe_member[1])

    # collect the open quest (if any) of every member of the tribe
    quests: List[List[Union[str, int]]] = []
    for fellow in tribe_members:
        open_quest = await self.get_skill_action_by_user_id_and_skill_type(user_id=fellow[0], skill_type="quest")
        if open_quest:
            quests.append(open_quest)

    if not quests:
        return await ctx.send(f"**No quests found in your tribe, {member.mention}!**")

    quests_text: str = ''.join(f"```• {q[8]} ({q[7]});```" for q in quests)
    embed: discord.Embed = discord.Embed(
        title="__Tribe Quests__",
        description=f"Showing all `{len(quests)}` quests from this tribe:\n{quests_text}",
        color=member.color,
        timestamp=ctx.message.created_at
    )
    embed.set_footer(text=f"Requested by: {member}", icon_url=member.display_avatar)
    if user_tribe["thumbnail"]:
        embed.set_thumbnail(url=user_tribe["thumbnail"])
    await ctx.send(embed=embed)
from pdf2image import convert_from_path, convert_from_bytes
import requests
import cv2
import numpy as np
import re
import pytesseract
# config
DPI = 200 #-> default img shape 2339x1654, do not change this
BIN_INV_THRESHOLD = 192  # grayscale cutoff for binary-inverse thresholding
# morphology kernels
VERTICAL_FILTER = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 20))  # keeps tall vertical strokes (table lines)
ROW_FILTER = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 5))  # merges a grade's digits into one row blob
KERNEL_3x3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
KERNEL_5x5 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
# vertical pixel band containing the table body at 200 DPI
HEADER, FOOTER = 300, 2205
HOUGH_LINES_THRESHOLD = 300
MIN_THETA, MAX_THETA = [-np.pi/18, np.pi/18] #-10, 10 degree
# template matching for the grade-column ("tong diem") header
TONGDIEM_EXPECTED_REGION = 1300, 270, 1650, 750, #x1 y1 x2 y2
PATTERN_TONGDIEM = cv2.imread('./data/pattern_tongdiem.png', 0)  # NOTE(review): None if the file is missing - confirm it ships with the repo
PATTERN_H, PATTERN_W = PATTERN_TONGDIEM.shape
TONGDIEM_COLUMN_WIDTH = 120
# tesseract: single word (psm 8), digits and dot only
TESSERACT_NUMBER_CONFIG = "-l eng --oem 1 --psm 8 tessedit_char_whitelist=0123456789."
#GRADE_SCALE = [4.0, 5.0, 5.5, 6.5, 7.0, 8.0, 8.5, 9.0] #F,D,D+,C,C+,B,B+,A,A+
def is_local_file(path):
    """ Returns True when `path` is a local file path rather than a URL.
    BUGFIX: the old check (path[:4] != 'http') misclassified any local path
    that merely starts with "http" (e.g. 'http_exports/a.pdf'); match the
    actual URL schemes instead. """
    return not path.startswith(("http://", "https://"))
def pdf_to_np(path):
    """ Renders a PDF (local path or URL) into a list of grayscale numpy images. """
    if is_local_file(path):
        pages = convert_from_path(path, dpi=DPI, grayscale=True)
    else:
        print('Requesting pdf file from server... ', end='')
        response = requests.get(path)
        print('Done')
        assert response.status_code == 200, 'Oops, got some problems requesting the server'
        pages = convert_from_bytes(response.content, dpi=DPI, grayscale=True)
    return [np.array(page) for page in pages]
def deskew(img):
    """ Deskews a scanned page using the dominant near-vertical table lines.
    img: np ndarray - grayscale page
    return: (img, angle) - the (possibly rotated) image and the detected angle
            in degrees; angle == float('inf') flags a failed deskew. """
    (thresh, img_bin) = cv2.threshold(img, BIN_INV_THRESHOLD, 255, cv2.THRESH_BINARY_INV)
    img_bin = cv2.morphologyEx(img_bin, cv2.MORPH_CLOSE, KERNEL_3x3)
    img_bin = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, VERTICAL_FILTER)
    lines = cv2.HoughLines(img_bin[HEADER:FOOTER], 1, np.pi/360, HOUGH_LINES_THRESHOLD, min_theta=MIN_THETA, max_theta=MAX_THETA)
    if lines is None or len(lines) < 5:
        return img, float('inf') # indicates a failed deskew, when the img doesn't contain enough lines for deskewing
    # take average of the first 5 lines (radians -> degrees)
    angle = np.mean(lines[:5,:,1])*180/np.pi
    # if skewed angle is considerable, deskew
    if angle > 1.0:
        h, w = img.shape
        center_point = (w//2, h//2)
        # BUGFIX: this referenced an undefined name `angle_degree`, raising a
        # NameError whenever a rotation was actually needed; `angle` is the
        # value computed above.
        deskewed_img = cv2.warpAffine(img, cv2.getRotationMatrix2D(center_point, angle, 1.0), (w, h), borderValue=255)
        img = deskewed_img
    return img, angle
def detect_grade_column(deskewed_img):
    ''' return: (xmin, ymin, xmax, ymax) - the bounding box for the grade column
    '''
    rx1, ry1, rx2, ry2 = TONGDIEM_EXPECTED_REGION
    # invert so the dark header template matches the dark page content
    search_area = 255 - deskewed_img[ry1:ry2, rx1:rx2]
    scores = cv2.matchTemplate(search_area, PATTERN_TONGDIEM, cv2.TM_CCORR)
    _min_val, _max_val, _min_loc, best_loc = cv2.minMaxLoc(scores)
    bx, by = best_loc
    # anchor point: horizontally centered just below the matched header
    cx = bx + rx1 + PATTERN_W // 2
    top = by + ry1 + PATTERN_H + 10
    left = cx - TONGDIEM_COLUMN_WIDTH // 2
    return left, top, left + TONGDIEM_COLUMN_WIDTH, FOOTER
def get_rows(column):
    ''' column: np ndarray - cropped grades column
    return: list of bounding boxes for the grade in each rows
    '''
    _thresh, mask = cv2.threshold(column, BIN_INV_THRESHOLD, 255, cv2.THRESH_BINARY_INV)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, ROW_FILTER, borderValue=0)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, KERNEL_5x5)
    contours, _hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    boxes = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)  # h should typically be 18px
        if h <= 8:
            continue  # too short to be a digit row - discard
        # shorter rows get a slightly wider border around the crop
        margin = 7 if h <= 12 else 5
        boxes.append((x - margin, y - margin, x + w + margin, y + h + margin))
    return boxes
def read_grades(path):
    ''' path: string - path to the single pdf file
    return: list of final grades
    '''
    grades = []
    for page in pdf_to_np(path):
        page, angle = deskew(page)
        if angle == float('inf'):
            continue  # deskew failed - skip this page
        cx1, cy1, cx2, cy2 = detect_grade_column(page)
        column = page[cy1:cy2, cx1:cx2]
        for gx1, gy1, gx2, gy2 in get_rows(column):
            grades.append(ocr(column[gy1:gy2, gx1:gx2]))
    # try multi thread
    return grades
def ocr(ROI):
    ''' ROI: np ndarray - the grade cropped from each row
    return: float - the recognized grade, or -1.0 for unrecognized
    '''
    grade = -1.0
    try:
        text = pytesseract.image_to_string(ROI, config=TESSERACT_NUMBER_CONFIG)
        text = re.sub("[^0-9.]", "", text) #exclude '\n\x0c' and failed ocr
        grade = float(text)
        # a dropped decimal point reads e.g. "85" for 8.5 -> scale back into range
        if grade > 10: grade /= 10
    except Exception:
        # ValueError on empty/garbled text, tesseract errors on engine failure.
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt, making the batch loop impossible to interrupt.
        pass
    return grade
def count_grades(grades):
    """ Tallies numeric grades into letter-grade buckets.
    grades: iterable of floats; -1.0 (unrecognized) lands in 'N/A'.
    return: dict mapping letter grade -> count. """
    # (inclusive lower bound, letter) checked from best to worst
    scale = (
        (9.0, 'A+'), (8.5, 'A'), (8.0, 'B+'), (7.0, 'B'),
        (6.5, 'C+'), (5.5, 'C'), (5.0, 'D+'), (4.0, 'D'), (0.0, 'F'),
    )
    grade_map = {letter: 0 for _, letter in scale}
    grade_map['N/A'] = 0
    for grade in grades:
        for threshold, letter in scale:
            if grade >= threshold:
                grade_map[letter] += 1
                break
        else:
            # below every threshold (negative sentinel) -> unrecognized
            grade_map['N/A'] += 1
    return grade_map
if __name__ == '__main__':
    # simple REPL: read a pdf path (or URL), print the grade distribution
    while True:
        path = input('\nEnter path: ')
        if path == 'q':
            quit()
        grades = read_grades(path)
        grade_map = count_grades(grades)
        n_grades = len(grades)
        print('\nTotal recognized:', n_grades)
        print('Grade: %')
        for grade, count in grade_map.items():
            # BUGFIX: the f-string nested single quotes inside single quotes
            # ({... else '-'}), a SyntaxError on every Python before 3.12;
            # double-quoting the outer string keeps it portable.
            print(f" {grade.ljust(3)} : {round(count*100/n_grades) if count != 0 else '-'}")
    #breakpoint()
#breakpoint() | from pdf2image import convert_from_path, convert_from_bytes
import requests
import cv2
import numpy as np
import re
import pytesseract
# config
DPI = 200 #-> default img shape 2339x1654, do not change this
BIN_INV_THRESHOLD = 192  # grayscale cutoff for binary-inverse thresholding
# morphology kernels
VERTICAL_FILTER = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 20))  # keeps tall vertical strokes (table lines)
ROW_FILTER = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 5))  # merges a grade's digits into one row blob
KERNEL_3x3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
KERNEL_5x5 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
# vertical pixel band containing the table body at 200 DPI
HEADER, FOOTER = 300, 2205
HOUGH_LINES_THRESHOLD = 300
MIN_THETA, MAX_THETA = [-np.pi/18, np.pi/18] #-10, 10 degree
# template matching for the grade-column ("tong diem") header
TONGDIEM_EXPECTED_REGION = 1300, 270, 1650, 750, #x1 y1 x2 y2
PATTERN_TONGDIEM = cv2.imread('./data/pattern_tongdiem.png', 0)  # NOTE(review): None if the file is missing - confirm it ships with the repo
PATTERN_H, PATTERN_W = PATTERN_TONGDIEM.shape
TONGDIEM_COLUMN_WIDTH = 120
# tesseract: single word (psm 8), digits and dot only
TESSERACT_NUMBER_CONFIG = "-l eng --oem 1 --psm 8 tessedit_char_whitelist=0123456789."
#GRADE_SCALE = [4.0, 5.0, 5.5, 6.5, 7.0, 8.0, 8.5, 9.0] #F,D,D+,C,C+,B,B+,A,A+
def is_local_file(path):
    """ Returns True when `path` is a local file path rather than a URL.
    BUGFIX: the old check (path[:4] != 'http') misclassified any local path
    that merely starts with "http" (e.g. 'http_exports/a.pdf'); match the
    actual URL schemes instead. """
    return not path.startswith(("http://", "https://"))
def pdf_to_np(path):
    """ Renders a PDF (local path or URL) into a list of grayscale numpy images. """
    if is_local_file(path):
        pages = convert_from_path(path, dpi=DPI, grayscale=True)
    else:
        print('Requesting pdf file from server... ', end='')
        response = requests.get(path)
        print('Done')
        assert response.status_code == 200, 'Oops, got some problems requesting the server'
        pages = convert_from_bytes(response.content, dpi=DPI, grayscale=True)
    return [np.array(page) for page in pages]
def deskew(img):
    """ Deskews a scanned page using the dominant near-vertical table lines.
    img: np ndarray - grayscale page
    return: (img, angle) - the (possibly rotated) image and the detected angle
            in degrees; angle == float('inf') flags a failed deskew. """
    (thresh, img_bin) = cv2.threshold(img, BIN_INV_THRESHOLD, 255, cv2.THRESH_BINARY_INV)
    img_bin = cv2.morphologyEx(img_bin, cv2.MORPH_CLOSE, KERNEL_3x3)
    img_bin = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, VERTICAL_FILTER)
    lines = cv2.HoughLines(img_bin[HEADER:FOOTER], 1, np.pi/360, HOUGH_LINES_THRESHOLD, min_theta=MIN_THETA, max_theta=MAX_THETA)
    if lines is None or len(lines) < 5:
        return img, float('inf') # indicates a failed deskew, when the img doesn't contain enough lines for deskewing
    # take average of the first 5 lines (radians -> degrees)
    angle = np.mean(lines[:5,:,1])*180/np.pi
    # if skewed angle is considerable, deskew
    if angle > 1.0:
        h, w = img.shape
        center_point = (w//2, h//2)
        # BUGFIX: this referenced an undefined name `angle_degree`, raising a
        # NameError whenever a rotation was actually needed; `angle` is the
        # value computed above.
        deskewed_img = cv2.warpAffine(img, cv2.getRotationMatrix2D(center_point, angle, 1.0), (w, h), borderValue=255)
        img = deskewed_img
    return img, angle
def detect_grade_column(deskewed_img):
    ''' return: (xmin, ymin, xmax, ymax) - the bounding box for the grade column
    '''
    rx1, ry1, rx2, ry2 = TONGDIEM_EXPECTED_REGION
    # invert so the dark header template matches the dark page content
    search_area = 255 - deskewed_img[ry1:ry2, rx1:rx2]
    scores = cv2.matchTemplate(search_area, PATTERN_TONGDIEM, cv2.TM_CCORR)
    _min_val, _max_val, _min_loc, best_loc = cv2.minMaxLoc(scores)
    bx, by = best_loc
    # anchor point: horizontally centered just below the matched header
    cx = bx + rx1 + PATTERN_W // 2
    top = by + ry1 + PATTERN_H + 10
    left = cx - TONGDIEM_COLUMN_WIDTH // 2
    return left, top, left + TONGDIEM_COLUMN_WIDTH, FOOTER
def get_rows(column):
    ''' column: np ndarray - cropped grades column
    return: list of bounding boxes for the grade in each rows
    '''
    _thresh, mask = cv2.threshold(column, BIN_INV_THRESHOLD, 255, cv2.THRESH_BINARY_INV)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, ROW_FILTER, borderValue=0)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, KERNEL_5x5)
    contours, _hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    boxes = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)  # h should typically be 18px
        if h <= 8:
            continue  # too short to be a digit row - discard
        # shorter rows get a slightly wider border around the crop
        margin = 7 if h <= 12 else 5
        boxes.append((x - margin, y - margin, x + w + margin, y + h + margin))
    return boxes
def read_grades(path):
    ''' path: string - path to the single pdf file
    return: list of final grades
    '''
    grades = []
    for page in pdf_to_np(path):
        page, angle = deskew(page)
        if angle == float('inf'):
            continue  # deskew failed - skip this page
        cx1, cy1, cx2, cy2 = detect_grade_column(page)
        column = page[cy1:cy2, cx1:cx2]
        for gx1, gy1, gx2, gy2 in get_rows(column):
            grades.append(ocr(column[gy1:gy2, gx1:gx2]))
    # try multi thread
    return grades
def ocr(ROI):
    ''' ROI: np ndarray - the grade cropped from each row
    return: float - the recognized grade, or -1.0 for unrecognized
    '''
    grade = -1.0
    try:
        text = pytesseract.image_to_string(ROI, config=TESSERACT_NUMBER_CONFIG)
        text = re.sub("[^0-9.]", "", text) #exclude '\n\x0c' and failed ocr
        grade = float(text)
        # a dropped decimal point reads e.g. "85" for 8.5 -> scale back into range
        if grade > 10: grade /= 10
    except Exception:
        # ValueError on empty/garbled text, tesseract errors on engine failure.
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt, making the batch loop impossible to interrupt.
        pass
    return grade
def count_grades(grades):
    """ Tallies numeric grades into letter-grade buckets.
    grades: iterable of floats; -1.0 (unrecognized) lands in 'N/A'.
    return: dict mapping letter grade -> count. """
    # (inclusive lower bound, letter) checked from best to worst
    scale = (
        (9.0, 'A+'), (8.5, 'A'), (8.0, 'B+'), (7.0, 'B'),
        (6.5, 'C+'), (5.5, 'C'), (5.0, 'D+'), (4.0, 'D'), (0.0, 'F'),
    )
    grade_map = {letter: 0 for _, letter in scale}
    grade_map['N/A'] = 0
    for grade in grades:
        for threshold, letter in scale:
            if grade >= threshold:
                grade_map[letter] += 1
                break
        else:
            # below every threshold (negative sentinel) -> unrecognized
            grade_map['N/A'] += 1
    return grade_map
if __name__ == '__main__':
    # simple REPL: read a pdf path (or URL), print the grade distribution
    while True:
        path = input('\nEnter path: ')
        if path == 'q':
            quit()
        grades = read_grades(path)
        grade_map = count_grades(grades)
        total = len(grades)
        print('\nTotal recognized:', total)
        print('Grade: %')
        for letter, count in grade_map.items():
            percent = round(count * 100 / total) if count != 0 else "-"
            print(f' {letter.ljust(3)} : {percent}')
#breakpoint() |
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import shutil
import tempfile
import unittest
import unittest.mock
from huggingface_hub import Repository, delete_repo, login
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config, is_torch_available
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import PASS, USER, is_staging_test
# Non-default values for (nearly) every common `PretrainedConfig` argument.
# `ConfigTester.check_config_arguments_init` instantiates a config with these
# and verifies each one is actually stored, so every value here must differ
# from the corresponding argument's default.
config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},  # NOTE(review): "0" is a string while id2label uses an int key - presumably deliberate; confirm
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}
class ConfigTester(object):
def __init__(self, parent, config_class=None, has_text_modality=True, **kwargs):
self.parent = parent
self.config_class = config_class
self.has_text_modality = has_text_modality
self.inputs_dict = kwargs
def create_and_test_config_common_properties(self):
config = self.config_class(**self.inputs_dict)
common_properties = ["hidden_size", "num_attention_heads", "num_hidden_layers"]
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"])
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
# Test that config has the common properties as setter
for idx, name in enumerate(common_properties):
try:
setattr(config, name, idx)
self.parent.assertEqual(
getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
)
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(common_properties):
try:
config = self.config_class(**{name: idx})
self.parent.assertEqual(
getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
)
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def create_and_test_config_to_json_string(self):
config = self.config_class(**self.inputs_dict)
obj = json.loads(config.to_json_string())
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key], value)
def create_and_test_config_to_json_file(self):
config_first = self.config_class(**self.inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname, "config.json")
config_first.to_json_file(json_file_path)
config_second = self.config_class.from_json_file(json_file_path)
self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
def create_and_test_config_from_and_save_pretrained(self):
config_first = self.config_class(**self.inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(tmpdirname)
config_second = self.config_class.from_pretrained(tmpdirname)
self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
def create_and_test_config_with_num_labels(self):
config = self.config_class(**self.inputs_dict, num_labels=5)
self.parent.assertEqual(len(config.id2label), 5)
self.parent.assertEqual(len(config.label2id), 5)
config.num_labels = 3
self.parent.assertEqual(len(config.id2label), 3)
self.parent.assertEqual(len(config.label2id), 3)
def check_config_can_be_init_without_params(self):
if self.config_class.is_composition:
return
config = self.config_class()
self.parent.assertIsNotNone(config)
def check_config_arguments_init(self):
    """Instantiate the config with every common kwarg and verify each one landed on the instance."""
    config = self.config_class(**copy.deepcopy(config_common_kwargs))
    mismatches = []
    for key, expected in config_common_kwargs.items():
        if key == "torch_dtype":
            # "float16" is expected to be converted to the actual torch dtype.
            if not is_torch_available():
                continue
            import torch

            if config.torch_dtype != torch.float16:
                mismatches.append(("torch_dtype", config.torch_dtype, torch.float16))
        else:
            actual = getattr(config, key)
            if actual != expected:
                mismatches.append((key, actual, expected))
    if mismatches:
        details = "\n".join(f"- {name}: got {got} instead of {want}" for name, got, want in mismatches)
        raise ValueError(f"The following keys were not properly set in the config:\n{details}")
def run_common_tests(self):
    """Run the full battery of common configuration checks, in order."""
    checks = (
        self.create_and_test_config_common_properties,
        self.create_and_test_config_to_json_string,
        self.create_and_test_config_to_json_file,
        self.create_and_test_config_from_and_save_pretrained,
        self.create_and_test_config_with_num_labels,
        self.check_config_can_be_init_without_params,
        self.check_config_arguments_init,
    )
    for check in checks:
        check()
class FakeConfig(PretrainedConfig):
    """Minimal custom config used to exercise dynamic (remote-code) config push/load on the Hub."""

    def __init__(self, attribute=1, **kwargs):
        # NOTE(review): `attribute` is assigned before super().__init__, which receives the
        # remaining kwargs — keep this order.
        self.attribute = attribute
        super().__init__(**kwargs)
# Make sure this is synchronized with the config above.
FAKE_CONFIG_CODE = """
from transformers import PretrainedConfig
class FakeConfig(PretrainedConfig):
def __init__(self, attribute=1, **kwargs):
self.attribute = attribute
super().__init__(**kwargs)
"""
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    """Round-trip tests pushing configs to the (staging) Hugging Face Hub and loading them back."""

    @classmethod
    def setUpClass(cls):
        # Authenticate once; the token is shared by all tests in the class.
        cls._token = login(username=USER, password=PASS)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup: a repo may not exist if its test failed before pushing.
        try:
            delete_repo(token=cls._token, name="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, name="test-config-org", organization="valid_org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, name="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        """save_pretrained(push_to_hub=True) then from_pretrained from the user namespace."""
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(os.path.join(tmp_dir, "test-config"), push_to_hub=True, use_auth_token=self._token)

            new_config = BertConfig.from_pretrained(f"{USER}/test-config")
            for k, v in config.__dict__.items():
                # transformers_version can legitimately differ between save and reload.
                if k != "transformers_version":
                    self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        """Same round-trip as above, but pushing into an organization namespace."""
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                os.path.join(tmp_dir, "test-config-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_config = BertConfig.from_pretrained("valid_org/test-config-org")
            for k, v in config.__dict__.items():
                if k != "transformers_version":
                    self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        """Push a custom config class with its source file and reload it with trust_remote_code."""
        config = FakeConfig(attribute=42)
        config.auto_map = {"AutoConfig": "configuration.FakeConfig"}

        with tempfile.TemporaryDirectory() as tmp_dir:
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-config", use_auth_token=self._token)
            config.save_pretrained(tmp_dir)
            # Ship the module source so AutoConfig can import FakeConfig remotely.
            with open(os.path.join(tmp_dir, "configuration.py"), "w") as f:
                f.write(FAKE_CONFIG_CODE)
            repo.push_to_hub()

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "FakeConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        """Check update_from_string() against one attribute of each supported type."""
        c = GPT2Config()

        # Attempt to modify each of int/float/bool/str config records and verify they were updated.
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        """Ensure config_common_kwargs covers every PretrainedConfig attribute, each with a non-default value."""
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(missing_keys, ["is_encoder_decoder", "_name_or_path", "transformers_version"])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            # Bug fix: the previous version reused double quotes inside a double-quoted
            # f-string ({", ".join(...)}), a SyntaxError on Python < 3.12.
            raise ValueError(
                "The following keys are set with the default values in `test_configuration_common.config_common_kwargs` "
                f"pick another value for them: {', '.join(keys_with_defaults)}."
            )
class ConfigurationVersioningTest(unittest.TestCase):
    """Checks that config loading picks the configuration file matching the installed version."""

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            # Bug fix: close the file handle deterministically; the original passed a bare
            # open(...) to json.dump and never closed it.
            with open(os.path.join(tmp_dir, "config.4.0.0.json"), "w") as f:
                json.dump(configuration.to_dict(), f)

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v5.0.0 and above with an added token, one for versions lower.
        repo = "microsoft/layoutxlm-base"

        import transformers as new_transformers

        # Monkey-patch the version seen by configuration_utils to simulate a newer install.
        new_transformers.configuration_utils.__version__ = "v5.0.0"
        new_configuration = new_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(new_configuration.tokenizer_class, None)

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.tokenizer_class, "XLMRobertaTokenizer")
| # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import shutil
import tempfile
import unittest
import unittest.mock
from huggingface_hub import Repository, delete_repo, login
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config, is_torch_available
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import PASS, USER, is_staging_test
# A non-default value for every common kwarg accepted by PretrainedConfig.__init__;
# used by ConfigTester.check_config_arguments_init to verify each one is faithfully
# stored on the config instance (defaults are rejected by test_config_common_kwargs_is_complete).
config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}
class ConfigTester(object):
    """Reusable harness running common serialization / attribute checks on a config class.

    ``parent`` is the TestCase providing the ``assert*`` methods; extra kwargs become
    the inputs used to instantiate ``config_class``.
    """

    def __init__(self, parent, config_class=None, has_text_modality=True, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs

    def create_and_test_config_common_properties(self):
        """The common properties must exist, be settable, and be accepted as init kwargs."""
        config = self.config_class(**self.inputs_dict)
        common_properties = ["hidden_size", "num_attention_heads", "num_hidden_layers"]
        # Text models additionally expose a vocabulary size.
        if self.has_text_modality:
            common_properties.append("vocab_size")

        # Getters: each common property must exist on the instance.
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Setters: assigning must be reflected by the getter; models without a setter
        # are allowed to raise NotImplementedError.
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                pass

        # Constructor: each property must also be settable through Config(prop_name=...).
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                pass

    def create_and_test_config_to_json_string(self):
        """Every input kwarg must survive a round trip through ``to_json_string()``."""
        serialized = json.loads(self.config_class(**self.inputs_dict).to_json_string())
        for name in self.inputs_dict:
            self.parent.assertEqual(serialized[name], self.inputs_dict[name])

    def create_and_test_config_to_json_file(self):
        """A config written with ``to_json_file()`` must deserialize to an equal config."""
        original = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmp_dir:
            path = os.path.join(tmp_dir, "config.json")
            original.to_json_file(path)
            restored = self.config_class.from_json_file(path)
            self.parent.assertEqual(restored.to_dict(), original.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        """``save_pretrained()`` / ``from_pretrained()`` must round-trip the config."""
        saved = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmp_dir:
            saved.save_pretrained(tmp_dir)
            reloaded = self.config_class.from_pretrained(tmp_dir)
            self.parent.assertEqual(reloaded.to_dict(), saved.to_dict())

    def create_and_test_config_with_num_labels(self):
        """``num_labels`` must keep ``id2label``/``label2id`` in sync, at init and after reassignment."""

        def check_sizes(cfg, expected):
            self.parent.assertEqual(len(cfg.id2label), expected)
            self.parent.assertEqual(len(cfg.label2id), expected)

        config = self.config_class(**self.inputs_dict, num_labels=5)
        check_sizes(config, 5)
        config.num_labels = 3
        check_sizes(config, 3)

    def check_config_can_be_init_without_params(self):
        """Non-composite configs must be instantiable with all defaults."""
        if self.config_class.is_composition:
            return
        self.parent.assertIsNotNone(self.config_class())

    def check_config_arguments_init(self):
        """Instantiate with every common kwarg and verify each one landed on the instance."""
        config = self.config_class(**copy.deepcopy(config_common_kwargs))
        mismatches = []
        for key, expected in config_common_kwargs.items():
            if key == "torch_dtype":
                # "float16" is expected to be converted to the actual torch dtype.
                if not is_torch_available():
                    continue
                import torch

                if config.torch_dtype != torch.float16:
                    mismatches.append(("torch_dtype", config.torch_dtype, torch.float16))
            else:
                actual = getattr(config, key)
                if actual != expected:
                    mismatches.append((key, actual, expected))
        if mismatches:
            details = "\n".join(f"- {name}: got {got} instead of {want}" for name, got, want in mismatches)
            raise ValueError(f"The following keys were not properly set in the config:\n{details}")

    def run_common_tests(self):
        """Run the full battery of common configuration checks, in order."""
        checks = (
            self.create_and_test_config_common_properties,
            self.create_and_test_config_to_json_string,
            self.create_and_test_config_to_json_file,
            self.create_and_test_config_from_and_save_pretrained,
            self.create_and_test_config_with_num_labels,
            self.check_config_can_be_init_without_params,
            self.check_config_arguments_init,
        )
        for check in checks:
            check()
class FakeConfig(PretrainedConfig):
    """Minimal custom config used to exercise dynamic (remote-code) config push/load on the Hub."""

    def __init__(self, attribute=1, **kwargs):
        # NOTE(review): `attribute` is assigned before super().__init__, which receives the
        # remaining kwargs — keep this order.
        self.attribute = attribute
        super().__init__(**kwargs)
# Make sure this is synchronized with the config above.
FAKE_CONFIG_CODE = """
from transformers import PretrainedConfig
class FakeConfig(PretrainedConfig):
def __init__(self, attribute=1, **kwargs):
self.attribute = attribute
super().__init__(**kwargs)
"""
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    """Round-trip tests pushing configs to the (staging) Hugging Face Hub and loading them back."""

    @classmethod
    def setUpClass(cls):
        # Authenticate once; the token is shared by all tests in the class.
        cls._token = login(username=USER, password=PASS)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup: a repo may not exist if its test failed before pushing.
        try:
            delete_repo(token=cls._token, name="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, name="test-config-org", organization="valid_org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, name="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        """save_pretrained(push_to_hub=True) then from_pretrained from the user namespace."""
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(os.path.join(tmp_dir, "test-config"), push_to_hub=True, use_auth_token=self._token)

            new_config = BertConfig.from_pretrained(f"{USER}/test-config")
            for k, v in config.__dict__.items():
                # transformers_version can legitimately differ between save and reload.
                if k != "transformers_version":
                    self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        """Same round-trip as above, but pushing into an organization namespace."""
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                os.path.join(tmp_dir, "test-config-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_config = BertConfig.from_pretrained("valid_org/test-config-org")
            for k, v in config.__dict__.items():
                if k != "transformers_version":
                    self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        """Push a custom config class with its source file and reload it with trust_remote_code."""
        config = FakeConfig(attribute=42)
        config.auto_map = {"AutoConfig": "configuration.FakeConfig"}

        with tempfile.TemporaryDirectory() as tmp_dir:
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-config", use_auth_token=self._token)
            config.save_pretrained(tmp_dir)
            # Ship the module source so AutoConfig can import FakeConfig remotely.
            with open(os.path.join(tmp_dir, "configuration.py"), "w") as f:
                f.write(FAKE_CONFIG_CODE)
            repo.push_to_hub()

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "FakeConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        """update_from_string() must parse and apply int/float/bool/str overrides."""
        c = GPT2Config()

        # Derive a changed value of each supported type from the current config.
        overrides = {
            "n_embd": c.n_embd + 1,  # int
            "resid_pdrop": c.resid_pdrop + 1.0,  # float
            "scale_attn_weights": not c.scale_attn_weights,  # bool
            "summary_type": c.summary_type + "foo",  # str
        }
        c.update_from_string(",".join(f"{key}={value}" for key, value in overrides.items()))
        for key, expected in overrides.items():
            self.assertEqual(expected, getattr(c, key), f"mismatch for key: {key}")

    def test_config_common_kwargs_is_complete(self):
        """config_common_kwargs must name every PretrainedConfig attribute, each with a non-default value."""
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(missing_keys, ["is_encoder_decoder", "_name_or_path", "transformers_version"])
        keys_with_defaults = [
            key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)
        ]
        if keys_with_defaults:
            raise ValueError(
                "The following keys are set with the default values in `test_configuration_common.config_common_kwargs` "
                f"pick another value for them: {', '.join(keys_with_defaults)}."
            )
class ConfigurationVersioningTest(unittest.TestCase):
    """Checks that config loading picks the configuration file matching the installed version."""

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            # Bug fix: close the file handle deterministically; the original passed a bare
            # open(...) to json.dump and never closed it.
            with open(os.path.join(tmp_dir, "config.4.0.0.json"), "w") as f:
                json.dump(configuration.to_dict(), f)

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v5.0.0 and above with an added token, one for versions lower.
        repo = "microsoft/layoutxlm-base"

        import transformers as new_transformers

        # Monkey-patch the version seen by configuration_utils to simulate a newer install.
        new_transformers.configuration_utils.__version__ = "v5.0.0"
        new_configuration = new_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(new_configuration.tokenizer_class, None)

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.tokenizer_class, "XLMRobertaTokenizer")
|
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.
"""
import collections
import inspect
import math
import os
import random
import re
import shutil
import sys
import tempfile
import time
import warnings
from logging import StreamHandler
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from tqdm.auto import tqdm
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
deepspeed_init,
is_deepspeed_zero3_enabled,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset, IterableDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from . import __version__
from .configuration_utils import PretrainedConfig
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .debug_utils import DebugOption, DebugUnderflowOverflow
from .dependency_versions_check import dep_version_check
from .file_utils import (
CONFIG_NAME,
WEIGHTS_NAME,
PushToHubMixin,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_torch_tpu_available,
is_training_run_on_sagemaker,
)
from .modelcard import TrainingSummary
from .modeling_utils import PreTrainedModel, unwrap_model
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
distributed_broadcast_scalars,
distributed_concat,
find_batch_size,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_truncate,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalLoopOutput,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
from .utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
# Capability flags; both are flipped below when the installed torch is recent enough.
_is_torch_generator_available = False
_is_native_amp_available = False

# Callbacks every Trainer gets by default; the progress callback is swapped in notebooks.
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback

if is_in_notebook():
    from .utils.notebook import NotebookProgressCallback

    DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback

if is_apex_available():
    from apex import amp

# Both features are gated on torch >= 1.6.
if version.parse(torch.__version__) >= version.parse("1.6"):
    _is_torch_generator_available = True
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

if is_datasets_available():
    import datasets

if is_torch_tpu_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.parallel_loader as pl

if is_fairscale_available():
    dep_version_check("fairscale")
    import fairscale
    from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
    from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
    from fairscale.nn.wrap import auto_wrap
    from fairscale.optim import OSS
    from fairscale.optim.grad_scaler import ShardedGradScaler

# On SageMaker data-parallel, `dist` is bound to the SMDDP wrapper instead of torch.distributed.
if is_sagemaker_dp_enabled():
    import smdistributed.dataparallel.torch.distributed as dist
    from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
    import torch.distributed as dist

if is_sagemaker_mp_enabled():
    import smdistributed.modelparallel.torch as smp

    from .trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat

if is_training_run_on_sagemaker():
    # Mirror logs to stdout when running on SageMaker.
    logging.add_handler(StreamHandler(sys.stdout))

if TYPE_CHECKING:
    import optuna

logger = logging.get_logger(__name__)
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset` or :obj:`torch.utils.data.dataset.IterableDataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
Note that if it's a :obj:`torch.utils.data.dataset.IterableDataset` with some randomization and you are
training in a distributed fashion, your iterable dataset should either use a internal attribute
:obj:`generator` that is a :obj:`torch.Generator` for the randomization that must be identical on all
processes (and the Trainer will manually set the seed of this :obj:`generator` at each epoch) or have a
:obj:`set_epoch()` method that internally sets the seed of the RNGs used.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the
maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyper parameters (such as layer count, sizes of inner
layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False` .
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
"""
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
    self,
    model: Union[PreTrainedModel, torch.nn.Module] = None,
    args: TrainingArguments = None,
    data_collator: Optional[DataCollator] = None,
    train_dataset: Optional[Dataset] = None,
    eval_dataset: Optional[Dataset] = None,
    tokenizer: Optional[PreTrainedTokenizerBase] = None,
    model_init: Callable[[], PreTrainedModel] = None,
    compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
    callbacks: Optional[List[TrainerCallback]] = None,
    optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
    """
    Initialize all trainer state: arguments, model placement, data handling,
    sharded-DDP / mixed-precision configuration and the callback machinery.

    Exactly one of ``model`` or ``model_init`` should be provided; passing both
    emits a ``FutureWarning`` and ``model_init`` wins on :meth:`train`. See the
    class docstring for the meaning of each argument.
    """
    if args is None:
        # No arguments given: fall back to all-default TrainingArguments in "tmp_trainer".
        output_dir = "tmp_trainer"
        logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
        args = TrainingArguments(output_dir=output_dir)
    self.args = args
    # Seed must be set before instantiating the model when using model_init.
    set_seed(self.args.seed)
    self.hp_name = None
    self.deepspeed = None
    self.is_in_train = False
    # memory metrics - must set up as early as possible
    self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
    self._memory_tracker.start()
    # force device and distributed setup init explicitly
    args._setup_devices
    if model is None:
        if model_init is not None:
            self.model_init = model_init
            model = self.call_model_init()
        else:
            raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
    else:
        if model_init is not None:
            # Both given: warn that model_init will replace `model` on every train() call.
            warnings.warn(
                "`Trainer` requires either a `model` or `model_init` argument, but not both. "
                "`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
                FutureWarning,
            )
        self.model_init = model_init
    # The model itself reports whether its layers are spread over several GPUs (model parallelism).
    if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
        self.is_model_parallel = True
    else:
        self.is_model_parallel = False
    # Setup Sharded DDP training
    self.sharded_ddp = None
    if len(args.sharded_ddp) > 0:
        if args.deepspeed:
            raise ValueError(
                "Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
            )
        if args.local_rank == -1:
            raise ValueError("Using sharded DDP only works in distributed training.")
        elif not is_fairscale_available():
            raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
        elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
            raise ImportError(
                "Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
                f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
            )
        elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
            self.sharded_ddp = ShardedDDPOption.SIMPLE
        elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
            self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
        elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
            self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
    # one place to sort out whether to place the model on device or not
    # postpone switching model to cuda when:
    # 1. MP - since we are trying to fit a much bigger than 1 gpu model
    # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
    # and we only use deepspeed for training at the moment
    # 3. full fp16 eval - since the model needs to be half'ed first
    # 4. Sharded DDP - same as MP
    self.place_model_on_device = args.place_model_on_device
    if (
        self.is_model_parallel
        or args.deepspeed
        or (args.fp16_full_eval and not args.do_train)
        or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
    ):
        self.place_model_on_device = False
    default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
    self.data_collator = data_collator if data_collator is not None else default_collator
    self.train_dataset = train_dataset
    self.eval_dataset = eval_dataset
    self.tokenizer = tokenizer
    if self.place_model_on_device:
        model = model.to(args.device)
    # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
    if self.is_model_parallel:
        self.args._n_gpu = 1
    # later use `self.model is self.model_wrapped` to check if it's wrapped or not
    self.model_wrapped = model
    self.model = model
    self.compute_metrics = compute_metrics
    self.optimizer, self.lr_scheduler = optimizers
    if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
        # model_init recreates the model each run, so pre-built optimizers would point at stale params.
        raise RuntimeError(
            "Passing a `model_init` is incompatible with providing the `optimizers` argument."
            "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
        )
    default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
    callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
    self.callback_handler = CallbackHandler(
        callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
    )
    self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
    # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
    self._loggers_initialized = False
    # Create output directory if needed
    if self.is_world_process_zero():
        os.makedirs(self.args.output_dir, exist_ok=True)
    if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
        raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
    if args.max_steps > 0:
        logger.info("max_steps is given, it will override any value given in num_train_epochs")
    # Datasets without __len__ cannot derive steps from epochs, so max_steps is mandatory.
    if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
        raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
    self._signature_columns = None
    # Mixed precision setup
    self.use_apex = False
    self.use_amp = False
    self.fp16_backend = None
    if args.fp16:
        if args.fp16_backend == "auto":
            # Prefer native torch AMP when available, otherwise fall back to apex.
            self.fp16_backend = "amp" if _is_native_amp_available else "apex"
        else:
            self.fp16_backend = args.fp16_backend
        logger.info(f"Using {self.fp16_backend} fp16 backend")
    if args.fp16 and not args.deepspeed:  # deepspeed manages its own fp16
        if self.fp16_backend == "amp":
            self.use_amp = True
            # Pick the GradScaler matching the distributed setup.
            if is_sagemaker_mp_enabled():
                self.scaler = smp.amp.GradScaler()
            elif self.sharded_ddp is not None:
                self.scaler = ShardedGradScaler()
            else:
                self.scaler = torch.cuda.amp.GradScaler()
        else:
            if not is_apex_available():
                raise ImportError(
                    "Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
                )
            self.use_apex = True
    # FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error.
    if is_sagemaker_mp_enabled() and self.use_amp and args.max_grad_norm is not None and args.max_grad_norm > 0:
        raise ValueError(
            "SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. Pass "
            "along 'max_grad_norm': 0 in your hyperparameters."
        )
    # Label smoothing
    if self.args.label_smoothing_factor != 0:
        self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
    else:
        self.label_smoother = None
    self.state = TrainerState()
    self.control = TrainerControl()
    # Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then
    # returned to 0 every time flos need to be logged
    self.current_flos = 0
    self.hp_search_backend = None
    self.use_tune_checkpoints = False
    # QA models predict span start/end positions instead of a single "labels" field.
    default_label_names = (
        ["start_positions", "end_positions"]
        if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
        else ["labels"]
    )
    self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
    self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
    # very last
    self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
    """
    Register an additional :class:`~transformer.TrainerCallback`.

    Args:
        callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
            Either a :class:`~transformer.TrainerCallback` subclass (which the handler
            will instantiate) or an already-constructed callback instance.
    """
    handler = self.callback_handler
    handler.add_callback(callback)
def pop_callback(self, callback):
    """
    Detach a callback from the handler and hand it back to the caller.

    When no matching callback is registered, :obj:`None` is returned and no error is
    raised.

    Args:
        callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
            A callback class (the first registered member of that class is popped) or
            a specific callback instance.

    Returns:
        :class:`~transformer.TrainerCallback`: The detached callback, if found.
    """
    removed = self.callback_handler.pop_callback(callback)
    return removed
def remove_callback(self, callback):
    """
    Drop a callback from the handler without returning it.

    Args:
        callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
            A callback class (the first registered member of that class is removed) or
            a specific callback instance.
    """
    handler = self.callback_handler
    handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return dataset
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
self._signature_columns += ["label", "label_ids"]
columns = [k for k in self._signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
if len(ignored_columns) > 0:
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description} don't have a corresponding argument in "
f"`{self.model.__class__.__name__}.forward` and have been ignored: {", ".join(ignored_columns)}."
)
if version.parse(datasets.__version__) < version.parse("1.4.0"):
dataset.set_format(
type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"]
)
return dataset
else:
return dataset.remove_columns(ignored_columns)
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
    """
    Choose the sampler for the training dataloader.

    Returns :obj:`None` for datasets without :obj:`__len__`. Otherwise picks a
    (distributed) length-grouped sampler when ``args.group_by_length`` is set, and a
    random / distributed sampler otherwise, depending on the world size.
    """
    if not isinstance(self.train_dataset, collections.abc.Sized):
        return None
    generator = None
    if self.args.world_size <= 1 and _is_torch_generator_available:
        # Dedicated generator (seeded once here) drives single-process shuffling.
        generator = torch.Generator()
        generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
    # Build the sampler.
    if self.args.group_by_length:
        if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):
            # Reuse a precomputed length column when present; otherwise the sampler
            # computes lengths itself from the model input.
            lengths = (
                self.train_dataset[self.args.length_column_name]
                if self.args.length_column_name in self.train_dataset.column_names
                else None
            )
        else:
            lengths = None
        model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
        if self.args.world_size <= 1:
            return LengthGroupedSampler(
                self.train_dataset,
                self.args.train_batch_size,
                lengths=lengths,
                model_input_name=model_input_name,
                generator=generator,
            )
        else:
            return DistributedLengthGroupedSampler(
                self.train_dataset,
                self.args.train_batch_size,
                num_replicas=self.args.world_size,
                rank=self.args.process_index,
                lengths=lengths,
                model_input_name=model_input_name,
                seed=self.args.seed,
            )
    else:
        if self.args.world_size <= 1:
            if _is_torch_generator_available:
                return RandomSampler(self.train_dataset, generator=generator)
            return RandomSampler(self.train_dataset)
        elif (
            self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]
            and not self.args.dataloader_drop_last
        ):
            # Use a loop for TPUs when drop_last is False to have all batches have the same size.
            return DistributedSamplerWithLoop(
                self.train_dataset,
                batch_size=self.args.per_device_train_batch_size,
                num_replicas=self.args.world_size,
                rank=self.args.process_index,
                seed=self.args.seed,
            )
        else:
            return DistributedSampler(
                self.train_dataset,
                num_replicas=self.args.world_size,
                rank=self.args.process_index,
                seed=self.args.seed,
            )
def get_train_dataloader(self) -> DataLoader:
    """
    Build the :class:`~torch.utils.data.DataLoader` used for training.

    Datasets without :obj:`__len__` (iterable datasets) are loaded without a sampler
    and sharded across processes by hand when needed; sized datasets go through
    :meth:`_get_train_sampler`. Subclass and override to inject custom behavior.
    """
    if self.train_dataset is None:
        raise ValueError("Trainer: training requires a train_dataset.")
    dataset = self.train_dataset
    if is_datasets_available() and isinstance(dataset, datasets.Dataset):
        dataset = self._remove_unused_columns(dataset, description="training")
    if isinstance(dataset, torch.utils.data.dataset.IterableDataset):
        # Iterable datasets cannot be indexed by a sampler; shard manually instead.
        if self.args.world_size > 1:
            dataset = IterableDatasetShard(
                dataset,
                batch_size=self.args.train_batch_size,
                drop_last=self.args.dataloader_drop_last,
                num_processes=self.args.world_size,
                process_index=self.args.process_index,
            )
        return DataLoader(
            dataset,
            batch_size=self.args.train_batch_size,
            collate_fn=self.data_collator,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
        )
    return DataLoader(
        dataset,
        batch_size=self.args.train_batch_size,
        sampler=self._get_train_sampler(),
        collate_fn=self.data_collator,
        drop_last=self.args.dataloader_drop_last,
        num_workers=self.args.dataloader_num_workers,
        pin_memory=self.args.dataloader_pin_memory,
    )
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
    """
    Pick the sampler used for evaluation/prediction over ``eval_dataset``.

    The legacy prediction loop keeps the deprecated
    :class:`SequentialDistributedSampler` variants; the modern path uses a plain
    :class:`SequentialSampler` single-process and :class:`ShardSampler` otherwise.
    """
    if self.args.use_legacy_prediction_loop:
        # Deprecated code path, kept until the legacy prediction loop is removed.
        if is_torch_tpu_available():
            return SequentialDistributedSampler(
                eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
            )
        if is_sagemaker_mp_enabled():
            return SequentialDistributedSampler(
                eval_dataset,
                num_replicas=smp.dp_size(),
                rank=smp.dp_rank(),
                batch_size=self.args.per_device_eval_batch_size,
            )
        if self.args.local_rank != -1:
            return SequentialDistributedSampler(eval_dataset)
        return SequentialSampler(eval_dataset)
    if self.args.world_size <= 1:
        return SequentialSampler(eval_dataset)
    return ShardSampler(
        eval_dataset,
        batch_size=self.args.per_device_eval_batch_size,
        num_processes=self.args.world_size,
        process_index=self.args.process_index,
    )
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
    """
    Build the :class:`~torch.utils.data.DataLoader` used for evaluation.

    Subclass and override this method if you want to inject some custom behavior.

    Args:
        eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
            Overrides :obj:`self.eval_dataset` when given. For a
            :obj:`datasets.Dataset`, columns not accepted by ``model.forward()`` are
            dropped first. Must implement :obj:`__len__`.
    """
    if eval_dataset is None and self.eval_dataset is None:
        raise ValueError("Trainer: evaluation requires an eval_dataset.")
    dataset = self.eval_dataset if eval_dataset is None else eval_dataset
    if is_datasets_available() and isinstance(dataset, datasets.Dataset):
        dataset = self._remove_unused_columns(dataset, description="evaluation")
    if isinstance(dataset, torch.utils.data.dataset.IterableDataset):
        # Iterable datasets cannot be indexed by a sampler; shard manually instead.
        if self.args.world_size > 1:
            dataset = IterableDatasetShard(
                dataset,
                batch_size=self.args.eval_batch_size,
                drop_last=self.args.dataloader_drop_last,
                num_processes=self.args.world_size,
                process_index=self.args.process_index,
            )
        return DataLoader(
            dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
        )
    return DataLoader(
        dataset,
        sampler=self._get_eval_sampler(dataset),
        batch_size=self.args.eval_batch_size,
        collate_fn=self.data_collator,
        drop_last=self.args.dataloader_drop_last,
        num_workers=self.args.dataloader_num_workers,
        pin_memory=self.args.dataloader_pin_memory,
    )
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
    """
    Build the :class:`~torch.utils.data.DataLoader` used for prediction on a test set.

    Subclass and override this method if you want to inject some custom behavior.

    Args:
        test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
            The dataset to predict on. For a :obj:`datasets.Dataset`, columns not
            accepted by ``model.forward()`` are dropped first. Must implement
            :obj:`__len__`.
    """
    if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
        test_dataset = self._remove_unused_columns(test_dataset, description="test")
    if isinstance(test_dataset, torch.utils.data.dataset.IterableDataset):
        # Iterable datasets cannot be indexed by a sampler; shard manually instead.
        if self.args.world_size > 1:
            test_dataset = IterableDatasetShard(
                test_dataset,
                batch_size=self.args.eval_batch_size,
                drop_last=self.args.dataloader_drop_last,
                num_processes=self.args.world_size,
                process_index=self.args.process_index,
            )
        return DataLoader(
            test_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
        )
    # Prediction reuses the eval sampler and the eval batch size.
    return DataLoader(
        test_dataset,
        sampler=self._get_eval_sampler(test_dataset),
        batch_size=self.args.eval_batch_size,
        collate_fn=self.data_collator,
        drop_last=self.args.dataloader_drop_last,
        pin_memory=self.args.dataloader_pin_memory,
    )
def create_optimizer_and_scheduler(self, num_training_steps: int):
    """
    Build both the optimizer and the learning-rate scheduler.

    The defaults are sensible for most setups; to customize, either pass a tuple
    through the Trainer's :obj:`optimizers` init argument, or override this method —
    or the more granular :obj:`create_optimizer` / :obj:`create_scheduler` — in a
    subclass.

    Args:
        num_training_steps (int): Total number of optimization steps planned.
    """
    # The optimizer must exist before the scheduler, which wraps it.
    self.create_optimizer()
    self.create_scheduler(num_training_steps)
def create_optimizer(self):
    """
    Set up :obj:`self.optimizer` if it was not already provided at init.

    Parameters are split into two groups so that biases and LayerNorm weights are
    excluded from weight decay. The optimizer class is Adafactor or AdamW depending
    on ``args.adafactor``, wrapped in fairscale's OSS for simple sharded DDP and in
    SageMaker's DistributedOptimizer when SageMaker model parallelism is enabled.
    """
    if self.optimizer is None:
        decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])
        decay_parameters = [name for name in decay_parameters if "bias" not in name]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
                "weight_decay": self.args.weight_decay,
            },
            {
                "params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
                "weight_decay": 0.0,
            },
        ]
        # Fix: removed a redundant pre-assignment of optimizer_cls that was
        # immediately overwritten by the if/else below.
        if self.args.adafactor:
            optimizer_cls = Adafactor
            optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
        else:
            optimizer_cls = AdamW
            optimizer_kwargs = {
                "betas": (self.args.adam_beta1, self.args.adam_beta2),
                "eps": self.args.adam_epsilon,
            }
        optimizer_kwargs["lr"] = self.args.learning_rate
        if self.sharded_ddp == ShardedDDPOption.SIMPLE:
            self.optimizer = OSS(
                params=optimizer_grouped_parameters,
                optim=optimizer_cls,
                **optimizer_kwargs,
            )
        else:
            self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
    if is_sagemaker_mp_enabled():
        self.optimizer = smp.DistributedOptimizer(self.optimizer)
def create_scheduler(self, num_training_steps: int):
    """
    Set up :obj:`self.lr_scheduler` if it was not already provided at init.

    :obj:`self.optimizer` must already exist when this is called.

    Args:
        num_training_steps (int): The number of training steps to do.
    """
    if self.lr_scheduler is not None:
        return
    # An explicit warmup_steps setting wins; otherwise derive it from warmup_ratio.
    if self.args.warmup_steps > 0:
        warmup_steps = self.args.warmup_steps
    else:
        warmup_steps = math.ceil(num_training_steps * self.args.warmup_ratio)
    self.lr_scheduler = get_scheduler(
        self.args.lr_scheduler_type,
        self.optimizer,
        num_warmup_steps=warmup_steps,
        num_training_steps=num_training_steps,
    )
def num_examples(self, dataloader: DataLoader) -> int:
    """
    Count the samples behind a :class:`~torch.utils.data.DataLoader` via its dataset.

    Raises if the underlying dataset does not implement :obj:`__len__`.
    """
    dataset = dataloader.dataset
    return len(dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
"""HP search setup code"""
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
if self.hp_search_backend == HPSearchBackend.OPTUNA:
params = self.hp_space(trial)
elif self.hp_search_backend == HPSearchBackend.RAY:
params = trial
params.pop("wandb", None)
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
if self.args.deepspeed:
# Rebuild the deepspeed config to reflect the updated training parameters
from transformers.integrations import DeepSpeedConfigHF
self.args.deepspeed_config_hf = DeepSpeedConfigHF(self.args)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
    """
    Save a full training checkpoint inside a Ray Tune checkpoint directory.

    No-op unless :obj:`self.use_tune_checkpoints` is enabled. On the main process,
    also persists the trainer state and the optimizer/scheduler states next to the
    model.
    """
    from ray import tune

    if not self.use_tune_checkpoints:
        return
    with tune.checkpoint_dir(step=self.state.global_step) as tune_dir:
        output_dir = os.path.join(tune_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
        self.save_model(output_dir)
        if self.is_world_process_zero():
            self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
            torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
    """
    Instantiate a fresh model through :obj:`self.model_init`.

    ``model_init`` may take either no argument or a single ``trial`` argument (for
    hyperparameter search); any other signature is rejected.

    Returns:
        The model produced by ``model_init``.

    Raises:
        RuntimeError: If ``model_init`` takes more than one argument or returns
            :obj:`None`.
    """
    argcount = len(inspect.signature(self.model_init).parameters)
    if argcount == 0:
        model = self.model_init()
    elif argcount == 1:
        model = self.model_init(trial)
    else:
        raise RuntimeError("model_init should have 0 or 1 argument.")
    if model is None:
        raise RuntimeError("model_init should not return None.")
    return model
def _wrap_model(self, model, training=True):
    """
    Wrap ``model`` for the current distributed / mixed-precision setup.

    Checked in order: SageMaker model parallelism, DeepSpeed (already wrapped at
    init), an existing wrapper (no re-wrap), apex AMP, DataParallel, then — for
    training only — Sharded DDP / FullyShardedDDP, SageMaker data parallelism, or
    vanilla DistributedDataParallel.

    Args:
        model: The model to wrap.
        training (:obj:`bool`): Whether the wrapped model will be trained;
            evaluation returns before the distributed wrappers are applied.

    Returns:
        The (possibly) wrapped model.
    """
    if is_sagemaker_mp_enabled():
        # Wrapping the base model twice in a DistributedModel will raise an error.
        if isinstance(self.model_wrapped, smp.model.DistributedModel):
            return self.model_wrapped
        return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)
    # already initialized its own DDP and AMP
    if self.deepspeed:
        return self.deepspeed
    # train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
    if unwrap_model(model) is not model:
        return model
    # Mixed precision training with apex (torch < 1.6)
    if self.use_apex and training:
        model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
    # Multi-gpu training (should be after apex fp16 initialization)
    if self.args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Note: in torch.distributed mode, there's no point in wrapping the model
    # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
    if not training:
        return model
    # Distributed training (should be after apex fp16 initialization)
    if self.sharded_ddp is not None:
        # Sharded DDP!
        if self.sharded_ddp == ShardedDDPOption.SIMPLE:
            model = ShardedDDP(model, self.optimizer)
        else:
            mixed_precision = self.args.fp16
            cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
            zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
            # XXX: Breaking the self.model convention but I see no way around it for now.
            if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
                model = auto_wrap(model)
            self.model = model = FullyShardedDDP(
                model,
                mixed_precision=mixed_precision,
                reshard_after_forward=zero_3,
                cpu_offload=cpu_offload,
            ).to(self.args.device)
    elif is_sagemaker_dp_enabled():
        model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
    elif self.args.local_rank != -1:
        if self.args.ddp_find_unused_parameters is not None:
            find_unused_parameters = self.args.ddp_find_unused_parameters
        elif isinstance(model, PreTrainedModel):
            # find_unused_parameters breaks checkpointing as per
            # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
            find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
        else:
            find_unused_parameters = True
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[self.args.local_rank],
            output_device=self.args.local_rank,
            find_unused_parameters=find_unused_parameters,
        )
    return model
def train(
    self,
    resume_from_checkpoint: Optional[Union[str, bool]] = None,
    trial: Union["optuna.Trial", Dict[str, Any]] = None,
    **kwargs,
):
    """
    Main training entry point.

    Args:
        resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
            If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
            :class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
            `args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
            training will resume from the model/optimizer/scheduler states loaded here.
        trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
            The trial run or the hyperparameter dictionary for hyperparameter search.
        kwargs:
            Additional keyword arguments used to hide deprecated arguments
    """
    # memory metrics - must set up as early as possible
    self._memory_tracker.start()

    args = self.args

    self.is_in_train = True

    # do_train is not a reliable argument, as it might not be set and .train() still called, so
    # the following is a workaround:
    if args.fp16_full_eval and not args.do_train:
        self.model = self.model.to(args.device)

    if "model_path" in kwargs:
        resume_from_checkpoint = kwargs.pop("model_path")
        warnings.warn(
            "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
            "instead.",
            FutureWarning,
        )
    if len(kwargs) > 0:
        # Fix: the join separator previously reused double quotes inside this double-quoted
        # f-string, which is a SyntaxError on Python < 3.12; also dropped the duplicated
        # "received got" in the message.
        raise TypeError(f"train() received unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
    # This might change the seed so needs to run first.
    self._hp_search_setup(trial)

    # Model re-init
    model_reloaded = False
    if self.model_init is not None:
        # Seed must be set before instantiating the model when using model_init.
        set_seed(args.seed)
        self.model = self.call_model_init(trial)
        model_reloaded = True
        # Reinitializes optimizer and scheduler
        self.optimizer, self.lr_scheduler = None, None

    # Load potential model checkpoint
    if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
        resume_from_checkpoint = get_last_checkpoint(args.output_dir)
        if resume_from_checkpoint is None:
            raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")

    if resume_from_checkpoint is not None:
        if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
            raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")

        # Fix: removed a stray closing parenthesis from the log message.
        logger.info(f"Loading model from {resume_from_checkpoint}.")

        if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)):
            config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))
            checkpoint_version = config.transformers_version
            if checkpoint_version is not None and checkpoint_version != __version__:
                logger.warn(
                    f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
                    f"Transformers but your current version is {__version__}. This is not recommended and could "
                    "yield to errors or unwanted behaviors."
                )

        if args.deepspeed:
            # will be resumed in deepspeed_init
            pass
        else:
            # We load the model state dict on the CPU to avoid an OOM error.
            state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
            # If the model is on the GPU, it still works!
            self._load_state_dict_in_model(state_dict)

    # If model was re-initialized, put it on the right device and update self.model_wrapped
    if model_reloaded:
        if self.place_model_on_device:
            self.model = self.model.to(args.device)
        self.model_wrapped = self.model

    # Keeping track whether we can can len() on the dataset or not
    train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)

    # Data loader and number of training steps
    train_dataloader = self.get_train_dataloader()

    # Setting up training control variables:
    # number of training epochs: num_train_epochs
    # number of training steps per epoch: num_update_steps_per_epoch
    # total number of training steps to execute: max_steps
    total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
    if train_dataset_is_sized:
        num_update_steps_per_epoch = len(train_dataloader) // args.gradient_accumulation_steps
        num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
        if args.max_steps > 0:
            max_steps = args.max_steps
            num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
                args.max_steps % num_update_steps_per_epoch > 0
            )
            # May be slightly incorrect if the last batch in the training datalaoder has a smaller size but it's
            # the best we can do.
            num_train_samples = args.max_steps * total_train_batch_size
        else:
            max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
            num_train_epochs = math.ceil(args.num_train_epochs)
            num_train_samples = len(self.train_dataset) * args.num_train_epochs
    else:
        # see __init__. max_steps is set when the dataset has no __len__
        max_steps = args.max_steps
        num_train_epochs = int(args.num_train_epochs)
        num_update_steps_per_epoch = max_steps
        num_train_samples = args.max_steps * total_train_batch_size

    if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
        debug_overflow = DebugUnderflowOverflow(self.model)  # noqa

    delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
    if args.deepspeed:
        deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
            self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
        )
        self.model = deepspeed_engine.module
        self.model_wrapped = deepspeed_engine
        self.deepspeed = deepspeed_engine
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
    elif not delay_optimizer_creation:
        self.create_optimizer_and_scheduler(num_training_steps=max_steps)

    self.state = TrainerState()
    self.state.is_hyper_param_search = trial is not None

    model = self._wrap_model(self.model_wrapped)

    # for the rest of this function `model` is the outside model, whether it was wrapped or not
    if model is not self.model:
        self.model_wrapped = model

    if delay_optimizer_creation:
        self.create_optimizer_and_scheduler(num_training_steps=max_steps)

    # Check if saved optimizer or scheduler states exist
    self._load_optimizer_and_scheduler(resume_from_checkpoint)

    # important: at this point:
    # self.model is the Transformers Model
    # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.

    # Train!
    num_examples = (
        self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * args.max_steps
    )

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {num_examples}")
    logger.info(f"  Num Epochs = {num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {max_steps}")

    self.state.epoch = 0
    start_time = time.time()
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    steps_trained_progress_bar = None

    # Check if continuing training from a checkpoint
    if resume_from_checkpoint is not None and os.path.isfile(
        os.path.join(resume_from_checkpoint, "trainer_state.json")
    ):
        self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
        epochs_trained = self.state.global_step // num_update_steps_per_epoch
        if not args.ignore_data_skip:
            steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
            steps_trained_in_current_epoch *= args.gradient_accumulation_steps
        else:
            steps_trained_in_current_epoch = 0

        logger.info("  Continuing training from checkpoint, will skip to saved global_step")
        logger.info(f"  Continuing training from epoch {epochs_trained}")
        logger.info(f"  Continuing training from global step {self.state.global_step}")
        if not args.ignore_data_skip:
            logger.info(
                f"  Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
                "batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` "
                "flag to your launch command, but you will resume the training on data already seen by your model."
            )
            if self.is_local_process_zero() and not args.disable_tqdm:
                steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)
                steps_trained_progress_bar.set_description("Skipping the first batches")

    # Update the references
    self.callback_handler.model = self.model
    self.callback_handler.optimizer = self.optimizer
    self.callback_handler.lr_scheduler = self.lr_scheduler
    self.callback_handler.train_dataloader = train_dataloader
    self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
    self.state.trial_params = hp_params(trial) if trial is not None else None
    # This should be the same if the state has been saved but in case the training arguments changed, it's safer
    # to set this after the load.
    self.state.max_steps = max_steps
    self.state.num_train_epochs = num_train_epochs
    self.state.is_local_process_zero = self.is_local_process_zero()
    self.state.is_world_process_zero = self.is_world_process_zero()

    # tr_loss is a tensor to avoid synchronization of TPUs through .item()
    tr_loss = torch.tensor(0.0).to(args.device)
    # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
    self._total_loss_scalar = 0.0
    self._globalstep_last_logged = self.state.global_step
    model.zero_grad()

    self.control = self.callback_handler.on_train_begin(args, self.state, self.control)

    # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
    if not args.ignore_data_skip:
        for epoch in range(epochs_trained):
            # We just need to begin an iteration to create the randomization of the sampler.
            for _ in train_dataloader:
                break

    for epoch in range(epochs_trained, num_train_epochs):
        if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
            train_dataloader.sampler.set_epoch(epoch)
        elif isinstance(train_dataloader.dataset, IterableDatasetShard):
            train_dataloader.dataset.set_epoch(epoch)

        if is_torch_tpu_available():
            parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
            epoch_iterator = parallel_loader
        else:
            epoch_iterator = train_dataloader

        # Reset the past mems state at the beginning of each epoch if necessary.
        if args.past_index >= 0:
            self._past = None

        steps_in_epoch = (
            len(epoch_iterator) if train_dataset_is_sized else args.max_steps * args.gradient_accumulation_steps
        )
        self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)

        for step, inputs in enumerate(epoch_iterator):

            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                if steps_trained_progress_bar is not None:
                    steps_trained_progress_bar.update(1)
                if steps_trained_in_current_epoch == 0:
                    self._load_rng_state(resume_from_checkpoint)
                continue
            elif steps_trained_progress_bar is not None:
                steps_trained_progress_bar.close()
                steps_trained_progress_bar = None

            if step % args.gradient_accumulation_steps == 0:
                self.control = self.callback_handler.on_step_begin(args, self.state, self.control)

            if (
                ((step + 1) % args.gradient_accumulation_steps != 0)
                and args.local_rank != -1
                and args._no_sync_in_gradient_accumulation
            ):
                # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
                with model.no_sync():
                    tr_loss += self.training_step(model, inputs)
            else:
                tr_loss += self.training_step(model, inputs)
            self.current_flos += float(self.floating_point_ops(inputs))

            # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
            if self.deepspeed:
                self.deepspeed.step()

            if (step + 1) % args.gradient_accumulation_steps == 0 or (
                # last step in epoch but step is always smaller than gradient_accumulation_steps
                steps_in_epoch <= args.gradient_accumulation_steps
                and (step + 1) == steps_in_epoch
            ):
                # Gradient clipping
                if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:
                    # deepspeed does its own clipping

                    if self.use_amp:
                        # AMP: gradients need unscaling
                        self.scaler.unscale_(self.optimizer)

                    if hasattr(self.optimizer, "clip_grad_norm"):
                        # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
                        self.optimizer.clip_grad_norm(args.max_grad_norm)
                    elif hasattr(model, "clip_grad_norm_"):
                        # Some models (like FullyShardedDDP) have a specific way to do gradient clipping
                        model.clip_grad_norm_(args.max_grad_norm)
                    else:
                        # Revert to normal clipping otherwise, handling Apex or full precision
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
                            args.max_grad_norm,
                        )

                # Optimizer step
                optimizer_was_run = True
                if self.deepspeed:
                    pass  # called outside the loop
                elif is_torch_tpu_available():
                    xm.optimizer_step(self.optimizer)
                elif self.use_amp:
                    # If the scale went down, the step was skipped due to inf/nan gradients
                    # and the LR scheduler must not advance.
                    scale_before = self.scaler.get_scale()
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
                    scale_after = self.scaler.get_scale()
                    optimizer_was_run = scale_before <= scale_after
                else:
                    self.optimizer.step()

                if optimizer_was_run and not self.deepspeed:
                    self.lr_scheduler.step()

                model.zero_grad()
                self.state.global_step += 1
                self.state.epoch = epoch + (step + 1) / steps_in_epoch
                self.control = self.callback_handler.on_step_end(args, self.state, self.control)

                self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)

            if self.control.should_epoch_stop or self.control.should_training_stop:
                break

        self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
        self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)

        if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
            if is_torch_tpu_available():
                # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                xm.master_print(met.metrics_report())
            else:
                logger.warning(
                    "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
                    "configured. Check your training configuration if this is unexpected."
                )
        if self.control.should_training_stop:
            break

    if args.past_index and hasattr(self, "_past"):
        # Clean the state at the end of training
        delattr(self, "_past")

    logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
    if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
        # Wait for everyone to get here so we are sure the model has been saved by process 0.
        if is_torch_tpu_available():
            xm.rendezvous("load_best_model_at_end")
        elif args.local_rank != -1:
            dist.barrier()

        logger.info(
            f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
        )
        # We load the model state dict on the CPU to avoid an OOM error.
        state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME), map_location="cpu")
        # If the model is on the GPU, it still works!
        self._load_state_dict_in_model(state_dict)

        if self.deepspeed:
            self.deepspeed.load_checkpoint(
                self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
            )

    metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
    self.store_flos()
    metrics["total_flos"] = self.state.total_flos
    self.log(metrics)

    self.control = self.callback_handler.on_train_end(args, self.state, self.control)
    # add remaining tr_loss
    self._total_loss_scalar += tr_loss.item()

    self.is_in_train = False

    self._memory_tracker.stop_and_update_metrics(metrics)

    return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
def _load_state_dict_in_model(self, state_dict):
load_result = self.model.load_state_dict(state_dict, strict=False)
if len(load_result.missing_keys) != 0:
if set(load_result.missing_keys) == set(self.model._keys_to_ignore_on_save):
self.model.tie_weights()
else:
logger.warn(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
if len(load_result.unexpected_keys) != 0:
logger.warn(f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}.")
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
    """Emit logs, run evaluation, and/or save a checkpoint, as requested by the control flags.

    Args:
        tr_loss: running loss tensor accumulated by the caller; it is zeroed *in place*
            here after logging so the caller's accumulator restarts for the next window.
        model: the (possibly wrapped) model, forwarded to ``_save_checkpoint``.
        trial: hyperparameter-search trial (or param dict) metrics are reported to.
        epoch: current fractional epoch, forwarded to the HP-search report.
    """
    if self.control.should_log:
        logs: Dict[str, float] = {}
        tr_loss_scalar = tr_loss.item()
        # reset tr_loss to zero — in place, so the caller's reference sees the reset
        tr_loss -= tr_loss

        # Average the accumulated loss over the steps elapsed since the last log.
        logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
        logs["learning_rate"] = self._get_learning_rate()

        self._total_loss_scalar += tr_loss_scalar
        self._globalstep_last_logged = self.state.global_step
        self.store_flos()

        self.log(logs)

    metrics = None
    if self.control.should_evaluate:
        metrics = self.evaluate()
        self._report_to_hp_search(trial, epoch, metrics)

    if self.control.should_save:
        # Evaluation metrics (if any) are passed along so best-checkpoint tracking can use them.
        self._save_checkpoint(model, trial, metrics=metrics)
        self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _load_rng_state(self, checkpoint):
    """Restore the python/numpy/torch (and CUDA/XLA) RNG states saved in ``checkpoint``.

    Restoring RNG states on resume keeps data order and dropout masks identical to the
    original run. A missing RNG file is logged and skipped rather than raised, so resuming
    never hard-fails on it.
    """
    # Load RNG states from `checkpoint`
    if checkpoint is None:
        return
    local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
    if local_rank != -1:
        rng_file = os.path.join(checkpoint, f"rng_state_{local_rank}.pth")
        # Fix: the existence check previously re-joined `checkpoint` onto the already-joined
        # `rng_file`, so with a relative checkpoint path it tested `checkpoint/checkpoint/...`
        # and the RNG state was silently never restored.
        if not os.path.isfile(rng_file):
            logger.info(
                f"Didn't find an RNG file for process {local_rank}, if you are resuming a training that "
                "wasn't launched in a distributed fashion, reproducibility is not guaranteed."
            )
            return
    else:
        rng_file = os.path.join(checkpoint, "rng_state.pth")
        if not os.path.isfile(rng_file):
            logger.info(
                "Didn't find an RNG file, if you are resuming a training that was launched in a distributed "
                "fashion, reproducibility is not guaranteed."
            )
            return
    checkpoint_rng_state = torch.load(rng_file)
    random.setstate(checkpoint_rng_state["python"])
    np.random.set_state(checkpoint_rng_state["numpy"])
    torch.random.set_rng_state(checkpoint_rng_state["cpu"])
    if torch.cuda.is_available():
        if self.args.local_rank != -1:
            # Distributed: one CUDA RNG state was saved per process.
            torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"])
        else:
            # Non-distributed: restore the state of every visible GPU.
            torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
    if is_torch_tpu_available():
        xm.set_rng_state(checkpoint_rng_state["xla"])
def _save_checkpoint(self, model, trial, metrics=None):
    """Save a full training checkpoint: model, optimizer, scheduler, RNG states and trainer state.

    Args:
        model: the (possibly wrapped) model being trained; ``self.model`` is what is saved
            in the common case (see comment below).
        trial: current HP-search trial (or param dict), used to pick a per-run output dir.
        metrics: latest evaluation metrics, used for best-checkpoint tracking when
            ``metric_for_best_model`` is set.
    """
    # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
    # want to save except FullyShardedDDP.
    # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"

    # Save model checkpoint
    checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"

    if self.hp_search_backend is not None and trial is not None:
        # During a hyperparameter search, checkpoints go under a per-run subdirectory.
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            run_id = trial.number
        else:
            from ray import tune

            run_id = tune.get_trial_id()
        run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
        run_dir = os.path.join(self.args.output_dir, run_name)
    else:
        run_dir = self.args.output_dir
    self.store_flos()

    output_dir = os.path.join(run_dir, checkpoint_folder)
    self.save_model(output_dir)
    if self.deepspeed:
        # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed
        # config `stage3_gather_fp16_weights_on_model_save` is True
        self.deepspeed.save_checkpoint(output_dir)

    # Save optimizer and scheduler
    if self.sharded_ddp == ShardedDDPOption.SIMPLE:
        # The sharded optimizer state must be gathered on one process before saving.
        self.optimizer.consolidate_state_dict()

    if is_torch_tpu_available():
        xm.rendezvous("saving_optimizer_states")
        xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
        with warnings.catch_warnings(record=True) as caught_warnings:
            xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            reissue_pt_warnings(caught_warnings)
    elif is_sagemaker_mp_enabled():
        if smp.dp_rank() == 0:
            # Consolidate the state dict on all processed of dp_rank 0
            opt_state_dict = self.optimizer.state_dict()
            # Save it and the scheduler on the main process
            if self.is_world_process_zero():
                torch.save(opt_state_dict, os.path.join(output_dir, "optimizer.pt"))
                with warnings.catch_warnings(record=True) as caught_warnings:
                    torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                reissue_pt_warnings(caught_warnings)
                if self.use_amp:
                    torch.save(self.scaler.state_dict(), os.path.join(output_dir, "scaler.pt"))
    elif self.is_world_process_zero() and not self.deepspeed:
        # deepspeed.save_checkpoint above saves model/optim/sched
        torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
        with warnings.catch_warnings(record=True) as caught_warnings:
            torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
        reissue_pt_warnings(caught_warnings)
        if self.use_amp:
            # Also persist the AMP gradient-scaler state.
            torch.save(self.scaler.state_dict(), os.path.join(output_dir, "scaler.pt"))

    # Determine the new best metric / best model checkpoint
    if metrics is not None and self.args.metric_for_best_model is not None:
        metric_to_check = self.args.metric_for_best_model
        if not metric_to_check.startswith("eval_"):
            metric_to_check = f"eval_{metric_to_check}"
        metric_value = metrics[metric_to_check]

        operator = np.greater if self.args.greater_is_better else np.less
        if (
            self.state.best_metric is None
            or self.state.best_model_checkpoint is None
            or operator(metric_value, self.state.best_metric)
        ):
            self.state.best_metric = metric_value
            self.state.best_model_checkpoint = output_dir

    # Save the Trainer state
    if self.is_world_process_zero():
        self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))

    # Save RNG state in non-distributed training
    rng_states = {
        "python": random.getstate(),
        "numpy": np.random.get_state(),
        "cpu": torch.random.get_rng_state(),
    }
    if torch.cuda.is_available():
        if self.args.local_rank == -1:
            # In non distributed, we save the global CUDA RNG state (will take care of DataParallel)
            rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
        else:
            rng_states["cuda"] = torch.cuda.random.get_rng_state()

    if is_torch_tpu_available():
        rng_states["xla"] = xm.get_rng_state()

    # A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
    # not yet exist.
    os.makedirs(output_dir, exist_ok=True)
    local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
    if local_rank == -1:
        torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
    else:
        # One RNG file per process in distributed mode.
        torch.save(rng_states, os.path.join(output_dir, f"rng_state_{local_rank}.pth"))

    # Maybe delete some older checkpoints.
    if self.is_world_process_zero():
        self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
def _load_optimizer_and_scheduler(self, checkpoint):
    """If optimizer and scheduler states exist, load them."""
    if checkpoint is None:
        # Fresh run: nothing to restore.
        return

    if self.deepspeed:
        # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init
        return

    # Only restore when BOTH files are present, so partial checkpoints are ignored.
    if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
        os.path.join(checkpoint, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        if is_torch_tpu_available():
            # On TPU we have to take some extra precautions to properly load the states on the right device.
            optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
            with warnings.catch_warnings(record=True) as caught_warnings:
                lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
            reissue_pt_warnings(caught_warnings)

            # Load on CPU first, then explicitly move the tensors to the XLA device.
            xm.send_cpu_data_to_device(optimizer_state, self.args.device)
            xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)

            self.optimizer.load_state_dict(optimizer_state)
            self.lr_scheduler.load_state_dict(lr_scheduler_state)
        else:
            # Under SageMaker MP the optimizer state is kept on CPU.
            map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
            self.optimizer.load_state_dict(
                torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=map_location)
            )
            with warnings.catch_warnings(record=True) as caught_warnings:
                self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
            reissue_pt_warnings(caught_warnings)
        if self.use_amp and os.path.isfile(os.path.join(checkpoint, "scaler.pt")):
            # Restore the AMP gradient-scaler state as well, when it was saved.
            self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, "scaler.pt")))
def hyperparameter_search(
    self,
    hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
    compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
    n_trials: int = 20,
    direction: str = "minimize",
    backend: Optional[Union["str", HPSearchBackend]] = None,
    hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
    **kwargs,
) -> BestRun:
    """
    Launch an hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
    :obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
    provided, the sum of all metrics otherwise.

    .. warning::

        To use this method, you need to have provided a ``model_init`` when initializing your
        :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
        with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
        method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.

    Args:
        hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
            A function that defines the hyperparameter search space. Will default to
            :func:`~transformers.trainer_utils.default_hp_space_optuna` or
            :func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
        compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
            A function computing the objective to minimize or maximize from the metrics returned by the
            :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
        n_trials (:obj:`int`, `optional`, defaults to 20):
            The number of trial runs to test.
        direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
            Whether to optimize greater or lower objects. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should
            pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
            several metrics.
        backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
            The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
            one is installed. If both are installed, will default to optuna.
        hp_name(:obj:`Callable[["optuna.Trial"], str]`, `optional`):
            A function that builds the trial/run name. Defaults to an auto-generated ``run-{id}`` name.
        kwargs:
            Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
            more information see:

            - the documentation of `optuna.create_study
              <https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
            - the documentation of `tune.run
              <https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__

    Returns:
        :class:`transformers.trainer_utils.BestRun`: All the information about the best run.
    """
    if backend is None:
        backend = default_hp_search_backend()
        if backend is None:
            raise RuntimeError(
                "At least one of optuna or ray should be installed. "
                "To install optuna run `pip install optuna`."
                "To install ray run `pip install ray[tune]`."
            )
    backend = HPSearchBackend(backend)
    if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
        raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
    if backend == HPSearchBackend.RAY and not is_ray_tune_available():
        raise RuntimeError(
            "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
        )
    self.hp_search_backend = backend
    if self.model_init is None:
        raise RuntimeError(
            "To use hyperparameter search, you need to pass your model through a model_init function."
        )

    self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
    self.hp_name = hp_name
    self.compute_objective = default_compute_objective if compute_objective is None else compute_objective

    # Dispatch to the backend-specific search loop, then reset the backend marker so a
    # subsequent plain `train()` is not treated as part of a search.
    run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
    best_run = run_hp_search(self, n_trials, direction, **kwargs)

    self.hp_search_backend = None
    return best_run
def log(self, logs: Dict[str, float]) -> None:
    """
    Log :obj:`logs` on the various objects watching training.

    Subclass and override this method to inject custom behavior.

    Args:
        logs (:obj:`Dict[str, float]`):
            The values to log.
    """
    epoch = self.state.epoch
    if epoch is not None:
        # Record the (rounded) epoch alongside the metrics; note `logs` is mutated in place.
        logs["epoch"] = round(epoch, 2)
    entry = dict(logs)
    entry["step"] = self.state.global_step
    self.state.log_history.append(entry)
    self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
    """
    Perform a training step on a batch of inputs.

    Subclass and override to inject custom behavior.

    Args:
        model (:obj:`nn.Module`):
            The model to train.
        inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
            The inputs and targets of the model.

            The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
            argument :obj:`labels`. Check your model's documentation for all accepted arguments.

    Return:
        :obj:`torch.Tensor`: The tensor with training loss on this batch.
    """
    model.train()
    inputs = self._prepare_inputs(inputs)

    if is_sagemaker_mp_enabled():
        # SageMaker ModelParallel handles forward+backward (and loss reduction) itself.
        scaler = self.scaler if self.use_amp else None
        loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps, scaler=scaler)
        return loss_mb.reduce_mean().detach().to(self.args.device)

    if self.use_amp:
        # Run the forward pass under autocast so eligible ops use fp16.
        with autocast():
            loss = self.compute_loss(model, inputs)
    else:
        loss = self.compute_loss(model, inputs)

    if self.args.n_gpu > 1:
        loss = loss.mean()  # mean() to average on multi-gpu parallel training

    if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
        # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
        loss = loss / self.args.gradient_accumulation_steps

    # Exactly one backward path runs, depending on the mixed-precision/engine setup.
    if self.use_amp:
        self.scaler.scale(loss).backward()
    elif self.use_apex:
        with amp.scale_loss(loss, self.optimizer) as scaled_loss:
            scaled_loss.backward()
    elif self.deepspeed:
        # loss gets scaled under gradient_accumulation_steps in deepspeed
        loss = self.deepspeed.backward(loss)
    else:
        loss.backward()

    return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
    """
    How the loss is computed by Trainer. By default, all models return the loss in the first element.

    Subclass and override for custom behavior.
    """
    if self.label_smoother is not None and "labels" in inputs:
        # Pop the labels so the model doesn't compute its own (un-smoothed) loss.
        labels = inputs.pop("labels")
    else:
        labels = None
    outputs = model(**inputs)
    # Save past state if it exists
    # TODO: this needs to be fixed and made cleaner later.
    if self.args.past_index >= 0:
        self._past = outputs[self.args.past_index]

    if labels is not None:
        loss = self.label_smoother(outputs, labels)
    else:
        # We don't use .loss here since the model may return tuples instead of ModelOutput.
        try:
            loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
        except IndexError:
            # NOTE(review): this fallback assumes that when indexing fails `outputs` is a
            # 0-d tensor-like exposing `.item()` — confirm against the models actually used;
            # `.item()` would raise on an empty tuple.
            loss = outputs["loss"] if isinstance(outputs, dict) else outputs.item()

    return (loss, outputs) if return_outputs else loss
def is_local_process_zero(self) -> bool:
    """
    Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
    machines) main process.
    """
    # Index 0 on each node marks that node's main process.
    local_index = self.args.local_process_index
    return local_index == 0
def is_world_process_zero(self) -> bool:
    """
    Whether or not this process is the global main process (when training in a distributed fashion on several
    machines, this is only going to be :obj:`True` for one process).
    """
    # Special case for SageMaker ModelParallel: there `process_index` holds the dp rank,
    # not the global rank, so ask smp for the true global rank instead.
    if is_sagemaker_mp_enabled():
        return smp.rank() == 0
    return self.args.process_index == 0
    def save_model(self, output_dir: Optional[str] = None):
        """
        Will save the model, so you can reload it using :obj:`from_pretrained()`.

        Will only save from the main process.

        Args:
            output_dir (:obj:`str`, `optional`):
                Directory to save to; defaults to :obj:`self.args.output_dir`.
        """
        if output_dir is None:
            output_dir = self.args.output_dir
        if is_torch_tpu_available():
            # TPU has its own rendezvous-aware save path.
            self._save_tpu(output_dir)
        elif is_sagemaker_mp_enabled():
            # Calling the state_dict needs to be done on the wrapped model and on all processes.
            state_dict = self.model_wrapped.state_dict()
            if self.is_world_process_zero():
                self._save(output_dir, state_dict=state_dict)
        elif (
            ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
        ):
            # Sharded DDP: state_dict() is queried on every rank, but only rank zero writes it.
            state_dict = self.model.state_dict()
            if self.is_world_process_zero():
                self._save(output_dir, state_dict=state_dict)
        elif self.deepspeed:
            # this takes care of everything as long as we aren't under zero3
            if self.is_world_process_zero():
                self._save(output_dir)
            if is_deepspeed_zero3_enabled():
                # It's too complicated to try to override different places where the weights dump gets
                # saved, so since under zero3 the file is bogus, simply delete it. The user should
                # either user deepspeed checkpoint to resume or to recover full weights use
                # zero_to_fp32.py stored in the checkpoint.
                if self.is_world_process_zero():
                    file = os.path.join(output_dir, WEIGHTS_NAME)
                    if os.path.isfile(file):
                        # logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
                        os.remove(file)
                # now save the real model if stage3_gather_fp16_weights_on_model_save=True
                # if false it will not be saved.
                # This must be called on all ranks
                self.deepspeed.save_fp16_model(output_dir, WEIGHTS_NAME)
        elif self.is_world_process_zero():
            self._save(output_dir)
    def _save_tpu(self, output_dir: Optional[str] = None):
        """
        TPU-specific save path: serializes through ``xm.save`` after an ``xm.rendezvous`` so all
        ordinals reach the checkpoint together; only the master ordinal creates the directory and
        writes the training arguments.
        """
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        logger.info(f"Saving model checkpoint to {output_dir}")
        if xm.is_master_ordinal():
            os.makedirs(output_dir, exist_ok=True)
            # Good practice: persist the training arguments alongside the weights.
            torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        xm.rendezvous("saving_checkpoint")
        if not isinstance(self.model, PreTrainedModel):
            if isinstance(unwrap_model(self.model), PreTrainedModel):
                # A wrapper hides a PreTrainedModel: save through the unwrapped model, but with
                # the wrapper's state_dict so the saved weights match what is being trained.
                unwrap_model(self.model).save_pretrained(
                    output_dir,
                    save_config=self.is_world_process_zero(),
                    state_dict=self.model.state_dict(),
                    save_function=xm.save,
                )
            else:
                logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                state_dict = self.model.state_dict()
                xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir, save_config=self.is_world_process_zero(), save_function=xm.save)
        if self.tokenizer is not None and self.is_world_process_zero():
            self.tokenizer.save_pretrained(output_dir)
    def _save(self, output_dir: Optional[str] = None, state_dict=None):
        """
        Write the model weights, tokenizer and training arguments to ``output_dir``.

        Args:
            output_dir (:obj:`str`, `optional`): Defaults to :obj:`self.args.output_dir`.
            state_dict (`optional`): A precomputed state dict to save instead of querying the model.
        """
        # If we are executing this function, we are the process zero, so we don't check for that.
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        logger.info(f"Saving model checkpoint to {output_dir}")
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        if not isinstance(self.model, PreTrainedModel):
            if isinstance(unwrap_model(self.model), PreTrainedModel):
                # A wrapper hides a PreTrainedModel: save through the unwrapped model.
                if state_dict is None:
                    state_dict = self.model.state_dict()
                unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
            else:
                logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                if state_dict is None:
                    state_dict = self.model.state_dict()
                torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir, state_dict=state_dict)
        if self.tokenizer is not None:
            self.tokenizer.save_pretrained(output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self.args.local_rank != -1:
self.state.total_flos += distributed_broadcast_scalars([self.current_flos]).sum().item()
self.current_flos = 0
else:
self.state.total_flos += self.current_flos
self.current_flos = 0
    def _sorted_checkpoints(
        self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
    ) -> List[str]:
        """
        Return the checkpoint directories under ``output_dir`` sorted from oldest to newest.

        Ordering uses the file modification time when ``use_mtime`` is :obj:`True`, otherwise the
        global-step number parsed from the ``{checkpoint_prefix}-<step>`` directory name. When a
        best-model checkpoint is tracked, it is shifted toward the end of the list so that
        checkpoint rotation (which deletes from the front) removes the others first.
        """
        ordering_and_checkpoint_path = []
        glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]
        for path in glob_checkpoints:
            if use_mtime:
                ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
            else:
                # Extract the trailing step number as the sort key; paths that don't match are skipped.
                regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
                if regex_match is not None and regex_match.groups() is not None:
                    ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
        checkpoints_sorted = sorted(ordering_and_checkpoint_path)
        checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
        # Make sure we don't delete the best model.
        if self.state.best_model_checkpoint is not None:
            best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
            # Bubble the best checkpoint rightward, one slot per iteration, so it ends up near
            # the end of the oldest-first list (outside the range that gets rotated away).
            for i in range(best_model_index, len(checkpoints_sorted) - 2):
                checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]
        return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
# If save_total_limit=1 with load_best_mode_at_end=True, we could end up deleting the last checkpoint, which
# we don't do to allow resuming.
save_total_limit = self.args.save_total_limit
if (
self.state.best_model_checkpoint is not None
and self.args.save_total_limit == 1
and checkpoints_sorted[-1] != self.state.best_model_checkpoint
):
save_total_limit = 2
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint)
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> Dict[str, float]:
        """
        Run evaluation and returns metrics.

        The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
        (pass it to the init :obj:`compute_metrics` argument).

        You can also subclass and override this method to inject custom behavior.

        Args:
            eval_dataset (:obj:`Dataset`, `optional`):
                Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
                columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
                :obj:`__len__` method.
            ignore_keys (:obj:`Lst[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default)

        Returns:
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
            dictionary also contains the epoch number which comes from the training state.
        """
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        start_time = time.time()
        # The legacy loop is kept behind a flag for backward compatibility.
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        output = eval_loop(
            eval_dataloader,
            description="Evaluation",
            # No point gathering the predictions if there are no metrics, otherwise we defer to
            # self.args.prediction_loss_only
            prediction_loss_only=True if self.compute_metrics is None else None,
            ignore_keys=ignore_keys,
            metric_key_prefix=metric_key_prefix,
        )
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        # Add throughput metrics (runtime, samples/s) under the same key prefix.
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        self.log(output.metrics)
        if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
        self._memory_tracker.stop_and_update_metrics(output.metrics)
        return output.metrics
    def predict(
        self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
    ) -> PredictionOutput:
        """
        Run prediction and returns predictions and potential metrics.

        Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
        will also return metrics, like in :obj:`evaluate()`.

        Args:
            test_dataset (:obj:`Dataset`):
                Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
                ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
            ignore_keys (:obj:`Lst[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"test"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "test_bleu" if the prefix is "test" (default)

        .. note::

            If your predictions or labels have different sequence length (for instance because you're doing dynamic
            padding in a token classification task) the predictions will be padded (on the right) to allow for
            concatenation into one array. The padding index is -100.

        Returns: `NamedTuple` A namedtuple with the following keys:

            - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
            - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
            - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
              contained labels).
        """
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        test_dataloader = self.get_test_dataloader(test_dataset)
        start_time = time.time()
        # Same loop as evaluate(); the legacy version is kept behind a flag.
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        output = eval_loop(
            test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
        )
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        # Add throughput metrics (runtime, samples/s) under the same key prefix.
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        self._memory_tracker.stop_and_update_metrics(output.metrics)
        return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
    def evaluation_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> EvalLoopOutput:
        """
        Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.

        Works both with or without labels.

        Args:
            dataloader (:obj:`DataLoader`): The dataloader to iterate over.
            description (:obj:`str`): Label used in the log banner (e.g. ``"Evaluation"``).
            prediction_loss_only (:obj:`bool`, `optional`):
                Whether to gather only losses; defaults to :obj:`self.args.prediction_loss_only`.
            ignore_keys (:obj:`List[str]`, `optional`):
                Keys of the model output dict to skip when gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                Prefix added to every metric key.
        """
        prediction_loss_only = (
            prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
        )
        # if eval is called w/o train init deepspeed here
        if self.args.deepspeed and not self.deepspeed:
            # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
            # from the checkpoint eventually
            deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
            self.model = deepspeed_engine.module
            self.model_wrapped = deepspeed_engine
            self.deepspeed = deepspeed_engine
            # XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
            # for example the Z3-optimizer is a must for zero3 to work even for inference - what we
            # don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
            deepspeed_engine.optimizer.optimizer = None
            deepspeed_engine.lr_scheduler = None
        model = self._wrap_model(self.model, training=False)
        # if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
        # ``train`` is running, halve it first and then put on device
        if not self.is_in_train and self.args.fp16_full_eval:
            model = model.half().to(self.args.device)
        batch_size = dataloader.batch_size
        logger.info(f"***** Running {description} *****")
        if isinstance(dataloader.dataset, collections.abc.Sized):
            logger.info(f"  Num examples = {self.num_examples(dataloader)}")
        else:
            logger.info("  Num examples: Unknown")
        logger.info(f"  Batch size = {batch_size}")
        model.eval()
        self.callback_handler.eval_dataloader = dataloader
        # Do this before wrapping.
        eval_dataset = dataloader.dataset
        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
        if self.args.past_index >= 0:
            self._past = None
        # Initialize containers
        # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
        losses_host = None
        preds_host = None
        labels_host = None
        # losses/preds/labels on CPU (final containers)
        all_losses = None
        all_preds = None
        all_labels = None
        # Will be useful when we have an iterable dataset so don't know its length.
        observed_num_examples = 0
        # Main evaluation loop
        for step, inputs in enumerate(dataloader):
            # Update the observed num examples
            observed_batch_size = find_batch_size(inputs)
            if observed_batch_size is not None:
                observed_num_examples += observed_batch_size
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
            # Update containers on host
            if loss is not None:
                # Repeat the scalar loss per sample so truncation to num_samples stays correct.
                losses = self._nested_gather(loss.repeat(batch_size))
                losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
            if logits is not None:
                # Pad to a common sequence length across processes before gathering.
                logits = self._pad_across_processes(logits)
                logits = self._nested_gather(logits)
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
            if labels is not None:
                labels = self._pad_across_processes(labels)
                labels = self._nested_gather(labels)
                labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
                if losses_host is not None:
                    losses = nested_numpify(losses_host)
                    all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
                if preds_host is not None:
                    logits = nested_numpify(preds_host)
                    all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
                if labels_host is not None:
                    labels = nested_numpify(labels_host)
                    all_labels = (
                        labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
                    )
                # Set back to None to begin a new accumulation
                losses_host, preds_host, labels_host = None, None, None
        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of the evaluation loop
            delattr(self, "_past")
        # Gather all remaining tensors and put them back on the CPU
        if losses_host is not None:
            losses = nested_numpify(losses_host)
            all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
        if preds_host is not None:
            logits = nested_numpify(preds_host)
            all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
        if labels_host is not None:
            labels = nested_numpify(labels_host)
            all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
        # Number of samples
        if not isinstance(eval_dataset, IterableDataset):
            num_samples = len(eval_dataset)
        elif isinstance(eval_dataset, IterableDatasetShard):
            num_samples = eval_dataset.num_examples
        else:
            num_samples = observed_num_examples
        # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of
        # samplers has been rounded to a multiple of batch_size, so we truncate.
        if all_losses is not None:
            all_losses = all_losses[:num_samples]
        if all_preds is not None:
            all_preds = nested_truncate(all_preds, num_samples)
        if all_labels is not None:
            all_labels = nested_truncate(all_labels, num_samples)
        # Metrics!
        if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
            metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
        else:
            metrics = {}
        # To be JSON-serializable, we need to remove numpy types or zero-d tensors
        metrics = denumpify_detensorize(metrics)
        if all_losses is not None:
            metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
def _nested_gather(self, tensors, name=None):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
if name is None:
name = "nested_gather"
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return tensors
# Copied from Accelerate.
def _pad_across_processes(self, tensor, pad_index=-100):
"""
Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
they can safely be gathered.
"""
if isinstance(tensor, (list, tuple)):
return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)
elif isinstance(tensor, dict):
return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()})
elif not isinstance(tensor, torch.Tensor):
raise TypeError(
f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
)
if len(tensor.shape) < 2:
return tensor
# Gather all sizes
size = torch.tensor(tensor.shape, device=tensor.device)[None]
sizes = self._nested_gather(size).cpu()
max_size = max(s[1] for s in sizes)
if tensor.shape[1] == max_size:
return tensor
# Then pad to the maximum size
old_size = tensor.shape
new_size = list(old_size)
new_size[1] = max_size
new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
new_tensor[:, : old_size[1]] = tensor
return new_tensor
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """
        Perform an evaluation step on :obj:`model` using obj:`inputs`.

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to evaluate.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (:obj:`bool`):
                Whether or not to return the loss only.
            ignore_keys (:obj:`Lst[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.

        Return:
            Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
            logits and labels (each being optional).
        """
        # We only compute a loss when every configured label key is present.
        has_labels = all(inputs.get(k) is not None for k in self.label_names)
        inputs = self._prepare_inputs(inputs)
        if ignore_keys is None:
            if hasattr(self.model, "config"):
                ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
            else:
                ignore_keys = []
        # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
        if has_labels:
            labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
            if len(labels) == 1:
                labels = labels[0]
        else:
            labels = None
        with torch.no_grad():
            if is_sagemaker_mp_enabled():
                # SageMaker MP forward returns micro-batched outputs that must be re-concatenated.
                raw_outputs = smp_forward_only(model, inputs)
                if has_labels:
                    if isinstance(raw_outputs, dict):
                        loss_mb = raw_outputs["loss"]
                        logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
                    else:
                        loss_mb = raw_outputs[0]
                        logits_mb = raw_outputs[1:]
                    loss = loss_mb.reduce_mean().detach().cpu()
                    logits = smp_nested_concat(logits_mb)
                else:
                    loss = None
                    if isinstance(raw_outputs, dict):
                        logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
                    else:
                        logits_mb = raw_outputs
                    logits = smp_nested_concat(logits_mb)
            else:
                if has_labels:
                    loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
                    loss = loss.mean().detach()
                    if isinstance(outputs, dict):
                        logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
                    else:
                        logits = outputs[1:]
                else:
                    loss = None
                    if self.use_amp:
                        with autocast():
                            outputs = model(**inputs)
                    else:
                        outputs = model(**inputs)
                    if isinstance(outputs, dict):
                        logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
                    else:
                        logits = outputs
                    # TODO: this needs to be fixed and made cleaner later.
                    if self.args.past_index >= 0:
                        self._past = outputs[self.args.past_index - 1]
        if prediction_loss_only:
            return (loss, None, None)
        logits = nested_detach(logits)
        if len(logits) == 1:
            logits = logits[0]
        return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
    def create_model_card(
        self,
        language: Optional[str] = None,
        license: Optional[str] = None,
        tags: Optional[str] = None,
        model_name: Optional[str] = None,
        finetuned_from: Optional[str] = None,
        dataset_tags: Optional[Union[str, List[str]]] = None,
        dataset: Optional[Union[str, List[str]]] = None,
        dataset_args: Optional[Union[str, List[str]]] = None,
    ):
        """
        Generate a model card and write it to ``README.md`` in :obj:`self.args.output_dir`.

        All arguments are optional metadata forwarded to :obj:`TrainingSummary.from_trainer`,
        which builds the card from this trainer's state.
        """
        training_summary = TrainingSummary.from_trainer(
            self,
            language=language,
            license=license,
            tags=tags,
            model_name=model_name,
            finetuned_from=finetuned_from,
            dataset_tags=dataset_tags,
            dataset=dataset,
            dataset_args=dataset_args,
        )
        model_card = training_summary.to_model_card()
        with open(os.path.join(self.args.output_dir, "README.md"), "w") as f:
            f.write(model_card)
    def push_to_hub(
        self,
        repo_name: Optional[str] = None,
        repo_url: Optional[str] = None,
        commit_message: Optional[str] = "add model",
        organization: Optional[str] = None,
        private: bool = None,
        use_auth_token: Optional[Union[bool, str]] = None,
        **kwargs,
    ):
        """
        Upload `self.model` to the 🤗 model hub.

        Parameters:
            repo_name (:obj:`str`, `optional`):
                Repository name for your model or tokenizer in the hub. If not specified and :obj:`repo_url` is not
                specified either, will default to the stem of :obj:`self.args.output_dir`.
            repo_url (:obj:`str`, `optional`):
                Specify this in case you want to push to an existing repository in the hub. If unspecified, a new
                repository will be created in your namespace (unless you specify an :obj:`organization`) with
                :obj:`repo_name`.
            commit_message (:obj:`str`, `optional`, defaults to :obj:`"add model"`):
                Message to commit while pushing.
            organization (:obj:`str`, `optional`):
                Organization in which you want to push your model or tokenizer (you must be a member of this
                organization).
            private (:obj:`bool`, `optional`):
                Whether or not the repository created should be private (requires a paying subscription).
            use_auth_token (:obj:`bool` or :obj:`str`, `optional`):
                The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
                generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`). Will default to
                :obj:`True` if :obj:`repo_url` is not specified.
            kwargs:
                Additional keyword arguments passed along to :meth:`~transformers.Trainer.create_model_card`.

        Returns:
            The url of the commit of your model in the given repository, or :obj:`None` when called
            from a non-main process.
        """
        # Only the main process pushes; other ranks return early.
        if not self.is_world_process_zero():
            return
        if not isinstance(unwrap_model(self.model), PushToHubMixin):
            raise ValueError(
                "The `upload_model_to_hub` method only works for models that inherit from `PushToHubMixin` models."
            )
        if repo_url is None and repo_name is None:
            repo_name = Path(self.args.output_dir).name
        # Derive the model-card name from whichever of repo_name/repo_url is available.
        if repo_name is not None:
            model_name = repo_name
        elif repo_url is not None:
            model_name = repo_url.split("/")[-1]
        else:
            model_name = None
        self.create_model_card(model_name=model_name, **kwargs)
        # Stage the card, weights and tokenizer in a temp dir so only those files get pushed.
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy(os.path.join(self.args.output_dir, "README.md"), os.path.join(tmp_dir, "README.md"))
            unwrap_model(self.model).save_pretrained(tmp_dir)
            if self.tokenizer is not None:
                self.tokenizer.save_pretrained(tmp_dir)
            return unwrap_model(self.model)._push_to_hub(
                save_directory=tmp_dir,
                repo_name=repo_name,
                repo_url=repo_url,
                commit_message=commit_message,
                organization=organization,
                private=private,
                use_auth_token=use_auth_token,
            )
#
# Deprecated code
#
    def prediction_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> PredictionOutput:
        """
        Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.

        Works both with or without labels.

        Deprecated: retained only for :obj:`args.use_legacy_prediction_loop`; superseded by
        :obj:`evaluation_loop`. Requires a sized dataset.
        """
        if not isinstance(dataloader.dataset, collections.abc.Sized):
            raise ValueError("dataset must implement __len__")
        prediction_loss_only = (
            prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
        )
        # if eval is called w/o train init deepspeed here
        if self.args.deepspeed and not self.deepspeed:
            # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
            # from the checkpoint eventually
            deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
            self.model = deepspeed_engine.module
            self.model_wrapped = deepspeed_engine
            self.deepspeed = deepspeed_engine
            # XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
            # for example the Z3-optimizer is a must for zero3 to work even for inference - what we
            # don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
            deepspeed_engine.optimizer.optimizer = None
            deepspeed_engine.lr_scheduler = None
        model = self._wrap_model(self.model, training=False)
        # if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
        # ``train`` is running, halve it first and then put on device
        if not self.is_in_train and self.args.fp16_full_eval:
            model = model.half().to(self.args.device)
        batch_size = dataloader.batch_size
        num_examples = self.num_examples(dataloader)
        logger.info(f"***** Running {description} *****")
        logger.info(f"  Num examples = {num_examples}")
        logger.info(f"  Batch size = {batch_size}")
        losses_host: torch.Tensor = None
        preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
        labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
        world_size = max(1, self.args.world_size)
        eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
        if not prediction_loss_only:
            # The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass
            # a batch size to the sampler)
            make_multiple_of = None
            if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
                make_multiple_of = dataloader.sampler.batch_size
            preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
            labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
        model.eval()
        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
        if self.args.past_index >= 0:
            self._past = None
        self.callback_handler.eval_dataloader = dataloader
        for step, inputs in enumerate(dataloader):
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
            if loss is not None:
                # Repeat the scalar loss per sample so the gatherer sees one entry per example.
                losses = loss.repeat(batch_size)
                losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
            if logits is not None:
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
            if labels is not None:
                labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
                eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
                if not prediction_loss_only:
                    preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
                    labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
                # Set back to None to begin a new accumulation
                losses_host, preds_host, labels_host = None, None, None
        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of the evaluation loop
            delattr(self, "_past")
        # Gather all remaining tensors and put them back on the CPU
        eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
        if not prediction_loss_only:
            preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
            labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
        eval_loss = eval_losses_gatherer.finalize()
        preds = preds_gatherer.finalize() if not prediction_loss_only else None
        label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
        if self.compute_metrics is not None and preds is not None and label_ids is not None:
            metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
        else:
            metrics = {}
        # To be JSON-serializable, we need to remove numpy types or zero-d tensors
        metrics = denumpify_detensorize(metrics)
        if eval_loss is not None:
            metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
    """
    Gather `tensors` (a tensor or a nested list/tuple of tensors) across all processes and convert
    the result to numpy.

    Returns `None` when `tensors` is `None`, otherwise the gathered, numpified structure.
    """
    if tensors is None:
        return
    # Pick the gathering strategy that matches the current distributed setup.
    if is_torch_tpu_available():
        gathered = nested_xla_mesh_reduce(tensors, name)
    elif is_sagemaker_mp_enabled():
        gathered = smp_gather(tensors)
    elif self.args.local_rank != -1:
        gathered = distributed_concat(tensors)
    else:
        gathered = tensors
    return nested_numpify(gathered)
| # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.
"""
import collections
import inspect
import math
import os
import random
import re
import shutil
import sys
import tempfile
import time
import warnings
from logging import StreamHandler
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from tqdm.auto import tqdm
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
deepspeed_init,
is_deepspeed_zero3_enabled,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset, IterableDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from . import __version__
from .configuration_utils import PretrainedConfig
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .debug_utils import DebugOption, DebugUnderflowOverflow
from .dependency_versions_check import dep_version_check
from .file_utils import (
CONFIG_NAME,
WEIGHTS_NAME,
PushToHubMixin,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_torch_tpu_available,
is_training_run_on_sagemaker,
)
from .modelcard import TrainingSummary
from .modeling_utils import PreTrainedModel, unwrap_model
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
distributed_broadcast_scalars,
distributed_concat,
find_batch_size,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_truncate,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalLoopOutput,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
from .utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
# Feature flags resolved at import time from the installed torch version.
_is_torch_generator_available = False
_is_native_amp_available = False
# Callbacks every Trainer instance starts with; reporting callbacks are added per-instance.
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
# Use the rich notebook progress bar when running inside Jupyter.
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
# Optional backends: each import is guarded so the module loads without the extra dependency.
if is_apex_available():
from apex import amp
# torch >= 1.6 provides native AMP (torch.cuda.amp) and torch.Generator.
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_torch_generator_available = True
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
dep_version_check("fairscale")
import fairscale
from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.nn.wrap import auto_wrap
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
# `dist` resolves to the SageMaker data-parallel backend when enabled, plain torch.distributed otherwise.
if is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
import torch.distributed as dist
if is_sagemaker_mp_enabled():
import smdistributed.modelparallel.torch as smp
from .trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat
# On SageMaker training runs, mirror logs to stdout so they show up in CloudWatch.
if is_training_run_on_sagemaker():
logging.add_handler(StreamHandler(sys.stdout))
# Import only needed for type annotations; avoids a hard optuna dependency at runtime.
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset` or :obj:`torch.utils.data.dataset.IterableDataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
Note that if it's a :obj:`torch.utils.data.dataset.IterableDataset` with some randomization and you are
training in a distributed fashion, your iterable dataset should either use a internal attribute
:obj:`generator` that is a :obj:`torch.Generator` for the randomization that must be identical on all
processes (and the Trainer will manually set the seed of this :obj:`generator` at each epoch) or have a
:obj:`set_epoch()` method that internally sets the seed of the RNGs used.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the
maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyper parameters (such as layer count, sizes of inner
layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False` .
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
"""
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
self,
model: Union[PreTrainedModel, torch.nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional[PreTrainedTokenizerBase] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
"""Initialize the Trainer. See the class docstring for the meaning of each argument."""
# Fall back to default TrainingArguments writing into ./tmp_trainer when none are given.
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using model
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# force device and distributed setup init explicitly
args._setup_devices
# Resolve the model: either passed directly, or produced by `model_init` (HP search case).
# Passing both is deprecated; `model_init` takes precedence on `train()`.
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
# Model parallelism (layers split across GPUs) as flagged by the model itself.
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
# one place to sort out whether to place the model on device or not
# postpone switching model to cuda when:
# 1. MP - since we are trying to fit a much bigger than 1 gpu model
# 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
# and we only use deepspeed for training at the moment
# 3. full fp16 eval - since the model needs to be half'ed first
# 4. Sharded DDP - same as MP
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or args.deepspeed
or (args.fp16_full_eval and not args.do_train)
or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
):
self.place_model_on_device = False
# Data handling: default collator depends on whether a tokenizer is available for padding.
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
if self.place_model_on_device:
model = model.to(args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
# Callbacks: framework defaults + reporting integrations + user-supplied extras.
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create output directory if needed
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
# Reject the pre-v4 data-collator API (`collate_batch`).
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
self._signature_columns = None
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
# Pick the GradScaler implementation matching the parallelism backend.
if is_sagemaker_mp_enabled():
self.scaler = smp.amp.GradScaler()
elif self.sharded_ddp is not None:
self.scaler = ShardedGradScaler()
else:
self.scaler = torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error.
if is_sagemaker_mp_enabled() and self.use_amp and args.max_grad_norm is not None and args.max_grad_norm > 0:
raise ValueError(
"SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. Pass "
"along 'max_grad_norm': 0 in your hyperparameters."
)
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
# Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then
# returned to 0 every time flos need to be logged
self.current_flos = 0
self.hp_search_backend = None
self.use_tune_checkpoints = False
# QA models predict span positions instead of a `labels` tensor.
default_label_names = (
["start_positions", "end_positions"]
if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
# very last
self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
    """
    Register a new :class:`~transformer.TrainerCallback`.

    Args:
        callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
            Either a :class:`~transformer.TrainerCallback` subclass (a member of that class will be
            instantiated) or an already constructed instance to append to the callback list.
    """
    self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
    """
    Remove and return a callback from the current list of :class:`~transformer.TrainerCallback`.

    If the callback is not found, returns :obj:`None` (and no error is raised).

    Args:
        callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
            Either a :class:`~transformer.TrainerCallback` class (the first member of that class in
            the list is popped) or a specific instance to remove.

    Returns:
        :class:`~transformer.TrainerCallback`: The callback removed, if found.
    """
    return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
    """
    Remove a callback from the current list of :class:`~transformer.TrainerCallback`, discarding it.

    Args:
        callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
            Either a :class:`~transformer.TrainerCallback` class (the first member of that class in
            the list is removed) or a specific instance to remove.
    """
    self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
    """
    Drop dataset columns that ``model.forward`` does not accept (keeping the label columns),
    unless ``args.remove_unused_columns`` is disabled.

    Args:
        dataset: The :obj:`datasets.Dataset` to filter.
        description: Human-readable name of the split ("training", "evaluation", ...) for logging.
    """
    if not self.args.remove_unused_columns:
        return dataset
    if self._signature_columns is None:
        # Cache the accepted argument names from the model's forward signature.
        forward_params = inspect.signature(self.model.forward).parameters
        # Labels may be named label or label_ids, the default data collator handles that.
        self._signature_columns = list(forward_params.keys()) + ["label", "label_ids"]
    kept_columns = [name for name in self._signature_columns if name in dataset.column_names]
    dropped_columns = list(set(dataset.column_names) - set(self._signature_columns))
    if dropped_columns:
        where = "" if description is None else f"in the {description} set "
        logger.info(
            f"The following columns {where} don't have a corresponding argument in "
            f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(dropped_columns)}."
        )
    if version.parse(datasets.__version__) < version.parse("1.4.0"):
        # Older datasets releases cannot remove columns; restrict the output format instead.
        dataset.set_format(
            type=dataset.format["type"], columns=kept_columns, format_kwargs=dataset.format["format_kwargs"]
        )
        return dataset
    return dataset.remove_columns(dropped_columns)
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
"""
Build the training sampler: length-grouped or random, single-process or distributed.

Returns :obj:`None` when the dataset is not sized (iterable datasets carry no sampler).
"""
if not isinstance(self.train_dataset, collections.abc.Sized):
return None
generator = None
# Single-process runs on torch >= 1.6 use a dedicated generator seeded randomly per call.
if self.args.world_size <= 1 and _is_torch_generator_available:
generator = torch.Generator()
generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
# Build the sampler.
if self.args.group_by_length:
# Pre-computed lengths come from the dataset column when available; otherwise the
# sampler computes them from `model_input_name`.
if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):
lengths = (
self.train_dataset[self.args.length_column_name]
if self.args.length_column_name in self.train_dataset.column_names
else None
)
else:
lengths = None
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
lengths=lengths,
model_input_name=model_input_name,
generator=generator,
)
else:
return DistributedLengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
lengths=lengths,
model_input_name=model_input_name,
seed=self.args.seed,
)
else:
if self.args.world_size <= 1:
if _is_torch_generator_available:
return RandomSampler(self.train_dataset, generator=generator)
return RandomSampler(self.train_dataset)
elif (
self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]
and not self.args.dataloader_drop_last
):
# Use a loop for TPUs when drop_last is False to have all batches have the same size.
return DistributedSamplerWithLoop(
self.train_dataset,
batch_size=self.args.per_device_train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
seed=self.args.seed,
)
else:
return DistributedSampler(
self.train_dataset,
num_replicas=self.args.world_size,
rank=self.args.process_index,
seed=self.args.seed,
)
def get_train_dataloader(self) -> DataLoader:
    """
    Returns the training :class:`~torch.utils.data.DataLoader`.

    Uses no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, otherwise a
    random sampler (adapted to distributed training if necessary).

    Subclass and override this method if you want to inject some custom behavior.
    """
    if self.train_dataset is None:
        raise ValueError("Trainer: training requires a train_dataset.")
    train_dataset = self.train_dataset
    if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
        train_dataset = self._remove_unused_columns(train_dataset, description="training")
    # Options shared by both the iterable and the sized-dataset code paths.
    shared_kwargs = dict(
        batch_size=self.args.train_batch_size,
        collate_fn=self.data_collator,
        num_workers=self.args.dataloader_num_workers,
        pin_memory=self.args.dataloader_pin_memory,
    )
    if isinstance(train_dataset, torch.utils.data.dataset.IterableDataset):
        # Iterable datasets take no sampler; shard them manually across processes instead.
        if self.args.world_size > 1:
            train_dataset = IterableDatasetShard(
                train_dataset,
                batch_size=self.args.train_batch_size,
                drop_last=self.args.dataloader_drop_last,
                num_processes=self.args.world_size,
                process_index=self.args.process_index,
            )
        return DataLoader(train_dataset, **shared_kwargs)
    return DataLoader(
        train_dataset,
        sampler=self._get_train_sampler(),
        drop_last=self.args.dataloader_drop_last,
        **shared_kwargs,
    )
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
    """
    Build the sampler used for evaluation/prediction.

    The deprecated legacy prediction loop keeps the old sequential distributed samplers; the
    current loop uses a plain sequential sampler locally and a :class:`ShardSampler` when
    running across several processes.
    """
    if self.args.use_legacy_prediction_loop:
        # Deprecated code path.
        if is_torch_tpu_available():
            return SequentialDistributedSampler(
                eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
            )
        if is_sagemaker_mp_enabled():
            return SequentialDistributedSampler(
                eval_dataset,
                num_replicas=smp.dp_size(),
                rank=smp.dp_rank(),
                batch_size=self.args.per_device_eval_batch_size,
            )
        if self.args.local_rank != -1:
            return SequentialDistributedSampler(eval_dataset)
        return SequentialSampler(eval_dataset)
    if self.args.world_size <= 1:
        return SequentialSampler(eval_dataset)
    return ShardSampler(
        eval_dataset,
        batch_size=self.args.per_device_eval_batch_size,
        num_processes=self.args.world_size,
        process_index=self.args.process_index,
    )
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
    """
    Returns the evaluation :class:`~torch.utils.data.DataLoader`.

    Subclass and override this method if you want to inject some custom behavior.

    Args:
        eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
            If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
            columns not accepted by the ``model.forward()`` method are automatically removed. It
            must implement :obj:`__len__`.
    """
    if eval_dataset is None and self.eval_dataset is None:
        raise ValueError("Trainer: evaluation requires an eval_dataset.")
    eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
    if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
        eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation")
    # Options shared by both the iterable and the sized-dataset code paths.
    shared_kwargs = dict(
        batch_size=self.args.eval_batch_size,
        collate_fn=self.data_collator,
        num_workers=self.args.dataloader_num_workers,
        pin_memory=self.args.dataloader_pin_memory,
    )
    if isinstance(eval_dataset, torch.utils.data.dataset.IterableDataset):
        # Iterable datasets take no sampler; shard them manually across processes instead.
        if self.args.world_size > 1:
            eval_dataset = IterableDatasetShard(
                eval_dataset,
                batch_size=self.args.eval_batch_size,
                drop_last=self.args.dataloader_drop_last,
                num_processes=self.args.world_size,
                process_index=self.args.process_index,
            )
        return DataLoader(eval_dataset, **shared_kwargs)
    return DataLoader(
        eval_dataset,
        sampler=self._get_eval_sampler(eval_dataset),
        drop_last=self.args.dataloader_drop_last,
        **shared_kwargs,
    )
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
    """
    Returns the test :class:`~torch.utils.data.DataLoader`.

    Subclass and override this method if you want to inject some custom behavior.

    Args:
        test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
            The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by
            the ``model.forward()`` method are automatically removed. It must implement
            :obj:`__len__`.
    """
    if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
        test_dataset = self._remove_unused_columns(test_dataset, description="test")
    if isinstance(test_dataset, torch.utils.data.dataset.IterableDataset):
        if self.args.world_size > 1:
            test_dataset = IterableDatasetShard(
                test_dataset,
                batch_size=self.args.eval_batch_size,
                drop_last=self.args.dataloader_drop_last,
                num_processes=self.args.world_size,
                process_index=self.args.process_index,
            )
        return DataLoader(
            test_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
        )
    # Prediction uses the same batch size as evaluation.
    # NOTE(review): unlike get_eval_dataloader, this branch does not forward
    # `dataloader_num_workers` — confirm whether that is intentional.
    return DataLoader(
        test_dataset,
        sampler=self._get_eval_sampler(test_dataset),
        batch_size=self.args.eval_batch_size,
        collate_fn=self.data_collator,
        drop_last=self.args.dataloader_drop_last,
        pin_memory=self.args.dataloader_pin_memory,
    )
def create_optimizer_and_scheduler(self, num_training_steps: int):
    """
    Setup the optimizer and the learning rate scheduler.

    A sane default is provided; to use something else, pass a tuple through the Trainer's
    :obj:`optimizers` init argument, or subclass and override this method (or
    :obj:`create_optimizer` and/or :obj:`create_scheduler`).

    Args:
        num_training_steps (int): The total number of optimization steps planned for training.
    """
    self.create_optimizer()
    self.create_scheduler(num_training_steps)
def create_optimizer(self):
    """
    Setup the optimizer.

    We provide a reasonable default that works well. If you want to use something else, you can
    pass a tuple in the Trainer's init through :obj:`optimizers`, or subclass and override this
    method in a subclass.
    """
    if self.optimizer is None:
        # Apply weight decay to every parameter except biases and LayerNorm weights.
        decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])
        decay_parameters = [name for name in decay_parameters if "bias" not in name]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
                "weight_decay": self.args.weight_decay,
            },
            {
                "params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
                "weight_decay": 0.0,
            },
        ]
        # Fix: removed a redundant `optimizer_cls = Adafactor if ... else AdamW` pre-assignment
        # that was dead code — the if/else below is the single source of truth.
        if self.args.adafactor:
            optimizer_cls = Adafactor
            optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
        else:
            optimizer_cls = AdamW
            optimizer_kwargs = {
                "betas": (self.args.adam_beta1, self.args.adam_beta2),
                "eps": self.args.adam_epsilon,
            }
        optimizer_kwargs["lr"] = self.args.learning_rate
        if self.sharded_ddp == ShardedDDPOption.SIMPLE:
            # fairscale OSS shards optimizer state across data-parallel workers.
            self.optimizer = OSS(
                params=optimizer_grouped_parameters,
                optim=optimizer_cls,
                **optimizer_kwargs,
            )
        else:
            self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
    if is_sagemaker_mp_enabled():
        self.optimizer = smp.DistributedOptimizer(self.optimizer)
def create_scheduler(self, num_training_steps: int):
    """
    Setup the scheduler. The optimizer of the trainer must have been set up before this method is
    called.

    Args:
        num_training_steps (int): The number of training steps to do.
    """
    if self.lr_scheduler is not None:
        return
    # An explicit warmup_steps wins; otherwise derive the warmup length from warmup_ratio.
    if self.args.warmup_steps > 0:
        warmup_steps = self.args.warmup_steps
    else:
        warmup_steps = math.ceil(num_training_steps * self.args.warmup_ratio)
    self.lr_scheduler = get_scheduler(
        self.args.lr_scheduler_type,
        self.optimizer,
        num_warmup_steps=warmup_steps,
        num_training_steps=num_training_steps,
    )
def num_examples(self, dataloader: DataLoader) -> int:
    """
    Helper returning the number of samples behind a :class:`~torch.utils.data.DataLoader`, by
    asking its dataset.

    Raises an exception if the underlying dataset does not implement :obj:`__len__`.
    """
    return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
    """
    Copy the hyperparameters of `trial` into `self.args` before a hyperparameter-search run.

    Args:
        trial: An optuna trial (optuna backend) or a plain dict of parameters (Ray Tune backend).

    Raises:
        AttributeError: if a searched hyperparameter has no matching `TrainingArguments` field.
    """
    self._trial = trial
    if self.hp_search_backend is None or trial is None:
        return
    if self.hp_search_backend == HPSearchBackend.OPTUNA:
        params = self.hp_space(trial)
    elif self.hp_search_backend == HPSearchBackend.RAY:
        params = trial
        # Ray may inject a "wandb" bookkeeping key; it is not a training argument.
        params.pop("wandb", None)
    for key, value in params.items():
        if not hasattr(self.args, key):
            raise AttributeError(
                f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
            )
        old_attr = getattr(self.args, key, None)
        # Casting value to the proper type
        if old_attr is not None:
            value = type(old_attr)(value)
        setattr(self.args, key, value)
    if self.hp_search_backend == HPSearchBackend.OPTUNA:
        # Fix: use lazy %-style formatting. The previous print-style call
        # `logger.info("Trial:", trial.params)` passed trial.params as a %-format argument with
        # no placeholder, so the params were never rendered in the log message.
        logger.info("Trial: %s", trial.params)
    if self.args.deepspeed:
        # Rebuild the deepspeed config to reflect the updated training parameters
        from transformers.integrations import DeepSpeedConfigHF
        self.args.deepspeed_config_hf = DeepSpeedConfigHF(self.args)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
    """
    Save a checkpoint inside the current Ray Tune trial directory.

    Only active when ``self.use_tune_checkpoints`` is set. The model is always saved;
    the trainer state and optimizer/scheduler states are only written from the main
    (world-process-zero) process.
    """
    from ray import tune

    if not self.use_tune_checkpoints:
        return
    with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
        output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
        self.save_model(output_dir)
        if self.is_world_process_zero():
            state_path = os.path.join(output_dir, "trainer_state.json")
            self.state.save_to_json(state_path)
            torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
    """
    Instantiate a fresh model through ``self.model_init``.

    ``model_init`` may take zero arguments or a single ``trial`` argument; any other
    arity is rejected, as is a ``None`` return value.

    Args:
        trial: Current hyper-parameter search trial, forwarded to one-argument
            ``model_init`` callables.

    Returns:
        The freshly instantiated model.
    """
    argcount = len(inspect.signature(self.model_init).parameters)
    if argcount == 0:
        model = self.model_init()
    elif argcount == 1:
        model = self.model_init(trial)
    else:
        raise RuntimeError("model_init should have 0 or 1 argument.")
    if model is None:
        raise RuntimeError("model_init should not return None.")
    return model
def _wrap_model(self, model, training=True):
    """
    Wrap ``model`` for the configured distributed / mixed-precision setup.

    Depending on the environment and training arguments this returns the model wrapped
    in SageMaker MP, DeepSpeed, Apex AMP, ``DataParallel``, Sharded/FullySharded DDP,
    SageMaker DP or ``DistributedDataParallel`` — or unchanged if no wrapping applies.

    Args:
        model: The model to wrap.
        training (bool): Whether the wrapped model will be trained; for evaluation the
            distributed wrapping is skipped (only apex/DataParallel are applied).

    Returns:
        The (possibly wrapped) model.
    """
    if is_sagemaker_mp_enabled():
        # Wrapping the base model twice in a DistributedModel will raise an error.
        if isinstance(self.model_wrapped, smp.model.DistributedModel):
            return self.model_wrapped
        return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)
    # already initialized its own DDP and AMP
    if self.deepspeed:
        return self.deepspeed
    # train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
    if unwrap_model(model) is not model:
        return model
    # Mixed precision training with apex (torch < 1.6)
    if self.use_apex and training:
        model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
    # Multi-gpu training (should be after apex fp16 initialization)
    if self.args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Note: in torch.distributed mode, there's no point in wrapping the model
    # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
    if not training:
        return model
    # Distributed training (should be after apex fp16 initialization)
    if self.sharded_ddp is not None:
        # Sharded DDP!
        if self.sharded_ddp == ShardedDDPOption.SIMPLE:
            model = ShardedDDP(model, self.optimizer)
        else:
            mixed_precision = self.args.fp16
            cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
            zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
            # XXX: Breaking the self.model convention but I see no way around it for now.
            if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
                model = auto_wrap(model)
            # NOTE: self.model is rebound here so later checkpoint logic sees the
            # FullyShardedDDP wrapper rather than the original module.
            self.model = model = FullyShardedDDP(
                model,
                mixed_precision=mixed_precision,
                reshard_after_forward=zero_3,
                cpu_offload=cpu_offload,
            ).to(self.args.device)
    elif is_sagemaker_dp_enabled():
        model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
    elif self.args.local_rank != -1:
        if self.args.ddp_find_unused_parameters is not None:
            find_unused_parameters = self.args.ddp_find_unused_parameters
        elif isinstance(model, PreTrainedModel):
            # find_unused_parameters breaks checkpointing as per
            # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
            find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
        else:
            find_unused_parameters = True
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[self.args.local_rank],
            output_device=self.args.local_rank,
            find_unused_parameters=find_unused_parameters,
        )
    return model
def train(
    self,
    resume_from_checkpoint: Optional[Union[str, bool]] = None,
    trial: Union["optuna.Trial", Dict[str, Any]] = None,
    **kwargs,
):
    """
    Main training entry point.

    Args:
        resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
            If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
            :class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
            `args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
            training will resume from the model/optimizer/scheduler states loaded here.
        trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
            The trial run or the hyperparameter dictionary for hyperparameter search.
        kwargs:
            Additional keyword arguments used to hide deprecated arguments

    Returns:
        A ``TrainOutput`` with the final global step, the average training loss and the
        speed/flos metrics.
    """
    # memory metrics - must set up as early as possible
    self._memory_tracker.start()
    args = self.args
    self.is_in_train = True
    # do_train is not a reliable argument, as it might not be set and .train() still called, so
    # the following is a workaround:
    if args.fp16_full_eval and not args.do_train:
        self.model = self.model.to(args.device)
    if "model_path" in kwargs:
        # Backward compatibility: `model_path` was renamed to `resume_from_checkpoint`.
        resume_from_checkpoint = kwargs.pop("model_path")
        warnings.warn(
            "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
            "instead.",
            FutureWarning,
        )
    if len(kwargs) > 0:
        raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
    # This might change the seed so needs to run first.
    self._hp_search_setup(trial)
    # Model re-init
    model_reloaded = False
    if self.model_init is not None:
        # Seed must be set before instantiating the model when using model_init.
        set_seed(args.seed)
        self.model = self.call_model_init(trial)
        model_reloaded = True
        # Reinitializes optimizer and scheduler
        self.optimizer, self.lr_scheduler = None, None
    # Load potential model checkpoint
    if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
        resume_from_checkpoint = get_last_checkpoint(args.output_dir)
        if resume_from_checkpoint is None:
            raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
    if resume_from_checkpoint is not None:
        if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
            raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
        logger.info(f"Loading model from {resume_from_checkpoint}).")
        if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)):
            config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))
            checkpoint_version = config.transformers_version
            if checkpoint_version is not None and checkpoint_version != __version__:
                logger.warn(
                    f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
                    f"Transformers but your current version is {__version__}. This is not recommended and could "
                    "yield to errors or unwanted behaviors."
                )
        if args.deepspeed:
            # will be resumed in deepspeed_init
            pass
        else:
            # We load the model state dict on the CPU to avoid an OOM error.
            state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
            # If the model is on the GPU, it still works!
            self._load_state_dict_in_model(state_dict)
    # If model was re-initialized, put it on the right device and update self.model_wrapped
    if model_reloaded:
        if self.place_model_on_device:
            self.model = self.model.to(args.device)
        self.model_wrapped = self.model
    # Keeping track whether we can can len() on the dataset or not
    train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
    # Data loader and number of training steps
    train_dataloader = self.get_train_dataloader()
    # Setting up training control variables:
    # number of training epochs: num_train_epochs
    # number of training steps per epoch: num_update_steps_per_epoch
    # total number of training steps to execute: max_steps
    total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
    if train_dataset_is_sized:
        num_update_steps_per_epoch = len(train_dataloader) // args.gradient_accumulation_steps
        num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
        if args.max_steps > 0:
            max_steps = args.max_steps
            num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
                args.max_steps % num_update_steps_per_epoch > 0
            )
            # May be slightly incorrect if the last batch in the training datalaoder has a smaller size but it's
            # the best we can do.
            num_train_samples = args.max_steps * total_train_batch_size
        else:
            max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
            num_train_epochs = math.ceil(args.num_train_epochs)
            num_train_samples = len(self.train_dataset) * args.num_train_epochs
    else:
        # see __init__. max_steps is set when the dataset has no __len__
        max_steps = args.max_steps
        num_train_epochs = int(args.num_train_epochs)
        num_update_steps_per_epoch = max_steps
        num_train_samples = args.max_steps * total_train_batch_size
    if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
        debug_overflow = DebugUnderflowOverflow(self.model) # noqa
    # DeepSpeed creates the optimizer/scheduler itself; sharded DDP (non-simple) needs
    # them created only after the model is wrapped.
    delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
    if args.deepspeed:
        deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
            self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
        )
        self.model = deepspeed_engine.module
        self.model_wrapped = deepspeed_engine
        self.deepspeed = deepspeed_engine
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
    elif not delay_optimizer_creation:
        self.create_optimizer_and_scheduler(num_training_steps=max_steps)
    self.state = TrainerState()
    self.state.is_hyper_param_search = trial is not None
    model = self._wrap_model(self.model_wrapped)
    # for the rest of this function `model` is the outside model, whether it was wrapped or not
    if model is not self.model:
        self.model_wrapped = model
    if delay_optimizer_creation:
        self.create_optimizer_and_scheduler(num_training_steps=max_steps)
    # Check if saved optimizer or scheduler states exist
    self._load_optimizer_and_scheduler(resume_from_checkpoint)
    # important: at this point:
    # self.model is the Transformers Model
    # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
    # Train!
    num_examples = (
        self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * args.max_steps
    )
    logger.info("***** Running training *****")
    logger.info(f" Num examples = {num_examples}")
    logger.info(f" Num Epochs = {num_train_epochs}")
    logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
    logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f" Total optimization steps = {max_steps}")
    self.state.epoch = 0
    start_time = time.time()
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    steps_trained_progress_bar = None
    # Check if continuing training from a checkpoint
    if resume_from_checkpoint is not None and os.path.isfile(
        os.path.join(resume_from_checkpoint, "trainer_state.json")
    ):
        self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
        epochs_trained = self.state.global_step // num_update_steps_per_epoch
        if not args.ignore_data_skip:
            # Counted in micro-batches, hence the gradient_accumulation_steps factor.
            steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
            steps_trained_in_current_epoch *= args.gradient_accumulation_steps
        else:
            steps_trained_in_current_epoch = 0
        logger.info(" Continuing training from checkpoint, will skip to saved global_step")
        logger.info(f" Continuing training from epoch {epochs_trained}")
        logger.info(f" Continuing training from global step {self.state.global_step}")
        if not args.ignore_data_skip:
            logger.info(
                f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
                "batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` "
                "flag to your launch command, but you will resume the training on data already seen by your model."
            )
            if self.is_local_process_zero() and not args.disable_tqdm:
                steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)
                steps_trained_progress_bar.set_description("Skipping the first batches")
    # Update the references
    self.callback_handler.model = self.model
    self.callback_handler.optimizer = self.optimizer
    self.callback_handler.lr_scheduler = self.lr_scheduler
    self.callback_handler.train_dataloader = train_dataloader
    self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
    self.state.trial_params = hp_params(trial) if trial is not None else None
    # This should be the same if the state has been saved but in case the training arguments changed, it's safer
    # to set this after the load.
    self.state.max_steps = max_steps
    self.state.num_train_epochs = num_train_epochs
    self.state.is_local_process_zero = self.is_local_process_zero()
    self.state.is_world_process_zero = self.is_world_process_zero()
    # tr_loss is a tensor to avoid synchronization of TPUs through .item()
    tr_loss = torch.tensor(0.0).to(args.device)
    # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
    self._total_loss_scalar = 0.0
    self._globalstep_last_logged = self.state.global_step
    model.zero_grad()
    self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
    # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
    if not args.ignore_data_skip:
        for epoch in range(epochs_trained):
            # We just need to begin an iteration to create the randomization of the sampler.
            for _ in train_dataloader:
                break
    for epoch in range(epochs_trained, num_train_epochs):
        if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
            train_dataloader.sampler.set_epoch(epoch)
        elif isinstance(train_dataloader.dataset, IterableDatasetShard):
            train_dataloader.dataset.set_epoch(epoch)
        if is_torch_tpu_available():
            parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
            epoch_iterator = parallel_loader
        else:
            epoch_iterator = train_dataloader
        # Reset the past mems state at the beginning of each epoch if necessary.
        if args.past_index >= 0:
            self._past = None
        steps_in_epoch = (
            len(epoch_iterator) if train_dataset_is_sized else args.max_steps * args.gradient_accumulation_steps
        )
        self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)
        for step, inputs in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                if steps_trained_progress_bar is not None:
                    steps_trained_progress_bar.update(1)
                if steps_trained_in_current_epoch == 0:
                    self._load_rng_state(resume_from_checkpoint)
                continue
            elif steps_trained_progress_bar is not None:
                steps_trained_progress_bar.close()
                steps_trained_progress_bar = None
            if step % args.gradient_accumulation_steps == 0:
                self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
            if (
                ((step + 1) % args.gradient_accumulation_steps != 0)
                and args.local_rank != -1
                and args._no_sync_in_gradient_accumulation
            ):
                # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
                with model.no_sync():
                    tr_loss += self.training_step(model, inputs)
            else:
                tr_loss += self.training_step(model, inputs)
            self.current_flos += float(self.floating_point_ops(inputs))
            # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
            if self.deepspeed:
                self.deepspeed.step()
            if (step + 1) % args.gradient_accumulation_steps == 0 or (
                # last step in epoch but step is always smaller than gradient_accumulation_steps
                steps_in_epoch <= args.gradient_accumulation_steps
                and (step + 1) == steps_in_epoch
            ):
                # Gradient clipping
                if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:
                    # deepspeed does its own clipping
                    if self.use_amp:
                        # AMP: gradients need unscaling
                        self.scaler.unscale_(self.optimizer)
                    if hasattr(self.optimizer, "clip_grad_norm"):
                        # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
                        self.optimizer.clip_grad_norm(args.max_grad_norm)
                    elif hasattr(model, "clip_grad_norm_"):
                        # Some models (like FullyShardedDDP) have a specific way to do gradient clipping
                        model.clip_grad_norm_(args.max_grad_norm)
                    else:
                        # Revert to normal clipping otherwise, handling Apex or full precision
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
                            args.max_grad_norm,
                        )
                # Optimizer step
                optimizer_was_run = True
                if self.deepspeed:
                    pass # called outside the loop
                elif is_torch_tpu_available():
                    xm.optimizer_step(self.optimizer)
                elif self.use_amp:
                    # A decreased scale after .update() means the step was skipped
                    # because of inf/nan gradients.
                    scale_before = self.scaler.get_scale()
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
                    scale_after = self.scaler.get_scale()
                    optimizer_was_run = scale_before <= scale_after
                else:
                    self.optimizer.step()
                if optimizer_was_run and not self.deepspeed:
                    self.lr_scheduler.step()
                model.zero_grad()
                self.state.global_step += 1
                # Fractional epoch, used by callbacks and logging.
                self.state.epoch = epoch + (step + 1) / steps_in_epoch
                self.control = self.callback_handler.on_step_end(args, self.state, self.control)
                self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
            if self.control.should_epoch_stop or self.control.should_training_stop:
                break
        self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
        self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
        if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
            if is_torch_tpu_available():
                # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                xm.master_print(met.metrics_report())
            else:
                logger.warning(
                    "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
                    "configured. Check your training configuration if this is unexpected."
                )
        if self.control.should_training_stop:
            break
    if args.past_index and hasattr(self, "_past"):
        # Clean the state at the end of training
        delattr(self, "_past")
    logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
    if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
        # Wait for everyone to get here so we are sur the model has been saved by process 0.
        if is_torch_tpu_available():
            xm.rendezvous("load_best_model_at_end")
        elif args.local_rank != -1:
            dist.barrier()
        logger.info(
            f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
        )
        # We load the model state dict on the CPU to avoid an OOM error.
        state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME), map_location="cpu")
        # If the model is on the GPU, it still works!
        self._load_state_dict_in_model(state_dict)
        if self.deepspeed:
            self.deepspeed.load_checkpoint(
                self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
            )
    metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
    self.store_flos()
    metrics["total_flos"] = self.state.total_flos
    self.log(metrics)
    self.control = self.callback_handler.on_train_end(args, self.state, self.control)
    # add remaining tr_loss
    self._total_loss_scalar += tr_loss.item()
    self.is_in_train = False
    self._memory_tracker.stop_and_update_metrics(metrics)
    return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
def _load_state_dict_in_model(self, state_dict):
load_result = self.model.load_state_dict(state_dict, strict=False)
if len(load_result.missing_keys) != 0:
if set(load_result.missing_keys) == set(self.model._keys_to_ignore_on_save):
self.model.tie_weights()
else:
logger.warn(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
if len(load_result.unexpected_keys) != 0:
logger.warn(f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}.")
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
    """
    Depending on the ``self.control`` flags, log training metrics, run evaluation
    and/or save a checkpoint at the current step.

    Args:
        tr_loss: Running training-loss tensor; zeroed in place after logging.
        model: The (possibly wrapped) model, forwarded to checkpoint saving.
        trial: Current hyper-parameter search trial (or ``None``).
        epoch: Current epoch, reported to the HP search backend with eval metrics.
    """
    if self.control.should_log:
        logs: Dict[str, float] = {}
        tr_loss_scalar = tr_loss.item()
        # reset tr_loss to zero
        tr_loss -= tr_loss
        # Average the accumulated loss over the steps elapsed since the last log.
        logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
        logs["learning_rate"] = self._get_learning_rate()
        self._total_loss_scalar += tr_loss_scalar
        self._globalstep_last_logged = self.state.global_step
        self.store_flos()
        self.log(logs)
    metrics = None
    if self.control.should_evaluate:
        metrics = self.evaluate()
        self._report_to_hp_search(trial, epoch, metrics)
    if self.control.should_save:
        self._save_checkpoint(model, trial, metrics=metrics)
        self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _load_rng_state(self, checkpoint):
# Load RNG states from `checkpoint`
if checkpoint is None:
return
local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
if local_rank != -1:
rng_file = os.path.join(checkpoint, f"rng_state_{local_rank}.pth")
if not os.path.isfile(os.path.join(checkpoint, rng_file)):
logger.info(
f"Didn't find an RNG file for process {local_rank}, if you are resuming a training that "
"wasn't launched in a distributed fashion, reproducibility is not guaranteed."
)
return
else:
rng_file = os.path.join(checkpoint, "rng_state.pth")
if not os.path.isfile(os.path.join(checkpoint, rng_file)):
logger.info(
"Didn't find an RNG file, if you are resuming a training that was launched in a distributed "
"fashion, reproducibility is not guaranteed."
)
return
checkpoint_rng_state = torch.load(rng_file)
random.setstate(checkpoint_rng_state["python"])
np.random.set_state(checkpoint_rng_state["numpy"])
torch.random.set_rng_state(checkpoint_rng_state["cpu"])
if torch.cuda.is_available():
if self.args.local_rank != -1:
torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"])
else:
torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
if is_torch_tpu_available():
xm.set_rng_state(checkpoint_rng_state["xla"])
def _save_checkpoint(self, model, trial, metrics=None):
    """
    Save a full training checkpoint: model weights, optimizer/scheduler states,
    grad-scaler state (AMP), RNG states and the serialized trainer state, then rotate
    old checkpoints.

    Args:
        model: The (possibly wrapped) model; ``self.model`` / ``self.deepspeed`` are
            what is actually serialized (see the note below).
        trial: Current hyper-parameter search trial, used to pick the run directory.
        metrics (dict, optional): Latest evaluation metrics, used to update the best
            model checkpoint when ``metric_for_best_model`` is set.
    """
    # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
    # want to save except FullyShardedDDP.
    # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
    # Save model checkpoint
    checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
    if self.hp_search_backend is not None and trial is not None:
        # During an HP search, each trial checkpoints into its own run sub-directory.
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            run_id = trial.number
        else:
            from ray import tune
            run_id = tune.get_trial_id()
        run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
        run_dir = os.path.join(self.args.output_dir, run_name)
    else:
        run_dir = self.args.output_dir
    self.store_flos()
    output_dir = os.path.join(run_dir, checkpoint_folder)
    self.save_model(output_dir)
    if self.deepspeed:
        # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed
        # config `stage3_gather_fp16_weights_on_model_save` is True
        self.deepspeed.save_checkpoint(output_dir)
    # Save optimizer and scheduler
    if self.sharded_ddp == ShardedDDPOption.SIMPLE:
        self.optimizer.consolidate_state_dict()
    if is_torch_tpu_available():
        xm.rendezvous("saving_optimizer_states")
        xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
        with warnings.catch_warnings(record=True) as caught_warnings:
            xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
        reissue_pt_warnings(caught_warnings)
    elif is_sagemaker_mp_enabled():
        if smp.dp_rank() == 0:
            # Consolidate the state dict on all processed of dp_rank 0
            opt_state_dict = self.optimizer.state_dict()
            # Save it and the scheduler on the main process
            if self.is_world_process_zero():
                torch.save(opt_state_dict, os.path.join(output_dir, "optimizer.pt"))
                with warnings.catch_warnings(record=True) as caught_warnings:
                    torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                reissue_pt_warnings(caught_warnings)
                if self.use_amp:
                    torch.save(self.scaler.state_dict(), os.path.join(output_dir, "scaler.pt"))
    elif self.is_world_process_zero() and not self.deepspeed:
        # deepspeed.save_checkpoint above saves model/optim/sched
        torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
        with warnings.catch_warnings(record=True) as caught_warnings:
            torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
        reissue_pt_warnings(caught_warnings)
        if self.use_amp:
            torch.save(self.scaler.state_dict(), os.path.join(output_dir, "scaler.pt"))
    # Determine the new best metric / best model checkpoint
    if metrics is not None and self.args.metric_for_best_model is not None:
        metric_to_check = self.args.metric_for_best_model
        if not metric_to_check.startswith("eval_"):
            metric_to_check = f"eval_{metric_to_check}"
        metric_value = metrics[metric_to_check]
        operator = np.greater if self.args.greater_is_better else np.less
        if (
            self.state.best_metric is None
            or self.state.best_model_checkpoint is None
            or operator(metric_value, self.state.best_metric)
        ):
            self.state.best_metric = metric_value
            self.state.best_model_checkpoint = output_dir
    # Save the Trainer state
    if self.is_world_process_zero():
        self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
    # Save RNG state in non-distributed training
    rng_states = {
        "python": random.getstate(),
        "numpy": np.random.get_state(),
        "cpu": torch.random.get_rng_state(),
    }
    if torch.cuda.is_available():
        if self.args.local_rank == -1:
            # In non distributed, we save the global CUDA RNG state (will take care of DataParallel)
            rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
        else:
            rng_states["cuda"] = torch.cuda.random.get_rng_state()
    if is_torch_tpu_available():
        rng_states["xla"] = xm.get_rng_state()
    # A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
    # not yet exist.
    os.makedirs(output_dir, exist_ok=True)
    local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
    if local_rank == -1:
        torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
    else:
        torch.save(rng_states, os.path.join(output_dir, f"rng_state_{local_rank}.pth"))
    # Maybe delete some older checkpoints.
    if self.is_world_process_zero():
        self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
def _load_optimizer_and_scheduler(self, checkpoint):
    """
    If optimizer and scheduler states exist in ``checkpoint``, load them.

    No-op when ``checkpoint`` is ``None``, or when DeepSpeed is active (DeepSpeed
    restores these states itself). Also restores the AMP grad-scaler state when
    ``scaler.pt`` is present.
    """
    if checkpoint is None:
        return
    if self.deepspeed:
        # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init
        return
    if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
        os.path.join(checkpoint, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        if is_torch_tpu_available():
            # On TPU we have to take some extra precautions to properly load the states on the right device.
            optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
            with warnings.catch_warnings(record=True) as caught_warnings:
                lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
            reissue_pt_warnings(caught_warnings)
            xm.send_cpu_data_to_device(optimizer_state, self.args.device)
            xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
            self.optimizer.load_state_dict(optimizer_state)
            self.lr_scheduler.load_state_dict(lr_scheduler_state)
        else:
            map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
            self.optimizer.load_state_dict(
                torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=map_location)
            )
            with warnings.catch_warnings(record=True) as caught_warnings:
                self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
            reissue_pt_warnings(caught_warnings)
    if self.use_amp and os.path.isfile(os.path.join(checkpoint, "scaler.pt")):
        self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, "scaler.pt")))
def hyperparameter_search(
    self,
    hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
    compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
    n_trials: int = 20,
    direction: str = "minimize",
    backend: Optional[Union["str", HPSearchBackend]] = None,
    hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
    **kwargs,
) -> BestRun:
    """
    Launch an hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
    :obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
    provided, the sum of all metrics otherwise.
    .. warning::
        To use this method, you need to have provided a ``model_init`` when initializing your
        :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
        with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
        method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
    Args:
        hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
            A function that defines the hyperparameter search space. Will default to
            :func:`~transformers.trainer_utils.default_hp_space_optuna` or
            :func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
        compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
            A function computing the objective to minimize or maximize from the metrics returned by the
            :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
        n_trials (:obj:`int`, `optional`, defaults to 20):
            The number of trial runs to test.
        direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
            Whether to optimize greater or lower objects. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should
            pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
            several metrics.
        backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
            The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
            one is installed. If both are installed, will default to optuna.
        hp_name (:obj:`Callable[["optuna.Trial"], str]`, `optional`):
            A function that defines the trial/run name, used to label the checkpoint directory of each run.
        kwargs:
            Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
            more information see:
            - the documentation of `optuna.create_study
            <https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
            - the documentation of `tune.run
            <https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
    Returns:
        :class:`transformers.trainer_utils.BestRun`: All the information about the best run.
    """
    if backend is None:
        backend = default_hp_search_backend()
        if backend is None:
            raise RuntimeError(
                "At least one of optuna or ray should be installed. "
                # Fix: the two sentences were concatenated without a separating space.
                "To install optuna run `pip install optuna`. "
                "To install ray run `pip install ray[tune]`."
            )
    backend = HPSearchBackend(backend)
    if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
        raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
    if backend == HPSearchBackend.RAY and not is_ray_tune_available():
        raise RuntimeError(
            "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
        )
    self.hp_search_backend = backend
    if self.model_init is None:
        raise RuntimeError(
            "To use hyperparameter search, you need to pass your model through a model_init function."
        )
    self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
    self.hp_name = hp_name
    self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
    run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
    best_run = run_hp_search(self, n_trials, direction, **kwargs)
    # Reset so a later plain `train()` call is not mistaken for an HP-search run.
    self.hp_search_backend = None
    return best_run
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step (forward + backward) on a batch of inputs.

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to train.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.

        Return:
            :obj:`torch.Tensor`: The tensor with training loss on this batch (detached from the graph).
        """
        model.train()
        inputs = self._prepare_inputs(inputs)
        # SageMaker Model Parallel performs its own forward+backward and returns a per-microbatch
        # loss object; reduce it to a single scalar on this device and return early.
        if is_sagemaker_mp_enabled():
            scaler = self.scaler if self.use_amp else None
            loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps, scaler=scaler)
            return loss_mb.reduce_mean().detach().to(self.args.device)
        # Native AMP: run the forward pass under autocast so supported ops run in mixed precision.
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training
        if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
            # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
            loss = loss / self.args.gradient_accumulation_steps
        # Backward pass: each mixed-precision / engine backend owns its loss-scaling mechanism.
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            # loss gets scaled under gradient_accumulation_steps in deepspeed
            loss = self.deepspeed.backward(loss)
        else:
            loss.backward()
        return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
try:
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
except IndexError:
loss = outputs["loss"] if isinstance(outputs, dict) else outputs.item()
return (loss, outputs) if return_outputs else loss
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
return self.args.local_process_index == 0
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
# Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global
# process index.
if is_sagemaker_mp_enabled():
return smp.rank() == 0
else:
return self.args.process_index == 0
    def save_model(self, output_dir: Optional[str] = None):
        """
        Will save the model, so you can reload it using :obj:`from_pretrained()`.

        Will only save from the main process, except where a backend requires all ranks to
        participate (SageMaker ModelParallel state-dict collection, DeepSpeed ZeRO-3 fp16 gather).

        Args:
            output_dir (:obj:`str`, `optional`):
                Directory to save into; defaults to :obj:`self.args.output_dir`.
        """
        if output_dir is None:
            output_dir = self.args.output_dir
        if is_torch_tpu_available():
            self._save_tpu(output_dir)
        elif is_sagemaker_mp_enabled():
            # Calling the state_dict needs to be done on the wrapped model and on all processes.
            state_dict = self.model_wrapped.state_dict()
            if self.is_world_process_zero():
                self._save(output_dir, state_dict=state_dict)
        elif (
            ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
        ):
            # Sharded DDP: collect the (possibly consolidated) state dict, then write on rank zero only.
            state_dict = self.model.state_dict()
            if self.is_world_process_zero():
                self._save(output_dir, state_dict=state_dict)
        elif self.deepspeed:
            # this takes care of everything as long as we aren't under zero3
            if self.is_world_process_zero():
                self._save(output_dir)
            if is_deepspeed_zero3_enabled():
                # It's too complicated to try to override different places where the weights dump gets
                # saved, so since under zero3 the file is bogus, simply delete it. The user should
                # either use the deepspeed checkpoint to resume, or to recover full weights use
                # zero_to_fp32.py stored in the checkpoint.
                if self.is_world_process_zero():
                    file = os.path.join(output_dir, WEIGHTS_NAME)
                    if os.path.isfile(file):
                        # logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
                        os.remove(file)
                # now save the real model if stage3_gather_fp16_weights_on_model_save=True
                # if false it will not be saved.
                # This must be called on all ranks
                self.deepspeed.save_fp16_model(output_dir, WEIGHTS_NAME)
        elif self.is_world_process_zero():
            self._save(output_dir)
    def _save_tpu(self, output_dir: Optional[str] = None):
        """
        Save the model on TPU via ``torch_xla``.

        All XLA processes must reach the ``rendezvous`` together, but only the master ordinal
        creates the directory and writes the training arguments.
        """
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        logger.info(f"Saving model checkpoint to {output_dir}")
        if xm.is_master_ordinal():
            os.makedirs(output_dir, exist_ok=True)
            torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        xm.rendezvous("saving_checkpoint")
        if not isinstance(self.model, PreTrainedModel):
            if isinstance(unwrap_model(self.model), PreTrainedModel):
                # The model is wrapped (e.g. by a parallelism wrapper): save through the unwrapped
                # model but with the wrapper's weights, using xm.save for TPU-safe serialization.
                unwrap_model(self.model).save_pretrained(
                    output_dir,
                    save_config=self.is_world_process_zero(),
                    state_dict=self.model.state_dict(),
                    save_function=xm.save,
                )
            else:
                logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                state_dict = self.model.state_dict()
                xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir, save_config=self.is_world_process_zero(), save_function=xm.save)
        if self.tokenizer is not None and self.is_world_process_zero():
            self.tokenizer.save_pretrained(output_dir)
    def _save(self, output_dir: Optional[str] = None, state_dict=None):
        """
        Save the model, tokenizer and training arguments to :obj:`output_dir`.

        Args:
            output_dir (:obj:`str`, `optional`):
                Target directory; defaults to :obj:`self.args.output_dir`.
            state_dict (`optional`):
                A pre-collected state dict (supplied by sharded/parallel saving paths). When
                :obj:`None`, the weights are taken from the model itself.
        """
        # If we are executing this function, we are the process zero, so we don't check for that.
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        logger.info(f"Saving model checkpoint to {output_dir}")
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        if not isinstance(self.model, PreTrainedModel):
            if isinstance(unwrap_model(self.model), PreTrainedModel):
                # Wrapped PreTrainedModel (e.g. under DDP): save through the unwrapped model.
                if state_dict is None:
                    state_dict = self.model.state_dict()
                unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
            else:
                logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                if state_dict is None:
                    state_dict = self.model.state_dict()
                torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir, state_dict=state_dict)
        if self.tokenizer is not None:
            self.tokenizer.save_pretrained(output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self.args.local_rank != -1:
self.state.total_flos += distributed_broadcast_scalars([self.current_flos]).sum().item()
self.current_flos = 0
else:
self.state.total_flos += self.current_flos
self.current_flos = 0
    def _sorted_checkpoints(
        self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
    ) -> List[str]:
        """
        Return the checkpoint directories under :obj:`output_dir`, sorted from oldest to newest.

        Ordering is by the step number embedded in the directory name (``{prefix}-{step}``), or by
        file modification time when :obj:`use_mtime` is :obj:`True`. When a best-model checkpoint is
        tracked, it is rotated towards the end of the list so that checkpoint rotation (which deletes
        from the front of this list) never removes it.
        """
        ordering_and_checkpoint_path = []
        glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]
        for path in glob_checkpoints:
            if use_mtime:
                ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
            else:
                regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
                if regex_match is not None and regex_match.groups() is not None:
                    ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
        checkpoints_sorted = sorted(ordering_and_checkpoint_path)
        checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
        # Make sure we don't delete the best model.
        if self.state.best_model_checkpoint is not None:
            best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
            # Bubble the best checkpoint towards the end of the list, stopping one short so the most
            # recent checkpoint stays last (deletion consumes the list from the front).
            for i in range(best_model_index, len(checkpoints_sorted) - 2):
                checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]
        return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
# If save_total_limit=1 with load_best_mode_at_end=True, we could end up deleting the last checkpoint, which
# we don't do to allow resuming.
save_total_limit = self.args.save_total_limit
if (
self.state.best_model_checkpoint is not None
and self.args.save_total_limit == 1
and checkpoints_sorted[-1] != self.state.best_model_checkpoint
):
save_total_limit = 2
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint)
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> Dict[str, float]:
        """
        Run evaluation and returns metrics.

        The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
        (pass it to the init :obj:`compute_metrics` argument).

        You can also subclass and override this method to inject custom behavior.

        Args:
            eval_dataset (:obj:`Dataset`, `optional`):
                Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
                columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
                :obj:`__len__` method.
            ignore_keys (:obj:`List[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default)

        Returns:
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
            dictionary also contains the epoch number which comes from the training state.
        """
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        start_time = time.time()
        # The legacy `prediction_loop` is kept behind a flag for backward compatibility.
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        output = eval_loop(
            eval_dataloader,
            description="Evaluation",
            # No point gathering the predictions if there are no metrics, otherwise we defer to
            # self.args.prediction_loss_only
            prediction_loss_only=True if self.compute_metrics is None else None,
            ignore_keys=ignore_keys,
            metric_key_prefix=metric_key_prefix,
        )
        # Add wall-clock throughput metrics (runtime, samples/s, steps/s) under the same prefix.
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        self.log(output.metrics)
        if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
        self._memory_tracker.stop_and_update_metrics(output.metrics)
        return output.metrics
    def predict(
        self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
    ) -> PredictionOutput:
        """
        Run prediction and returns predictions and potential metrics.

        Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
        will also return metrics, like in :obj:`evaluate()`.

        Args:
            test_dataset (:obj:`Dataset`):
                Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
                ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
            ignore_keys (:obj:`List[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"test"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "test_bleu" if the prefix is "test" (default)

        .. note::

            If your predictions or labels have different sequence length (for instance because you're doing dynamic
            padding in a token classification task) the predictions will be padded (on the right) to allow for
            concatenation into one array. The padding index is -100.

        Returns: `NamedTuple` A namedtuple with the following keys:

            - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
            - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
            - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
              contained labels).
        """
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        test_dataloader = self.get_test_dataloader(test_dataset)
        start_time = time.time()
        # The legacy `prediction_loop` is kept behind a flag for backward compatibility.
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        output = eval_loop(
            test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
        )
        # Add wall-clock throughput metrics (runtime, samples/s, steps/s) under the same prefix.
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        self._memory_tracker.stop_and_update_metrics(output.metrics)
        return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
    def evaluation_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> EvalLoopOutput:
        """
        Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.

        Works both with or without labels.

        Args:
            dataloader (:obj:`DataLoader`): The dataloader to iterate over.
            description (:obj:`str`): Short text used in the log header (e.g. ``"Evaluation"``).
            prediction_loss_only (:obj:`bool`, `optional`):
                When :obj:`True`, only the loss is gathered (no predictions/labels); defaults to
                ``args.prediction_loss_only``.
            ignore_keys (:obj:`List[str]`, `optional`):
                Keys of the model output dict to exclude from gathered predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                Prefix prepended to every metric name.

        Returns:
            :obj:`EvalLoopOutput`: the gathered predictions, labels, metrics and sample count.
        """
        prediction_loss_only = (
            prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
        )
        # if eval is called w/o train init deepspeed here
        if self.args.deepspeed and not self.deepspeed:
            # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
            # from the checkpoint eventually
            deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
            self.model = deepspeed_engine.module
            self.model_wrapped = deepspeed_engine
            self.deepspeed = deepspeed_engine
            # XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
            # for example the Z3-optimizer is a must for zero3 to work even for inference - what we
            # don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
            deepspeed_engine.optimizer.optimizer = None
            deepspeed_engine.lr_scheduler = None
        model = self._wrap_model(self.model, training=False)
        # if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
        # ``train`` is running, halve it first and then put on device
        if not self.is_in_train and self.args.fp16_full_eval:
            model = model.half().to(self.args.device)
        batch_size = dataloader.batch_size
        logger.info(f"***** Running {description} *****")
        if isinstance(dataloader.dataset, collections.abc.Sized):
            logger.info(f"  Num examples = {self.num_examples(dataloader)}")
        else:
            logger.info("  Num examples: Unknown")
        logger.info(f"  Batch size = {batch_size}")
        model.eval()
        self.callback_handler.eval_dataloader = dataloader
        # Do this before wrapping.
        eval_dataset = dataloader.dataset
        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
        if self.args.past_index >= 0:
            self._past = None
        # Initialize containers
        # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
        losses_host = None
        preds_host = None
        labels_host = None
        # losses/preds/labels on CPU (final containers)
        all_losses = None
        all_preds = None
        all_labels = None
        # Will be useful when we have an iterable dataset so don't know its length.
        observed_num_examples = 0
        # Main evaluation loop
        for step, inputs in enumerate(dataloader):
            # Update the observed num examples
            observed_batch_size = find_batch_size(inputs)
            if observed_batch_size is not None:
                observed_num_examples += observed_batch_size
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
            # Update containers on host
            if loss is not None:
                # Repeat the scalar loss so the gathered tensor has one entry per example in the batch.
                losses = self._nested_gather(loss.repeat(batch_size))
                losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
            if logits is not None:
                # Pad variable-length outputs first so the per-process tensors can be concatenated.
                logits = self._pad_across_processes(logits)
                logits = self._nested_gather(logits)
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
            if labels is not None:
                labels = self._pad_across_processes(labels)
                labels = self._nested_gather(labels)
                labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
                if losses_host is not None:
                    losses = nested_numpify(losses_host)
                    all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
                if preds_host is not None:
                    logits = nested_numpify(preds_host)
                    all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
                if labels_host is not None:
                    labels = nested_numpify(labels_host)
                    all_labels = (
                        labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
                    )
                # Set back to None to begin a new accumulation
                losses_host, preds_host, labels_host = None, None, None
        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of the evaluation loop
            delattr(self, "_past")
        # Gather all remaining tensors and put them back on the CPU
        if losses_host is not None:
            losses = nested_numpify(losses_host)
            all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
        if preds_host is not None:
            logits = nested_numpify(preds_host)
            all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
        if labels_host is not None:
            labels = nested_numpify(labels_host)
            all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
        # Number of samples
        if not isinstance(eval_dataset, IterableDataset):
            num_samples = len(eval_dataset)
        elif isinstance(eval_dataset, IterableDatasetShard):
            num_samples = eval_dataset.num_examples
        else:
            num_samples = observed_num_examples
        # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of
        # samplers has been rounded to a multiple of batch_size, so we truncate.
        if all_losses is not None:
            all_losses = all_losses[:num_samples]
        if all_preds is not None:
            all_preds = nested_truncate(all_preds, num_samples)
        if all_labels is not None:
            all_labels = nested_truncate(all_labels, num_samples)
        # Metrics!
        if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
            metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
        else:
            metrics = {}
        # To be JSON-serializable, we need to remove numpy types or zero-d tensors
        metrics = denumpify_detensorize(metrics)
        if all_losses is not None:
            metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
def _nested_gather(self, tensors, name=None):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
if name is None:
name = "nested_gather"
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return tensors
# Copied from Accelerate.
def _pad_across_processes(self, tensor, pad_index=-100):
"""
Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
they can safely be gathered.
"""
if isinstance(tensor, (list, tuple)):
return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)
elif isinstance(tensor, dict):
return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()})
elif not isinstance(tensor, torch.Tensor):
raise TypeError(
f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
)
if len(tensor.shape) < 2:
return tensor
# Gather all sizes
size = torch.tensor(tensor.shape, device=tensor.device)[None]
sizes = self._nested_gather(size).cpu()
max_size = max(s[1] for s in sizes)
if tensor.shape[1] == max_size:
return tensor
# Then pad to the maximum size
old_size = tensor.shape
new_size = list(old_size)
new_size[1] = max_size
new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
new_tensor[:, : old_size[1]] = tensor
return new_tensor
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """
        Perform an evaluation step on :obj:`model` using :obj:`inputs`.

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to evaluate.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (:obj:`bool`):
                Whether or not to return the loss only.
            ignore_keys (:obj:`List[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.

        Return:
            Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
            logits and labels (each being optional).
        """
        # A loss can only be computed when every expected label key is present in the batch.
        has_labels = all(inputs.get(k) is not None for k in self.label_names)
        inputs = self._prepare_inputs(inputs)
        if ignore_keys is None:
            if hasattr(self.model, "config"):
                ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
            else:
                ignore_keys = []
        # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
        if has_labels:
            labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
            if len(labels) == 1:
                labels = labels[0]
        else:
            labels = None
        with torch.no_grad():
            if is_sagemaker_mp_enabled():
                # SageMaker Model Parallel: forward only, then reduce/concat the microbatch outputs.
                raw_outputs = smp_forward_only(model, inputs)
                if has_labels:
                    if isinstance(raw_outputs, dict):
                        loss_mb = raw_outputs["loss"]
                        logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
                    else:
                        loss_mb = raw_outputs[0]
                        logits_mb = raw_outputs[1:]
                    loss = loss_mb.reduce_mean().detach().cpu()
                    logits = smp_nested_concat(logits_mb)
                else:
                    loss = None
                    if isinstance(raw_outputs, dict):
                        logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
                    else:
                        logits_mb = raw_outputs
                    logits = smp_nested_concat(logits_mb)
            else:
                if has_labels:
                    loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
                    loss = loss.mean().detach()
                    if isinstance(outputs, dict):
                        logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
                    else:
                        # Tuple-style outputs put the loss first; everything after it is logits.
                        logits = outputs[1:]
                else:
                    loss = None
                    if self.use_amp:
                        with autocast():
                            outputs = model(**inputs)
                    else:
                        outputs = model(**inputs)
                    if isinstance(outputs, dict):
                        logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
                    else:
                        logits = outputs
                    # TODO: this needs to be fixed and made cleaner later.
                    # NOTE(review): index is `past_index - 1` here vs `past_index` in compute_loss —
                    # presumably because the loss element is absent from these outputs; confirm.
                    if self.args.past_index >= 0:
                        self._past = outputs[self.args.past_index - 1]
        if prediction_loss_only:
            return (loss, None, None)
        logits = nested_detach(logits)
        if len(logits) == 1:
            logits = logits[0]
        return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
def create_model_card(
self,
language: Optional[str] = None,
license: Optional[str] = None,
tags: Optional[str] = None,
model_name: Optional[str] = None,
finetuned_from: Optional[str] = None,
dataset_tags: Optional[Union[str, List[str]]] = None,
dataset: Optional[Union[str, List[str]]] = None,
dataset_args: Optional[Union[str, List[str]]] = None,
):
training_summary = TrainingSummary.from_trainer(
self,
language=language,
license=license,
tags=tags,
model_name=model_name,
finetuned_from=finetuned_from,
dataset_tags=dataset_tags,
dataset=dataset,
dataset_args=dataset_args,
)
model_card = training_summary.to_model_card()
with open(os.path.join(self.args.output_dir, "README.md"), "w") as f:
f.write(model_card)
    def push_to_hub(
        self,
        repo_name: Optional[str] = None,
        repo_url: Optional[str] = None,
        commit_message: Optional[str] = "add model",
        organization: Optional[str] = None,
        private: bool = None,
        use_auth_token: Optional[Union[bool, str]] = None,
        **kwargs,
    ):
        """
        Upload `self.model` to the 🤗 model hub.

        Parameters:
            repo_name (:obj:`str`, `optional`):
                Repository name for your model or tokenizer in the hub. If not specified and :obj:`repo_url` is not
                specified either, will default to the stem of :obj:`self.args.output_dir`.
            repo_url (:obj:`str`, `optional`):
                Specify this in case you want to push to an existing repository in the hub. If unspecified, a new
                repository will be created in your namespace (unless you specify an :obj:`organization`) with
                :obj:`repo_name`.
            commit_message (:obj:`str`, `optional`, defaults to :obj:`"add model"`):
                Message to commit while pushing.
            organization (:obj:`str`, `optional`):
                Organization in which you want to push your model or tokenizer (you must be a member of this
                organization).
            private (:obj:`bool`, `optional`):
                Whether or not the repository created should be private (requires a paying subscription).
            use_auth_token (:obj:`bool` or :obj:`str`, `optional`):
                The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
                generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`). Will default to
                :obj:`True` if :obj:`repo_url` is not specified.
            kwargs:
                Additional keyword arguments passed along to :meth:`~transformers.Trainer.create_model_card`.

        Returns:
            The url of the commit of your model in the given repository, or :obj:`None` when called
            from a non-main process.
        """
        # Only the global main process pushes; all other ranks return early.
        if not self.is_world_process_zero():
            return
        if not isinstance(unwrap_model(self.model), PushToHubMixin):
            raise ValueError(
                "The `upload_model_to_hub` method only works for models that inherit from `PushToHubMixin` models."
            )
        # Default the repo name to the output directory's name when nothing was specified.
        if repo_url is None and repo_name is None:
            repo_name = Path(self.args.output_dir).name
        # Derive the model-card name from whichever repo identifier we have.
        if repo_name is not None:
            model_name = repo_name
        elif repo_url is not None:
            model_name = repo_url.split("/")[-1]
        else:
            # Unreachable given the defaulting above (repo_name is always set when both are None);
            # kept for safety.
            model_name = None
        self.create_model_card(model_name=model_name, **kwargs)
        # Stage the card, weights and tokenizer in a temporary directory so only these files are pushed.
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy(os.path.join(self.args.output_dir, "README.md"), os.path.join(tmp_dir, "README.md"))
            unwrap_model(self.model).save_pretrained(tmp_dir)
            if self.tokenizer is not None:
                self.tokenizer.save_pretrained(tmp_dir)
            return unwrap_model(self.model)._push_to_hub(
                save_directory=tmp_dir,
                repo_name=repo_name,
                repo_url=repo_url,
                commit_message=commit_message,
                organization=organization,
                private=private,
                use_auth_token=use_auth_token,
            )
#
# Deprecated code
#
def prediction_loop(
    self,
    dataloader: DataLoader,
    description: str,
    prediction_loss_only: Optional[bool] = None,
    ignore_keys: Optional[List[str]] = None,
    metric_key_prefix: str = "eval",
) -> PredictionOutput:
    """
    Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
    Works both with or without labels.

    Args:
        dataloader: the data to evaluate on; its dataset must implement ``__len__``.
        description: label used in the log banner (e.g. "Evaluation").
        prediction_loss_only: if True, only gather losses (no predictions/labels);
            defaults to ``self.args.prediction_loss_only``.
        ignore_keys: output keys to skip when gathering predictions.
        metric_key_prefix: prefix added to every metric key (e.g. ``eval_loss``).

    Returns:
        :obj:`PredictionOutput` with gathered predictions, label ids and metrics.
    """
    if not isinstance(dataloader.dataset, collections.abc.Sized):
        raise ValueError("dataset must implement __len__")
    prediction_loss_only = (
        prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
    )
    # if eval is called w/o train init deepspeed here
    if self.args.deepspeed and not self.deepspeed:
        # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
        # from the checkpoint eventually
        deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
        self.model = deepspeed_engine.module
        self.model_wrapped = deepspeed_engine
        self.deepspeed = deepspeed_engine
        # XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
        # for example the Z3-optimizer is a must for zero3 to work even for inference - what we
        # don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
        deepspeed_engine.optimizer.optimizer = None
        deepspeed_engine.lr_scheduler = None
    model = self._wrap_model(self.model, training=False)
    # if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
    # ``train`` is running, halve it first and then put on device
    if not self.is_in_train and self.args.fp16_full_eval:
        model = model.half().to(self.args.device)
    batch_size = dataloader.batch_size
    num_examples = self.num_examples(dataloader)
    logger.info(f"***** Running {description} *****")
    logger.info(f" Num examples = {num_examples}")
    logger.info(f" Batch size = {batch_size}")
    # Per-process accumulation buffers; emptied into the gatherers below.
    losses_host: torch.Tensor = None
    preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
    labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
    world_size = max(1, self.args.world_size)
    eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
    if not prediction_loss_only:
        # The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass
        # a batch size to the sampler)
        make_multiple_of = None
        if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
            make_multiple_of = dataloader.sampler.batch_size
        preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
        labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
    model.eval()
    if is_torch_tpu_available():
        dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
    if self.args.past_index >= 0:
        self._past = None
    self.callback_handler.eval_dataloader = dataloader
    for step, inputs in enumerate(dataloader):
        loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
        if loss is not None:
            # Per-batch scalar loss repeated to per-example length for gathering.
            losses = loss.repeat(batch_size)
            losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
        if logits is not None:
            preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
        if labels is not None:
            labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
        self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
        # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
        if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
            eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
            if not prediction_loss_only:
                preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
                labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
            # Set back to None to begin a new accumulation
            losses_host, preds_host, labels_host = None, None, None
    if self.args.past_index and hasattr(self, "_past"):
        # Clean the state at the end of the evaluation loop
        delattr(self, "_past")
    # Gather all remaining tensors and put them back on the CPU
    eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
    if not prediction_loss_only:
        preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
        labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
    eval_loss = eval_losses_gatherer.finalize()
    preds = preds_gatherer.finalize() if not prediction_loss_only else None
    label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
    if self.compute_metrics is not None and preds is not None and label_ids is not None:
        metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
    else:
        metrics = {}
    # To be JSON-serializable, we need to remove numpy types or zero-d tensors
    metrics = denumpify_detensorize(metrics)
    if eval_loss is not None:
        metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
    # Prefix all keys with metric_key_prefix + '_'
    for key in list(metrics.keys()):
        if not key.startswith(f"{metric_key_prefix}_"):
            metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
    return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
    """
    Gather value of `tensors` (tensor or list/tuple of nested tensors) across
    processes and convert the result to numpy.

    Returns None when `tensors` is None; otherwise the numpified (and, in
    distributed settings, concatenated) tensors.
    """
    if tensors is None:
        return None
    # Pick the gather strategy for the current runtime, in priority order:
    # TPU mesh reduce, SageMaker model parallel, then torch distributed.
    if is_torch_tpu_available():
        gathered = nested_xla_mesh_reduce(tensors, name)
    elif is_sagemaker_mp_enabled():
        gathered = smp_gather(tensors)
    elif self.args.local_rank != -1:
        gathered = distributed_concat(tensors)
    else:
        gathered = tensors
    return nested_numpify(gathered)
|
import logging
import sys
import json
import reconcile.queries as queries
from reconcile.utils.aws_api import AWSApi
from reconcile.utils.defer import defer
from reconcile.utils.ocm import OCMMap
from reconcile.utils.terraform_client import TerraformClient as Terraform
from reconcile.utils.terrascript_client import TerrascriptClient as Terrascript
from reconcile.utils.semver_helper import make_semver
# Identity and semver of this integration, recorded by terraform/OCM clients.
QONTRACT_INTEGRATION = 'terraform_vpc_peerings'
QONTRACT_INTEGRATION_VERSION = make_semver(0, 1, 0)
def find_matching_peering(from_cluster, peering, to_cluster, desired_provider):
    """
    Ensures there is a matching peering with the desired provider type
    going from the destination (to) cluster back to this one (from).

    Returns the matching connection dict from `to_cluster`, or None.
    """
    source_name = from_cluster['name']
    for candidate in to_cluster['peering']['connections']:
        if candidate['provider'] != desired_provider:
            continue
        remote_cluster = candidate['cluster']
        # Skip connections that do not point back at a cluster at all.
        if not remote_cluster:
            continue
        if remote_cluster['name'] == source_name:
            return candidate
    return None
def aws_account_from_infrastructure_access(cluster, access_level, ocm_map):
    """
    Generate an AWS account object from a cluster's awsInfrastructureAccess
    groups and access levels.

    Returns a dict with account identity, terraform credentials and the
    OCM-resolved assume_role, or None when no group matches `access_level`.
    """
    ocm = ocm_map.get(cluster['name'])
    account = None
    for awsAccess in cluster['awsInfrastructureAccess']:
        # NOTE(review): no break — if several groups share this access level,
        # the *last* matching account wins; confirm that is intended.
        if awsAccess.get('accessLevel', "") == access_level:
            account = {
                'name': awsAccess['awsGroup']['account']['name'],
                'uid': awsAccess['awsGroup']['account']['uid'],
                'terraformUsername':
                    awsAccess['awsGroup']['account']['terraformUsername'],
                'automationToken':
                    awsAccess['awsGroup']['account']['automationToken'],
                'assume_role':
                    ocm.get_aws_infrastructure_access_terraform_assume_role(
                        cluster['name'],
                        awsAccess['awsGroup']['account']['uid'],
                        awsAccess['awsGroup']['account']['terraformUsername'],
                    )
            }
    return account
def build_desired_state_cluster(clusters, ocm_map, settings):
    """
    Fetch state for VPC peerings between two OCM clusters.

    Args:
        clusters: list of cluster dicts (must contain a 'peering' section).
        ocm_map: OCMMap used to resolve AWS infrastructure access roles.
        settings: app-interface settings forwarded to AWSApi.

    Returns:
        (desired_state, error): list of peering items and a bool that is True
        when any cluster/connection could not be fully resolved.
    """
    desired_state = []
    error = False
    for cluster_info in clusters:
        cluster_name = cluster_info['name']
        # Find an aws account with the "network-mgmt" access level on the
        # requester cluster and use that as the account for the requester
        req_aws = aws_account_from_infrastructure_access(cluster_info,
                                                        'network-mgmt',
                                                        ocm_map)
        if not req_aws:
            msg = f"could not find an AWS account with the " \
                  f"'network-mgmt' access level on the cluster {cluster_name}"
            logging.error(msg)
            error = True
            continue
        req_aws['assume_region'] = cluster_info['spec']['region']
        req_aws['assume_cidr'] = cluster_info['network']['vpc']
        peering_info = cluster_info['peering']
        peer_connections = peering_info['connections']
        for peer_connection in peer_connections:
            # We only care about cluster-vpc-requester peering providers
            peer_connection_provider = peer_connection['provider']
            if not peer_connection_provider == 'cluster-vpc-requester':
                continue
            peer_connection_name = peer_connection['name']
            peer_cluster = peer_connection['cluster']
            peer_cluster_name = peer_cluster['name']
            requester_manage_routes = peer_connection.get('manageRoutes')
            # Ensure we have a matching peering connection on the peer side
            peer_info = find_matching_peering(cluster_info,
                                             peer_connection,
                                             peer_cluster,
                                             'cluster-vpc-accepter')
            if not peer_info:
                msg = f"could not find a matching peering connection for " \
                      f"cluster {cluster_name}, " \
                      f"connection {peer_connection_name}"
                logging.error(msg)
                error = True
                continue
            accepter_manage_routes = peer_info.get('manageRoutes')
            aws_api = AWSApi(1, [req_aws], settings=settings)
            requester_vpc_id, requester_route_table_ids, _ = \
                aws_api.get_cluster_vpc_details(
                    req_aws,
                    route_tables=requester_manage_routes
                )
            if requester_vpc_id is None:
                # NOTE(review): log message has an unmatched '[' — cosmetic.
                msg = f'[{cluster_name} could not find VPC ID for cluster'
                logging.error(msg)
                error = True
                continue
            requester = {
                'cidr_block': cluster_info['network']['vpc'],
                'region': cluster_info['spec']['region'],
                'vpc_id': requester_vpc_id,
                'route_table_ids': requester_route_table_ids,
                'account': req_aws
            }
            # Find an aws account with the "network-mgmt" access level on the
            # peer cluster and use that as the account for the accepter
            acc_aws = aws_account_from_infrastructure_access(peer_cluster,
                                                            'network-mgmt',
                                                            ocm_map)
            if not acc_aws:
                msg = "could not find an AWS account with the " \
                      "'network-mgmt' access level on the cluster"
                logging.error(msg)
                error = True
                continue
            acc_aws['assume_region'] = peer_cluster['spec']['region']
            acc_aws['assume_cidr'] = peer_cluster['network']['vpc']
            aws_api = AWSApi(1, [acc_aws], settings=settings)
            accepter_vpc_id, accepter_route_table_ids, _ = \
                aws_api.get_cluster_vpc_details(
                    acc_aws,
                    route_tables=accepter_manage_routes
                )
            if accepter_vpc_id is None:
                msg = f'[{peer_cluster_name} could not find VPC ID for cluster'
                logging.error(msg)
                error = True
                continue
            # ARN format: arn:aws:iam::<account-id>:... -> field 4 is the id
            requester['peer_owner_id'] = acc_aws['assume_role'].split(':')[4]
            accepter = {
                'cidr_block': peer_cluster['network']['vpc'],
                'region': peer_cluster['spec']['region'],
                'vpc_id': accepter_vpc_id,
                'route_table_ids': accepter_route_table_ids,
                'account': acc_aws
            }
            item = {
                'connection_provider': peer_connection_provider,
                'connection_name': peer_connection_name,
                'requester': requester,
                'accepter': accepter,
                'deleted': peer_connection.get('delete', False)
            }
            desired_state.append(item)
    return desired_state, error
def build_desired_state_vpc_mesh(clusters, ocm_map, settings):
    """
    Fetch state for VPC peerings between a cluster and all VPCs in an account.

    Args:
        clusters: list of cluster dicts (must contain a 'peering' section).
        ocm_map: OCMMap used to resolve AWS infrastructure access roles.
        settings: app-interface settings forwarded to AWSApi.

    Returns:
        (desired_state, error): list of peering items and a bool that is True
        when any cluster/connection could not be fully resolved.
    """
    desired_state = []
    error = False
    for cluster_info in clusters:
        cluster = cluster_info['name']
        ocm = ocm_map.get(cluster)
        peering_info = cluster_info['peering']
        peer_connections = peering_info['connections']
        for peer_connection in peer_connections:
            # We only care about account-vpc-mesh peering providers
            peer_connection_provider = peer_connection['provider']
            if not peer_connection_provider == 'account-vpc-mesh':
                continue
            # requester is the cluster's AWS account
            requester = {
                'cidr_block': cluster_info['network']['vpc'],
                'region': cluster_info['spec']['region']
            }
            account = peer_connection['account']
            # assume_role is the role to assume to provision the
            # peering connection request, through the accepter AWS account.
            account['assume_role'] = \
                ocm.get_aws_infrastructure_access_terraform_assume_role(
                    cluster,
                    account['uid'],
                    account['terraformUsername']
                )
            account['assume_region'] = requester['region']
            account['assume_cidr'] = requester['cidr_block']
            aws_api = AWSApi(1, [account], settings=settings)
            requester_vpc_id, requester_route_table_ids, _ = \
                aws_api.get_cluster_vpc_details(
                    account,
                    route_tables=peer_connection.get('manageRoutes')
                )
            if requester_vpc_id is None:
                logging.error(f'[{cluster} could not find VPC ID for cluster')
                error = True
                continue
            requester['vpc_id'] = requester_vpc_id
            requester['route_table_ids'] = requester_route_table_ids
            requester['account'] = account
            # BUG FIX: 'tags' is a JSON *string* (or None). The fallback must
            # be the JSON text '{}' — passing a dict literal to json.loads
            # raises TypeError.
            account_vpcs = \
                aws_api.get_vpcs_details(
                    account,
                    tags=json.loads(peer_connection.get('tags') or '{}'),
                    route_tables=peer_connection.get('manageRoutes'),
                )
            for vpc in account_vpcs:
                vpc_id = vpc['vpc_id']
                # BUG FIX: use single quotes inside the f-string — reusing the
                # enclosing double quotes is a SyntaxError before Python 3.12.
                connection_name = \
                    f"{peer_connection['name']}_" + \
                    f"{account['name']}-{vpc_id}"
                accepter = {
                    'vpc_id': vpc_id,
                    'region': vpc['region'],
                    'cidr_block': vpc['cidr_block'],
                    'route_table_ids': vpc['route_table_ids'],
                    'account': account,
                }
                item = {
                    'connection_provider': peer_connection_provider,
                    'connection_name': connection_name,
                    'requester': requester,
                    'accepter': accepter,
                    'deleted': peer_connection.get('delete', False)
                }
                desired_state.append(item)
    return desired_state, error
def build_desired_state_vpc(clusters, ocm_map, settings):
    """
    Fetch state for VPC peerings between a cluster and a VPC (account).

    Args:
        clusters: list of cluster dicts (must contain a 'peering' section).
        ocm_map: OCMMap used to resolve AWS infrastructure access roles.
        settings: app-interface settings forwarded to AWSApi.

    Returns:
        (desired_state, error): list of peering items and a bool that is True
        when any cluster/connection could not be fully resolved.
    """
    desired_state = []
    error = False
    for cluster_info in clusters:
        cluster = cluster_info['name']
        ocm = ocm_map.get(cluster)
        peering_info = cluster_info['peering']
        peer_connections = peering_info['connections']
        for peer_connection in peer_connections:
            # We only care about account-vpc peering providers
            peer_connection_provider = peer_connection['provider']
            if not peer_connection_provider == 'account-vpc':
                continue
            # requester is the cluster's AWS account
            requester = {
                'cidr_block': cluster_info['network']['vpc'],
                'region': cluster_info['spec']['region']
            }
            connection_name = peer_connection['name']
            peer_vpc = peer_connection['vpc']
            # accepter is the peered AWS account
            accepter = {
                'vpc_id': peer_vpc['vpc_id'],
                'cidr_block': peer_vpc['cidr_block'],
                'region': peer_vpc['region']
            }
            account = peer_vpc['account']
            # assume_role is the role to assume to provision the
            # peering connection request, through the accepter AWS account.
            account['assume_role'] = \
                ocm.get_aws_infrastructure_access_terraform_assume_role(
                    cluster,
                    peer_vpc['account']['uid'],
                    peer_vpc['account']['terraformUsername']
                )
            account['assume_region'] = requester['region']
            account['assume_cidr'] = requester['cidr_block']
            aws_api = AWSApi(1, [account], settings=settings)
            requester_vpc_id, requester_route_table_ids, _ = \
                aws_api.get_cluster_vpc_details(
                    account,
                    route_tables=peer_connection.get('manageRoutes')
                )
            if requester_vpc_id is None:
                # NOTE(review): log message has an unmatched '[' — cosmetic.
                logging.error(f'[{cluster} could not find VPC ID for cluster')
                error = True
                continue
            requester['vpc_id'] = requester_vpc_id
            requester['route_table_ids'] = requester_route_table_ids
            requester['account'] = account
            accepter['account'] = account
            item = {
                'connection_provider': peer_connection_provider,
                'connection_name': connection_name,
                'requester': requester,
                'accepter': accepter,
                'deleted': peer_connection.get('delete', False)
            }
            desired_state.append(item)
    return desired_state, error
@defer
def run(dry_run, print_only=False,
        enable_deletion=False, thread_pool_size=10, defer=None):
    """
    Reconcile declared VPC peerings via terraform.

    Builds the desired peering state from three sources (cluster-to-vpc,
    cluster-to-account mesh, cluster-to-cluster), renders it with Terrascript
    and plans/applies it with Terraform. Exits non-zero on any build, plan or
    apply error.

    Args:
        dry_run: when True, stop after a successful terraform plan.
        print_only: dump the rendered terraform configs and exit.
        enable_deletion: allow terraform to delete resources.
        thread_pool_size: worker threads for terraform operations.
        defer: injected by the @defer decorator to register cleanup callbacks.
    """
    settings = queries.get_app_interface_settings()
    clusters = [c for c in queries.get_clusters()
                if c.get('peering') is not None]
    ocm_map = OCMMap(clusters=clusters, integration=QONTRACT_INTEGRATION,
                     settings=settings)
    # Fetch desired state for cluster-to-vpc(account) VPCs
    desired_state_vpc, err = \
        build_desired_state_vpc(clusters, ocm_map, settings)
    if err:
        sys.exit(1)
    # Fetch desired state for cluster-to-account (vpc mesh) VPCs
    desired_state_vpc_mesh, err = \
        build_desired_state_vpc_mesh(clusters, ocm_map, settings)
    if err:
        sys.exit(1)
    # Fetch desired state for cluster-to-cluster VPCs
    desired_state_cluster, err = \
        build_desired_state_cluster(clusters, ocm_map, settings)
    if err:
        sys.exit(1)
    desired_state = \
        desired_state_vpc + \
        desired_state_vpc_mesh + \
        desired_state_cluster
    # check there are no repeated vpc connection names
    connection_names = [c['connection_name'] for c in desired_state]
    if len(set(connection_names)) != len(connection_names):
        logging.error("duplicate vpc connection names found")
        sys.exit(1)
    # Collect every account touched by a peering so terraform providers
    # can be configured for all of them.
    participating_accounts = \
        [item['requester']['account'] for item in desired_state]
    participating_accounts += \
        [item['accepter']['account'] for item in desired_state]
    participating_account_names = \
        [a['name'] for a in participating_accounts]
    accounts = [a for a in queries.get_aws_accounts()
                if a['name'] in participating_account_names]
    ts = Terrascript(QONTRACT_INTEGRATION,
                     "",
                     thread_pool_size,
                     accounts,
                     settings=settings)
    ts.populate_additional_providers(participating_accounts)
    ts.populate_vpc_peerings(desired_state)
    working_dirs = ts.dump(print_only=print_only)
    if print_only:
        sys.exit()
    tf = Terraform(QONTRACT_INTEGRATION,
                   QONTRACT_INTEGRATION_VERSION,
                   "",
                   accounts,
                   working_dirs,
                   thread_pool_size)
    if tf is None:
        sys.exit(1)
    # Always remove terraform working dirs, even on failure paths.
    defer(lambda: tf.cleanup())
    disabled_deletions_detected, err = tf.plan(enable_deletion)
    if err:
        sys.exit(1)
    if disabled_deletions_detected:
        sys.exit(1)
    if dry_run:
        return
    err = tf.apply()
    if err:
        sys.exit(1)
| import logging
import sys
import json
import reconcile.queries as queries
from reconcile.utils.aws_api import AWSApi
from reconcile.utils.defer import defer
from reconcile.utils.ocm import OCMMap
from reconcile.utils.terraform_client import TerraformClient as Terraform
from reconcile.utils.terrascript_client import TerrascriptClient as Terrascript
from reconcile.utils.semver_helper import make_semver
# Identity and semver of this integration, recorded by terraform/OCM clients.
QONTRACT_INTEGRATION = 'terraform_vpc_peerings'
QONTRACT_INTEGRATION_VERSION = make_semver(0, 1, 0)
def find_matching_peering(from_cluster, peering, to_cluster, desired_provider):
    """
    Ensures there is a matching peering with the desired provider type
    going from the destination (to) cluster back to this one (from).

    Returns the matching connection dict from `to_cluster`, or None.
    """
    source_name = from_cluster['name']
    connections = to_cluster['peering']['connections']
    # Lazily scan for a connection of the right provider that points back
    # at the originating cluster.
    matches = (
        conn for conn in connections
        if conn['provider'] == desired_provider
        and conn['cluster']
        and conn['cluster']['name'] == source_name
    )
    return next(matches, None)
def aws_account_from_infrastructure_access(cluster, access_level, ocm_map):
    """
    Generate an AWS account object from a cluster's awsInfrastructureAccess
    groups and access levels.

    Returns a dict with account identity, terraform credentials and the
    OCM-resolved assume_role, or None when no group matches `access_level`.
    """
    ocm = ocm_map.get(cluster['name'])
    account = None
    for awsAccess in cluster['awsInfrastructureAccess']:
        # NOTE(review): no break — if several groups share this access level,
        # the *last* matching account wins; confirm that is intended.
        if awsAccess.get('accessLevel', "") == access_level:
            account = {
                'name': awsAccess['awsGroup']['account']['name'],
                'uid': awsAccess['awsGroup']['account']['uid'],
                'terraformUsername':
                    awsAccess['awsGroup']['account']['terraformUsername'],
                'automationToken':
                    awsAccess['awsGroup']['account']['automationToken'],
                'assume_role':
                    ocm.get_aws_infrastructure_access_terraform_assume_role(
                        cluster['name'],
                        awsAccess['awsGroup']['account']['uid'],
                        awsAccess['awsGroup']['account']['terraformUsername'],
                    )
            }
    return account
def build_desired_state_cluster(clusters, ocm_map, settings):
    """
    Fetch state for VPC peerings between two OCM clusters.

    Args:
        clusters: list of cluster dicts (must contain a 'peering' section).
        ocm_map: OCMMap used to resolve AWS infrastructure access roles.
        settings: app-interface settings forwarded to AWSApi.

    Returns:
        (desired_state, error): list of peering items and a bool that is True
        when any cluster/connection could not be fully resolved.
    """
    desired_state = []
    error = False
    for cluster_info in clusters:
        cluster_name = cluster_info['name']
        # Find an aws account with the "network-mgmt" access level on the
        # requester cluster and use that as the account for the requester
        req_aws = aws_account_from_infrastructure_access(cluster_info,
                                                        'network-mgmt',
                                                        ocm_map)
        if not req_aws:
            msg = f"could not find an AWS account with the " \
                  f"'network-mgmt' access level on the cluster {cluster_name}"
            logging.error(msg)
            error = True
            continue
        req_aws['assume_region'] = cluster_info['spec']['region']
        req_aws['assume_cidr'] = cluster_info['network']['vpc']
        peering_info = cluster_info['peering']
        peer_connections = peering_info['connections']
        for peer_connection in peer_connections:
            # We only care about cluster-vpc-requester peering providers
            peer_connection_provider = peer_connection['provider']
            if not peer_connection_provider == 'cluster-vpc-requester':
                continue
            peer_connection_name = peer_connection['name']
            peer_cluster = peer_connection['cluster']
            peer_cluster_name = peer_cluster['name']
            requester_manage_routes = peer_connection.get('manageRoutes')
            # Ensure we have a matching peering connection on the peer side
            peer_info = find_matching_peering(cluster_info,
                                             peer_connection,
                                             peer_cluster,
                                             'cluster-vpc-accepter')
            if not peer_info:
                msg = f"could not find a matching peering connection for " \
                      f"cluster {cluster_name}, " \
                      f"connection {peer_connection_name}"
                logging.error(msg)
                error = True
                continue
            accepter_manage_routes = peer_info.get('manageRoutes')
            aws_api = AWSApi(1, [req_aws], settings=settings)
            requester_vpc_id, requester_route_table_ids, _ = \
                aws_api.get_cluster_vpc_details(
                    req_aws,
                    route_tables=requester_manage_routes
                )
            if requester_vpc_id is None:
                # NOTE(review): log message has an unmatched '[' — cosmetic.
                msg = f'[{cluster_name} could not find VPC ID for cluster'
                logging.error(msg)
                error = True
                continue
            requester = {
                'cidr_block': cluster_info['network']['vpc'],
                'region': cluster_info['spec']['region'],
                'vpc_id': requester_vpc_id,
                'route_table_ids': requester_route_table_ids,
                'account': req_aws
            }
            # Find an aws account with the "network-mgmt" access level on the
            # peer cluster and use that as the account for the accepter
            acc_aws = aws_account_from_infrastructure_access(peer_cluster,
                                                            'network-mgmt',
                                                            ocm_map)
            if not acc_aws:
                msg = "could not find an AWS account with the " \
                      "'network-mgmt' access level on the cluster"
                logging.error(msg)
                error = True
                continue
            acc_aws['assume_region'] = peer_cluster['spec']['region']
            acc_aws['assume_cidr'] = peer_cluster['network']['vpc']
            aws_api = AWSApi(1, [acc_aws], settings=settings)
            accepter_vpc_id, accepter_route_table_ids, _ = \
                aws_api.get_cluster_vpc_details(
                    acc_aws,
                    route_tables=accepter_manage_routes
                )
            if accepter_vpc_id is None:
                msg = f'[{peer_cluster_name} could not find VPC ID for cluster'
                logging.error(msg)
                error = True
                continue
            # ARN format: arn:aws:iam::<account-id>:... -> field 4 is the id
            requester['peer_owner_id'] = acc_aws['assume_role'].split(':')[4]
            accepter = {
                'cidr_block': peer_cluster['network']['vpc'],
                'region': peer_cluster['spec']['region'],
                'vpc_id': accepter_vpc_id,
                'route_table_ids': accepter_route_table_ids,
                'account': acc_aws
            }
            item = {
                'connection_provider': peer_connection_provider,
                'connection_name': peer_connection_name,
                'requester': requester,
                'accepter': accepter,
                'deleted': peer_connection.get('delete', False)
            }
            desired_state.append(item)
    return desired_state, error
def build_desired_state_vpc_mesh(clusters, ocm_map, settings):
    """
    Fetch state for VPC peerings between a cluster and all VPCs in an account.

    Args:
        clusters: list of cluster dicts (must contain a 'peering' section).
        ocm_map: OCMMap used to resolve AWS infrastructure access roles.
        settings: app-interface settings forwarded to AWSApi.

    Returns:
        (desired_state, error): list of peering items and a bool that is True
        when any cluster/connection could not be fully resolved.
    """
    desired_state = []
    error = False
    for cluster_info in clusters:
        cluster = cluster_info['name']
        ocm = ocm_map.get(cluster)
        peering_info = cluster_info['peering']
        peer_connections = peering_info['connections']
        for peer_connection in peer_connections:
            # We only care about account-vpc-mesh peering providers
            peer_connection_provider = peer_connection['provider']
            if not peer_connection_provider == 'account-vpc-mesh':
                continue
            # requester is the cluster's AWS account
            requester = {
                'cidr_block': cluster_info['network']['vpc'],
                'region': cluster_info['spec']['region']
            }
            account = peer_connection['account']
            # assume_role is the role to assume to provision the
            # peering connection request, through the accepter AWS account.
            account['assume_role'] = \
                ocm.get_aws_infrastructure_access_terraform_assume_role(
                    cluster,
                    account['uid'],
                    account['terraformUsername']
                )
            account['assume_region'] = requester['region']
            account['assume_cidr'] = requester['cidr_block']
            aws_api = AWSApi(1, [account], settings=settings)
            requester_vpc_id, requester_route_table_ids, _ = \
                aws_api.get_cluster_vpc_details(
                    account,
                    route_tables=peer_connection.get('manageRoutes')
                )
            if requester_vpc_id is None:
                logging.error(f'[{cluster} could not find VPC ID for cluster')
                error = True
                continue
            requester['vpc_id'] = requester_vpc_id
            requester['route_table_ids'] = requester_route_table_ids
            requester['account'] = account
            # BUG FIX: 'tags' is a JSON *string* (or None). The fallback must
            # be the JSON text '{}' — passing a dict literal to json.loads
            # raises TypeError.
            account_vpcs = \
                aws_api.get_vpcs_details(
                    account,
                    tags=json.loads(peer_connection.get('tags') or '{}'),
                    route_tables=peer_connection.get('manageRoutes'),
                )
            for vpc in account_vpcs:
                vpc_id = vpc['vpc_id']
                connection_name = \
                    f"{peer_connection['name']}_" + \
                    f"{account['name']}-{vpc_id}"
                accepter = {
                    'vpc_id': vpc_id,
                    'region': vpc['region'],
                    'cidr_block': vpc['cidr_block'],
                    'route_table_ids': vpc['route_table_ids'],
                    'account': account,
                }
                item = {
                    'connection_provider': peer_connection_provider,
                    'connection_name': connection_name,
                    'requester': requester,
                    'accepter': accepter,
                    'deleted': peer_connection.get('delete', False)
                }
                desired_state.append(item)
    return desired_state, error
def build_desired_state_vpc(clusters, ocm_map, settings):
    """
    Fetch state for VPC peerings between a cluster and a VPC (account).

    Args:
        clusters: list of cluster dicts (must contain a 'peering' section).
        ocm_map: OCMMap used to resolve AWS infrastructure access roles.
        settings: app-interface settings forwarded to AWSApi.

    Returns:
        (desired_state, error): list of peering items and a bool that is True
        when any cluster/connection could not be fully resolved.
    """
    desired_state = []
    error = False
    for cluster_info in clusters:
        cluster = cluster_info['name']
        ocm = ocm_map.get(cluster)
        peering_info = cluster_info['peering']
        peer_connections = peering_info['connections']
        for peer_connection in peer_connections:
            # We only care about account-vpc peering providers
            peer_connection_provider = peer_connection['provider']
            if not peer_connection_provider == 'account-vpc':
                continue
            # requester is the cluster's AWS account
            requester = {
                'cidr_block': cluster_info['network']['vpc'],
                'region': cluster_info['spec']['region']
            }
            connection_name = peer_connection['name']
            peer_vpc = peer_connection['vpc']
            # accepter is the peered AWS account
            accepter = {
                'vpc_id': peer_vpc['vpc_id'],
                'cidr_block': peer_vpc['cidr_block'],
                'region': peer_vpc['region']
            }
            account = peer_vpc['account']
            # assume_role is the role to assume to provision the
            # peering connection request, through the accepter AWS account.
            account['assume_role'] = \
                ocm.get_aws_infrastructure_access_terraform_assume_role(
                    cluster,
                    peer_vpc['account']['uid'],
                    peer_vpc['account']['terraformUsername']
                )
            account['assume_region'] = requester['region']
            account['assume_cidr'] = requester['cidr_block']
            aws_api = AWSApi(1, [account], settings=settings)
            requester_vpc_id, requester_route_table_ids, _ = \
                aws_api.get_cluster_vpc_details(
                    account,
                    route_tables=peer_connection.get('manageRoutes')
                )
            if requester_vpc_id is None:
                # NOTE(review): log message has an unmatched '[' — cosmetic.
                logging.error(f'[{cluster} could not find VPC ID for cluster')
                error = True
                continue
            requester['vpc_id'] = requester_vpc_id
            requester['route_table_ids'] = requester_route_table_ids
            requester['account'] = account
            accepter['account'] = account
            item = {
                'connection_provider': peer_connection_provider,
                'connection_name': connection_name,
                'requester': requester,
                'accepter': accepter,
                'deleted': peer_connection.get('delete', False)
            }
            desired_state.append(item)
    return desired_state, error
@defer
def run(dry_run, print_only=False,
        enable_deletion=False, thread_pool_size=10, defer=None):
    """
    Reconcile declared VPC peerings via terraform.

    Builds the desired peering state from three sources (cluster-to-vpc,
    cluster-to-account mesh, cluster-to-cluster), renders it with Terrascript
    and plans/applies it with Terraform. Exits non-zero on any build, plan or
    apply error.

    Args:
        dry_run: when True, stop after a successful terraform plan.
        print_only: dump the rendered terraform configs and exit.
        enable_deletion: allow terraform to delete resources.
        thread_pool_size: worker threads for terraform operations.
        defer: injected by the @defer decorator to register cleanup callbacks.
    """
    settings = queries.get_app_interface_settings()
    clusters = [c for c in queries.get_clusters()
                if c.get('peering') is not None]
    ocm_map = OCMMap(clusters=clusters, integration=QONTRACT_INTEGRATION,
                     settings=settings)
    # Fetch desired state for cluster-to-vpc(account) VPCs
    desired_state_vpc, err = \
        build_desired_state_vpc(clusters, ocm_map, settings)
    if err:
        sys.exit(1)
    # Fetch desired state for cluster-to-account (vpc mesh) VPCs
    desired_state_vpc_mesh, err = \
        build_desired_state_vpc_mesh(clusters, ocm_map, settings)
    if err:
        sys.exit(1)
    # Fetch desired state for cluster-to-cluster VPCs
    desired_state_cluster, err = \
        build_desired_state_cluster(clusters, ocm_map, settings)
    if err:
        sys.exit(1)
    desired_state = \
        desired_state_vpc + \
        desired_state_vpc_mesh + \
        desired_state_cluster
    # check there are no repeated vpc connection names
    connection_names = [c['connection_name'] for c in desired_state]
    if len(set(connection_names)) != len(connection_names):
        logging.error("duplicate vpc connection names found")
        sys.exit(1)
    # Collect every account touched by a peering so terraform providers
    # can be configured for all of them.
    participating_accounts = \
        [item['requester']['account'] for item in desired_state]
    participating_accounts += \
        [item['accepter']['account'] for item in desired_state]
    participating_account_names = \
        [a['name'] for a in participating_accounts]
    accounts = [a for a in queries.get_aws_accounts()
                if a['name'] in participating_account_names]
    ts = Terrascript(QONTRACT_INTEGRATION,
                     "",
                     thread_pool_size,
                     accounts,
                     settings=settings)
    ts.populate_additional_providers(participating_accounts)
    ts.populate_vpc_peerings(desired_state)
    working_dirs = ts.dump(print_only=print_only)
    if print_only:
        sys.exit()
    tf = Terraform(QONTRACT_INTEGRATION,
                   QONTRACT_INTEGRATION_VERSION,
                   "",
                   accounts,
                   working_dirs,
                   thread_pool_size)
    if tf is None:
        sys.exit(1)
    # Always remove terraform working dirs, even on failure paths.
    defer(lambda: tf.cleanup())
    disabled_deletions_detected, err = tf.plan(enable_deletion)
    if err:
        sys.exit(1)
    if disabled_deletions_detected:
        sys.exit(1)
    if dry_run:
        return
    err = tf.apply()
    if err:
        sys.exit(1)
|
import json
import shlex
from os import _exit, chdir, getcwd
from re import compile as re_compile
from re import findall, match
from sys import exc_info, path
from traceback import print_exception
from libs.config import custom_get, gget, gset, order_alias, set_namespace, color
from libs.readline import LovelyReadline
from Myplugin import Platform
# Matches optionally-signed integers and decimals (e.g. "42", "-3.14").
NUMBER_PATTERN = re_compile(r"^[-+]?\d*(\.?\d+|)$")
# Buffered raw stdin bytes.
STDIN_STREAM = b''
# Interactive history state shared by the input loop.
HISTORY = None
HISTORY_POINTER = 0
FROM_HISTORY = False
# Line editor used for completion-aware interactive input.
readline = LovelyReadline()
readline.init({}, {})
"""
api ['main']
history_commands ['main']
leave_message ['main']
namespace ['main']
namespace_folders ['main']
folders_namespace ['main']
root_path ['main']
{platform}.pf ['main']
{platform}.prompt ['main']
{plugin_name}.reverse_alias [namespace]
order_alias [namespace]
special plugin platform:general general commands
special plugin platform:encode Encoders
"""
class Loop_init:
    """Bootstraps the plugin platforms, global state and prompts for the REPL loop."""

    def __init__(self, api: str = "run", init_namespace: str = "main"):
        """
        Initialize the loop.

        Args:
            api (str, optional): The name of the entry function that is common to all plugins. Defaults to "run".
            init_namespace (str, optional): Initial namespace. Defaults to "main".
        """
        platforms = self.set_platforms()
        gset("api", api)
        gset("loop", True)
        gset("blockexit", False)
        gset("namespace", init_namespace)
        gset("namespace_folders", platforms)
        gset("folders_namespace", {v: k for k, v in platforms.items()})
        root_path = gget("root_path")
        # Plugins are loaded relative to the project root; restore cwd after.
        cwd = getcwd()
        chdir(root_path)
        # {platform name -> plugin path}
        for k, v in platforms.items():
            pf = import_platform(v, api)
            gset(k + ".pf", pf)
            # platform -> list of command names
            gset(k + ".wordlist", {"command_wordlist": list(pf.names())})
            # platform -> {command name -> [command arguments, ...]}
            gset(k + ".prefix_wordlist", {command: gget(command + ".arg_wordlist", k)
                                          for command in gget(k + ".wordlist")["command_wordlist"]})
        general_wordlist = gget("general.wordlist")["command_wordlist"]
        for k in platforms.keys():  # add the "general" platform's commands to every other platform
            if (k == "general"):
                continue
            wordlist = gget(k + ".wordlist")
            wordlist["command_wordlist"] += general_wordlist
        # set the input prompt for each platform
        for k, v in self.set_prompts().items():
            gset(k + ".prompt", v)
        chdir(cwd)

    def set_platforms(self) -> dict:
        # Override point: maps namespace -> plugin folder.
        return {"main": "main_plugins"}

    def set_prompts(self) -> dict:
        # Override point: maps namespace -> prompt string.
        return {"main": ":>"}
def import_platform(platform_path: str, api: str):
    """Load a plugin Platform from `platform_path`; `api` is the entry-function name each plugin exposes."""
    return Platform(platform_path, api, message=True)
def is_numberic(string):
    """
    Return True if `string` is an int/float instance or a non-empty string
    that looks like a signed integer or decimal number.

    Fixes two defects of the previous version:
    - ``len(string)`` was evaluated before the ``isinstance`` check, so
      passing an actual int/float raised TypeError instead of returning True;
    - a bare sign ("-"/"+") matched NUMBER_PATTERN, making callers'
      subsequent ``int()``/``float()`` conversion crash.
    """
    if isinstance(string, (int, float)):
        return True
    if not string:
        return False
    # Require at least one digit so a lone "-"/"+" is rejected.
    return bool(NUMBER_PATTERN.match(string)) and any(c.isdigit() for c in string)
def value_translation(arg):
    """
    Convert a raw argument string into its runtime value.

    Numeric strings become int/float, JSON text becomes the parsed object,
    and ``#{var}`` placeholders are substituted from the custom variable
    store; anything else is returned unchanged.
    """
    if is_numberic(arg):
        arg = float(arg) if "." in arg else int(arg)
    else:
        try:
            arg = json.loads(arg)
        except json.JSONDecodeError:
            pass
    if (isinstance(arg, str)):
        # FIX: use raw strings for the regexes — "\w" in a plain literal is an
        # invalid escape sequence (DeprecationWarning now, an error in future
        # Python versions). The patterns themselves are unchanged.
        custom_vars = findall(r"#{(\w+)}", arg)
        # NOTE(review): `match` only anchors at the *start*, so a string that
        # merely begins with a placeholder takes this branch — confirm whether
        # fullmatch was intended.
        if (match(r"#{(\w+)}", arg)):
            arg = custom_get(custom_vars[0], arg)
        else:
            if (not custom_vars):
                return arg
            for var in custom_vars:
                arg = arg.replace("#{%s}" % var, custom_get(var, ''))
    return arg
def args_parse(args: list) -> dict:
    """Parse a shlex-split token list into a kwargs-style dict.

    ``-x`` records a boolean flag (overwritten by a following value),
    ``--name`` names the next value, bare tokens accumulate under the ""
    key as positionals, and repeated values for one name are joined with
    spaces. Numeric tokens that start with "-" (e.g. "-3") are positionals.
    """
    arg_name = ""
    arg_dict = {"": []}
    for each in args:  # walk the tokens
        if each.startswith("-"):
            if len(each) > 2 and each[1] == "-":
                # NOTE(review): a bare "--long" with no following value is
                # silently dropped (only "-short" flags default to True)
                arg_name = each[2:]
            elif (is_numberic(each)):
                arg_dict[""].append(value_translation(each))
            else:
                arg_name = each[1:]
                arg_dict[arg_name] = True
        else:
            if arg_name == "":
                arg_dict[""].append(value_translation(each))
            elif arg_name in arg_dict:
                if (arg_dict[arg_name] is True):
                    arg_dict[arg_name] = value_translation(each)
                else:
                    # several values for the same option: join with spaces
                    arg_dict[arg_name] = f"{arg_dict[arg_name]} {value_translation(each)}"
            else:
                arg_dict[arg_name] = value_translation(each)
    if (not len(arg_dict[""])):
        del arg_dict[""]
    return arg_dict
def sys_exit():
    """Print the goodbye message, close log redirections and hard-exit.

    Uses os._exit so the daemonized input thread cannot keep the process alive.
    """
    print('\n' + gget("leave_message"))
    if (gget("log_filepath")):
        gget("log_stdout").log.close()
        gget("log_stderr").log.close()
    _exit(0)
def loop_main():
    """
    run_loop main function

    Repeatedly reads a command (preloaded or interactive), resolves it against
    the current namespace platform, the "general" platform and the custom
    platform, then dispatches to the matching plugin with parsed arguments.
    """
    gpf = gget("general.pf")
    api = gget("api")
    old_namespace = ''
    while gget("loop"):
        # current namespace
        namespace = gget("namespace")
        tpf = None
        # platform of the current namespace
        npf = gget(f"{namespace}.pf")
        # custom platform
        cpf = gget("custom.pf")
        # namespace changed since the previous iteration (platform switch)
        if (namespace != old_namespace):
            # command list of the new platform
            wordlist = gget(namespace + ".wordlist")
            # {command name -> command arguments} of the new platform
            prefix_wordlist = gget(namespace + ".prefix_wordlist")
            # merge in the argument lists of the "general" platform
            prefix_wordlist = {**prefix_wordlist, **gget("general.prefix_wordlist")}
            # re-initialize readline completion
            readline.set_wordlist(wordlist)
            readline.set_prefix_wordlist(prefix_wordlist)
            # remember which namespace we configured
            old_namespace = namespace
        # --------------------------------------
        # check whether a command was preloaded
        if (gget("preload_command")):
            cmd = gget("preload_command")
            gset("preload_command", None, True)
        else:
            print(gget(f"{namespace}.prompt"), end="", flush=True)
            if (gget("raw_input") is True):
                cmd = input().strip()
            else:
                cmd = readline().strip()
        # store the raw command line
        gset("raw_command", cmd, True)
        # empty input: prompt again
        if (not cmd):
            continue
        try:
            args = shlex.split(cmd)  # tokenize
        except ValueError:
            print(color.red("Invalid command"))
            continue
        # split the command name from its arguments
        if " " in cmd:
            order = args[0]
        else:
            order = cmd
        del args[0]
        # store the raw argument string
        raw_command_args = " ".join(args)
        gset("raw_command_args", raw_command_args, True)
        order = order_alias(order)  # resolve aliases
        # --------------------------------------
        # look the command up in [current platform] / [general] / [custom]
        if order in npf:
            tpf = npf
        elif order in gpf:
            tpf = gpf
        elif order in cpf:
            tpf = cpf
        elif cmd:
            # bug fix: reusing the outer quote inside the f-string
            # (f'...{color.red('...')}') is a SyntaxError before Python 3.12
            print(f'\n{order}: {color.red("Command Not Found")}\n')
        if tpf:
            debug = gget("DEBUG.LOOP")
            try:
                arg_dict = args_parse(args)  # parse arguments
                tpf[order].run(**arg_dict)
            except TypeError as e:
                exc_type, exc_value, exc_tb = exc_info()
                # rewrite the api entry-function name to the user-facing command
                print("[TypeError] %s" % str(e).replace("%s()" % api, "%s()" % order))
                if debug:
                    print_exception(exc_type, exc_value, exc_tb)
            except Exception as e:
                exc_type, exc_value, exc_tb = exc_info()
                print("[%s] %s" % (exc_type.__name__, e))
                if debug:
                    print_exception(exc_type, exc_value, exc_tb)
def run_loop(loop_init_object: Loop_init, leave_message: str = "Bye!"):
    """
    run_loop
    Args:
        loop_init_object (Loop_init): Loop Init class
        leave_message (str, optional): The message when you leave. Defaults to 'Bye!'.
    """
    from threading import Thread
    from time import sleep
    set_namespace("main", callback=False if gget("preload_command") else True)
    gset("leave_message", leave_message)
    t = Thread(target=loop_main)
    # Thread.setDaemon() is deprecated since Python 3.10; assigning the
    # attribute is the supported equivalent
    t.daemon = True
    t.start()
    while gget("loop"):
        try:
            sleep(10)
        except KeyboardInterrupt:
            # ignore Ctrl-C in the supervisor loop; the worker handles input
            continue
        except EOFError:
            break
    sys_exit()
| import json
import shlex
from os import _exit, chdir, getcwd
from re import compile as re_compile
from re import findall, match
from sys import exc_info, path
from traceback import print_exception
from libs.config import custom_get, gget, gset, order_alias, set_namespace, color
from libs.readline import LovelyReadline
from Myplugin import Platform
NUMBER_PATTERN = re_compile(r"^[-+]?\d*(\.?\d+|)$")
STDIN_STREAM = b''
HISTORY = None
HISTORY_POINTER = 0
FROM_HISTORY = False
readline = LovelyReadline()
readline.init({}, {})
"""
api ['main']
history_commands ['main']
leave_message ['main']
namespace ['main']
namespace_folders ['main']
folders_namespace ['main']
root_path ['main']
{platform}.pf ['main']
{platform}.prompt ['main']
{plugin_name}.reverse_alias [namespace]
order_alias [namespace]
special plugin platform:general general commands
special plugin platform:encode Encoders
"""
class Loop_init:
    """Bootstraps the command loop: registers global state, imports every
    plugin platform and configures per-platform completion lists and prompts."""

    def __init__(self, api: str = "run", init_namespace: str = "main"):
        """
        Initialize the loop
        Args:
            api (str, optional): The name of the entry function that is common to all plugins. Defaults to "run".
            init_namespace (str, optional): Initial namespace. Defaults to "main".
        """
        platforms = self.set_platforms()
        gset("api", api)
        gset("loop", True)
        gset("blockexit", False)
        gset("namespace", init_namespace)
        gset("namespace_folders", platforms)
        gset("folders_namespace", {v: k for k, v in platforms.items()})
        root_path = gget("root_path")
        cwd = getcwd()
        # plugin folders are resolved relative to the project root; the
        # caller's cwd is restored at the end of this method
        chdir(root_path)
        # {platform name -> plugin folder}
        for k, v in platforms.items():
            pf = import_platform(v, api)
            gset(k + ".pf", pf)
            # platform -> list of command names
            gset(k + ".wordlist", {"command_wordlist": list(pf.names())})
            # platform -> {command name -> [command arguments, ...]}
            gset(k + ".prefix_wordlist", {command: gget(command + ".arg_wordlist", k)
                 for command in gget(k + ".wordlist")["command_wordlist"]})
        general_wordlist = gget("general.wordlist")["command_wordlist"]
        for k in platforms.keys():  # add the "general" platform's commands to every other platform
            if (k == "general"):
                continue
            wordlist = gget(k + ".wordlist")
            wordlist["command_wordlist"] += general_wordlist
        # set the input prompt of each platform
        for k, v in self.set_prompts().items():
            gset(k + ".prompt", v)
        chdir(cwd)

    def set_platforms(self) -> dict:
        # {namespace -> plugin folder}; override to register more platforms
        return {"main": "main_plugins"}

    def set_prompts(self) -> dict:
        # {namespace -> prompt string}; override to customize prompts
        return {"main": ":>"}
def import_platform(platform_path: str, api: str):
    """Load the plugin platform stored at *platform_path*, using *api* as the common entry-function name."""
    return Platform(platform_path, api, message=True)
def is_numberic(string):
    """Return True if *string* is an int/float or a numeric-looking string.

    Bug fix: the original evaluated ``len(string)`` before the ``isinstance``
    check, so passing an actual int/float raised TypeError instead of
    returning True. Empty strings return False.
    """
    if isinstance(string, (int, float)):
        return True
    return bool(len(string) and NUMBER_PATTERN.match(string))
def value_translation(arg):
    """Convert a raw argument into a typed value and expand ``#{var}`` references.

    Numeric-looking strings become int/float; otherwise JSON decoding is
    attempted. If the result is still a string, ``#{name}`` placeholders are
    resolved via custom_get(); when the argument begins with a placeholder the
    first variable's value is returned directly (preserving its type).
    """
    if is_numberic(arg):
        arg = float(arg) if "." in arg else int(arg)
    else:
        try:
            arg = json.loads(arg)
        except json.JSONDecodeError:
            pass
    if (isinstance(arg, str)):
        # raw strings: "\w" inside a normal literal is an invalid escape
        # sequence (SyntaxWarning on modern Python)
        custom_vars = findall(r"#{(\w+)}", arg)
        if (match(r"#{(\w+)}", arg)):
            arg = custom_get(custom_vars[0], arg)
        else:
            if (not custom_vars):
                return arg
            for var in custom_vars:
                arg = arg.replace("#{%s}" % var, custom_get(var, ''))
    return arg
def args_parse(args: list) -> dict:
    """Parse a shlex-split token list into a kwargs-style dict.

    ``-x`` records a boolean flag (overwritten by a following value),
    ``--name`` names the next value, bare tokens accumulate under the ""
    key as positionals, and repeated values for one name are joined with
    spaces. Numeric tokens that start with "-" (e.g. "-3") are positionals.
    """
    arg_name = ""
    arg_dict = {"": []}
    for each in args:  # walk the tokens
        if each.startswith("-"):
            if len(each) > 2 and each[1] == "-":
                # NOTE(review): a bare "--long" with no following value is
                # silently dropped (only "-short" flags default to True)
                arg_name = each[2:]
            elif (is_numberic(each)):
                arg_dict[""].append(value_translation(each))
            else:
                arg_name = each[1:]
                arg_dict[arg_name] = True
        else:
            if arg_name == "":
                arg_dict[""].append(value_translation(each))
            elif arg_name in arg_dict:
                if (arg_dict[arg_name] is True):
                    arg_dict[arg_name] = value_translation(each)
                else:
                    # several values for the same option: join with spaces
                    arg_dict[arg_name] = f"{arg_dict[arg_name]} {value_translation(each)}"
            else:
                arg_dict[arg_name] = value_translation(each)
    if (not len(arg_dict[""])):
        del arg_dict[""]
    return arg_dict
def sys_exit():
    """Print the goodbye message, close log redirections and hard-exit.

    Uses os._exit so the daemonized input thread cannot keep the process alive.
    """
    print('\n' + gget("leave_message"))
    if (gget("log_filepath")):
        gget("log_stdout").log.close()
        gget("log_stderr").log.close()
    _exit(0)
def loop_main():
    """
    run_loop main function

    Repeatedly reads a command (preloaded or interactive), resolves it against
    the current namespace platform, the "general" platform and the custom
    platform, then dispatches to the matching plugin with parsed arguments.
    """
    gpf = gget("general.pf")
    api = gget("api")
    old_namespace = ''
    while gget("loop"):
        # current namespace
        namespace = gget("namespace")
        tpf = None
        # platform of the current namespace
        npf = gget(f"{namespace}.pf")
        # custom platform
        cpf = gget("custom.pf")
        # namespace changed since the previous iteration (platform switch)
        if (namespace != old_namespace):
            # command list of the new platform
            wordlist = gget(namespace + ".wordlist")
            # {command name -> command arguments} of the new platform
            prefix_wordlist = gget(namespace + ".prefix_wordlist")
            # merge in the argument lists of the "general" platform
            prefix_wordlist = {**prefix_wordlist, **gget("general.prefix_wordlist")}
            # re-initialize readline completion
            readline.set_wordlist(wordlist)
            readline.set_prefix_wordlist(prefix_wordlist)
            # remember which namespace we configured
            old_namespace = namespace
        # --------------------------------------
        # check whether a command was preloaded
        if (gget("preload_command")):
            cmd = gget("preload_command")
            gset("preload_command", None, True)
        else:
            print(gget(f"{namespace}.prompt"), end="", flush=True)
            if (gget("raw_input") is True):
                cmd = input().strip()
            else:
                cmd = readline().strip()
        # store the raw command line
        gset("raw_command", cmd, True)
        # empty input: prompt again
        if (not cmd):
            continue
        try:
            args = shlex.split(cmd)  # tokenize
        except ValueError:
            print(color.red("Invalid command"))
            continue
        # split the command name from its arguments
        if " " in cmd:  # the entered command
            order = args[0]
        else:
            order = cmd
        del args[0]
        # store the raw argument string
        raw_command_args = " ".join(args)
        gset("raw_command_args", raw_command_args, True)
        order = order_alias(order)  # resolve aliases
        # --------------------------------------
        # look the command up in [current platform] / [general] / [custom]
        if order in npf:  # the command exists
            tpf = npf
        elif order in gpf:
            tpf = gpf
        elif order in cpf:
            tpf = cpf
        elif cmd:
            print(f'\n{order}: {color.red("Command Not Found")}\n')
        if tpf:
            debug = gget("DEBUG.LOOP")
            try:
                arg_dict = args_parse(args)  # parse arguments
                tpf[order].run(**arg_dict)
            except TypeError as e:
                exc_type, exc_value, exc_tb = exc_info()
                # rewrite the api entry-function name to the user-facing command
                print("[TypeError] %s" % str(e).replace("%s()" % api, "%s()" % order))
                if debug:
                    print_exception(exc_type, exc_value, exc_tb)
            except Exception as e:
                exc_type, exc_value, exc_tb = exc_info()
                print("[%s] %s" % (exc_type.__name__, e))
                if debug:
                    print_exception(exc_type, exc_value, exc_tb)
def run_loop(loop_init_object: Loop_init, leave_message: str = "Bye!"):
    """
    run_loop
    Args:
        loop_init_object (Loop_init): Loop Init class
        leave_message (str, optional): The message when you leave. Defaults to 'Bye!'.
    """
    from threading import Thread
    from time import sleep
    set_namespace("main", callback=False if gget("preload_command") else True)
    gset("leave_message", leave_message)
    t = Thread(target=loop_main)
    # Thread.setDaemon() is deprecated since Python 3.10; assigning the
    # attribute is the supported equivalent
    t.daemon = True
    t.start()
    while gget("loop"):
        try:
            sleep(10)
        except KeyboardInterrupt:
            # ignore Ctrl-C in the supervisor loop; the worker handles input
            continue
        except EOFError:
            break
    sys_exit()
|
"""A small utility for filling in the README paths to experiment artifacts.
The template contains tags of the form {{filetype.experiment_name}}, which are then
replaced with the urls for each resource.
Note: print a summary of the most recent results for a given dataset via:
python find_latest_checkpoints.py --dataset audiocaps
"""
import argparse
import glob
import importlib
import json
import multiprocessing
import os
import pickle
import re
import subprocess
import time
from collections import OrderedDict, defaultdict
from itertools import zip_longest
from pathlib import Path
from typing import Dict, List, Tuple, Union
import numpy as np
import pylatex
import scipy.stats
import tqdm
from millify import millify
from typeguard import typechecked
from aggregate_logs_and_stats import summarise
@typechecked
def gen_latex_version_of_table(
        latex_table_dir: Path,
        content: List[str],
        table_name: str,
        branch_name: str = "dev",
) -> Path:
    """Convert the markdown table at the tail of *content* into a LaTeX table.

    The trailing rows starting with "|" are parsed as a markdown table,
    rendered with pylatex, written to latex_table_dir/<table_name>.txt, and a
    repo-relative markdown link to that file is returned.
    """
    msg = "Expected latexify tag to be placed directly following a table"
    assert content[-1].startswith("|"), msg
    # count the consecutive table rows at the end of the content
    num_table_rows = [x.startswith("|") for x in reversed(content)].index(False)
    assert num_table_rows > 2, "expected at least three table rows (including header)"
    # un-reverse the trailing rows to restore document order
    markdown_table = list(reversed(content[-1:-(num_table_rows + 1):-1]))
    col_names = [x.strip() for x in markdown_table[0].split("|")[1:-1]]
    # remove last column of links
    remove_links = col_names[-1].lower() == "links"
    if remove_links:
        col_names.pop()
    cols = "|".join(["c" for _ in range(len(col_names))])
    table = pylatex.Tabular(cols)
    table.add_hline()
    table.add_hline()
    table.add_row(tuple(col_names))
    table.add_hline()
    # skip the header and the markdown separator row
    for row in markdown_table[2:]:
        tokens = [x.strip() for x in row.split("|")[1:-1]]
        if remove_links:
            tokens.pop()
        row_contents = []
        for token in tokens:
            mean_regexp = r"<sub><sup>([0-9]+[.][0-9]+)<sub>"
            # std_regexp = r"<sub>\(([0-9]+[.][0-9]+|[a-z]+)\)<\/sub>"
            std_regexp = r"<sub>\(([0-9]+[.][0-9]+e*-*[0-9]*|[a-z]+|)\)<\/sub>"
            mean_strs = re.findall(mean_regexp, token)
            if mean_strs:
                assert len(mean_strs) == 1, "expected a unique mean specifier"
                std_strs = re.findall(std_regexp, token)
                assert len(std_strs) == 1, "expected a unique std specifier"
                mean_str, std_str = mean_strs[0], std_strs[0]
                # render "mean (std)" cells as $mean_{\pm std}$
                raw_str = "$" + mean_str + r"_{\pm" + std_str + r"}$"
                token = pylatex.NoEscape(raw_str)
            row_contents.append(token)
        table.add_row(tuple(row_contents))
    table.add_hline()
    latex_str = table.dumps()
    latex_table_dir.mkdir(exist_ok=True, parents=True)
    dest_path = latex_table_dir / f"{table_name}.txt"
    with open(dest_path, "w") as f:
        f.write(latex_str)
    # link relative to the repository tree on the given branch
    github_project_root = f"/../../tree/{branch_name}/"
    markdown_link = Path(f"{github_project_root}{dest_path}")
    return markdown_link
@typechecked
def generate_url(root_url: str, target: str,
                 exp_name: str, experiments: Dict,
                 fnames: dict, seed_folders: dict) -> str:
    """Build the public URL of one artifact (log/config/model) of an experiment."""
    # server-side parent folder and artifact filename for each target kind
    parent_for = {"log": "log", "log_TT": "logTT",
                  "config": "models", "model": "models"}
    fname_for = {"log": fnames[exp_name], "log_TT": fnames[exp_name],
                 "config": "config.json", "model": "trained_model.pth"}
    group_id, timestamp = experiments[exp_name]
    url = (Path(root_url) / parent_for[target] / exp_name / group_id
           / seed_folders[exp_name] / timestamp / fname_for[target])
    return str(url)
def small_font_str(tokens):
    """Wrap each token in <sub><sup> markdown tags and join as a table row."""
    return " | ".join(f"<sub><sup>{token}</sup></sub>" for token in tokens)
def sync_files(experiments, save_dir, webserver, web_dir):
    """Rsync experiment artifacts (logs, models, configs) to the web server."""
    filetypes = {
        "log": ["summary-seed-1_seed-2_seed-3.json"],
        "log_TT": ["summary-seed-1_seed-2_seed-3.json"],
        "models": ["trained_model.pth", "config.json"]
    }
    for key, (group_id, timestamp) in experiments.items():
        # copy experiment artifacts
        for filetype, fnames in filetypes.items():
            for fname in fnames:
                if timestamp.startswith("TODO"):
                    # experiment not finished yet
                    continue
                rel_path = Path(group_id) / "seed-1" / timestamp / fname
                local_path = Path(save_dir) / filetype / key / rel_path
                server_path = Path(web_dir).expanduser() / filetype / key / rel_path
                if not local_path.exists() and "/log/" in str(local_path):
                    # fall back to the historical log location
                    old, new = "/log/", "/log-includes-some-final-exps/"
                    local_path = Path(str(local_path).replace(old, new))
                    msg = f"neither original log nor historical data exist ({local_path})"
                    assert local_path.exists(), msg
                dest = f"{webserver}:{str(server_path)}"
                print(f"{key} -> {webserver} [{local_path} -> {server_path}]")
                subprocess.call(["ssh", webserver, "mkdir -p", str(server_path.parent)])
                rsync_args = ["rsync", "-hvrPt", str(local_path), dest]
                # bug fix: reusing the outer double quote inside the f-string
                # is a SyntaxError before Python 3.12
                print(f"running command {' '.join(rsync_args)}")
                subprocess.call(rsync_args)
@typechecked
def model_specs2path(feat_aggregation: Dict, keep: set, tag: str = None) -> List[Path]:
    """Map feature-aggregation specs to relative paths of their pickled features.

    Only specs whose key is in *keep* are considered; *tag*, when given, is
    appended to each filename stem.
    """
    feat_paths = []
    for model_spec, aggs in feat_aggregation.items():
        if model_spec not in keep:
            continue
        feat_type, model_name, _ = model_spec.split(".")
        # bug fix: reusing the outer double quote inside the f-string is a
        # SyntaxError before Python 3.12
        base = f"aggregated_{feat_type.replace('-', '_')}"
        required = ("fps", "pixel_dim", "stride")
        fps, pixel_dim, stride = [aggs.get(x, None) for x in required]
        if feat_type in {"facecrops", "faceboxes"}:
            base = f"{base}_{fps}fps_{pixel_dim}px_stride{stride}"
        elif feat_type not in {"ocr", "speech", "audio", "pann", "syncnet", "vggsound"}:
            base = f"{base}_{fps}fps_{pixel_dim}px_stride{stride}"
        for option in "offset", "inner_stride", "num_segments":
            if aggs.get(option, None) is not None:
                base += f"_{option}{aggs[option]}"
        # one path per temporal aggregation (e.g. "avg-max" -> two files)
        for agg in aggs["temporal"].split("-"):
            fname = f"{model_name}-{agg}"
            if aggs["type"] == "logits":
                fname = f"{fname}-logits"
            if tag is not None:
                fname += f"-{tag}"
            feat_paths.append(Path(base) / f"{fname}.pickle")
    return feat_paths
@typechecked
def dataset_paths(
        dataset: str
) -> Tuple[Path, Dict[str, Union[str, List[str], Dict, Path]]]:
    """Resolve the feature root directory and dataset-specific paths for *dataset*.

    Imports data_loader.<Class>_dataset and delegates to the class's
    ``dataset_paths`` attribute.
    """
    # map CLI dataset names to dataset class names
    name_map = {
        "activity-net": "ActivityNet",
        "queryd": "QuerYD",
        "querydsegments": "QuerYDSegments",
        "clotho": "CLOTHO",
        "audiocaps": "AudioCaps"
    }
    if dataset in set(name_map.values()):
        class_name = dataset
    else:
        class_name = name_map[dataset]
    mod = importlib.import_module(f"data_loader.{class_name}_dataset")
    get_dataset_paths = getattr(getattr(mod, class_name), "dataset_paths")
    # "activity-net" keeps its hyphenated folder name on disk
    if dataset == "activity-net":
        data_dir = dataset
    else:
        data_dir = class_name
    root_feat = Path(f"data/{data_dir}/structured-symlinks")
    paths = get_dataset_paths()
    return root_feat, paths
def generate_tar_lists(save_dir, experiments):
    """Write, per dataset, the list of feature files needed to reproduce *experiments*.

    Reads each experiment's config.json, collects the feature/text/split paths
    it references, and writes them to misc/datasets/<dataset>/tar_include.txt.
    """
    all_feat_paths = {}
    for exp_name, (group_id, timestamp) in tqdm.tqdm(experiments.items()):
        rel_path = Path(group_id) / "seed-1" / timestamp / "config.json"
        config_path = Path(save_dir) / "models" / exp_name / rel_path
        with open(config_path, "r") as f:
            config = json.load(f)
        feat_aggregation = config["data_loader"]["args"]["feat_aggregation"]
        # experiment names are "<dataset>-train..."
        dataset_name = exp_name.split("-train")[0]
        if dataset_name not in all_feat_paths:
            all_feat_paths[dataset_name] = set()
        split_names = [config["data_loader"]["args"]["split_name"]]
        if "eval_settings" in config and config["eval_settings"]:
            test_split = config["eval_settings"]["data_loader"]["args"]["split_name"]
            split_names.append(test_split)
        keep = set(config["experts"]["modalities"])
        text_feat = config["experts"]["text_feat"]
        root_feat, paths = dataset_paths(dataset_name)
        # restrict aggregation specs to features known by the dataset
        modern_feat_agg = {key: val for key, val in feat_aggregation.items()
                           if key in paths["feature_names"]}
        feat_paths = model_specs2path(modern_feat_agg, keep)
        all_feat_paths[dataset_name].update({root_feat / x for x in feat_paths})
        for key, feat_list in paths["custom_paths"].items():
            for feat_path in feat_list:
                all_feat_paths[dataset_name].add(root_feat / feat_path)
        text_paths = [root_feat / rel_path for rel_path in
                      paths["text_feat_paths"][text_feat].values()]
        all_feat_paths[dataset_name].update(set(text_paths))
        all_feat_paths[dataset_name].add(root_feat / paths["raw_captions_path"])
        if "dict_youtube_mapping_path" in paths:
            all_feat_paths[dataset_name].add(
                root_feat / paths["dict_youtube_mapping_path"])
        for split_name in split_names:
            split_paths = set(root_feat / x for x in
                              paths["subset_list_paths"][split_name].values())
            all_feat_paths[dataset_name].update(split_paths)
    for dataset_name, paths in all_feat_paths.items():
        tar_include_list = Path("misc") / "datasets" / dataset_name / "tar_include.txt"
        tar_include_list.parent.mkdir(exist_ok=True, parents=True)
        with open(tar_include_list, "w") as f:
            for path in sorted(paths):
                print(f"Writing {path} to {tar_include_list}")
                f.write(f"{path}\n")
@typechecked
def parse_geom_means_from_val_runs(log: List[str], group: str) -> List[float]:
    """TODO: Samuel - this is redundant due to log_summary() func in log_parser
    should refactor after deadline.

    Collects the final val R1/R5/R10 values per random seed from *log* and
    returns the geometric mean of (R1, R5, R10) for each seed.
    """
    subset = "val"
    # sanity check, should not be used for experiments with test sets
    assert sum(["test_t2v" in x for x in log]) == 0, "should not parse test runs"
    scores = {
        "R1": defaultdict(list),
        "R5": defaultdict(list),
        "R10": defaultdict(list),
    }
    # Regex tag for finding the seed
    seed_tag = "Setting experiment random seed to"
    # bug fix: initialise so a metrics row preceding any seed row trips the
    # assertion below instead of raising NameError
    current_seed = None
    for row in log:
        if seed_tag in row:
            # Search for the log file entry describing the current random seed
            # (raw string: "\d" in a plain literal is an invalid escape)
            match = re.search(seed_tag + r" (\d+)$", row)
            assert len(match.groups()) == 1, "expected a single regex match"
            current_seed = match.groups()[0]
        if f"{subset}_{group}_metrics" in row:
            tokens = row.split(" ")
            for key in scores:
                tag = f"{subset}_{group}_metrics_{key}:"
                if tag in tokens:
                    pos = tokens.index(tag) + 1
                    val = float(tokens[pos])
                    assert current_seed is not None, "failed to determine the seed"
                    scores[key][current_seed].append(val)
    # keep only the last value logged for each (metric, seed)
    agg_scores = {key: [] for key in scores}
    for metric, subdict in scores.items():
        for seed, values in subdict.items():
            agg_scores[metric].append(values[-1])
    geometric_means = []
    for r1, r5, r10 in zip(agg_scores["R1"], agg_scores["R5"], agg_scores["R10"]):
        geometric_means.append(scipy.stats.mstats.gmean([r1, r5, r10]))
    return geometric_means
def parse_log(log_path):
    """Parse one summary log file into {"t2v": {metric: (mean, std)}, "v2t": ..., "params": int}.

    For each retrieval direction the R1/R5/R10/R50/MedR/MeanR rows following
    the "loaded log file" tag are read; geometric means are recovered from the
    summary lines (or recomputed from val runs for fixed-length experiments).
    """
    with open(log_path, "r") as f:
        log = f.read().splitlines()
    results = {}
    for group in {"t2v", "v2t"}:
        tag = f"[{group}] loaded log file"
        results[group] = OrderedDict()
        presence = [tag in row for row in log]
        msg = f"expected single occurence of log tag, found {sum(presence)} in {log_path}"
        assert sum(presence) == 1, msg
        metrics = ["R1", "R5", "R10", "R50", "MedR", "MeanR"]
        pos = np.where(presence)[0].item()
        # fixed-training-length runs emit one extra header row before the metrics
        if "fixed training length" in log[pos + 2]:
            pos += 3
        else:
            pos += 2
        rows = log[pos: pos + len(metrics)]
        for row, metric in zip(rows, metrics):
            row = row.replace("INFO:summary:", "")
            tokens = row.split(" ")
            if tokens[-3] != f"{metric}:":
                raise ValueError(f"Unexpteced log format [{row}]")
            assert tokens[-3] == f"{metric}:", f"unexpected row format {row}"
            # rows look like "... R1: <mean>, <std>"
            mean, std = float(tokens[-2].split(",")[0]), float(tokens[-1])
            results[group][metric] = (mean, std)
        # geometric means are recomputed from summaries
        tag = f"test_{group}_metrics_geometric_mean"
        nan_tag = "INFO:summary:R1: nan"
        matches = [x for x in log if tag in x]
        if len(matches) in {1, 2, 3}:
            geoms = [float(x.split()[-1].replace("INFO:summary:", "")) for x in matches]
            if len(matches) < 3:
                print(f"WARNING: Getting stds from {len(matches)} runs for {log_path}!")
        elif sum([nan_tag in x for x in log]) > 0:
            # degenerate run: propagate NaNs
            geoms = [np.nan, np.nan, np.nan]
        else:
            # fall back to parsing val runs (only allowed for known experiments)
            valid_exceptions = ["miechfeats-moee", "miech-ce", "jsfusion"]
            msg = f"Did not expect fixed length training for {log_path}"
            assert any([x in str(log_path) for x in valid_exceptions]), msg
            geoms = parse_geom_means_from_val_runs(log, group=group)
        if len(geoms) == 1:
            # a single run has no spread
            std = np.nan
        else:
            std = np.std(geoms)
        results[group]["geom"] = (round(np.mean(geoms),1), round(std, 1))
    for row in log:
        if "Trainable parameters" in row:
            param_token = row.split(" ")[-1].replace("INFO:summary:", "")
            results["params"] = int(param_token)
    return results
@typechecked
def multiprocessing_parsing(exp_name: str, meta: list,
                            save_dir: Path, refresh_summaries: bool, teachText: bool,
                            pickle_files: str):
    """Parse one experiment's log and cache the results as a pickle.

    Skips work when the pickle already exists and no refresh was requested.
    """
    if os.path.exists(Path(save_dir) / pickle_files / f'log_results_{exp_name}.pkl') is False or refresh_summaries is True:
        group_id, timestamp = meta
        _log_path = "log"
        if timestamp.startswith("TODO"):
            # bug fix: the original wrote ``log_results[exp_name] = ...`` where
            # ``log_results`` is not defined in this scope, raising NameError;
            # unfinished experiments get a placeholder and are not cached.
            log_results = {"timestamp": "TODO", "results": {}}
        else:
            # first seed folder for this experiment group
            seed_folder = sorted(os.listdir(Path(save_dir) / _log_path / Path(exp_name) / group_id))[0]
            files_in_seed_folder = os.listdir(Path(save_dir) / _log_path / Path(exp_name) / group_id / seed_folder / Path(timestamp))
            # pick the first summary json (skipping backups)
            for file in files_in_seed_folder:
                if ".json" in file and ".bak" not in file:
                    fname = file
                    break
            rel_fname = Path(timestamp) / fname
            rel_path = Path(exp_name) / group_id / seed_folder / rel_fname
            log_path = Path(save_dir) / _log_path / rel_path
            if refresh_summaries:
                summarise(group_id=group_id, log_dir=Path(save_dir) / _log_path)
            results = parse_log(log_path)
            log_results = {"timestamp": timestamp, "results": results}
            with open(Path(save_dir) / pickle_files / f'log_results_{exp_name}.pkl', 'wb') as f:
                pickle.dump([log_results, fname, seed_folder], f)
            print(f"Saved experiment {exp_name}")
    else:
        print(f"Experiment log_results_{exp_name}.pkl already saved")
@typechecked
def parse_results(
        experiments: Dict[str, List[str]],
        save_dir: Path,
        refresh_summaries: bool,
        teachText: bool,
) -> Tuple[Dict[str, Dict[str, Union[str, Dict]]],
           dict, dict]:
    """Parse (or load cached) log results for every experiment.

    Returns (log_results, fnames, seed_folders), each keyed by experiment name.
    """
    starttime = time.time()
    processes = []
    experiments_items = experiments.items()
    # teachText results are cached in their own folder
    pickle_files = "pickle_files"
    if teachText:
        pickle_files = "pickle_files_teachText"
    if os.path.exists(Path(save_dir) / pickle_files) is False:
        os.mkdir(Path(save_dir) / pickle_files)
    # NOTE(review): parsing used to fan out via multiprocessing; it now runs
    # sequentially over the experiments.
    for exp_name, meta in experiments_items:
        multiprocessing_parsing(exp_name, meta, save_dir, refresh_summaries, teachText, pickle_files)
    print('That took {} seconds'.format(time.time() - starttime))
    log_results = {}
    fnames = {}
    seed_folders = {}
    # load every per-experiment pickle written above
    for exp_name, _ in experiments_items:
        with open(Path(save_dir) / pickle_files / f'log_results_{exp_name}.pkl',
                  'rb') as f:
            log_results[exp_name],\
                fnames[exp_name],\
                seed_folders[exp_name] = pickle.load(f)
    # cache the aggregate for convenience
    if not teachText:
        with open(Path(save_dir) / 'log_results2.pkl', 'wb') as f:
            pickle.dump([log_results, fnames, seed_folders], f)
    else:
        with open(Path(save_dir) / 'log_results_teachText.pkl', 'wb') as f:
            pickle.dump([log_results, fnames, seed_folders], f)
    return log_results, fnames, seed_folders
def generate_results_string(target, exp_name, results, latexify, drop=None):
    """Format the (mean, std) metrics of one experiment as a README or LaTeX cell."""
    stats = results[exp_name]["results"][target]
    print(f"Filling template values for {exp_name}")
    prepad = False  # parity with original: never set, so "\prepad" is never inserted
    rendered = []
    for metric, (mean, std) in stats.items():
        if drop and metric in drop:
            continue
        print(f"{metric}: {mean} ({std})")
        if not latexify:
            rendered.append(f"{mean}<sub>({std})</sub>")
            continue
        pieces = ["&$", f"{mean}_{{\\pm{std}}}$"]
        if prepad:
            pieces.insert(1, r"\prepad")
        rendered.append(" ".join(pieces))
    return small_font_str(rendered)
def generate_readme(
        experiments: Dict[str, List[str]],
        root_url: str,
        readme_templates: List[Path],
        readme_dests: List[Path],
        results_path: Path,
        latex_table_dir: Path,
        save_dir: Path,
        latexify: bool,
        keep_mnr: bool,
        refresh_summaries: bool,
        results: Dict,
        fnames: Dict,
        seed_folders: Dict,
        append_to_existing_readme: bool,
):
    """Fill README templates with experiment links/results and write them out.

    Two passes per template: first expand <<subtemplate:src:dest>> includes,
    then substitute {{exp_name.target}} tags with artifact URLs, formatted
    result strings, parameter counts, or generated LaTeX-table links.
    """
    for readme_template, readme_dest in zip(readme_templates, readme_dests):
        with open(readme_template, "r") as f:
            readme = f.read().splitlines()
        # insert sub-templates
        full_readme = []
        for row in readme:
            regex = r"\<\<(.*?)\>\>"
            matched = False
            for match in re.finditer(regex, row):
                matched = True
                groups = match.groups()
                assert len(groups) == 1, "expected single group"
                subtemplate_path, src_name, dest_name = groups[0].split(":")
                with open(subtemplate_path, "r") as f:
                    subtemplate = f.read().splitlines()
                subrows = []
                for subrow in subtemplate:
                    drop_subrow = False
                    # substitute the source dataset name for the destination one
                    subrow = subrow.replace(src_name, dest_name)
                    subrow = subrow.replace(src_name.upper(), dest_name.upper())
                    # Handle the missing audio modalities of MSVD
                    if dest_name == "msvd":
                        for tag in ("speech", "audio"):
                            # drop experiments for which the audio/speech features form
                            # the control variable
                            if f"-{tag}." in subrow:
                                print("skipping", subrow)
                                drop_subrow = True
                                break
                            # remove audio features from other experiments
                            subrow = subrow.replace(f"-{tag}", "")
                    if not drop_subrow:
                        subrows.append(subrow)
                full_readme.extend(subrows)
            if not matched:
                full_readme.append(row)
        generated = []
        for row in full_readme:
            edits = []
            regex = r"\{\{(.*?)\}\}"
            for match in re.finditer(regex, row):
                groups = match.groups()
                assert len(groups) == 1, "expected single group"
                exp_name, target = groups[0].split(".")
                if target.startswith("latexify"):
                    # emit a LaTeX version of the table generated so far
                    latex_link = gen_latex_version_of_table(
                        content=generated[:],
                        table_name=exp_name,
                        latex_table_dir=latex_table_dir,
                    )
                    token = f"[latex]({latex_link}) | | | | | | | |"
                elif results[exp_name]["timestamp"] == "TODO":
                    token = "TODO"
                elif target in {"config", "model", "log", "log_TT"}:
                    token = generate_url(root_url, target, exp_name,
                                         experiments=experiments,
                                         fnames=fnames,
                                         seed_folders=seed_folders)
                elif target in {"t2v", "v2t", "geomt2v", "geomv2t"}:
                    # "geom*" targets show only the geometric mean
                    if not "geom" in target:
                        drop = {"geom"}
                    else:
                        drop = {}
                    target_ = target.split("geom")[-1]
                    token = generate_results_string(target_, exp_name, results,
                                                    drop=drop, latexify=latexify)
                elif target in {"short-t2v", "short-v2t"}:
                    if keep_mnr:
                        drop = {"R50", "geom"}
                    else:
                        drop = {"R50", "MeanR", "geom"}
                    target_ = target.split("-")[1]
                    token = generate_results_string(target_, exp_name, results,
                                                    drop=drop, latexify=latexify)
                elif target in {"params"}:
                    token = millify(results[exp_name]["results"]["params"], precision=2)
                edits.append((match.span(), token))
            if edits:
                # invert the spans: splice replacement tokens between the
                # unmatched stretches of the original row
                spans = [(None, 0)] + [x[0] for x in edits] + [(len(row), None)]
                inverse_spans = [(x[1], y[0]) for x, y in zip(spans, spans[1:])]
                tokens = [row[start:stop] for start, stop in inverse_spans]
                urls = [str(x[1]) for x in edits]
                new_row = ""
                for token, url in zip_longest(tokens, urls, fillvalue=""):
                    new_row += token + url
                row = new_row
            generated.append(row)
        if not append_to_existing_readme:
            with open(readme_dest, "w") as f:
                f.write("\n".join(generated))
        else:
            with open(readme_dest, "a") as f:
                f.write("\n".join(generated))
def parse_generate_readme(
        experiments: Dict[str, List[str]],
        root_url: str,
        readme_templates: List[Path],
        readme_dests: List[Path],
        results_path: Path,
        latex_table_dir: Path,
        save_dir: Path,
        latexify: bool,
        keep_mnr: bool,
        refresh_summaries: bool,
        drop_experiments_hq: bool,
        results_path_teachText: Path,
        experiments_teachText: Dict[str, List[str]],
        teachText_template: Path,
):
    """Parse all experiment logs and regenerate the README files.

    Unless drop_experiments_hq is set, the teachText experiments are parsed
    first and written with their own template; the regular experiments are
    then appended to the same README files.
    """
    results, fnames, seed_folders = parse_results(experiments=experiments,
                                                  save_dir=save_dir,
                                                  refresh_summaries=refresh_summaries,
                                                  teachText=False,
                                                  )
    append_to_existing_readme = False
    with open(results_path, "w") as f:
        json.dump(results, f, indent=4, sort_keys=False)
    if not drop_experiments_hq:
        results_teachText, fnames_teachText, seed_folders_teachText = parse_results(
            experiments=experiments_teachText,
            save_dir=save_dir,
            refresh_summaries=refresh_summaries,
            teachText=True,
        )
        # bug fix: the teachText results file previously received the regular
        # `results` dict; dump the teachText results instead
        with open(results_path_teachText, "w") as f:
            json.dump(results_teachText, f, indent=4, sort_keys=False)
        generate_readme(experiments=experiments_teachText,
                        root_url=root_url,
                        readme_templates=[teachText_template],
                        readme_dests=readme_dests,
                        results_path=results_path_teachText,
                        latex_table_dir=latex_table_dir,
                        save_dir=save_dir,
                        latexify=latexify,
                        keep_mnr=keep_mnr,
                        refresh_summaries=refresh_summaries,
                        results=results_teachText,
                        fnames=fnames_teachText,
                        seed_folders=seed_folders_teachText,
                        append_to_existing_readme=False,
                        )
        # regular experiments are appended after the teachText section
        append_to_existing_readme = True
    generate_readme(experiments=experiments,
                    root_url=root_url,
                    readme_templates=readme_templates,
                    readme_dests=readme_dests,
                    results_path=results_path,
                    latex_table_dir=latex_table_dir,
                    save_dir=save_dir,
                    latexify=latexify,
                    keep_mnr=keep_mnr,
                    refresh_summaries=refresh_summaries,
                    results=results,
                    fnames=fnames,
                    seed_folders=seed_folders,
                    append_to_existing_readme=append_to_existing_readme,
                    )
def main():
    """CLI entry point: sync artifacts to the web server or regenerate READMEs."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--save_dir", default="data/saved", type=Path)
    parser.add_argument("--webserver", default="login.robots.ox.ac.uk")
    parser.add_argument("--results_path", default="misc/results.json", type=Path)
    parser.add_argument("--results_path_teachText", default="misc/results_teachText.json", type=Path)
    parser.add_argument("--experiments_path", default="misc/experiments.json")
    parser.add_argument("--experiments_teachText", default="misc/experiments_teachText.json")
    parser.add_argument("--readme_template", default="misc/README-template.md")
    parser.add_argument("--teachText_template", default="misc/README-teachText.md")
    parser.add_argument("--latexify", action="store_true")
    parser.add_argument("--drop_experiments_hq", action="store_true")
    parser.add_argument("--keep_mnr", action="store_true")
    parser.add_argument("--refresh_summaries", action="store_true")
    parser.add_argument("--readme_dest", default="README.md")
    parser.add_argument("--latex_table_dir", default="latex-tables", type=Path)
    parser.add_argument("--ablation_readme_dest", default="misc/ablations.md")
    parser.add_argument("--challenge_readme_dest", default="misc/challenge.md")
    parser.add_argument("--ablation_readme_template",
                        default="misc/ablations-template.md")
    parser.add_argument("--challenge_readme_template",
                        default="misc/README-challenge-template.md")
    parser.add_argument("--task", default="generate_readme",
                        choices=["sync_files", "generate_readme"])
    parser.add_argument(
        "--web_dir",
        default="/projects/vgg/vgg/WWW/research/collaborative-experts/data",
    )
    parser.add_argument(
        "--root_url",
        default="http://www.robots.ox.ac.uk/~vgg/research/collaborative-experts/data",
    )
    parser.add_argument("--only_one_readme", action="store_true")
    args = parser.parse_args()
    # experiment registries: {exp_name -> [group_id, timestamp]}
    with open(args.experiments_path, "r") as f:
        experiments = json.load(f)
    with open(args.experiments_teachText, 'r') as f:
        experiments_teachText = json.load(f)
    if args.task == "sync_files":
        sync_files(
            web_dir=args.web_dir,
            save_dir=args.save_dir,
            webserver=args.webserver,
            experiments=experiments,
        )
    elif args.task == "generate_readme":
        readme_dests = [
            args.readme_dest,
            args.ablation_readme_dest,
            args.challenge_readme_dest,
        ]
        readme_templates = [
            args.readme_template,
            args.ablation_readme_template,
            args.challenge_readme_template,
        ]
        # optionally restrict generation to the main README only
        if args.only_one_readme is True:
            readme_dests = [
                args.readme_dest,
            ]
            readme_templates = [
                args.readme_template,
            ]
        parse_generate_readme(
            root_url=args.root_url,
            save_dir=args.save_dir,
            latexify=args.latexify,
            experiments=experiments,
            latex_table_dir=args.latex_table_dir,
            keep_mnr=args.keep_mnr,
            readme_dests=readme_dests,
            results_path=args.results_path,
            readme_templates=readme_templates,
            refresh_summaries=args.refresh_summaries,
            drop_experiments_hq=args.drop_experiments_hq,
            results_path_teachText=args.results_path_teachText,
            experiments_teachText=experiments_teachText,
            teachText_template=args.teachText_template,
        )


if __name__ == "__main__":
    main()
| """A small utility for filling in the README paths to experiment artifacts.
The template contains tags of the form {{filetype.experiment_name}}, which are then
replaced with the urls for each resource.
Note: print a summary of the most recent results for a given dataset via:
python find_latest_checkpoints.py --dataset audiocaps
"""
import argparse
import glob
import importlib
import json
import multiprocessing
import os
import pickle
import re
import subprocess
import time
from collections import OrderedDict, defaultdict
from itertools import zip_longest
from pathlib import Path
from typing import Dict, List, Tuple, Union
import numpy as np
import pylatex
import scipy.stats
import tqdm
from millify import millify
from typeguard import typechecked
from aggregate_logs_and_stats import summarise
@typechecked
def gen_latex_version_of_table(
    latex_table_dir: Path,
    content: List[str],
    table_name: str,
    branch_name: str = "dev",
) -> Path:
    """Convert the markdown table at the tail of *content* into a LaTeX tabular.

    Args:
        latex_table_dir: directory where the generated ``.txt`` LaTeX file is written.
        content: README lines generated so far; the table to convert must be the
            trailing run of lines starting with ``|``.
        table_name: basename (without extension) for the output file.
        branch_name: git branch used when building the returned repo-relative link.

    Returns:
        A repo-relative markdown link path pointing at the written LaTeX file.
    """
    msg = "Expected latexify tag to be placed directly following a table"
    assert content[-1].startswith("|"), msg
    # Count the trailing rows that belong to the table (markdown rows start with "|").
    num_table_rows = [x.startswith("|") for x in reversed(content)].index(False)
    assert num_table_rows > 2, "expected at least three table rows (including header)"
    # Slice out the last `num_table_rows` lines in original order.
    markdown_table = list(reversed(content[-1:-(num_table_rows + 1):-1]))
    col_names = [x.strip() for x in markdown_table[0].split("|")[1:-1]]
    # remove last column of links
    remove_links = col_names[-1].lower() == "links"
    if remove_links:
        col_names.pop()
    cols = "|".join(["c" for _ in range(len(col_names))])
    table = pylatex.Tabular(cols)
    table.add_hline()
    table.add_hline()
    table.add_row(tuple(col_names))
    table.add_hline()
    # Skip the header and the |---|---| separator row (markdown_table[0:2]).
    for row in markdown_table[2:]:
        tokens = [x.strip() for x in row.split("|")[1:-1]]
        if remove_links:
            tokens.pop()
        row_contents = []
        for token in tokens:
            # Cells look like <sub><sup>MEAN<sub>(STD)</sub></sup></sub>; rewrite
            # them as $MEAN_{\pm STD}$ for LaTeX.
            mean_regexp = r"<sub><sup>([0-9]+[.][0-9]+)<sub>"
            # std_regexp = r"<sub>\(([0-9]+[.][0-9]+|[a-z]+)\)<\/sub>"
            std_regexp = r"<sub>\(([0-9]+[.][0-9]+e*-*[0-9]*|[a-z]+|)\)<\/sub>"
            mean_strs = re.findall(mean_regexp, token)
            if mean_strs:
                assert len(mean_strs) == 1, "expected a unique mean specifier"
                std_strs = re.findall(std_regexp, token)
                assert len(std_strs) == 1, "expected a unique std specifier"
                mean_str, std_str = mean_strs[0], std_strs[0]
                raw_str = "$" + mean_str + r"_{\pm" + std_str + r"}$"
                token = pylatex.NoEscape(raw_str)
            row_contents.append(token)
        table.add_row(tuple(row_contents))
    table.add_hline()
    latex_str = table.dumps()
    latex_table_dir.mkdir(exist_ok=True, parents=True)
    dest_path = latex_table_dir / f"{table_name}.txt"
    with open(dest_path, "w") as f:
        f.write(latex_str)
    # Build a link that resolves from within the rendered GitHub README.
    github_project_root = f"/../../tree/{branch_name}/"
    markdown_link = Path(f"{github_project_root}{dest_path}")
    return markdown_link
@typechecked
def generate_url(root_url: str, target: str,
                 exp_name: str, experiments: Dict,
                 fnames: dict, seed_folders: dict) -> str:
    """Build the public URL of one artifact (log/config/model) for an experiment.

    Args:
        root_url: base URL under which all artifacts are hosted.
        target: artifact kind, one of "log", "log_TT", "config", "model".
        exp_name: experiment key into `experiments`, `fnames` and `seed_folders`.
        experiments: maps experiment name -> (group_id, timestamp).
        fnames: maps experiment name -> summary log filename.
        seed_folders: maps experiment name -> seed subdirectory name.

    Returns:
        The artifact URL as a string.
    """
    # (server parent directory, artifact filename) for each target kind.
    layout = {
        "log": ("log", fnames[exp_name]),
        "log_TT": ("logTT", fnames[exp_name]),
        "config": ("models", "config.json"),
        "model": ("models", "trained_model.pth"),
    }
    parent, artifact = layout[target]
    group_id, timestamp = experiments[exp_name]
    full_path = (Path(root_url) / parent / exp_name / group_id
                 / seed_folders[exp_name] / timestamp / artifact)
    return str(full_path)
def small_font_str(tokens):
    """Join *tokens* with " | ", wrapping each one in small-font HTML tags."""
    wrapped = (f"<sub><sup>{tok}</sup></sub>" for tok in tokens)
    return " | ".join(wrapped)
def sync_files(experiments, save_dir, webserver, web_dir):
    """Rsync local experiment artifacts (logs, models, configs) to the webserver.

    Args:
        experiments: maps experiment name -> (group_id, timestamp); entries whose
            timestamp starts with "TODO" are skipped.
        save_dir: local root containing ``<filetype>/<key>/<group>/seed-1/<ts>/...``.
        webserver: ssh host to copy to.
        web_dir: remote root directory mirroring the local layout.
    """
    # Filenames expected for each artifact category.
    filetypes = {
        "log": ["summary-seed-1_seed-2_seed-3.json"],
        "log_TT": ["summary-seed-1_seed-2_seed-3.json"],
        "models": ["trained_model.pth", "config.json"]
    }
    for key, (group_id, timestamp) in experiments.items():
        # copy experiment artifacts
        for filetype, fnames in filetypes.items():
            for fname in fnames:
                if timestamp.startswith("TODO"):
                    continue
                rel_path = Path(group_id) / "seed-1" / timestamp / fname
                local_path = Path(save_dir) / filetype / key / rel_path
                server_path = Path(web_dir).expanduser() / filetype / key / rel_path
                if not local_path.exists() and "/log/" in str(local_path):
                    # try historical logs
                    old, new = "/log/", "/log-includes-some-final-exps/"
                    local_path = Path(str(local_path).replace(old, new))
                    msg = f"neither original log nor historical data exist ({local_path})"
                    assert local_path.exists(), msg
                dest = f"{webserver}:{str(server_path)}"
                print(f"{key} -> {webserver} [{local_path} -> {server_path}]")
                # Ensure the remote parent directory exists before rsyncing.
                subprocess.call(["ssh", webserver, "mkdir -p", str(server_path.parent)])
                rsync_args = ["rsync", "-hvrPt", str(local_path), dest]
                print(f"running command {' '.join(rsync_args)}")
                subprocess.call(rsync_args)
@typechecked
def model_specs2path(feat_aggregation: Dict, keep: set,
                     tag: Union[str, None] = None) -> List[Path]:
    """Map feature-aggregation specs to the relative paths of their pickles.

    Args:
        feat_aggregation: maps "feat_type.model_name.variant" specs to aggregation
            options (must contain "temporal" and "type"; may contain "fps",
            "pixel_dim", "stride", "offset", "inner_stride", "num_segments").
        keep: only specs present in this set are converted.
        tag: optional suffix appended to each pickle filename.
            (Annotation was the implicit-Optional ``str = None``, which is
            deprecated by PEP 484 and rejected by strict runtime checkers;
            made explicit — behaviour for callers is unchanged.)

    Returns:
        One relative ``Path`` per (spec, temporal-aggregation) combination.
    """
    feat_paths = []
    for model_spec, aggs in feat_aggregation.items():
        if model_spec not in keep:
            continue
        feat_type, model_name, _ = model_spec.split(".")
        base = f"aggregated_{feat_type.replace('-', '_')}"
        required = ("fps", "pixel_dim", "stride")
        fps, pixel_dim, stride = [aggs.get(x, None) for x in required]
        if feat_type in {"facecrops", "faceboxes"}:
            base = f"{base}_{fps}fps_{pixel_dim}px_stride{stride}"
        elif feat_type not in {"ocr", "speech", "audio", "pann", "syncnet", "vggsound"}:
            # Generic video features also encode any extra sampling options.
            base = f"{base}_{fps}fps_{pixel_dim}px_stride{stride}"
            for option in "offset", "inner_stride", "num_segments":
                if aggs.get(option, None) is not None:
                    base += f"_{option}{aggs[option]}"
        for agg in aggs["temporal"].split("-"):
            fname = f"{model_name}-{agg}"
            if aggs["type"] == "logits":
                fname = f"{fname}-logits"
            if tag is not None:
                fname += f"-{tag}"
            feat_paths.append(Path(base) / f"{fname}.pickle")
    return feat_paths
@typechecked
def dataset_paths(
    dataset: str
) -> Tuple[Path, Dict[str, Union[str, List[str], Dict, Path]]]:
    """Resolve the feature root directory and relative data paths for a dataset.

    Args:
        dataset: either a CLI-style name (e.g. "audiocaps") or an already
            canonical class name (e.g. "AudioCaps").

    Returns:
        Tuple of (root feature directory, dict of relative paths as produced by
        the dataset class's ``dataset_paths`` static hook).
    """
    # CLI name -> dataset class / module name.
    name_map = {
        "activity-net": "ActivityNet",
        "queryd": "QuerYD",
        "querydsegments": "QuerYDSegments",
        "clotho": "CLOTHO",
        "audiocaps": "AudioCaps"
    }
    if dataset in set(name_map.values()):
        class_name = dataset
    else:
        class_name = name_map[dataset]
    # Import the project dataset module lazily to avoid a hard dependency at
    # module-import time.
    mod = importlib.import_module(f"data_loader.{class_name}_dataset")
    get_dataset_paths = getattr(getattr(mod, class_name), "dataset_paths")
    # The on-disk directory for activity-net keeps its hyphenated CLI name.
    if dataset == "activity-net":
        data_dir = dataset
    else:
        data_dir = class_name
    root_feat = Path(f"data/{data_dir}/structured-symlinks")
    paths = get_dataset_paths()
    return root_feat, paths
def generate_tar_lists(save_dir, experiments):
    """Write per-dataset ``tar_include.txt`` lists of every feature file needed.

    Reads each experiment's stored config.json to discover which expert/text
    features its data loader requires, accumulates the relative paths per
    dataset, then writes them (sorted) to ``misc/datasets/<name>/tar_include.txt``.

    Args:
        save_dir: local root containing ``models/<exp>/<group>/seed-1/<ts>/config.json``.
        experiments: maps experiment name -> (group_id, timestamp).
    """
    all_feat_paths = {}
    for exp_name, (group_id, timestamp) in tqdm.tqdm(experiments.items()):
        rel_path = Path(group_id) / "seed-1" / timestamp / "config.json"
        config_path = Path(save_dir) / "models" / exp_name / rel_path
        with open(config_path, "r") as f:
            config = json.load(f)
        feat_aggregation = config["data_loader"]["args"]["feat_aggregation"]
        # Experiment names are "<dataset>-train..." by convention here.
        dataset_name = exp_name.split("-train")[0]
        if dataset_name not in all_feat_paths:
            all_feat_paths[dataset_name] = set()
        split_names = [config["data_loader"]["args"]["split_name"]]
        if "eval_settings" in config and config["eval_settings"]:
            test_split = config["eval_settings"]["data_loader"]["args"]["split_name"]
            split_names.append(test_split)
        keep = set(config["experts"]["modalities"])
        text_feat = config["experts"]["text_feat"]
        root_feat, paths = dataset_paths(dataset_name)
        # Drop aggregation specs for features the dataset does not declare.
        modern_feat_agg = {key: val for key, val in feat_aggregation.items()
                          if key in paths["feature_names"]}
        feat_paths = model_specs2path(modern_feat_agg, keep)
        all_feat_paths[dataset_name].update({root_feat / x for x in feat_paths})
        for key, feat_list in paths["custom_paths"].items():
            for feat_path in feat_list:
                all_feat_paths[dataset_name].add(root_feat / feat_path)
        text_paths = [root_feat / rel_path for rel_path in
                      paths["text_feat_paths"][text_feat].values()]
        all_feat_paths[dataset_name].update(set(text_paths))
        all_feat_paths[dataset_name].add(root_feat / paths["raw_captions_path"])
        if "dict_youtube_mapping_path" in paths:
            all_feat_paths[dataset_name].add(
                root_feat / paths["dict_youtube_mapping_path"])
        for split_name in split_names:
            split_paths = set(root_feat / x for x in
                              paths["subset_list_paths"][split_name].values())
            all_feat_paths[dataset_name].update(split_paths)
    for dataset_name, paths in all_feat_paths.items():
        tar_include_list = Path("misc") / "datasets" / dataset_name / "tar_include.txt"
        tar_include_list.parent.mkdir(exist_ok=True, parents=True)
        with open(tar_include_list, "w") as f:
            for path in sorted(paths):
                print(f"Writing {path} to {tar_include_list}")
                f.write(f"{path}\n")
@typechecked
def parse_geom_means_from_val_runs(log: List[str], group: str) -> List[float]:
    """Recover per-seed geometric means of R1/R5/R10 from validation log lines.

    TODO: Samuel - this is redundant due to log_summary() func in log_parser
    should refactor after deadline.

    Args:
        log: the experiment log, one entry per line.
        group: retrieval direction, e.g. "t2v" or "v2t".

    Returns:
        One geometric mean (over the final R1/R5/R10 of each seed) per seed.
    """
    subset = "val"
    # sanity check, should not be used for experiments with test sets
    assert sum(["test_t2v" in x for x in log]) == 0, "should not parse test runs"
    scores = {
        "R1": defaultdict(list),
        "R5": defaultdict(list),
        "R10": defaultdict(list),
    }
    # Regex tag for finding the seed
    seed_tag = "Setting experiment random seed to"
    # Fix: `current_seed` was previously never initialised, so a metrics row
    # appearing before any seed line raised NameError rather than tripping the
    # "failed to determine the seed" assertion below.
    current_seed = None
    for row in log:
        if seed_tag in row:
            # Search for the log file entry describing the current random seed
            match = re.search(seed_tag + r" (\d+)$", row)
            assert len(match.groups()) == 1, "expected a single regex match"
            current_seed = match.groups()[0]
        if f"{subset}_{group}_metrics" in row:
            tokens = row.split(" ")
            for key in scores:
                tag = f"{subset}_{group}_metrics_{key}:"
                if tag in tokens:
                    pos = tokens.index(tag) + 1
                    val = float(tokens[pos])
                    assert current_seed is not None, "failed to determine the seed"
                    scores[key][current_seed].append(val)
    # Keep only the last (final-epoch) score logged for each seed.
    agg_scores = {key: [] for key in scores}
    for metric, subdict in scores.items():
        for seed, values in subdict.items():
            agg_scores[metric].append(values[-1])
    geometric_means = []
    for r1, r5, r10 in zip(agg_scores["R1"], agg_scores["R5"], agg_scores["R10"]):
        geometric_means.append(scipy.stats.mstats.gmean([r1, r5, r10]))
    return geometric_means
def parse_log(log_path):
    """Parse one experiment summary log into a metrics dictionary.

    Args:
        log_path: path to the (multi-seed) summary log file.

    Returns:
        Dict with keys "t2v" and "v2t" (each an OrderedDict of
        metric -> (mean, std), plus a "geom" entry) and, when present in the
        log, a "params" entry with the trainable-parameter count.
    """
    # import pdb; pdb.set_trace()
    with open(log_path, "r") as f:
        log = f.read().splitlines()
    results = {}
    for group in {"t2v", "v2t"}:
        tag = f"[{group}] loaded log file"
        results[group] = OrderedDict()
        presence = [tag in row for row in log]
        msg = f"expected single occurence of log tag, found {sum(presence)} in {log_path}"
        assert sum(presence) == 1, msg
        metrics = ["R1", "R5", "R10", "R50", "MedR", "MeanR"]
        pos = np.where(presence)[0].item()
        # The metrics block starts 2 (or 3, for fixed-length runs) lines after the tag.
        if "fixed training length" in log[pos + 2]:
            pos += 3
        else:
            pos += 2
        rows = log[pos: pos + len(metrics)]
        for row, metric in zip(rows, metrics):
            row = row.replace("INFO:summary:", "")
            tokens = row.split(" ")
            # Each metric row ends "... <metric>: <mean>, <std>".
            if tokens[-3] != f"{metric}:":
                raise ValueError(f"Unexpteced log format [{row}]")
            assert tokens[-3] == f"{metric}:", f"unexpected row format {row}"
            mean, std = float(tokens[-2].split(",")[0]), float(tokens[-1])
            results[group][metric] = (mean, std)
        # geometric means are recomputed from summaries
        tag = f"test_{group}_metrics_geometric_mean"
        nan_tag = "INFO:summary:R1: nan"
        matches = [x for x in log if tag in x]
        if len(matches) in {1, 2, 3}:
            geoms = [float(x.split()[-1].replace("INFO:summary:", "")) for x in matches]
            if len(matches) < 3:
                print(f"WARNING: Getting stds from {len(matches)} runs for {log_path}!")
        elif sum([nan_tag in x for x in log]) > 0:
            # Degenerate run: propagate NaNs rather than crashing.
            geoms = [np.nan, np.nan, np.nan]
        else:
            # Only a few legacy experiments are allowed to fall back to
            # re-deriving geometric means from their validation runs.
            valid_exceptions = ["miechfeats-moee", "miech-ce", "jsfusion"]
            msg = f"Did not expect fixed length training for {log_path}"
            assert any([x in str(log_path) for x in valid_exceptions]), msg
            geoms = parse_geom_means_from_val_runs(log, group=group)
        if len(geoms) == 1:
            # A single run gives no spread estimate.
            std = np.nan
        else:
            std = np.std(geoms)
        results[group]["geom"] = (round(np.mean(geoms),1), round(std, 1))
    for row in log:
        if "Trainable parameters" in row:
            param_token = row.split(" ")[-1].replace("INFO:summary:", "")
            results["params"] = int(param_token)
    return results
@typechecked
def multiprocessing_parsing(exp_name: str, meta: list,
                            save_dir: Path, refresh_summaries: bool, teachText: bool, pickle_files: str):
    """Parse one experiment's log and cache the result as a pickle.

    Designed to be usable as a multiprocessing worker (see the commented-out
    pool in parse_results). Skips work when the per-experiment pickle already
    exists, unless `refresh_summaries` forces a refresh.

    Args:
        exp_name: experiment key (used in the pickle filename).
        meta: [group_id, timestamp] pair for the experiment.
        save_dir: root directory holding logs and the pickle cache.
        refresh_summaries: re-run summarise() and re-parse even if cached.
        teachText: unused here beyond the (disabled) log-path switch below.
        pickle_files: name of the cache subdirectory under save_dir.
    """
    if os.path.exists(Path(save_dir) / pickle_files / f'log_results_{exp_name}.pkl') is False or refresh_summaries is True:
        group_id, timestamp = meta
        _log_path = "log"
        # if teachText:
        #     _log_path = "log"
        if timestamp.startswith("TODO"):
            # NOTE(review): `log_results` is not defined in this scope, so this
            # branch raises NameError; it also pickles nothing, which will make
            # the later load in parse_results fail for TODO experiments. Looks
            # like it should build a local placeholder dict instead — confirm
            # intended behaviour before fixing.
            log_results[exp_name] = {"timestamp": "TODO", "results": {}}
        else:
            # Pick the first (alphabetically) seed folder for this group.
            seed_folder = sorted(os.listdir(Path(save_dir) / _log_path / Path(exp_name) / group_id))[0]
            files_in_seed_folder = os.listdir(Path(save_dir) / _log_path / Path(exp_name) / group_id / seed_folder / Path(timestamp))
            # NOTE(review): if no non-.bak json file exists, `fname` below is
            # unbound — presumably the folder always contains one; verify.
            for file in files_in_seed_folder:
                if ".json" in file and ".bak" not in file:
                    fname = file
                    break
            rel_fname = Path(timestamp) / fname
            rel_path = Path(exp_name) / group_id / seed_folder / rel_fname
            log_path = Path(save_dir) / _log_path / rel_path
            if refresh_summaries:
                summarise(group_id=group_id, log_dir=Path(save_dir) / _log_path)
            results = parse_log(log_path)
            log_results = {"timestamp": timestamp, "results": results}
            # Cache (results, summary filename, seed folder) for parse_results.
            with open(Path(save_dir) / pickle_files / f'log_results_{exp_name}.pkl', 'wb') as f:
                pickle.dump([log_results, fname, seed_folder], f)
            print(f"Saved experiment {exp_name}")
    else:
        print(f"Experiment log_results_{exp_name}.pkl already saved")
@typechecked
def parse_results(
    experiments: Dict[str, List[str]],
    save_dir: Path,
    refresh_summaries: bool,
    teachText: bool,
) -> Tuple[Dict[str, Dict[str, Union[str, Dict]]], dict, dict]:
    """Parse (or load cached) results for every experiment.

    Note: the return annotation was previously the tuple literal
    ``(Dict[...], dict, dict)``, which is not a valid type annotation;
    expressed as ``Tuple[...]`` instead (annotation-only change).

    Args:
        experiments: maps experiment name -> [group_id, timestamp].
        save_dir: root directory holding logs and pickle caches.
        refresh_summaries: force re-parsing even when pickles exist.
        teachText: selects the teachText pickle-cache directory and output file.

    Returns:
        (log_results, fnames, seed_folders) — per-experiment parsed metrics,
        summary filenames and seed folder names.
    """
    starttime = time.time()
    processes = []
    experiments_items = experiments.items()
    pickle_files = "pickle_files"
    if teachText:
        pickle_files = "pickle_files_teachText"
    if os.path.exists(Path(save_dir) / pickle_files) is False:
        os.mkdir(Path(save_dir) / pickle_files)
    # Parallel variant kept for reference; the serial loop below is the one in use.
    # for exp_name, meta in experiments_items:
    #     p = multiprocessing.Process(target=multiprocessing_parsing,
    #                                 args=(exp_name, meta,
    #                                       save_dir, refresh_summaries, teachText, pickle_files))
    #     processes.append(p)
    #     p.start()
    # for process in processes:
    #     process.join()
    for exp_name, meta in experiments_items:
        # import pdb; pdb.set_trace()
        multiprocessing_parsing(exp_name, meta, save_dir, refresh_summaries, teachText, pickle_files)
    print('That took {} seconds'.format(time.time() - starttime))
    # Re-load every per-experiment pickle written above.
    log_results = {}
    fnames = {}
    seed_folders = {}
    for exp_name, _ in experiments_items:
        with open(Path(save_dir) / pickle_files / f'log_results_{exp_name}.pkl',
                  'rb') as f:
            log_results[exp_name],\
                fnames[exp_name],\
                seed_folders[exp_name] = pickle.load(f)
    # Persist the aggregated results for later inspection.
    if not teachText:
        with open(Path(save_dir) / 'log_results2.pkl', 'wb') as f:
            pickle.dump([log_results, fnames, seed_folders], f)
    else:
        with open(Path(save_dir) / 'log_results_teachText.pkl', 'wb') as f:
            pickle.dump([log_results, fnames, seed_folders], f)
    return log_results, fnames, seed_folders
def generate_results_string(target, exp_name, results, latexify, drop=None):
    """Format one experiment's metrics for *target* as a single table cell.

    Args:
        target: metric group key, e.g. "t2v" or "v2t".
        exp_name: experiment key into `results`.
        results: parsed results as produced by parse_results().
        latexify: emit LaTeX cell fragments instead of HTML markdown.
        drop: optional collection of metric names to omit.

    Returns:
        The formatted cell string (small-font wrapped, " | "-separated).
    """
    stats = results[exp_name]["results"][target]
    print(f"Filling template values for {exp_name}")
    cells = []
    prepad = False  # retained hook: would insert \prepad into each latex cell
    for metric, (mean, std) in stats.items():
        if drop and metric in drop:
            continue
        print(f"{metric}: {mean} ({std})")
        if latexify:
            pieces = ["&$", f"{mean}_{{\\pm{std}}}$"]
            if prepad:
                pieces.insert(1, r"\prepad")
            cells.append(" ".join(pieces))
        else:
            cells.append(f"{mean}<sub>({std})</sub>")
    return small_font_str(cells)
def generate_readme(
    experiments: Dict[str, List[str]],
    root_url: str,
    readme_templates: List[Path],
    readme_dests: List[Path],
    results_path: Path,
    latex_table_dir: Path,
    save_dir: Path,
    latexify: bool,
    keep_mnr: bool,
    refresh_summaries: bool,
    results: Dict,
    fnames: Dict,
    seed_folders: Dict,
    append_to_existing_readme: bool,
):
    """Fill README templates with artifact links and parsed metric strings.

    Two substitution passes are applied to each template:
    1. ``<<subtemplate:src:dest>>`` rows are expanded by loading the named
       sub-template and renaming dataset tokens from `src` to `dest`.
    2. ``{{exp_name.target}}`` tags are replaced with URLs, metric strings,
       parameter counts or LaTeX-table links.

    Note: `results_path`, `save_dir` and `refresh_summaries` are unused in this
    body and appear to be kept only for interface parity with callers.

    Args:
        experiments: maps experiment name -> (group_id, timestamp).
        root_url: base URL for artifact links.
        readme_templates: template files, processed pairwise with `readme_dests`.
        readme_dests: output files.
        latex_table_dir: output directory for latexified tables.
        latexify: pass-through flag for metric-string formatting.
        keep_mnr: keep the MeanR column in "short" metric strings.
        results / fnames / seed_folders: outputs of parse_results().
        append_to_existing_readme: append instead of overwriting the dest files.
    """
    for readme_template, readme_dest in zip(readme_templates, readme_dests):
        with open(readme_template, "r") as f:
            readme = f.read().splitlines()
        # insert sub-templates
        full_readme = []
        for row in readme:
            regex = r"\<\<(.*?)\>\>"
            matched = False
            for match in re.finditer(regex, row):
                matched = True
                groups = match.groups()
                assert len(groups) == 1, "expected single group"
                subtemplate_path, src_name, dest_name = groups[0].split(":")
                with open(subtemplate_path, "r") as f:
                    subtemplate = f.read().splitlines()
                subrows = []
                for subrow in subtemplate:
                    drop_subrow = False
                    subrow = subrow.replace(src_name, dest_name)
                    subrow = subrow.replace(src_name.upper(), dest_name.upper())
                    # Handle the missing audio modalities of MSVD
                    if dest_name == "msvd":
                        for tag in ("speech", "audio"):
                            # drop experiments for which the audio/speech features form
                            # the control variable
                            if f"-{tag}." in subrow:
                                print("skipping", subrow)
                                drop_subrow = True
                                break
                            # remove audio features from other experiments
                            subrow = subrow.replace(f"-{tag}", "")
                    if not drop_subrow:
                        subrows.append(subrow)
                full_readme.extend(subrows)
            if not matched:
                full_readme.append(row)
        generated = []
        for row in full_readme:
            edits = []
            regex = r"\{\{(.*?)\}\}"
            for match in re.finditer(regex, row):
                groups = match.groups()
                assert len(groups) == 1, "expected single group"
                exp_name, target = groups[0].split(".")
                if target.startswith("latexify"):
                    # Convert the table generated so far into a LaTeX file and
                    # link to it (padding keeps markdown table column counts).
                    latex_link = gen_latex_version_of_table(
                        content=generated[:],
                        table_name=exp_name,
                        latex_table_dir=latex_table_dir,
                    )
                    token = f"[latex]({latex_link}) | | | | | | | |"
                elif results[exp_name]["timestamp"] == "TODO":
                    token = "TODO"
                elif target in {"config", "model", "log", "log_TT"}:
                    token = generate_url(root_url, target, exp_name,
                                         experiments=experiments,
                                         fnames=fnames,
                                         seed_folders=seed_folders)
                elif target in {"t2v", "v2t", "geomt2v", "geomv2t"}:
                    # Plain targets drop the geometric mean; "geom*" keep it.
                    if not "geom" in target:
                        drop = {"geom"}
                    else:
                        drop = {}
                    target_ = target.split("geom")[-1]
                    token = generate_results_string(target_, exp_name, results,
                                                    drop=drop, latexify=latexify)
                elif target in {"short-t2v", "short-v2t"}:
                    if keep_mnr:
                        drop = {"R50", "geom"}
                    else:
                        drop = {"R50", "MeanR", "geom"}
                    target_ = target.split("-")[1]
                    token = generate_results_string(target_, exp_name, results,
                                                    drop=drop, latexify=latexify)
                elif target in {"params"}:
                    token = millify(results[exp_name]["results"]["params"], precision=2)
                edits.append((match.span(), token))
            if edits:
                # invert the spans
                spans = [(None, 0)] + [x[0] for x in edits] + [(len(row), None)]
                inverse_spans = [(x[1], y[0]) for x, y in zip(spans, spans[1:])]
                tokens = [row[start:stop] for start, stop in inverse_spans]
                urls = [str(x[1]) for x in edits]
                new_row = ""
                # Interleave unmatched text with the substituted tokens.
                for token, url in zip_longest(tokens, urls, fillvalue=""):
                    new_row += token + url
                row = new_row
            generated.append(row)
        if not append_to_existing_readme:
            with open(readme_dest, "w") as f:
                f.write("\n".join(generated))
        else:
            with open(readme_dest, "a") as f:
                f.write("\n".join(generated))
def parse_generate_readme(
    experiments: Dict[str, List[str]],
    root_url: str,
    readme_templates: List[Path],
    readme_dests: List[Path],
    results_path: Path,
    latex_table_dir: Path,
    save_dir: Path,
    latexify: bool,
    keep_mnr: bool,
    refresh_summaries: bool,
    drop_experiments_hq: bool,
    results_path_teachText: Path,
    experiments_teachText: Dict[str, List[str]],
    teachText_template: Path,
):
    """Parse all experiment logs and regenerate the README files.

    When `drop_experiments_hq` is False, the teachText section is written
    first (overwriting the dest files) and the main experiment section is then
    appended; otherwise only the main section is written.

    Args:
        experiments / experiments_teachText: name -> (group_id, timestamp) maps.
        root_url: base URL for artifact links.
        readme_templates / readme_dests: template/output file pairs.
        results_path / results_path_teachText: where parsed metrics are dumped as json.
        latex_table_dir: output directory for latexified tables.
        save_dir: root directory holding logs and pickle caches.
        latexify / keep_mnr / refresh_summaries: pass-through formatting flags.
        drop_experiments_hq: skip the teachText section entirely.
        teachText_template: template used for the teachText section.
    """
    results, fnames, seed_folders = parse_results(
        experiments=experiments,
        save_dir=save_dir,
        refresh_summaries=refresh_summaries,
        teachText=False,
    )
    append_to_existing_readme = False
    with open(results_path, "w") as f:
        json.dump(results, f, indent=4, sort_keys=False)
    if not drop_experiments_hq:
        results_teachText, fnames_teachText, seed_folders_teachText = parse_results(
            experiments=experiments_teachText,
            save_dir=save_dir,
            refresh_summaries=refresh_summaries,
            teachText=True,
        )
        with open(results_path_teachText, "w") as f:
            # Fix: previously dumped the *main* `results` dict here, so the
            # teachText json file never contained the teachText metrics.
            json.dump(results_teachText, f, indent=4, sort_keys=False)
        generate_readme(
            experiments=experiments_teachText,
            root_url=root_url,
            readme_templates=[teachText_template],
            readme_dests=readme_dests,
            results_path=results_path_teachText,
            latex_table_dir=latex_table_dir,
            save_dir=save_dir,
            latexify=latexify,
            keep_mnr=keep_mnr,
            refresh_summaries=refresh_summaries,
            results=results_teachText,
            fnames=fnames_teachText,
            seed_folders=seed_folders_teachText,
            append_to_existing_readme=False,
        )
        # The teachText pass already wrote the files; append the main section.
        append_to_existing_readme = True
    generate_readme(
        experiments=experiments,
        root_url=root_url,
        readme_templates=readme_templates,
        readme_dests=readme_dests,
        results_path=results_path,
        latex_table_dir=latex_table_dir,
        save_dir=save_dir,
        latexify=latexify,
        keep_mnr=keep_mnr,
        refresh_summaries=refresh_summaries,
        results=results,
        fnames=fnames,
        seed_folders=seed_folders,
        append_to_existing_readme=append_to_existing_readme,
    )
def main():
    """CLI entry point: parse flags, then sync artifacts or regenerate READMEs."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--save_dir", default="data/saved", type=Path)
    parser.add_argument("--webserver", default="login.robots.ox.ac.uk")
    parser.add_argument("--results_path", default="misc/results.json", type=Path)
    parser.add_argument("--results_path_teachText", default="misc/results_teachText.json", type=Path)
    parser.add_argument("--experiments_path", default="misc/experiments.json")
    parser.add_argument("--experiments_teachText", default="misc/experiments_teachText.json")
    parser.add_argument("--readme_template", default="misc/README-template.md")
    parser.add_argument("--teachText_template", default="misc/README-teachText.md")
    parser.add_argument("--latexify", action="store_true")
    parser.add_argument("--drop_experiments_hq", action="store_true")
    parser.add_argument("--keep_mnr", action="store_true")
    parser.add_argument("--refresh_summaries", action="store_true")
    parser.add_argument("--readme_dest", default="README.md")
    parser.add_argument("--latex_table_dir", default="latex-tables", type=Path)
    parser.add_argument("--ablation_readme_dest", default="misc/ablations.md")
    parser.add_argument("--challenge_readme_dest", default="misc/challenge.md")
    parser.add_argument("--ablation_readme_template",
                        default="misc/ablations-template.md")
    parser.add_argument("--challenge_readme_template",
                        default="misc/README-challenge-template.md")
    parser.add_argument("--task", default="generate_readme",
                        choices=["sync_files", "generate_readme"])
    parser.add_argument(
        "--web_dir",
        default="/projects/vgg/vgg/WWW/research/collaborative-experts/data",
    )
    parser.add_argument(
        "--root_url",
        default="http://www.robots.ox.ac.uk/~vgg/research/collaborative-experts/data",
    )
    parser.add_argument("--only_one_readme", action="store_true")
    args = parser.parse_args()
    # Load the experiment registries (name -> [group_id, timestamp]).
    with open(args.experiments_path, "r") as f:
        experiments = json.load(f)
    with open(args.experiments_teachText, 'r') as f:
        experiments_teachText = json.load(f)
    if args.task == "sync_files":
        sync_files(
            web_dir=args.web_dir,
            save_dir=args.save_dir,
            webserver=args.webserver,
            experiments=experiments,
        )
    elif args.task == "generate_readme":
        readme_dests = [
            args.readme_dest,
            args.ablation_readme_dest,
            args.challenge_readme_dest,
        ]
        readme_templates = [
            args.readme_template,
            args.ablation_readme_template,
            args.challenge_readme_template,
        ]
        # Optionally restrict to just the top-level README.
        if args.only_one_readme is True:
            readme_dests = [
                args.readme_dest,
            ]
            readme_templates = [
                args.readme_template,
            ]
        parse_generate_readme(
            root_url=args.root_url,
            save_dir=args.save_dir,
            latexify=args.latexify,
            experiments=experiments,
            latex_table_dir=args.latex_table_dir,
            keep_mnr=args.keep_mnr,
            readme_dests=readme_dests,
            results_path=args.results_path,
            readme_templates=readme_templates,
            refresh_summaries=args.refresh_summaries,
            drop_experiments_hq=args.drop_experiments_hq,
            results_path_teachText=args.results_path_teachText,
            experiments_teachText=experiments_teachText,
            teachText_template=args.teachText_template,
        )
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
# There are tests here with unicode string literals and
# identifiers. There's a code in ast.c that was added because of a
# failure with a non-ascii-only expression. So, I have tests for
# that. There are workarounds that would let me run tests for that
# code without unicode identifiers and strings, but just using them
# directly seems like the easiest and therefore safest thing to do.
# Unicode identifiers in tests is allowed by PEP 3131.
import ast
import os
import re
import types
import decimal
import unittest
from test.support.os_helper import temp_cwd
from test.support.script_helper import assert_python_failure
# Module-level value read by tests that reference a global name from an f-string.
a_global = 'global variable'
# You could argue that I'm too strict in looking for specific error
# values with assertRaisesRegex, but without it it's way too easy to
# make a syntax error in the test strings. Especially with all of the
# triple quotes, raw strings, backslashes, etc. I think it's a
# worthwhile tradeoff. When I switched to this method, I found many
# examples where I wasn't testing what I thought I was.
class TestCase(unittest.TestCase):
def assertAllRaise(self, exception_type, regex, error_strings):
for str in error_strings:
with self.subTest(str=str):
with self.assertRaisesRegex(exception_type, regex):
eval(str)
    def test__format__lookup(self):
        """f-string formatting must look up __format__ on the type, not the
        instance, so an instance-attached __format__ is ignored."""
        # Make sure __format__ is looked up on the type, not the instance.
        class X:
            def __format__(self, spec):
                return 'class'
        x = X()
        # Add a bound __format__ method to the 'y' instance, but not
        # the 'x' instance.
        y = X()
        y.__format__ = types.MethodType(lambda self, spec: 'instance', y)
        self.assertEqual(f'{y}', format(y))
        self.assertEqual(f'{y}', 'class')
        self.assertEqual(format(x), format(y))
        # __format__ is not called this way, but still make sure it
        # returns what we expect (so we can make sure we're bypassing
        # it).
        self.assertEqual(x.__format__(''), 'class')
        self.assertEqual(y.__format__(''), 'instance')
        # This is how __format__ is actually called.
        self.assertEqual(type(x).__format__(x, ''), 'class')
        self.assertEqual(type(y).__format__(y, ''), 'class')
    def test_ast(self):
        """Compiling an f-string AST must not evaluate its expressions;
        they only run when the compiled code is executed."""
        # Inspired by http://bugs.python.org/issue24975
        class X:
            def __init__(self):
                self.called = False
            def __call__(self):
                self.called = True
                return 4
        x = X()
        expr = """
a = 10
f'{a * x()}'"""
        t = ast.parse(expr)
        c = compile(t, '', 'exec')
        # Make sure x was not called.
        self.assertFalse(x.called)
        # Actually run the code.
        exec(c)
        # Make sure x was called.
        self.assertTrue(x.called)
    def test_ast_line_numbers(self):
        """Expressions inside a single-line f-string carry the correct
        lineno and col_offset of their source location."""
        expr = """
a = 10
f'{a * x()}'"""
        t = ast.parse(expr)
        self.assertEqual(type(t), ast.Module)
        self.assertEqual(len(t.body), 2)
        # check `a = 10`
        self.assertEqual(type(t.body[0]), ast.Assign)
        self.assertEqual(t.body[0].lineno, 2)
        # check `f'...'`
        self.assertEqual(type(t.body[1]), ast.Expr)
        self.assertEqual(type(t.body[1].value), ast.JoinedStr)
        self.assertEqual(len(t.body[1].value.values), 1)
        self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
        self.assertEqual(t.body[1].lineno, 3)
        self.assertEqual(t.body[1].value.lineno, 3)
        self.assertEqual(t.body[1].value.values[0].lineno, 3)
        # check the binop location
        binop = t.body[1].value.values[0].value
        self.assertEqual(type(binop), ast.BinOp)
        self.assertEqual(type(binop.left), ast.Name)
        self.assertEqual(type(binop.op), ast.Mult)
        self.assertEqual(type(binop.right), ast.Call)
        self.assertEqual(binop.lineno, 3)
        self.assertEqual(binop.left.lineno, 3)
        self.assertEqual(binop.right.lineno, 3)
        self.assertEqual(binop.col_offset, 3)
        self.assertEqual(binop.left.col_offset, 3)
        self.assertEqual(binop.right.col_offset, 7)
    def test_ast_line_numbers_multiple_formattedvalues(self):
        """Each of several FormattedValues in one f-string gets its own
        correct lineno/col_offset."""
        expr = """
f'no formatted values'
f'eggs {a * x()} spam {b + y()}'"""
        t = ast.parse(expr)
        self.assertEqual(type(t), ast.Module)
        self.assertEqual(len(t.body), 2)
        # check `f'no formatted value'`
        self.assertEqual(type(t.body[0]), ast.Expr)
        self.assertEqual(type(t.body[0].value), ast.JoinedStr)
        self.assertEqual(t.body[0].lineno, 2)
        # check `f'...'`
        self.assertEqual(type(t.body[1]), ast.Expr)
        self.assertEqual(type(t.body[1].value), ast.JoinedStr)
        self.assertEqual(len(t.body[1].value.values), 4)
        self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
        self.assertEqual(type(t.body[1].value.values[0].value), str)
        self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
        self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
        self.assertEqual(type(t.body[1].value.values[2].value), str)
        self.assertEqual(type(t.body[1].value.values[3]), ast.FormattedValue)
        self.assertEqual(t.body[1].lineno, 3)
        self.assertEqual(t.body[1].value.lineno, 3)
        self.assertEqual(t.body[1].value.values[0].lineno, 3)
        self.assertEqual(t.body[1].value.values[1].lineno, 3)
        self.assertEqual(t.body[1].value.values[2].lineno, 3)
        self.assertEqual(t.body[1].value.values[3].lineno, 3)
        # check the first binop location
        binop1 = t.body[1].value.values[1].value
        self.assertEqual(type(binop1), ast.BinOp)
        self.assertEqual(type(binop1.left), ast.Name)
        self.assertEqual(type(binop1.op), ast.Mult)
        self.assertEqual(type(binop1.right), ast.Call)
        self.assertEqual(binop1.lineno, 3)
        self.assertEqual(binop1.left.lineno, 3)
        self.assertEqual(binop1.right.lineno, 3)
        self.assertEqual(binop1.col_offset, 8)
        self.assertEqual(binop1.left.col_offset, 8)
        self.assertEqual(binop1.right.col_offset, 12)
        # check the second binop location
        binop2 = t.body[1].value.values[3].value
        self.assertEqual(type(binop2), ast.BinOp)
        self.assertEqual(type(binop2.left), ast.Name)
        self.assertEqual(type(binop2.op), ast.Add)
        self.assertEqual(type(binop2.right), ast.Call)
        self.assertEqual(binop2.lineno, 3)
        self.assertEqual(binop2.left.lineno, 3)
        self.assertEqual(binop2.right.lineno, 3)
        self.assertEqual(binop2.col_offset, 23)
        self.assertEqual(binop2.left.col_offset, 23)
        self.assertEqual(binop2.right.col_offset, 27)
    def test_ast_line_numbers_nested(self):
        """An f-string nested inside another f-string expression reports
        correct locations for both the outer binop and the inner call."""
        expr = """
a = 10
f'{a * f'-{x()}-'}'"""
        t = ast.parse(expr)
        self.assertEqual(type(t), ast.Module)
        self.assertEqual(len(t.body), 2)
        # check `a = 10`
        self.assertEqual(type(t.body[0]), ast.Assign)
        self.assertEqual(t.body[0].lineno, 2)
        # check `f'...'`
        self.assertEqual(type(t.body[1]), ast.Expr)
        self.assertEqual(type(t.body[1].value), ast.JoinedStr)
        self.assertEqual(len(t.body[1].value.values), 1)
        self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
        self.assertEqual(t.body[1].lineno, 3)
        self.assertEqual(t.body[1].value.lineno, 3)
        self.assertEqual(t.body[1].value.values[0].lineno, 3)
        # check the binop location
        binop = t.body[1].value.values[0].value
        self.assertEqual(type(binop), ast.BinOp)
        self.assertEqual(type(binop.left), ast.Name)
        self.assertEqual(type(binop.op), ast.Mult)
        self.assertEqual(type(binop.right), ast.JoinedStr)
        self.assertEqual(binop.lineno, 3)
        self.assertEqual(binop.left.lineno, 3)
        self.assertEqual(binop.right.lineno, 3)
        self.assertEqual(binop.col_offset, 3)
        self.assertEqual(binop.left.col_offset, 3)
        self.assertEqual(binop.right.col_offset, 7)
        # check the nested call location
        self.assertEqual(len(binop.right.values), 3)
        self.assertEqual(type(binop.right.values[0]), ast.Constant)
        self.assertEqual(type(binop.right.values[0].value), str)
        self.assertEqual(type(binop.right.values[1]), ast.FormattedValue)
        self.assertEqual(type(binop.right.values[2]), ast.Constant)
        self.assertEqual(type(binop.right.values[2].value), str)
        self.assertEqual(binop.right.values[0].lineno, 3)
        self.assertEqual(binop.right.values[1].lineno, 3)
        self.assertEqual(binop.right.values[2].lineno, 3)
        call = binop.right.values[1].value
        self.assertEqual(type(call), ast.Call)
        self.assertEqual(call.lineno, 3)
        self.assertEqual(call.col_offset, 11)
    def test_ast_line_numbers_duplicate_expression(self):
        """Duplicate expression

        NOTE: this is currently broken, always sets location of the first
        expression: all three identical `{a * x()}` substitutions report the
        col_offset of the first occurrence (see the FIXME markers below).
        """
        expr = """
a = 10
f'{a * x()} {a * x()} {a * x()}'
"""
        t = ast.parse(expr)
        self.assertEqual(type(t), ast.Module)
        self.assertEqual(len(t.body), 2)
        # check `a = 10`
        self.assertEqual(type(t.body[0]), ast.Assign)
        self.assertEqual(t.body[0].lineno, 2)
        # check `f'...'`
        self.assertEqual(type(t.body[1]), ast.Expr)
        self.assertEqual(type(t.body[1].value), ast.JoinedStr)
        self.assertEqual(len(t.body[1].value.values), 5)
        self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
        self.assertEqual(type(t.body[1].value.values[1]), ast.Constant)
        self.assertEqual(type(t.body[1].value.values[1].value), str)
        self.assertEqual(type(t.body[1].value.values[2]), ast.FormattedValue)
        self.assertEqual(type(t.body[1].value.values[3]), ast.Constant)
        self.assertEqual(type(t.body[1].value.values[3].value), str)
        self.assertEqual(type(t.body[1].value.values[4]), ast.FormattedValue)
        self.assertEqual(t.body[1].lineno, 3)
        self.assertEqual(t.body[1].value.lineno, 3)
        self.assertEqual(t.body[1].value.values[0].lineno, 3)
        self.assertEqual(t.body[1].value.values[1].lineno, 3)
        self.assertEqual(t.body[1].value.values[2].lineno, 3)
        self.assertEqual(t.body[1].value.values[3].lineno, 3)
        self.assertEqual(t.body[1].value.values[4].lineno, 3)
        # check the first binop location
        binop = t.body[1].value.values[0].value
        self.assertEqual(type(binop), ast.BinOp)
        self.assertEqual(type(binop.left), ast.Name)
        self.assertEqual(type(binop.op), ast.Mult)
        self.assertEqual(type(binop.right), ast.Call)
        self.assertEqual(binop.lineno, 3)
        self.assertEqual(binop.left.lineno, 3)
        self.assertEqual(binop.right.lineno, 3)
        self.assertEqual(binop.col_offset, 3)
        self.assertEqual(binop.left.col_offset, 3)
        self.assertEqual(binop.right.col_offset, 7)
        # check the second binop location
        binop = t.body[1].value.values[2].value
        self.assertEqual(type(binop), ast.BinOp)
        self.assertEqual(type(binop.left), ast.Name)
        self.assertEqual(type(binop.op), ast.Mult)
        self.assertEqual(type(binop.right), ast.Call)
        self.assertEqual(binop.lineno, 3)
        self.assertEqual(binop.left.lineno, 3)
        self.assertEqual(binop.right.lineno, 3)
        self.assertEqual(binop.col_offset, 3)  # FIXME: this is wrong
        self.assertEqual(binop.left.col_offset, 3)  # FIXME: this is wrong
        self.assertEqual(binop.right.col_offset, 7)  # FIXME: this is wrong
        # check the third binop location
        binop = t.body[1].value.values[4].value
        self.assertEqual(type(binop), ast.BinOp)
        self.assertEqual(type(binop.left), ast.Name)
        self.assertEqual(type(binop.op), ast.Mult)
        self.assertEqual(type(binop.right), ast.Call)
        self.assertEqual(binop.lineno, 3)
        self.assertEqual(binop.left.lineno, 3)
        self.assertEqual(binop.right.lineno, 3)
        self.assertEqual(binop.col_offset, 3)  # FIXME: this is wrong
        self.assertEqual(binop.left.col_offset, 3)  # FIXME: this is wrong
        self.assertEqual(binop.right.col_offset, 7)  # FIXME: this is wrong
def test_ast_line_numbers_multiline_fstring(self):
# See bpo-30465 for details.
expr = """
a = 10
f'''
{a
*
x()}
non-important content
'''
"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 3)
self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[0].value), str)
self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
self.assertEqual(type(t.body[1].value.values[2].value), str)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
self.assertEqual(t.body[1].value.values[1].lineno, 3)
self.assertEqual(t.body[1].value.values[2].lineno, 3)
self.assertEqual(t.body[1].col_offset, 0)
self.assertEqual(t.body[1].value.col_offset, 0)
self.assertEqual(t.body[1].value.values[0].col_offset, 0)
self.assertEqual(t.body[1].value.values[1].col_offset, 0)
self.assertEqual(t.body[1].value.values[2].col_offset, 0)
# NOTE: the following lineno information and col_offset is correct for
# expressions within FormattedValues.
binop = t.body[1].value.values[1].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 4)
self.assertEqual(binop.left.lineno, 4)
self.assertEqual(binop.right.lineno, 6)
self.assertEqual(binop.col_offset, 4)
self.assertEqual(binop.left.col_offset, 4)
self.assertEqual(binop.right.col_offset, 7)
def test_ast_line_numbers_with_parentheses(self):
expr = """
x = (
f" {test(t)}"
)"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 1)
# check the test(t) location
call = t.body[0].value.values[1].value
self.assertEqual(type(call), ast.Call)
self.assertEqual(call.lineno, 3)
self.assertEqual(call.end_lineno, 3)
self.assertEqual(call.col_offset, 8)
self.assertEqual(call.end_col_offset, 15)
expr = """
x = (
'PERL_MM_OPT', (
f'wat'
f'some_string={f(x)} '
f'wat'
),
)
"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 1)
# check the fstring
fstring = t.body[0].value.elts[1]
self.assertEqual(type(fstring), ast.JoinedStr)
self.assertEqual(len(fstring.values), 3)
wat1, middle, wat2 = fstring.values
# check the first wat
self.assertEqual(type(wat1), ast.Constant)
self.assertEqual(wat1.lineno, 4)
self.assertEqual(wat1.end_lineno, 6)
self.assertEqual(wat1.col_offset, 12)
self.assertEqual(wat1.end_col_offset, 18)
# check the call
call = middle.value
self.assertEqual(type(call), ast.Call)
self.assertEqual(call.lineno, 5)
self.assertEqual(call.end_lineno, 5)
self.assertEqual(call.col_offset, 27)
self.assertEqual(call.end_col_offset, 31)
# check the second wat
self.assertEqual(type(wat2), ast.Constant)
self.assertEqual(wat2.lineno, 4)
self.assertEqual(wat2.end_lineno, 6)
self.assertEqual(wat2.col_offset, 12)
self.assertEqual(wat2.end_col_offset, 18)
def test_docstring(self):
def f():
f'''Not a docstring'''
self.assertIsNone(f.__doc__)
def g():
'''Not a docstring''' \
f''
self.assertIsNone(g.__doc__)
def test_literal_eval(self):
with self.assertRaisesRegex(ValueError, 'malformed node or string'):
ast.literal_eval("f'x'")
def test_ast_compile_time_concat(self):
x = ['']
expr = """x[0] = 'foo' f'{3}'"""
t = ast.parse(expr)
c = compile(t, '', 'exec')
exec(c)
self.assertEqual(x[0], 'foo3')
def test_compile_time_concat_errors(self):
self.assertAllRaise(SyntaxError,
'cannot mix bytes and nonbytes literals',
[r"""f'' b''""",
r"""b'' f''""",
])
def test_literal(self):
self.assertEqual(f'', '')
self.assertEqual(f'a', 'a')
self.assertEqual(f' ', ' ')
def test_unterminated_string(self):
self.assertAllRaise(SyntaxError, 'f-string: unterminated string',
[r"""f'{"x'""",
r"""f'{"x}'""",
r"""f'{("x'""",
r"""f'{("x}'""",
])
def test_mismatched_parens(self):
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\('",
["f'{((}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\)' "
r"does not match opening parenthesis '\['",
["f'{a[4)}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\]' "
r"does not match opening parenthesis '\('",
["f'{a(4]}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\['",
["f'{a[4}'",
])
self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
r"does not match opening parenthesis '\('",
["f'{a(4}'",
])
self.assertRaises(SyntaxError, eval, "f'{' + '('*500 + '}'")
def test_double_braces(self):
self.assertEqual(f'{{', '{')
self.assertEqual(f'a{{', 'a{')
self.assertEqual(f'{{b', '{b')
self.assertEqual(f'a{{b', 'a{b')
self.assertEqual(f'}}', '}')
self.assertEqual(f'a}}', 'a}')
self.assertEqual(f'}}b', '}b')
self.assertEqual(f'a}}b', 'a}b')
self.assertEqual(f'{{}}', '{}')
self.assertEqual(f'a{{}}', 'a{}')
self.assertEqual(f'{{b}}', '{b}')
self.assertEqual(f'{{}}c', '{}c')
self.assertEqual(f'a{{b}}', 'a{b}')
self.assertEqual(f'a{{}}c', 'a{}c')
self.assertEqual(f'{{b}}c', '{b}c')
self.assertEqual(f'a{{b}}c', 'a{b}c')
self.assertEqual(f'{{{10}', '{10')
self.assertEqual(f'}}{10}', '}10')
self.assertEqual(f'}}{{{10}', '}{10')
self.assertEqual(f'}}a{{{10}', '}a{10')
self.assertEqual(f'{10}{{', '10{')
self.assertEqual(f'{10}}}', '10}')
self.assertEqual(f'{10}}}{{', '10}{')
self.assertEqual(f'{10}}}a{{' '}', '10}a{}')
# Inside of strings, don't interpret doubled brackets.
self.assertEqual(f'{'{{}}'}', '{{}}')
self.assertAllRaise(TypeError, 'unhashable type',
["f'{ {{}} }'", # dict in a set
])
def test_compile_time_concat(self):
x = 'def'
self.assertEqual('abc' f'## {x}ghi', 'abc## defghi')
self.assertEqual('abc' f'{x}' 'ghi', 'abcdefghi')
self.assertEqual('abc' f'{x}' 'gh' f'i{x:4}', 'abcdefghidef ')
self.assertEqual('{x}' f'{x}', '{x}def')
self.assertEqual('{x' f'{x}', '{xdef')
self.assertEqual('{x}' f'{x}', '{x}def')
self.assertEqual('{{x}}' f'{x}', '{{x}}def')
self.assertEqual('{{x' f'{x}', '{{xdef')
self.assertEqual('x}}' f'{x}', 'x}}def')
self.assertEqual(f'{x}' 'x}}', 'defx}}')
self.assertEqual(f'{x}' '', 'def')
self.assertEqual('' f'{x}' '', 'def')
self.assertEqual('' f'{x}', 'def')
self.assertEqual(f'{x}' '2', 'def2')
self.assertEqual('1' f'{x}' '2', '1def2')
self.assertEqual('1' f'{x}', '1def')
self.assertEqual(f'{x}' f'-{x}', 'def-def')
self.assertEqual('' f'', '')
self.assertEqual('' f'' '', '')
self.assertEqual('' f'' '' f'', '')
self.assertEqual(f'', '')
self.assertEqual(f'' '', '')
self.assertEqual(f'' '' f'', '')
self.assertEqual(f'' '' f'' '', '')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3' f'}'", # can't concat to get a valid f-string
])
def test_comments(self):
# These aren't comments, since they're in strings.
d = {'#': 'hash'}
self.assertEqual(f'{'#'}', '#')
self.assertEqual(f'{d['#']}', 'hash')
self.assertAllRaise(SyntaxError, "f-string expression part cannot include '#'",
["f'{1#}'", # error because the expression becomes "(1#)"
"f'{3(#)}'",
"f'{#}'",
])
self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
["f'{)#}'", # When wrapped in parens, this becomes
# '()#)'. Make sure that doesn't compile.
])
    def test_many_expressions(self):
        """Stress-test f-strings containing hundreds of replacement fields."""
        # Create a string with many expressions in it. Note that
        # because we have a space in here as a literal, we're actually
        # going to use twice as many ast nodes: one for each literal
        # plus one for each expression.
        def build_fstr(n, extra=''):
            # Build the *source text* of an f-string with n '{x} ' fields.
            return "f'" + ('{x} ' * n) + extra + "'"
        x = 'X'
        width = 1
        # Test around 256.
        for i in range(250, 260):
            self.assertEqual(eval(build_fstr(i)), (x+' ')*i)
        # Test concatenating 2 large f-strings.
        self.assertEqual(eval(build_fstr(255)*256), (x+' ')*(255*256))
        s = build_fstr(253, '{x:{width}} ')
        self.assertEqual(eval(s), (x+' ')*254)
        # Test lots of expressions and constants, concatenated.
        s = "f'{1}' 'x' 'y'" * 1024
        self.assertEqual(eval(s), '1xy' * 1024)
def test_format_specifier_expressions(self):
width = 10
precision = 4
value = decimal.Decimal('12.34567')
self.assertEqual(f'result: {value:{width}.{precision}}', 'result: 12.35')
self.assertEqual(f'result: {value:{width!r}.{precision}}', 'result: 12.35')
self.assertEqual(f'result: {value:{width:0}.{precision:1}}', 'result: 12.35')
self.assertEqual(f'result: {value:{1}{0:0}.{precision:1}}', 'result: 12.35')
self.assertEqual(f'result: {value:{ 1}{ 0:0}.{ precision:1}}', 'result: 12.35')
self.assertEqual(f'{10:#{1}0x}', ' 0xa')
self.assertEqual(f'{10:{'#'}1{0}{'x'}}', ' 0xa')
self.assertEqual(f'{-10:-{'#'}1{0}x}', ' -0xa')
self.assertEqual(f'{-10:{'-'}#{1}0{'x'}}', ' -0xa')
self.assertEqual(f'{10:#{3 != {4:5} and width}x}', ' 0xa')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["""f'{'s'!r{':10'}}'""",
# This looks like a nested format spec.
])
self.assertAllRaise(SyntaxError, "f-string: invalid syntax",
[# Invalid syntax inside a nested spec.
"f'{4:{/5}}'",
])
self.assertAllRaise(SyntaxError, "f-string: expressions nested too deeply",
[# Can't nest format specifiers.
"f'result: {value:{width:{0}}.{precision:1}}'",
])
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
[# No expansion inside conversion or for
# the : or ! itself.
"""f'{'s'!{'r'}}'""",
])
def test_side_effect_order(self):
class X:
def __init__(self):
self.i = 0
def __format__(self, spec):
self.i += 1
return str(self.i)
x = X()
self.assertEqual(f'{x} {x}', '1 2')
def test_missing_expression(self):
self.assertAllRaise(SyntaxError, 'f-string: empty expression not allowed',
["f'{}'",
"f'{ }'"
"f' {} '",
"f'{!r}'",
"f'{ !r}'",
"f'{10:{ }}'",
"f' { } '",
# The Python parser ignores also the following
# whitespace characters in additional to a space.
"f'''{\t\f\r\n}'''",
# Catch the empty expression before the
# invalid conversion.
"f'{!x}'",
"f'{ !xr}'",
"f'{!x:}'",
"f'{!x:a}'",
"f'{ !xr:}'",
"f'{ !xr:a}'",
"f'{!}'",
"f'{:}'",
# We find the empty expression before the
# missing closing brace.
"f'{!'",
"f'{!s:'",
"f'{:'",
"f'{:x'",
])
# Different error message is raised for other whitespace characters.
self.assertAllRaise(SyntaxError, r"invalid non-printable character U\+00A0",
["f'''{\xa0}'''",
"\xa0",
])
def test_parens_in_expressions(self):
self.assertEqual(f'{3,}', '(3,)')
# Add these because when an expression is evaluated, parens
# are added around it. But we shouldn't go from an invalid
# expression to a valid one. The added parens are just
# supposed to allow whitespace (including newlines).
self.assertAllRaise(SyntaxError, 'f-string: invalid syntax',
["f'{,}'",
"f'{,}'", # this is (,), which is an error
])
self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
["f'{3)+(4}'",
])
self.assertAllRaise(SyntaxError, 'unterminated string literal',
["f'{\n}'",
])
def test_newlines_before_syntax_error(self):
self.assertAllRaise(SyntaxError, "invalid syntax",
["f'{.}'", "\nf'{.}'", "\n\nf'{.}'"])
def test_backslashes_in_string_part(self):
self.assertEqual(f'\t', '\t')
self.assertEqual(r'\t', '\\t')
self.assertEqual(rf'\t', '\\t')
self.assertEqual(f'{2}\t', '2\t')
self.assertEqual(f'{2}\t{3}', '2\t3')
self.assertEqual(f'\t{3}', '\t3')
self.assertEqual(f'\u0394', '\u0394')
self.assertEqual(r'\u0394', '\\u0394')
self.assertEqual(rf'\u0394', '\\u0394')
self.assertEqual(f'{2}\u0394', '2\u0394')
self.assertEqual(f'{2}\u0394{3}', '2\u03943')
self.assertEqual(f'\u0394{3}', '\u03943')
self.assertEqual(f'\U00000394', '\u0394')
self.assertEqual(r'\U00000394', '\\U00000394')
self.assertEqual(rf'\U00000394', '\\U00000394')
self.assertEqual(f'{2}\U00000394', '2\u0394')
self.assertEqual(f'{2}\U00000394{3}', '2\u03943')
self.assertEqual(f'\U00000394{3}', '\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}', '\u0394')
self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}{3}', '2\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}{3}', '\u03943')
self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}3', '2\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}3', '\u03943')
self.assertEqual(f'\x20', ' ')
self.assertEqual(r'\x20', '\\x20')
self.assertEqual(rf'\x20', '\\x20')
self.assertEqual(f'{2}\x20', '2 ')
self.assertEqual(f'{2}\x20{3}', '2 3')
self.assertEqual(f'\x20{3}', ' 3')
self.assertEqual(f'2\x20', '2 ')
self.assertEqual(f'2\x203', '2 3')
self.assertEqual(f'\x203', ' 3')
with self.assertWarns(DeprecationWarning): # invalid escape sequence
value = eval(r"f'\{6*7}'")
self.assertEqual(value, '\\42')
self.assertEqual(f'\\{6*7}', '\\42')
self.assertEqual(fr'\{6*7}', '\\42')
AMPERSAND = 'spam'
# Get the right unicode character (&), or pick up local variable
# depending on the number of backslashes.
self.assertEqual(f'\N{AMPERSAND}', '&')
self.assertEqual(f'\\N{AMPERSAND}', '\\Nspam')
self.assertEqual(fr'\N{AMPERSAND}', '\\Nspam')
self.assertEqual(f'\\\N{AMPERSAND}', '\\&')
def test_misformed_unicode_character_name(self):
# These test are needed because unicode names are parsed
# differently inside f-strings.
self.assertAllRaise(SyntaxError, r"\(unicode error\) 'unicodeescape' codec can't decode bytes in position .*: malformed \\N character escape",
[r"f'\N'",
r"f'\N{'",
r"f'\N{GREEK CAPITAL LETTER DELTA'",
# Here are the non-f-string versions,
# which should give the same errors.
r"'\N'",
r"'\N{'",
r"'\N{GREEK CAPITAL LETTER DELTA'",
])
def test_no_backslashes_in_expression_part(self):
self.assertAllRaise(SyntaxError, 'f-string expression part cannot include a backslash',
[r"f'{\'a\'}'",
r"f'{\t3}'",
r"f'{\}'",
r"rf'{\'a\'}'",
r"rf'{\t3}'",
r"rf'{\}'",
r"""rf'{'\N{LEFT CURLY BRACKET}'}'""",
r"f'{\n}'",
])
def test_no_escapes_for_braces(self):
"""
Only literal curly braces begin an expression.
"""
# \x7b is '{'.
self.assertEqual(f'\x7b1+1}}', '{1+1}')
self.assertEqual(f'\x7b1+1', '{1+1')
self.assertEqual(f'\u007b1+1', '{1+1')
self.assertEqual(f'\N{LEFT CURLY BRACKET}1+1\N{RIGHT CURLY BRACKET}', '{1+1}')
def test_newlines_in_expressions(self):
self.assertEqual(f'{0}', '0')
self.assertEqual(rf'''{3+
4}''', '7')
def test_lambda(self):
x = 5
self.assertEqual(f'{(lambda y:x*y)('8')!r}', "'88888'")
self.assertEqual(f'{(lambda y:x*y)('8')!r:10}', "'88888' ")
self.assertEqual(f'{(lambda y:x*y)('8'):10}', "88888 ")
# lambda doesn't work without parens, because the colon
# makes the parser think it's a format_spec
self.assertAllRaise(SyntaxError, 'f-string: invalid syntax',
["f'{lambda x:x}'",
])
def test_yield(self):
# Not terribly useful, but make sure the yield turns
# a function into a generator
def fn(y):
f'y:{yield y*2}'
f'{yield}'
g = fn(4)
self.assertEqual(next(g), 8)
self.assertEqual(next(g), None)
def test_yield_send(self):
def fn(x):
yield f'x:{yield (lambda i: x * i)}'
g = fn(10)
the_lambda = next(g)
self.assertEqual(the_lambda(4), 40)
self.assertEqual(g.send('string'), 'x:string')
def test_expressions_with_triple_quoted_strings(self):
self.assertEqual(f"{"""x"""}", 'x')
self.assertEqual(f"{"""eric"s"""}", "eric's")
# Test concatenation within an expression
self.assertEqual(f'{'x' '''eric's''' 'y'}', 'xeric"sy')
self.assertEqual(f'{'x' '''eric's'''}', 'xeric"s')
self.assertEqual(f'{'''eric's''' 'y'}', 'eric"sy')
self.assertEqual(f'{'''x''' '''eric's''' 'y'}', 'xeric"sy')
self.assertEqual(f'{'''x''' '''eric's''' '''y'''}', 'xeric"sy')
self.assertEqual(f'{r'''x''' '''eric's''' '''y'''}', 'xeric"sy')
def test_multiple_vars(self):
x = 98
y = 'abc'
self.assertEqual(f'{x}{y}', '98abc')
self.assertEqual(f'X{x}{y}', 'X98abc')
self.assertEqual(f'{x}X{y}', '98Xabc')
self.assertEqual(f'{x}{y}X', '98abcX')
self.assertEqual(f'X{x}Y{y}', 'X98Yabc')
self.assertEqual(f'X{x}{y}Y', 'X98abcY')
self.assertEqual(f'{x}X{y}Y', '98XabcY')
self.assertEqual(f'X{x}Y{y}Z', 'X98YabcZ')
def test_closure(self):
def outer(x):
def inner():
return f'x:{x}'
return inner
self.assertEqual(outer('987')(), 'x:987')
self.assertEqual(outer(7)(), 'x:7')
def test_arguments(self):
y = 2
def f(x, width):
return f'x={x*y:{width}}'
self.assertEqual(f('foo', 10), 'x=foofoo ')
x = 'bar'
self.assertEqual(f(10, 10), 'x= 20')
def test_locals(self):
value = 123
self.assertEqual(f'v:{value}', 'v:123')
def test_missing_variable(self):
with self.assertRaises(NameError):
f'v:{value}'
def test_missing_format_spec(self):
class O:
def __format__(self, spec):
if not spec:
return '*'
return spec
self.assertEqual(f'{O():x}', 'x')
self.assertEqual(f'{O()}', '*')
self.assertEqual(f'{O():}', '*')
self.assertEqual(f'{3:}', '3')
self.assertEqual(f'{3!s:}', '3')
def test_global(self):
self.assertEqual(f'g:{a_global}', 'g:global variable')
self.assertEqual(f'g:{a_global!r}', "g:'global variable'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:global variable l:local variable')
self.assertEqual(f'g:{a_global!r}',
"g:'global variable'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:global variable l:'local variable'")
self.assertIn("module 'unittest' from", f'{unittest}')
def test_shadowed_global(self):
a_global = 'really a local'
self.assertEqual(f'g:{a_global}', 'g:really a local')
self.assertEqual(f'g:{a_global!r}', "g:'really a local'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:really a local l:local variable')
self.assertEqual(f'g:{a_global!r}',
"g:'really a local'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:really a local l:'local variable'")
def test_call(self):
def foo(x):
return 'x=' + str(x)
self.assertEqual(f'{foo(10)}', 'x=10')
    def test_nested_fstrings(self):
        """An f-string may appear inside another f-string's expression part."""
        y = 5
        self.assertEqual(f'{f'{0}'*3}', '000')
        # The inner f-string can itself reference a local variable.
        self.assertEqual(f'{f'{y}'*3}', '555')
def test_invalid_string_prefixes(self):
single_quote_cases = ["fu''",
"uf''",
"Fu''",
"fU''",
"Uf''",
"uF''",
"ufr''",
"urf''",
"fur''",
"fru''",
"rfu''",
"ruf''",
"FUR''",
"Fur''",
"fb''",
"fB''",
"Fb''",
"FB''",
"bf''",
"bF''",
"Bf''",
"BF''",]
double_quote_cases = [case.replace("'", '"') for case in single_quote_cases]
self.assertAllRaise(SyntaxError, 'unexpected EOF while parsing',
single_quote_cases + double_quote_cases)
def test_leading_trailing_spaces(self):
self.assertEqual(f'{ 3}', '3')
self.assertEqual(f'{ 3}', '3')
self.assertEqual(f'{3 }', '3')
self.assertEqual(f'{3 }', '3')
self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]}}',
'expr={1: 2}')
self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]} }',
'expr={1: 2}')
    def test_not_equal(self):
        # There's a special test for this because there's a special
        # case in the f-string parser to look for != as not ending an
        # expression. Normally it would, while looking for !s or !r.
        self.assertEqual(f'{3!=4}', 'True')
        self.assertEqual(f'{3!=4:}', 'True')  # != followed by an empty format spec
        self.assertEqual(f'{3!=4!s}', 'True')  # != followed by a real !s conversion
        self.assertEqual(f'{3!=4!s:.3}', 'Tru')  # conversion plus a precision spec
def test_equal_equal(self):
# Because an expression ending in = has special meaning,
# there's a special test for ==. Make sure it works.
self.assertEqual(f'{0==1}', 'False')
def test_conversions(self):
self.assertEqual(f'{3.14:10.10}', ' 3.14')
self.assertEqual(f'{3.14!s:10.10}', '3.14 ')
self.assertEqual(f'{3.14!r:10.10}', '3.14 ')
self.assertEqual(f'{3.14!a:10.10}', '3.14 ')
self.assertEqual(f'{'a'}', 'a')
self.assertEqual(f'{'a'!r}', "'a'")
self.assertEqual(f'{'a'!a}', "'a'")
# Not a conversion.
self.assertEqual(f'{'a!r'}', "a!r")
# Not a conversion, but show that ! is allowed in a format spec.
self.assertEqual(f'{3.14:!<10.10}', '3.14!!!!!!')
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
["f'{3!g}'",
"f'{3!A}'",
"f'{3!3}'",
"f'{3!G}'",
"f'{3!!}'",
"f'{3!:}'",
"f'{3! s}'", # no space before conversion char
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{x!s{y}}'",
"f'{3!ss}'",
"f'{3!ss:}'",
"f'{3!ss:s}'",
])
def test_assignment(self):
self.assertAllRaise(SyntaxError, 'invalid syntax',
["f'' = 3",
"f'{0}' = x",
"f'{x}' = x",
])
def test_del(self):
self.assertAllRaise(SyntaxError, 'invalid syntax',
["del f''",
"del '' f''",
])
def test_mismatched_braces(self):
self.assertAllRaise(SyntaxError, "f-string: single '}' is not allowed",
["f'{{}'",
"f'{{}}}'",
"f'}'",
"f'x}'",
"f'x}x'",
r"f'\u007b}'",
# Can't have { or } in a format spec.
"f'{3:}>10}'",
"f'{3:}}>10}'",
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3:{{>10}'",
"f'{3'",
"f'{3!'",
"f'{3:'",
"f'{3!s'",
"f'{3!s:'",
"f'{3!s:3'",
"f'x{'",
"f'x{x'",
"f'{x'",
"f'{3:s'",
"f'{{{'",
"f'{{}}{'",
"f'{'",
])
# But these are just normal strings.
self.assertEqual(f'{'{'}', '{')
self.assertEqual(f'{'}'}', '}')
self.assertEqual(f'{3:{'}'}>10}', '}}}}}}}}}3')
self.assertEqual(f'{2:{'{'}>10}', '{{{{{{{{{2')
def test_if_conditional(self):
# There's special logic in compile.c to test if the
# conditional for an if (and while) are constants. Exercise
# that code.
def test_fstring(x, expected):
flag = 0
if f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_empty(x, expected):
flag = 0
if '' f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_non_empty(x, expected):
flag = 0
if ' ' f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
test_fstring('', 2)
test_fstring(' ', 1)
test_concat_empty('', 2)
test_concat_empty(' ', 1)
test_concat_non_empty('', 1)
test_concat_non_empty(' ', 1)
def test_empty_format_specifier(self):
x = 'test'
self.assertEqual(f'{x}', 'test')
self.assertEqual(f'{x:}', 'test')
self.assertEqual(f'{x!s:}', 'test')
self.assertEqual(f'{x!r:}', "'test'")
def test_str_format_differences(self):
d = {'a': 'string',
0: 'integer',
}
a = 0
self.assertEqual(f'{d[0]}', 'integer')
self.assertEqual(f'{d['a']}', 'string')
self.assertEqual(f'{d[a]}', 'integer')
self.assertEqual('{d[a]}'.format(d=d), 'string')
self.assertEqual('{d[0]}'.format(d=d), 'integer')
def test_errors(self):
# see issue 26287
self.assertAllRaise(TypeError, 'unsupported',
[r"f'{(lambda: 0):x}'",
r"f'{(0,):x}'",
])
self.assertAllRaise(ValueError, 'Unknown format code',
[r"f'{1000:j}'",
r"f'{1000:j}'",
])
def test_filename_in_syntaxerror(self):
# see issue 38964
with temp_cwd() as cwd:
file_path = os.path.join(cwd, 't.py')
with open(file_path, 'w') as f:
f.write('f"{a b}"') # This generates a SyntaxError
_, _, stderr = assert_python_failure(file_path,
PYTHONIOENCODING='ascii')
self.assertIn(file_path.encode('ascii', 'backslashreplace'), stderr)
def test_loop(self):
for i in range(1000):
self.assertEqual(f'i:{i}', 'i:' + str(i))
def test_dict(self):
d = {'"': 'dquote',
"'": 'squote',
'foo': 'bar',
}
self.assertEqual(f'''{d[''']}''', 'squote')
self.assertEqual(f"""{d["""]}""", 'dquote')
self.assertEqual(f'{d['foo']}', 'bar')
self.assertEqual(f"{d["foo"]}", 'bar')
def test_backslash_char(self):
# Check eval of a backslash followed by a control char.
# See bpo-30682: this used to raise an assert in pydebug mode.
self.assertEqual(eval('f"\\\n"'), '')
self.assertEqual(eval('f"\\\r"'), '')
def test_debug_conversion(self):
x = 'A string'
self.assertEqual(f'{x=}', 'x=' + repr(x))
self.assertEqual(f'{x =}', 'x =' + repr(x))
self.assertEqual(f'{x=!s}', 'x=' + str(x))
self.assertEqual(f'{x=!r}', 'x=' + repr(x))
self.assertEqual(f'{x=!a}', 'x=' + ascii(x))
x = 2.71828
self.assertEqual(f'{x=:.2f}', 'x=' + format(x, '.2f'))
self.assertEqual(f'{x=:}', 'x=' + format(x, ''))
self.assertEqual(f'{x=!r:^20}', 'x=' + format(repr(x), '^20'))
self.assertEqual(f'{x=!s:^20}', 'x=' + format(str(x), '^20'))
self.assertEqual(f'{x=!a:^20}', 'x=' + format(ascii(x), '^20'))
x = 9
self.assertEqual(f'{3*x+15=}', '3*x+15=42')
# There is code in ast.c that deals with non-ascii expression values. So,
# use a unicode identifier to trigger that.
tenπ = 31.4
self.assertEqual(f'{tenπ=:.2f}', 'tenπ=31.40')
# Also test with Unicode in non-identifiers.
self.assertEqual(f'{'Σ'=}', '"Σ"=\'Σ\'')
# Make sure nested fstrings still work.
self.assertEqual(f'{f'{3.1415=:.1f}':*^20}', '*****3.1415=3.1*****')
# Make sure text before and after an expression with = works
# correctly.
pi = 'π'
self.assertEqual(f'alpha α {pi=} ω omega', "alpha α pi='π' ω omega")
# Check multi-line expressions.
self.assertEqual(f'''{
3
=}''', '\n3\n=3')
# Since = is handled specially, make sure all existing uses of
# it still work.
self.assertEqual(f'{0==1}', 'False')
self.assertEqual(f'{0!=1}', 'True')
self.assertEqual(f'{0<=1}', 'True')
self.assertEqual(f'{0>=1}', 'False')
self.assertEqual(f'{(x:='5')}', '5')
self.assertEqual(x, '5')
self.assertEqual(f'{(x:=5)}', '5')
self.assertEqual(x, 5)
self.assertEqual(f'{'='}', '=')
x = 20
# This isn't an assignment expression, it's 'x', with a format
# spec of '=10'. See test_walrus: you need to use parens.
self.assertEqual(f'{x:=10}', ' 20')
# Test named function parameters, to make sure '=' parsing works
# there.
def f(a):
nonlocal x
oldx = x
x = a
return oldx
x = 0
self.assertEqual(f'{f(a='3=')}', '0')
self.assertEqual(x, '3=')
self.assertEqual(f'{f(a=4)}', '3=')
self.assertEqual(x, 4)
# Make sure __format__ is being called.
class C:
def __format__(self, s):
return f'FORMAT-{s}'
def __repr__(self):
return 'REPR'
self.assertEqual(f'{C()=}', 'C()=REPR')
self.assertEqual(f'{C()=!r}', 'C()=REPR')
self.assertEqual(f'{C()=:}', 'C()=FORMAT-')
self.assertEqual(f'{C()=: }', 'C()=FORMAT- ')
self.assertEqual(f'{C()=:x}', 'C()=FORMAT-x')
self.assertEqual(f'{C()=!r:*^20}', 'C()=********REPR********')
self.assertRaises(SyntaxError, eval, "f'{C=]'")
# Make sure leading and following text works.
x = 'foo'
self.assertEqual(f'X{x=}Y', 'Xx='+repr(x)+'Y')
# Make sure whitespace around the = works.
self.assertEqual(f'X{x =}Y', 'Xx ='+repr(x)+'Y')
self.assertEqual(f'X{x= }Y', 'Xx= '+repr(x)+'Y')
self.assertEqual(f'X{x = }Y', 'Xx = '+repr(x)+'Y')
# These next lines contains tabs. Backslash escapes don't
# work in f-strings.
# patchcheck doesn't like these tabs. So the only way to test
# this will be to dynamically created and exec the f-strings. But
# that's such a hassle I'll save it for another day. For now, convert
# the tabs to spaces just to shut up patchcheck.
#self.assertEqual(f'X{x =}Y', 'Xx\t='+repr(x)+'Y')
#self.assertEqual(f'X{x = }Y', 'Xx\t=\t'+repr(x)+'Y')
    def test_walrus(self):
        """Disambiguate ':=' in a replacement field: format spec vs. walrus."""
        x = 20
        # This isn't an assignment expression, it's 'x', with a format
        # spec of '=10'.
        self.assertEqual(f'{x:=10}', '        20')
        # This is an assignment expression, which requires parens.
        self.assertEqual(f'{(x:=10)}', '10')
        self.assertEqual(x, 10)  # the rebinding is visible after the f-string
    def test_invalid_syntax_error_message(self):
        # A malformed expression inside a replacement field is reported
        # as an f-string syntax error.
        # NOTE(review): the exact message text is interpreter-version
        # specific — confirm against the CPython version under test.
        with self.assertRaisesRegex(SyntaxError, "f-string: invalid syntax"):
            compile("f'{a $ b}'", "?", "exec")
def test_with_two_commas_in_format_specifier(self):
error_msg = re.escape("Cannot specify ',' with ','.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:,,}'
def test_with_two_underscore_in_format_specifier(self):
error_msg = re.escape("Cannot specify '_' with '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:__}'
def test_with_a_commas_and_an_underscore_in_format_specifier(self):
error_msg = re.escape("Cannot specify both ',' and '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:,_}'
def test_with_an_underscore_and_a_comma_in_format_specifier(self):
error_msg = re.escape("Cannot specify both ',' and '_'.")
with self.assertRaisesRegex(ValueError, error_msg):
f'{1:_,}'
def test_syntax_error_for_starred_expressions(self):
error_msg = re.escape("can't use starred expression here")
with self.assertRaisesRegex(SyntaxError, error_msg):
compile("f'{*a}'", "?", "exec")
error_msg = re.escape("can't use double starred expression here")
with self.assertRaisesRegex(SyntaxError, error_msg):
compile("f'{**a}'", "?", "exec")
if __name__ == '__main__':
unittest.main()
| # -*- coding: utf-8 -*-
# There are tests here with unicode string literals and
# identifiers. There's a code in ast.c that was added because of a
# failure with a non-ascii-only expression. So, I have tests for
# that. There are workarounds that would let me run tests for that
# code without unicode identifiers and strings, but just using them
# directly seems like the easiest and therefore safest thing to do.
# Unicode identifiers in tests is allowed by PEP 3131.
import ast
import os
import re
import types
import decimal
import unittest
from test.support.os_helper import temp_cwd
from test.support.script_helper import assert_python_failure
a_global = 'global variable'
# You could argue that I'm too strict in looking for specific error
# values with assertRaisesRegex, but without it it's way too easy to
# make a syntax error in the test strings. Especially with all of the
# triple quotes, raw strings, backslashes, etc. I think it's a
# worthwhile tradeoff. When I switched to this method, I found many
# examples where I wasn't testing what I thought I was.
class TestCase(unittest.TestCase):
def assertAllRaise(self, exception_type, regex, error_strings):
for str in error_strings:
with self.subTest(str=str):
with self.assertRaisesRegex(exception_type, regex):
eval(str)
    def test__format__lookup(self):
        # Make sure __format__ is looked up on the type, not the instance.
        class X:
            def __format__(self, spec):
                return 'class'
        x = X()
        # Add a bound __format__ method to the 'y' instance, but not
        # the 'x' instance.
        y = X()
        y.__format__ = types.MethodType(lambda self, spec: 'instance', y)
        # Both f-strings and format() resolve __format__ on the type, so
        # the instance attribute added above is ignored.
        self.assertEqual(f'{y}', format(y))
        self.assertEqual(f'{y}', 'class')
        self.assertEqual(format(x), format(y))
        # __format__ is not called this way, but still make sure it
        # returns what we expect (so we can make sure we're bypassing
        # it).
        self.assertEqual(x.__format__(''), 'class')
        self.assertEqual(y.__format__(''), 'instance')
        # This is how __format__ is actually called.
        self.assertEqual(type(x).__format__(x, ''), 'class')
        self.assertEqual(type(y).__format__(y, ''), 'class')
    def test_ast(self):
        """Parsing/compiling an f-string must not evaluate its expressions."""
        # Inspired by http://bugs.python.org/issue24975
        class X:
            def __init__(self):
                self.called = False
            def __call__(self):
                self.called = True
                return 4
        x = X()
        expr = """
a = 10
f'{a * x()}'"""
        t = ast.parse(expr)
        c = compile(t, '', 'exec')
        # Make sure x was not called.
        self.assertFalse(x.called)
        # Actually run the code.
        exec(c)
        # Make sure x was called.
        self.assertTrue(x.called)
def test_ast_line_numbers(self):
expr = """
a = 10
f'{a * x()}'"""
t = ast.parse(expr)
self.assertEqual(type(t), ast.Module)
self.assertEqual(len(t.body), 2)
# check `a = 10`
self.assertEqual(type(t.body[0]), ast.Assign)
self.assertEqual(t.body[0].lineno, 2)
# check `f'...'`
self.assertEqual(type(t.body[1]), ast.Expr)
self.assertEqual(type(t.body[1].value), ast.JoinedStr)
self.assertEqual(len(t.body[1].value.values), 1)
self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
self.assertEqual(t.body[1].lineno, 3)
self.assertEqual(t.body[1].value.lineno, 3)
self.assertEqual(t.body[1].value.values[0].lineno, 3)
# check the binop location
binop = t.body[1].value.values[0].value
self.assertEqual(type(binop), ast.BinOp)
self.assertEqual(type(binop.left), ast.Name)
self.assertEqual(type(binop.op), ast.Mult)
self.assertEqual(type(binop.right), ast.Call)
self.assertEqual(binop.lineno, 3)
self.assertEqual(binop.left.lineno, 3)
self.assertEqual(binop.right.lineno, 3)
self.assertEqual(binop.col_offset, 3)
self.assertEqual(binop.left.col_offset, 3)
self.assertEqual(binop.right.col_offset, 7)
def test_ast_line_numbers_multiple_formattedvalues(self):
    # Each FormattedValue in a single f-string gets its own correct
    # column offset.
    expr = """
f'no formatted values'
f'eggs {a * x()} spam {b + y()}'"""
    t = ast.parse(expr)
    self.assertEqual(type(t), ast.Module)
    self.assertEqual(len(t.body), 2)
    # check `f'no formatted value'`
    self.assertEqual(type(t.body[0]), ast.Expr)
    self.assertEqual(type(t.body[0].value), ast.JoinedStr)
    self.assertEqual(t.body[0].lineno, 2)
    # check `f'...'`
    self.assertEqual(type(t.body[1]), ast.Expr)
    self.assertEqual(type(t.body[1].value), ast.JoinedStr)
    self.assertEqual(len(t.body[1].value.values), 4)
    self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
    self.assertEqual(type(t.body[1].value.values[0].value), str)
    self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
    self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
    self.assertEqual(type(t.body[1].value.values[2].value), str)
    self.assertEqual(type(t.body[1].value.values[3]), ast.FormattedValue)
    self.assertEqual(t.body[1].lineno, 3)
    self.assertEqual(t.body[1].value.lineno, 3)
    self.assertEqual(t.body[1].value.values[0].lineno, 3)
    self.assertEqual(t.body[1].value.values[1].lineno, 3)
    self.assertEqual(t.body[1].value.values[2].lineno, 3)
    self.assertEqual(t.body[1].value.values[3].lineno, 3)
    # check the first binop location
    binop1 = t.body[1].value.values[1].value
    self.assertEqual(type(binop1), ast.BinOp)
    self.assertEqual(type(binop1.left), ast.Name)
    self.assertEqual(type(binop1.op), ast.Mult)
    self.assertEqual(type(binop1.right), ast.Call)
    self.assertEqual(binop1.lineno, 3)
    self.assertEqual(binop1.left.lineno, 3)
    self.assertEqual(binop1.right.lineno, 3)
    self.assertEqual(binop1.col_offset, 8)
    self.assertEqual(binop1.left.col_offset, 8)
    self.assertEqual(binop1.right.col_offset, 12)
    # check the second binop location
    binop2 = t.body[1].value.values[3].value
    self.assertEqual(type(binop2), ast.BinOp)
    self.assertEqual(type(binop2.left), ast.Name)
    self.assertEqual(type(binop2.op), ast.Add)
    self.assertEqual(type(binop2.right), ast.Call)
    self.assertEqual(binop2.lineno, 3)
    self.assertEqual(binop2.left.lineno, 3)
    self.assertEqual(binop2.right.lineno, 3)
    self.assertEqual(binop2.col_offset, 23)
    self.assertEqual(binop2.left.col_offset, 23)
    self.assertEqual(binop2.right.col_offset, 27)
def test_ast_line_numbers_nested(self):
    # Locations are also correct for an f-string nested inside another
    # f-string's expression.
    expr = """
a = 10
f'{a * f"-{x()}-"}'"""
    t = ast.parse(expr)
    self.assertEqual(type(t), ast.Module)
    self.assertEqual(len(t.body), 2)
    # check `a = 10`
    self.assertEqual(type(t.body[0]), ast.Assign)
    self.assertEqual(t.body[0].lineno, 2)
    # check `f'...'`
    self.assertEqual(type(t.body[1]), ast.Expr)
    self.assertEqual(type(t.body[1].value), ast.JoinedStr)
    self.assertEqual(len(t.body[1].value.values), 1)
    self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
    self.assertEqual(t.body[1].lineno, 3)
    self.assertEqual(t.body[1].value.lineno, 3)
    self.assertEqual(t.body[1].value.values[0].lineno, 3)
    # check the binop location
    binop = t.body[1].value.values[0].value
    self.assertEqual(type(binop), ast.BinOp)
    self.assertEqual(type(binop.left), ast.Name)
    self.assertEqual(type(binop.op), ast.Mult)
    self.assertEqual(type(binop.right), ast.JoinedStr)
    self.assertEqual(binop.lineno, 3)
    self.assertEqual(binop.left.lineno, 3)
    self.assertEqual(binop.right.lineno, 3)
    self.assertEqual(binop.col_offset, 3)
    self.assertEqual(binop.left.col_offset, 3)
    self.assertEqual(binop.right.col_offset, 7)
    # check the nested call location
    self.assertEqual(len(binop.right.values), 3)
    self.assertEqual(type(binop.right.values[0]), ast.Constant)
    self.assertEqual(type(binop.right.values[0].value), str)
    self.assertEqual(type(binop.right.values[1]), ast.FormattedValue)
    self.assertEqual(type(binop.right.values[2]), ast.Constant)
    self.assertEqual(type(binop.right.values[2].value), str)
    self.assertEqual(binop.right.values[0].lineno, 3)
    self.assertEqual(binop.right.values[1].lineno, 3)
    self.assertEqual(binop.right.values[2].lineno, 3)
    call = binop.right.values[1].value
    self.assertEqual(type(call), ast.Call)
    self.assertEqual(call.lineno, 3)
    self.assertEqual(call.col_offset, 11)
def test_ast_line_numbers_duplicate_expression(self):
    """Duplicate expression

    NOTE: this is currently broken, always sets location of the first
    expression.
    """
    expr = """
a = 10
f'{a * x()} {a * x()} {a * x()}'
"""
    t = ast.parse(expr)
    self.assertEqual(type(t), ast.Module)
    self.assertEqual(len(t.body), 2)
    # check `a = 10`
    self.assertEqual(type(t.body[0]), ast.Assign)
    self.assertEqual(t.body[0].lineno, 2)
    # check `f'...'`
    self.assertEqual(type(t.body[1]), ast.Expr)
    self.assertEqual(type(t.body[1].value), ast.JoinedStr)
    self.assertEqual(len(t.body[1].value.values), 5)
    self.assertEqual(type(t.body[1].value.values[0]), ast.FormattedValue)
    self.assertEqual(type(t.body[1].value.values[1]), ast.Constant)
    self.assertEqual(type(t.body[1].value.values[1].value), str)
    self.assertEqual(type(t.body[1].value.values[2]), ast.FormattedValue)
    self.assertEqual(type(t.body[1].value.values[3]), ast.Constant)
    self.assertEqual(type(t.body[1].value.values[3].value), str)
    self.assertEqual(type(t.body[1].value.values[4]), ast.FormattedValue)
    self.assertEqual(t.body[1].lineno, 3)
    self.assertEqual(t.body[1].value.lineno, 3)
    self.assertEqual(t.body[1].value.values[0].lineno, 3)
    self.assertEqual(t.body[1].value.values[1].lineno, 3)
    self.assertEqual(t.body[1].value.values[2].lineno, 3)
    self.assertEqual(t.body[1].value.values[3].lineno, 3)
    self.assertEqual(t.body[1].value.values[4].lineno, 3)
    # check the first binop location
    binop = t.body[1].value.values[0].value
    self.assertEqual(type(binop), ast.BinOp)
    self.assertEqual(type(binop.left), ast.Name)
    self.assertEqual(type(binop.op), ast.Mult)
    self.assertEqual(type(binop.right), ast.Call)
    self.assertEqual(binop.lineno, 3)
    self.assertEqual(binop.left.lineno, 3)
    self.assertEqual(binop.right.lineno, 3)
    self.assertEqual(binop.col_offset, 3)
    self.assertEqual(binop.left.col_offset, 3)
    self.assertEqual(binop.right.col_offset, 7)
    # check the second binop location
    binop = t.body[1].value.values[2].value
    self.assertEqual(type(binop), ast.BinOp)
    self.assertEqual(type(binop.left), ast.Name)
    self.assertEqual(type(binop.op), ast.Mult)
    self.assertEqual(type(binop.right), ast.Call)
    self.assertEqual(binop.lineno, 3)
    self.assertEqual(binop.left.lineno, 3)
    self.assertEqual(binop.right.lineno, 3)
    self.assertEqual(binop.col_offset, 3)  # FIXME: this is wrong
    self.assertEqual(binop.left.col_offset, 3)  # FIXME: this is wrong
    self.assertEqual(binop.right.col_offset, 7)  # FIXME: this is wrong
    # check the third binop location
    binop = t.body[1].value.values[4].value
    self.assertEqual(type(binop), ast.BinOp)
    self.assertEqual(type(binop.left), ast.Name)
    self.assertEqual(type(binop.op), ast.Mult)
    self.assertEqual(type(binop.right), ast.Call)
    self.assertEqual(binop.lineno, 3)
    self.assertEqual(binop.left.lineno, 3)
    self.assertEqual(binop.right.lineno, 3)
    self.assertEqual(binop.col_offset, 3)  # FIXME: this is wrong
    self.assertEqual(binop.left.col_offset, 3)  # FIXME: this is wrong
    self.assertEqual(binop.right.col_offset, 7)  # FIXME: this is wrong
def test_ast_line_numbers_multiline_fstring(self):
    # See bpo-30465 for details.
    # NOTE(review): the leading whitespace inside this triple-quoted
    # fixture appears to have been stripped by a formatting pass; the
    # col_offset assertions below (4 and 7) imply the expression lines
    # originally carried leading spaces -- confirm against upstream.
    expr = """
a = 10
f'''
{a
*
x()}
non-important content
'''
"""
    t = ast.parse(expr)
    self.assertEqual(type(t), ast.Module)
    self.assertEqual(len(t.body), 2)
    # check `a = 10`
    self.assertEqual(type(t.body[0]), ast.Assign)
    self.assertEqual(t.body[0].lineno, 2)
    # check `f'...'`
    self.assertEqual(type(t.body[1]), ast.Expr)
    self.assertEqual(type(t.body[1].value), ast.JoinedStr)
    self.assertEqual(len(t.body[1].value.values), 3)
    self.assertEqual(type(t.body[1].value.values[0]), ast.Constant)
    self.assertEqual(type(t.body[1].value.values[0].value), str)
    self.assertEqual(type(t.body[1].value.values[1]), ast.FormattedValue)
    self.assertEqual(type(t.body[1].value.values[2]), ast.Constant)
    self.assertEqual(type(t.body[1].value.values[2].value), str)
    self.assertEqual(t.body[1].lineno, 3)
    self.assertEqual(t.body[1].value.lineno, 3)
    self.assertEqual(t.body[1].value.values[0].lineno, 3)
    self.assertEqual(t.body[1].value.values[1].lineno, 3)
    self.assertEqual(t.body[1].value.values[2].lineno, 3)
    self.assertEqual(t.body[1].col_offset, 0)
    self.assertEqual(t.body[1].value.col_offset, 0)
    self.assertEqual(t.body[1].value.values[0].col_offset, 0)
    self.assertEqual(t.body[1].value.values[1].col_offset, 0)
    self.assertEqual(t.body[1].value.values[2].col_offset, 0)
    # NOTE: the following lineno information and col_offset is correct for
    # expressions within FormattedValues.
    binop = t.body[1].value.values[1].value
    self.assertEqual(type(binop), ast.BinOp)
    self.assertEqual(type(binop.left), ast.Name)
    self.assertEqual(type(binop.op), ast.Mult)
    self.assertEqual(type(binop.right), ast.Call)
    self.assertEqual(binop.lineno, 4)
    self.assertEqual(binop.left.lineno, 4)
    self.assertEqual(binop.right.lineno, 6)
    self.assertEqual(binop.col_offset, 4)
    self.assertEqual(binop.left.col_offset, 4)
    self.assertEqual(binop.right.col_offset, 7)
def test_ast_line_numbers_with_parentheses(self):
    # Locations of expressions inside parenthesized / implicitly
    # concatenated f-strings.
    # NOTE(review): the leading whitespace inside these triple-quoted
    # fixtures appears to have been stripped by a formatting pass; the
    # col_offset assertions (8, 12, 27) imply the original lines were
    # indented -- confirm against upstream.
    expr = """
x = (
f" {test(t)}"
)"""
    t = ast.parse(expr)
    self.assertEqual(type(t), ast.Module)
    self.assertEqual(len(t.body), 1)
    # check the test(t) location
    call = t.body[0].value.values[1].value
    self.assertEqual(type(call), ast.Call)
    self.assertEqual(call.lineno, 3)
    self.assertEqual(call.end_lineno, 3)
    self.assertEqual(call.col_offset, 8)
    self.assertEqual(call.end_col_offset, 15)

    expr = """
x = (
'PERL_MM_OPT', (
f'wat'
f'some_string={f(x)} '
f'wat'
),
)
"""
    t = ast.parse(expr)
    self.assertEqual(type(t), ast.Module)
    self.assertEqual(len(t.body), 1)
    # check the fstring
    fstring = t.body[0].value.elts[1]
    self.assertEqual(type(fstring), ast.JoinedStr)
    self.assertEqual(len(fstring.values), 3)
    wat1, middle, wat2 = fstring.values
    # check the first wat
    self.assertEqual(type(wat1), ast.Constant)
    self.assertEqual(wat1.lineno, 4)
    self.assertEqual(wat1.end_lineno, 6)
    self.assertEqual(wat1.col_offset, 12)
    self.assertEqual(wat1.end_col_offset, 18)
    # check the call
    call = middle.value
    self.assertEqual(type(call), ast.Call)
    self.assertEqual(call.lineno, 5)
    self.assertEqual(call.end_lineno, 5)
    self.assertEqual(call.col_offset, 27)
    self.assertEqual(call.end_col_offset, 31)
    # check the second wat
    self.assertEqual(type(wat2), ast.Constant)
    self.assertEqual(wat2.lineno, 4)
    self.assertEqual(wat2.end_lineno, 6)
    self.assertEqual(wat2.col_offset, 12)
    self.assertEqual(wat2.end_col_offset, 18)
def test_docstring(self):
    # An f-string is never a docstring, even when implicitly
    # concatenated with a plain string literal.
    def f():
        f'''Not a docstring'''
    self.assertIsNone(f.__doc__)
    def g():
        '''Not a docstring''' \
        f''
    self.assertIsNone(g.__doc__)
def test_literal_eval(self):
    # ast.literal_eval() rejects f-strings: they are not literals.
    with self.assertRaisesRegex(ValueError, 'malformed node or string'):
        ast.literal_eval("f'x'")
def test_ast_compile_time_concat(self):
    # Adjacent plain-string and f-string literals are concatenated at
    # compile time, including via the ast.parse -> compile path.
    x = ['']
    expr = """x[0] = 'foo' f'{3}'"""
    t = ast.parse(expr)
    c = compile(t, '', 'exec')
    exec(c)
    self.assertEqual(x[0], 'foo3')
def test_compile_time_concat_errors(self):
    # Mixing bytes literals with f-strings in implicit concatenation is
    # a syntax error, in either order.
    self.assertAllRaise(SyntaxError,
                        'cannot mix bytes and nonbytes literals',
                        [r"""f'' b''""",
                         r"""b'' f''""",
                         ])
def test_literal(self):
    # An f-string with no replacement fields behaves exactly like the
    # equivalent plain string literal.
    self.assertEqual(f' ', ' ')
    self.assertEqual(f'a', 'a')
    self.assertEqual(f'', '')
def test_unterminated_string(self):
    # A string literal opened inside a replacement field but never
    # closed is a syntax error.
    self.assertAllRaise(SyntaxError, 'f-string: unterminated string',
                        [r"""f'{"x'""",
                         r"""f'{"x}'""",
                         r"""f'{("x'""",
                         r"""f'{("x}'""",
                         ])
def test_mismatched_parens(self):
    # Every combination of mismatched bracket pairs inside a replacement
    # field reports which opener the bad closer failed to match.
    self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
                        r"does not match opening parenthesis '\('",
                        ["f'{((}'",
                         ])
    self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\)' "
                        r"does not match opening parenthesis '\['",
                        ["f'{a[4)}'",
                        ])
    self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\]' "
                        r"does not match opening parenthesis '\('",
                        ["f'{a(4]}'",
                        ])
    self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
                        r"does not match opening parenthesis '\['",
                        ["f'{a[4}'",
                        ])
    self.assertAllRaise(SyntaxError, r"f-string: closing parenthesis '\}' "
                        r"does not match opening parenthesis '\('",
                        ["f'{a(4}'",
                        ])
    # Deep nesting must fail cleanly rather than crash the parser.
    self.assertRaises(SyntaxError, eval, "f'{" + "("*500 + "}'")
def test_double_braces(self):
    # Doubled braces in the literal part are escapes for a single brace;
    # braces inside a quoted string within an expression are not.
    self.assertEqual(f'{{', '{')
    self.assertEqual(f'a{{', 'a{')
    self.assertEqual(f'{{b', '{b')
    self.assertEqual(f'a{{b', 'a{b')
    self.assertEqual(f'}}', '}')
    self.assertEqual(f'a}}', 'a}')
    self.assertEqual(f'}}b', '}b')
    self.assertEqual(f'a}}b', 'a}b')
    self.assertEqual(f'{{}}', '{}')
    self.assertEqual(f'a{{}}', 'a{}')
    self.assertEqual(f'{{b}}', '{b}')
    self.assertEqual(f'{{}}c', '{}c')
    self.assertEqual(f'a{{b}}', 'a{b}')
    self.assertEqual(f'a{{}}c', 'a{}c')
    self.assertEqual(f'{{b}}c', '{b}c')
    self.assertEqual(f'a{{b}}c', 'a{b}c')
    self.assertEqual(f'{{{10}', '{10')
    self.assertEqual(f'}}{10}', '}10')
    self.assertEqual(f'}}{{{10}', '}{10')
    self.assertEqual(f'}}a{{{10}', '}a{10')
    self.assertEqual(f'{10}{{', '10{')
    self.assertEqual(f'{10}}}', '10}')
    self.assertEqual(f'{10}}}{{', '10}{')
    self.assertEqual(f'{10}}}a{{' '}', '10}a{}')
    # Inside of strings, don't interpret doubled brackets.
    self.assertEqual(f'{"{{}}"}', '{{}}')
    self.assertAllRaise(TypeError, 'unhashable type',
                        ["f'{ {{}} }'",  # dict in a set
                         ])
def test_compile_time_concat(self):
    # Implicit concatenation of plain strings and f-strings, in every
    # order, including empty pieces.  Braces in the plain-string parts
    # are NOT escapes.
    x = 'def'
    self.assertEqual('abc' f'## {x}ghi', 'abc## defghi')
    self.assertEqual('abc' f'{x}' 'ghi', 'abcdefghi')
    self.assertEqual('abc' f'{x}' 'gh' f'i{x:4}', 'abcdefghidef ')
    self.assertEqual('{x}' f'{x}', '{x}def')
    self.assertEqual('{x' f'{x}', '{xdef')
    self.assertEqual('{x}' f'{x}', '{x}def')
    self.assertEqual('{{x}}' f'{x}', '{{x}}def')
    self.assertEqual('{{x' f'{x}', '{{xdef')
    self.assertEqual('x}}' f'{x}', 'x}}def')
    self.assertEqual(f'{x}' 'x}}', 'defx}}')
    self.assertEqual(f'{x}' '', 'def')
    self.assertEqual('' f'{x}' '', 'def')
    self.assertEqual('' f'{x}', 'def')
    self.assertEqual(f'{x}' '2', 'def2')
    self.assertEqual('1' f'{x}' '2', '1def2')
    self.assertEqual('1' f'{x}', '1def')
    self.assertEqual(f'{x}' f'-{x}', 'def-def')
    self.assertEqual('' f'', '')
    self.assertEqual('' f'' '', '')
    self.assertEqual('' f'' '' f'', '')
    self.assertEqual(f'', '')
    self.assertEqual(f'' '', '')
    self.assertEqual(f'' '' f'', '')
    self.assertEqual(f'' '' f'' '', '')
    self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
                        ["f'{3' f'}'",  # can't concat to get a valid f-string
                         ])
def test_comments(self):
    # '#' is allowed inside string literals within an expression, but a
    # bare '#' in the expression part is rejected as a comment.
    # These aren't comments, since they're in strings.
    d = {'#': 'hash'}
    self.assertEqual(f'{"#"}', '#')
    self.assertEqual(f'{d["#"]}', 'hash')
    self.assertAllRaise(SyntaxError, "f-string expression part cannot include '#'",
                        ["f'{1#}'",  # error because the expression becomes "(1#)"
                         "f'{3(#)}'",
                         "f'{#}'",
                         ])
    self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
                        ["f'{)#}'",  # When wrapped in parens, this becomes
                                     # '()#)'.  Make sure that doesn't compile.
                         ])
def test_many_expressions(self):
    # Create a string with many expressions in it. Note that
    # because we have a space in here as a literal, we're actually
    # going to use twice as many ast nodes: one for each literal
    # plus one for each expression.
    def build_fstr(n, extra=''):
        # Source text of an f-string with n '{x} ' fields plus `extra`.
        return "f'" + ('{x} ' * n) + extra + "'"
    x = 'X'
    width = 1
    # Test around 256.
    for i in range(250, 260):
        self.assertEqual(eval(build_fstr(i)), (x+' ')*i)
    # Test concatenating 2 large fstrings.
    self.assertEqual(eval(build_fstr(255)*256), (x+' ')*(255*256))
    s = build_fstr(253, '{x:{width}} ')
    self.assertEqual(eval(s), (x+' ')*254)
    # Test lots of expressions and constants, concatenated.
    s = "f'{1}' 'x' 'y'" * 1024
    self.assertEqual(eval(s), '1xy' * 1024)
def test_format_specifier_expressions(self):
    # Replacement fields are allowed inside the format spec itself, one
    # level deep; deeper nesting and conversions inside specs are errors.
    width = 10
    precision = 4
    value = decimal.Decimal('12.34567')
    self.assertEqual(f'result: {value:{width}.{precision}}', 'result:      12.35')
    self.assertEqual(f'result: {value:{width!r}.{precision}}', 'result:      12.35')
    self.assertEqual(f'result: {value:{width:0}.{precision:1}}', 'result:      12.35')
    self.assertEqual(f'result: {value:{1}{0:0}.{precision:1}}', 'result:      12.35')
    self.assertEqual(f'result: {value:{ 1}{ 0:0}.{ precision:1}}', 'result:      12.35')
    self.assertEqual(f'{10:#{1}0x}', '       0xa')
    self.assertEqual(f'{10:{"#"}1{0}{"x"}}', '       0xa')
    self.assertEqual(f'{-10:-{"#"}1{0}x}', '      -0xa')
    self.assertEqual(f'{-10:{"-"}#{1}0{"x"}}', '      -0xa')
    self.assertEqual(f'{10:#{3 != {4:5} and width}x}', '       0xa')
    self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
                        ["""f'{"s"!r{":10"}}'""",
                         # This looks like a nested format spec.
                         ])
    self.assertAllRaise(SyntaxError, "f-string: invalid syntax",
                        [# Invalid syntax inside a nested spec.
                         "f'{4:{/5}}'",
                         ])
    self.assertAllRaise(SyntaxError, "f-string: expressions nested too deeply",
                        [# Can't nest format specifiers.
                         "f'result: {value:{width:{0}}.{precision:1}}'",
                         ])
    self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
                        [# No expansion inside conversion or for
                         # the : or ! itself.
                         """f'{"s"!{"r"}}'""",
                         ])
def test_side_effect_order(self):
    # Replacement fields are evaluated left to right: each __format__
    # call observes the side effects of the previous one.
    class X:
        def __init__(self):
            self.i = 0
        def __format__(self, spec):
            self.i += 1
            return str(self.i)
    x = X()
    self.assertEqual(f'{x} {x}', '1 2')
def test_missing_expression(self):
    # A replacement field must contain an expression: bare or
    # whitespace-only braces, or a lone conversion / format spec, are
    # all "empty expression" syntax errors.
    self.assertAllRaise(SyntaxError, 'f-string: empty expression not allowed',
                        ["f'{}'",
                         # BUG FIX: a missing comma after the next entry
                         # used to implicitly concatenate it with the one
                         # that follows, so "f'{ }'" was never tested as
                         # its own case.
                         "f'{ }'",
                         "f' {} '",
                         "f'{!r}'",
                         "f'{ !r}'",
                         "f'{10:{ }}'",
                         "f' { } '",

                         # The Python parser ignores also the following
                         # whitespace characters in additional to a space.
                         "f'''{\t\f\r\n}'''",

                         # Catch the empty expression before the
                         # invalid conversion.
                         "f'{!x}'",
                         "f'{ !xr}'",
                         "f'{!x:}'",
                         "f'{!x:a}'",
                         "f'{ !xr:}'",
                         "f'{ !xr:a}'",

                         "f'{!}'",
                         "f'{:}'",

                         # We find the empty expression before the
                         # missing closing brace.
                         "f'{!'",
                         "f'{!s:'",
                         "f'{:'",
                         "f'{:x'",
                         ])

    # Different error message is raised for other whitespace characters.
    self.assertAllRaise(SyntaxError, r"invalid non-printable character U\+00A0",
                        ["f'''{\xa0}'''",
                         "\xa0",
                         ])
def test_parens_in_expressions(self):
    # The compiler wraps each expression in parens before compiling it;
    # that must not turn invalid expressions into valid ones.
    self.assertEqual(f'{3,}', '(3,)')
    # Add these because when an expression is evaluated, parens
    # are added around it. But we shouldn't go from an invalid
    # expression to a valid one. The added parens are just
    # supposed to allow whitespace (including newlines).
    self.assertAllRaise(SyntaxError, 'f-string: invalid syntax',
                        ["f'{,}'",
                         "f'{,}'",  # this is (,), which is an error
                         ])
    self.assertAllRaise(SyntaxError, r"f-string: unmatched '\)'",
                        ["f'{3)+(4}'",
                         ])
    self.assertAllRaise(SyntaxError, 'unterminated string literal',
                        ["f'{\n}'",
                         ])
def test_newlines_before_syntax_error(self):
    # Leading newlines must not change the error raised for a bad
    # expression inside an f-string.
    self.assertAllRaise(SyntaxError, "invalid syntax",
                        ["f'{.}'", "\nf'{.}'", "\n\nf'{.}'"])
def test_backslashes_in_string_part(self):
    # Escape sequences are processed in the literal (string) parts of an
    # f-string exactly as in a plain string, and raw variants (rf/fr)
    # leave them alone.
    self.assertEqual(f'\t', '\t')
    self.assertEqual(r'\t', '\\t')
    self.assertEqual(rf'\t', '\\t')
    self.assertEqual(f'{2}\t', '2\t')
    self.assertEqual(f'{2}\t{3}', '2\t3')
    self.assertEqual(f'\t{3}', '\t3')

    self.assertEqual(f'\u0394', '\u0394')
    self.assertEqual(r'\u0394', '\\u0394')
    self.assertEqual(rf'\u0394', '\\u0394')
    self.assertEqual(f'{2}\u0394', '2\u0394')
    self.assertEqual(f'{2}\u0394{3}', '2\u03943')
    self.assertEqual(f'\u0394{3}', '\u03943')

    self.assertEqual(f'\U00000394', '\u0394')
    self.assertEqual(r'\U00000394', '\\U00000394')
    self.assertEqual(rf'\U00000394', '\\U00000394')
    self.assertEqual(f'{2}\U00000394', '2\u0394')
    self.assertEqual(f'{2}\U00000394{3}', '2\u03943')
    self.assertEqual(f'\U00000394{3}', '\u03943')

    # \N{...} named escapes still work, even though they use braces.
    self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}', '\u0394')
    self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
    self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}{3}', '2\u03943')
    self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}{3}', '\u03943')
    self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
    self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}3', '2\u03943')
    self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}3', '\u03943')

    self.assertEqual(f'\x20', ' ')
    self.assertEqual(r'\x20', '\\x20')
    self.assertEqual(rf'\x20', '\\x20')
    self.assertEqual(f'{2}\x20', '2 ')
    self.assertEqual(f'{2}\x20{3}', '2 3')
    self.assertEqual(f'\x20{3}', ' 3')

    self.assertEqual(f'2\x20', '2 ')
    self.assertEqual(f'2\x203', '2 3')
    self.assertEqual(f'\x203', ' 3')

    with self.assertWarns(DeprecationWarning):  # invalid escape sequence
        value = eval(r"f'\{6*7}'")
    self.assertEqual(value, '\\42')
    self.assertEqual(f'\\{6*7}', '\\42')
    self.assertEqual(fr'\{6*7}', '\\42')

    AMPERSAND = 'spam'
    # Get the right unicode character (&), or pick up local variable
    # depending on the number of backslashes.
    self.assertEqual(f'\N{AMPERSAND}', '&')
    self.assertEqual(f'\\N{AMPERSAND}', '\\Nspam')
    self.assertEqual(fr'\N{AMPERSAND}', '\\Nspam')
    self.assertEqual(f'\\\N{AMPERSAND}', '\\&')
def test_misformed_unicode_character_name(self):
    # These test are needed because unicode names are parsed
    # differently inside f-strings.
    self.assertAllRaise(SyntaxError, r"\(unicode error\) 'unicodeescape' codec can't decode bytes in position .*: malformed \\N character escape",
                        [r"f'\N'",
                         r"f'\N{'",
                         r"f'\N{GREEK CAPITAL LETTER DELTA'",

                         # Here are the non-f-string versions,
                         #  which should give the same errors.
                         r"'\N'",
                         r"'\N{'",
                         r"'\N{GREEK CAPITAL LETTER DELTA'",
                         ])
def test_no_backslashes_in_expression_part(self):
    # Backslashes are forbidden anywhere in the expression part of a
    # replacement field, even in raw f-strings.
    self.assertAllRaise(SyntaxError, 'f-string expression part cannot include a backslash',
                        [r"f'{\'a\'}'",
                         r"f'{\t3}'",
                         r"f'{\}'",
                         r"rf'{\'a\'}'",
                         r"rf'{\t3}'",
                         r"rf'{\}'",
                         r"""rf'{"\N{LEFT CURLY BRACKET}"}'""",
                         r"f'{\n}'",
                         ])
def test_no_escapes_for_braces(self):
    """
    Only literal curly braces begin an expression.
    """
    # \x7b is '{'.  An escaped brace stays literal text and does not
    # open a replacement field.
    self.assertEqual(f'\x7b1+1}}', '{1+1}')
    self.assertEqual(f'\x7b1+1', '{1+1')
    self.assertEqual(f'\u007b1+1', '{1+1')
    self.assertEqual(f'\N{LEFT CURLY BRACKET}1+1\N{RIGHT CURLY BRACKET}', '{1+1}')
def test_newlines_in_expressions(self):
    # A triple-quoted f-string may spread one expression over several
    # source lines.
    self.assertEqual(f'{0}', '0')
    self.assertEqual(rf'''{3+
4}''', '7')
def test_lambda(self):
    # A lambda in a replacement field needs parentheses, because the
    # lambda's ':' would otherwise start the format spec.
    x = 5
    self.assertEqual(f'{(lambda y:x*y)("8")!r}', "'88888'")
    self.assertEqual(f'{(lambda y:x*y)("8")!r:10}', "'88888'   ")
    self.assertEqual(f'{(lambda y:x*y)("8"):10}', "88888     ")

    # lambda doesn't work without parens, because the colon
    # makes the parser think it's a format_spec
    self.assertAllRaise(SyntaxError, 'f-string: invalid syntax',
                        ["f'{lambda x:x}'",
                         ])
def test_yield(self):
    # Not terribly useful, but make sure the yield turns
    #  a function into a generator
    def fn(y):
        f'y:{yield y*2}'
        f'{yield}'

    g = fn(4)
    self.assertEqual(next(g), 8)
    self.assertEqual(next(g), None)
def test_yield_send(self):
    # A value sent into a generator is interpolated into the f-string
    # where the inner yield appeared.
    def fn(x):
        yield f'x:{yield (lambda i: x * i)}'

    g = fn(10)
    the_lambda = next(g)
    self.assertEqual(the_lambda(4), 40)
    self.assertEqual(g.send('string'), 'x:string')
def test_expressions_with_triple_quoted_strings(self):
    # Triple-quoted strings (and implicit concatenation of them) are
    # allowed inside a replacement field.
    self.assertEqual(f"{'''x'''}", 'x')
    self.assertEqual(f"{'''eric's'''}", "eric's")

    # Test concatenation within an expression
    self.assertEqual(f'{"x" """eric"s""" "y"}', 'xeric"sy')
    self.assertEqual(f'{"x" """eric"s"""}', 'xeric"s')
    self.assertEqual(f'{"""eric"s""" "y"}', 'eric"sy')
    self.assertEqual(f'{"""x""" """eric"s""" "y"}', 'xeric"sy')
    self.assertEqual(f'{"""x""" """eric"s""" """y"""}', 'xeric"sy')
    self.assertEqual(f'{r"""x""" """eric"s""" """y"""}', 'xeric"sy')
def test_multiple_vars(self):
    # Two interpolations in one f-string, with literal text in every
    # possible position around them.
    first = 98
    second = 'abc'
    self.assertEqual(f'{first}{second}', '98abc')
    self.assertEqual(f'X{first}{second}', 'X98abc')
    self.assertEqual(f'{first}X{second}', '98Xabc')
    self.assertEqual(f'{first}{second}X', '98abcX')
    self.assertEqual(f'X{first}Y{second}', 'X98Yabc')
    self.assertEqual(f'X{first}{second}Y', 'X98abcY')
    self.assertEqual(f'{first}X{second}Y', '98XabcY')
    self.assertEqual(f'X{first}Y{second}Z', 'X98YabcZ')
def test_closure(self):
    # An f-string inside a nested function reads names from the
    # enclosing (closure) scope.
    def make_reader(val):
        def read():
            return f'x:{val}'
        return read

    self.assertEqual(make_reader('987')(), 'x:987')
    self.assertEqual(make_reader(7)(), 'x:7')
def test_arguments(self):
    # An f-string in a function body sees parameters and closure
    # variables; rebinding x in the caller's scope has no effect.
    y = 2
    def f(x, width):
        return f'x={x*y:{width}}'

    self.assertEqual(f('foo', 10), 'x=foofoo   ')
    x = 'bar'
    self.assertEqual(f(10, 10), 'x=        20')
def test_locals(self):
    # A replacement field can read a local variable.
    value = 123
    self.assertEqual(f'v:{value}', 'v:123')
def test_missing_variable(self):
    # An undefined name in a replacement field raises NameError at
    # evaluation time, like any other expression.
    with self.assertRaises(NameError):
        f'v:{value}'
def test_missing_format_spec(self):
    # '{x:}' (empty spec) and '{x}' (no spec) both pass '' to
    # __format__.
    class O:
        def __format__(self, spec):
            if not spec:
                return '*'
            return spec

    self.assertEqual(f'{O():x}', 'x')
    self.assertEqual(f'{O()}', '*')
    self.assertEqual(f'{O():}', '*')

    self.assertEqual(f'{3:}', '3')
    self.assertEqual(f'{3!s:}', '3')
def test_global(self):
    # Replacement fields can read module-level globals (a_global is
    # defined elsewhere in this test module) and imported modules.
    self.assertEqual(f'g:{a_global}', 'g:global variable')
    self.assertEqual(f'g:{a_global!r}', "g:'global variable'")

    a_local = 'local variable'
    self.assertEqual(f'g:{a_global} l:{a_local}',
                     'g:global variable l:local variable')
    self.assertEqual(f'g:{a_global!r}',
                     "g:'global variable'")
    self.assertEqual(f'g:{a_global} l:{a_local!r}',
                     "g:global variable l:'local variable'")

    self.assertIn("module 'unittest' from", f'{unittest}')
def test_shadowed_global(self):
    # A local that shadows a module-level global wins inside the
    # f-string, per normal scoping rules.
    a_global = 'really a local'
    self.assertEqual(f'g:{a_global}', 'g:really a local')
    self.assertEqual(f'g:{a_global!r}', "g:'really a local'")

    a_local = 'local variable'
    self.assertEqual(f'g:{a_global} l:{a_local}',
                     'g:really a local l:local variable')
    self.assertEqual(f'g:{a_global!r}',
                     "g:'really a local'")
    self.assertEqual(f'g:{a_global} l:{a_local!r}',
                     "g:really a local l:'local variable'")
def test_call(self):
    # A replacement field may contain an arbitrary call expression.
    def describe(value):
        return 'x=' + str(value)

    self.assertEqual(f'{describe(10)}', 'x=10')
def test_nested_fstrings(self):
    # An f-string may appear inside another f-string's expression.
    y = 5
    self.assertEqual(f'{f"{0}"*3}', '000')
    self.assertEqual(f'{f"{y}"*3}', '555')
def test_invalid_string_prefixes(self):
    # 'f' may not be combined with 'u' or 'b' in a string prefix, in
    # any order or case.
    single_quote_cases = ["fu''",
                          "uf''",
                          "Fu''",
                          "fU''",
                          "Uf''",
                          "uF''",
                          "ufr''",
                          "urf''",
                          "fur''",
                          "fru''",
                          "rfu''",
                          "ruf''",
                          "FUR''",
                          "Fur''",
                          "fb''",
                          "fB''",
                          "Fb''",
                          "FB''",
                          "bf''",
                          "bF''",
                          "Bf''",
                          "BF''",]
    double_quote_cases = [case.replace("'", '"') for case in single_quote_cases]
    self.assertAllRaise(SyntaxError, 'unexpected EOF while parsing',
                        single_quote_cases + double_quote_cases)
def test_leading_trailing_spaces(self):
    # Whitespace around the expression is ignored; a space after '{'
    # also keeps '{ {' from being read as '{{'.
    self.assertEqual(f'{ 3}', '3')
    self.assertEqual(f'{  3}', '3')
    self.assertEqual(f'{3 }', '3')
    self.assertEqual(f'{3  }', '3')

    self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]}}',
                     'expr={1: 2}')
    self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]} }',
                     'expr={1: 2}')
def test_not_equal(self):
    # There's a special test for this because there's a special
    #  case in the f-string parser to look for != as not ending an
    #  expression. Normally it would, while looking for !s or !r.
    self.assertEqual(f'{3!=4}', 'True')
    self.assertEqual(f'{3!=4:}', 'True')
    self.assertEqual(f'{3!=4!s}', 'True')
    self.assertEqual(f'{3!=4!s:.3}', 'Tru')
def test_equal_equal(self):
    # Because an expression ending in = has special meaning,
    # there's a special test for ==. Make sure it works.
    self.assertEqual(f'{0==1}', 'False')
def test_conversions(self):
    # Only !s, !r and !a are valid conversions; a conversion applies
    # before the format spec.
    self.assertEqual(f'{3.14:10.10}', '      3.14')
    self.assertEqual(f'{3.14!s:10.10}', '3.14      ')
    self.assertEqual(f'{3.14!r:10.10}', '3.14      ')
    self.assertEqual(f'{3.14!a:10.10}', '3.14      ')

    self.assertEqual(f'{"a"}', 'a')
    self.assertEqual(f'{"a"!r}', "'a'")
    self.assertEqual(f'{"a"!a}', "'a'")

    # Not a conversion.
    self.assertEqual(f'{"a!r"}', "a!r")

    # Not a conversion, but show that ! is allowed in a format spec.
    self.assertEqual(f'{3.14:!<10.10}', '3.14!!!!!!')

    self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
                        ["f'{3!g}'",
                         "f'{3!A}'",
                         "f'{3!3}'",
                         "f'{3!G}'",
                         "f'{3!!}'",
                         "f'{3!:}'",
                         "f'{3! s}'",  # no space before conversion char
                         ])

    self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
                        ["f'{x!s{y}}'",
                         "f'{3!ss}'",
                         "f'{3!ss:}'",
                         "f'{3!ss:s}'",
                         ])
def test_assignment(self):
    # An f-string is never a valid assignment target.
    self.assertAllRaise(SyntaxError, 'invalid syntax',
                        ["f'' = 3",
                         "f'{0}' = x",
                         "f'{x}' = x",
                         ])
def test_del(self):
    # del of an f-string (or a concatenation containing one) is a
    # syntax error.
    self.assertAllRaise(SyntaxError, 'invalid syntax',
                        ["del f''",
                         "del '' f''",
                         ])
def test_mismatched_braces(self):
    # A stray '}' in literal text, and unterminated replacement fields,
    # are syntax errors -- but braces inside quoted strings within an
    # expression are fine.
    self.assertAllRaise(SyntaxError, "f-string: single '}' is not allowed",
                        ["f'{{}'",
                         "f'{{}}}'",
                         "f'}'",
                         "f'x}'",
                         "f'x}x'",
                         r"f'\u007b}'",

                         # Can't have { or } in a format spec.
                         "f'{3:}>10}'",
                         "f'{3:}}>10}'",
                         ])

    self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
                        ["f'{3:{{>10}'",
                         "f'{3'",
                         "f'{3!'",
                         "f'{3:'",
                         "f'{3!s'",
                         "f'{3!s:'",
                         "f'{3!s:3'",
                         "f'x{'",
                         "f'x{x'",
                         "f'{x'",
                         "f'{3:s'",
                         "f'{{{'",
                         "f'{{}}{'",
                         "f'{'",
                         ])

    # But these are just normal strings.
    self.assertEqual(f'{"{"}', '{')
    self.assertEqual(f'{"}"}', '}')
    self.assertEqual(f'{3:{"}"}>10}', '}}}}}}}}}3')
    self.assertEqual(f'{2:{"{"}>10}', '{{{{{{{{{2')
def test_if_conditional(self):
    # There's special logic in compile.c to test if the
    #  conditional for an if (and while) are constants. Exercise
    #  that code.

    def test_fstring(x, expected):
        flag = 0
        if f'{x}':
            flag = 1
        else:
            flag = 2
        self.assertEqual(flag, expected)

    def test_concat_empty(x, expected):
        flag = 0
        if '' f'{x}':
            flag = 1
        else:
            flag = 2
        self.assertEqual(flag, expected)

    def test_concat_non_empty(x, expected):
        flag = 0
        if ' ' f'{x}':
            flag = 1
        else:
            flag = 2
        self.assertEqual(flag, expected)

    test_fstring('', 2)
    test_fstring(' ', 1)
    test_concat_empty('', 2)
    test_concat_empty(' ', 1)
    test_concat_non_empty('', 1)
    test_concat_non_empty(' ', 1)
def test_empty_format_specifier(self):
    # A trailing ':' with nothing after it is equivalent to having no
    # format spec at all, with or without a conversion.
    target = 'test'
    self.assertEqual(f'{target!r:}', "'test'")
    self.assertEqual(f'{target!s:}', 'test')
    self.assertEqual(f'{target:}', 'test')
    self.assertEqual(f'{target}', 'test')
def test_str_format_differences(self):
    # Unlike str.format(), f-string subscripts are real expressions:
    # d[a] uses the variable a, and d[0] is an int key, whereas
    # '{d[a]}'.format() treats 'a' as a literal string key.
    d = {'a': 'string',
         0: 'integer',
         }
    a = 0
    self.assertEqual(f'{d[0]}', 'integer')
    self.assertEqual(f'{d["a"]}', 'string')
    self.assertEqual(f'{d[a]}', 'integer')
    self.assertEqual('{d[a]}'.format(d=d), 'string')
    self.assertEqual('{d[0]}'.format(d=d), 'integer')
def test_errors(self):
    # see issue 26287
    self.assertAllRaise(TypeError, 'unsupported',
                        [r"f'{(lambda: 0):x}'",
                         r"f'{(0,):x}'",
                         ])
    # NOTE(review): the same case is listed twice below; presumably one
    # entry was meant to be a different format code -- confirm against
    # upstream.
    self.assertAllRaise(ValueError, 'Unknown format code',
                        [r"f'{1000:j}'",
                         r"f'{1000:j}'",
                         ])
def test_filename_in_syntaxerror(self):
    # see issue 38964
    # A SyntaxError from an f-string must report the script's filename.
    with temp_cwd() as cwd:
        file_path = os.path.join(cwd, 't.py')
        with open(file_path, 'w') as f:
            f.write('f"{a b}"') # This generates a SyntaxError
        _, _, stderr = assert_python_failure(file_path,
                                             PYTHONIOENCODING='ascii')
    self.assertIn(file_path.encode('ascii', 'backslashreplace'), stderr)
def test_loop(self):
    # Evaluate the same f-string many times with changing values.
    for value in range(1000):
        expected = 'i:' + str(value)
        self.assertEqual(f'i:{value}', expected)
def test_dict(self):
    # Dict subscripts inside f-strings work with any quote style that
    # does not clash with the f-string's own quotes.
    d = {'"': 'dquote',
         "'": 'squote',
         'foo': 'bar',
         }
    self.assertEqual(f'''{d["'"]}''', 'squote')
    self.assertEqual(f"""{d['"']}""", 'dquote')

    self.assertEqual(f'{d["foo"]}', 'bar')
    self.assertEqual(f"{d['foo']}", 'bar')
def test_backslash_char(self):
    # Check eval of a backslash followed by a control char.
    # See bpo-30682: this used to raise an assert in pydebug mode.
    self.assertEqual(eval('f"\\\n"'), '')
    self.assertEqual(eval('f"\\\r"'), '')
def test_debug_conversion(self):
    # The '=' debug specifier: f'{expr=}' expands to the expression's
    # source text, '=', then its repr (or the given conversion/spec).
    x = 'A string'
    self.assertEqual(f'{x=}', 'x=' + repr(x))
    self.assertEqual(f'{x =}', 'x =' + repr(x))
    self.assertEqual(f'{x=!s}', 'x=' + str(x))
    self.assertEqual(f'{x=!r}', 'x=' + repr(x))
    self.assertEqual(f'{x=!a}', 'x=' + ascii(x))

    x = 2.71828
    # With a format spec, '=' formats via format() instead of repr().
    self.assertEqual(f'{x=:.2f}', 'x=' + format(x, '.2f'))
    self.assertEqual(f'{x=:}', 'x=' + format(x, ''))
    self.assertEqual(f'{x=!r:^20}', 'x=' + format(repr(x), '^20'))
    self.assertEqual(f'{x=!s:^20}', 'x=' + format(str(x), '^20'))
    self.assertEqual(f'{x=!a:^20}', 'x=' + format(ascii(x), '^20'))

    x = 9
    self.assertEqual(f'{3*x+15=}', '3*x+15=42')

    # There is code in ast.c that deals with non-ascii expression values.  So,
    # use a unicode identifier to trigger that.
    tenπ = 31.4
    self.assertEqual(f'{tenπ=:.2f}', 'tenπ=31.40')

    # Also test with Unicode in non-identifiers.
    self.assertEqual(f'{"Σ"=}', '"Σ"=\'Σ\'')

    # Make sure nested fstrings still work.
    self.assertEqual(f'{f"{3.1415=:.1f}":*^20}', '*****3.1415=3.1*****')

    # Make sure text before and after an expression with = works
    # correctly.
    pi = 'π'
    self.assertEqual(f'alpha α {pi=} ω omega', "alpha α pi='π' ω omega")

    # Check multi-line expressions.
    self.assertEqual(f'''{
3
=}''', '\n3\n=3')

    # Since = is handled specially, make sure all existing uses of
    # it still work.

    self.assertEqual(f'{0==1}', 'False')
    self.assertEqual(f'{0!=1}', 'True')
    self.assertEqual(f'{0<=1}', 'True')
    self.assertEqual(f'{0>=1}', 'False')
    self.assertEqual(f'{(x:="5")}', '5')
    self.assertEqual(x, '5')
    self.assertEqual(f'{(x:=5)}', '5')
    self.assertEqual(x, 5)
    self.assertEqual(f'{"="}', '=')

    x = 20
    # This isn't an assignment expression, it's 'x', with a format
    # spec of '=10'.  See test_walrus: you need to use parens.
    self.assertEqual(f'{x:=10}', '        20')

    # Test named function parameters, to make sure '=' parsing works
    # there.
    def f(a):
        nonlocal x
        oldx = x
        x = a
        return oldx
    x = 0
    self.assertEqual(f'{f(a="3=")}', '0')
    self.assertEqual(x, '3=')
    self.assertEqual(f'{f(a=4)}', '3=')
    self.assertEqual(x, 4)

    # Make sure __format__ is being called.
    class C:
        def __format__(self, s):
            return f'FORMAT-{s}'
        def __repr__(self):
            return 'REPR'

    self.assertEqual(f'{C()=}', 'C()=REPR')
    self.assertEqual(f'{C()=!r}', 'C()=REPR')
    self.assertEqual(f'{C()=:}', 'C()=FORMAT-')
    self.assertEqual(f'{C()=: }', 'C()=FORMAT- ')
    self.assertEqual(f'{C()=:x}', 'C()=FORMAT-x')
    self.assertEqual(f'{C()=!r:*^20}', 'C()=********REPR********')

    self.assertRaises(SyntaxError, eval, "f'{C=]'")

    # Make sure leading and following text works.
    x = 'foo'
    self.assertEqual(f'X{x=}Y', 'Xx='+repr(x)+'Y')

    # Make sure whitespace around the = works.
    self.assertEqual(f'X{x  =}Y', 'Xx  ='+repr(x)+'Y')
    self.assertEqual(f'X{x=  }Y', 'Xx=  '+repr(x)+'Y')
    self.assertEqual(f'X{x  =  }Y', 'Xx  =  '+repr(x)+'Y')

    # These next lines contains tabs.  Backslash escapes don't
    # work in f-strings.
    # patchcheck doesn't like these tabs.  So the only way to test
    # this will be to dynamically created and exec the f-strings.  But
    # that's such a hassle I'll save it for another day.  For now, convert
    # the tabs to spaces just to shut up patchcheck.
    #self.assertEqual(f'X{x =}Y', 'Xx\t='+repr(x)+'Y')
    #self.assertEqual(f'X{x =	}Y', 'Xx\t=\t'+repr(x)+'Y')
def test_walrus(self):
x = 20
# This isn't an assignment expression, it's 'x', with a format
# spec of '=10'.
self.assertEqual(f'{x:=10}', ' 20')
# This is an assignment expression, which requires parens.
self.assertEqual(f'{(x:=10)}', '10')
self.assertEqual(x, 10)
def test_invalid_syntax_error_message(self):
    # A syntactically invalid expression inside an f-string must raise an
    # f-string-specific SyntaxError at compile time.
    # NOTE(review): the exact message text is CPython-version dependent
    # (the f-string parser changed in 3.12) — confirm against the target.
    with self.assertRaisesRegex(SyntaxError, "f-string: invalid syntax"):
        compile("f'{a $ b}'", "?", "exec")
def test_with_two_commas_in_format_specifier(self):
    # Duplicate ',' grouping options in one format spec are rejected.
    error_msg = re.escape("Cannot specify ',' with ','.")
    with self.assertRaisesRegex(ValueError, error_msg):
        f'{1:,,}'
def test_with_two_underscore_in_format_specifier(self):
    # Duplicate '_' grouping options in one format spec are rejected.
    error_msg = re.escape("Cannot specify '_' with '_'.")
    with self.assertRaisesRegex(ValueError, error_msg):
        f'{1:__}'
def test_with_a_commas_and_an_underscore_in_format_specifier(self):
    # ',' and '_' grouping options are mutually exclusive (',' first).
    error_msg = re.escape("Cannot specify both ',' and '_'.")
    with self.assertRaisesRegex(ValueError, error_msg):
        f'{1:,_}'
def test_with_an_underscore_and_a_comma_in_format_specifier(self):
    # ',' and '_' grouping options are mutually exclusive ('_' first).
    error_msg = re.escape("Cannot specify both ',' and '_'.")
    with self.assertRaisesRegex(ValueError, error_msg):
        f'{1:_,}'
def test_syntax_error_for_starred_expressions(self):
    # Bare * / ** unpacking is not a valid expression inside a
    # replacement field.
    # NOTE(review): exact message text is CPython-version dependent.
    error_msg = re.escape("can't use starred expression here")
    with self.assertRaisesRegex(SyntaxError, error_msg):
        compile("f'{*a}'", "?", "exec")
    error_msg = re.escape("can't use double starred expression here")
    with self.assertRaisesRegex(SyntaxError, error_msg):
        compile("f'{**a}'", "?", "exec")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import re
import time
import unicodedata
from typing import Any, Dict, cast
import pkg_resources
import requests
import requests_html
from wikipron.config import Config
from wikipron.typing import Iterator, Pron, WordPronPair
# Queries for the MediaWiki backend.
# Documentation here: https://www.mediawiki.org/wiki/API:Categorymembers
_CATEGORY_TEMPLATE = "Category:{language} terms with IPA pronunciation"
# Selects the content on the page.
_PAGE_TEMPLATE = "https://th.wiktionary.org/wiki/{word}"
# Http headers for api call: identifies the scraper (name/version, repo
# URL, requests version) to the wiki.
# Fix: the distribution name must use single quotes inside this
# double-quoted f-string — same-quote nesting is a SyntaxError before
# Python 3.12 (PEP 701).
HTTP_HEADERS = {
    "User-Agent": (
        f"WikiPron/{pkg_resources.get_distribution('wikipron').version} "
        "(https://github.com/kylebgorman/wikipron) "
        f"requests/{requests.__version__}"
    ),
}
def _skip_word(word: str, skip_spaces: bool) -> bool:
# Skips reconstructions.
if word.startswith("*"):
return True
# Skips multiword examples.
if skip_spaces and (" " in word or "\u00A0" in word):
return True
# Skips examples containing a dash.
if "-" in word:
return True
# Skips examples containing digits.
if re.search(r"\d", word):
return True
return False
def _skip_date(date_from_word: str, cut_off_date: str) -> bool:
return date_from_word > cut_off_date
def _scrape_once(data, config: Config) -> Iterator[WordPronPair]:
    """Yield (word, pron) pairs for one categorymembers API response page.

    ``data`` is one decoded JSON response from the MediaWiki API.
    """
    session = requests_html.HTMLSession()
    for member in data["query"]["categorymembers"]:
        title = member["title"]
        timestamp = member["timestamp"]
        # Remember where we are so a dropped connection can resume here.
        config.restart_key = member["sortkey"]
        if _skip_word(title, config.skip_spaces_word) or _skip_date(
            timestamp, config.cut_off_date
        ):
            continue
        request = session.get(
            _PAGE_TEMPLATE.format(word=title), timeout=10, headers=HTTP_HEADERS
        )
        for word, pron in config.extract_word_pron(title, request, config):
            # Pronunciation processing is done in NFD-space;
            # we convert back to NFC afterwards.
            normalized_pron = unicodedata.normalize("NFC", pron)
            # 'cast' is required 'normalize' doesn't return a 'Pron'
            yield word, cast(Pron, normalized_pron)
def _language_name_for_scraping(language):
"""Handle cases where X is under a "macrolanguage" on Wiktionary.
So far, only the Chinese languages necessitate this helper function.
We'll keep this function as simple as possible, until it becomes too
complicated and requires refactoring.
"""
return (
"Chinese"
if language == "Cantonese" or language == "Min Nan"
else language
)
def scrape(config: Config) -> Iterator[WordPronPair]:
    """Scrapes with a given configuration.

    Pages through the MediaWiki categorymembers API (500 members per
    request) and yields (word, pronunciation) pairs; on connection
    problems it backs off and resumes from ``config.restart_key``.
    """
    category = _CATEGORY_TEMPLATE.format(
        language=_language_name_for_scraping(config.language)
    )
    requests_params: Dict[str, Any] = {
        "action": "query",
        "format": "json",
        "list": "categorymembers",
        "cmtitle": category,
        "cmlimit": "500",
        "cmprop": "ids|title|timestamp|sortkey",
    }
    while True:
        data = requests.get(
            "https://th.wiktionary.org/w/api.php?",
            params=requests_params,
            headers=HTTP_HEADERS,
        ).json()
        try:
            yield from _scrape_once(data, config)
            # Absence of "continue" means the last page was reached.
            if "continue" not in data:
                break
            continue_code = data["continue"]["cmcontinue"]
            # "cmstarthexsortkey" reset so as to avoid competition
            # with "continue_code".
            requests_params.update(
                {"cmcontinue": continue_code, "cmstarthexsortkey": None}
            )
        except (
            requests.exceptions.Timeout,
            requests.exceptions.ConnectionError,
        ):
            requests_params.update({"cmstarthexsortkey": config.restart_key})
            # 5 minute timeout. Immediately restarting after the
            # connection has dropped appears to have led to
            # 'Connection reset by peer' errors.
            time.sleep(300)
import re
import time
import unicodedata
from typing import Any, Dict, cast
import pkg_resources
import requests
import requests_html
from wikipron.config import Config
from wikipron.typing import Iterator, Pron, WordPronPair
# Queries for the MediaWiki backend.
# Documentation here: https://www.mediawiki.org/wiki/API:Categorymembers
_CATEGORY_TEMPLATE = "Category:{language} terms with IPA pronunciation"
# Selects the content on the page.
_PAGE_TEMPLATE = "https://th.wiktionary.org/wiki/{word}"
# Http headers for api call: identifies the scraper (name/version, repo
# URL, requests version) to the wiki.
HTTP_HEADERS = {
    "User-Agent": (
        f"WikiPron/{pkg_resources.get_distribution('wikipron').version} "
        "(https://github.com/kylebgorman/wikipron) "
        f"requests/{requests.__version__}"
    ),
}
def _skip_word(word: str, skip_spaces: bool) -> bool:
    """Return True if *word* should be excluded from scraping."""
    # Skips reconstructions.
    if word.startswith("*"):
        return True
    # Skips multiword examples (space or non-breaking space).
    if skip_spaces and (" " in word or "\u00A0" in word):
        return True
    # Skips examples containing a dash.
    if "-" in word:
        return True
    # Skips examples containing digits.
    if re.search(r"\d", word):
        return True
    return False
def _skip_date(date_from_word: str, cut_off_date: str) -> bool:
    """Return True if the entry's timestamp string sorts after the cut-off."""
    return date_from_word > cut_off_date
def _scrape_once(data, config: Config) -> Iterator[WordPronPair]:
    """Yield (word, pron) pairs for one categorymembers API response page."""
    session = requests_html.HTMLSession()
    for member in data["query"]["categorymembers"]:
        title = member["title"]
        timestamp = member["timestamp"]
        # Remember where we are so a dropped connection can resume here.
        config.restart_key = member["sortkey"]
        if _skip_word(title, config.skip_spaces_word) or _skip_date(
            timestamp, config.cut_off_date
        ):
            continue
        request = session.get(
            _PAGE_TEMPLATE.format(word=title), timeout=10, headers=HTTP_HEADERS
        )
        for word, pron in config.extract_word_pron(title, request, config):
            # Pronunciation processing is done in NFD-space;
            # we convert back to NFC afterwards.
            normalized_pron = unicodedata.normalize("NFC", pron)
            # 'cast' is required 'normalize' doesn't return a 'Pron'
            yield word, cast(Pron, normalized_pron)
def _language_name_for_scraping(language):
    """Handle cases where X is under a "macrolanguage" on Wiktionary.

    So far, only the Chinese languages necessitate this helper function.
    We'll keep this function as simple as possible, until it becomes too
    complicated and requires refactoring.
    """
    return (
        "Chinese"
        if language == "Cantonese" or language == "Min Nan"
        else language
    )
def scrape(config: Config) -> Iterator[WordPronPair]:
    """Scrapes with a given configuration.

    Pages through the MediaWiki categorymembers API and yields
    (word, pronunciation) pairs; on connection problems it backs off and
    resumes from ``config.restart_key``.
    """
    category = _CATEGORY_TEMPLATE.format(
        language=_language_name_for_scraping(config.language)
    )
    requests_params: Dict[str, Any] = {
        "action": "query",
        "format": "json",
        "list": "categorymembers",
        "cmtitle": category,
        "cmlimit": "500",
        "cmprop": "ids|title|timestamp|sortkey",
    }
    while True:
        data = requests.get(
            "https://th.wiktionary.org/w/api.php?",
            params=requests_params,
            headers=HTTP_HEADERS,
        ).json()
        try:
            yield from _scrape_once(data, config)
            # Absence of "continue" means the last page was reached.
            if "continue" not in data:
                break
            continue_code = data["continue"]["cmcontinue"]
            # "cmstarthexsortkey" reset so as to avoid competition
            # with "continue_code".
            requests_params.update(
                {"cmcontinue": continue_code, "cmstarthexsortkey": None}
            )
        except (
            requests.exceptions.Timeout,
            requests.exceptions.ConnectionError,
        ):
            requests_params.update({"cmstarthexsortkey": config.restart_key})
            # 5 minute timeout. Immediately restarting after the
            # connection has dropped appears to have led to
            # 'Connection reset by peer' errors.
            time.sleep(300)
|
import unittest
import random
from unittest.case import SkipTest
from src.pysguard import DateRule, DateRangeRule, TimeRangeRule, RecurringTimeRule, TimeConstraint
from datetime import datetime, timedelta
class DateRuleTest(unittest.TestCase):
    """DateRule: 'Y.M.D' patterns where any component may be the wildcard '*'."""

    @classmethod
    def setUpClass(cls) -> None:
        # Seed from the clock so randomized components differ between runs.
        random.seed(datetime.now().microsecond)

    def test_date_rule_matches_exact_date(self):
        dt = datetime.today()
        r = DateRule(dt.strftime('%Y.%m.%d'))
        assert r.test(dt)

    def test_date_rule_with_wildcard_year_matches_any_year(self):
        dt = datetime.today()
        random_year = random.randint(2000, 2100)
        r = DateRule(dt.strftime('*.%m.%d'))
        assert r.test(datetime(random_year, dt.month, dt.day))

    def test_date_rule_with_wildcard_month_matches_any_month(self):
        dt = datetime.today()
        random_month = random.randint(1, 12)
        r = DateRule(dt.strftime('%Y.*.%d'))
        assert r.test(datetime(dt.year, random_month, dt.day))

    def test_date_rule_with_wildcard_day_matches_any_day(self):
        dt = datetime.today()
        # Capped at 28 so the day is valid in every month.
        random_day = random.randint(1, 28)
        r = DateRule(dt.strftime('%Y.%m.*'))
        assert r.test(datetime(dt.year, dt.month, random_day))
class TimeRuleTest(unittest.TestCase):
    """TimeRangeRule ('HH:MM-HH:MM') against a fixed reference of 13:24:15."""

    @classmethod
    def setUpClass(cls) -> None:
        cls._test_time = datetime(2020, 1, 1, 13, 24, 15)

    def test_time_is_in_range_1(self):
        t = TimeRangeRule('13:24-13:25')
        assert t.test(self._test_time)

    def test_time_is_in_range_2(self):
        t = TimeRangeRule('00:00-23:59')
        assert t.test(self._test_time)

    def test_time_is_outside_range_1(self):
        # Range ends at 13:24 sharp; 13:24:15 is past it.
        t = TimeRangeRule('13:23-13:24')
        is_inside = t.test(self._test_time)
        assert not is_inside

    def test_time_is_outside_range_2(self):
        t = TimeRangeRule('13:25-13:26')
        is_inside = t.test(self._test_time)
        assert not is_inside
class DateRangeRuleTest(unittest.TestCase):
    """DateRangeRule ('Y.M.D-Y.M.D'); both endpoints are inclusive."""

    @classmethod
    def setUpClass(cls) -> None:
        cls._test_time = datetime(2020, 11, 2)

    def test_time_is_in_range_1(self):
        t = DateRangeRule('2020.11.02-2020.11.03')
        assert t.test(self._test_time)

    def test_time_is_in_range_2(self):
        t = DateRangeRule('2020.11.01-2020.11.02')
        assert t.test(self._test_time)

    def test_time_is_outside_range_1(self):
        t = DateRangeRule('2020.10.31-2020.11.01')
        is_inside = t.test(self._test_time)
        assert not is_inside
class RecurringTimeRuleTest(unittest.TestCase):
    # Mostly testing the weekday part here as this
    # rule is a combination of this and a TimeRangeRule

    @classmethod
    def setUpClass(cls) -> None:
        # One concrete date per weekday (week of 2020-11-02), all at 13:24.
        cls._weekdays = {
            'monday': datetime(2020, 11, 2, 13, 24),
            'tuesday': datetime(2020, 11, 3, 13, 24),
            'wednesday': datetime(2020, 11, 4, 13, 24),
            'thursday': datetime(2020, 11, 5, 13, 24),
            'friday': datetime(2020, 11, 6, 13, 24),
            'saturday': datetime(2020, 11, 7, 13, 24),
            'sunday': datetime(2020, 11, 8, 13, 24)
        }

    def test_wildcard_weekday_is_in_range(self):
        random.seed(datetime.now().microsecond)
        random_day = random.choice(list(self._weekdays.keys()))
        r = RecurringTimeRule('*', '13:00-14:00')
        assert r.test(self._weekdays[random_day])

    def test_each_weekday_is_in_range(self):
        # Rule spellings use the plural form, e.g. 'mondays'.
        for day in self._weekdays.keys():
            r = RecurringTimeRule(f'{day}s', '13:00-14:00')
            assert r.test(self._weekdays[day]), f'Unexpected result for {day}'
class TimeConstraintTest(unittest.TestCase):
    """TimeConstraint: passes when any of its constraint strings matches now."""

    def test_time_constraint(self):
        d = datetime.now()
        # In the last minute of the day d1 would roll over to 00:00.
        if d.hour == 23 and d.minute == 59:
            raise SkipTest('Too close to midnight to assure success')
        d1 = d + timedelta(minutes=1)
        c = TimeConstraint({
            'name': 'test',
            'constraints': [
                'date *.12.24 12:00-23:59',
                'date *.12.24 12:00-23:59',
                'date 2006.04.14-2006.04.17',
                # Fix: use double quotes inside the single-quoted f-string;
                # same-quote nesting is a SyntaxError before Python 3.12.
                f'weekly * {d.strftime("%H:%M")}-{d1.strftime("%H:%M")}',  # will hit this one
                'weekly fridays 16:00-17:00'
            ]
        })
        assert c.test()
import unittest
import random
from unittest.case import SkipTest
from src.pysguard import DateRule, DateRangeRule, TimeRangeRule, RecurringTimeRule, TimeConstraint
from datetime import datetime, timedelta
class DateRuleTest(unittest.TestCase):
    """DateRule: 'Y.M.D' patterns where any component may be the wildcard '*'."""

    @classmethod
    def setUpClass(cls) -> None:
        # Seed from the clock so randomized components differ between runs.
        random.seed(datetime.now().microsecond)

    def test_date_rule_matches_exact_date(self):
        dt = datetime.today()
        r = DateRule(dt.strftime('%Y.%m.%d'))
        assert r.test(dt)

    def test_date_rule_with_wildcard_year_matches_any_year(self):
        dt = datetime.today()
        random_year = random.randint(2000, 2100)
        r = DateRule(dt.strftime('*.%m.%d'))
        assert r.test(datetime(random_year, dt.month, dt.day))

    def test_date_rule_with_wildcard_month_matches_any_month(self):
        dt = datetime.today()
        random_month = random.randint(1, 12)
        r = DateRule(dt.strftime('%Y.*.%d'))
        assert r.test(datetime(dt.year, random_month, dt.day))

    def test_date_rule_with_wildcard_day_matches_any_day(self):
        dt = datetime.today()
        # Capped at 28 so the day is valid in every month.
        random_day = random.randint(1, 28)
        r = DateRule(dt.strftime('%Y.%m.*'))
        assert r.test(datetime(dt.year, dt.month, random_day))
class TimeRuleTest(unittest.TestCase):
    """TimeRangeRule ('HH:MM-HH:MM') against a fixed reference of 13:24:15."""

    @classmethod
    def setUpClass(cls) -> None:
        cls._test_time = datetime(2020, 1, 1, 13, 24, 15)

    def test_time_is_in_range_1(self):
        t = TimeRangeRule('13:24-13:25')
        assert t.test(self._test_time)

    def test_time_is_in_range_2(self):
        t = TimeRangeRule('00:00-23:59')
        assert t.test(self._test_time)

    def test_time_is_outside_range_1(self):
        # Range ends at 13:24 sharp; 13:24:15 is past it.
        t = TimeRangeRule('13:23-13:24')
        is_inside = t.test(self._test_time)
        assert not is_inside

    def test_time_is_outside_range_2(self):
        t = TimeRangeRule('13:25-13:26')
        is_inside = t.test(self._test_time)
        assert not is_inside
class DateRangeRuleTest(unittest.TestCase):
    """DateRangeRule ('Y.M.D-Y.M.D'); both endpoints are inclusive."""

    @classmethod
    def setUpClass(cls) -> None:
        cls._test_time = datetime(2020, 11, 2)

    def test_time_is_in_range_1(self):
        t = DateRangeRule('2020.11.02-2020.11.03')
        assert t.test(self._test_time)

    def test_time_is_in_range_2(self):
        t = DateRangeRule('2020.11.01-2020.11.02')
        assert t.test(self._test_time)

    def test_time_is_outside_range_1(self):
        t = DateRangeRule('2020.10.31-2020.11.01')
        is_inside = t.test(self._test_time)
        assert not is_inside
class RecurringTimeRuleTest(unittest.TestCase):
    # Mostly testing the weekday part here as this
    # rule is a combination of this and a TimeRangeRule

    @classmethod
    def setUpClass(cls) -> None:
        # One concrete date per weekday (week of 2020-11-02), all at 13:24.
        cls._weekdays = {
            'monday': datetime(2020, 11, 2, 13, 24),
            'tuesday': datetime(2020, 11, 3, 13, 24),
            'wednesday': datetime(2020, 11, 4, 13, 24),
            'thursday': datetime(2020, 11, 5, 13, 24),
            'friday': datetime(2020, 11, 6, 13, 24),
            'saturday': datetime(2020, 11, 7, 13, 24),
            'sunday': datetime(2020, 11, 8, 13, 24)
        }

    def test_wildcard_weekday_is_in_range(self):
        random.seed(datetime.now().microsecond)
        random_day = random.choice(list(self._weekdays.keys()))
        r = RecurringTimeRule('*', '13:00-14:00')
        assert r.test(self._weekdays[random_day])

    def test_each_weekday_is_in_range(self):
        # Rule spellings use the plural form, e.g. 'mondays'.
        for day in self._weekdays.keys():
            r = RecurringTimeRule(f'{day}s', '13:00-14:00')
            assert r.test(self._weekdays[day]), f'Unexpected result for {day}'
class TimeConstraintTest(unittest.TestCase):
    """TimeConstraint: passes when any of its constraint strings matches now."""

    def test_time_constraint(self):
        d = datetime.now()
        # In the last minute of the day d1 would roll over to 00:00.
        if d.hour == 23 and d.minute == 59:
            raise SkipTest('Too close to midnight to assure success')
        d1 = d + timedelta(minutes=1)
        c = TimeConstraint({
            'name': 'test',
            'constraints': [
                'date *.12.24 12:00-23:59',
                'date *.12.24 12:00-23:59',
                'date 2006.04.14-2006.04.17',
                f'weekly * {d.strftime("%H:%M")}-{d1.strftime("%H:%M")}',  # will hit this one
                'weekly fridays 16:00-17:00'
            ]
        })
        assert c.test()
|
import os
import json
import time
import click
import pickle
import signal
import requests
import subprocess
import numpy as np
import logging
import socket
import errno
from tqdm import tqdm
# Evaluation logger: INFO+ records go to evaluation.log.
elog = logging.getLogger('eval')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('evaluation.log')
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
# Fix: the file handler was configured but never attached to the logger,
# so nothing was ever written to evaluation.log.
elog.addHandler(fh)
# Silence noisy per-request logging from the requests library.
logging.getLogger('requests').setLevel(logging.CRITICAL)
class CurveScore:
    """Expected-wins scorer: weights a correct buzz by how early it came.

    The weight comes from a pre-fitted, pickled pipeline mapping relative
    buzz position in [0, 1] to a payoff.
    """
    def __init__(self, curve_pkl='../curve_pipeline.pkl'):
        with open(curve_pkl, 'rb') as f:
            # NOTE(review): unpickling runs arbitrary code; only load
            # trusted pipeline files.
            self.pipeline = pickle.load(f)

    def get_weight(self, x):
        # x: relative character position of the buzz (0 = question start).
        return self.pipeline.predict(np.asarray([[x]]))[0]

    def score(self, guesses, question):
        '''guesses is a list of {'guess': GUESS, 'buzz': True/False}
        '''
        char_length = len(question['text'])
        buzzes = [x['buzz'] for x in guesses]
        if True not in buzzes:
            return 0
        buzz_index = buzzes.index(True)
        rel_position = (1.0 * guesses[buzz_index]['char_index']) / char_length
        weight = self.get_weight(rel_position)
        result = guesses[buzz_index]['guess'] == question['page']
        # weight if the first buzz was correct, else 0 (bool multiplies).
        return weight * result

    def score_optimal(self, guesses, question):
        '''score with an optimal buzzer'''
        char_length = len(question['text'])
        buzz_index = char_length
        # Walk backwards to the last wrong guess; an optimal buzzer buzzes
        # at that point (or at the very end if every guess was wrong-free).
        for g in guesses[::-1]:
            if g['guess'] != question['page']:
                buzz_index = g['char_index']
                break
        rel_position = (1.0 * buzz_index) / char_length
        return self.get_weight(rel_position)
def start_server():
    """Launch the model web server ('bash run.sh') in its own process group.

    The new session (os.setsid) lets the caller tear down the whole server
    tree with os.killpg on exit.
    """
    web_proc = subprocess.Popen(
        'bash run.sh', shell=True,
        preexec_fn=os.setsid
    )
    return web_proc
def retry_get_url(url, retries=5, delay=3):
    """GET *url*, retrying up to *retries* times with *delay* seconds
    between attempts.

    Returns the decoded JSON body on HTTP 200, or None once retries are
    exhausted.
    """
    while retries > 0:
        # Fix: count every attempt. Previously only ConnectionError
        # decremented, so a persistent non-200 response looped forever.
        retries -= 1
        try:
            response = requests.get(url)
            if response.status_code == 200:
                return response.json()
        except requests.exceptions.ConnectionError as e:
            # Logger.warn is a deprecated alias; use warning().
            elog.warning(e)
        if delay > 0:
            time.sleep(delay)
    return None
def get_question_query(qid, question, evidence, char_idx, wiki_paragraphs=False):
    """Build the JSON payload revealing question text up to char_idx."""
    text = question['text']
    char_idx = min(char_idx, len(text))
    # Find the sentence whose span contains char_idx; if none does,
    # sent_idx is left at the last sentence's index.
    for sent_idx, (start, end) in enumerate(question['tokenizations']):
        if start <= char_idx <= end:
            break
    query = {
        'question_idx': qid,
        'sent_index': sent_idx,
        'char_index': char_idx,
        'text': text[:char_idx],
    }
    if wiki_paragraphs:
        # One list of top-5 evidence dicts per sentence revealed so far.
        query['wiki_paragraphs'] = evidence['sent_evidences'][:sent_idx + 1]
    return query
def get_answer_single(url, questions, evidences, char_step_size, wiki_paragraphs=False):
    """POST each question to the model endpoint one request at a time,
    revealing char_step_size more characters per request.

    Returns one list of response dicts per question.
    """
    elog.info('Collecting responses to questions')
    answers = []
    for question_idx, q in enumerate(tqdm(questions)):
        # Fix: double quotes inside the single-quoted f-string; same-quote
        # nesting is a SyntaxError before Python 3.12.
        elog.info(f'Running question_idx={question_idx} qnum={q["qanta_id"]}')
        answers.append([])
        # get an answer every K characters; the two original branches were
        # identical except for the evidence argument, so they are merged.
        evidence = evidences[question_idx] if wiki_paragraphs else []
        for char_idx in range(1, len(q['text']) + char_step_size,
                              char_step_size):
            query = get_question_query(question_idx, q, evidence, char_idx, wiki_paragraphs)
            resp = requests.post(url, json=query).json()
            query.update(resp)
            answers[-1].append(query)
    return answers
def get_answer_batch(url, questions, evidences, char_step_size, batch_size, wiki_paragraphs=False):
    """POST questions to the model's batch endpoint, batch_size at a time.

    Returns one list of response dicts per question, indexed like
    *questions*.
    """
    # Fix: the extra positional arg had no %-placeholder in the message,
    # which breaks logging's lazy record formatting.
    elog.info('Collecting responses to questions in batches of %s', batch_size)
    answers = []
    batch_ids = list(range(0, len(questions), batch_size))
    for batch_idx in tqdm(batch_ids):
        batch_ed = min(len(questions), batch_idx + batch_size)
        qs = questions[batch_idx: batch_ed]
        max_len = max(len(q['text']) for q in qs)
        qids = list(range(batch_idx, batch_ed))
        answers += [[] for _ in qs]
        if wiki_paragraphs:
            evs = evidences[batch_idx: batch_ed]
            for char_idx in range(1, max_len + char_step_size, char_step_size):
                query = {'questions': []}
                for i, q in enumerate(qs):
                    query['questions'].append(
                        get_question_query(qids[i], q, evs[i], char_idx, wiki_paragraphs))
                resp = requests.post(url, json=query).json()
                for i, r in enumerate(resp):
                    q = query['questions'][i]
                    q.update(r)
                    answers[qids[i]].append(q)
        else:
            # Same as above, but no per-question evidence is passed.
            for char_idx in range(1, max_len + char_step_size, char_step_size):
                query = {'questions': []}
                for i, q in enumerate(qs):
                    query['questions'].append(
                        get_question_query(qids[i], q, [], char_idx, wiki_paragraphs))
                resp = requests.post(url, json=query).json()
                for i, r in enumerate(resp):
                    q = query['questions'][i]
                    q.update(r)
                    answers[qids[i]].append(q)
    return answers
def check_port(hostname, port):
    # Unimplemented placeholder — presumably meant to verify the server
    # port is reachable (socket/errno are imported above); confirm intent.
    pass
@click.command()
@click.argument('input_dir')
#@click.argument('evidence_dir', default='data/evidence_docs_dev_with_sent_text.json')
@click.argument('output_dir', default='predictions.json')
@click.argument('score_dir', default='scores.json')
@click.option('--char_step_size', default=25)
@click.option('--hostname', default='0.0.0.0')
@click.option('--norun-web', default=False, is_flag=True)
@click.option('--wait', default=0, type=int)
@click.option('--curve-pkl', default='curve_pipeline.pkl')
@click.option('--retries', default=20)
@click.option('--retry-delay', default=3)
def evaluate(input_dir, output_dir, score_dir, char_step_size, hostname,
             norun_web, wait, curve_pkl, retries, retry_delay):
    """Run the end-to-end evaluation.

    Optionally starts the model server, waits for its status API, replays
    every question against it (batched if the server supports it), writes
    raw predictions to output_dir and summary metrics to score_dir.
    """
    try:
        if not norun_web:
            web_proc = start_server()
        if wait > 0:
            time.sleep(wait)
        status_url = f'http://{hostname}:4861/api/1.0/quizbowl/status'
        status = retry_get_url(status_url, retries=retries, delay=retry_delay)
        elog.info(f'API Status: {status}')
        if status is None:
            elog.warning('Failed to find a running web server beep boop, prepare for RUD')
            raise ValueError('Status API could not be reached')
        # The server advertises whether it wants wiki evidence passed along.
        if 'include_wiki_paragraphs' in status:
            include_wiki_paragraphs = status['include_wiki_paragraphs']
        else:
            include_wiki_paragraphs = False
        with open(input_dir) as f:
            questions = json.load(f)['questions']
        evidences = []
        if include_wiki_paragraphs:
            evidence_dir = 'evidence_docs_dev_with_sent_text.json'
            with open(evidence_dir) as f:
                evidences = json.load(f)['evidence']
        if status is not None and status['batch'] is True:
            url = f'http://{hostname}:4861/api/1.0/quizbowl/batch_act'
            answers = get_answer_batch(url, questions, evidences,
                                       char_step_size,
                                       status['batch_size'],
                                       wiki_paragraphs=include_wiki_paragraphs)
        else:
            url = f'http://{hostname}:4861/api/1.0/quizbowl/act'
            answers = get_answer_single(url, questions, evidences,
                                        char_step_size,
                                        wiki_paragraphs=include_wiki_paragraphs)
        with open(output_dir, 'w') as f:
            json.dump(answers, f)
        elog.info('Computing curve score of results')
        curve_score = CurveScore(curve_pkl=curve_pkl)
        first_acc = []
        end_acc = []
        ew = []
        ew_opt = []
        for question_idx, guesses in enumerate(answers):
            question = questions[question_idx]
            answer = question['page']
            # Accuracy of the guess recorded at sent_index == 1.
            first_guess = None
            for g in guesses:
                if g['sent_index'] == 1:
                    first_guess = g['guess']
                    break
            first_acc.append(first_guess == answer)
            end_acc.append(guesses[-1]['guess'] == answer)
            ew.append(curve_score.score(guesses, question))
            ew_opt.append(curve_score.score_optimal(guesses, question))
        eval_out = {
            'first_acc': sum(first_acc) * 1.0 / len(first_acc),
            'end_acc': sum(end_acc) * 1.0 / len(end_acc),
            'expected_wins': sum(ew) * 1.0 / len(ew),
            'expected_wins_optimal': sum(ew_opt) * 1.0 / len(ew_opt),
        }
        with open(score_dir, 'w') as f:
            json.dump(eval_out, f)
        print(json.dumps(eval_out))
    finally:
        # Tear down the whole server process group if we started it.
        if not norun_web:
            os.killpg(os.getpgid(web_proc.pid), signal.SIGTERM)
# CLI entry point (click parses sys.argv).
if __name__ == '__main__':
    evaluate()
import os
import json
import time
import click
import pickle
import signal
import requests
import subprocess
import numpy as np
import logging
import socket
import errno
from tqdm import tqdm
# Evaluation logger; a file handler for evaluation.log is configured below.
elog = logging.getLogger('eval')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('evaluation.log')
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
# NOTE(review): fh is never attached via elog.addHandler(fh), so records
# are not actually written to evaluation.log — confirm whether intended.
logging.getLogger('requests').setLevel(logging.CRITICAL)
class CurveScore:
    """Expected-wins scorer: weights a correct buzz by how early it came.

    The weight comes from a pre-fitted, pickled pipeline mapping relative
    buzz position in [0, 1] to a payoff.
    """
    def __init__(self, curve_pkl='../curve_pipeline.pkl'):
        with open(curve_pkl, 'rb') as f:
            # NOTE(review): unpickling runs arbitrary code; only load
            # trusted pipeline files.
            self.pipeline = pickle.load(f)

    def get_weight(self, x):
        # x: relative character position of the buzz (0 = question start).
        return self.pipeline.predict(np.asarray([[x]]))[0]

    def score(self, guesses, question):
        '''guesses is a list of {'guess': GUESS, 'buzz': True/False}
        '''
        char_length = len(question['text'])
        buzzes = [x['buzz'] for x in guesses]
        if True not in buzzes:
            return 0
        buzz_index = buzzes.index(True)
        rel_position = (1.0 * guesses[buzz_index]['char_index']) / char_length
        weight = self.get_weight(rel_position)
        result = guesses[buzz_index]['guess'] == question['page']
        # weight if the first buzz was correct, else 0 (bool multiplies).
        return weight * result

    def score_optimal(self, guesses, question):
        '''score with an optimal buzzer'''
        char_length = len(question['text'])
        buzz_index = char_length
        # Walk backwards to the last wrong guess; an optimal buzzer buzzes
        # at that point (or at the very end if no guess was wrong).
        for g in guesses[::-1]:
            if g['guess'] != question['page']:
                buzz_index = g['char_index']
                break
        rel_position = (1.0 * buzz_index) / char_length
        return self.get_weight(rel_position)
def start_server():
    """Launch the model web server ('bash run.sh') in its own process group.

    The new session (os.setsid) lets the caller tear down the whole server
    tree with os.killpg on exit.
    """
    web_proc = subprocess.Popen(
        'bash run.sh', shell=True,
        preexec_fn=os.setsid
    )
    return web_proc
def retry_get_url(url, retries=5, delay=3):
    """GET *url* until an HTTP 200 JSON response or retries are exhausted."""
    while retries > 0:
        try:
            response = requests.get(url)
            if response.status_code == 200:
                return response.json()
        except requests.exceptions.ConnectionError as e:
            # NOTE(review): retries only decrements on ConnectionError; a
            # persistent non-200 response loops forever — confirm intended.
            retries -= 1
            # NOTE(review): Logger.warn is a deprecated alias of warning().
            elog.warn(e)
        if delay > 0:
            time.sleep(delay)
    return None
def get_question_query(qid, question, evidence, char_idx, wiki_paragraphs=False):
    """Build the JSON payload revealing question text up to char_idx."""
    char_idx = min(char_idx, len(question['text']))
    # Locate the sentence whose span contains char_idx; if none matches,
    # sent_idx is left at the last sentence's index.
    for sent_idx, (st, ed) in enumerate(question['tokenizations']):
        if char_idx >= st and char_idx <= ed:
            break
    query = {
        'question_idx': qid,
        'sent_index': sent_idx,
        'char_index': char_idx,
        'text': question['text'][:char_idx]
    }
    if wiki_paragraphs:
        evidences = evidence['sent_evidences'][:sent_idx+1]
        # evidences here is a list of lists of length = #sentences seen so
        # far, and each sublist contains 5 dictionaries for the 5 top sentences
        query['wiki_paragraphs'] = evidences
    return query
def get_answer_single(url, questions, evidences, char_step_size, wiki_paragraphs=False):
    """POST each question to the model endpoint one request at a time,
    revealing char_step_size more characters per request."""
    elog.info('Collecting responses to questions')
    answers = []
    for question_idx, q in enumerate(tqdm(questions)):
        elog.info(f'Running question_idx={question_idx} qnum={q["qanta_id"]}')
        answers.append([])
        # get an answer every K characters
        if wiki_paragraphs:
            for char_idx in range(1, len(q['text']) + char_step_size,
                                  char_step_size):
                query = get_question_query(question_idx, q, evidences[question_idx], char_idx, wiki_paragraphs)
                resp = requests.post(url, json=query).json()
                query.update(resp)
                answers[-1].append(query)
        else:
            # Identical except no per-question evidence is passed.
            for char_idx in range(1, len(q['text']) + char_step_size,
                                  char_step_size):
                query = get_question_query(question_idx, q, [], char_idx, wiki_paragraphs)
                resp = requests.post(url, json=query).json()
                query.update(resp)
                answers[-1].append(query)
    return answers
def get_answer_batch(url, questions, evidences, char_step_size, batch_size, wiki_paragraphs=False):
    """POST questions to the model's batch endpoint, batch_size at a time."""
    # NOTE(review): this call passes batch_size with no matching
    # %-placeholder in the message, which breaks record formatting.
    elog.info('Collecting responses to questions in batches', batch_size)
    answers = []
    batch_ids = list(range(0, len(questions), batch_size))
    for batch_idx in tqdm(batch_ids):
        batch_ed = min(len(questions), batch_idx + batch_size)
        qs = questions[batch_idx: batch_ed]
        max_len = max(len(q['text']) for q in qs)
        qids = list(range(batch_idx, batch_ed))
        answers += [[] for _ in qs]
        if wiki_paragraphs:
            evs = evidences[batch_idx: batch_ed]
            for char_idx in range(1, max_len + char_step_size, char_step_size):
                query = {'questions': []}
                for i, q in enumerate(qs):
                    query['questions'].append(
                        get_question_query(qids[i], q, evs[i], char_idx, wiki_paragraphs))
                resp = requests.post(url, json=query).json()
                for i, r in enumerate(resp):
                    q = query['questions'][i]
                    q.update(r)
                    answers[qids[i]].append(q)
        else:
            # Same as above, but no per-question evidence is passed.
            for char_idx in range(1, max_len + char_step_size, char_step_size):
                query = {'questions': []}
                for i, q in enumerate(qs):
                    query['questions'].append(
                        get_question_query(qids[i], q, [], char_idx, wiki_paragraphs))
                resp = requests.post(url, json=query).json()
                for i, r in enumerate(resp):
                    q = query['questions'][i]
                    q.update(r)
                    answers[qids[i]].append(q)
    return answers
def check_port(hostname, port):
    # Unimplemented placeholder — presumably meant to verify the server
    # port is reachable (socket/errno are imported above); confirm intent.
    pass
@click.command()
@click.argument('input_dir')
#@click.argument('evidence_dir', default='data/evidence_docs_dev_with_sent_text.json')
@click.argument('output_dir', default='predictions.json')
@click.argument('score_dir', default='scores.json')
@click.option('--char_step_size', default=25)
@click.option('--hostname', default='0.0.0.0')
@click.option('--norun-web', default=False, is_flag=True)
@click.option('--wait', default=0, type=int)
@click.option('--curve-pkl', default='curve_pipeline.pkl')
@click.option('--retries', default=20)
@click.option('--retry-delay', default=3)
def evaluate(input_dir, output_dir, score_dir, char_step_size, hostname,
             norun_web, wait, curve_pkl, retries, retry_delay):
    """Run the end-to-end evaluation.

    Optionally starts the model server, waits for its status API, replays
    every question against it (batched if the server supports it), writes
    raw predictions to output_dir and summary metrics to score_dir.
    """
    try:
        if not norun_web:
            web_proc = start_server()
        if wait > 0:
            time.sleep(wait)
        status_url = f'http://{hostname}:4861/api/1.0/quizbowl/status'
        status = retry_get_url(status_url, retries=retries, delay=retry_delay)
        elog.info(f'API Status: {status}')
        if status is None:
            elog.warning('Failed to find a running web server beep boop, prepare for RUD')
            raise ValueError('Status API could not be reached')
        # The server advertises whether it wants wiki evidence passed along.
        if 'include_wiki_paragraphs' in status:
            include_wiki_paragraphs = status['include_wiki_paragraphs']
        else:
            include_wiki_paragraphs = False
        with open(input_dir) as f:
            questions = json.load(f)['questions']
        evidences = []
        if include_wiki_paragraphs:
            evidence_dir = 'evidence_docs_dev_with_sent_text.json'
            with open(evidence_dir) as f:
                evidences = json.load(f)['evidence']
        if status is not None and status['batch'] is True:
            url = f'http://{hostname}:4861/api/1.0/quizbowl/batch_act'
            answers = get_answer_batch(url, questions, evidences,
                                       char_step_size,
                                       status['batch_size'],
                                       wiki_paragraphs=include_wiki_paragraphs)
        else:
            url = f'http://{hostname}:4861/api/1.0/quizbowl/act'
            answers = get_answer_single(url, questions, evidences,
                                        char_step_size,
                                        wiki_paragraphs=include_wiki_paragraphs)
        with open(output_dir, 'w') as f:
            json.dump(answers, f)
        elog.info('Computing curve score of results')
        curve_score = CurveScore(curve_pkl=curve_pkl)
        first_acc = []
        end_acc = []
        ew = []
        ew_opt = []
        for question_idx, guesses in enumerate(answers):
            question = questions[question_idx]
            answer = question['page']
            # Accuracy of the guess recorded at sent_index == 1.
            first_guess = None
            for g in guesses:
                if g['sent_index'] == 1:
                    first_guess = g['guess']
                    break
            first_acc.append(first_guess == answer)
            end_acc.append(guesses[-1]['guess'] == answer)
            ew.append(curve_score.score(guesses, question))
            ew_opt.append(curve_score.score_optimal(guesses, question))
        eval_out = {
            'first_acc': sum(first_acc) * 1.0 / len(first_acc),
            'end_acc': sum(end_acc) * 1.0 / len(end_acc),
            'expected_wins': sum(ew) * 1.0 / len(ew),
            'expected_wins_optimal': sum(ew_opt) * 1.0 / len(ew_opt),
        }
        with open(score_dir, 'w') as f:
            json.dump(eval_out, f)
        print(json.dumps(eval_out))
    finally:
        # Tear down the whole server process group if we started it.
        if not norun_web:
            os.killpg(os.getpgid(web_proc.pid), signal.SIGTERM)
# CLI entry point (click parses sys.argv).
if __name__ == '__main__':
    evaluate()
|
import asyncio
import json
import logging
import random
import time
import traceback
from asyncio import CancelledError
from pathlib import Path
from typing import Any, Callable, Dict, Iterator, List, Optional, Set, Tuple
from blspy import AugSchemeMPL, PrivateKey
from packaging.version import Version
from chia.consensus.block_record import BlockRecord
from chia.consensus.blockchain import ReceiveBlockResult
from chia.consensus.constants import ConsensusConstants
from chia.daemon.keychain_proxy import (
KeychainProxy,
KeychainProxyConnectionFailure,
KeyringIsEmpty,
connect_to_keychain_and_validate,
wrap_local_keychain,
)
from chia.protocols import wallet_protocol
from chia.protocols.full_node_protocol import RequestProofOfWeight, RespondProofOfWeight
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.protocols.wallet_protocol import (
CoinState,
RequestHeaderBlocks,
RequestSESInfo,
RespondBlockHeader,
RespondSESInfo,
RespondToCoinUpdates,
RespondToPhUpdates,
)
from chia.server.node_discovery import WalletPeers
from chia.server.outbound_message import Message, NodeType, make_msg
from chia.server.peer_store_resolver import PeerStoreResolver
from chia.server.server import ChiaServer
from chia.server.ws_connection import WSChiaConnection
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chia.types.coin_spend import CoinSpend
from chia.types.header_block import HeaderBlock
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.peer_info import PeerInfo
from chia.types.weight_proof import SubEpochData, WeightProof
from chia.util.byte_types import hexstr_to_bytes
from chia.util.chunks import chunks
from chia.util.config import WALLET_PEERS_PATH_KEY_DEPRECATED
from chia.util.default_root import STANDALONE_ROOT_PATH
from chia.util.ints import uint32, uint64
from chia.util.keychain import Keychain, KeyringIsLocked
from chia.util.path import mkdir, path_from_root
from chia.util.profiler import profile_task
from chia.wallet.transaction_record import TransactionRecord
from chia.wallet.util.new_peak_queue import NewPeakItem, NewPeakQueue, NewPeakQueueTypes
from chia.wallet.util.peer_request_cache import PeerRequestCache, can_use_peer_request_cache
from chia.wallet.util.wallet_sync_utils import (
fetch_header_blocks_in_range,
fetch_last_tx_from_peer,
last_change_height_cs,
request_and_validate_additions,
request_and_validate_removals,
subscribe_to_coin_updates,
subscribe_to_phs,
)
from chia.wallet.wallet_action import WalletAction
from chia.wallet.wallet_coin_record import WalletCoinRecord
from chia.wallet.wallet_state_manager import WalletStateManager
class WalletNode:
    """Light wallet node: connects to full nodes, syncs coin state relevant to the
    wallet, and forwards it to the WalletStateManager.

    Attribute declarations below are class-level annotations; instances are
    populated in __init__ / _start.
    """

    key_config: Dict
    config: Dict
    constants: ConsensusConstants
    server: Optional[ChiaServer]
    log: logging.Logger
    # Maintains the state of the wallet (blockchain and transactions), handles DB connections
    wallet_state_manager: Optional[WalletStateManager]
    _shut_down: bool
    root_path: Path
    state_changed_callback: Optional[Callable]
    syncing: bool
    full_node_peer: Optional[PeerInfo]
    peer_task: Optional[asyncio.Task]
    logged_in: bool
    wallet_peers_initialized: bool
    keychain_proxy: Optional[KeychainProxy]
    wallet_peers: Optional[WalletPeers]
    # Coin states received before their corresponding new_peak, keyed by header hash.
    race_cache: Dict[bytes32, Set[CoinState]]
    race_cache_hashes: List[Tuple[uint32, bytes32]]
    new_peak_queue: NewPeakQueue
    _process_new_subscriptions_task: Optional[asyncio.Task]
    _secondary_peer_sync_task: Optional[asyncio.Task]
    node_peaks: Dict[bytes32, Tuple[uint32, bytes32]]
    validation_semaphore: Optional[asyncio.Semaphore]
    local_node_synced: bool
def __init__(
    self,
    config: Dict,
    root_path: Path,
    consensus_constants: ConsensusConstants,
    name: str = None,
    local_keychain: Optional[Keychain] = None,
):
    """Initialize in-memory state only; no I/O happens here (see _start).

    Args:
        config: wallet section of the chia config.
        root_path: chia root directory.
        consensus_constants: network consensus constants.
        name: optional logger name; defaults to this module's name.
        local_keychain: in-process keychain; when set, no daemon proxy is used.
    """
    self.config = config
    self.constants = consensus_constants
    self.root_path = root_path
    self.log = logging.getLogger(name if name else __name__)
    # Normal operation data
    self.cached_blocks: Dict = {}
    self.future_block_hashes: Dict = {}
    # Sync data
    self._shut_down = False
    self.proof_hashes: List = []
    self.state_changed_callback = None
    self.wallet_state_manager = None
    self.server = None
    self.wsm_close_task = None
    self.sync_task: Optional[asyncio.Task] = None
    self.logged_in_fingerprint: Optional[int] = None
    self.peer_task = None
    self.logged_in = False
    self.keychain_proxy = None
    self.local_keychain = local_keychain
    # Cache of block height -> timestamp (see get_timestamp_for_height).
    self.height_to_time: Dict[uint32, uint64] = {}
    self.synced_peers: Set[bytes32] = set()  # Peers that we have long synced to
    self.wallet_peers = None
    self.wallet_peers_initialized = False
    self.valid_wp_cache: Dict[bytes32, Any] = {}
    # Per-peer request caches, keyed by peer node id (see get_cache_for_peer).
    self.untrusted_caches: Dict[bytes32, PeerRequestCache] = {}
    self.race_cache = {}  # in Untrusted mode wallet might get the state update before receiving the block
    self.race_cache_hashes = []
    self._process_new_subscriptions_task = None
    self._secondary_peer_sync_task = None
    self.node_peaks = {}
    self.validation_semaphore = None
    self.local_node_synced = False
    # Height gap beyond which new_peak handling switches to a long sync.
    self.LONG_SYNC_THRESHOLD = 200
async def ensure_keychain_proxy(self) -> KeychainProxy:
    """Return the keychain proxy, lazily creating it on first use.

    Wraps the in-process keychain when one was supplied; otherwise connects to
    the keychain daemon. Raises KeychainProxyConnectionFailure when a daemon
    connection cannot be established.
    """
    if self.keychain_proxy is not None:
        return self.keychain_proxy
    if self.local_keychain:
        proxy = wrap_local_keychain(self.local_keychain, log=self.log)
    else:
        proxy = await connect_to_keychain_and_validate(self.root_path, self.log)
        if not proxy:
            raise KeychainProxyConnectionFailure("Failed to connect to keychain service")
    self.keychain_proxy = proxy
    return proxy
def get_cache_for_peer(self, peer) -> PeerRequestCache:
    """Return the per-peer request cache, creating an empty one on first access."""
    cache = self.untrusted_caches.get(peer.peer_node_id)
    if cache is None:
        cache = PeerRequestCache()
        self.untrusted_caches[peer.peer_node_id] = cache
    return cache
def rollback_request_caches(self, reorg_height: int):
    """Invalidate every peer request cache above `reorg_height` (used on reorg)."""
    # Everything after reorg_height should be removed from the cache
    for cache in self.untrusted_caches.values():
        cache.clear_after_height(reorg_height)
async def get_key_for_fingerprint(self, fingerprint: Optional[int]) -> Optional[PrivateKey]:
    """Look up the private key for `fingerprint` via the keychain proxy.

    Returns None when the keyring is empty or locked (logged as warnings);
    re-raises KeychainProxyConnectionFailure so the caller can decide how to
    proceed.
    """
    try:
        keychain_proxy = await self.ensure_keychain_proxy()
        key = await keychain_proxy.get_key_for_fingerprint(fingerprint)
    except KeyringIsEmpty:
        self.log.warning("No keys present. Create keys with the UI, or with the 'chia keys' program.")
        return None
    except KeyringIsLocked:
        self.log.warning("Keyring is locked")
        return None
    except KeychainProxyConnectionFailure as e:
        tb = traceback.format_exc()
        self.log.error(f"Missing keychain_proxy: {e} {tb}")
        raise e  # Re-raise so that the caller can decide whether to continue or abort
    return key
async def _start(
    self,
    fingerprint: Optional[int] = None,
) -> bool:
    """Log in with the key for `fingerprint` (or the default key) and start the node.

    Creates the wallet DB (migrating a standalone v1 DB if present), the
    WalletStateManager, and the subscription-processing task.

    Returns:
        True on success, False when no matching private key is available.
    """
    # Makes sure the coin_state_updates get higher priority than new_peak messages
    self.new_peak_queue = NewPeakQueue(asyncio.PriorityQueue())
    self.synced_peers = set()
    private_key = await self.get_key_for_fingerprint(fingerprint)
    if private_key is None:
        self.logged_in = False
        return False
    if self.config.get("enable_profiler", False):
        asyncio.create_task(profile_task(self.root_path, "wallet", self.log))
    db_path_key_suffix = str(private_key.get_g1().get_fingerprint())
    db_path_replaced: str = (
        self.config["database_path"]
        .replace("CHALLENGE", self.config["selected_network"])
        .replace("KEY", db_path_key_suffix)
    )
    path = path_from_root(self.root_path, db_path_replaced.replace("v1", "v2"))
    mkdir(path.parent)
    # FIX: the inner string literals used double quotes inside a double-quoted
    # f-string, which is a SyntaxError on Python < 3.12; use single quotes.
    standalone_path = path_from_root(STANDALONE_ROOT_PATH, f"{db_path_replaced.replace('v2', 'v1')}_new")
    if not path.exists():
        # Migrate a pre-existing standalone wallet DB into the new location.
        if standalone_path.exists():
            self.log.info(f"Copying wallet db from {standalone_path} to {path}")
            path.write_bytes(standalone_path.read_bytes())
    assert self.server is not None
    self.wallet_state_manager = await WalletStateManager.create(
        private_key,
        self.config,
        path,
        self.constants,
        self.server,
        self.root_path,
        self,
    )
    assert self.wallet_state_manager is not None
    self.config["starting_height"] = 0
    if self.wallet_peers is None:
        self.initialize_wallet_peers()
    if self.state_changed_callback is not None:
        self.wallet_state_manager.set_callback(self.state_changed_callback)
    self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
    self._shut_down = False
    self._process_new_subscriptions_task = asyncio.create_task(self._process_new_subscriptions())
    self.sync_event = asyncio.Event()
    if fingerprint is None:
        self.logged_in_fingerprint = private_key.get_g1().get_fingerprint()
    else:
        self.logged_in_fingerprint = fingerprint
    self.logged_in = True
    self.wallet_state_manager.set_sync_mode(False)
    # Make sure we have the configured number of derived puzzle hashes available.
    async with self.wallet_state_manager.puzzle_store.lock:
        index = await self.wallet_state_manager.puzzle_store.get_last_derivation_path()
        if index is None or index < self.config["initial_num_public_keys"] - 1:
            await self.wallet_state_manager.create_more_puzzle_hashes(from_zero=True)
    self.wsm_close_task = None
    return True
def _close(self):
    """Begin shutdown: clear the logged-in fingerprint, set the shutdown flag,
    and cancel the background tasks. Completion is awaited in _await_closed."""
    self.log.info("self._close")
    self.logged_in_fingerprint = None
    self._shut_down = True
    for task in (self._process_new_subscriptions_task, self._secondary_peer_sync_task):
        if task is not None:
            task.cancel()
async def _await_closed(self, shutting_down: bool = True):
    """Finish shutdown: close connections, peers, the state manager, and
    (when `shutting_down`) the keychain proxy.

    Args:
        shutting_down: False when merely logging out, so the keychain proxy
            connection is kept for the next login.
    """
    self.log.info("self._await_closed")
    if self.server is not None:
        await self.server.close_all_connections()
    if self.wallet_peers is not None:
        await self.wallet_peers.ensure_is_closed()
    if self.wallet_state_manager is not None:
        await self.wallet_state_manager._await_closed()
        self.wallet_state_manager = None
    if shutting_down and self.keychain_proxy is not None:
        # Detach before closing so nothing re-uses a half-closed proxy.
        proxy = self.keychain_proxy
        self.keychain_proxy = None
        await proxy.close()
        await asyncio.sleep(0.5)  # https://docs.aiohttp.org/en/stable/client_advanced.html#graceful-shutdown
    self.logged_in = False
    self.wallet_peers = None
def _set_state_changed_callback(self, callback: Callable):
    """Register the UI state-change callback, propagating it to the state
    manager when one exists."""
    self.state_changed_callback = callback
    if self.wallet_state_manager is None:
        return
    self.wallet_state_manager.set_callback(self.state_changed_callback)
    self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
def _pending_tx_handler(self):
    """Schedule a resend of pending transactions; no-op when logged out."""
    if self.wallet_state_manager is not None:
        asyncio.create_task(self._resend_queue())
async def _action_messages(self) -> List[Message]:
    """Build protocol messages for all pending wallet actions.

    Currently only 'request_puzzle_solution' actions are translated; other
    action types are skipped. Returns [] when logged out.
    """
    if self.wallet_state_manager is None:
        return []
    pending: List[WalletAction] = await self.wallet_state_manager.action_store.get_all_pending_actions()
    messages: List[Message] = []
    for pending_action in pending:
        payload = json.loads(pending_action.data)
        action_data = payload["data"]["action_data"]
        if pending_action.name != "request_puzzle_solution":
            continue
        coin_name = bytes32(hexstr_to_bytes(action_data["coin_name"]))
        height = uint32(action_data["height"])
        messages.append(
            make_msg(
                ProtocolMessageTypes.request_puzzle_solution,
                wallet_protocol.RequestPuzzleSolution(coin_name, height),
            )
        )
    return messages
async def _resend_queue(self):
    """Resend unconfirmed transactions and pending action messages to peers.

    The shutdown/logged-out condition is re-checked before every await-heavy
    step because this coroutine can outlive a logout.
    """
    if self._shut_down or self.server is None or self.wallet_state_manager is None:
        return None
    for msg, sent_peers in await self._messages_to_resend():
        if self._shut_down or self.server is None or self.wallet_state_manager is None:
            return None
        full_nodes = self.server.get_full_node_connections()
        for peer in full_nodes:
            # Skip peers that already accepted this transaction.
            if peer.peer_node_id in sent_peers:
                continue
            self.log.debug(f"sending: {msg}")
            await peer.send_message(msg)
    for msg in await self._action_messages():
        if self._shut_down or self.server is None or self.wallet_state_manager is None:
            return None
        await self.server.send_to_all([msg], NodeType.FULL_NODE)
async def _messages_to_resend(self) -> List[Tuple[Message, Set[bytes32]]]:
    """Return (send_transaction message, peers-that-already-accepted) pairs for
    every unconfirmed transaction that has a spend bundle."""
    if self.wallet_state_manager is None or self._shut_down:
        return []
    pending: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_not_sent()
    result: List[Tuple[Message, Set[bytes32]]] = []
    for record in pending:
        if record.spend_bundle is None:
            continue
        # Peers that reported SUCCESS don't need the transaction again.
        already_sent = {
            bytes32.from_hexstr(peer_id)
            for peer_id, status, _ in record.sent_to
            if status == MempoolInclusionStatus.SUCCESS.value
        }
        result.append(
            (
                make_msg(
                    ProtocolMessageTypes.send_transaction,
                    wallet_protocol.SendTransaction(record.spend_bundle),
                ),
                already_sent,
            )
        )
    return result
async def _process_new_subscriptions(self):
    """Background task: drain the priority queue of subscription requests,
    state updates, and new peaks until shutdown.

    Runs until `_shut_down` is set; exceptions per item are logged and the
    offending peer is closed, then the loop continues.
    """
    while not self._shut_down:
        # Here we process four types of messages in the queue, where the first one has higher priority (lower
        # number in the queue), and priority decreases for each type.
        peer: Optional[WSChiaConnection] = None
        item: Optional[NewPeakItem] = None
        try:
            peer, item = None, None
            item = await self.new_peak_queue.get()
            self.log.debug("Pulled from queue: %s", item)
            assert item is not None
            if item.item_type == NewPeakQueueTypes.COIN_ID_SUBSCRIPTION:
                # Subscriptions are the highest priority, because we don't want to process any more peaks or
                # state updates until we are sure that we subscribed to everything that we need to. Otherwise,
                # we might not be able to process some state.
                coin_ids: List[bytes32] = item.data
                for peer in self.server.get_full_node_connections():
                    coin_states: List[CoinState] = await subscribe_to_coin_updates(coin_ids, peer, uint32(0))
                    if len(coin_states) > 0:
                        async with self.wallet_state_manager.lock:
                            await self.receive_state_from_peer(coin_states, peer)
            elif item.item_type == NewPeakQueueTypes.PUZZLE_HASH_SUBSCRIPTION:
                puzzle_hashes: List[bytes32] = item.data
                for peer in self.server.get_full_node_connections():
                    # Puzzle hash subscription
                    coin_states: List[CoinState] = await subscribe_to_phs(puzzle_hashes, peer, uint32(0))
                    if len(coin_states) > 0:
                        async with self.wallet_state_manager.lock:
                            await self.receive_state_from_peer(coin_states, peer)
            elif item.item_type == NewPeakQueueTypes.FULL_NODE_STATE_UPDATED:
                # Note: this can take a while when we have a lot of transactions. We want to process these
                # before new_peaks, since new_peak_wallet requires that we first obtain the state for that peak.
                request: wallet_protocol.CoinStateUpdate = item.data[0]
                peer = item.data[1]
                assert peer is not None
                await self.state_update_received(request, peer)
            elif item.item_type == NewPeakQueueTypes.NEW_PEAK_WALLET:
                # This can take a VERY long time, because it might trigger a long sync. It is OK if we miss some
                # subscriptions or state updates, since all subscriptions and state updates will be handled by
                # long_sync (up to the target height).
                request: wallet_protocol.NewPeakWallet = item.data[0]
                peer = item.data[1]
                assert peer is not None
                await self.new_peak_wallet(request, peer)
            else:
                # Unknown queue item type: programming error.
                assert False
        except CancelledError:
            self.log.info("Queue task cancelled, exiting.")
            raise
        except Exception as e:
            self.log.error(f"Exception handling {item}, {e} {traceback.format_exc()}")
            if peer is not None:
                # NOTE(review): 9999 is presumably a close/ban code — confirm
                # against WSChiaConnection.close semantics.
                await peer.close(9999)
def set_server(self, server: ChiaServer):
    """Attach the ChiaServer instance and initialize wallet peer discovery."""
    self.server = server
    self.initialize_wallet_peers()
def initialize_wallet_peers(self):
    """Set the on_connect handler and, unless trusted-only or testing, start
    the WalletPeers discovery service (idempotent once wallet_peers exists)."""
    self.server.on_connect = self.on_connect
    network_name = self.config["selected_network"]
    connect_to_unknown_peers = self.config.get("connect_to_unknown_peers", True)
    testing = self.config.get("testing", False)
    if self.wallet_peers is None and connect_to_unknown_peers and not testing:
        self.wallet_peers = WalletPeers(
            self.server,
            self.config["target_peer_count"],
            PeerStoreResolver(
                self.root_path,
                self.config,
                selected_network=network_name,
                peers_file_path_key="wallet_peers_file_path",
                legacy_peer_db_path_key=WALLET_PEERS_PATH_KEY_DEPRECATED,
                default_peers_file_path="wallet/db/wallet_peers.dat",
            ),
            self.config["introducer_peer"],
            self.config.get("dns_servers", ["dns-introducer.chia.net"]),
            self.config["peer_connect_interval"],
            network_name,
            None,
            self.log,
        )
        # Fire-and-forget: peer discovery runs for the node's lifetime.
        asyncio.create_task(self.wallet_peers.start())
def on_disconnect(self, peer: WSChiaConnection):
    """Clean up per-peer state when a connection drops.

    Losing a trusted peer also drops local_node_synced and restarts peer
    discovery so we can fall back to untrusted peers.
    """
    if self.is_trusted(peer):
        self.local_node_synced = False
        self.initialize_wallet_peers()
    node_id = peer.peer_node_id
    self.untrusted_caches.pop(node_id, None)
    self.synced_peers.discard(node_id)
    self.node_peaks.pop(node_id, None)
async def on_connect(self, peer: WSChiaConnection):
    """Handle a new full-node connection: reject old peers, resend pending
    transactions, and notify wallet peer discovery."""
    if self.wallet_state_manager is None:
        return None
    if Version(peer.protocol_version) < Version("0.0.33"):
        self.log.info("Disconnecting, full node running old software")
        await peer.close()
        # NOTE(review): execution continues after close() here (no return) —
        # confirm whether the remaining steps are intended for a closed peer.
    trusted = self.is_trusted(peer)
    if not trusted and self.local_node_synced:
        # We already follow a synced trusted node; untrusted peers are not needed.
        await peer.close()
    if peer.peer_node_id in self.synced_peers:
        self.synced_peers.remove(peer.peer_node_id)
    self.log.info(f"Connected peer {peer.get_peer_info()} is trusted: {trusted}")
    messages_peer_ids = await self._messages_to_resend()
    self.wallet_state_manager.state_changed("add_connection")
    for msg, peer_ids in messages_peer_ids:
        # Skip transactions this peer already accepted.
        if peer.peer_node_id in peer_ids:
            continue
        await peer.send_message(msg)
    if self.wallet_peers is not None:
        await self.wallet_peers.on_connect(peer)
async def perform_atomic_rollback(self, fork_height: int, cache: Optional[PeerRequestCache] = None):
    """Roll wallet state back to `fork_height` inside a single DB transaction.

    Args:
        fork_height: last height to keep.
        cache: when given, only this peer's request cache is invalidated;
            otherwise every peer cache is rolled back.

    On failure the DB transaction is rolled back, in-memory caches are rebuilt
    from the DB, and the exception is re-raised.
    """
    assert self.wallet_state_manager is not None
    self.log.info(f"perform_atomic_rollback to {fork_height}")
    async with self.wallet_state_manager.db_wrapper.lock:
        try:
            await self.wallet_state_manager.db_wrapper.begin_transaction()
            removed_wallet_ids = await self.wallet_state_manager.reorg_rollback(fork_height)
            await self.wallet_state_manager.blockchain.set_finished_sync_up_to(fork_height, True)
            if cache is None:
                self.rollback_request_caches(fork_height)
            else:
                cache.clear_after_height(fork_height)
            await self.wallet_state_manager.db_wrapper.commit_transaction()
        except Exception as e:
            tb = traceback.format_exc()
            self.log.error(f"Exception while perform_atomic_rollback: {e} {tb}")
            await self.wallet_state_manager.db_wrapper.rollback_transaction()
            # Rebuild in-memory caches so they match the rolled-back DB.
            await self.wallet_state_manager.coin_store.rebuild_wallet_cache()
            await self.wallet_state_manager.tx_store.rebuild_tx_cache()
            await self.wallet_state_manager.pool_store.rebuild_cache()
            raise
        else:
            await self.wallet_state_manager.blockchain.clean_block_records()
            # Wallets whose coins were entirely removed by the rollback.
            for wallet_id in removed_wallet_ids:
                self.wallet_state_manager.wallets.pop(wallet_id)
async def long_sync(
    self,
    target_height: uint32,
    full_node: WSChiaConnection,
    fork_height: int,
    *,
    rollback: bool,
):
    """
    Sync algorithm:
    - Download and verify weight proof (if not trusted)
    - Roll back anything after the fork point (if rollback=True)
    - Subscribe to all puzzle_hashes over and over until there are no more updates
    - Subscribe to all coin_ids over and over until there are no more updates
    - rollback=False means that we are just double-checking with this peer to make sure we don't have any
      missing transactions, so we don't need to rollback
    """

    def is_new_state_update(cs: CoinState) -> bool:
        # A state is worth processing if it is unconfirmed, or changed at/after the fork.
        if cs.spent_height is None and cs.created_height is None:
            return True
        if cs.spent_height is not None and cs.spent_height >= fork_height:
            return True
        if cs.created_height is not None and cs.created_height >= fork_height:
            return True
        return False

    trusted: bool = self.is_trusted(full_node)
    self.log.info(f"Starting sync trusted: {trusted} to peer {full_node.peer_host}")
    assert self.wallet_state_manager is not None

    start_time = time.time()

    if rollback:
        # we should clear all peers since this is a full rollback
        await self.perform_atomic_rollback(fork_height)
        await self.update_ui()

    # We only process new state updates to avoid slow reprocessing. We set the sync height after adding
    # Things, so we don't have to reprocess these later. There can be many things in ph_update_res.
    already_checked_ph: Set[bytes32] = set()
    continue_while: bool = True
    all_puzzle_hashes: List[bytes32] = await self.get_puzzle_hashes_to_subscribe()
    # Re-subscribe until processing creates no new puzzle hashes to cover.
    while continue_while:
        # Get all phs from puzzle store
        ph_chunks: Iterator[List[bytes32]] = chunks(all_puzzle_hashes, 1000)
        for chunk in ph_chunks:
            ph_update_res: List[CoinState] = await subscribe_to_phs(
                [p for p in chunk if p not in already_checked_ph], full_node, 0
            )
            ph_update_res = list(filter(is_new_state_update, ph_update_res))
            if not await self.receive_state_from_peer(ph_update_res, full_node, update_finished_height=True):
                # If something goes wrong, abort sync
                return
            already_checked_ph.update(chunk)

        # Check if new puzzle hashed have been created
        await self.wallet_state_manager.create_more_puzzle_hashes()
        all_puzzle_hashes = await self.get_puzzle_hashes_to_subscribe()
        continue_while = False
        for ph in all_puzzle_hashes:
            if ph not in already_checked_ph:
                continue_while = True
                break
    self.log.info(f"Successfully subscribed and updated {len(already_checked_ph)} puzzle hashes")

    # The number of coin id updates are usually going to be significantly less than ph updates, so we can
    # sync from 0 every time.
    continue_while = True
    all_coin_ids: List[bytes32] = await self.get_coin_ids_to_subscribe(0)
    already_checked_coin_ids: Set[bytes32] = set()
    while continue_while:
        one_k_chunks = chunks(all_coin_ids, 1000)
        for chunk in one_k_chunks:
            c_update_res: List[CoinState] = await subscribe_to_coin_updates(chunk, full_node, 0)

            if not await self.receive_state_from_peer(c_update_res, full_node):
                # If something goes wrong, abort sync
                return
            already_checked_coin_ids.update(chunk)

        all_coin_ids = await self.get_coin_ids_to_subscribe(0)
        continue_while = False
        for coin_id in all_coin_ids:
            if coin_id not in already_checked_coin_ids:
                continue_while = True
                break
    self.log.info(f"Successfully subscribed and updated {len(already_checked_coin_ids)} coin ids")

    # Only update this fully when the entire sync has completed
    await self.wallet_state_manager.blockchain.set_finished_sync_up_to(target_height)

    if trusted:
        self.local_node_synced = True

    self.wallet_state_manager.state_changed("new_block")

    self.synced_peers.add(full_node.peer_node_id)
    await self.update_ui()

    end_time = time.time()
    duration = end_time - start_time
    self.log.info(f"Sync (trusted: {trusted}) duration was: {duration}")
async def receive_state_from_peer(
    self,
    items_input: List[CoinState],
    peer: WSChiaConnection,
    fork_height: Optional[uint32] = None,
    height: Optional[uint32] = None,
    header_hash: Optional[bytes32] = None,
    update_finished_height: bool = False,
) -> bool:
    """Apply coin states from `peer` to the wallet state manager.

    Trusted peers: states are applied in serial DB transactions without
    validation. Untrusted peers: states are validated concurrently (bounded by
    a semaphore) before being applied.

    Returns:
        False when the sync should be aborted (shutdown, DB error, or peer
        disconnected); True otherwise.
    """
    # Adds the state to the wallet state manager. If the peer is trusted, we do not validate. If the peer is
    # untrusted we do, but we might not add the state, since we need to receive the new_peak message as well.
    if self.wallet_state_manager is None:
        return False
    trusted = self.is_trusted(peer)
    # Validate states in parallel, apply serial
    # TODO: optimize fetching
    if self.validation_semaphore is None:
        self.validation_semaphore = asyncio.Semaphore(6)

    # Rollback is handled in wallet_short_sync_backtrack for untrusted peers, so we don't need to do it here.
    # Also it's not safe to rollback, an untrusted peer can give us old fork point and make our TX dissapear.
    # wallet_short_sync_backtrack can safely rollback because we validated the weight for the new peak so we
    # know the peer is telling the truth about the reorg.

    # If there is a fork, we need to ensure that we roll back in trusted mode to properly handle reorgs
    cache: PeerRequestCache = self.get_cache_for_peer(peer)
    if trusted and fork_height is not None and height is not None and fork_height != height - 1:
        # only one peer told us to rollback so only clear for that peer
        await self.perform_atomic_rollback(fork_height, cache=cache)
    else:
        if fork_height is not None:
            # only one peer told us to rollback so only clear for that peer
            cache.clear_after_height(fork_height)
            self.log.info(f"clear_after_height {fork_height} for peer {peer}")

    all_tasks: List[asyncio.Task] = []
    target_concurrent_tasks: int = 20
    concurrent_tasks_cs_heights: List[uint32] = []

    # Ensure the list is sorted
    items = sorted(items_input, key=last_change_height_cs)

    async def receive_and_validate(inner_states: List[CoinState], inner_idx_start: int, cs_heights: List[uint32]):
        # Untrusted path: validate a chunk under the semaphore, then apply the
        # valid states in one DB transaction.
        assert self.wallet_state_manager is not None
        try:
            assert self.validation_semaphore is not None
            async with self.validation_semaphore:
                if header_hash is not None:
                    assert height is not None
                    for inner_state in inner_states:
                        self.add_state_to_race_cache(header_hash, height, inner_state)
                        self.log.info(f"Added to race cache: {height}, {inner_state}")
                valid_states = [
                    inner_state
                    for inner_state in inner_states
                    if await self.validate_received_state_from_peer(inner_state, peer, cache, fork_height)
                ]
                if len(valid_states) > 0:
                    async with self.wallet_state_manager.db_wrapper.lock:
                        self.log.info(
                            f"new coin state received ({inner_idx_start}-"
                            f"{inner_idx_start + len(inner_states) - 1}/ {len(items)})"
                        )
                        if self.wallet_state_manager is None:
                            return
                        try:
                            await self.wallet_state_manager.db_wrapper.begin_transaction()
                            await self.wallet_state_manager.new_coin_state(valid_states, peer, fork_height)

                            if update_finished_height:
                                if len(cs_heights) == 1:
                                    # We have processed all past tasks, so we can increase the height safely
                                    synced_up_to = last_change_height_cs(valid_states[-1]) - 1
                                else:
                                    # We know we have processed everything before this min height
                                    synced_up_to = min(cs_heights) - 1
                                await self.wallet_state_manager.blockchain.set_finished_sync_up_to(
                                    synced_up_to, in_transaction=True
                                )
                            await self.wallet_state_manager.db_wrapper.commit_transaction()

                        except Exception as e:
                            tb = traceback.format_exc()
                            self.log.error(f"Exception while adding state: {e} {tb}")
                            await self.wallet_state_manager.db_wrapper.rollback_transaction()
                            # Rebuild in-memory caches to match the rolled-back DB.
                            await self.wallet_state_manager.coin_store.rebuild_wallet_cache()
                            await self.wallet_state_manager.tx_store.rebuild_tx_cache()
                            await self.wallet_state_manager.pool_store.rebuild_cache()
                        else:
                            await self.wallet_state_manager.blockchain.clean_block_records()

        except Exception as e:
            tb = traceback.format_exc()
            self.log.error(f"Exception while adding state: {e} {tb}")
        finally:
            # Always release this chunk's height so the height watermark can advance.
            cs_heights.remove(last_change_height_cs(inner_states[0]))

    idx = 1
    # Keep chunk size below 1000 just in case, windows has sqlite limits of 999 per query
    # Untrusted has a smaller batch size since validation has to happen which takes a while
    chunk_size: int = 900 if trusted else 10
    for states in chunks(items, chunk_size):
        if self.server is None:
            self.log.error("No server")
            await asyncio.gather(*all_tasks)
            return False
        if peer.peer_node_id not in self.server.all_connections:
            self.log.error(f"Disconnected from peer {peer.peer_node_id} host {peer.peer_host}")
            await asyncio.gather(*all_tasks)
            return False
        if trusted:
            async with self.wallet_state_manager.db_wrapper.lock:
                try:
                    self.log.info(f"new coin state received ({idx}-" f"{idx + len(states) - 1}/ {len(items)})")
                    await self.wallet_state_manager.db_wrapper.begin_transaction()
                    await self.wallet_state_manager.new_coin_state(states, peer, fork_height)
                    await self.wallet_state_manager.blockchain.set_finished_sync_up_to(
                        last_change_height_cs(states[-1]) - 1, in_transaction=True
                    )
                    await self.wallet_state_manager.db_wrapper.commit_transaction()
                except Exception as e:
                    await self.wallet_state_manager.db_wrapper.rollback_transaction()
                    await self.wallet_state_manager.coin_store.rebuild_wallet_cache()
                    await self.wallet_state_manager.tx_store.rebuild_tx_cache()
                    await self.wallet_state_manager.pool_store.rebuild_cache()
                    tb = traceback.format_exc()
                    self.log.error(f"Error adding states.. {e} {tb}")
                    return False
                else:
                    await self.wallet_state_manager.blockchain.clean_block_records()

        else:
            # Bound the number of in-flight validation tasks.
            while len(concurrent_tasks_cs_heights) >= target_concurrent_tasks:
                await asyncio.sleep(0.1)
                if self._shut_down:
                    self.log.info("Terminating receipt and validation due to shut down request")
                    await asyncio.gather(*all_tasks)
                    return False
            concurrent_tasks_cs_heights.append(last_change_height_cs(states[0]))
            all_tasks.append(asyncio.create_task(receive_and_validate(states, idx, concurrent_tasks_cs_heights)))
        idx += len(states)

    still_connected = self.server is not None and peer.peer_node_id in self.server.all_connections
    await asyncio.gather(*all_tasks)
    await self.update_ui()
    return still_connected and self.server is not None and peer.peer_node_id in self.server.all_connections
async def get_coins_with_puzzle_hash(self, puzzle_hash) -> List[CoinState]:
    """Ask the first connected full node for all coin states at `puzzle_hash`
    (also subscribes to updates for it, from height 0).

    Raises:
        ValueError: when no full node is connected.
    """
    assert self.wallet_state_manager is not None
    assert self.server is not None
    full_nodes = self.server.connection_by_type[NodeType.FULL_NODE]
    if not full_nodes:
        raise ValueError("Not connected to the full node")
    node = next(iter(full_nodes.values()))
    request = wallet_protocol.RegisterForPhUpdates(puzzle_hash, uint32(0))
    response: Optional[RespondToPhUpdates] = await node.register_interest_in_puzzle_hash(request)
    assert response is not None
    return response.coin_states
async def is_peer_synced(
    self, peer: WSChiaConnection, header_block: HeaderBlock, request_time: uint64
) -> Optional[uint64]:
    """Return the peer's latest transaction-block timestamp if the peer looks
    synced, else None.

    Outside of testing mode, a peer whose last timestamp is more than 600
    seconds behind `request_time` is considered not synced.
    """
    last_tx: Optional[HeaderBlock] = await fetch_last_tx_from_peer(header_block.height, peer)
    latest_timestamp: Optional[uint64] = None
    if last_tx is not None:
        assert last_tx.foliage_transaction_block is not None
        latest_timestamp = last_tx.foliage_transaction_block.timestamp
    # Guard clauses make the original `A or B and C` precedence explicit.
    if latest_timestamp is None:
        return None
    if self.config["testing"] is False and latest_timestamp < request_time - 600:
        return None
    return latest_timestamp
def is_trusted(self, peer) -> bool:
    """Return True when `peer` matches the configured trusted_peers list."""
    assert self.server is not None
    return self.server.is_trusted_peer(peer, self.config["trusted_peers"])
def add_state_to_race_cache(self, header_hash: bytes32, height: uint32, coin_state: CoinState) -> None:
    """Remember a coin state that arrived before its block's new_peak message,
    keyed by header hash; evict entries more than 100 blocks old."""
    delete_threshold = 100
    # Drop cache entries whose height is at least `delete_threshold` behind.
    stale = [(rc_height, rc_hh) for rc_height, rc_hh in self.race_cache_hashes
             if height - delete_threshold >= rc_height]
    for _, stale_hash in stale:
        self.race_cache.pop(stale_hash)
    self.race_cache_hashes = [
        (rc_height, rc_hh)
        for rc_height, rc_hh in self.race_cache_hashes
        if height - delete_threshold < rc_height
    ]
    self.race_cache.setdefault(header_hash, set()).add(coin_state)
async def state_update_received(self, request: wallet_protocol.CoinStateUpdate, peer: WSChiaConnection) -> None:
    """Handle a CoinStateUpdate push from a full node by applying its items
    under the state-manager lock."""
    # This gets called every time there is a new coin or puzzle hash change in the DB
    # that is of interest to this wallet. It is not guaranteed to come for every height. This message is guaranteed
    # to come before the corresponding new_peak for each height. We handle this differently for trusted and
    # untrusted peers. For trusted, we always process the state, and we process reorgs as well.
    assert self.wallet_state_manager is not None
    assert self.server is not None
    async with self.wallet_state_manager.lock:
        await self.receive_state_from_peer(
            request.items,
            peer,
            request.fork_height,
            request.height,
            request.peak_hash,
        )
def get_full_node_peer(self) -> Optional[WSChiaConnection]:
    """Return a random connected full node, or None when there is no server
    or no connections."""
    if self.server is None:
        return None
    full_nodes = self.server.get_full_node_connections()
    return random.choice(full_nodes) if len(full_nodes) > 0 else None
async def disconnect_and_stop_wpeers(self) -> None:
    """Drop untrusted full-node connections (keeping at least one connection)
    and shut down wallet peer discovery."""
    if self.server is None:
        return
    # Close connection of non-trusted peers
    connections = self.server.get_full_node_connections()
    if len(connections) > 1:
        for node in connections:
            if not self.is_trusted(node):
                await node.close()
    if self.wallet_peers is not None:
        await self.wallet_peers.ensure_is_closed()
        self.wallet_peers = None
async def check_for_synced_trusted_peer(self, header_block: HeaderBlock, request_time: uint64) -> bool:
    """Return True if any connected trusted full node reports itself synced."""
    if self.server is None:
        return False
    for node in self.server.get_full_node_connections():
        if not self.is_trusted(node):
            continue
        if await self.is_peer_synced(node, header_block, request_time):
            return True
    return False
async def get_timestamp_for_height(self, height: uint32) -> uint64:
    """
    Returns the timestamp for transaction block at h=height, if not transaction block, backtracks until it finds
    a transaction block

    Raises:
        ValueError: when no peer is available or the peer fails to return blocks.
    """
    # Fast paths: node-level cache, then any peer's request cache.
    if height in self.height_to_time:
        return self.height_to_time[height]
    for cache in self.untrusted_caches.values():
        cache_ts: Optional[uint64] = cache.get_height_timestamp(height)
        if cache_ts is not None:
            return cache_ts
    peer: Optional[WSChiaConnection] = self.get_full_node_peer()
    if peer is None:
        raise ValueError("Cannot fetch timestamp, no peers")
    self.log.debug(f"Fetching block at height: {height}")
    last_tx_block: Optional[HeaderBlock] = await fetch_last_tx_from_peer(height, peer)
    if last_tx_block is None:
        raise ValueError(f"Error fetching blocks from peer {peer.get_peer_info()}")
    assert last_tx_block.foliage_transaction_block is not None
    # Cache the fetched block on the peer's request cache for future lookups.
    self.get_cache_for_peer(peer).add_to_blocks(last_tx_block)
    return last_tx_block.foliage_transaction_block.timestamp
async def new_peak_wallet(self, new_peak: wallet_protocol.NewPeakWallet, peer: WSChiaConnection):
if self.wallet_state_manager is None:
# When logging out of wallet
return
assert self.server is not None
request_time = uint64(int(time.time()))
trusted: bool = self.is_trusted(peer)
peak_hb: Optional[HeaderBlock] = await self.wallet_state_manager.blockchain.get_peak_block()
if peak_hb is not None and new_peak.weight < peak_hb.weight:
# Discards old blocks, but accepts blocks that are equal in weight to peak
return
request = wallet_protocol.RequestBlockHeader(new_peak.height)
response: Optional[RespondBlockHeader] = await peer.request_block_header(request)
if response is None:
self.log.warning(f"Peer {peer.get_peer_info()} did not respond in time.")
await peer.close(120)
return
header_block: HeaderBlock = response.header_block
latest_timestamp: Optional[uint64] = await self.is_peer_synced(peer, header_block, request_time)
if latest_timestamp is None:
if trusted:
self.log.debug(f"Trusted peer {peer.get_peer_info()} is not synced.")
return
else:
self.log.warning(f"Non-trusted peer {peer.get_peer_info()} is not synced, disconnecting")
await peer.close(120)
return
current_height: uint32 = await self.wallet_state_manager.blockchain.get_finished_sync_up_to()
if self.is_trusted(peer):
async with self.wallet_state_manager.lock:
await self.wallet_state_manager.blockchain.set_peak_block(header_block, latest_timestamp)
# Disconnect from all untrusted peers if our local node is trusted and synced
await self.disconnect_and_stop_wpeers()
# Sync to trusted node if we haven't done so yet. As long as we have synced once (and not
# disconnected), we assume that the full node will continue to give us state updates, so we do
# not need to resync.
if peer.peer_node_id not in self.synced_peers:
if new_peak.height - current_height > self.LONG_SYNC_THRESHOLD:
self.wallet_state_manager.set_sync_mode(True)
await self.long_sync(new_peak.height, peer, uint32(max(0, current_height - 256)), rollback=True)
self.wallet_state_manager.set_sync_mode(False)
else:
far_behind: bool = (
new_peak.height - self.wallet_state_manager.blockchain.get_peak_height() > self.LONG_SYNC_THRESHOLD
)
# check if claimed peak is heavier or same as our current peak
# if we haven't synced fully to this peer sync again
if (
peer.peer_node_id not in self.synced_peers or far_behind
) and new_peak.height >= self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
if await self.check_for_synced_trusted_peer(header_block, request_time):
self.wallet_state_manager.set_sync_mode(False)
self.log.info("Cancelling untrusted sync, we are connected to a trusted peer")
return
syncing = False
if far_behind or len(self.synced_peers) == 0:
syncing = True
self.wallet_state_manager.set_sync_mode(True)
try:
(
valid_weight_proof,
weight_proof,
summaries,
block_records,
) = await self.fetch_and_validate_the_weight_proof(peer, response.header_block)
if valid_weight_proof is False:
if syncing:
self.wallet_state_manager.set_sync_mode(False)
await peer.close()
return
if await self.check_for_synced_trusted_peer(header_block, request_time):
self.wallet_state_manager.set_sync_mode(False)
self.log.info("Cancelling untrusted sync, we are connected to a trusted peer")
return
assert weight_proof is not None
old_proof = self.wallet_state_manager.blockchain.synced_weight_proof
if syncing:
# This usually happens the first time we start up the wallet. We roll back slightly to be
# safe, but we don't want to rollback too much (hence 16)
fork_point: int = max(0, current_height - 16)
else:
# In this case we will not rollback so it's OK to check some older updates as well, to ensure
# that no recent transactions are being hidden.
fork_point = 0
if old_proof is not None:
# If the weight proof fork point is in the past, rollback more to ensure we don't have duplicate
# state.
wp_fork_point = self.wallet_state_manager.weight_proof_handler.get_fork_point(
old_proof, weight_proof
)
fork_point = min(fork_point, wp_fork_point)
await self.wallet_state_manager.blockchain.new_weight_proof(weight_proof, block_records)
if syncing:
async with self.wallet_state_manager.lock:
self.log.info("Primary peer syncing")
await self.long_sync(new_peak.height, peer, fork_point, rollback=True)
else:
if self._secondary_peer_sync_task is None or self._secondary_peer_sync_task.done():
self.log.info("Secondary peer syncing")
self._secondary_peer_sync_task = asyncio.create_task(
self.long_sync(new_peak.height, peer, fork_point, rollback=False)
)
return
else:
self.log.info("Will not do secondary sync, there is already another sync task running.")
return
self.log.info(f"New peak wallet.. {new_peak.height} {peer.get_peer_info()} 12")
if (
self.wallet_state_manager.blockchain.synced_weight_proof is None
or weight_proof.recent_chain_data[-1].weight
> self.wallet_state_manager.blockchain.synced_weight_proof.recent_chain_data[-1].weight
):
await self.wallet_state_manager.blockchain.new_weight_proof(weight_proof, block_records)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Error syncing to {peer.get_peer_info()} {e} {tb}")
if syncing:
self.wallet_state_manager.set_sync_mode(False)
tb = traceback.format_exc()
self.log.error(f"Error syncing to {peer.get_peer_info()} {tb}")
await peer.close()
return
if syncing:
self.wallet_state_manager.set_sync_mode(False)
else:
# This is the (untrusted) case where we already synced and are not too far behind. Here we just
# fetch one by one.
async with self.wallet_state_manager.lock:
peak_hb = await self.wallet_state_manager.blockchain.get_peak_block()
if peak_hb is None or new_peak.weight > peak_hb.weight:
backtrack_fork_height: int = await self.wallet_short_sync_backtrack(header_block, peer)
else:
backtrack_fork_height = new_peak.height - 1
if peer.peer_node_id not in self.synced_peers:
# Edge case, this happens when the peak < WEIGHT_PROOF_RECENT_BLOCKS
# we still want to subscribe for all phs and coins.
# (Hints are not in filter)
all_coin_ids: List[bytes32] = await self.get_coin_ids_to_subscribe(uint32(0))
phs: List[bytes32] = await self.get_puzzle_hashes_to_subscribe()
ph_updates: List[CoinState] = await subscribe_to_phs(phs, peer, uint32(0))
coin_updates: List[CoinState] = await subscribe_to_coin_updates(all_coin_ids, peer, uint32(0))
peer_new_peak_height, peer_new_peak_hash = self.node_peaks[peer.peer_node_id]
success = await self.receive_state_from_peer(
ph_updates + coin_updates,
peer,
height=peer_new_peak_height,
header_hash=peer_new_peak_hash,
)
if success:
self.synced_peers.add(peer.peer_node_id)
else:
if peak_hb is not None and new_peak.weight <= peak_hb.weight:
# Don't process blocks at the same weight
return
# For every block, we need to apply the cache from race_cache
for potential_height in range(backtrack_fork_height + 1, new_peak.height + 1):
header_hash = self.wallet_state_manager.blockchain.height_to_hash(uint32(potential_height))
if header_hash in self.race_cache:
self.log.info(f"Receiving race state: {self.race_cache[header_hash]}")
await self.receive_state_from_peer(list(self.race_cache[header_hash]), peer)
self.wallet_state_manager.state_changed("new_block")
self.wallet_state_manager.set_sync_mode(False)
self.log.info(f"Finished processing new peak of {new_peak.height}")
if peer.peer_node_id in self.synced_peers:
await self.wallet_state_manager.blockchain.set_finished_sync_up_to(new_peak.height)
await self.wallet_state_manager.new_peak(new_peak)
async def wallet_short_sync_backtrack(self, header_block: HeaderBlock, peer: WSChiaConnection) -> int:
assert self.wallet_state_manager is not None
peak: Optional[HeaderBlock] = await self.wallet_state_manager.blockchain.get_peak_block()
top = header_block
blocks = [top]
# Fetch blocks backwards until we hit the one that we have,
# then complete them with additions / removals going forward
fork_height = 0
if self.wallet_state_manager.blockchain.contains_block(header_block.prev_header_hash):
fork_height = header_block.height - 1
while not self.wallet_state_manager.blockchain.contains_block(top.prev_header_hash) and top.height > 0:
request_prev = wallet_protocol.RequestBlockHeader(top.height - 1)
response_prev: Optional[RespondBlockHeader] = await peer.request_block_header(request_prev)
if response_prev is None or not isinstance(response_prev, RespondBlockHeader):
raise RuntimeError("bad block header response from peer while syncing")
prev_head = response_prev.header_block
blocks.append(prev_head)
top = prev_head
fork_height = top.height - 1
blocks.reverse()
# Roll back coins and transactions
peak_height = self.wallet_state_manager.blockchain.get_peak_height()
if fork_height < peak_height:
self.log.info(f"Rolling back to {fork_height}")
# we should clear all peers since this is a full rollback
await self.perform_atomic_rollback(fork_height)
await self.update_ui()
if peak is not None:
assert header_block.weight >= peak.weight
for block in blocks:
# Set blockchain to the latest peak
res, err = await self.wallet_state_manager.blockchain.receive_block(block)
if res == ReceiveBlockResult.INVALID_BLOCK:
raise ValueError(err)
return fork_height
async def update_ui(self):
for wallet_id, wallet in self.wallet_state_manager.wallets.items():
self.wallet_state_manager.state_changed("coin_removed", wallet_id)
self.wallet_state_manager.state_changed("coin_added", wallet_id)
async def fetch_and_validate_the_weight_proof(
self, peer: WSChiaConnection, peak: HeaderBlock
) -> Tuple[bool, Optional[WeightProof], List[SubEpochSummary], List[BlockRecord]]:
assert self.wallet_state_manager is not None
assert self.wallet_state_manager.weight_proof_handler is not None
weight_request = RequestProofOfWeight(peak.height, peak.header_hash)
wp_timeout = self.config.get("weight_proof_timeout", 360)
self.log.debug(f"weight proof timeout is {wp_timeout} sec")
weight_proof_response: RespondProofOfWeight = await peer.request_proof_of_weight(
weight_request, timeout=wp_timeout
)
if weight_proof_response is None:
return False, None, [], []
start_validation = time.time()
weight_proof = weight_proof_response.wp
if weight_proof.recent_chain_data[-1].reward_chain_block.height != peak.height:
return False, None, [], []
if weight_proof.recent_chain_data[-1].reward_chain_block.weight != peak.weight:
return False, None, [], []
if weight_proof.get_hash() in self.valid_wp_cache:
valid, fork_point, summaries, block_records = self.valid_wp_cache[weight_proof.get_hash()]
else:
start_validation = time.time()
(
valid,
fork_point,
summaries,
block_records,
) = await self.wallet_state_manager.weight_proof_handler.validate_weight_proof(weight_proof)
if valid:
self.valid_wp_cache[weight_proof.get_hash()] = valid, fork_point, summaries, block_records
end_validation = time.time()
self.log.info(f"It took {end_validation - start_validation} time to validate the weight proof")
return valid, weight_proof, summaries, block_records
async def get_puzzle_hashes_to_subscribe(self) -> List[bytes32]:
assert self.wallet_state_manager is not None
all_puzzle_hashes = list(await self.wallet_state_manager.puzzle_store.get_all_puzzle_hashes())
# Get all phs from interested store
interested_puzzle_hashes = [
t[0] for t in await self.wallet_state_manager.interested_store.get_interested_puzzle_hashes()
]
all_puzzle_hashes.extend(interested_puzzle_hashes)
return all_puzzle_hashes
async def get_coin_ids_to_subscribe(self, min_height: int) -> List[bytes32]:
assert self.wallet_state_manager is not None
all_coins: Set[WalletCoinRecord] = await self.wallet_state_manager.coin_store.get_coins_to_check(min_height)
all_coin_names: Set[bytes32] = {coin_record.name() for coin_record in all_coins}
removed_dict = await self.wallet_state_manager.trade_manager.get_coins_of_interest()
all_coin_names.update(removed_dict.keys())
all_coin_names.update(await self.wallet_state_manager.interested_store.get_interested_coin_ids())
return list(all_coin_names)
async def validate_received_state_from_peer(
self,
coin_state: CoinState,
peer: WSChiaConnection,
peer_request_cache: PeerRequestCache,
fork_height: Optional[uint32],
) -> bool:
"""
Returns all state that is valid and included in the blockchain proved by the weight proof. If return_old_states
is False, only new states that are not in the coin_store are returned.
"""
assert self.wallet_state_manager is not None
# Only use the cache if we are talking about states before the fork point. If we are evaluating something
# in a reorg, we cannot use the cache, since we don't know if it's actually in the new chain after the reorg.
if await can_use_peer_request_cache(coin_state, peer_request_cache, fork_height):
return True
spent_height = coin_state.spent_height
confirmed_height = coin_state.created_height
current = await self.wallet_state_manager.coin_store.get_coin_record(coin_state.coin.name())
# if remote state is same as current local state we skip validation
# CoinRecord unspent = height 0, coin state = None. We adjust for comparison below
current_spent_height = None
if current is not None and current.spent_block_height != 0:
current_spent_height = current.spent_block_height
# Same as current state, nothing to do
if (
current is not None
and current_spent_height == spent_height
and current.confirmed_block_height == confirmed_height
):
peer_request_cache.add_to_states_validated(coin_state)
return True
reorg_mode = False
# If coin was removed from the blockchain
if confirmed_height is None:
if current is None:
# Coin does not exist in local DB, so no need to do anything
return False
# This coin got reorged
reorg_mode = True
confirmed_height = current.confirmed_block_height
# request header block for created height
state_block: Optional[HeaderBlock] = peer_request_cache.get_block(confirmed_height)
if state_block is None or reorg_mode:
request = RequestHeaderBlocks(confirmed_height, confirmed_height)
res = await peer.request_header_blocks(request)
if res is None:
return False
state_block = res.header_blocks[0]
assert state_block is not None
peer_request_cache.add_to_blocks(state_block)
# get proof of inclusion
assert state_block.foliage_transaction_block is not None
validate_additions_result = await request_and_validate_additions(
peer,
state_block.height,
state_block.header_hash,
coin_state.coin.puzzle_hash,
state_block.foliage_transaction_block.additions_root,
)
if validate_additions_result is False:
self.log.warning("Validate false 1")
await peer.close(9999)
return False
# If spent_height is None, we need to validate that the creation block is actually in the longest blockchain.
# Otherwise, we don't have to, since we will validate the spent block later.
if coin_state.spent_height is None:
validated = await self.validate_block_inclusion(state_block, peer, peer_request_cache)
if not validated:
return False
# TODO: make sure all cases are covered
if current is not None:
if spent_height is None and current.spent_block_height != 0:
# Peer is telling us that coin that was previously known to be spent is not spent anymore
# Check old state
request = RequestHeaderBlocks(current.spent_block_height, current.spent_block_height)
res = await peer.request_header_blocks(request)
spent_state_block = res.header_blocks[0]
assert spent_state_block.height == current.spent_block_height
assert spent_state_block.foliage_transaction_block is not None
peer_request_cache.add_to_blocks(spent_state_block)
validate_removals_result: bool = await request_and_validate_removals(
peer,
current.spent_block_height,
spent_state_block.header_hash,
coin_state.coin.name(),
spent_state_block.foliage_transaction_block.removals_root,
)
if validate_removals_result is False:
self.log.warning("Validate false 2")
await peer.close(9999)
return False
validated = await self.validate_block_inclusion(spent_state_block, peer, peer_request_cache)
if not validated:
return False
if spent_height is not None:
# request header block for created height
spent_state_block = peer_request_cache.get_block(spent_height)
if spent_state_block is None:
request = RequestHeaderBlocks(spent_height, spent_height)
res = await peer.request_header_blocks(request)
spent_state_block = res.header_blocks[0]
assert spent_state_block.height == spent_height
assert spent_state_block.foliage_transaction_block is not None
peer_request_cache.add_to_blocks(spent_state_block)
assert spent_state_block is not None
validate_removals_result = await request_and_validate_removals(
peer,
spent_state_block.height,
spent_state_block.header_hash,
coin_state.coin.name(),
spent_state_block.foliage_transaction_block.removals_root,
)
if validate_removals_result is False:
self.log.warning("Validate false 3")
await peer.close(9999)
return False
validated = await self.validate_block_inclusion(spent_state_block, peer, peer_request_cache)
if not validated:
return False
peer_request_cache.add_to_states_validated(coin_state)
return True
async def validate_block_inclusion(
self, block: HeaderBlock, peer: WSChiaConnection, peer_request_cache: PeerRequestCache
) -> bool:
assert self.wallet_state_manager is not None
assert self.server is not None
if self.wallet_state_manager.blockchain.contains_height(block.height):
stored_hash = self.wallet_state_manager.blockchain.height_to_hash(block.height)
stored_record = self.wallet_state_manager.blockchain.try_block_record(stored_hash)
if stored_record is not None:
if stored_record.header_hash == block.header_hash:
return True
weight_proof: Optional[WeightProof] = self.wallet_state_manager.blockchain.synced_weight_proof
if weight_proof is None:
return False
if block.height >= weight_proof.recent_chain_data[0].height:
# this was already validated as part of the wp validation
index = block.height - weight_proof.recent_chain_data[0].height
if index >= len(weight_proof.recent_chain_data):
return False
if weight_proof.recent_chain_data[index].header_hash != block.header_hash:
self.log.error("Failed validation 1")
return False
return True
else:
start = block.height + 1
compare_to_recent = False
current_ses: Optional[SubEpochData] = None
inserted: Optional[SubEpochData] = None
first_height_recent = weight_proof.recent_chain_data[0].height
if start > first_height_recent - 1000:
compare_to_recent = True
end = first_height_recent
else:
if block.height < self.constants.SUB_EPOCH_BLOCKS:
inserted = weight_proof.sub_epochs[1]
end = self.constants.SUB_EPOCH_BLOCKS + inserted.num_blocks_overflow
else:
request = RequestSESInfo(block.height, block.height + 32)
res_ses: Optional[RespondSESInfo] = peer_request_cache.get_ses_request(block.height)
if res_ses is None:
res_ses = await peer.request_ses_hashes(request)
peer_request_cache.add_to_ses_requests(block.height, res_ses)
assert res_ses is not None
ses_0 = res_ses.reward_chain_hash[0]
last_height = res_ses.heights[0][-1] # Last height in sub epoch
end = last_height
for idx, ses in enumerate(weight_proof.sub_epochs):
if idx > len(weight_proof.sub_epochs) - 3:
break
if ses.reward_chain_hash == ses_0:
current_ses = ses
inserted = weight_proof.sub_epochs[idx + 2]
break
if current_ses is None:
self.log.error("Failed validation 2")
return False
all_peers = self.server.get_full_node_connections()
blocks: Optional[List[HeaderBlock]] = await fetch_header_blocks_in_range(
start, end, peer_request_cache, all_peers
)
if blocks is None:
self.log.error(f"Error fetching blocks {start} {end}")
return False
if compare_to_recent and weight_proof.recent_chain_data[0].header_hash != blocks[-1].header_hash:
self.log.error("Failed validation 3")
return False
reversed_blocks = blocks.copy()
reversed_blocks.reverse()
if not compare_to_recent:
last = reversed_blocks[0].finished_sub_slots[-1].reward_chain.get_hash()
if inserted is None or last != inserted.reward_chain_hash:
self.log.error("Failed validation 4")
return False
for idx, en_block in enumerate(reversed_blocks):
if idx == len(reversed_blocks) - 1:
next_block_rc_hash = block.reward_chain_block.get_hash()
prev_hash = block.header_hash
else:
next_block_rc_hash = reversed_blocks[idx + 1].reward_chain_block.get_hash()
prev_hash = reversed_blocks[idx + 1].header_hash
if not en_block.prev_header_hash == prev_hash:
self.log.error("Failed validation 5")
return False
if len(en_block.finished_sub_slots) > 0:
# What to do here
reversed_slots = en_block.finished_sub_slots.copy()
reversed_slots.reverse()
for slot_idx, slot in enumerate(reversed_slots[:-1]):
hash_val = reversed_slots[slot_idx + 1].reward_chain.get_hash()
if not hash_val == slot.reward_chain.end_of_slot_vdf.challenge:
self.log.error("Failed validation 6")
return False
if not next_block_rc_hash == reversed_slots[-1].reward_chain.end_of_slot_vdf.challenge:
self.log.error("Failed validation 7")
return False
else:
if not next_block_rc_hash == en_block.reward_chain_block.reward_chain_ip_vdf.challenge:
self.log.error("Failed validation 8")
return False
if idx > len(reversed_blocks) - 50:
if not AugSchemeMPL.verify(
en_block.reward_chain_block.proof_of_space.plot_public_key,
en_block.foliage.foliage_block_data.get_hash(),
en_block.foliage.foliage_block_data_signature,
):
self.log.error("Failed validation 9")
return False
return True
async def fetch_puzzle_solution(self, peer: WSChiaConnection, height: uint32, coin: Coin) -> CoinSpend:
solution_response = await peer.request_puzzle_solution(
wallet_protocol.RequestPuzzleSolution(coin.name(), height)
)
if solution_response is None or not isinstance(solution_response, wallet_protocol.RespondPuzzleSolution):
raise ValueError(f"Was not able to obtain solution {solution_response}")
assert solution_response.response.puzzle.get_tree_hash() == coin.puzzle_hash
assert solution_response.response.coin_name == coin.name()
return CoinSpend(
coin,
solution_response.response.puzzle.to_serialized_program(),
solution_response.response.solution.to_serialized_program(),
)
async def get_coin_state(
self, coin_names: List[bytes32], fork_height: Optional[uint32] = None, peer: Optional[WSChiaConnection] = None
) -> List[CoinState]:
assert self.server is not None
all_nodes = self.server.connection_by_type[NodeType.FULL_NODE]
if len(all_nodes.keys()) == 0:
raise ValueError("Not connected to the full node")
# Use supplied if provided, prioritize trusted otherwise
if peer is None:
for node in list(all_nodes.values()):
if self.is_trusted(node):
peer = node
break
if peer is None:
peer = list(all_nodes.values())[0]
assert peer is not None
msg = wallet_protocol.RegisterForCoinUpdates(coin_names, uint32(0))
coin_state: Optional[RespondToCoinUpdates] = await peer.register_interest_in_coin(msg)
assert coin_state is not None
if not self.is_trusted(peer):
valid_list = []
for coin in coin_state.coin_states:
valid = await self.validate_received_state_from_peer(
coin, peer, self.get_cache_for_peer(peer), fork_height
)
if valid:
valid_list.append(coin)
return valid_list
return coin_state.coin_states
async def fetch_children(
self, peer: WSChiaConnection, coin_name: bytes32, fork_height: Optional[uint32] = None
) -> List[CoinState]:
response: Optional[wallet_protocol.RespondChildren] = await peer.request_children(
wallet_protocol.RequestChildren(coin_name)
)
if response is None or not isinstance(response, wallet_protocol.RespondChildren):
raise ValueError(f"Was not able to obtain children {response}")
if not self.is_trusted(peer):
request_cache = self.get_cache_for_peer(peer)
validated = []
for state in response.coin_states:
valid = await self.validate_received_state_from_peer(state, peer, request_cache, fork_height)
if valid:
validated.append(state)
return validated
return response.coin_states
# For RPC only. You should use wallet_state_manager.add_pending_transaction for normal wallet business.
async def push_tx(self, spend_bundle):
msg = make_msg(
ProtocolMessageTypes.send_transaction,
wallet_protocol.SendTransaction(spend_bundle),
)
full_nodes = self.server.get_full_node_connections()
for peer in full_nodes:
await peer.send_message(msg)
| import asyncio
import json
import logging
import random
import time
import traceback
from asyncio import CancelledError
from pathlib import Path
from typing import Any, Callable, Dict, Iterator, List, Optional, Set, Tuple
from blspy import AugSchemeMPL, PrivateKey
from packaging.version import Version
from chia.consensus.block_record import BlockRecord
from chia.consensus.blockchain import ReceiveBlockResult
from chia.consensus.constants import ConsensusConstants
from chia.daemon.keychain_proxy import (
KeychainProxy,
KeychainProxyConnectionFailure,
KeyringIsEmpty,
connect_to_keychain_and_validate,
wrap_local_keychain,
)
from chia.protocols import wallet_protocol
from chia.protocols.full_node_protocol import RequestProofOfWeight, RespondProofOfWeight
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.protocols.wallet_protocol import (
CoinState,
RequestHeaderBlocks,
RequestSESInfo,
RespondBlockHeader,
RespondSESInfo,
RespondToCoinUpdates,
RespondToPhUpdates,
)
from chia.server.node_discovery import WalletPeers
from chia.server.outbound_message import Message, NodeType, make_msg
from chia.server.peer_store_resolver import PeerStoreResolver
from chia.server.server import ChiaServer
from chia.server.ws_connection import WSChiaConnection
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chia.types.coin_spend import CoinSpend
from chia.types.header_block import HeaderBlock
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.peer_info import PeerInfo
from chia.types.weight_proof import SubEpochData, WeightProof
from chia.util.byte_types import hexstr_to_bytes
from chia.util.chunks import chunks
from chia.util.config import WALLET_PEERS_PATH_KEY_DEPRECATED
from chia.util.default_root import STANDALONE_ROOT_PATH
from chia.util.ints import uint32, uint64
from chia.util.keychain import Keychain, KeyringIsLocked
from chia.util.path import mkdir, path_from_root
from chia.util.profiler import profile_task
from chia.wallet.transaction_record import TransactionRecord
from chia.wallet.util.new_peak_queue import NewPeakItem, NewPeakQueue, NewPeakQueueTypes
from chia.wallet.util.peer_request_cache import PeerRequestCache, can_use_peer_request_cache
from chia.wallet.util.wallet_sync_utils import (
fetch_header_blocks_in_range,
fetch_last_tx_from_peer,
last_change_height_cs,
request_and_validate_additions,
request_and_validate_removals,
subscribe_to_coin_updates,
subscribe_to_phs,
)
from chia.wallet.wallet_action import WalletAction
from chia.wallet.wallet_coin_record import WalletCoinRecord
from chia.wallet.wallet_state_manager import WalletStateManager
class WalletNode:
key_config: Dict
config: Dict
constants: ConsensusConstants
server: Optional[ChiaServer]
log: logging.Logger
# Maintains the state of the wallet (blockchain and transactions), handles DB connections
wallet_state_manager: Optional[WalletStateManager]
_shut_down: bool
root_path: Path
state_changed_callback: Optional[Callable]
syncing: bool
full_node_peer: Optional[PeerInfo]
peer_task: Optional[asyncio.Task]
logged_in: bool
wallet_peers_initialized: bool
keychain_proxy: Optional[KeychainProxy]
wallet_peers: Optional[WalletPeers]
race_cache: Dict[bytes32, Set[CoinState]]
race_cache_hashes: List[Tuple[uint32, bytes32]]
new_peak_queue: NewPeakQueue
_process_new_subscriptions_task: Optional[asyncio.Task]
_secondary_peer_sync_task: Optional[asyncio.Task]
node_peaks: Dict[bytes32, Tuple[uint32, bytes32]]
validation_semaphore: Optional[asyncio.Semaphore]
local_node_synced: bool
def __init__(
self,
config: Dict,
root_path: Path,
consensus_constants: ConsensusConstants,
name: str = None,
local_keychain: Optional[Keychain] = None,
):
self.config = config
self.constants = consensus_constants
self.root_path = root_path
self.log = logging.getLogger(name if name else __name__)
# Normal operation data
self.cached_blocks: Dict = {}
self.future_block_hashes: Dict = {}
# Sync data
self._shut_down = False
self.proof_hashes: List = []
self.state_changed_callback = None
self.wallet_state_manager = None
self.server = None
self.wsm_close_task = None
self.sync_task: Optional[asyncio.Task] = None
self.logged_in_fingerprint: Optional[int] = None
self.peer_task = None
self.logged_in = False
self.keychain_proxy = None
self.local_keychain = local_keychain
self.height_to_time: Dict[uint32, uint64] = {}
self.synced_peers: Set[bytes32] = set() # Peers that we have long synced to
self.wallet_peers = None
self.wallet_peers_initialized = False
self.valid_wp_cache: Dict[bytes32, Any] = {}
self.untrusted_caches: Dict[bytes32, PeerRequestCache] = {}
self.race_cache = {} # in Untrusted mode wallet might get the state update before receiving the block
self.race_cache_hashes = []
self._process_new_subscriptions_task = None
self._secondary_peer_sync_task = None
self.node_peaks = {}
self.validation_semaphore = None
self.local_node_synced = False
self.LONG_SYNC_THRESHOLD = 200
async def ensure_keychain_proxy(self) -> KeychainProxy:
if self.keychain_proxy is None:
if self.local_keychain:
self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=self.log)
else:
self.keychain_proxy = await connect_to_keychain_and_validate(self.root_path, self.log)
if not self.keychain_proxy:
raise KeychainProxyConnectionFailure("Failed to connect to keychain service")
return self.keychain_proxy
def get_cache_for_peer(self, peer) -> PeerRequestCache:
if peer.peer_node_id not in self.untrusted_caches:
self.untrusted_caches[peer.peer_node_id] = PeerRequestCache()
return self.untrusted_caches[peer.peer_node_id]
def rollback_request_caches(self, reorg_height: int):
# Everything after reorg_height should be removed from the cache
for cache in self.untrusted_caches.values():
cache.clear_after_height(reorg_height)
async def get_key_for_fingerprint(self, fingerprint: Optional[int]) -> Optional[PrivateKey]:
try:
keychain_proxy = await self.ensure_keychain_proxy()
key = await keychain_proxy.get_key_for_fingerprint(fingerprint)
except KeyringIsEmpty:
self.log.warning("No keys present. Create keys with the UI, or with the 'chia keys' program.")
return None
except KeyringIsLocked:
self.log.warning("Keyring is locked")
return None
except KeychainProxyConnectionFailure as e:
tb = traceback.format_exc()
self.log.error(f"Missing keychain_proxy: {e} {tb}")
raise e # Re-raise so that the caller can decide whether to continue or abort
return key
async def _start(
    self,
    fingerprint: Optional[int] = None,
) -> bool:
    """Log in with the key matching ``fingerprint`` (or the default key), open the
    wallet database, create the WalletStateManager and start background tasks.

    Returns False when no matching private key is found, True on success.
    """
    # Makes sure the coin_state_updates get higher priority than new_peak messages
    self.new_peak_queue = NewPeakQueue(asyncio.PriorityQueue())
    self.synced_peers = set()
    private_key = await self.get_key_for_fingerprint(fingerprint)
    if private_key is None:
        self.logged_in = False
        return False
    if self.config.get("enable_profiler", False):
        asyncio.create_task(profile_task(self.root_path, "wallet", self.log))
    # The DB filename encodes the selected network and the key fingerprint.
    db_path_key_suffix = str(private_key.get_g1().get_fingerprint())
    db_path_replaced: str = (
        self.config["database_path"]
        .replace("CHALLENGE", self.config["selected_network"])
        .replace("KEY", db_path_key_suffix)
    )
    path = path_from_root(self.root_path, db_path_replaced.replace("v1", "v2"))
    mkdir(path.parent)
    # One-time migration: copy a standalone v1 wallet DB into place if no v2 DB exists yet.
    standalone_path = path_from_root(STANDALONE_ROOT_PATH, f"{db_path_replaced.replace('v2', 'v1')}_new")
    if not path.exists():
        if standalone_path.exists():
            self.log.info(f"Copying wallet db from {standalone_path} to {path}")
            path.write_bytes(standalone_path.read_bytes())
    assert self.server is not None
    self.wallet_state_manager = await WalletStateManager.create(
        private_key,
        self.config,
        path,
        self.constants,
        self.server,
        self.root_path,
        self,
    )
    assert self.wallet_state_manager is not None
    self.config["starting_height"] = 0
    if self.wallet_peers is None:
        self.initialize_wallet_peers()
    if self.state_changed_callback is not None:
        self.wallet_state_manager.set_callback(self.state_changed_callback)
        self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
    self._shut_down = False
    self._process_new_subscriptions_task = asyncio.create_task(self._process_new_subscriptions())
    self.sync_event = asyncio.Event()
    if fingerprint is None:
        self.logged_in_fingerprint = private_key.get_g1().get_fingerprint()
    else:
        self.logged_in_fingerprint = fingerprint
    self.logged_in = True
    self.wallet_state_manager.set_sync_mode(False)
    # Ensure we have at least initial_num_public_keys derived puzzle hashes before syncing.
    async with self.wallet_state_manager.puzzle_store.lock:
        index = await self.wallet_state_manager.puzzle_store.get_last_derivation_path()
        if index is None or index < self.config["initial_num_public_keys"] - 1:
            await self.wallet_state_manager.create_more_puzzle_hashes(from_zero=True)
    self.wsm_close_task = None
    return True
def _close(self):
self.log.info("self._close")
self.logged_in_fingerprint = None
self._shut_down = True
if self._process_new_subscriptions_task is not None:
self._process_new_subscriptions_task.cancel()
if self._secondary_peer_sync_task is not None:
self._secondary_peer_sync_task.cancel()
async def _await_closed(self, shutting_down: bool = True):
    """Wait for all wallet components to shut down cleanly.

    Order matters: network connections first, then peer discovery, then the wallet
    state manager. The keychain proxy is only torn down on a full shutdown
    (``shutting_down=True``), not on a per-key logout.
    """
    self.log.info("self._await_closed")
    if self.server is not None:
        await self.server.close_all_connections()
    if self.wallet_peers is not None:
        await self.wallet_peers.ensure_is_closed()
    if self.wallet_state_manager is not None:
        await self.wallet_state_manager._await_closed()
        self.wallet_state_manager = None
    if shutting_down and self.keychain_proxy is not None:
        # Detach the proxy before closing so nothing else uses it mid-close.
        proxy = self.keychain_proxy
        self.keychain_proxy = None
        await proxy.close()
        await asyncio.sleep(0.5)  # https://docs.aiohttp.org/en/stable/client_advanced.html#graceful-shutdown
    self.logged_in = False
    self.wallet_peers = None
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
if self.wallet_state_manager is not None:
self.wallet_state_manager.set_callback(self.state_changed_callback)
self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
def _pending_tx_handler(self):
if self.wallet_state_manager is None:
return None
asyncio.create_task(self._resend_queue())
async def _action_messages(self) -> List[Message]:
    """Build the protocol messages for all pending wallet actions."""
    if self.wallet_state_manager is None:
        return []
    pending: List[WalletAction] = await self.wallet_state_manager.action_store.get_all_pending_actions()
    result: List[Message] = []
    for action in pending:
        parsed = json.loads(action.data)
        action_data = parsed["data"]["action_data"]
        if action.name == "request_puzzle_solution":
            # Ask a full node for the puzzle and solution of a specific spent coin.
            result.append(
                make_msg(
                    ProtocolMessageTypes.request_puzzle_solution,
                    wallet_protocol.RequestPuzzleSolution(
                        bytes32(hexstr_to_bytes(action_data["coin_name"])),
                        uint32(action_data["height"]),
                    ),
                )
            )
    return result
async def _resend_queue(self):
    """Resend unconfirmed transactions and pending action messages to full nodes."""

    def _stopped() -> bool:
        # Shutdown or logout can happen between awaits; re-check before every send.
        return self._shut_down or self.server is None or self.wallet_state_manager is None

    if _stopped():
        return None
    for msg, sent_peers in await self._messages_to_resend():
        if _stopped():
            return None
        for peer in self.server.get_full_node_connections():
            if peer.peer_node_id not in sent_peers:
                self.log.debug(f"sending: {msg}")
                await peer.send_message(msg)
    for msg in await self._action_messages():
        if _stopped():
            return None
        await self.server.send_to_all([msg], NodeType.FULL_NODE)
async def _messages_to_resend(self) -> List[Tuple[Message, Set[bytes32]]]:
    """Collect send_transaction messages for transactions not yet confirmed.

    Returns one ``(message, already_sent_peer_ids)`` pair per resendable transaction
    so callers can skip peers whose mempool already accepted it.
    """
    if self.wallet_state_manager is None or self._shut_down:
        return []
    messages: List[Tuple[Message, Set[bytes32]]] = []
    for record in await self.wallet_state_manager.tx_store.get_not_sent():
        if record.spend_bundle is None:
            continue
        msg = make_msg(
            ProtocolMessageTypes.send_transaction,
            wallet_protocol.SendTransaction(record.spend_bundle),
        )
        accepted_by = {
            bytes32.from_hexstr(peer)
            for peer, status, _ in record.sent_to
            if status == MempoolInclusionStatus.SUCCESS.value
        }
        messages.append((msg, accepted_by))
    return messages
async def _process_new_subscriptions(self):
    """Background task: drain ``new_peak_queue`` until shutdown.

    Items are handled in priority order (highest first): coin-id subscriptions,
    puzzle-hash subscriptions, full node state updates, then new peaks.
    """
    while not self._shut_down:
        # Here we process four types of messages in the queue, where the first one has higher priority (lower
        # number in the queue), and priority decreases for each type.
        peer: Optional[WSChiaConnection] = None
        item: Optional[NewPeakItem] = None
        try:
            peer, item = None, None
            item = await self.new_peak_queue.get()
            self.log.debug("Pulled from queue: %s", item)
            assert item is not None
            if item.item_type == NewPeakQueueTypes.COIN_ID_SUBSCRIPTION:
                # Subscriptions are the highest priority, because we don't want to process any more peaks or
                # state updates until we are sure that we subscribed to everything that we need to. Otherwise,
                # we might not be able to process some state.
                coin_ids: List[bytes32] = item.data
                for peer in self.server.get_full_node_connections():
                    coin_states: List[CoinState] = await subscribe_to_coin_updates(coin_ids, peer, uint32(0))
                    if len(coin_states) > 0:
                        async with self.wallet_state_manager.lock:
                            await self.receive_state_from_peer(coin_states, peer)
            elif item.item_type == NewPeakQueueTypes.PUZZLE_HASH_SUBSCRIPTION:
                puzzle_hashes: List[bytes32] = item.data
                for peer in self.server.get_full_node_connections():
                    # Puzzle hash subscription
                    coin_states: List[CoinState] = await subscribe_to_phs(puzzle_hashes, peer, uint32(0))
                    if len(coin_states) > 0:
                        async with self.wallet_state_manager.lock:
                            await self.receive_state_from_peer(coin_states, peer)
            elif item.item_type == NewPeakQueueTypes.FULL_NODE_STATE_UPDATED:
                # Note: this can take a while when we have a lot of transactions. We want to process these
                # before new_peaks, since new_peak_wallet requires that we first obtain the state for that peak.
                request: wallet_protocol.CoinStateUpdate = item.data[0]
                peer = item.data[1]
                assert peer is not None
                await self.state_update_received(request, peer)
            elif item.item_type == NewPeakQueueTypes.NEW_PEAK_WALLET:
                # This can take a VERY long time, because it might trigger a long sync. It is OK if we miss some
                # subscriptions or state updates, since all subscriptions and state updates will be handled by
                # long_sync (up to the target height).
                request: wallet_protocol.NewPeakWallet = item.data[0]
                peer = item.data[1]
                assert peer is not None
                await self.new_peak_wallet(request, peer)
            else:
                assert False
        except CancelledError:
            self.log.info("Queue task cancelled, exiting.")
            raise
        except Exception as e:
            self.log.error(f"Exception handling {item}, {e} {traceback.format_exc()}")
            if peer is not None:
                # Drop the peer that caused the failure so it can reconnect cleanly.
                await peer.close(9999)
def set_server(self, server: ChiaServer):
    """Attach the ChiaServer instance and (re)initialize wallet peer discovery."""
    self.server = server
    self.initialize_wallet_peers()
def initialize_wallet_peers(self):
    """Set up the WalletPeers discovery service (untrusted peer crawler) if enabled."""
    self.server.on_connect = self.on_connect
    network_name = self.config["selected_network"]
    connect_to_unknown_peers = self.config.get("connect_to_unknown_peers", True)
    testing = self.config.get("testing", False)
    # Discovery is skipped entirely when already running, disabled by config, or in testing mode.
    if self.wallet_peers is not None or not connect_to_unknown_peers or testing:
        return
    resolver = PeerStoreResolver(
        self.root_path,
        self.config,
        selected_network=network_name,
        peers_file_path_key="wallet_peers_file_path",
        legacy_peer_db_path_key=WALLET_PEERS_PATH_KEY_DEPRECATED,
        default_peers_file_path="wallet/db/wallet_peers.dat",
    )
    self.wallet_peers = WalletPeers(
        self.server,
        self.config["target_peer_count"],
        resolver,
        self.config["introducer_peer"],
        self.config.get("dns_servers", ["dns-introducer.chia.net"]),
        self.config["peer_connect_interval"],
        network_name,
        None,
        self.log,
    )
    asyncio.create_task(self.wallet_peers.start())
def on_disconnect(self, peer: WSChiaConnection):
    """Drop all per-peer state when a full node connection closes."""
    if self.is_trusted(peer):
        # Losing the trusted node means we can no longer assume local sync.
        self.local_node_synced = False
        self.initialize_wallet_peers()
    node_id = peer.peer_node_id
    self.untrusted_caches.pop(node_id, None)
    self.synced_peers.discard(node_id)
    self.node_peaks.pop(node_id, None)
async def on_connect(self, peer: WSChiaConnection):
    """Handle a new full node connection: version-gate it, resend unconfirmed
    transactions the peer hasn't seen, and notify the UI.

    Closes (and stops using) the connection when the peer runs old software, or when
    it is untrusted and we are already synced against a trusted local node.
    """
    if self.wallet_state_manager is None:
        return None
    if Version(peer.protocol_version) < Version("0.0.33"):
        self.log.info("Disconnecting, full node running old software")
        await peer.close()
        # Bug fix: previously execution fell through and kept sending messages on the
        # connection we just closed.
        return None
    trusted = self.is_trusted(peer)
    if not trusted and self.local_node_synced:
        # We are synced against a trusted node; extra untrusted peers are not needed.
        await peer.close()
        return None
    if peer.peer_node_id in self.synced_peers:
        self.synced_peers.remove(peer.peer_node_id)
    self.log.info(f"Connected peer {peer.get_peer_info()} is trusted: {trusted}")
    messages_peer_ids = await self._messages_to_resend()
    self.wallet_state_manager.state_changed("add_connection")
    for msg, peer_ids in messages_peer_ids:
        if peer.peer_node_id in peer_ids:
            continue
        await peer.send_message(msg)
    if self.wallet_peers is not None:
        await self.wallet_peers.on_connect(peer)
async def perform_atomic_rollback(self, fork_height: int, cache: Optional[PeerRequestCache] = None):
    """Roll the wallet DB back to ``fork_height`` inside a single transaction.

    On failure, the transaction is rolled back and the in-memory caches are rebuilt
    so they cannot diverge from the DB; the exception is re-raised. When ``cache``
    is given, only that peer's request cache is cleared; otherwise all are.
    """
    assert self.wallet_state_manager is not None
    self.log.info(f"perform_atomic_rollback to {fork_height}")
    async with self.wallet_state_manager.db_wrapper.lock:
        try:
            await self.wallet_state_manager.db_wrapper.begin_transaction()
            removed_wallet_ids = await self.wallet_state_manager.reorg_rollback(fork_height)
            await self.wallet_state_manager.blockchain.set_finished_sync_up_to(fork_height, True)
            if cache is None:
                self.rollback_request_caches(fork_height)
            else:
                cache.clear_after_height(fork_height)
            await self.wallet_state_manager.db_wrapper.commit_transaction()
        except Exception as e:
            tb = traceback.format_exc()
            self.log.error(f"Exception while perform_atomic_rollback: {e} {tb}")
            await self.wallet_state_manager.db_wrapper.rollback_transaction()
            # Caches may now be stale relative to the DB; rebuild them from scratch.
            await self.wallet_state_manager.coin_store.rebuild_wallet_cache()
            await self.wallet_state_manager.tx_store.rebuild_tx_cache()
            await self.wallet_state_manager.pool_store.rebuild_cache()
            raise
        else:
            await self.wallet_state_manager.blockchain.clean_block_records()
            # Wallets whose coins were entirely rolled back no longer exist.
            for wallet_id in removed_wallet_ids:
                self.wallet_state_manager.wallets.pop(wallet_id)
async def long_sync(
    self,
    target_height: uint32,
    full_node: WSChiaConnection,
    fork_height: int,
    *,
    rollback: bool,
):
    """
    Sync algorithm:
    - Download and verify weight proof (if not trusted)
    - Roll back anything after the fork point (if rollback=True)
    - Subscribe to all puzzle_hashes over and over until there are no more updates
    - Subscribe to all coin_ids over and over until there are no more updates
    - rollback=False means that we are just double-checking with this peer to make sure we don't have any
      missing transactions, so we don't need to rollback
    """

    def is_new_state_update(cs: CoinState) -> bool:
        # "New" means unconfirmed, or created/spent at or above the fork height.
        if cs.spent_height is None and cs.created_height is None:
            return True
        if cs.spent_height is not None and cs.spent_height >= fork_height:
            return True
        if cs.created_height is not None and cs.created_height >= fork_height:
            return True
        return False

    trusted: bool = self.is_trusted(full_node)
    self.log.info(f"Starting sync trusted: {trusted} to peer {full_node.peer_host}")
    assert self.wallet_state_manager is not None
    start_time = time.time()

    if rollback:
        # we should clear all peers since this is a full rollback
        await self.perform_atomic_rollback(fork_height)
        await self.update_ui()

    # We only process new state updates to avoid slow reprocessing. We set the sync height after adding
    # Things, so we don't have to reprocess these later. There can be many things in ph_update_res.
    already_checked_ph: Set[bytes32] = set()
    continue_while: bool = True
    all_puzzle_hashes: List[bytes32] = await self.get_puzzle_hashes_to_subscribe()
    # Loop until a full pass produces no puzzle hashes we haven't subscribed to yet
    # (processing state can derive new addresses, which need subscriptions too).
    while continue_while:
        # Get all phs from puzzle store
        ph_chunks: Iterator[List[bytes32]] = chunks(all_puzzle_hashes, 1000)
        for chunk in ph_chunks:
            ph_update_res: List[CoinState] = await subscribe_to_phs(
                [p for p in chunk if p not in already_checked_ph], full_node, 0
            )
            ph_update_res = list(filter(is_new_state_update, ph_update_res))
            if not await self.receive_state_from_peer(ph_update_res, full_node, update_finished_height=True):
                # If something goes wrong, abort sync
                return
            already_checked_ph.update(chunk)

        # Check if new puzzle hashed have been created
        await self.wallet_state_manager.create_more_puzzle_hashes()
        all_puzzle_hashes = await self.get_puzzle_hashes_to_subscribe()
        continue_while = False
        for ph in all_puzzle_hashes:
            if ph not in already_checked_ph:
                continue_while = True
                break
    self.log.info(f"Successfully subscribed and updated {len(already_checked_ph)} puzzle hashes")

    # The number of coin id updates are usually going to be significantly less than ph updates, so we can
    # sync from 0 every time.
    continue_while = True
    all_coin_ids: List[bytes32] = await self.get_coin_ids_to_subscribe(0)
    already_checked_coin_ids: Set[bytes32] = set()
    while continue_while:
        one_k_chunks = chunks(all_coin_ids, 1000)
        for chunk in one_k_chunks:
            c_update_res: List[CoinState] = await subscribe_to_coin_updates(chunk, full_node, 0)

            if not await self.receive_state_from_peer(c_update_res, full_node):
                # If something goes wrong, abort sync
                return
            already_checked_coin_ids.update(chunk)

        all_coin_ids = await self.get_coin_ids_to_subscribe(0)
        continue_while = False
        for coin_id in all_coin_ids:
            if coin_id not in already_checked_coin_ids:
                continue_while = True
                break
    self.log.info(f"Successfully subscribed and updated {len(already_checked_coin_ids)} coin ids")

    # Only update this fully when the entire sync has completed
    await self.wallet_state_manager.blockchain.set_finished_sync_up_to(target_height)

    if trusted:
        self.local_node_synced = True
        self.wallet_state_manager.state_changed("new_block")

    self.synced_peers.add(full_node.peer_node_id)
    await self.update_ui()

    end_time = time.time()
    duration = end_time - start_time
    self.log.info(f"Sync (trusted: {trusted}) duration was: {duration}")
async def receive_state_from_peer(
    self,
    items_input: List[CoinState],
    peer: WSChiaConnection,
    fork_height: Optional[uint32] = None,
    height: Optional[uint32] = None,
    header_hash: Optional[bytes32] = None,
    update_finished_height: bool = False,
) -> bool:
    """Apply a batch of coin states from ``peer`` to the wallet state manager.

    Trusted peers: states are applied serially in large chunks, without validation.
    Untrusted peers: states are validated concurrently (bounded by a semaphore and
    ``target_concurrent_tasks``) and applied under the DB lock.
    Returns False when the sync should be aborted (shutdown, peer disconnected, or
    a DB error while applying trusted state); True otherwise.
    """
    # Adds the state to the wallet state manager. If the peer is trusted, we do not validate. If the peer is
    # untrusted we do, but we might not add the state, since we need to receive the new_peak message as well.
    if self.wallet_state_manager is None:
        return False
    trusted = self.is_trusted(peer)
    # Validate states in parallel, apply serial
    # TODO: optimize fetching
    if self.validation_semaphore is None:
        self.validation_semaphore = asyncio.Semaphore(6)

    # Rollback is handled in wallet_short_sync_backtrack for untrusted peers, so we don't need to do it here.
    # Also it's not safe to rollback, an untrusted peer can give us old fork point and make our TX dissapear.
    # wallet_short_sync_backtrack can safely rollback because we validated the weight for the new peak so we
    # know the peer is telling the truth about the reorg.

    # If there is a fork, we need to ensure that we roll back in trusted mode to properly handle reorgs
    cache: PeerRequestCache = self.get_cache_for_peer(peer)
    if trusted and fork_height is not None and height is not None and fork_height != height - 1:
        # only one peer told us to rollback so only clear for that peer
        await self.perform_atomic_rollback(fork_height, cache=cache)
    else:
        if fork_height is not None:
            # only one peer told us to rollback so only clear for that peer
            cache.clear_after_height(fork_height)
            self.log.info(f"clear_after_height {fork_height} for peer {peer}")

    all_tasks: List[asyncio.Task] = []
    target_concurrent_tasks: int = 20
    # Heights of in-flight validation tasks; used to compute a safe synced-up-to height.
    concurrent_tasks_cs_heights: List[uint32] = []

    # Ensure the list is sorted
    items = sorted(items_input, key=last_change_height_cs)

    async def receive_and_validate(inner_states: List[CoinState], inner_idx_start: int, cs_heights: List[uint32]):
        # Validate one chunk of states (untrusted path) and apply the valid ones in a DB transaction.
        assert self.wallet_state_manager is not None
        try:
            assert self.validation_semaphore is not None
            async with self.validation_semaphore:
                if header_hash is not None:
                    assert height is not None
                    for inner_state in inner_states:
                        self.add_state_to_race_cache(header_hash, height, inner_state)
                        self.log.info(f"Added to race cache: {height}, {inner_state}")
                valid_states = [
                    inner_state
                    for inner_state in inner_states
                    if await self.validate_received_state_from_peer(inner_state, peer, cache, fork_height)
                ]
                if len(valid_states) > 0:
                    async with self.wallet_state_manager.db_wrapper.lock:
                        self.log.info(
                            f"new coin state received ({inner_idx_start}-"
                            f"{inner_idx_start + len(inner_states) - 1}/ {len(items)})"
                        )
                        if self.wallet_state_manager is None:
                            return
                        try:
                            await self.wallet_state_manager.db_wrapper.begin_transaction()
                            await self.wallet_state_manager.new_coin_state(valid_states, peer, fork_height)

                            if update_finished_height:
                                if len(cs_heights) == 1:
                                    # We have processed all past tasks, so we can increase the height safely
                                    synced_up_to = last_change_height_cs(valid_states[-1]) - 1
                                else:
                                    # We know we have processed everything before this min height
                                    synced_up_to = min(cs_heights) - 1
                                await self.wallet_state_manager.blockchain.set_finished_sync_up_to(
                                    synced_up_to, in_transaction=True
                                )
                            await self.wallet_state_manager.db_wrapper.commit_transaction()

                        except Exception as e:
                            tb = traceback.format_exc()
                            self.log.error(f"Exception while adding state: {e} {tb}")
                            await self.wallet_state_manager.db_wrapper.rollback_transaction()
                            # Rebuild caches so they cannot diverge from the rolled-back DB.
                            await self.wallet_state_manager.coin_store.rebuild_wallet_cache()
                            await self.wallet_state_manager.tx_store.rebuild_tx_cache()
                            await self.wallet_state_manager.pool_store.rebuild_cache()
                        else:
                            await self.wallet_state_manager.blockchain.clean_block_records()

        except Exception as e:
            tb = traceback.format_exc()
            self.log.error(f"Exception while adding state: {e} {tb}")
        finally:
            # Always release this chunk's height so waiting producers can proceed.
            cs_heights.remove(last_change_height_cs(inner_states[0]))

    idx = 1
    # Keep chunk size below 1000 just in case, windows has sqlite limits of 999 per query
    # Untrusted has a smaller batch size since validation has to happen which takes a while
    chunk_size: int = 900 if trusted else 10
    for states in chunks(items, chunk_size):
        if self.server is None:
            self.log.error("No server")
            await asyncio.gather(*all_tasks)
            return False
        if peer.peer_node_id not in self.server.all_connections:
            self.log.error(f"Disconnected from peer {peer.peer_node_id} host {peer.peer_host}")
            await asyncio.gather(*all_tasks)
            return False
        if trusted:
            async with self.wallet_state_manager.db_wrapper.lock:
                try:
                    self.log.info(f"new coin state received ({idx}-" f"{idx + len(states) - 1}/ {len(items)})")
                    await self.wallet_state_manager.db_wrapper.begin_transaction()
                    await self.wallet_state_manager.new_coin_state(states, peer, fork_height)
                    await self.wallet_state_manager.blockchain.set_finished_sync_up_to(
                        last_change_height_cs(states[-1]) - 1, in_transaction=True
                    )
                    await self.wallet_state_manager.db_wrapper.commit_transaction()
                except Exception as e:
                    await self.wallet_state_manager.db_wrapper.rollback_transaction()
                    await self.wallet_state_manager.coin_store.rebuild_wallet_cache()
                    await self.wallet_state_manager.tx_store.rebuild_tx_cache()
                    await self.wallet_state_manager.pool_store.rebuild_cache()
                    tb = traceback.format_exc()
                    self.log.error(f"Error adding states.. {e} {tb}")
                    return False
                else:
                    await self.wallet_state_manager.blockchain.clean_block_records()
        else:
            # Back-pressure: wait until a validation slot frees up.
            while len(concurrent_tasks_cs_heights) >= target_concurrent_tasks:
                await asyncio.sleep(0.1)
                if self._shut_down:
                    self.log.info("Terminating receipt and validation due to shut down request")
                    await asyncio.gather(*all_tasks)
                    return False
            concurrent_tasks_cs_heights.append(last_change_height_cs(states[0]))
            all_tasks.append(asyncio.create_task(receive_and_validate(states, idx, concurrent_tasks_cs_heights)))
        idx += len(states)

    still_connected = self.server is not None and peer.peer_node_id in self.server.all_connections
    await asyncio.gather(*all_tasks)
    await self.update_ui()
    return still_connected and self.server is not None and peer.peer_node_id in self.server.all_connections
async def get_coins_with_puzzle_hash(self, puzzle_hash) -> List[CoinState]:
    """Ask one connected full node for all coin states matching ``puzzle_hash``."""
    assert self.wallet_state_manager is not None
    assert self.server is not None
    full_nodes = self.server.connection_by_type[NodeType.FULL_NODE]
    if not full_nodes:
        raise ValueError("Not connected to the full node")
    node = next(iter(full_nodes.values()))
    request = wallet_protocol.RegisterForPhUpdates(puzzle_hash, uint32(0))
    response: Optional[RespondToPhUpdates] = await node.register_interest_in_puzzle_hash(request)
    assert response is not None
    return response.coin_states
async def is_peer_synced(
    self, peer: WSChiaConnection, header_block: HeaderBlock, request_time: uint64
) -> Optional[uint64]:
    """Return the peer's latest transaction-block timestamp, or None if it looks unsynced."""
    last_tx: Optional[HeaderBlock] = await fetch_last_tx_from_peer(header_block.height, peer)
    if last_tx is None:
        return None
    assert last_tx.foliage_transaction_block is not None
    latest_timestamp = last_tx.foliage_transaction_block.timestamp
    # Outside of testing, a timestamp more than 10 minutes behind means the peer is not synced.
    if self.config["testing"] is False and latest_timestamp < request_time - 600:
        return None
    return latest_timestamp
def is_trusted(self, peer) -> bool:
    """True when ``peer`` appears in the configured trusted_peers list."""
    assert self.server is not None
    trusted_config = self.config["trusted_peers"]
    return self.server.is_trusted_peer(peer, trusted_config)
def add_state_to_race_cache(self, header_hash: bytes32, height: uint32, coin_state: CoinState) -> None:
    """Cache a coin state received just before its block's new_peak, keyed by header hash.

    Entries more than ``delete_threshold`` blocks below ``height`` are evicted first;
    the state is then added to the (possibly new) set for ``header_hash``.
    """
    # Clears old state that is no longer relevant
    delete_threshold = 100
    cutoff = height - delete_threshold
    for rc_height, rc_hh in self.race_cache_hashes:
        if cutoff >= rc_height:
            # Bug fix: pop with a default — the same header hash can appear more than
            # once in race_cache_hashes, and a plain pop raised KeyError the second time.
            self.race_cache.pop(rc_hh, None)
    self.race_cache_hashes = [
        (rc_height, rc_hh) for rc_height, rc_hh in self.race_cache_hashes if cutoff < rc_height
    ]

    self.race_cache.setdefault(header_hash, set()).add(coin_state)
async def state_update_received(self, request: wallet_protocol.CoinStateUpdate, peer: WSChiaConnection) -> None:
    """Process a CoinStateUpdate pushed by a full node for coins/hashes we subscribed to."""
    # This gets called every time there is a new coin or puzzle hash change in the DB
    # that is of interest to this wallet. It is not guaranteed to come for every height. This message is guaranteed
    # to come before the corresponding new_peak for each height. We handle this differently for trusted and
    # untrusted peers. For trusted, we always process the state, and we process reorgs as well.
    assert self.wallet_state_manager is not None
    assert self.server is not None
    # Serialize state application against other sync work.
    async with self.wallet_state_manager.lock:
        await self.receive_state_from_peer(
            request.items,
            peer,
            request.fork_height,
            request.height,
            request.peak_hash,
        )
def get_full_node_peer(self) -> Optional[WSChiaConnection]:
    """Return a random connected full node, or None when there are no connections."""
    if self.server is None:
        return None
    nodes = self.server.get_full_node_connections()
    return random.choice(nodes) if nodes else None
async def disconnect_and_stop_wpeers(self) -> None:
    """Close untrusted full node connections and shut down peer discovery."""
    if self.server is None:
        return
    # Only prune untrusted peers when we have more than one connection to spare.
    connections = self.server.get_full_node_connections()
    if len(connections) > 1:
        for peer in connections:
            if not self.is_trusted(peer):
                await peer.close()
    if self.wallet_peers is not None:
        await self.wallet_peers.ensure_is_closed()
        self.wallet_peers = None
async def check_for_synced_trusted_peer(self, header_block: HeaderBlock, request_time: uint64) -> bool:
    """Return True when any connected trusted full node is synced."""
    if self.server is None:
        return False
    for peer in self.server.get_full_node_connections():
        if not self.is_trusted(peer):
            continue
        if await self.is_peer_synced(peer, header_block, request_time):
            return True
    return False
async def get_timestamp_for_height(self, height: uint32) -> uint64:
    """Return the timestamp of the transaction block at ``height``.

    If that block is not a transaction block, the nearest transaction block below it
    is used. Local caches are consulted first; a peer is queried as a last resort.
    Raises ValueError when no peer is available or the fetch fails.
    """
    if height in self.height_to_time:
        return self.height_to_time[height]

    for cache in self.untrusted_caches.values():
        cached = cache.get_height_timestamp(height)
        if cached is not None:
            return cached

    peer = self.get_full_node_peer()
    if peer is None:
        raise ValueError("Cannot fetch timestamp, no peers")
    self.log.debug(f"Fetching block at height: {height}")
    last_tx_block = await fetch_last_tx_from_peer(height, peer)
    if last_tx_block is None:
        raise ValueError(f"Error fetching blocks from peer {peer.get_peer_info()}")
    assert last_tx_block.foliage_transaction_block is not None
    self.get_cache_for_peer(peer).add_to_blocks(last_tx_block)
    return last_tx_block.foliage_transaction_block.timestamp
async def new_peak_wallet(self, new_peak: wallet_protocol.NewPeakWallet, peer: WSChiaConnection):
    """Handle a new peak announcement from a full node.

    Trusted peers: set the peak, drop untrusted peers, and long-sync once if needed.
    Untrusted peers: either validate a weight proof and long-sync (when far behind or
    never synced to this peer), or short-sync block-by-block with reorg backtracking.
    """
    if self.wallet_state_manager is None:
        # When logging out of wallet
        return
    assert self.server is not None
    request_time = uint64(int(time.time()))
    trusted: bool = self.is_trusted(peer)
    peak_hb: Optional[HeaderBlock] = await self.wallet_state_manager.blockchain.get_peak_block()
    if peak_hb is not None and new_peak.weight < peak_hb.weight:
        # Discards old blocks, but accepts blocks that are equal in weight to peak
        return

    request = wallet_protocol.RequestBlockHeader(new_peak.height)
    response: Optional[RespondBlockHeader] = await peer.request_block_header(request)
    if response is None:
        self.log.warning(f"Peer {peer.get_peer_info()} did not respond in time.")
        await peer.close(120)
        return
    header_block: HeaderBlock = response.header_block

    latest_timestamp: Optional[uint64] = await self.is_peer_synced(peer, header_block, request_time)
    if latest_timestamp is None:
        if trusted:
            self.log.debug(f"Trusted peer {peer.get_peer_info()} is not synced.")
            return
        else:
            self.log.warning(f"Non-trusted peer {peer.get_peer_info()} is not synced, disconnecting")
            await peer.close(120)
            return

    current_height: uint32 = await self.wallet_state_manager.blockchain.get_finished_sync_up_to()
    if self.is_trusted(peer):
        async with self.wallet_state_manager.lock:
            await self.wallet_state_manager.blockchain.set_peak_block(header_block, latest_timestamp)
            # Disconnect from all untrusted peers if our local node is trusted and synced
            await self.disconnect_and_stop_wpeers()

            # Sync to trusted node if we haven't done so yet. As long as we have synced once (and not
            # disconnected), we assume that the full node will continue to give us state updates, so we do
            # not need to resync.
            if peer.peer_node_id not in self.synced_peers:
                if new_peak.height - current_height > self.LONG_SYNC_THRESHOLD:
                    self.wallet_state_manager.set_sync_mode(True)
                await self.long_sync(new_peak.height, peer, uint32(max(0, current_height - 256)), rollback=True)
                self.wallet_state_manager.set_sync_mode(False)
    else:
        far_behind: bool = (
            new_peak.height - self.wallet_state_manager.blockchain.get_peak_height() > self.LONG_SYNC_THRESHOLD
        )

        # check if claimed peak is heavier or same as our current peak
        # if we haven't synced fully to this peer sync again
        if (
            peer.peer_node_id not in self.synced_peers or far_behind
        ) and new_peak.height >= self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
            if await self.check_for_synced_trusted_peer(header_block, request_time):
                self.wallet_state_manager.set_sync_mode(False)
                self.log.info("Cancelling untrusted sync, we are connected to a trusted peer")
                return
            syncing = False
            if far_behind or len(self.synced_peers) == 0:
                syncing = True
                self.wallet_state_manager.set_sync_mode(True)
            try:
                (
                    valid_weight_proof,
                    weight_proof,
                    summaries,
                    block_records,
                ) = await self.fetch_and_validate_the_weight_proof(peer, response.header_block)
                if valid_weight_proof is False:
                    if syncing:
                        self.wallet_state_manager.set_sync_mode(False)
                    await peer.close()
                    return

                if await self.check_for_synced_trusted_peer(header_block, request_time):
                    self.wallet_state_manager.set_sync_mode(False)
                    self.log.info("Cancelling untrusted sync, we are connected to a trusted peer")
                    return
                assert weight_proof is not None
                old_proof = self.wallet_state_manager.blockchain.synced_weight_proof
                if syncing:
                    # This usually happens the first time we start up the wallet. We roll back slightly to be
                    # safe, but we don't want to rollback too much (hence 16)
                    fork_point: int = max(0, current_height - 16)
                else:
                    # In this case we will not rollback so it's OK to check some older updates as well, to ensure
                    # that no recent transactions are being hidden.
                    fork_point = 0
                if old_proof is not None:
                    # If the weight proof fork point is in the past, rollback more to ensure we don't have duplicate
                    # state.
                    wp_fork_point = self.wallet_state_manager.weight_proof_handler.get_fork_point(
                        old_proof, weight_proof
                    )
                    fork_point = min(fork_point, wp_fork_point)

                await self.wallet_state_manager.blockchain.new_weight_proof(weight_proof, block_records)
                if syncing:
                    async with self.wallet_state_manager.lock:
                        self.log.info("Primary peer syncing")
                        await self.long_sync(new_peak.height, peer, fork_point, rollback=True)
                else:
                    # Secondary sync runs in the background; only one may run at a time.
                    if self._secondary_peer_sync_task is None or self._secondary_peer_sync_task.done():
                        self.log.info("Secondary peer syncing")
                        self._secondary_peer_sync_task = asyncio.create_task(
                            self.long_sync(new_peak.height, peer, fork_point, rollback=False)
                        )
                        return
                    else:
                        self.log.info("Will not do secondary sync, there is already another sync task running.")
                        return
                self.log.info(f"New peak wallet.. {new_peak.height} {peer.get_peer_info()} 12")
                if (
                    self.wallet_state_manager.blockchain.synced_weight_proof is None
                    or weight_proof.recent_chain_data[-1].weight
                    > self.wallet_state_manager.blockchain.synced_weight_proof.recent_chain_data[-1].weight
                ):
                    await self.wallet_state_manager.blockchain.new_weight_proof(weight_proof, block_records)
            except Exception as e:
                tb = traceback.format_exc()
                self.log.error(f"Error syncing to {peer.get_peer_info()} {e} {tb}")
                if syncing:
                    self.wallet_state_manager.set_sync_mode(False)
                tb = traceback.format_exc()
                self.log.error(f"Error syncing to {peer.get_peer_info()} {tb}")
                await peer.close()
                return
            if syncing:
                self.wallet_state_manager.set_sync_mode(False)

        else:
            # This is the (untrusted) case where we already synced and are not too far behind. Here we just
            # fetch one by one.
            async with self.wallet_state_manager.lock:
                peak_hb = await self.wallet_state_manager.blockchain.get_peak_block()
                if peak_hb is None or new_peak.weight > peak_hb.weight:
                    backtrack_fork_height: int = await self.wallet_short_sync_backtrack(header_block, peer)
                else:
                    backtrack_fork_height = new_peak.height - 1

                if peer.peer_node_id not in self.synced_peers:
                    # Edge case, this happens when the peak < WEIGHT_PROOF_RECENT_BLOCKS
                    # we still want to subscribe for all phs and coins.
                    # (Hints are not in filter)
                    all_coin_ids: List[bytes32] = await self.get_coin_ids_to_subscribe(uint32(0))
                    phs: List[bytes32] = await self.get_puzzle_hashes_to_subscribe()
                    ph_updates: List[CoinState] = await subscribe_to_phs(phs, peer, uint32(0))
                    coin_updates: List[CoinState] = await subscribe_to_coin_updates(all_coin_ids, peer, uint32(0))
                    peer_new_peak_height, peer_new_peak_hash = self.node_peaks[peer.peer_node_id]
                    success = await self.receive_state_from_peer(
                        ph_updates + coin_updates,
                        peer,
                        height=peer_new_peak_height,
                        header_hash=peer_new_peak_hash,
                    )
                    if success:
                        self.synced_peers.add(peer.peer_node_id)
                else:
                    if peak_hb is not None and new_peak.weight <= peak_hb.weight:
                        # Don't process blocks at the same weight
                        return

                # For every block, we need to apply the cache from race_cache
                for potential_height in range(backtrack_fork_height + 1, new_peak.height + 1):
                    header_hash = self.wallet_state_manager.blockchain.height_to_hash(uint32(potential_height))
                    if header_hash in self.race_cache:
                        self.log.info(f"Receiving race state: {self.race_cache[header_hash]}")
                        await self.receive_state_from_peer(list(self.race_cache[header_hash]), peer)

                self.wallet_state_manager.state_changed("new_block")
                self.wallet_state_manager.set_sync_mode(False)

    self.log.info(f"Finished processing new peak of {new_peak.height}")
    if peer.peer_node_id in self.synced_peers:
        await self.wallet_state_manager.blockchain.set_finished_sync_up_to(new_peak.height)
    await self.wallet_state_manager.new_peak(new_peak)
async def wallet_short_sync_backtrack(self, header_block: HeaderBlock, peer: WSChiaConnection) -> int:
    """Short-sync the wallet to *header_block* by walking backwards from it.

    Fetches headers one by one from *peer* until a block already known to the
    local blockchain (or height 0) is reached, rolls local state back to that
    fork point if needed, then replays the fetched blocks forward.

    Returns the fork height (the last height shared with the local chain).
    Raises RuntimeError on a bad header response, ValueError if a replayed
    block is rejected as invalid.
    """
    assert self.wallet_state_manager is not None
    peak: Optional[HeaderBlock] = await self.wallet_state_manager.blockchain.get_peak_block()
    top = header_block
    blocks = [top]
    # Fetch blocks backwards until we hit the one that we have,
    # then complete them with additions / removals going forward
    fork_height = 0
    if self.wallet_state_manager.blockchain.contains_block(header_block.prev_header_hash):
        # Fast path: the new block extends a known block directly.
        fork_height = header_block.height - 1
    while not self.wallet_state_manager.blockchain.contains_block(top.prev_header_hash) and top.height > 0:
        request_prev = wallet_protocol.RequestBlockHeader(top.height - 1)
        response_prev: Optional[RespondBlockHeader] = await peer.request_block_header(request_prev)
        if response_prev is None or not isinstance(response_prev, RespondBlockHeader):
            raise RuntimeError("bad block header response from peer while syncing")
        prev_head = response_prev.header_block
        blocks.append(prev_head)
        top = prev_head
        # Each step back moves the assumed fork point one block lower.
        fork_height = top.height - 1
    # Collected newest-first; replay must go oldest-first.
    blocks.reverse()
    # Roll back coins and transactions
    peak_height = self.wallet_state_manager.blockchain.get_peak_height()
    if fork_height < peak_height:
        self.log.info(f"Rolling back to {fork_height}")
        # we should clear all peers since this is a full rollback
        await self.perform_atomic_rollback(fork_height)
        await self.update_ui()
    if peak is not None:
        # NOTE(review): assumes callers only short-sync toward heavier chains — confirm.
        assert header_block.weight >= peak.weight
    for block in blocks:
        # Set blockchain to the latest peak
        res, err = await self.wallet_state_manager.blockchain.receive_block(block)
        if res == ReceiveBlockResult.INVALID_BLOCK:
            raise ValueError(err)
    return fork_height
async def update_ui(self) -> None:
    """Notify UI listeners that coin state may have changed for every wallet.

    Fires a "coin_removed" and a "coin_added" state-changed event per wallet
    id; listeners are expected to re-query the wallet for its current coins.
    """
    # Only the wallet ids are needed, so iterate keys directly instead of
    # materializing the (id, wallet) pairs via .items() (value was unused).
    for wallet_id in self.wallet_state_manager.wallets:
        self.wallet_state_manager.state_changed("coin_removed", wallet_id)
        self.wallet_state_manager.state_changed("coin_added", wallet_id)
async def fetch_and_validate_the_weight_proof(
    self, peer: WSChiaConnection, peak: HeaderBlock
) -> Tuple[bool, Optional[WeightProof], List[SubEpochSummary], List[BlockRecord]]:
    """Request a weight proof for *peak* from *peer* and validate it.

    Returns (valid, weight_proof, summaries, block_records); every failure
    path returns (False, None, [], []).  Successful validations are memoized
    in self.valid_wp_cache keyed by the proof's hash.
    """
    assert self.wallet_state_manager is not None
    assert self.wallet_state_manager.weight_proof_handler is not None
    weight_request = RequestProofOfWeight(peak.height, peak.header_hash)
    # Weight proofs are large; use a generous, configurable timeout.
    wp_timeout = self.config.get("weight_proof_timeout", 360)
    self.log.debug(f"weight proof timeout is {wp_timeout} sec")
    weight_proof_response: RespondProofOfWeight = await peer.request_proof_of_weight(
        weight_request, timeout=wp_timeout
    )
    if weight_proof_response is None:
        return False, None, [], []
    start_validation = time.time()
    weight_proof = weight_proof_response.wp
    # The tip of the proof's recent chain must match the peak we asked about,
    # both in height and in cumulative weight.
    if weight_proof.recent_chain_data[-1].reward_chain_block.height != peak.height:
        return False, None, [], []
    if weight_proof.recent_chain_data[-1].reward_chain_block.weight != peak.weight:
        return False, None, [], []
    if weight_proof.get_hash() in self.valid_wp_cache:
        # Exact same proof validated before; reuse the cached result.
        valid, fork_point, summaries, block_records = self.valid_wp_cache[weight_proof.get_hash()]
    else:
        # Reset the clock so the log line measures only the real validation.
        start_validation = time.time()
        (
            valid,
            fork_point,
            summaries,
            block_records,
        ) = await self.wallet_state_manager.weight_proof_handler.validate_weight_proof(weight_proof)
        if valid:
            self.valid_wp_cache[weight_proof.get_hash()] = valid, fork_point, summaries, block_records
    end_validation = time.time()
    self.log.info(f"It took {end_validation - start_validation} time to validate the weight proof")
    return valid, weight_proof, summaries, block_records
async def get_puzzle_hashes_to_subscribe(self) -> List[bytes32]:
    """Collect every puzzle hash the wallet should subscribe to: all derived
    puzzle hashes plus the ones tracked by the interested store."""
    assert self.wallet_state_manager is not None
    derived = list(await self.wallet_state_manager.puzzle_store.get_all_puzzle_hashes())
    interested_rows = await self.wallet_state_manager.interested_store.get_interested_puzzle_hashes()
    # Interested-store rows are tuples; the puzzle hash is the first column.
    return derived + [row[0] for row in interested_rows]
async def get_coin_ids_to_subscribe(self, min_height: int) -> List[bytes32]:
    """Collect coin ids the wallet should subscribe to: coins worth checking
    above *min_height*, coins involved in trades, and interested coins."""
    assert self.wallet_state_manager is not None
    wsm = self.wallet_state_manager
    records: Set[WalletCoinRecord] = await wsm.coin_store.get_coins_to_check(min_height)
    names: Set[bytes32] = {record.name() for record in records}
    trade_coins = await wsm.trade_manager.get_coins_of_interest()
    names |= set(trade_coins.keys())
    names |= set(await wsm.interested_store.get_interested_coin_ids())
    return list(names)
async def validate_received_state_from_peer(
    self,
    coin_state: CoinState,
    peer: WSChiaConnection,
    peer_request_cache: PeerRequestCache,
    fork_height: Optional[uint32],
) -> bool:
    """Validate a single CoinState received from an untrusted peer.

    Returns True iff the coin's creation (and, when present, its spend) is
    backed by a proof of inclusion in a block that is itself proven to be part
    of the chain covered by the synced weight proof.  A peer that fails a
    proof-of-inclusion check is disconnected (close code 9999).
    """
    assert self.wallet_state_manager is not None
    # Only use the cache if we are talking about states before the fork point. If we are evaluating something
    # in a reorg, we cannot use the cache, since we don't know if it's actually in the new chain after the reorg.
    if await can_use_peer_request_cache(coin_state, peer_request_cache, fork_height):
        return True
    spent_height = coin_state.spent_height
    confirmed_height = coin_state.created_height
    current = await self.wallet_state_manager.coin_store.get_coin_record(coin_state.coin.name())
    # if remote state is same as current local state we skip validation
    # CoinRecord unspent = height 0, coin state = None. We adjust for comparison below
    current_spent_height = None
    if current is not None and current.spent_block_height != 0:
        current_spent_height = current.spent_block_height
    # Same as current state, nothing to do
    if (
        current is not None
        and current_spent_height == spent_height
        and current.confirmed_block_height == confirmed_height
    ):
        peer_request_cache.add_to_states_validated(coin_state)
        return True
    reorg_mode = False
    # If coin was removed from the blockchain
    if confirmed_height is None:
        if current is None:
            # Coin does not exist in local DB, so no need to do anything
            return False
        # This coin got reorged
        reorg_mode = True
        # Validate against the height we previously recorded it at.
        confirmed_height = current.confirmed_block_height
    # request header block for created height
    state_block: Optional[HeaderBlock] = peer_request_cache.get_block(confirmed_height)
    if state_block is None or reorg_mode:
        # In reorg mode the cached block may be stale, so always re-fetch.
        request = RequestHeaderBlocks(confirmed_height, confirmed_height)
        res = await peer.request_header_blocks(request)
        if res is None:
            return False
        state_block = res.header_blocks[0]
        assert state_block is not None
        peer_request_cache.add_to_blocks(state_block)
    # get proof of inclusion
    assert state_block.foliage_transaction_block is not None
    # Prove the coin's puzzle hash is in the additions of its creation block.
    validate_additions_result = await request_and_validate_additions(
        peer,
        state_block.height,
        state_block.header_hash,
        coin_state.coin.puzzle_hash,
        state_block.foliage_transaction_block.additions_root,
    )
    if validate_additions_result is False:
        self.log.warning("Validate false 1")
        await peer.close(9999)
        return False
    # If spent_height is None, we need to validate that the creation block is actually in the longest blockchain.
    # Otherwise, we don't have to, since we will validate the spent block later.
    if coin_state.spent_height is None:
        validated = await self.validate_block_inclusion(state_block, peer, peer_request_cache)
        if not validated:
            return False
    # TODO: make sure all cases are covered
    if current is not None:
        if spent_height is None and current.spent_block_height != 0:
            # Peer is telling us that coin that was previously known to be spent is not spent anymore
            # Check old state
            request = RequestHeaderBlocks(current.spent_block_height, current.spent_block_height)
            res = await peer.request_header_blocks(request)
            spent_state_block = res.header_blocks[0]
            assert spent_state_block.height == current.spent_block_height
            assert spent_state_block.foliage_transaction_block is not None
            peer_request_cache.add_to_blocks(spent_state_block)
            # The old spend must have been real: the coin id must appear in the
            # removals of the block we recorded the spend at.
            validate_removals_result: bool = await request_and_validate_removals(
                peer,
                current.spent_block_height,
                spent_state_block.header_hash,
                coin_state.coin.name(),
                spent_state_block.foliage_transaction_block.removals_root,
            )
            if validate_removals_result is False:
                self.log.warning("Validate false 2")
                await peer.close(9999)
                return False
            validated = await self.validate_block_inclusion(spent_state_block, peer, peer_request_cache)
            if not validated:
                return False
    if spent_height is not None:
        # request header block for created height
        spent_state_block = peer_request_cache.get_block(spent_height)
        if spent_state_block is None:
            request = RequestHeaderBlocks(spent_height, spent_height)
            res = await peer.request_header_blocks(request)
            spent_state_block = res.header_blocks[0]
            assert spent_state_block.height == spent_height
            assert spent_state_block.foliage_transaction_block is not None
            peer_request_cache.add_to_blocks(spent_state_block)
        assert spent_state_block is not None
        # Prove the coin id is in the removals of the claimed spend block.
        validate_removals_result = await request_and_validate_removals(
            peer,
            spent_state_block.height,
            spent_state_block.header_hash,
            coin_state.coin.name(),
            spent_state_block.foliage_transaction_block.removals_root,
        )
        if validate_removals_result is False:
            self.log.warning("Validate false 3")
            await peer.close(9999)
            return False
        validated = await self.validate_block_inclusion(spent_state_block, peer, peer_request_cache)
        if not validated:
            return False
    peer_request_cache.add_to_states_validated(coin_state)
    return True
async def validate_block_inclusion(
    self, block: HeaderBlock, peer: WSChiaConnection, peer_request_cache: PeerRequestCache
) -> bool:
    """Return True iff *block* is part of the chain proven by the synced weight proof.

    Strategy: if the block is already in our local blockchain with the same
    header hash, accept.  If it falls inside the weight proof's recent chain,
    compare directly.  Otherwise fetch the headers between the block and a
    proven anchor (recent chain tip or a sub-epoch boundary) and verify the
    reward-chain / prev-hash linkage back to *block*.
    """
    assert self.wallet_state_manager is not None
    assert self.server is not None
    if self.wallet_state_manager.blockchain.contains_height(block.height):
        stored_hash = self.wallet_state_manager.blockchain.height_to_hash(block.height)
        stored_record = self.wallet_state_manager.blockchain.try_block_record(stored_hash)
        if stored_record is not None:
            if stored_record.header_hash == block.header_hash:
                # Already part of our own validated chain.
                return True
    weight_proof: Optional[WeightProof] = self.wallet_state_manager.blockchain.synced_weight_proof
    if weight_proof is None:
        return False
    if block.height >= weight_proof.recent_chain_data[0].height:
        # this was already validated as part of the wp validation
        index = block.height - weight_proof.recent_chain_data[0].height
        if index >= len(weight_proof.recent_chain_data):
            return False
        if weight_proof.recent_chain_data[index].header_hash != block.header_hash:
            self.log.error("Failed validation 1")
            return False
        return True
    else:
        # Link the block forward to a proven point in the chain.
        start = block.height + 1
        compare_to_recent = False
        current_ses: Optional[SubEpochData] = None
        inserted: Optional[SubEpochData] = None
        first_height_recent = weight_proof.recent_chain_data[0].height
        if start > first_height_recent - 1000:
            # Close enough: walk forward to the start of the recent chain.
            compare_to_recent = True
            end = first_height_recent
        else:
            if block.height < self.constants.SUB_EPOCH_BLOCKS:
                # Block is in the first sub-epoch; anchor at its boundary.
                inserted = weight_proof.sub_epochs[1]
                end = self.constants.SUB_EPOCH_BLOCKS + inserted.num_blocks_overflow
            else:
                # Ask the peer which sub-epoch the block belongs to, then find
                # that sub-epoch inside the weight proof.
                request = RequestSESInfo(block.height, block.height + 32)
                res_ses: Optional[RespondSESInfo] = peer_request_cache.get_ses_request(block.height)
                if res_ses is None:
                    res_ses = await peer.request_ses_hashes(request)
                    peer_request_cache.add_to_ses_requests(block.height, res_ses)
                assert res_ses is not None
                ses_0 = res_ses.reward_chain_hash[0]
                last_height = res_ses.heights[0][-1]  # Last height in sub epoch
                end = last_height
                for idx, ses in enumerate(weight_proof.sub_epochs):
                    if idx > len(weight_proof.sub_epochs) - 3:
                        break
                    if ses.reward_chain_hash == ses_0:
                        current_ses = ses
                        inserted = weight_proof.sub_epochs[idx + 2]
                        break
                if current_ses is None:
                    self.log.error("Failed validation 2")
                    return False
        all_peers = self.server.get_full_node_connections()
        blocks: Optional[List[HeaderBlock]] = await fetch_header_blocks_in_range(
            start, end, peer_request_cache, all_peers
        )
        if blocks is None:
            self.log.error(f"Error fetching blocks {start} {end}")
            return False
        if compare_to_recent and weight_proof.recent_chain_data[0].header_hash != blocks[-1].header_hash:
            self.log.error("Failed validation 3")
            return False
    reversed_blocks = blocks.copy()
    reversed_blocks.reverse()
    if not compare_to_recent:
        # Anchor at the sub-epoch: the newest fetched block's final slot hash
        # must match the sub-epoch hash recorded in the weight proof.
        last = reversed_blocks[0].finished_sub_slots[-1].reward_chain.get_hash()
        if inserted is None or last != inserted.reward_chain_hash:
            self.log.error("Failed validation 4")
            return False
    # Walk newest -> oldest, checking each block links to its predecessor and
    # ultimately to *block* itself.
    for idx, en_block in enumerate(reversed_blocks):
        if idx == len(reversed_blocks) - 1:
            # Oldest fetched block: its predecessor is the block under test.
            next_block_rc_hash = block.reward_chain_block.get_hash()
            prev_hash = block.header_hash
        else:
            next_block_rc_hash = reversed_blocks[idx + 1].reward_chain_block.get_hash()
            prev_hash = reversed_blocks[idx + 1].header_hash
        if not en_block.prev_header_hash == prev_hash:
            self.log.error("Failed validation 5")
            return False
        if len(en_block.finished_sub_slots) > 0:
            # What to do here
            # Check reward-chain continuity through the finished slots.
            reversed_slots = en_block.finished_sub_slots.copy()
            reversed_slots.reverse()
            for slot_idx, slot in enumerate(reversed_slots[:-1]):
                hash_val = reversed_slots[slot_idx + 1].reward_chain.get_hash()
                if not hash_val == slot.reward_chain.end_of_slot_vdf.challenge:
                    self.log.error("Failed validation 6")
                    return False
            if not next_block_rc_hash == reversed_slots[-1].reward_chain.end_of_slot_vdf.challenge:
                self.log.error("Failed validation 7")
                return False
        else:
            if not next_block_rc_hash == en_block.reward_chain_block.reward_chain_ip_vdf.challenge:
                self.log.error("Failed validation 8")
                return False
        if idx > len(reversed_blocks) - 50:
            # Spot-check plot signatures on the last ~50 blocks only (cost cap).
            if not AugSchemeMPL.verify(
                en_block.reward_chain_block.proof_of_space.plot_public_key,
                en_block.foliage.foliage_block_data.get_hash(),
                en_block.foliage.foliage_block_data_signature,
            ):
                self.log.error("Failed validation 9")
                return False
    return True
async def fetch_puzzle_solution(self, peer: WSChiaConnection, height: uint32, coin: Coin) -> CoinSpend:
    """Ask *peer* for the puzzle reveal and solution of *coin*, spent at *height*.

    Raises ValueError when the peer does not answer with a RespondPuzzleSolution.
    """
    request = wallet_protocol.RequestPuzzleSolution(coin.name(), height)
    solution_response = await peer.request_puzzle_solution(request)
    # isinstance(None, ...) is False, so this also covers a missing response.
    if not isinstance(solution_response, wallet_protocol.RespondPuzzleSolution):
        raise ValueError(f"Was not able to obtain solution {solution_response}")
    result = solution_response.response
    # Sanity-check that the peer answered about the coin we asked for.
    assert result.puzzle.get_tree_hash() == coin.puzzle_hash
    assert result.coin_name == coin.name()
    return CoinSpend(
        coin,
        result.puzzle.to_serialized_program(),
        result.solution.to_serialized_program(),
    )
async def get_coin_state(
    self, coin_names: List[bytes32], fork_height: Optional[uint32] = None, peer: Optional[WSChiaConnection] = None
) -> List[CoinState]:
    """Query a full node for the state of *coin_names*.

    When the responding peer is untrusted, each returned state is validated
    against the synced weight proof and invalid states are dropped.
    """
    assert self.server is not None
    full_nodes = self.server.connection_by_type[NodeType.FULL_NODE]
    if not full_nodes:
        raise ValueError("Not connected to the full node")
    # Use supplied if provided, prioritize trusted otherwise
    if peer is None:
        connections = list(full_nodes.values())
        peer = next((node for node in connections if self.is_trusted(node)), connections[0])
    assert peer is not None
    request = wallet_protocol.RegisterForCoinUpdates(coin_names, uint32(0))
    coin_state: Optional[RespondToCoinUpdates] = await peer.register_interest_in_coin(request)
    assert coin_state is not None
    if self.is_trusted(peer):
        return coin_state.coin_states
    valid_states = []
    for state in coin_state.coin_states:
        is_valid = await self.validate_received_state_from_peer(
            state, peer, self.get_cache_for_peer(peer), fork_height
        )
        if is_valid:
            valid_states.append(state)
    return valid_states
async def fetch_children(
    self, peer: WSChiaConnection, coin_name: bytes32, fork_height: Optional[uint32] = None
) -> List[CoinState]:
    """Fetch the child coin states of *coin_name* from *peer*.

    Untrusted peers' responses are validated state-by-state; invalid states
    are filtered out.  Raises ValueError on a malformed/missing response.
    """
    response = await peer.request_children(wallet_protocol.RequestChildren(coin_name))
    # isinstance(None, ...) is False, so this also covers a missing response.
    if not isinstance(response, wallet_protocol.RespondChildren):
        raise ValueError(f"Was not able to obtain children {response}")
    if self.is_trusted(peer):
        return response.coin_states
    request_cache = self.get_cache_for_peer(peer)
    validated = []
    for child_state in response.coin_states:
        if await self.validate_received_state_from_peer(child_state, peer, request_cache, fork_height):
            validated.append(child_state)
    return validated
# For RPC only. You should use wallet_state_manager.add_pending_transaction for normal wallet business.
async def push_tx(self, spend_bundle):
    """Broadcast *spend_bundle* to every connected full node (RPC helper)."""
    message = make_msg(
        ProtocolMessageTypes.send_transaction,
        wallet_protocol.SendTransaction(spend_bundle),
    )
    for node in self.server.get_full_node_connections():
        await node.send_message(message)
|
# -*- coding: future_fstrings -*-
#
# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
import sys, os, json
import numpy as np
from ctypes import *
from copy import deepcopy
from .generate_c_code_explicit_ode import generate_c_code_explicit_ode
from .generate_c_code_implicit_ode import generate_c_code_implicit_ode
from .acados_sim import AcadosSim
from .acados_ocp import AcadosOcp
from .acados_model import acados_model_strip_casadi_symbolics
from .utils import is_column, render_template, format_class_dict, np_array_to_list, make_model_consistent
def make_sim_dims_consistent(acados_sim):
    """Derive the dimension fields (nx, nu, nz, np) of *acados_sim* from its model symbolics.

    Raises Exception when a symbolic field is present but not a column vector.
    """
    dims = acados_sim.dims
    model = acados_sim.model
    # nx: the state must always be present as a column vector.
    if not is_column(model.x):
        raise Exception("model.x should be column vector!")
    dims.nx = model.x.shape[0]
    # nu / nz / np are optional and default to dimension 0 when absent.
    dims.nu = _optional_dim(model.u, "model.u")
    dims.nz = _optional_dim(model.z, "model.z")
    dims.np = _optional_dim(model.p, "model.p")


def _optional_dim(sym, name):
    """Return the length of column vector *sym*, or 0 when it is absent (None/[]).

    Raises Exception("<name> should be column vector or None!") otherwise.
    """
    if is_column(sym):
        return sym.shape[0]
    # `is None` instead of `== None`: equality on CasADi symbolics is
    # overloaded and need not return a plain bool.
    if sym is None or sym == []:
        return 0
    raise Exception("{} should be column vector or None!".format(name))
def get_sim_layout():
    """Load the acados_sim structure layout JSON shipped next to this module."""
    module_dir = os.path.dirname(sys.modules[__name__].__file__)
    with open(module_dir + '/acados_sim_layout.json', 'r') as layout_file:
        return json.load(layout_file)
def sim_formulation_json_dump(acados_sim, json_file='acados_sim.json'):
    """Serialize *acados_sim* into *json_file* as a formatted JSON description."""
    # Load acados_sim structure description
    sim_layout = get_sim_layout()
    # Copy input sim object dictionary
    sim_dict = dict(deepcopy(acados_sim).__dict__)
    for key, layout_value in sim_layout.items():
        # Structured attributes (dicts in the layout) are flattened to plain dicts.
        if isinstance(layout_value, dict):
            sim_dict[key] = dict(getattr(acados_sim, key).__dict__)
    # CasADi symbolics are not JSON-serializable; strip them from the model.
    sim_dict['model'] = acados_model_strip_casadi_symbolics(sim_dict['model'])
    sim_json = format_class_dict(sim_dict)
    with open(json_file, 'w') as out_file:
        json.dump(sim_json, out_file, default=np_array_to_list, indent=4, sort_keys=True)
def sim_render_templates(json_file, model_name):
    """Render the C solver sources, Makefile, and model header from templates."""
    json_path = '{}/{}'.format(os.getcwd(), json_file)
    if not os.path.exists(json_path):
        raise Exception("{} not found!".format(json_path))
    solver_dir = 'c_generated_code/'
    model_dir = 'c_generated_code/{}_model/'.format(model_name)
    # (template file, rendered output file, destination directory)
    render_jobs = [
        ('acados_sim_solver.in.c', 'acados_sim_solver_{}.c'.format(model_name), solver_dir),
        ('acados_sim_solver.in.h', 'acados_sim_solver_{}.h'.format(model_name), solver_dir),
        ('Makefile.in', 'Makefile', solver_dir),
        ('model.in.h', '{}_model.h'.format(model_name), model_dir),
    ]
    for in_file, out_file, template_dir in render_jobs:
        render_template(in_file, out_file, template_dir, json_path)
def sim_generate_casadi_functions(acados_sim):
    """Generate C code for the model's ODE functions matching the integrator type."""
    model = make_model_consistent(acados_sim.model)
    integrator_type = acados_sim.solver_options.integrator_type
    if integrator_type == 'ERK':
        # Explicit integrator needs only the explicit ODE.
        generate_c_code_explicit_ode(model)
    elif integrator_type == 'IRK':
        # Implicit integrator additionally requests Hessian generation.
        generate_c_code_implicit_ode(model, dict(generate_hess=1))
class AcadosSimSolver:
    """ctypes wrapper around the generated acados sim (integrator) shared library.

    Accepts either an AcadosSim description or an AcadosOcp (from which model,
    dimensions, and integrator type are copied), generates and compiles the C
    solver, then loads it via ctypes.
    """

    def __init__(self, acados_sim_, json_file='acados_sim.json'):
        if isinstance(acados_sim_, AcadosOcp):
            # Build a sim description from the OCP's model/dims/options.
            acados_sim = AcadosSim()
            acados_sim.model = acados_sim_.model
            acados_sim.dims.nx = acados_sim_.dims.nx
            acados_sim.dims.nu = acados_sim_.dims.nu
            acados_sim.dims.nz = acados_sim_.dims.nz
            acados_sim.dims.np = acados_sim_.dims.np
            acados_sim.solver_options.integrator_type = acados_sim_.solver_options.integrator_type
        elif isinstance(acados_sim_, AcadosSim):
            acados_sim = acados_sim_
        else:
            # BUG FIX: previously fell through and hit a NameError on acados_sim.
            raise Exception('acados_sim_ must be an AcadosSim or AcadosOcp object!')
        model_name = acados_sim.model.name
        make_sim_dims_consistent(acados_sim)
        # use existing json when creating integrator from ocp
        if isinstance(acados_sim_, AcadosSim):
            sim_formulation_json_dump(acados_sim, json_file)
        # render templates
        sim_render_templates(json_file, model_name)
        # generate casadi functions
        sim_generate_casadi_functions(acados_sim)
        ## Compile solver
        os.chdir('c_generated_code')
        os.system('make sim_shared_lib')
        os.chdir('..')
        self.sim_struct = acados_sim
        self.model_name = model_name
        # Ctypes: load the compiled shared library and create the C solver.
        shared_lib = 'c_generated_code/libacados_sim_solver_' + model_name + '.so'
        self.shared_lib = CDLL(shared_lib)
        getattr(self.shared_lib, f"{model_name}_acados_sim_create")()
        # Cache opaque C pointers to the solver's internal structures.
        for attr, getter in (
            ('sim_opts', f"{model_name}_acados_get_sim_opts"),
            ('sim_dims', f"{model_name}_acados_get_sim_dims"),
            ('sim_config', f"{model_name}_acados_get_sim_config"),
            ('sim_out', f"{model_name}_acados_get_sim_out"),
            ('sim_in', f"{model_name}_acados_get_sim_in"),
        ):
            getattr(self.shared_lib, getter).restype = c_void_p
            setattr(self, attr, getattr(self.shared_lib, getter)())
        nu = self.sim_struct.dims.nu
        nx = self.sim_struct.dims.nx
        # Field name -> flat length of the array the C side fills on get().
        self.gettable = {
            'x': nx,
            'xn': nx,
            'u': nu,
            'S_forw': nx*(nx+nu),
            'Sx': nx*nx,
            'Su': nx*nu,
        }
        self.settable = ['S_adj', 'T', 'x', 'u', 'xdot', 'z', 'p']  # S_forw

    def solve(self):
        """Run one integration step; return the acados status code (0 = success)."""
        return getattr(self.shared_lib, f"{self.model_name}_acados_sim_solve")()

    def get(self, field_):
        """Read *field_* from the sim output and return it as a numpy array.

        Sensitivity fields are reshaped: 'S_forw' -> (nx, nx+nu),
        'Sx' -> (nx, nx), 'Su' -> (nx, nu); other fields stay 1-D.
        Raises Exception for unknown fields.
        """
        if field_ not in self.gettable:
            # Raise before any C call (message previously said "set()" and
            # used same-quote nesting, a SyntaxError on Python < 3.12).
            raise Exception(f'acados_solver.get(): Unknown field {field_}, '
                            f'available fields are {", ".join(self.gettable.keys())}')
        field = field_.encode('utf-8')
        # Allocate the output buffer and let the C side fill it.
        dims = self.gettable[field_]
        out = np.ascontiguousarray(np.zeros((dims,)), dtype=np.float64)
        out_data = cast(out.ctypes.data, POINTER(c_double))
        self.shared_lib.sim_out_get.argtypes = [c_void_p, c_void_p, c_void_p, c_char_p, c_void_p]
        self.shared_lib.sim_out_get(self.sim_config, self.sim_dims, self.sim_out, field, out_data)
        nx = self.sim_struct.dims.nx
        nu = self.sim_struct.dims.nu
        # acados stores matrices column-major, hence order='F'.
        if field_ == 'S_forw':
            out = out.reshape(nx, nx + nu, order='F')
        elif field_ == 'Sx':
            out = out.reshape(nx, nx, order='F')
        elif field_ == 'Su':
            # BUG FIX: this branch was a duplicated 'Sx' check, so 'Su' was
            # returned flat instead of reshaped to (nx, nu).
            out = out.reshape(nx, nu, order='F')
        return out

    def set(self, field_, value_):
        """Write *value_* into sim input *field_*; 'p' updates model parameters.

        Raises Exception for unknown fields.
        """
        # cast value_ to a float ndarray so the ctypes pointer below is well-defined
        if type(value_) == float:
            value_ = np.array([value_])
        value_ = value_.astype(float)
        value_data = cast(value_.ctypes.data, POINTER(c_double))
        value_data_p = cast((value_data), c_void_p)
        field = field_.encode('utf-8')
        # treat parameters separately (BUG FIX: was `field_ is 'p'`; identity on
        # string literals is an interning implementation detail)
        if field_ == 'p':
            model_name = self.sim_struct.model.name
            getattr(self.shared_lib, f"{model_name}_acados_sim_update_params").argtypes = [POINTER(c_double)]
            value_data = cast(value_.ctypes.data, POINTER(c_double))
            getattr(self.shared_lib, f"{model_name}_acados_sim_update_params")(value_data, value_.shape[0])
        elif field_ in self.settable:
            self.shared_lib.sim_in_set.argtypes = [c_void_p, c_void_p, c_void_p, c_char_p, c_void_p]
            self.shared_lib.sim_in_set(self.sim_config, self.sim_dims, self.sim_in, field, value_data_p)
        else:
            raise Exception(f'acados_solver.set(): Unknown field {field_}, '
                            f'available fields are {", ".join(self.settable)}')

    def __del__(self):
        # Free the C-side solver memory when the wrapper is garbage collected.
        getattr(self.shared_lib, f"{self.model_name}_acados_sim_free")()
| # -*- coding: future_fstrings -*-
#
# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
import sys, os, json
import numpy as np
from ctypes import *
from copy import deepcopy
from .generate_c_code_explicit_ode import generate_c_code_explicit_ode
from .generate_c_code_implicit_ode import generate_c_code_implicit_ode
from .acados_sim import AcadosSim
from .acados_ocp import AcadosOcp
from .acados_model import acados_model_strip_casadi_symbolics
from .utils import is_column, render_template, format_class_dict, np_array_to_list, make_model_consistent
def make_sim_dims_consistent(acados_sim):
    """Derive the dimension fields (nx, nu, nz, np) of *acados_sim* from its model symbolics.

    Raises Exception when a symbolic field is present but not a column vector.
    """
    dims = acados_sim.dims
    model = acados_sim.model
    # nx
    if is_column(model.x):
        dims.nx = model.x.shape[0]
    else:
        raise Exception("model.x should be column vector!")
    # nu -- optional; `is None` instead of `== None` because equality on CasADi
    # symbolics is overloaded and need not return a plain bool.
    if is_column(model.u):
        dims.nu = model.u.shape[0]
    elif model.u is None or model.u == []:
        dims.nu = 0
    else:
        raise Exception("model.u should be column vector or None!")
    # nz -- optional
    if is_column(model.z):
        dims.nz = model.z.shape[0]
    elif model.z is None or model.z == []:
        dims.nz = 0
    else:
        raise Exception("model.z should be column vector or None!")
    # np -- optional
    if is_column(model.p):
        dims.np = model.p.shape[0]
    elif model.p is None or model.p == []:
        dims.np = 0
    else:
        raise Exception("model.p should be column vector or None!")
def get_sim_layout():
    """Return the acados_sim structure layout loaded from the packaged JSON file."""
    this_module = sys.modules[__name__]
    layout_path = os.path.dirname(this_module.__file__) + '/acados_sim_layout.json'
    with open(layout_path, 'r') as fh:
        sim_layout = json.load(fh)
    return sim_layout
def sim_formulation_json_dump(acados_sim, json_file='acados_sim.json'):
    """Dump *acados_sim* as a formatted JSON description into *json_file*."""
    layout = get_sim_layout()
    dump_dict = dict(deepcopy(acados_sim).__dict__)
    for attr_name, attr_layout in layout.items():
        # Non-dict layout entries are plain attributes; nothing to flatten.
        if not isinstance(attr_layout, dict):
            continue
        # Flatten nested attribute objects (dims, solver options, ...) to dicts.
        dump_dict[attr_name] = dict(getattr(acados_sim, attr_name).__dict__)
    # CasADi symbolics cannot be serialized; strip them from the model.
    dump_dict['model'] = acados_model_strip_casadi_symbolics(dump_dict['model'])
    formatted = format_class_dict(dump_dict)
    with open(json_file, 'w') as fh:
        json.dump(formatted, fh, default=np_array_to_list, indent=4, sort_keys=True)
def sim_render_templates(json_file, model_name):
    """Render solver C sources, header, Makefile, and the model header from templates."""
    json_path = '{cwd}/{json_file}'.format(cwd=os.getcwd(), json_file=json_file)
    if not os.path.exists(json_path):
        raise Exception("{} not found!".format(json_path))

    def _render(template_dir, in_file, out_file):
        # Thin wrapper so every call shares the same json_path.
        render_template(in_file, out_file, template_dir, json_path)

    solver_dir = 'c_generated_code/'
    _render(solver_dir, 'acados_sim_solver.in.c', 'acados_sim_solver_{}.c'.format(model_name))
    _render(solver_dir, 'acados_sim_solver.in.h', 'acados_sim_solver_{}.h'.format(model_name))
    _render(solver_dir, 'Makefile.in', 'Makefile')
    # Model-specific header goes into the model subfolder.
    model_dir = 'c_generated_code/{}_model/'.format(model_name)
    _render(model_dir, 'model.in.h', '{}_model.h'.format(model_name))
def sim_generate_casadi_functions(acados_sim):
    """Emit C code for the model ODE functions required by the configured integrator."""
    model = make_model_consistent(acados_sim.model)
    itype = acados_sim.solver_options.integrator_type
    if itype == 'ERK':
        # explicit model -- generate C code
        generate_c_code_explicit_ode(model)
    elif itype == 'IRK':
        # implicit model -- generate C code, including Hessian information
        generate_c_code_implicit_ode(model, dict(generate_hess=1))
class AcadosSimSolver:
def __init__(self, acados_sim_, json_file='acados_sim.json'):
if isinstance(acados_sim_, AcadosOcp):
# set up acados_sim_
acados_sim = AcadosSim()
acados_sim.model = acados_sim_.model
acados_sim.dims.nx = acados_sim_.dims.nx
acados_sim.dims.nu = acados_sim_.dims.nu
acados_sim.dims.nz = acados_sim_.dims.nz
acados_sim.dims.np = acados_sim_.dims.np
acados_sim.solver_options.integrator_type = acados_sim_.solver_options.integrator_type
elif isinstance(acados_sim_, AcadosSim):
acados_sim = acados_sim_
model_name = acados_sim.model.name
make_sim_dims_consistent(acados_sim)
# use existing json when creating integrator from ocp
if isinstance(acados_sim_, AcadosSim):
sim_formulation_json_dump(acados_sim, json_file)
# render templates
sim_render_templates(json_file, model_name)
# generate casadi functions
sim_generate_casadi_functions(acados_sim)
## Compile solver
os.chdir('c_generated_code')
os.system('make sim_shared_lib')
os.chdir('..')
# Ctypes
shared_lib = 'c_generated_code/libacados_sim_solver_' + model_name + '.so'
self.sim_struct = acados_sim
model_name = self.sim_struct.model.name
self.shared_lib = CDLL(shared_lib)
getattr(self.shared_lib, f"{model_name}_acados_sim_create")()
getattr(self.shared_lib, f"{model_name}_acados_get_sim_opts").restype = c_void_p
self.sim_opts = getattr(self.shared_lib, f"{model_name}_acados_get_sim_opts")()
getattr(self.shared_lib, f"{model_name}_acados_get_sim_dims").restype = c_void_p
self.sim_dims = getattr(self.shared_lib, f"{model_name}_acados_get_sim_dims")()
getattr(self.shared_lib, f"{model_name}_acados_get_sim_config").restype = c_void_p
self.sim_config = getattr(self.shared_lib, f"{model_name}_acados_get_sim_config")()
getattr(self.shared_lib, f"{model_name}_acados_get_sim_out").restype = c_void_p
self.sim_out = getattr(self.shared_lib, f"{model_name}_acados_get_sim_out")()
getattr(self.shared_lib, f"{model_name}_acados_get_sim_in").restype = c_void_p
self.sim_in = getattr(self.shared_lib, f"{model_name}_acados_get_sim_in")()
nu = self.sim_struct.dims.nu
nx = self.sim_struct.dims.nx
self.gettable = {
'x': nx,
'xn': nx,
'u': nu,
'S_forw': nx*(nx+nu),
'Sx': nx*nx,
'Su': nx*nu,
}
self.settable = ['S_adj', 'T', 'x', 'u', 'xdot', 'z', 'p'] # S_forw
self.model_name = model_name
def solve(self):
    """Perform one integration step of the acados sim solver.

    :return: integer acados status code returned by the generated C solver
        (0 indicates success).
    """
    solve_fn = getattr(self.shared_lib, f"{self.model_name}_acados_sim_solve")
    return solve_fn()
def get(self, field_):
    """Read an output field from the C sim solver.

    :param field_: name of the field; must be one of ``self.gettable``
        ('x', 'xn', 'u', 'S_forw', 'Sx', 'Su').
    :return: 1-D numpy array with the value; sensitivity fields ('S_forw',
        'Sx', 'Su') are reshaped into their matrix form using Fortran
        (column-major) order, matching the acados memory layout.
    :raises Exception: if ``field_`` is not a gettable field.
    """
    field = field_
    field = field.encode('utf-8')
    if field_ in self.gettable.keys():
        # allocate a contiguous output buffer sized for this field
        dims = self.gettable[field_]
        out = np.ascontiguousarray(np.zeros((dims,)), dtype=np.float64)
        out_data = cast(out.ctypes.data, POINTER(c_double))
        self.shared_lib.sim_out_get.argtypes = [c_void_p, c_void_p, c_void_p, c_char_p, c_void_p]
        self.shared_lib.sim_out_get(self.sim_config, self.sim_dims, self.sim_out, field, out_data)
        if field_ == 'S_forw':
            nu = self.sim_struct.dims.nu
            nx = self.sim_struct.dims.nx
            out = out.reshape(nx, nx+nu, order='F')
        elif field_ == 'Sx':
            nx = self.sim_struct.dims.nx
            out = out.reshape(nx, nx, order='F')
        elif field_ == 'Su':
            # BUG FIX: this branch previously tested 'Sx' a second time, so the
            # 'Su' sensitivity was never reshaped to its (nx, nu) matrix form.
            nx = self.sim_struct.dims.nx
            nu = self.sim_struct.dims.nu
            out = out.reshape(nx, nu, order='F')
    else:
        raise Exception(f'acados_solver.get(): Unknown field {field}, available fields are {",".join(self.gettable.keys())}')
    return out
def set(self, field_, value_):
    """Write an input field of the C sim solver.

    :param field_: name of the field; 'p' (parameters) is routed to the
        model-specific parameter-update function, all other fields in
        ``self.settable`` go through the generic ``sim_in_set``.
    :param value_: scalar float or numpy array; always converted to a
        float64 array before being handed to the C layer.
    :raises Exception: if ``field_`` is not a settable field.
    """
    # cast value_ to avoid conversion issues
    if type(value_) == float:
        value_ = np.array([value_])
    value_ = value_.astype(float)
    value_data = cast(value_.ctypes.data, POINTER(c_double))
    value_data_p = cast((value_data), c_void_p)
    field = field_
    field = field.encode('utf-8')
    # treat parameters separately
    # BUG FIX: was `field_ is 'p'` — identity comparison against a string
    # literal is implementation-dependent (and a SyntaxWarning on CPython 3.8+).
    if field_ == 'p':
        model_name = self.sim_struct.model.name
        # NOTE(review): the C function is called with two arguments
        # (double *p, int np) while argtypes only declares the first;
        # ctypes allows the extra argument, so behavior is preserved as-is.
        getattr(self.shared_lib, f"{model_name}_acados_sim_update_params").argtypes = [POINTER(c_double)]
        value_data = cast(value_.ctypes.data, POINTER(c_double))
        getattr(self.shared_lib, f"{model_name}_acados_sim_update_params")(value_data, value_.shape[0])
    elif field_ in self.settable:
        self.shared_lib.sim_in_set.argtypes = [c_void_p, c_void_p, c_void_p, c_char_p, c_void_p]
        self.shared_lib.sim_in_set(self.sim_config, self.sim_dims, self.sim_in, field, value_data_p)
    else:
        raise Exception(f'acados_solver.set(): Unknown field {field}, available fields are {",".join(self.settable)}')
    return
def __del__(self):
    # Release the C-side solver memory allocated by *_acados_sim_create()
    # when the Python wrapper is garbage-collected.
    getattr(self.shared_lib, f"{self.model_name}_acados_sim_free")()
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import uuid
from enum import Enum, unique
from typing import Any, ClassVar, Dict, List, Optional, Set, Tuple, Type, Union, cast
import boto3
from botocore.exceptions import ClientError
from intelliflow.core.deployment import get_working_set_as_zip_stream, is_environment_immutable
from intelliflow.core.permission import PermissionContext
from intelliflow.core.platform.definitions.aws.glue.script.batch.glueetl_scala_all_ABI import GlueAllABIScala
from intelliflow.core.signal_processing import DimensionFilter, DimensionSpec, Signal, Slot
from intelliflow.core.signal_processing.definitions.compute_defs import ABI, Lang
from intelliflow.core.signal_processing.routing_runtime_constructs import Route
from intelliflow.core.signal_processing.signal import SignalDomainSpec, SignalType
from intelliflow.core.signal_processing.signal_source import (
DATASET_HEADER_KEY,
DATASET_SCHEMA_TYPE_KEY,
ENCRYPTION_KEY_KEY,
CWMetricSignalSourceAccessSpec,
DatasetSchemaType,
S3SignalSourceAccessSpec,
SignalSourceAccessSpec,
SignalSourceType,
)
from intelliflow.core.signal_processing.slot import SlotCodeMetadata, SlotCodeType, SlotType
from ...constructs import (
BatchCompute,
ConstructInternalMetricDesc,
ConstructParamsDict,
ConstructPermission,
ConstructSecurityConf,
EncryptionKeyAllocationLevel,
Storage,
)
from ...definitions.aws.common import AWS_COMMON_RETRYABLE_ERRORS, MAX_SLEEP_INTERVAL_PARAM
from ...definitions.aws.common import CommonParams as AWSCommonParams
from ...definitions.aws.common import exponential_retry
from ...definitions.aws.emr.client_wrapper import EmrJobLanguage, EmrReleaseLabel, build_job_arn, validate_job_name
from ...definitions.aws.emr.script.batch.emr_default_ABI import EmrDefaultABIPython
from ...definitions.aws.emr.script.batch.emr_scala_all_ABI import EmrAllABIScala
from ...definitions.aws.glue import catalog as glue_catalog
from ...definitions.aws.glue.client_wrapper import GlueVersion, get_bundles
from ...definitions.aws.glue.script.batch.common import (
AWS_REGION,
BOOTSTRAPPER_PLATFORM_KEY_PARAM,
CLIENT_CODE_BUCKET,
CLIENT_CODE_PARAM,
INPUT_MAP_PARAM,
OUTPUT_PARAM,
USER_EXTRA_PARAMS_PARAM,
BatchInputMap,
BatchOutput,
)
from ...definitions.aws.glue.script.batch.glueetl_default_ABI import GlueDefaultABIPython
from ...definitions.aws.s3.bucket_wrapper import MAX_BUCKET_LEN, bucket_exists, create_bucket, delete_bucket, get_bucket, put_policy
from ...definitions.aws.s3.object_wrapper import build_object_key, empty_bucket, object_exists, put_object
from ...definitions.compute import (
ComputeExecutionDetails,
ComputeFailedResponse,
ComputeFailedResponseType,
ComputeFailedSessionState,
ComputeFailedSessionStateType,
ComputeResourceDesc,
ComputeResponse,
ComputeResponseType,
ComputeSessionDesc,
ComputeSessionState,
ComputeSessionStateType,
ComputeSuccessfulResponse,
ComputeSuccessfulResponseType,
)
from ..aws_common import AWSConstructMixin
# Module-level logger. Use the module's import name (not its file path, as
# `__file__` would give) so records aggregate under the package hierarchy
# and respect per-package logging configuration.
module_logger = logging.getLogger(__name__)
@unique
class RuntimeConfig(Enum):
GlueVersion_0_9 = "GlueVersion0.9"
GlueVersion_1_0 = "GlueVersion1.0"
GlueVersion_2_0 = "GlueVersion2.0"
GlueVersion_3_0 = "GlueVersion3.0"
@classmethod
def from_glue_version(cls, glue_version: GlueVersion):
return {
GlueVersion.VERSION_0_9: RuntimeConfig.GlueVersion_0_9,
GlueVersion.VERSION_1_0: RuntimeConfig.GlueVersion_1_0,
GlueVersion.VERSION_2_0: RuntimeConfig.GlueVersion_2_0,
GlueVersion.VERSION_3_0: RuntimeConfig.GlueVersion_3_0,
}[glue_version]
class AWSEMRBatchCompute(AWSConstructMixin, BatchCompute):
"""AWS EMR based BatchCompute impl"""
@classmethod
def driver_spec(cls) -> DimensionFilter:
return DimensionFilter.load_raw(
{
Lang.SPARK_SQL: {ABI.PARAMETRIZED_QUERY: {"*": {"*": {}}}}, # irrespective of extra params
Lang.PYTHON: {
ABI.GLUE_EMBEDDED: {
# also a RuntimeConfig, supported as an explicit param for compatibility with Glue driver
"GlueVersion": {GlueVersion.AUTO.value: {}, "1.0": {}, "2.0": {}, "3.0": {}},
"InstanceConfig": {"*": {}},
# for other EMR specific runtime configurations
"RuntimeConfig": {"*": {}},
}
},
Lang.SCALA: {
ABI.GLUE_EMBEDDED: {
"GlueVersion": {GlueVersion.AUTO.value: {}, "1.0": {}, "2.0": {}, "3.0": {}},
"InstanceConfig": {"*": {}},
"RuntimeConfig": {"*": {}},
}
},
}
)
@classmethod
def runtime_config_mapping(cls) -> Dict[EmrJobLanguage, Dict[ABI, Dict[RuntimeConfig, Dict]]]:
# TODO: Using Glue 3.0 spark version as it's the only tested one so far
return {
EmrJobLanguage.PYTHON: {
ABI.GLUE_EMBEDDED: {
RuntimeConfig.GlueVersion_0_9: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrDefaultABIPython,
},
RuntimeConfig.GlueVersion_1_0: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrDefaultABIPython,
},
RuntimeConfig.GlueVersion_2_0: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrDefaultABIPython,
},
RuntimeConfig.GlueVersion_3_0: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrDefaultABIPython,
},
}
},
EmrJobLanguage.SCALA: {
ABI.GLUE_EMBEDDED: {
RuntimeConfig.GlueVersion_0_9: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrAllABIScala,
},
RuntimeConfig.GlueVersion_1_0: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrAllABIScala,
},
RuntimeConfig.GlueVersion_2_0: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrAllABIScala,
},
RuntimeConfig.GlueVersion_3_0: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrAllABIScala,
},
}
},
}
def __init__(self, params: ConstructParamsDict) -> None:
super().__init__(params)
self._emr = self._session.client("emr", region_name=self._region)
self._ec2 = self._session.resource("ec2", region_name=self._region)
self._s3 = self._session.resource("s3", region_name=self._region)
self._bucket = None
self._bucket_name = None
self._iam = self._session.resource("iam", region_name=self._region)
self._intelliflow_python_workingset_key = None
def _deserialized_init(self, params: ConstructParamsDict) -> None:
super()._deserialized_init(params)
self._emr = self._session.client("emr", region_name=self._region)
self._ec2 = self._session.resource("ec2", region_name=self._region)
self._s3 = self._session.resource("s3", region_name=self._region)
self._bucket = get_bucket(self._s3, self._bucket_name)
self._iam = self._session.resource("iam", region_name=self._region)
def _serializable_copy_init(self, org_instance: "BaseConstruct") -> None:
AWSConstructMixin._serializable_copy_init(self, org_instance)
self._emr = None
self._ec2 = None
self._s3 = None
self._bucket = None
self._iam = None
def provide_output_attributes(self, slot: Slot, user_attrs: Dict[str, Any]) -> Optional[Dict[str, Any]]:
# header: supports both so it is up to user input. but default to True if not set.
return {DATASET_HEADER_KEY: user_attrs.get(DATASET_HEADER_KEY, True), DATASET_SCHEMA_TYPE_KEY: DatasetSchemaType.SPARK_SCHEMA_JSON}
def query_external_source_spec(
self, ext_signal_source: SignalSourceAccessSpec
) -> Optional[Tuple[SignalSourceAccessSpec, DimensionSpec]]:
if ext_signal_source.source == SignalSourceType.GLUE_TABLE:
return glue_catalog.query_table_spec(ext_signal_source.database, ext_signal_source.table_name)
raise NotImplementedError(
f"This external signal source ({ext_signal_source.source!r}) cannot be queried"
f" by BatchCompute driver: {self.__class__.__name__}"
)
def dev_init(self, platform: "DevelopmentPlatform") -> None:
super().dev_init(platform)
# construct lang -> runtime_version -> {name, arn, boilerplate, suffix, ext}
# how to de-dup?? each run will create a new folder with uuid
# arn format https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonelasticmapreduce.html#amazonelasticmapreduce-resources-for-iam-policies
self._intelliflow_python_workingset_key = build_object_key(["batch"], "bundle.zip")
self._bucket_name = self.build_bucket_name()
# eagerly validate all possible job names
self.validate_job_names()
def validate_job_names(self):
for lang, lang_spec in self.runtime_config_mapping().items():
for abi, abi_spec in lang_spec.items():
for runtime_config, runtime_config_spec in abi_spec.items():
boilerplate_type = runtime_config_spec["boilerplate"]
if not boilerplate_type:
raise ValueError(f"No boilerplate defined for lang: {lang}, abi: {abi}, runtime_config: {runtime_config}")
job_name = self.build_job_name(lang, abi, runtime_config)
if len(job_name) > 255:
raise ValueError(
f"Cannot dev_init {self.__class__.__name__} due to very long"
f" AWS EMR Job Name {job_name} (limit < 255),"
f" as a result of very long context_id '{self._dev_platform.context_id}'."
)
if not validate_job_name(job_name):
raise ValueError(
f"Cannot dev_init {self.__class__.__name__} due to invalid job name {job_name} doesn't meet EMR job name "
f"pattern"
)
def build_bucket_name(self) -> str:
bucket_name: str = f"if-awsemr-{self._dev_platform.context_id.lower()}-{self._account_id}-{self._region}"
bucket_len_diff = len(bucket_name) - MAX_BUCKET_LEN
if bucket_len_diff > 0:
msg = (
f"Platform context_id '{self._dev_platform.context_id}' is too long (by {bucket_len_diff}!"
f" {self.__class__.__name__} needs to use it create {bucket_name} bucket in S3."
f" Please refer https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html"
f" to align your naming accordingly in order to be able to use this driver."
)
module_logger.error(msg)
raise ValueError(msg)
return bucket_name
def runtime_init(self, platform: "RuntimePlatform", context_owner: "BaseConstruct") -> None:
AWSConstructMixin.runtime_init(self, platform, context_owner)
self._emr = boto3.client("emr", region_name=self._region)
self._ec2 = boto3.resource("ec2", region_name=self._region)
# TODO comment the following, probably won't need at runtime
self._s3 = boto3.resource("s3")
self._bucket = get_bucket(self._s3, self._bucket_name)
def compute(
self,
materialized_inputs: List[Signal],
slot: Slot,
materialized_output: Signal,
execution_ctx_id: str,
retry_session_desc: Optional[ComputeSessionDesc] = None,
) -> ComputeResponse:
return ComputeFailedResponse(
ComputeFailedSessionStateType.COMPUTE_INTERNAL,
ComputeResourceDesc("placeholder", "placeholder", driver=self.__class__),
"NotImplemented",
"NotImplemented",
)
def get_session_state(
self, session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord"
) -> ComputeSessionState:
return ComputeFailedSessionState(ComputeFailedSessionStateType.COMPUTE_INTERNAL, session_desc, [])
def kill_session(self, active_compute_record: "RoutingTable.ComputeRecord") -> None:
pass
def provide_runtime_trusted_entities(self) -> List[str]:
return ["elasticmapreduce.amazonaws.com", "ec2.amazonaws.com"]
def provide_runtime_default_policies(self) -> List[str]:
return [
# TODO: fill policies
# "service-role/AmazonEMRServicePolicy_v2",
# "service-role/AmazonElasticMapReduceRole",
# "service-role/AmazonElasticMapReduceforEC2Role",
]
def provide_runtime_permissions(self) -> List[ConstructPermission]:
# allow exec-role (post-activation, cumulative list of all trusted entities [AWS services]) to do the following;
permissions = [
ConstructPermission([f"arn:aws:s3:::{self._bucket_name}", f"arn:aws:s3:::{self._bucket_name}/*"], ["s3:*"]),
# TODO be more picky.
# allow other service assuming our role to call the jobs here
ConstructPermission([build_job_arn(self._region, self._account_id, "*")], ["ec2:*", "elasticmapreduce:*"]),
# CW Logs (might look redundant, but please forget about other drivers while declaring these),
# deduping is handled automatically.
ConstructPermission([f"arn:aws:logs:{self._region}:{self._account_id}:*"], ["logs:*"]),
# must add a policy to allow your users the iam:PassRole permission for IAM roles to match your naming convention
ConstructPermission([self._params[AWSCommonParams.IF_EXE_ROLE]], ["iam:PassRole"]),
]
external_library_resource_arns = set()
for route in self._pending_internal_routes:
for slot in route.slots:
if slot.code_metadata.external_library_paths:
for s3_path in slot.code_metadata.external_library_paths:
try:
s3_spec = S3SignalSourceAccessSpec.from_url(account_id=None, url=s3_path)
except Exception:
module_logger.error(
f"External library path {s3_path} attached to route {route.route_id!r} "
f" via slot: {(slot.type, slot.code_lang)!r} is not supported by "
f" BatchCompute driver {self.__class__.__name__!r}. "
)
raise
# exact resource (JARs, zips)
external_library_resource_arns.add(f"arn:aws:s3:::{s3_spec.bucket}/{s3_path[len(f"s3://{s3_spec.bucket}/"):]}")
# TODO Move into <BatchCompute>
# TODO evalute moving is_batch_compute check even before the external library paths extraction.
if slot.type.is_batch_compute() and slot.permissions:
for compute_perm in slot.permissions:
# TODO check compute_perm feasibility in AWS EMR (check ARN, resource type, etc)
if compute_perm.context != PermissionContext.DEVTIME:
permissions.append(ConstructPermission(compute_perm.resource, compute_perm.action))
if external_library_resource_arns:
permissions.append(
ConstructPermission(list(external_library_resource_arns), ["s3:GetObject", "s3:GetObjectVersion", "s3:ListBucket"])
)
# might look familiar (from Processor impl maybe), but please forget about other drivers while declaring these),
# deduping is handled automatically.
ext_s3_signals = [
ext_signal for ext_signal in self._pending_external_signals if ext_signal.resource_access_spec.source == SignalSourceType.S3
]
if ext_s3_signals:
# External S3 access
permissions.append(
ConstructPermission(
[
f"arn:aws:s3:::{ext_signal.resource_access_spec.bucket}{"/" + ext_signal.resource_access_spec.folder if ext_signal.resource_access_spec.folder else ""}/*"
for ext_signal in ext_s3_signals
]
+ [
f"arn:aws:s3:::{ext_signal.resource_access_spec.bucket}/{ext_signal.resource_access_spec.folder if ext_signal.resource_access_spec.folder else ""}"
for ext_signal in ext_s3_signals
],
["s3:GetObject", "s3:GetObjectVersion", "s3:ListBucket"],
)
)
encryption_key_list: Set[str] = {
ext_signal.resource_access_spec.encryption_key
for ext_signal in ext_s3_signals
if ext_signal.resource_access_spec.encryption_key
}
if encryption_key_list:
permissions.append(
ConstructPermission(
list(encryption_key_list),
[
"kms:Decrypt",
"kms:DescribeKey",
"kms:GenerateDataKey",
"kms:DescribeCustomKeyStores",
"kms:ListKeys",
"kms:ListAliases",
],
)
)
return permissions
@classmethod
def provide_devtime_permissions(cls, params: ConstructParamsDict) -> List[ConstructPermission]:
return [
# TODO narrow down to exact operations and resources
ConstructPermission(["*"], ["elasticmapreduce:*"]),
ConstructPermission(["*"], ["ec2:*"]),
# instance-profile for ec2 instance in cluster
ConstructPermission(
[f"arn:aws:iam::{params[AWSCommonParams.ACCOUNT_ID]}:instance-profile/*"],
["iam:CreateInstanceProfile", "iam:AddRoleToInstanceProfile"],
),
]
def _provide_system_metrics(self) -> List[Signal]:
return []
def _provide_internal_metrics(self) -> List[ConstructInternalMetricDesc]:
return []
def _provide_route_metrics(self, route: Route) -> List[ConstructInternalMetricDesc]:
return []
def _provide_internal_alarms(self) -> List[Signal]:
return []
def build_bootstrapper_object_key(self) -> str:
return build_object_key(["bootstrapper"], f"{self.__class__.__name__.lower()}_RuntimePlatform.data")
def _update_bootstrapper(self, bootstrapper: "RuntimePlatform") -> None:
# uploading it to S3 and passing S3 link as job arg.
bootstrapped_platform = bootstrapper.serialize()
self.bootstrapper_object_key = self.build_bootstrapper_object_key()
exponential_retry(
put_object, {"ServiceException", "TooManyRequestsException"}, self._bucket, self.bootstrapper_object_key, bootstrapped_platform
)
def activate(self) -> None:
if not bucket_exists(self._s3, self._bucket_name):
self._setup_scripts_bucket()
else:
self._bucket = get_bucket(self._s3, self._bucket_name)
bundles: List[Tuple[str, "Path"]] = get_bundles("1.0")
self._bundle_s3_keys = []
self._bundle_s3_paths = []
for bundle_name, bundle_path in bundles:
bundle_s3_key = build_object_key(["batch", "lib"], bundle_name)
self._bundle_s3_keys.append(bundle_s3_key)
self._bundle_s3_paths.append(f"s3://{self._bucket_name}/{bundle_s3_key}")
if not object_exists(self._s3, self._bucket, bundle_s3_key):
exponential_retry(
put_object,
{"ServiceException", "TooManyRequestsException"},
self._bucket,
bundle_s3_key,
open(bundle_path, "rb").read(),
)
for lang, lang_spec in self.runtime_config_mapping().items():
if lang == EmrJobLanguage.PYTHON:
# Upload the bundle (working set) to its own bucket.
exponential_retry(
put_object,
{"ServiceException", "TooManyRequestsException"},
self._bucket,
self._intelliflow_python_workingset_key,
get_working_set_as_zip_stream(),
)
for abi, abi_spec in lang_spec.items():
for runtime_config, runtime_config_spec in abi_spec.items():
batch = runtime_config_spec["boilerplate"]()
file_ext = lang.extension
batch_script_file_key = build_object_key(["batch"], f"emretl_{batch.__class__.__name__.lower()}.{file_ext}")
exponential_retry(
put_object,
{"ServiceException", "TooManyRequestsException"},
self._bucket,
batch_script_file_key,
batch.generate_emr_script().encode("utf-8"),
)
super().activate()
def rollback(self) -> None:
super().rollback()
def terminate(self) -> None:
super().terminate()
# TODO eliminate emr instance profile
def check_update(self, prev_construct: "BaseConstruct") -> None:
super().check_update(prev_construct)
def _process_external(self, new_signals: Set[Signal], current_signals: Set[Signal]) -> None:
pass
def _process_internal(self, new_routes: Set[Route], current_routes: Set[Route]) -> None:
pass
def _process_internal_signals(self, new_signals: Set[Signal], current_signals: Set[Signal]) -> None:
pass
def _process_construct_connections(
self, new_construct_conns: Set["_PendingConnRequest"], current_construct_conns: Set["_PendingConnRequest"]
) -> None:
pass
def _process_security_conf(self, new_security_conf: ConstructSecurityConf, current_security_conf: ConstructSecurityConf) -> None:
pass
def _revert_external(self, signals: Set[Signal], prev_signals: Set[Signal]) -> None:
pass
def _revert_internal(self, routes: Set[Route], prev_routes: Set[Route]) -> None:
pass
def _revert_internal_signals(self, signals: Set[Signal], prev_signals: Set[Signal]) -> None:
pass
def _revert_construct_connections(
self, construct_conns: Set["_PendingConnRequest"], prev_construct_conns: Set["_PendingConnRequest"]
) -> None:
pass
def _revert_security_conf(self, security_conf: ConstructSecurityConf, prev_security_conf: ConstructSecurityConf) -> None:
pass
def build_job_name(self, lang: EmrJobLanguage, abi: ABI, runtime_config: RuntimeConfig):
return (
f"IntelliFlow-{self._dev_platform.context_id}-{self._region}-{self.__class__.__name__}-{lang.extension}-{abi.name.lower()}-"
f"{runtime_config.name}"
)
def _setup_scripts_bucket(self):
"""Initial setup of storage bucket. Enforces policy for access from dev and exec roles."""
try:
self._bucket = create_bucket(self._s3, self._bucket_name, self._region)
except ClientError as error:
if error.response["Error"]["Code"] == "InvalidBucketName":
msg = (
f"Platform context_id '{self._dev_platform.context_id}' is not valid!"
f" {self.__class__.__name__} needs to use it create {self._bucket_name} bucket in S3."
f" Please refer https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html"
f" to align your naming accordingly in order to be able to use this driver."
)
module_logger.error(msg)
raise ValueError(msg)
elif error.response["Error"]["Code"] == "BucketAlreadyExists":
msg = (
f"Bucket {self._bucket_name!r} has been taken by some other application. Cannot "
f"proceed with activation until S3 bucket is retained by same account "
f" (AWS Entity: {self._params[AWSCommonParams.IF_DEV_ROLE]!r}, Region: {self.region})."
)
module_logger.error(msg)
raise RuntimeError(msg, error)
else:
raise
self._setup_activated_bucket_policy()
def _setup_activated_bucket_policy(self) -> None:
put_policy_desc = {
"Version": "2012-10-17",
"Id": str(uuid.uuid1()),
"Statement": [
{
"Effect": "Allow",
"Principal": {"AWS": self._params[AWSCommonParams.IF_DEV_ROLE]},
"Action": ["s3:*"],
"Resource": [f"arn:aws:s3:::{self._bucket.name}/*", f"arn:aws:s3:::{self._bucket.name}"],
},
{
"Effect": "Allow",
"Principal": {"AWS": self._params[AWSCommonParams.IF_EXE_ROLE]},
"Action": ["s3:*"],
# TODO post-MVP
# the following is the complete list for both Data sources + targets combined.
# 'Action': [ 's3:GetObject', 's3:PutObject', 's3:DeleteObject', 's3:GetObjectVersion' 's3:ListBucket' ],
"Resource": [f"arn:aws:s3:::{self._bucket.name}/*", f"arn:aws:s3:::{self._bucket.name}"],
},
],
}
try:
exponential_retry(put_policy, ["MalformedPolicy"], self._s3, self._bucket.name, put_policy_desc)
except ClientError as error:
if error.response["Error"]["Code"] == "MalformedPolicy":
module_logger.error("Couldn't put the policy for EMR scripts folder! Error:", str(error))
else:
raise
| # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import uuid
from enum import Enum, unique
from typing import Any, ClassVar, Dict, List, Optional, Set, Tuple, Type, Union, cast
import boto3
from botocore.exceptions import ClientError
from intelliflow.core.deployment import get_working_set_as_zip_stream, is_environment_immutable
from intelliflow.core.permission import PermissionContext
from intelliflow.core.platform.definitions.aws.glue.script.batch.glueetl_scala_all_ABI import GlueAllABIScala
from intelliflow.core.signal_processing import DimensionFilter, DimensionSpec, Signal, Slot
from intelliflow.core.signal_processing.definitions.compute_defs import ABI, Lang
from intelliflow.core.signal_processing.routing_runtime_constructs import Route
from intelliflow.core.signal_processing.signal import SignalDomainSpec, SignalType
from intelliflow.core.signal_processing.signal_source import (
DATASET_HEADER_KEY,
DATASET_SCHEMA_TYPE_KEY,
ENCRYPTION_KEY_KEY,
CWMetricSignalSourceAccessSpec,
DatasetSchemaType,
S3SignalSourceAccessSpec,
SignalSourceAccessSpec,
SignalSourceType,
)
from intelliflow.core.signal_processing.slot import SlotCodeMetadata, SlotCodeType, SlotType
from ...constructs import (
BatchCompute,
ConstructInternalMetricDesc,
ConstructParamsDict,
ConstructPermission,
ConstructSecurityConf,
EncryptionKeyAllocationLevel,
Storage,
)
from ...definitions.aws.common import AWS_COMMON_RETRYABLE_ERRORS, MAX_SLEEP_INTERVAL_PARAM
from ...definitions.aws.common import CommonParams as AWSCommonParams
from ...definitions.aws.common import exponential_retry
from ...definitions.aws.emr.client_wrapper import EmrJobLanguage, EmrReleaseLabel, build_job_arn, validate_job_name
from ...definitions.aws.emr.script.batch.emr_default_ABI import EmrDefaultABIPython
from ...definitions.aws.emr.script.batch.emr_scala_all_ABI import EmrAllABIScala
from ...definitions.aws.glue import catalog as glue_catalog
from ...definitions.aws.glue.client_wrapper import GlueVersion, get_bundles
from ...definitions.aws.glue.script.batch.common import (
AWS_REGION,
BOOTSTRAPPER_PLATFORM_KEY_PARAM,
CLIENT_CODE_BUCKET,
CLIENT_CODE_PARAM,
INPUT_MAP_PARAM,
OUTPUT_PARAM,
USER_EXTRA_PARAMS_PARAM,
BatchInputMap,
BatchOutput,
)
from ...definitions.aws.glue.script.batch.glueetl_default_ABI import GlueDefaultABIPython
from ...definitions.aws.s3.bucket_wrapper import MAX_BUCKET_LEN, bucket_exists, create_bucket, delete_bucket, get_bucket, put_policy
from ...definitions.aws.s3.object_wrapper import build_object_key, empty_bucket, object_exists, put_object
from ...definitions.compute import (
ComputeExecutionDetails,
ComputeFailedResponse,
ComputeFailedResponseType,
ComputeFailedSessionState,
ComputeFailedSessionStateType,
ComputeResourceDesc,
ComputeResponse,
ComputeResponseType,
ComputeSessionDesc,
ComputeSessionState,
ComputeSessionStateType,
ComputeSuccessfulResponse,
ComputeSuccessfulResponseType,
)
from ..aws_common import AWSConstructMixin
# Module-level logger. Use the module's import name (not its file path, as
# `__file__` would give) so records aggregate under the package hierarchy
# and respect per-package logging configuration.
module_logger = logging.getLogger(__name__)
@unique
class RuntimeConfig(Enum):
GlueVersion_0_9 = "GlueVersion0.9"
GlueVersion_1_0 = "GlueVersion1.0"
GlueVersion_2_0 = "GlueVersion2.0"
GlueVersion_3_0 = "GlueVersion3.0"
@classmethod
def from_glue_version(cls, glue_version: GlueVersion):
return {
GlueVersion.VERSION_0_9: RuntimeConfig.GlueVersion_0_9,
GlueVersion.VERSION_1_0: RuntimeConfig.GlueVersion_1_0,
GlueVersion.VERSION_2_0: RuntimeConfig.GlueVersion_2_0,
GlueVersion.VERSION_3_0: RuntimeConfig.GlueVersion_3_0,
}[glue_version]
class AWSEMRBatchCompute(AWSConstructMixin, BatchCompute):
"""AWS EMR based BatchCompute impl"""
@classmethod
def driver_spec(cls) -> DimensionFilter:
return DimensionFilter.load_raw(
{
Lang.SPARK_SQL: {ABI.PARAMETRIZED_QUERY: {"*": {"*": {}}}}, # irrespective of extra params
Lang.PYTHON: {
ABI.GLUE_EMBEDDED: {
# also a RuntimeConfig, supported as an explicit param for compatibility with Glue driver
"GlueVersion": {GlueVersion.AUTO.value: {}, "1.0": {}, "2.0": {}, "3.0": {}},
"InstanceConfig": {"*": {}},
# for other EMR specific runtime configurations
"RuntimeConfig": {"*": {}},
}
},
Lang.SCALA: {
ABI.GLUE_EMBEDDED: {
"GlueVersion": {GlueVersion.AUTO.value: {}, "1.0": {}, "2.0": {}, "3.0": {}},
"InstanceConfig": {"*": {}},
"RuntimeConfig": {"*": {}},
}
},
}
)
@classmethod
def runtime_config_mapping(cls) -> Dict[EmrJobLanguage, Dict[ABI, Dict[RuntimeConfig, Dict]]]:
# TODO: Using Glue 3.0 spark version as it's the only tested one so far
return {
EmrJobLanguage.PYTHON: {
ABI.GLUE_EMBEDDED: {
RuntimeConfig.GlueVersion_0_9: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrDefaultABIPython,
},
RuntimeConfig.GlueVersion_1_0: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrDefaultABIPython,
},
RuntimeConfig.GlueVersion_2_0: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrDefaultABIPython,
},
RuntimeConfig.GlueVersion_3_0: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrDefaultABIPython,
},
}
},
EmrJobLanguage.SCALA: {
ABI.GLUE_EMBEDDED: {
RuntimeConfig.GlueVersion_0_9: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrAllABIScala,
},
RuntimeConfig.GlueVersion_1_0: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrAllABIScala,
},
RuntimeConfig.GlueVersion_2_0: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrAllABIScala,
},
RuntimeConfig.GlueVersion_3_0: {
"runtime_version": EmrReleaseLabel.resolve_from_glue_version(GlueVersion.VERSION_3_0),
"boilerplate": EmrAllABIScala,
},
}
},
}
def __init__(self, params: ConstructParamsDict) -> None:
super().__init__(params)
self._emr = self._session.client("emr", region_name=self._region)
self._ec2 = self._session.resource("ec2", region_name=self._region)
self._s3 = self._session.resource("s3", region_name=self._region)
self._bucket = None
self._bucket_name = None
self._iam = self._session.resource("iam", region_name=self._region)
self._intelliflow_python_workingset_key = None
def _deserialized_init(self, params: ConstructParamsDict) -> None:
super()._deserialized_init(params)
self._emr = self._session.client("emr", region_name=self._region)
self._ec2 = self._session.resource("ec2", region_name=self._region)
self._s3 = self._session.resource("s3", region_name=self._region)
self._bucket = get_bucket(self._s3, self._bucket_name)
self._iam = self._session.resource("iam", region_name=self._region)
def _serializable_copy_init(self, org_instance: "BaseConstruct") -> None:
AWSConstructMixin._serializable_copy_init(self, org_instance)
self._emr = None
self._ec2 = None
self._s3 = None
self._bucket = None
self._iam = None
def provide_output_attributes(self, slot: Slot, user_attrs: Dict[str, Any]) -> Optional[Dict[str, Any]]:
# header: supports both so it is up to user input. but default to True if not set.
return {DATASET_HEADER_KEY: user_attrs.get(DATASET_HEADER_KEY, True), DATASET_SCHEMA_TYPE_KEY: DatasetSchemaType.SPARK_SCHEMA_JSON}
def query_external_source_spec(
self, ext_signal_source: SignalSourceAccessSpec
) -> Optional[Tuple[SignalSourceAccessSpec, DimensionSpec]]:
if ext_signal_source.source == SignalSourceType.GLUE_TABLE:
return glue_catalog.query_table_spec(ext_signal_source.database, ext_signal_source.table_name)
raise NotImplementedError(
f"This external signal source ({ext_signal_source.source!r}) cannot be queried"
f" by BatchCompute driver: {self.__class__.__name__}"
)
def dev_init(self, platform: "DevelopmentPlatform") -> None:
super().dev_init(platform)
# construct lang -> runtime_version -> {name, arn, boilerplate, suffix, ext}
# how to de-dup?? each run will create a new folder with uuid
# arn format https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonelasticmapreduce.html#amazonelasticmapreduce-resources-for-iam-policies
self._intelliflow_python_workingset_key = build_object_key(["batch"], "bundle.zip")
self._bucket_name = self.build_bucket_name()
# eagerly validate all possible job names
self.validate_job_names()
def validate_job_names(self):
    """Eagerly verify that every (lang, abi, runtime-config) combination
    yields a boilerplate and a well-formed EMR job name, failing fast at
    dev-init time rather than at execution time.
    """
    for lang, abi_map in self.runtime_config_mapping().items():
        for abi, config_map in abi_map.items():
            for runtime_config, spec in config_map.items():
                # Every combination must declare a boilerplate generator.
                if not spec["boilerplate"]:
                    raise ValueError(f"No boilerplate defined for lang: {lang}, abi: {abi}, runtime_config: {runtime_config}")
                job_name = self.build_job_name(lang, abi, runtime_config)
                # EMR constrains job names in length and character pattern.
                if len(job_name) > 255:
                    raise ValueError(
                        f"Cannot dev_init {self.__class__.__name__} due to very long"
                        f" AWS EMR Job Name {job_name} (limit < 255),"
                        f" as a result of very long context_id '{self._dev_platform.context_id}'."
                    )
                if not validate_job_name(job_name):
                    raise ValueError(
                        f"Cannot dev_init {self.__class__.__name__} due to invalid job name {job_name} doesn't meet EMR job name "
                        f"pattern"
                    )
def build_bucket_name(self) -> str:
    """Compose this driver's S3 bucket name and enforce the S3 length cap."""
    name = f"if-awsemr-{self._dev_platform.context_id.lower()}-{self._account_id}-{self._region}"
    overflow = len(name) - MAX_BUCKET_LEN
    if overflow <= 0:
        return name
    # Bucket name would exceed the S3 limit: surface an actionable error.
    msg = (
        f"Platform context_id '{self._dev_platform.context_id}' is too long (by {overflow}!"
        f" {self.__class__.__name__} needs to use it create {name} bucket in S3."
        f" Please refer https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html"
        f" to align your naming accordingly in order to be able to use this driver."
    )
    module_logger.error(msg)
    raise ValueError(msg)
def runtime_init(self, platform: "RuntimePlatform", context_owner: "BaseConstruct") -> None:
    """Runtime (in-AWS) init: rebuild clients with default credentials."""
    AWSConstructMixin.runtime_init(self, platform, context_owner)
    self._emr = boto3.client("emr", region_name=self._region)
    self._ec2 = boto3.resource("ec2", region_name=self._region)
    # TODO comment the following, probably won't need at runtime
    self._s3 = boto3.resource("s3")
    self._bucket = get_bucket(self._s3, self._bucket_name)
def compute(
    self,
    materialized_inputs: List[Signal],
    slot: Slot,
    materialized_output: Signal,
    execution_ctx_id: str,
    retry_session_desc: Optional[ComputeSessionDesc] = None,
) -> ComputeResponse:
    """Not implemented yet: always reports an internal compute failure with
    placeholder resource info."""
    return ComputeFailedResponse(
        ComputeFailedSessionStateType.COMPUTE_INTERNAL,
        ComputeResourceDesc("placeholder", "placeholder", driver=self.__class__),
        "NotImplemented",
        "NotImplemented",
    )
def get_session_state(
    self, session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord"
) -> ComputeSessionState:
    """Not implemented yet: always reports an internal failure state."""
    return ComputeFailedSessionState(ComputeFailedSessionStateType.COMPUTE_INTERNAL, session_desc, [])
def kill_session(self, active_compute_record: "RoutingTable.ComputeRecord") -> None:
    """No-op: session termination is not implemented for this driver yet."""
    pass
def provide_runtime_trusted_entities(self) -> List[str]:
    """AWS services that must be able to assume the exec role: the EMR
    control plane and the EC2 instances it launches."""
    trusted = ["elasticmapreduce.amazonaws.com"]
    trusted.append("ec2.amazonaws.com")
    return trusted
def provide_runtime_default_policies(self) -> List[str]:
    """Managed policy names to attach by default: none yet.

    TODO: fill policies, candidates:
      - service-role/AmazonEMRServicePolicy_v2
      - service-role/AmazonElasticMapReduceRole
      - service-role/AmazonElasticMapReduceforEC2Role
    """
    return []
def provide_runtime_permissions(self) -> List[ConstructPermission]:
    """IAM permissions required by the exec role at runtime.

    Aggregates: full access to the driver's bucket, EMR/EC2 job control,
    CloudWatch Logs, PassRole for the exec role, user-declared per-slot
    compute permissions, read access to external library objects on S3, and
    read (plus KMS decrypt) access for external S3 signals.
    """
    # allow exec-role (post-activation, cumulative list of all trusted entities [AWS services]) to do the following;
    permissions = [
        ConstructPermission([f"arn:aws:s3:::{self._bucket_name}", f"arn:aws:s3:::{self._bucket_name}/*"], ["s3:*"]),
        # TODO be more picky.
        # allow other service assuming our role to call the jobs here
        ConstructPermission([build_job_arn(self._region, self._account_id, "*")], ["ec2:*", "elasticmapreduce:*"]),
        # CW Logs (might look redundant, but please forget about other drivers while declaring these),
        # deduping is handled automatically.
        ConstructPermission([f"arn:aws:logs:{self._region}:{self._account_id}:*"], ["logs:*"]),
        # must add a policy to allow your users the iam:PassRole permission for IAM roles to match your naming convention
        ConstructPermission([self._params[AWSCommonParams.IF_EXE_ROLE]], ["iam:PassRole"]),
    ]
    # Collect the exact S3 objects (JARs, zips) referenced as external libraries
    # by any slot of any pending internal route.
    external_library_resource_arns = set()
    for route in self._pending_internal_routes:
        for slot in route.slots:
            if slot.code_metadata.external_library_paths:
                for s3_path in slot.code_metadata.external_library_paths:
                    try:
                        s3_spec = S3SignalSourceAccessSpec.from_url(account_id=None, url=s3_path)
                    except Exception:
                        # Non-S3 (or malformed) library paths are not supported here.
                        module_logger.error(
                            f"External library path {s3_path} attached to route {route.route_id!r} "
                            f" via slot: {(slot.type, slot.code_lang)!r} is not supported by "
                            f" BatchCompute driver {self.__class__.__name__!r}. "
                        )
                        raise
                    # exact resource (JARs, zips)
                    external_library_resource_arns.add(f"arn:aws:s3:::{s3_spec.bucket}/{s3_path[len(f's3://{s3_spec.bucket}/'):]}")
            # TODO Move into <BatchCompute>
            # TODO evalute moving is_batch_compute check even before the external library paths extraction.
            if slot.type.is_batch_compute() and slot.permissions:
                for compute_perm in slot.permissions:
                    # TODO check compute_perm feasibility in AWS EMR (check ARN, resource type, etc)
                    if compute_perm.context != PermissionContext.DEVTIME:
                        permissions.append(ConstructPermission(compute_perm.resource, compute_perm.action))
    if external_library_resource_arns:
        permissions.append(
            ConstructPermission(list(external_library_resource_arns), ["s3:GetObject", "s3:GetObjectVersion", "s3:ListBucket"])
        )
    # might look familiar (from Processor impl maybe), but please forget about other drivers while declaring these),
    # deduping is handled automatically.
    ext_s3_signals = [
        ext_signal for ext_signal in self._pending_external_signals if ext_signal.resource_access_spec.source == SignalSourceType.S3
    ]
    if ext_s3_signals:
        # External S3 access
        permissions.append(
            ConstructPermission(
                [
                    f"arn:aws:s3:::{ext_signal.resource_access_spec.bucket}{'/' + ext_signal.resource_access_spec.folder if ext_signal.resource_access_spec.folder else ''}/*"
                    for ext_signal in ext_s3_signals
                ]
                + [
                    f"arn:aws:s3:::{ext_signal.resource_access_spec.bucket}/{ext_signal.resource_access_spec.folder if ext_signal.resource_access_spec.folder else ''}"
                    for ext_signal in ext_s3_signals
                ],
                ["s3:GetObject", "s3:GetObjectVersion", "s3:ListBucket"],
            )
        )
        # KMS keys protecting the external S3 data, if any were declared.
        encryption_key_list: Set[str] = {
            ext_signal.resource_access_spec.encryption_key
            for ext_signal in ext_s3_signals
            if ext_signal.resource_access_spec.encryption_key
        }
        if encryption_key_list:
            permissions.append(
                ConstructPermission(
                    list(encryption_key_list),
                    [
                        "kms:Decrypt",
                        "kms:DescribeKey",
                        "kms:GenerateDataKey",
                        "kms:DescribeCustomKeyStores",
                        "kms:ListKeys",
                        "kms:ListAliases",
                    ],
                )
            )
    return permissions
@classmethod
def provide_devtime_permissions(cls, params: ConstructParamsDict) -> List[ConstructPermission]:
    """IAM permissions the dev role needs to manage EMR resources at
    development/activation time."""
    return [
        # TODO narrow down to exact operations and resources
        ConstructPermission(["*"], ["elasticmapreduce:*"]),
        ConstructPermission(["*"], ["ec2:*"]),
        # instance-profile for ec2 instance in cluster
        ConstructPermission(
            [f"arn:aws:iam::{params[AWSCommonParams.ACCOUNT_ID]}:instance-profile/*"],
            ["iam:CreateInstanceProfile", "iam:AddRoleToInstanceProfile"],
        ),
    ]
def _provide_system_metrics(self) -> List[Signal]:
    """No AWS-emitted (system) metrics are exposed by this driver yet."""
    return list()
def _provide_internal_metrics(self) -> List[ConstructInternalMetricDesc]:
    """No internal driver metrics are emitted yet."""
    return list()
def _provide_route_metrics(self, route: Route) -> List[ConstructInternalMetricDesc]:
    """No per-route metrics are emitted yet."""
    return list()
def _provide_internal_alarms(self) -> List[Signal]:
    """No internal alarms are declared by this driver yet."""
    return list()
def build_bootstrapper_object_key(self) -> str:
    """S3 key under which the serialized RuntimePlatform bootstrapper is stored."""
    return build_object_key(["bootstrapper"], f"{self.__class__.__name__.lower()}_RuntimePlatform.data")
def _update_bootstrapper(self, bootstrapper: "RuntimePlatform") -> None:
    """Serialize the runtime platform and upload it to the driver bucket;
    EMR jobs receive the S3 link as a job argument."""
    # uploading it to S3 and passing S3 link as job arg.
    bootstrapped_platform = bootstrapper.serialize()
    self.bootstrapper_object_key = self.build_bootstrapper_object_key()
    exponential_retry(
        put_object, {"ServiceException", "TooManyRequestsException"}, self._bucket, self.bootstrapper_object_key, bootstrapped_platform
    )
def activate(self) -> None:
    """Provision the driver's AWS resources: scripts bucket, library
    bundles, the Python working set, and per-runtime-config EMR
    boilerplate scripts."""
    if not bucket_exists(self._s3, self._bucket_name):
        self._setup_scripts_bucket()
    else:
        self._bucket = get_bucket(self._s3, self._bucket_name)
    bundles: List[Tuple[str, "Path"]] = get_bundles("1.0")
    self._bundle_s3_keys = []
    self._bundle_s3_paths = []
    for bundle_name, bundle_path in bundles:
        bundle_s3_key = build_object_key(["batch", "lib"], bundle_name)
        self._bundle_s3_keys.append(bundle_s3_key)
        self._bundle_s3_paths.append(f"s3://{self._bucket_name}/{bundle_s3_key}")
        if not object_exists(self._s3, self._bucket, bundle_s3_key):
            # FIX: read the bundle via a context manager; the original called
            # open(bundle_path, "rb").read() inline and leaked the file handle.
            with open(bundle_path, "rb") as bundle_file:
                bundle_bytes = bundle_file.read()
            exponential_retry(
                put_object,
                {"ServiceException", "TooManyRequestsException"},
                self._bucket,
                bundle_s3_key,
                bundle_bytes,
            )
    for lang, lang_spec in self.runtime_config_mapping().items():
        if lang == EmrJobLanguage.PYTHON:
            # Upload the bundle (working set) to its own bucket.
            exponential_retry(
                put_object,
                {"ServiceException", "TooManyRequestsException"},
                self._bucket,
                self._intelliflow_python_workingset_key,
                get_working_set_as_zip_stream(),
            )
        for abi, abi_spec in lang_spec.items():
            for runtime_config, runtime_config_spec in abi_spec.items():
                # Render and upload the EMR boilerplate script for this combo.
                batch = runtime_config_spec["boilerplate"]()
                file_ext = lang.extension
                batch_script_file_key = build_object_key(["batch"], f"emretl_{batch.__class__.__name__.lower()}.{file_ext}")
                exponential_retry(
                    put_object,
                    {"ServiceException", "TooManyRequestsException"},
                    self._bucket,
                    batch_script_file_key,
                    batch.generate_emr_script().encode("utf-8"),
                )
    super().activate()
def rollback(self) -> None:
    """No driver-specific rollback; defer to the base implementation."""
    super().rollback()
def terminate(self) -> None:
    """Tear down driver resources; currently only the base teardown runs."""
    super().terminate()
    # TODO eliminate emr instance profile
def check_update(self, prev_construct: "BaseConstruct") -> None:
    """No driver-specific update checks; defer to the base implementation."""
    super().check_update(prev_construct)
def _process_external(self, new_signals: Set[Signal], current_signals: Set[Signal]) -> None:
    """No-op: this driver keeps no state per external signal changes."""
    pass
def _process_internal(self, new_routes: Set[Route], current_routes: Set[Route]) -> None:
    """No-op: internal route changes need no driver-side processing."""
    pass
def _process_internal_signals(self, new_signals: Set[Signal], current_signals: Set[Signal]) -> None:
    """No-op: internal signal changes need no driver-side processing."""
    pass
def _process_construct_connections(
    self, new_construct_conns: Set["_PendingConnRequest"], current_construct_conns: Set["_PendingConnRequest"]
) -> None:
    """No-op: construct connection changes need no driver-side processing."""
    pass
def _process_security_conf(self, new_security_conf: ConstructSecurityConf, current_security_conf: ConstructSecurityConf) -> None:
    """No-op: security configuration changes need no driver-side processing."""
    pass
def _revert_external(self, signals: Set[Signal], prev_signals: Set[Signal]) -> None:
    """No-op counterpart of _process_external."""
    pass
def _revert_internal(self, routes: Set[Route], prev_routes: Set[Route]) -> None:
    """No-op counterpart of _process_internal."""
    pass
def _revert_internal_signals(self, signals: Set[Signal], prev_signals: Set[Signal]) -> None:
    """No-op counterpart of _process_internal_signals."""
    pass
def _revert_construct_connections(
    self, construct_conns: Set["_PendingConnRequest"], prev_construct_conns: Set["_PendingConnRequest"]
) -> None:
    """No-op counterpart of _process_construct_connections."""
    pass
def _revert_security_conf(self, security_conf: ConstructSecurityConf, prev_security_conf: ConstructSecurityConf) -> None:
    """No-op counterpart of _process_security_conf."""
    pass
def build_job_name(self, lang: EmrJobLanguage, abi: ABI, runtime_config: RuntimeConfig):
    """Deterministic EMR job name for a (lang, abi, runtime-config) combo."""
    parts = (
        "IntelliFlow",
        self._dev_platform.context_id,
        self._region,
        self.__class__.__name__,
        lang.extension,
        abi.name.lower(),
        runtime_config.name,
    )
    return "-".join(str(part) for part in parts)
def _setup_scripts_bucket(self):
    """Initial setup of storage bucket. Enforces policy for access from dev and exec roles.

    Raises ValueError on an invalid bucket name, RuntimeError when the name
    is already taken by another account, and re-raises any other S3 error.
    """
    try:
        self._bucket = create_bucket(self._s3, self._bucket_name, self._region)
    except ClientError as error:
        if error.response["Error"]["Code"] == "InvalidBucketName":
            msg = (
                f"Platform context_id '{self._dev_platform.context_id}' is not valid!"
                f" {self.__class__.__name__} needs to use it create {self._bucket_name} bucket in S3."
                f" Please refer https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html"
                f" to align your naming accordingly in order to be able to use this driver."
            )
            module_logger.error(msg)
            raise ValueError(msg)
        elif error.response["Error"]["Code"] == "BucketAlreadyExists":
            # NOTE(review): the message interpolates `self.region` (no underscore),
            # unlike `self._region` used elsewhere — presumably a `region`
            # property exists on a base/mixin; confirm.
            msg = (
                f"Bucket {self._bucket_name!r} has been taken by some other application. Cannot "
                f"proceed with activation until S3 bucket is retained by same account "
                f" (AWS Entity: {self._params[AWSCommonParams.IF_DEV_ROLE]!r}, Region: {self.region})."
            )
            module_logger.error(msg)
            raise RuntimeError(msg, error)
        else:
            # Unexpected S3 error: propagate.
            raise
    self._setup_activated_bucket_policy()
def _setup_activated_bucket_policy(self) -> None:
    """Attach a bucket policy granting the dev and exec roles full access
    to this driver's scripts bucket."""
    put_policy_desc = {
        "Version": "2012-10-17",
        "Id": str(uuid.uuid1()),
        "Statement": [
            {
                "Effect": "Allow",
                "Principal": {"AWS": self._params[AWSCommonParams.IF_DEV_ROLE]},
                "Action": ["s3:*"],
                "Resource": [f"arn:aws:s3:::{self._bucket.name}/*", f"arn:aws:s3:::{self._bucket.name}"],
            },
            {
                "Effect": "Allow",
                "Principal": {"AWS": self._params[AWSCommonParams.IF_EXE_ROLE]},
                "Action": ["s3:*"],
                # TODO post-MVP
                # the following is the complete list for both Data sources + targets combined.
                # 'Action': [ 's3:GetObject', 's3:PutObject', 's3:DeleteObject', 's3:GetObjectVersion' 's3:ListBucket' ],
                "Resource": [f"arn:aws:s3:::{self._bucket.name}/*", f"arn:aws:s3:::{self._bucket.name}"],
            },
        ],
    }
    try:
        # FIX: pass retryables as a set, consistent with every other
        # exponential_retry call site in this driver.
        exponential_retry(put_policy, {"MalformedPolicy"}, self._s3, self._bucket.name, put_policy_desc)
    except ClientError as error:
        if error.response["Error"]["Code"] == "MalformedPolicy":
            # FIX: use lazy %-formatting; the original passed str(error) as a
            # positional logging arg while the format string had no placeholder,
            # which makes the logging module report a formatting error.
            module_logger.error("Couldn't put the policy for EMR scripts folder! Error: %s", str(error))
        else:
            raise
|
"""
DESAFIO 088: Palpites Para a Mega Sena
Faça um programa que ajude um jogador da MEGA SENA a criar palpites. O programa vai perguntar quantos jogos
serão gerados e vai sortear 6 números entre 1 e 60 para cada jogo, cadastrando tudo em uma lista composta.
"""
from random import randint
from time import sleep
sorteio, megasena = list(), list()
print('-' * 30)
print(f'{'JOGA NA MEGA SENA':^30}')
print('-' * 30)
jogos = int(input('Quantos jogos você quer que eu sorteie? '))
print('-=' * 4, end=' ')
print(f'SORTEANDO {jogos} JOGOS', end=' ')
print('-=' * 4)
for j in range(jogos):
b1, b2, b3 = randint(1, 60), randint(1, 60), randint(1, 60)
b4, b5, b6 = randint(1, 60), randint(1, 60), randint(1, 60)
while b2 == b1:
b2 = randint(1, 60)
while b3 == b2 or b3 == b1:
b3 = randint(1, 60)
while b4 == b3 or b4 == b2 or b4 == b1:
b4 = randint(1, 60)
while b5 == b4 or b5 == b3 or b5 == b2 or b5 == b1:
b5 = randint(1, 60)
while b6 == b5 or b6 == b4 or b6 == b3 or b6 == b2 or b6 == b1:
b6 = randint(1, 60)
sorteio.append(b1)
sorteio.append(b2)
sorteio.append(b3)
sorteio.append(b4)
sorteio.append(b5)
sorteio.append(b6)
sorteio.sort()
megasena.append(sorteio[:])
sorteio.clear()
print(f'Jogo {j + 1}: {megasena[j]}')
sleep(1)
print('-=' * 5, end=' ')
print('< BOA SORTE! >', end=' ')
print('-=' * 5)
| """
DESAFIO 088: Palpites Para a Mega Sena
Faça um programa que ajude um jogador da MEGA SENA a criar palpites. O programa vai perguntar quantos jogos
serão gerados e vai sortear 6 números entre 1 e 60 para cada jogo, cadastrando tudo em uma lista composta.
"""
from random import randint
from time import sleep
sorteio, megasena = list(), list()
print('-' * 30)
print(f'{"JOGA NA MEGA SENA":^30}')
print('-' * 30)
jogos = int(input('Quantos jogos você quer que eu sorteie? '))
print('-=' * 4, end=' ')
print(f'SORTEANDO {jogos} JOGOS', end=' ')
print('-=' * 4)
for j in range(jogos):
b1, b2, b3 = randint(1, 60), randint(1, 60), randint(1, 60)
b4, b5, b6 = randint(1, 60), randint(1, 60), randint(1, 60)
while b2 == b1:
b2 = randint(1, 60)
while b3 == b2 or b3 == b1:
b3 = randint(1, 60)
while b4 == b3 or b4 == b2 or b4 == b1:
b4 = randint(1, 60)
while b5 == b4 or b5 == b3 or b5 == b2 or b5 == b1:
b5 = randint(1, 60)
while b6 == b5 or b6 == b4 or b6 == b3 or b6 == b2 or b6 == b1:
b6 = randint(1, 60)
sorteio.append(b1)
sorteio.append(b2)
sorteio.append(b3)
sorteio.append(b4)
sorteio.append(b5)
sorteio.append(b6)
sorteio.sort()
megasena.append(sorteio[:])
sorteio.clear()
print(f'Jogo {j + 1}: {megasena[j]}')
sleep(1)
print('-=' * 5, end=' ')
print('< BOA SORTE! >', end=' ')
print('-=' * 5)
|
import socket
import select
import ast

# Fixed-width header carrying the byte-length (or a protocol tag) of the
# payload that follows.
HEADER_LENGTH = 10
# IP = "10.52.3.25"
IP = "127.0.0.1"
# IP = '25.135.227.60'
PORT = 5000

server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # just opening the TCP socket for the server.
# AF: address family, INET is IPv4.
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# this line allows you to reconnect to the server without it saying that the address is in use
server_socket.bind((IP, PORT))  # binding the ip and the port to the socket so I can use it as a server.
server_socket.listen(2)  # the accept queue only takes 2 pending connections.
print("Server is running: \nWaiting for Connections...")
sockets_list = [server_socket]  # since we already have the server socket as a socket, we put it here.
clients = {}  # socket -> user handshake data, so user names can be printed nicely.
def recv_message(client_socket, metadata):
    """Read one framed message from client_socket.

    Returns a {'header', 'data'} dict for plain chat messages, a tagged tuple
    for the special protocols (file data, sound, accept/cancel, file
    announcement), or False on a closed connection.

    metadata: stringified list of (name, size) packet descriptors captured
    from a prior '$file$l#' frame — consumed by the 'filep:' branch.
    TODO confirm exact shape against the client implementation.
    """
    # try:
    message_header = client_socket.recv(HEADER_LENGTH)
    if 'filep:' in message_header.decode('utf-8'):
        # File payload follows; total size comes from the captured metadata.
        dataList = []
        meta = ast.literal_eval(metadata)
        data_size = sum([x[1] for x in meta])
        # NOTE(review): this first recv is discarded — it is neither appended
        # nor subtracted from data_size; looks like a framing quirk, confirm.
        data = client_socket.recv(1000)
        while data_size > 0:
            data = client_socket.recv(1000)
            dataList.append(data)
            data_size -= len(data)
        print("Done Receiving.")
        return ('$fileData$', dataList)
        # NOTE(review): unreachable after the return above.
        return False
    if '$ou#+' in message_header.decode('utf-8'):
        # Voice-message marker: relay the header as-is.
        return ('sound', message_header)
    if '$acceptDd$' in message_header.decode('utf-8'):
        return ('accept', message_header)
    if '$cancelDd$' in message_header.decode('utf-8'):
        return ('cancel', message_header)
    if '$file$l#' in message_header.decode('utf-8'):
        # File announcement: the header carries the filename length after '#'.
        fileLength = message_header.decode('utf-8').strip().split('#')[1]
        filename = client_socket.recv(int(fileLength))
        clean = client_socket.recv(10)
        f_c = (filename, clean)
        # metadata frame: '<len>-' prefix, then the stringified packet list.
        lenPackets = client_socket.recv(10).decode().split('-')
        read = lenPackets[1]
        to_read = int(lenPackets[0]) - len(read)
        metadata = client_socket.recv(to_read)
        metadata = read + metadata.decode()
        return ('file', message_header, f_c, metadata)
    if not len(message_header):
        # Peer closed the connection.
        return False
    # Plain message: header is the ASCII length of the body that follows.
    message_length = int(message_header.decode("utf-8").strip())
    return {"header": message_header, "data": client_socket.recv(message_length)}
    # except:
    #     print('$' * 10)
    #     return False
metadata = None
while True:
    # select() blocks until at least one socket is readable or errored.
    read_sockets, dummy_, exception_sockets = select.select(sockets_list, [], sockets_list)
    for notified_socket in read_sockets:
        if notified_socket == server_socket:
            # New connection: the first frame is the username handshake.
            client_socket, client_address = server_socket.accept()
            user = recv_message(client_socket, metadata)
            if user is False:
                continue
            sockets_list.append(client_socket)
            clients[client_socket] = user
            # FIX: the original f-string reused the outer double quotes inside
            # the replacement field (user["data"]) — a SyntaxError on
            # Python < 3.12; single quotes inside are portable.
            print(f"accepted new connection from {client_address}:{client_address[1]} username: {user['data'].decode('utf-8')}")
        else:
            message_ = recv_message(notified_socket, metadata)
            # Classify the received frame.
            soundMessage = False
            fileNameTransfer = False
            fileTransfer = False
            acceptDownload = False
            cancelDownload = False
            if type(message_) == tuple and message_[0] == '$fileData$':
                fileTransfer = True
            elif type(message_) == tuple and message_[0] == 'sound':
                soundMessage = True
            elif type(message_) == tuple and message_[0] == 'file':
                # Remember the packet metadata for the upcoming 'filep:' frames.
                metadata = message_[3]
                fileNameTransfer = True
            elif type(message_) == tuple and message_[0] == 'accept':
                acceptDownload = True
            elif type(message_) == tuple and message_[0] == 'cancel':
                cancelDownload = True
            elif message_ is False:
                # FIX: same pre-3.12 quote-reuse issue as above.
                print(f"Closed connection from {clients[notified_socket]['data'].decode('utf-8')}")
                sockets_list.remove(notified_socket)
                del clients[notified_socket]
                continue
            user = clients[notified_socket]
            # Relay the frame to every other connected client.
            for client_socket in clients:
                if client_socket != notified_socket:
                    if soundMessage:
                        client_socket.send(message_[1])
                    elif fileNameTransfer:
                        client_socket.send(message_[1] + message_[2][0])
                        client_socket.send(message_[2][1])
                        client_socket.send(f'{len(str(message_[3]))}-'.encode())
                        client_socket.send(f'{message_[3]}'.encode())
                    elif fileTransfer:
                        client_socket.send(message_[0].encode('utf-8'))
                        for pk in message_[1]:
                            client_socket.send(pk)
                    elif acceptDownload:
                        client_socket.send(message_[1])
                    elif cancelDownload:
                        client_socket.send(message_[1])
                    else:
                        client_socket.send(user['header'] + user['data'] + message_['header'] + message_['data'])
    for notified_socket in exception_sockets:
        # Drop sockets reported as errored.
        sockets_list.remove(notified_socket)
        del clients[notified_socket]
import socket
import select
import ast

# Fixed-width header carrying the byte-length (or a protocol tag) of the payload.
HEADER_LENGTH = 10
# IP = "10.52.3.25"
IP = "127.0.0.1"
# IP = '25.135.227.60'
PORT = 5000

server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # just opening the TCP socket for the server.
# AF: address family, INET is IPv4.
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# this line allows you to reconnect to the server without it saying that the address is in use
server_socket.bind((IP, PORT))  # binding the ip and the port to the socket so I can use it as a server.
server_socket.listen(2)  # the accept queue only takes 2 pending connections.
print("Server is running: \nWaiting for Connections...")
sockets_list = [server_socket]  # since we already have the server socket as a socket, we put it here.
clients = {}  # socket -> user handshake data, so user names can be printed nicely.
def recv_message(client_socket, metadata):
    """Read one framed message from client_socket.

    Returns a {'header', 'data'} dict for plain chat messages, a tagged tuple
    for the special protocols (file data, sound, accept/cancel, file
    announcement), or False on a closed connection.

    metadata: stringified list of (name, size) packet descriptors captured
    from a prior '$file$l#' frame — consumed by the 'filep:' branch.
    TODO confirm exact shape against the client implementation.
    """
    # try:
    message_header = client_socket.recv(HEADER_LENGTH)
    if 'filep:' in message_header.decode('utf-8'):
        # File payload follows; total size comes from the captured metadata.
        dataList = []
        meta = ast.literal_eval(metadata)
        data_size = sum([x[1] for x in meta])
        # NOTE(review): this first recv is discarded — it is neither appended
        # nor subtracted from data_size; looks like a framing quirk, confirm.
        data = client_socket.recv(1000)
        while data_size > 0:
            data = client_socket.recv(1000)
            dataList.append(data)
            data_size -= len(data)
        print("Done Receiving.")
        return ('$fileData$', dataList)
        # NOTE(review): unreachable after the return above.
        return False
    if '$ou#+' in message_header.decode('utf-8'):
        # Voice-message marker: relay the header as-is.
        return ('sound', message_header)
    if '$acceptDd$' in message_header.decode('utf-8'):
        return ('accept', message_header)
    if '$cancelDd$' in message_header.decode('utf-8'):
        return ('cancel', message_header)
    if '$file$l#' in message_header.decode('utf-8'):
        # File announcement: the header carries the filename length after '#'.
        fileLength = message_header.decode('utf-8').strip().split('#')[1]
        filename = client_socket.recv(int(fileLength))
        clean = client_socket.recv(10)
        f_c = (filename, clean)
        # metadata frame: '<len>-' prefix, then the stringified packet list.
        lenPackets = client_socket.recv(10).decode().split('-')
        read = lenPackets[1]
        to_read = int(lenPackets[0]) - len(read)
        metadata = client_socket.recv(to_read)
        metadata = read + metadata.decode()
        return ('file', message_header, f_c, metadata)
    if not len(message_header):
        # Peer closed the connection.
        return False
    # Plain message: header is the ASCII length of the body that follows.
    message_length = int(message_header.decode("utf-8").strip())
    return {"header": message_header, "data": client_socket.recv(message_length)}
    # except:
    #     print('$' * 10)
    #     return False
metadata = None
while True:
    # select() blocks until at least one socket is readable or errored.
    read_sockets, dummy_, exception_sockets = select.select(sockets_list, [], sockets_list)
    # select takes in 3 parameters: the "read" list, the "write" list, and the sockets we might error on.
    for notified_socket in read_sockets:
        if notified_socket == server_socket:
            # New connection: the first frame is the username handshake.
            client_socket, client_address = server_socket.accept()
            user = recv_message(client_socket, metadata)
            if user is False:
                continue
            sockets_list.append(client_socket)
            clients[client_socket] = user
            print(f"accepted new connection from {client_address}:{client_address[1]} username: {user['data'].decode('utf-8')}")
        else:
            message_ = recv_message(notified_socket, metadata)
            # Classify the received frame.
            soundMessage = False
            fileNameTransfer = False
            fileTransfer = False
            acceptDownload = False
            cancelDownload = False
            if type(message_) == tuple and message_[0] == '$fileData$':
                fileTransfer = True
            elif type(message_) == tuple and message_[0] == 'sound':
                soundMessage = True
            elif type(message_) == tuple and message_[0] == 'file':
                # Remember the packet metadata for the upcoming 'filep:' frames.
                metadata = message_[3]
                fileNameTransfer = True
            elif type(message_) == tuple and message_[0] == 'accept':
                acceptDownload = True
            elif type(message_) == tuple and message_[0] == 'cancel':
                cancelDownload = True
            elif message_ is False:
                # Client disconnected: forget its socket and user data.
                print(f"Closed connection from {clients[notified_socket]['data'].decode('utf-8')}")
                sockets_list.remove(notified_socket)
                del clients[notified_socket]
                continue
            user = clients[notified_socket]
            # Relay the frame to every other connected client.
            for client_socket in clients:
                if client_socket != notified_socket:
                    if soundMessage:
                        client_socket.send(message_[1])
                    elif fileNameTransfer:
                        client_socket.send(message_[1] + message_[2][0])
                        client_socket.send(message_[2][1])
                        client_socket.send(f'{len(str(message_[3]))}-'.encode())
                        client_socket.send(f'{message_[3]}'.encode())
                    elif fileTransfer:
                        client_socket.send(message_[0].encode('utf-8'))
                        for pk in message_[1]:
                            client_socket.send(pk)
                    elif acceptDownload:
                        client_socket.send(message_[1])
                    elif cancelDownload:
                        client_socket.send(message_[1])
                    else:
                        client_socket.send(user['header'] + user['data'] + message_['header'] + message_['data'])
    for notified_socket in exception_sockets:
        # Drop sockets reported as errored.
        sockets_list.remove(notified_socket)
        del clients[notified_socket]
|
#! /usr/bin/python3
from subprocess import PIPE, Popen
import fcntl
import os
import select
import time
import re
class MainTest(object):
    """Drive a child process over pipes for interactive black-box testing."""

    def __init__(self, args):
        self.process = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        # Switch the child's stdout to non-blocking so recv() can poll it.
        current_flags = fcntl.fcntl(self.process.stdout, fcntl.F_GETFL)
        fcntl.fcntl(self.process.stdout, fcntl.F_SETFL, current_flags | os.O_NONBLOCK)

    def send(self, data, tail='\n'):
        """Write data (plus terminator) to the child's stdin and flush."""
        payload = (data + tail).encode()
        self.process.stdin.write(payload)
        self.process.stdin.flush()

    def recv(self, t=0.1):
        """Poll the child's stdout (sleeping t seconds between checks) until
        output is available, then return everything readable, decoded."""
        stream = self.process.stdout
        while True:
            ready = select.select([stream], [], [], 0)[0]
            if ready:
                return stream.read().decode()
            time.sleep(t)
class LimitTest(object):
    """Black-box limit tests for the toy database binary, driven via MainTest."""

    def __init__(self, arg):
        self.sampleCapacity = "insert {id} username{id} username{id}@test.com"
        # FIX: the original reused the outer double quotes inside the
        # replacement fields (f"... {"a"*32} ..."), a SyntaxError on
        # Python < 3.12; single quotes inside are portable.
        self.sampleField = f"insert 1 {'a'*32} {'a'*255}"
        self.exit = ".exit"
        self.select = "select"
        self.tester = MainTest(arg)

    def function_test(self, i):
        """Insert row i then select, echoing the program's output."""
        output = ''
        cmds = [self.sampleCapacity.format(id=i), self.select]
        for c in cmds:
            self.tester.send(c)
            output += self.tester.recv()
        print(output)

    def test_table_capacity(self):
        """Fill the table up to its theoretical capacity and expect
        'Table full' only on the first insert beyond it."""
        max_capacity = 1400
        output = ''
        for i in range(max_capacity):
            sample = self.sampleCapacity.format(id=i)
            self.tester.send(sample)
            output += self.tester.recv()
        if re.search('Table full', output):
            print("Table full before reach theoretical capacity")
            self.tester.send(self.exit)
            return
        sample = self.sampleCapacity.format(id=max_capacity+1)
        self.tester.send(sample)
        output = self.tester.recv()
        if re.search('Table full', output):
            print("Table full as expected. Table capacity test succeeded.")
        else:
            print("Exceed table limitation. Table capacity test failed.")
        self.tester.send(self.exit)

    def test_field_capacity(self):
        """Insert maximum-width username/email fields and verify they survive
        a select round-trip."""
        output = ''
        self.tester.send(self.sampleField)
        output += self.tester.recv()
        self.tester.send(self.select)
        output += self.tester.recv()
        if re.search("a"*255, output) and re.search("a"*32, output):
            print("Field capacity test succeeded.")
        else:
            print("Field capacity test failed.")
if __name__ == '__main__':
    # Exercise 20 insert/select round-trips against the './main' binary,
    # then ask it to exit.
    testArgs = ('./main',)
    tester = LimitTest(testArgs)
    for i in range(20):
        tester.function_test(i)
    tester.tester.send(tester.exit)
#! /usr/bin/python3
from subprocess import PIPE, Popen
import fcntl
import os
import select
import time
import re
class MainTest(object):
    """Drives a child process through pipes: send lines to stdin, poll a
    non-blocking stdout for responses."""
    def __init__(self, args):
        self.process = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        # Make the child's stdout non-blocking so recv() can poll it.
        flags = fcntl.fcntl(self.process.stdout, fcntl.F_GETFL)
        fcntl.fcntl(self.process.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    def send(self, data, tail='\n'):
        """Write data plus terminator to the child's stdin and flush."""
        self.process.stdin.write((data+tail).encode())
        self.process.stdin.flush()
    def recv(self, t=0.1):
        """Busy-poll (sleeping t seconds between checks) until stdout has
        data, then return everything currently readable, decoded."""
        pr = self.process.stdout
        while 1:
            if not select.select([pr], [], [], 0)[0]:
                time.sleep(t)
                continue
            content = pr.read()
            return content.decode()
class LimitTest(object):
    """Black-box limit tests for the toy database binary, driven via MainTest."""
    def __init__(self, arg):
        # Templates for an insert of row {id} and a max-width field insert.
        self.sampleCapacity = "insert {id} username{id} username{id}@test.com"
        self.sampleField = f"insert 1 {'a'*32} {'a'*255}"
        self.exit = ".exit"
        self.select = "select"
        self.tester = MainTest(arg)
    def function_test(self, i):
        """Insert row i then select, echoing the program's output."""
        output = ''
        cmds = [self.sampleCapacity.format(id=i), self.select]
        for c in cmds:
            self.tester.send(c)
            output += self.tester.recv()
        print(output)
    def test_table_capacity(self):
        """Fill the table up to its theoretical capacity and expect
        'Table full' only on the first insert beyond it."""
        max_capacity = 1400
        output = ''
        for i in range(max_capacity):
            sample = self.sampleCapacity.format(id=i)
            self.tester.send(sample)
            output += self.tester.recv()
        if re.search('Table full', output):
            # Premature rejection: capacity lower than expected.
            print("Table full before reach theoretical capacity")
            self.tester.send(self.exit)
            return
        sample = self.sampleCapacity.format(id=max_capacity+1)
        self.tester.send(sample)
        output = self.tester.recv()
        if re.search('Table full', output):
            print("Table full as expected. Table capacity test succeeded.")
        else:
            print("Exceed table limitation. Table capacity test failed.")
        self.tester.send(self.exit)
    def test_field_capacity(self):
        """Insert maximum-width username/email fields and verify they survive
        a select round-trip."""
        output = ''
        self.tester.send(self.sampleField)
        output += self.tester.recv()
        self.tester.send(self.select)
        output += self.tester.recv()
        if re.search("a"*255, output) and re.search("a"*32, output):
            print("Field capacity test succeeded.")
        else:
            print("Field capacity test failed.")
if __name__ == '__main__':
    # Exercise 20 insert/select round-trips against the './main' binary,
    # then ask it to exit.
    testArgs = ('./main',)
    tester = LimitTest(testArgs)
    for i in range(20):
        tester.function_test(i)
    tester.tester.send(tester.exit)
|
#!/usr/bin/env python
# coding: utf-8
# Copyright 2021 ARC Centre of Excellence for Climate Extremes
# author: Paola Petrelli <paola.petrelli@utas.edu.au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import json
from datetime import date
from os.path import expanduser
from exception import ZenException
def set_zenodo(ctx, production):
    """Store Zenodo API endpoints on the click context object.

    Parameters
    ----------
    ctx: dict
        Click context obj to pass arguments onto sub-commands
    production: bool
        If True use the production api, otherwise the sandbox one

    Returns
    -------
    ctx: dict
        The same context object, with 'url', 'deposit' and 'community' set
    """
    base_url = 'https://zenodo.org/api' if production else 'https://sandbox.zenodo.org/api'
    # NOTE: the records listing is not filtered by user; without extra query
    # parameters it can list all of Zenodo.
    ctx.obj['url'] = f'{base_url}/records'
    ctx.obj['deposit'] = f'{base_url}/deposit/depositions'
    ctx.obj['community'] = '&community='
    return ctx
def upload_file(bucket_url, token, record_id, fpath):
    """Upload file to selected record

    Parameters
    ----------
    bucket_url : str
        The url for the file bucket to which upload files
    token : str
        The authentication token for the zenodo or sandbox api
    record_id : str
        The id for record we want to upload files to
    fpath : str
        The path for file to upload

    Returns
    -------
    r : requests object
        The requests response object
    """
    # NOTE(review): `record_id` is unused here; the bucket_url already
    # identifies the record — confirm and drop the parameter upstream.
    # NOTE(review): the object key is the full local path `fpath`; any
    # directory components become part of the remote key — confirm callers
    # pass a bare filename.
    headers = {"Content-Type": "application/octet-stream"}
    with open(fpath, "rb") as fp:
        r = requests.put(
            f"{bucket_url}/{fpath}",
            data=fp,
            params={'access_token': token},
            headers=headers)
    return r
def get_bibtex(token, out, community_id=""):
    """Get published records list in selected format

    Parameters
    ----------
    token : str
        The authentication token for the api
    out: str
        Output type ('biblio', 'bibtex' or 'json'); selects the request headers
    community_id : str, optional
        The community identifier to filter on (default "")

    Returns
    -------
    output: str/json depending on input 'out'
        The text/json request response
    """
    headers_dict = {'biblio': {"Content-Type": "text/x-bibliography"},
                    'bibtex': {"Content-Type": "application/x-bibtex"},
                    'json': {"Content-Type": "application/json"}
                    }
    test_url = 'https://sandbox.zenodo.org/api/deposit/depositions'  # kept for sandbox debugging
    api_url = 'https://zenodo.org/api/deposit/depositions'
    r = requests.get(api_url,
                     params={'access_token': token, 'status': 'published',
                             'communities': community_id},
                     # FIX: was `headers[out]` — NameError, the dict is named headers_dict.
                     headers=headers_dict[out])
    if out == 'json':
        # FIX: call the method; the original returned the bound method `r.json`.
        output = r.json()
    else:
        output = r.text
    return output
def process_author(author):
    """Create an author dictionary following the api requirements.

    Parameters
    ----------
    author : dict
        The author details; must contain 'name', 'orcid' and 'email' keys

    Returns
    -------
    author : dict
        The same dictionary, modified in place: the orcid is reduced to its
        bare identifier, an empty affiliation is added and the email removed

    Note
    ----
    Reformatting the name as "surname, firstname" is intentionally not done,
    as names with multiple firstnames/surnames cannot be split reliably.
    """
    # keep only the identifier part of an orcid url, e.g.
    # https://orcid.org/0000-0002-XXXX-XXXX -> 0000-0002-XXXX-XXXX
    author['orcid'] = author['orcid'].split("/")[-1]
    author['affiliation'] = ""
    author.pop('email')
    return author
def process_license(license):
    """If license is Creative Common return the zenodo style string,
    otherwise fall back to the default cc-by id.

    Parameters
    ----------
    license : str
        A string describing the license

    Returns
    -------
    zlicense : dict
        A license dictionary following the api requirements
    """
    # not doing yet what it claims
    ind = license.find('Attribution')
    if ind >= 0:
        # build the id from the text preceding 'Attribution'
        prefix = license[:ind].strip().replace(' ', '-').lower()
        return {'id': prefix + '-4.0'}
    print('check license for this record')
    return {'id': 'cc-by-4.0'}
def process_related_id(plan):
    """Add plan records and other references as related identifiers.

    Only values that look like urls (contain http:// or https://) are kept.

    Parameters
    ----------
    plan : dict
        The plan record; must contain 'geonetwork', 'rda' and 'related' keys

    Returns
    -------
    rids : list(dict)
        A list of {'identifier', 'relation'} dictionaries
    """
    def _is_url(value):
        # accept only values carrying an explicit http(s) scheme
        return 'http://' in value or 'https://' in value

    relationship = {'geonetwork': 'isReferencedBy',
                    'rda': 'isAlternateIdentifier',
                    'related': 'describes'}
    rids = [{'identifier': plan[key], 'relation': relationship[key]}
            for key in ('geonetwork', 'rda') if _is_url(plan[key])]
    rids.extend({'identifier': ref, 'relation': relationship['related']}
                for ref in plan['related'] if _is_url(ref))
    return rids
def process_keywords(keywords):
    """Split a comma-separated keyword string into a list of keywords.

    Parameters
    ----------
    keywords : str
        Comma-separated keywords

    Returns
    -------
    list(str)
        The individual keywords (whitespace is not stripped)
    """
    return keywords.split(",")
def process_zenodo_plan(plan, community_id):
    """Build a zenodo deposition record from a data management plan.

    Parameters
    ----------
    plan : dict
        The plan details: author, license, title, version, description,
        citation, related identifiers and optionally keywords
    community_id : str
        The zenodo community identifier to attach the record to

    Returns
    -------
    final : dict
        The record to post to the api: 'metadata' plus state fields
    """
    global authors
    metadata = {}
    # reuse cached author details if this author was already processed
    if plan['author']['name'] in authors.keys():
        metadata['creators'] = authors[plan['author']['name']]
    else:
        metadata['creators'] = [process_author(plan['author'])]
        authors[plan['author']['name']] = metadata['creators']
    metadata['license'] = process_license(plan['license'])
    metadata['related_identifiers'] = process_related_id(plan)
    # fix: was "in metadata.keys()", which is never true at this point --
    # the optional keywords come from the input plan
    if 'keywords' in plan.keys():
        metadata['keywords'] = process_keywords(plan['keywords'])
    metadata['notes'] = 'Preferred citation:\n' + plan['citation'] + \
        "\n\nAccess to the data is via the NCI geonetwork record in related identifiers, details are also provided in the readme file."
    #metadata['doi'] = '/'.join(plan['doi'].split('/')[-2:])
    metadata['title'] = plan['title']
    metadata['version'] = plan['version']
    metadata['description'] = plan['description']
    metadata['upload_type'] = 'dataset'
    metadata['language'] = 'eng'
    metadata['access_right'] = 'open'
    metadata['communities'] = [{'identifier': community_id}]
    #metadata['publication_date'] = date.today().strftime("%Y-%m-%d"),
    final = {}
    final['metadata'] = metadata
    final['modified'] = date.today().strftime("%Y-%m-%d")
    final['state'] = 'inprogress'
    final['submitted'] = False
    return final
def upload_meta(ctx, fname, auth_fname):
    """Read plans from a json file and post them as zenodo records.

    Parameters
    ----------
    ctx : dict
        Context holding 'production', 'community_id' and 'url' keys
    fname : str
        Path of the json file listing the plans to upload
    auth_fname : str or None
        Optional path of a json file with already processed authors
    """
    # define urls, input file and if loading to sandbox or production
    global authors
    # read a list of already processed authors from file, if new authors are
    # found this gets updated at end of process
    if auth_fname:
        authors = read_json(auth_fname)
    elif 'authors' not in globals():
        # start with an empty cache rather than failing on the first lookup
        authors = {}
    # get either sandbox or api token to connect
    token = get_token(ctx['production'])
    # read data from input json file and process plans in file
    data = read_json(fname)
    # process data for each plan and post records returned by process_plan()
    for plan in data:
        record = process_zenodo_plan(plan, ctx['community_id'])
        # fix: 'metadata' is a key of the built record, not of the input plan
        print(record['metadata']['title'])
        r = post_json(ctx['url'], token, record)
        print(r.status_code)
    # optional dumping authors list
    with open('latest_authors.json', 'w') as fp:
        json.dump(authors, fp)
    return
| #!/usr/bin/env python
# coding: utf-8
# Copyright 2021 ARC Centre of Excellence for Climate Extremes
# author: Paola Petrelli <paola.petrelli@utas.edu.au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import json
from datetime import date
from os.path import expanduser
from exception import ZenException
def set_zenodo(ctx, production):
    """Add Zenodo details: api urls, communities, to context object.

    Parameters
    ----------
    ctx: dict
        Click context obj to pass arguments onto sub-commands
    production: bool
        If True using production api, if False using sandbox

    Returns
    -------
    ctx: dict
        Click context obj to pass arguments onto sub-commands
    """
    root = 'https://zenodo.org/api' if production else 'https://sandbox.zenodo.org/api'
    # removing this for the moment as this doesn't filter based on user
    # can lead to list all the zenodo if used without
    ctx.obj['url'] = f'{root}/records'
    ctx.obj['deposit'] = f'{root}/deposit/depositions'
    ctx.obj['community'] = '&community='
    return ctx
def upload_file(bucket_url, token, record_id, fpath):
    """Upload a file to the selected record.

    Parameters
    ----------
    bucket_url : str
        The url for the file bucket to which upload files
    token : str
        The authentication token for the zenodo or sandbox api
    record_id : str
        The id for record we want to upload files to
    fpath : str
        The path for file to upload

    Returns
    -------
    response : requests response object
        The api response for the PUT request
    """
    # Stream the file straight from disk; the file handle is closed by the
    # context manager before the response is returned
    with open(fpath, "rb") as file_handle:
        response = requests.put(
            f"{bucket_url}/{fpath}",
            data=file_handle,
            params={'access_token': token},
            headers={"Content-Type": "application/octet-stream"})
    return response
def get_bibtex(token, out, community_id=""):
    """Get published records list in selected format.

    Parameters
    ----------
    token : str
        The authentication token for the api
    out : str
        Output type, one of 'biblio', 'bibtex' or 'json'; selects the
        Content-Type header used in the request
    community_id : str, optional
        The community identifier used to filter the records (default "")

    Returns
    -------
    output : str or dict
        The response text, or the decoded json body if ``out == 'json'``
    """
    headers_dict = {'biblio': {"Content-Type": "text/x-bibliography"},
                    'bibtex': {"Content-Type": "application/x-bibtex"},
                    'json': {"Content-Type": "application/json"}
                    }
    # NOTE(review): sandbox endpoint would be
    # https://sandbox.zenodo.org/api/deposit/depositions; production is
    # currently hardcoded here
    api_url = 'https://zenodo.org/api/deposit/depositions'
    r = requests.get(api_url,
                     params={'access_token': token, 'status': 'published',
                             'communities': community_id},
                     # was headers[out]: NameError, the dict is headers_dict
                     headers=headers_dict[out])
    if out == 'json':
        # was r.json (the bound method); call it to get the decoded body
        output = r.json()
    else:
        output = r.text
    return output
def process_author(author):
    """Create an author dictionary following the api requirements.

    Parameters
    ----------
    author : dict
        The author details; must contain 'name', 'orcid' and 'email' keys

    Returns
    -------
    author : dict
        The same dictionary, modified in place: the orcid is reduced to its
        bare identifier, an empty affiliation is added and the email removed

    Note
    ----
    Reformatting the name as "surname, firstname" is intentionally not done,
    as names with multiple firstnames/surnames cannot be split reliably.
    """
    # keep only the identifier part of an orcid url, e.g.
    # https://orcid.org/0000-0002-XXXX-XXXX -> 0000-0002-XXXX-XXXX
    author['orcid'] = author['orcid'].split("/")[-1]
    author['affiliation'] = ""
    author.pop('email')
    return author
def process_license(license):
    """If license is Creative Common return the zenodo style string,
    otherwise fall back to the default cc-by id.

    Parameters
    ----------
    license : str
        A string describing the license

    Returns
    -------
    zlicense : dict
        A license dictionary following the api requirements
    """
    # not doing yet what it claims
    ind = license.find('Attribution')
    if ind >= 0:
        # build the id from the text preceding 'Attribution'
        prefix = license[:ind].strip().replace(' ', '-').lower()
        return {'id': prefix + '-4.0'}
    print('check license for this record')
    return {'id': 'cc-by-4.0'}
def process_related_id(plan):
    """Add plan records and other references as related identifiers.

    Only values that look like urls (contain http:// or https://) are kept.

    Parameters
    ----------
    plan : dict
        The plan record; must contain 'geonetwork', 'rda' and 'related' keys

    Returns
    -------
    rids : list(dict)
        A list of {'identifier', 'relation'} dictionaries
    """
    def _is_url(value):
        # accept only values carrying an explicit http(s) scheme
        return 'http://' in value or 'https://' in value

    relationship = {'geonetwork': 'isReferencedBy',
                    'rda': 'isAlternateIdentifier',
                    'related': 'describes'}
    rids = [{'identifier': plan[key], 'relation': relationship[key]}
            for key in ('geonetwork', 'rda') if _is_url(plan[key])]
    rids.extend({'identifier': ref, 'relation': relationship['related']}
                for ref in plan['related'] if _is_url(ref))
    return rids
def process_keywords(keywords):
    """Split a comma-separated keyword string into a list of keywords.

    Parameters
    ----------
    keywords : str
        Comma-separated keywords

    Returns
    -------
    list(str)
        The individual keywords (whitespace is not stripped)
    """
    return keywords.split(",")
def process_zenodo_plan(plan, community_id):
    """Build a zenodo deposition record from a data management plan.

    Parameters
    ----------
    plan : dict
        The plan details: author, license, title, version, description,
        citation, related identifiers and optionally keywords
    community_id : str
        The zenodo community identifier to attach the record to

    Returns
    -------
    final : dict
        The record to post to the api: 'metadata' plus state fields
    """
    global authors
    metadata = {}
    # reuse cached author details if this author was already processed
    if plan['author']['name'] in authors.keys():
        metadata['creators'] = authors[plan['author']['name']]
    else:
        metadata['creators'] = [process_author(plan['author'])]
        authors[plan['author']['name']] = metadata['creators']
    metadata['license'] = process_license(plan['license'])
    metadata['related_identifiers'] = process_related_id(plan)
    # fix: was "in metadata.keys()", which is never true at this point --
    # the optional keywords come from the input plan
    if 'keywords' in plan.keys():
        metadata['keywords'] = process_keywords(plan['keywords'])
    metadata['notes'] = 'Preferred citation:\n' + plan['citation'] + \
        "\n\nAccess to the data is via the NCI geonetwork record in related identifiers, details are also provided in the readme file."
    #metadata['doi'] = '/'.join(plan['doi'].split('/')[-2:])
    metadata['title'] = plan['title']
    metadata['version'] = plan['version']
    metadata['description'] = plan['description']
    metadata['upload_type'] = 'dataset'
    metadata['language'] = 'eng'
    metadata['access_right'] = 'open'
    metadata['communities'] = [{'identifier': community_id}]
    #metadata['publication_date'] = date.today().strftime("%Y-%m-%d"),
    final = {}
    final['metadata'] = metadata
    final['modified'] = date.today().strftime("%Y-%m-%d")
    final['state'] = 'inprogress'
    final['submitted'] = False
    return final
def upload_meta(ctx, fname, auth_fname):
    """Read plans from a json file and post them as zenodo records.

    Parameters
    ----------
    ctx : dict
        Context holding 'production', 'community_id' and 'url' keys
    fname : str
        Path of the json file listing the plans to upload
    auth_fname : str or None
        Optional path of a json file with already processed authors
    """
    # define urls, input file and if loading to sandbox or production
    global authors
    # read a list of already processed authors from file, if new authors are
    # found this gets updated at end of process
    if auth_fname:
        authors = read_json(auth_fname)
    elif 'authors' not in globals():
        # start with an empty cache rather than failing on the first lookup
        authors = {}
    # get either sandbox or api token to connect
    token = get_token(ctx['production'])
    # read data from input json file and process plans in file
    data = read_json(fname)
    # process data for each plan and post records returned by process_plan()
    for plan in data:
        record = process_zenodo_plan(plan, ctx['community_id'])
        # fix: 'metadata' is a key of the built record, not of the input plan
        print(record['metadata']['title'])
        r = post_json(ctx['url'], token, record)
        print(r.status_code)
    # optional dumping authors list
    with open('latest_authors.json', 'w') as fp:
        json.dump(authors, fp)
    return
|
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
MENU
Menu class.
"""
# File constants no. 0
__all__ = ['Menu']
import math
import os
import sys
import time
import pygame
import pygame.gfxdraw as gfxdraw
import pygame_menu.events as _events
from pygame_menu._base import Base
from pygame_menu._decorator import Decorator
from pygame_menu._widgetmanager import WidgetManager
from pygame_menu.controls import Controller
from pygame_menu.locals import ALIGN_CENTER, ALIGN_LEFT, ALIGN_RIGHT, \
ORIENTATION_HORIZONTAL, ORIENTATION_VERTICAL, FINGERDOWN, FINGERUP, FINGERMOTION
from pygame_menu._scrollarea import ScrollArea, get_scrollbars_from_position
from pygame_menu.sound import Sound
from pygame_menu.themes import Theme, THEME_DEFAULT
from pygame_menu.utils import assert_vector, make_surface, warn, \
check_key_pressed_valid, mouse_motion_current_mouse_position, get_finger_pos, \
print_menu_widget_structure
from pygame_menu.widgets import Frame, Widget, MenuBar
from pygame_menu.widgets.core.widget import check_widget_mouseleave, WIDGET_MOUSEOVER
# Import types
from pygame_menu._types import Callable, Any, Dict, NumberType, VectorType, \
Vector2NumberType, Union, Tuple, List, Vector2IntType, Vector2BoolType, \
Tuple4Tuple2IntType, Tuple2IntType, MenuColumnMaxWidthType, MenuColumnMinWidthType, \
MenuRowsType, Optional, Tuple2BoolType, NumberInstance, VectorInstance, EventType, \
EventVectorType, EventListType, CallableNoArgsType
# Joy events
# Power-of-two values; presumably combined as bitmask flags in Menu._joy_event
# to track held joystick directions -- TODO confirm against the event handlers
# outside this chunk
JOY_EVENT_LEFT = 1
JOY_EVENT_RIGHT = 2
JOY_EVENT_UP = 4
JOY_EVENT_DOWN = 8

# Select types
# String tags describing what triggered the last widget selection; stored in
# Menu._last_selected_type (used for test purposes per its declaration)
SELECT_KEY = 'key'
SELECT_MOUSE_BUTTON_DOWN = 'mouse_button_down'
SELECT_MOUSE_MOTION = 'mouse_motion'
SELECT_MOVE = 'move'
SELECT_OPEN = 'open'
SELECT_RECURSIVE = 'recursive'
SELECT_REMOVE = 'remove'
SELECT_RESET = 'reset'
SELECT_TOUCH = 'touch'
SELECT_WIDGET = 'widget'
class Menu(Base):
"""
Menu object.
Menu can receive many callbacks; callbacks ``onclose`` and ``onreset`` are fired
(if them are callable-type). They can only receive 1 argument maximum, if so,
the Menu instance is provided
.. code-block:: python
onclose(menu) <or> onclose()
onreset(menu) <or> onreset()
.. note::
Menu cannot be copied or deep-copied.
:param title: Title of the Menu
:param width: Width of the Menu in px
:param height: Height of the Menu in px
:param center_content: Auto centers the Menu on the vertical position after a widget is added/deleted
:param column_max_width: List/Tuple representing the maximum width of each column in px, ``None`` equals no limit. For example ``column_max_width=500`` (each column width can be 500px max), or ``column_max_width=(400,500)`` (first column 400px, second 500). If ``0`` uses the Menu width. This method does not resize the widgets, only determines the dynamic width of the column layout
:param column_min_width: List/Tuple representing the minimum width of each column in px. For example ``column_min_width=500`` (each column width is 500px min), or ``column_max_width=(400,500)`` (first column 400px, second 500). Negative values are not accepted
:param columns: Number of columns
:param enabled: Menu is enabled. If ``False`` the Menu cannot be drawn or updated
:param joystick_enabled: Enable/disable joystick events on the Menu
:param keyboard_enabled: Enable/disable keyboard events on the Menu
:param keyboard_ignore_nonphysical: Ignores non-physical keyboard buttons pressed
:param menu_id: ID of the Menu
:param mouse_enabled: Enable/disable mouse click inside the Menu
:param mouse_motion_selection: Select widgets using mouse motion. If ``True`` menu draws a ``focus`` on the selected widget
:param mouse_visible: Set mouse visible on Menu
:param onclose: Event or function executed when closing the Menu. If not ``None`` the menu disables and executes the event or function it points to. If a function (callable) is provided it can be both non-argument or single argument (Menu instance)
:param onreset: Function executed when resetting the Menu. The function must be non-argument or single argument (Menu instance)
:param overflow: Enables overflow on x/y axes. If ``False`` then scrollbars will not work and the maximum width/height of the scrollarea is the same as the Menu container. Style: (overflow_x, overflow_y). If ``False`` or ``True`` the value will be set on both axis
:param position: Position on x-axis and y-axis. If the value is only 2 elements, the position is relative to the window width (thus, values must be 0-100%); else, the third element defines if the position is relative or not. If ``(x, y, False)`` the values of ``(x, y)`` are in px
:param rows: Number of rows of each column, if there's only 1 column ``None`` can be used for no-limit. Also, a tuple can be provided for defining different number of rows for each column, for example ``rows=10`` (each column can have a maximum 10 widgets), or ``rows=[2, 3, 5]`` (first column has 2 widgets, second 3, and third 5)
:param screen_dimension: List/Tuple representing the dimensions the Menu should reference for sizing/positioning (width, height), if ``None`` pygame is queried for the display mode. This value defines the ``window_size`` of the Menu
:param theme: Menu theme
:param touchscreen: Enable/disable touch action inside the Menu. Only available on pygame 2
:param touchscreen_motion_selection: Select widgets using touchscreen motion. If ``True`` menu draws a ``focus`` on the selected widget
"""
_auto_centering: bool
_background_function: Tuple[bool, Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]]]
_clock: 'pygame.time.Clock'
_column_max_width: VectorType
_column_max_width_zero: List[bool]
_column_min_width: VectorType
_column_pos_x: List[NumberType]
_column_widths: List[NumberType]
_columns: int
_ctrl: 'Controller'
_current: 'Menu'
_decorator: 'Decorator'
_disable_draw: bool
_disable_exit: bool
_disable_update: bool
_enabled: bool
_height: int
_index: int
_joy_event: int
_joy_event_repeat: int
_joystick: bool
_keyboard: bool
_keyboard_ignore_nonphysical: bool
_last_scroll_thickness: List[Union[Tuple2IntType, int]]
_last_selected_type: str
_last_update_mode: List[str]
_mainloop: bool
_max_row_column_elements: int
_menubar: 'MenuBar'
_mouse: bool
_mouse_motion_selection: bool
_mouse_visible: bool
_mouse_visible_default: bool
_mouseover: bool
_onbeforeopen: Optional[Callable[['Menu', 'Menu'], Any]]
_onclose: Optional[Union['_events.MenuAction', Callable[['Menu'], Any], CallableNoArgsType]]
_onmouseleave: Optional[Union[Callable[['Menu', EventType], Any], CallableNoArgsType]]
_onmouseover: Optional[Union[Callable[['Menu', EventType], Any], CallableNoArgsType]]
_onreset: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]]
_onupdate: Optional[Union[Callable[[EventListType, 'Menu'], Any], CallableNoArgsType]]
_onwidgetchange: Optional[Callable[['Menu', 'Widget'], Any]]
_onwindowmouseleave: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]]
_onwindowmouseover: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]]
_overflow: Tuple2BoolType
_position: Tuple2IntType
_position_default: Tuple2IntType
_position_relative: bool
_prev: Optional[List[Union['Menu', List['Menu']]]]
_runtime_errors: '_MenuRuntimeErrorConfig'
_scrollarea: 'ScrollArea'
_scrollarea_margin: List[int]
_sound: 'Sound'
_stats: '_MenuStats'
_submenus: Dict['Menu', List['Widget']]
_theme: 'Theme'
_top: 'Menu'
_touchscreen: bool
_touchscreen_motion_selection: bool
_translate: Tuple2IntType
_update_frames: List['Frame'] # Stores the reference of scrollable frames to check inputs
_update_widgets: List['Widget'] # Stores widgets which should always update
_used_columns: int
_validate_frame_widgetmove: bool
_widget_columns: Dict[int, List['Widget']]
_widget_max_position: Tuple2IntType
_widget_min_position: Tuple2IntType
_widget_offset: List[int]
_widget_selected_update: bool # Selected widget receives updates
_widget_surface_cache_enabled: bool
_widget_surface_cache_need_update: bool
_widgets: List['Widget']
_widgets_surface: Optional['pygame.Surface']
_widgets_surface_last: Tuple[int, int, Optional['pygame.Surface']]
_widgets_surface_need_update: bool
_width: int
_window_size: Tuple2IntType
add: 'WidgetManager'
    def __init__(
            self,
            title: str,
            width: NumberType,
            height: NumberType,
            center_content: bool = True,
            column_max_width: MenuColumnMaxWidthType = None,
            column_min_width: MenuColumnMinWidthType = 0,
            columns: int = 1,
            enabled: bool = True,
            joystick_enabled: bool = True,
            keyboard_enabled: bool = True,
            keyboard_ignore_nonphysical: bool = True,
            menu_id: str = '',
            mouse_enabled: bool = True,
            mouse_motion_selection: bool = False,
            mouse_visible: bool = True,
            onclose: Optional[Union['_events.MenuAction', Callable[['Menu'], Any], CallableNoArgsType]] = None,
            onreset: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]] = None,
            overflow: Union[Vector2BoolType, bool] = (True, True),
            position: Union[Vector2NumberType, Tuple[NumberType, NumberType, bool]] = (50, 50, True),
            rows: MenuRowsType = None,
            screen_dimension: Optional[Vector2IntType] = None,
            theme: 'Theme' = THEME_DEFAULT.copy(),
            touchscreen: bool = False,
            touchscreen_motion_selection: bool = False
    ) -> None:
        """
        Create the Menu. All parameters are documented in the class docstring.

        The constructor validates its inputs, normalizes the column/row
        layout vectors, sizes the menu via :py:meth:`resize`, and builds the
        title bar (``MenuBar``) and the scrollable widget area
        (``ScrollArea``).
        """
        super(Menu, self).__init__(object_id=menu_id)

        # Input type validation (NOTE: asserts are stripped under -O)
        assert isinstance(center_content, bool)
        assert isinstance(column_max_width, (VectorInstance, type(None), NumberInstance))
        assert isinstance(column_min_width, (VectorInstance, NumberInstance))
        assert isinstance(columns, int)
        assert isinstance(enabled, bool)
        assert isinstance(joystick_enabled, bool)
        assert isinstance(keyboard_enabled, bool)
        assert isinstance(mouse_enabled, bool)
        assert isinstance(mouse_motion_selection, bool)
        assert isinstance(mouse_visible, bool)
        assert isinstance(overflow, (VectorInstance, bool))
        assert isinstance(rows, (int, type(None), VectorInstance))
        assert isinstance(theme, Theme), \
            'theme bust be a pygame_menu.themes.Theme object instance'
        assert isinstance(touchscreen, bool)
        assert isinstance(touchscreen_motion_selection, bool)

        # Assert theme
        theme.validate()

        # Assert pygame was initialized
        assert not hasattr(pygame, 'get_init') or pygame.get_init(), \
            'pygame is not initialized'

        # Assert python version is greater than 3.6
        assert sys.version_info >= (3, 6, 0), \
            'pygame-menu only supports python equal or greater than version 3.6.0'

        # Column/row asserts. After this section "rows" is always a list with
        # one entry per column
        assert columns >= 1, \
            f'the number of columns must be equal or greater than 1 (current={columns})'
        if columns > 1:
            assert rows is not None, \
                'rows cannot be None if the number of columns is greater than 1'
            if isinstance(rows, int):
                assert rows >= 1, \
                    f'if number of columns is greater than 1 (current={columns}) then the ' \
                    f'number of rows must be equal or greater than 1 (current={rows})'
                rows = [rows for _ in range(columns)]
            assert isinstance(rows, VectorInstance), \
                'if rows is not an integer it must be a tuple/list'
            assert len(rows) == columns, \
                f'the length of the rows vector must be the same as the number of' \
                f' columns (current={rows}, expected={columns})'
            for i in rows:
                assert isinstance(i, int), \
                    'each item of rows tuple/list must be an integer'
                assert i >= 1, \
                    'each item of the rows tuple/list must be equal or greater than one'
        else:
            if rows is None:
                rows = 10000000  # Set rows as a big number
            else:
                assert isinstance(rows, int), \
                    'rows cannot be a tuple/list as there\'s only 1 column'
                assert rows >= 1, \
                    'number of rows must be equal or greater than 1. If there is ' \
                    'no limit rows must be None'
            rows = [rows]

        # Set column min width; normalized to a list with one entry per column
        if isinstance(column_min_width, NumberInstance):
            assert column_min_width >= 0, \
                'column_min_width must be equal or greater than zero'
            if columns != 1:
                if column_min_width > 0:  # Ignore the default value
                    warn(
                        f'column_min_width can be a single number if there is only '
                        f'1 column, but there is {columns} columns. Thus, column_min_width '
                        f'should be a vector of {columns} items. By default a vector has '
                        f'been created using the same value for each column'
                    )
                column_min_width = [column_min_width for _ in range(columns)]
            else:
                column_min_width = [column_min_width]
        assert len(column_min_width) == columns, \
            f'column_min_width length must be the same as the number of columns, ' \
            f'but size is different {len(column_min_width)}!={columns}'
        for i in column_min_width:
            assert isinstance(i, NumberInstance), \
                'each item of column_min_width must be an integer/float'
            assert i >= 0, \
                'each item of column_min_width must be equal or greater than zero'

        # Set column max width; normalized to a list with one entry per column
        # (None means no limit for that column)
        if column_max_width is not None:
            if isinstance(column_max_width, NumberInstance):
                assert column_max_width >= 0, \
                    'column_max_width must be equal or greater than zero'
                if columns != 1:
                    column_max_width = [column_max_width for _ in range(columns)]
                else:
                    column_max_width = [column_max_width]
            assert len(column_max_width) == columns, \
                f'column_max_width length must be the same as the number of columns, ' \
                f'but size is different {len(column_max_width)}!={columns}'
            for i in column_max_width:
                assert isinstance(i, type(None)) or isinstance(i, NumberInstance), \
                    'each item of column_max_width can be None (no limit) or an ' \
                    'integer/float'
                assert i is None or i >= 0, \
                    'each item of column_max_width must be equal or greater than' \
                    ' zero or None'
        else:
            column_max_width = [None for _ in range(columns)]

        # Check that every column max width is equal or greater than minimum width
        for i in range(len(column_max_width)):
            if column_max_width[i] is not None:
                assert column_max_width[i] >= column_min_width[i], \
                    f'item {i} of column_max_width ({column_max_width[i]}) must be equal or greater ' \
                    f'than column_min_width ({column_min_width[i]})'

        # Element size and position asserts. A 3-item position carries the
        # relative/absolute flag as its third element
        if len(position) == 3:
            # noinspection PyTypeChecker
            self._position_relative = position[2]
            position = position[0:2]
        else:
            self._position_relative = True
        assert_vector(position, 2)

        # Assert overflow; normalized to a (x-axis, y-axis) pair
        if isinstance(overflow, bool):  # If single value
            overflow = overflow, overflow
        assert len(overflow) == 2, \
            'overflow must be a 2-item tuple/list of booleans (x-axis, y-axis)'
        assert isinstance(overflow[0], bool), \
            'overflow on x-axis must be a boolean object'
        assert isinstance(overflow[1], bool), \
            'overflow on y-axis must be a boolean object'

        # General properties of the Menu
        self._auto_centering = center_content
        self._background_function = (False, None)  # Accept menu as argument, callable object
        self._clock = pygame.time.Clock()
        self._decorator = Decorator(self)
        self._enabled = enabled  # Menu is enabled or not. If disabled menu can't update or draw
        self._index = -1  # Selected index, if -1 the widget does not have been selected yet
        self._last_scroll_thickness = [(0, 0), 0]  # scroll and the number of recursive states
        self._last_selected_type = ''  # Last type selection, used for test purposes
        self._mainloop = False  # Menu is in mainloop state
        self._onclose = None  # Function or event called on Menu close
        self._sound = Sound()
        self._stats = _MenuStats()
        self._submenus = {}
        self._theme = theme

        # Set callbacks
        self.set_onclose(onclose)
        self.set_onreset(onreset)

        self._onbeforeopen = None
        self._onmouseleave = None
        self._onmouseover = None
        self._onupdate = None
        self._onwidgetchange = None
        self._onwindowmouseleave = None
        self._onwindowmouseover = None

        # Menu links (pointer to previous and next menus in nested submenus),
        # for public methods accessing, self should be used through "_current",
        # because user can move through submenus and self pointer should target
        # the current Menu object. Private methods access through self
        # (not _current) because these methods are called by public (_current) or
        # by themselves. _top is only used when moving through menus (open, reset)
        self._current = self  # Current Menu

        # Prev stores a list of Menu pointers, when accessing a submenu, prev grows
        # as prev = [prev, new_pointer]
        self._prev = None

        # Top is the same for the menus and submenus if the user moves through them
        self._top = self

        # Menu widgets, it should not be accessed outside the object as strange
        # issues can occur
        self.add = WidgetManager(self)
        self._widget_selected_update = True
        self._widgets = []  # This list may change during execution (replaced by a new one)

        # Stores the frames which receive update events, updated and managed only
        # by the Frame class
        self._update_frames = []

        # Stores the widgets which receive update even if not selected or events
        # is empty
        self._update_widgets = []

        # Widget surface
        self._widgets_surface = None
        self._widgets_surface_need_update = False
        self._widgets_surface_last = (0, 0, None)

        # Precache widgets surface draw, this method dramatically increases the
        # performance of the menu rendering
        self._widget_surface_cache_enabled = True

        # This boolean variable, if True, forces the cache to be updated, after
        # updating, _widget_surface_cache_need_update goes back again to False,
        # thus, the state only is used once
        self._widget_surface_cache_need_update = True

        # Columns and rows
        # _column_max_width_zero remembers which columns asked for "use menu
        # width" (max width given as 0)
        self._column_max_width_zero = []
        for i in range(len(column_max_width)):
            if column_max_width[i] == 0:
                self._column_max_width_zero.append(True)
            else:
                self._column_max_width_zero.append(False)
        self._column_max_width = column_max_width
        self._column_min_width = column_min_width
        self._column_pos_x = []  # Stores the center x position of each column
        self._column_widths = []
        self._columns = columns
        self._max_row_column_elements = 0
        self._rows = rows
        self._used_columns = 0  # Total columns used in widget positioning
        self._widget_columns = {}
        self._widget_max_position = (0, 0)
        self._widget_min_position = (0, 0)

        # Total widget capacity is the sum of the per-column row counts
        for r in self._rows:
            self._max_row_column_elements += r

        # Position of Menu
        self._position_default = position
        self._position = (0, 0)
        self._translate = (0, 0)

        # Set the size
        self.resize(
            width=width,
            height=height,
            screen_dimension=screen_dimension
        )

        # Setups controller
        self._ctrl = Controller()

        # Init joystick
        self._joystick = joystick_enabled
        if self._joystick:
            if not pygame.joystick.get_init():
                pygame.joystick.init()
            for i in range(pygame.joystick.get_count()):
                pygame.joystick.Joystick(i).init()
        self._joy_event = 0
        self._joy_event_repeat = pygame.NUMEVENTS - 1

        # Init keyboard
        self._keyboard = keyboard_enabled
        self._keyboard_ignore_nonphysical = keyboard_ignore_nonphysical

        # Init mouse; motion selection requires a visible, enabled mouse and
        # pygame MOUSEMOTION support
        if mouse_motion_selection:
            assert mouse_enabled, \
                'mouse motion selection cannot be enabled if mouse is disabled'
            assert mouse_visible, \
                'mouse motion cannot be enabled if mouse is not visible'
            assert hasattr(pygame, 'MOUSEMOTION'), \
                'pygame MOUSEMOTION does not exist, thus, mouse motion selection' \
                ' cannot be enabled'
        self._mouse = mouse_enabled and mouse_visible
        self._mouseover = False
        self._mouse_motion_selection = mouse_motion_selection
        self._mouse_visible = mouse_visible
        self._mouse_visible_default = mouse_visible

        # Init touchscreen
        if touchscreen_motion_selection:
            assert touchscreen, \
                'touchscreen motion selection cannot be enabled if touchscreen is disabled'
        self._touchscreen = touchscreen
        self._touchscreen_motion_selection = touchscreen_motion_selection

        # Create menubar (title)
        self._menubar = MenuBar(
            back_box=theme.title_close_button,
            back_box_background_color=theme.title_close_button_background_color,
            background_color=self._theme.title_background_color,
            mode=self._theme.title_bar_style,
            modify_scrollarea=self._theme.title_bar_modify_scrollarea,
            offsetx=theme.title_offset[0],
            offsety=theme.title_offset[1],
            onreturn=self._back,
            title=title,
            width=self._width
        )
        self._menubar.set_menu(self)
        self._menubar.set_font(
            antialias=self._theme.title_font_antialias,
            background_color=None,
            color=self._theme.title_font_color,
            font=self._theme.title_font,
            font_size=self._theme.title_font_size,
            readonly_color=self._theme.readonly_color,
            readonly_selected_color=self._theme.readonly_selected_color,
            selected_color=self._theme.title_font_color
        )
        self._menubar.set_cursor(self._theme.title_close_button_cursor)
        self._menubar.set_font_shadow(
            color=self._theme.title_font_shadow_color,
            enabled=self._theme.title_font_shadow,
            offset=self._theme.title_font_shadow_offset,
            position=self._theme.title_font_shadow_position
        )
        self._menubar.set_controls(
            joystick=self._joystick,
            mouse=self._mouse,
            touchscreen=self._touchscreen,
            keyboard=self._keyboard
        )
        self._menubar.set_position(*self.get_position())
        if self._theme.title_floating:
            self._menubar.set_float()
        if not self._theme.title:
            self._menubar.hide()
        self._menubar.configured = True
        self._menubar.fixed = self._theme.title_fixed

        # Scrolling area. A fixed title bar shrinks the available height; a
        # non-fixed one scrolls with the content (extend_y)
        menubar_height = self._menubar.get_height()
        if self._height - menubar_height <= 0:
            raise ValueError(f'menubar is higher than menu height ({menubar_height} > {self._height})')
        extend_y = 0 if self._theme.title_fixed else menubar_height
        self._scrollarea = ScrollArea(
            area_color=self._theme.background_color,
            area_height=self._height - extend_y,
            area_width=self._width,
            border_color=self._theme.border_color,
            border_width=self._theme.border_width,
            controls_joystick=self._joystick,
            controls_keyboard=self._keyboard,
            controls_mouse=self._mouse,
            controls_touchscreen=self._touchscreen,
            extend_y=extend_y,
            menubar=self._menubar,
            scrollbar_color=self._theme.scrollbar_color,
            scrollbar_cursor=self._theme.scrollbar_cursor,
            scrollbar_slider_color=self._theme.scrollbar_slider_color,
            scrollbar_slider_hover_color=self._theme.scrollbar_slider_hover_color,
            scrollbar_slider_pad=self._theme.scrollbar_slider_pad,
            scrollbar_thick=self._theme.scrollbar_thick,
            scrollbars=get_scrollbars_from_position(self._theme.scrollarea_position),
            shadow=self._theme.scrollbar_shadow,
            shadow_color=self._theme.scrollbar_shadow_color,
            shadow_offset=self._theme.scrollbar_shadow_offset,
            shadow_position=self._theme.scrollbar_shadow_position
        )
        self._scrollarea.set_menu(self)
        self._scrollarea.set_position(*self.get_position())
        self._overflow = tuple(overflow)

        # Controls the behaviour of runtime errors
        self._runtime_errors = _MenuRuntimeErrorConfig()

        # Stores the last update
        self._last_update_mode = []

        # These can be changed without any major problem
        self._disable_exit = False
        self._disable_draw = False
        self._disable_widget_update_mousepos_mouseselection = False
        self._disable_update = False
        self._validate_frame_widgetmove = True
def resize(
    self,
    width: NumberType,
    height: NumberType,
    screen_dimension: Optional[Vector2IntType] = None,
    position: Optional[Union[Vector2NumberType, Tuple[NumberType, NumberType, bool]]] = None
) -> 'Menu':
    """
    Resize the menu to another width/height

    :param width: Menu width (px)
    :param height: Menu height (px)
    :param screen_dimension: List/Tuple representing the dimensions the Menu should reference for sizing/positioning (width, height), if ``None`` pygame is queried for the display mode. This value defines the ``window_size`` of the Menu
    :param position: Position on x-axis and y-axis. If the value is only 2 elements, the position is relative to the window width (thus, values must be 0-100%); else, the third element defines if the position is relative or not. If ``(x, y, False)`` the values of ``(x, y)`` are in px. If ``None`` use the default from the menu constructor
    :return: Self reference
    """
    assert isinstance(width, NumberInstance)
    assert isinstance(height, NumberInstance)
    assert width > 0 and height > 0, \
        'menu width and height must be greater than zero'

    # Convert to int
    width, height = int(width), int(height)

    # Get window size if not given explicitly
    if screen_dimension is not None:
        assert_vector(screen_dimension, 2)
        assert screen_dimension[0] > 0, 'screen width must be higher than zero'
        assert screen_dimension[1] > 0, 'screen height must be higher than zero'
        self._window_size = screen_dimension
    else:
        surface = pygame.display.get_surface()
        if surface is None:
            raise RuntimeError('pygame surface could not be retrieved, check '
                               'if pygame.display.set_mode() was called')
        self._window_size = surface.get_size()
    # Normalize the window size to ints regardless of which branch set it
    self._window_size = (int(self._window_size[0]), int(self._window_size[1]))

    # Check menu sizing: the menu cannot exceed the window
    window_width, window_height = self._window_size
    assert width <= window_width and height <= window_height, \
        f'menu size ({width}x{height}) must be lower or equal than the size of the ' \
        f'window ({window_width}x{window_height})'

    # Store width and height
    self._height = height
    self._width = width

    # Compute widget offset; theme values with magnitude below 1 are treated
    # as a fraction of the menu size, otherwise they are absolute px
    self._widget_offset = [self._theme.widget_offset[0], self._theme.widget_offset[1]]
    if abs(self._widget_offset[0]) < 1:
        self._widget_offset[0] *= self._width
    if abs(self._widget_offset[1]) < 1:
        self._widget_offset[1] *= self._height

    # Cast to int offset
    self._widget_offset[0] = int(self._widget_offset[0])
    self._widget_offset[1] = int(self._widget_offset[1])

    # If centering is enabled, but widget offset in the vertical is different
    # from zero a warning is raised
    if self._auto_centering and self._widget_offset[1] != 0:
        warn(
            f'menu is vertically centered (center_content=True), but widget '
            f'offset (from theme) is different than zero ({self._widget_offset[1]}px). '
            f'Auto-centering has been disabled'
        )
        self._auto_centering = False

    # Scroll area outer margin; same fractional-vs-absolute convention as the
    # widget offset above
    self._scrollarea_margin = [self._theme.scrollarea_outer_margin[0],
                               self._theme.scrollarea_outer_margin[1]]
    if abs(self._scrollarea_margin[0]) < 1:
        self._scrollarea_margin[0] *= self._width
    if abs(self._scrollarea_margin[1]) < 1:
        self._scrollarea_margin[1] *= self._height
    self._scrollarea_margin[0] = int(self._scrollarea_margin[0])
    self._scrollarea_margin[1] = int(self._scrollarea_margin[1])

    # If centering is enabled, but ScrollArea margin in the vertical is
    # different from zero a warning is raised
    if self._auto_centering and self._scrollarea_margin[1] != 0:
        warn(
            f'menu is vertically centered (center_content=True), but '
            f'ScrollArea outer margin (from theme) is different than zero '
            f'({round(self._scrollarea_margin[1], 3)}px). Auto-centering has been disabled'
        )
        self._auto_centering = False

    # Configure menubar; the hasattr guards keep resize safe if called before
    # the menubar/scrollarea are created during construction
    extend_y = 0
    if hasattr(self, '_menubar'):
        self._menubar._width = self._width
        menubar_height = self._menubar.get_height()
        if self._height - menubar_height <= 0:
            raise ValueError(f'menubar is higher than menu height ({menubar_height} > {self._height})')
        extend_y = 0 if self._theme.title_fixed else menubar_height

    # Configure scrollbar
    if hasattr(self, '_scrollarea'):
        self._scrollarea.create_rect(self._width, self._height - extend_y)

    # Update column max width for columns that were declared with zero width
    for i in range(len(self._column_max_width)):
        if self._column_max_width_zero[i]:
            self._column_max_width[i] = self._width

    # Force the rendering
    if self._widgets_surface is not None:
        self._widgets_surface_need_update = True

    # Update the menu position; a 3-element position carries the relative flag
    if position is None:
        position = self._position_default
    else:
        if len(position) == 3:
            # noinspection PyTypeChecker
            self._position_relative = position[2]
        else:
            self._position_relative = True
    if self._position_relative:
        self.set_relative_position(position[0], position[1])
    else:
        self.set_absolute_position(position[0], position[1])

    return self
def __copy__(self) -> 'Menu':
    """
    Shallow-copy protocol hook; copying a Menu is not supported.

    :return: Raises copy exception
    """
    raise _MenuCopyException('Menu class cannot be copied')
def __deepcopy__(self, memodict: Dict) -> 'Menu':
    """
    Deep-copy protocol hook; deep-copying a Menu is not supported.

    :param memodict: Memo dict
    :return: Raises copy exception
    """
    raise _MenuCopyException('Menu class cannot be deep-copied')
def force_surface_update(self) -> 'Menu':
    """
    Flag the current Menu so its widget surface is rebuilt on the next
    rendering call.

    .. note::

        This method is expensive, as menu surface update forces re-rendering
        of all widgets (because them can change in size, position, etc...).

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().update(...)``.

    :return: Self reference
    """
    current = self._current
    current._widgets_surface_need_update = True
    return self
def force_surface_cache_update(self) -> 'Menu':
    """
    Flag the current Menu so its surface cache is refreshed on the next
    drawing call.

    .. note::

        This method only updates the surface cache, without forcing re-rendering
        of all Menu widgets.

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().update(...)``.

    :return: Self reference
    """
    current = self._current
    current._widget_surface_cache_need_update = True
    current._decorator.force_cache_update()
    return self
def set_onbeforeopen(
    self,
    onbeforeopen: Optional[Callable[['Menu', 'Menu'], Any]]
) -> 'Menu':
    """
    Set the ``onbeforeopen`` callback, executed right before a Menu is opened.
    It receives the current Menu and the Menu about to open:

    .. code-block:: python

        onbeforeopen(current Menu <from>, next Menu <to>)

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param onbeforeopen: Onbeforeopen callback, it can be a function or None
    :return: Self reference
    """
    assert onbeforeopen is None or callable(onbeforeopen), \
        'onbeforeopen must be callable (function-type) or None'
    self._onbeforeopen = onbeforeopen
    return self
def set_onupdate(
    self,
    onupdate: Optional[Union[Callable[[EventListType, 'Menu'], Any], CallableNoArgsType]]
) -> 'Menu':
    """
    Set the ``onupdate`` callback, fired before the Menu processes its update
    cycle. It receives the event list and the Menu reference, or may take no
    arguments at all:

    .. code-block:: python

        onupdate(event_list, menu) <or> onupdate()

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param onupdate: Onupdate callback, it can be a function or None
    :return: Self reference
    """
    assert onupdate is None or callable(onupdate), \
        'onupdate must be a callable (function-type) or None'
    self._onupdate = onupdate
    return self
def set_onclose(
    self,
    onclose: Optional[Union['_events.MenuAction', Callable[['Menu'], Any], CallableNoArgsType]]
) -> 'Menu':
    """
    Set the ``onclose`` callback. It may be a pygame-menu event, a callable
    taking at most the Menu instance, or ``None``:

    .. code-block:: python

        onclose(menu) <or> onclose()

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param onclose: Onclose callback, it can be a function, a pygame-menu event, or None
    :return: Self reference
    """
    assert onclose is None or callable(onclose) or _events.is_event(onclose), \
        'onclose must be a MenuAction (event), callable (function-type), or None'
    # The NONE event and None are equivalent; normalize to None
    if onclose == _events.NONE:
        onclose = None
    self._onclose = onclose
    return self
def set_onreset(
    self,
    onreset: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]]
) -> 'Menu':
    """
    Set the ``onreset`` callback, fired when the Menu is reset. The callable
    takes at most the Menu instance:

    .. code-block:: python

        onreset(menu) <or> onreset()

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param onreset: Onreset callback, it can be a function or None
    :return: Self reference
    """
    assert onreset is None or callable(onreset), \
        'onreset must be a callable (function-type) or None'
    self._onreset = onreset
    return self
def set_onwindowmouseover(
    self,
    onwindowmouseover: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]]
) -> 'Menu':
    """
    Set the ``onwindowmouseover`` callback, executed from
    :py:meth:`pygame_menu.menu.Menu.update` when the mouse enters the window:

    .. code-block:: python

        onwindowmouseover(menu) <or> onwindowmouseover()

    :param onwindowmouseover: Callback executed if user enters the window with the mouse; it can be a function or None
    :return: Self reference
    """
    assert onwindowmouseover is None or callable(onwindowmouseover), \
        'onwindowmouseover must be callable (function-type) or None'
    self._onwindowmouseover = onwindowmouseover
    return self
def set_onwindowmouseleave(
    self,
    onwindowmouseleave: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]]
) -> 'Menu':
    """
    Set the ``onwindowmouseleave`` callback, executed from
    :py:meth:`pygame_menu.menu.Menu.update` when the mouse leaves the window:

    .. code-block:: python

        onwindowmouseleave(menu) <or> onwindowmouseleave()

    :param onwindowmouseleave: Callback executed if user leaves the window with the mouse; it can be a function or None
    :return: Self reference
    """
    assert onwindowmouseleave is None or callable(onwindowmouseleave), \
        'onwindowmouseleave must be callable (function-type) or None'
    self._onwindowmouseleave = onwindowmouseleave
    return self
def set_onwidgetchange(
    self,
    onwidgetchange: Optional[Callable[['Menu', 'Widget'], Any]]
) -> 'Menu':
    """
    Set the ``onwidgetchange`` callback, executed whenever any appended widget
    changes its value:

    .. code-block:: python

        onwidgetchange(menu, widget)

    :param onwidgetchange: Callback executed if an appended widget changes its value
    :return: Self reference
    """
    assert onwidgetchange is None or callable(onwidgetchange), \
        'onwidgetchange must be callable (function-type) or None'
    self._onwidgetchange = onwidgetchange
    return self
def set_onmouseover(
    self,
    onmouseover: Optional[Union[Callable[['Menu', EventType], Any], CallableNoArgsType]]
) -> 'Menu':
    """
    Set the ``onmouseover`` callback, executed from
    :py:meth:`pygame_menu.menu.Menu.update` when the mouse enters the Menu:

    .. code-block:: python

        onmouseover(menu, event) <or> onmouseover()

    :param onmouseover: Callback executed if user enters the Menu with the mouse; it can be a function or None
    :return: Self reference
    """
    assert onmouseover is None or callable(onmouseover), \
        'onmouseover must be callable (function-type) or None'
    self._onmouseover = onmouseover
    return self
def set_onmouseleave(
    self,
    onmouseleave: Optional[Union[Callable[['Menu', EventType], Any], CallableNoArgsType]]
) -> 'Menu':
    """
    Set the ``onmouseleave`` callback, executed from
    :py:meth:`pygame_menu.menu.Menu.update` when the mouse leaves the Menu:

    .. code-block:: python

        onmouseleave(menu, event) <or> onmouseleave()

    :param onmouseleave: Callback executed if user leaves the Menu with the mouse; it can be a function or None
    :return: Self reference
    """
    assert onmouseleave is None or callable(onmouseleave), \
        'onmouseleave must be callable (function-type) or None'
    self._onmouseleave = onmouseleave
    return self
def get_current(self) -> 'Menu':
    """
    Return the currently active Menu: the base Menu itself when no submenu
    has been opened, otherwise the pointer to the opened Menu.

    :return: Menu object **(current)**
    """
    return self._current
def translate(self, x: NumberType, y: NumberType) -> 'Menu':
    """
    Translate the Menu by (+x, +y) px with respect to its default position.

    .. note::

        To revert changes, only set to ``(0, 0)``.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param x: +X in px
    :param y: +Y in px
    """
    for value in (x, y):
        assert isinstance(value, NumberInstance)
    self._translate = (int(x), int(y))
    # Invalidate the cached widget surface and re-render at the new offset
    self._widgets_surface = None
    self._render()
    return self
def get_translate(self) -> Tuple2IntType:
    """
    Return the Menu translation on x-axis and y-axis (x, y) in px.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Translation on both axis
    """
    return self._translate
def get_position(self) -> Tuple2IntType:
    """
    Return the menu position: the constructor position plus the translation.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Position on x-axis and y-axis (x,y) in px
    """
    pos_x, pos_y = self._position
    off_x, off_y = self._translate
    return pos_x + off_x, pos_y + off_y
def select_widget(self, widget: Optional[Union['Widget', str]]) -> 'Menu':
    """
    Select a widget from the Menu; ``None`` clears the current selection.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param widget: Widget to be selected or Widget ID. If ``None`` unselect the current
    :return: Self reference
    """
    # None clears the selection of every widget
    if widget is None:
        for wid in self._widgets:
            wid.select(False)
        self._index = -1
        return self

    # A string is treated as a widget ID
    if isinstance(widget, str):
        widget = self.get_widget(widget)
    assert isinstance(widget, Widget)

    # Guard clauses: only selectable and visible widgets may be selected
    if not widget.is_selectable:
        raise ValueError(f'{widget.get_class_id()} is not selectable')
    if not widget.is_visible():  # Considers frame
        raise ValueError(f'{widget.get_class_id()} is not visible')

    try:
        index = self._widgets.index(widget)  # If not exists this raises ValueError
    except ValueError:
        raise ValueError(f'{widget.get_class_id()} is not in Menu, check if exists on the current '
                         f'with menu.get_current().remove_widget(widget)')
    self._select(index, 1, SELECT_WIDGET, False)
    self.force_surface_cache_update()
    return self
def unselect_widget(self) -> 'Menu':
    """
    Unselect the currently selected widget, if any.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Self reference
    """
    # select_widget treats None as "clear the selection"
    return self.select_widget(None)
def remove_widget(self, widget: Union['Widget', str]) -> 'Menu':
    """
    Remove the ``widget`` from the Menu. If widget not exists on Menu this
    method raises a ``ValueError`` exception.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param widget: Widget object or Widget ID
    :return: Self reference
    """
    # A string is resolved as a widget ID
    if isinstance(widget, str):
        widget = self.get_widget(widget)
    assert isinstance(widget, Widget)
    try:
        index = self._widgets.index(widget)  # If not exists this raises ValueError
    except ValueError:
        raise ValueError('widget is not in Menu, check if exists on the current '
                         'with menu.get_current().remove_widget(widget)')
    self._widgets.pop(index)
    self._update_after_remove_or_hidden(index)  # Forces surface update
    self._stats.removed_widgets += 1

    # If widget is within a frame, remove from frame
    frame = widget.get_frame()
    if frame is not None:
        frame.unpack(widget)

    # If widget points to a hook, remove the submenu
    # noinspection PyProtectedMember
    menu_hook = widget._menu_hook
    if menu_hook in self._submenus.keys():
        self._remove_submenu(menu_hook, widget)
        widget._menu_hook = None
    widget.on_remove_from_menu()

    # Removes Menu reference from widget. If Frame, it removes from _update_frames
    widget.set_menu(None)

    # Remove widget from update lists
    if widget in self._update_widgets:
        self._update_widgets.remove(widget)
    # Re-check mouseleave state as the hovered widget may just have been removed
    check_widget_mouseleave()
    return self
def get_sound(self) -> 'Sound':
    """
    Return the sound engine used by this Menu.

    :return: Sound API
    """
    return self._sound
def _update_after_remove_or_hidden(
    self,
    index: int,
    update_surface: bool = True
) -> None:
    """
    Update widgets after removal or hidden.

    :param index: Removed index, if ``-1`` then select next index, if equal to ``self._index`` select the same
    :param update_surface: Updates Menu surface
    """
    # Check if there's more selectable widgets; track the last selectable
    # index in case it turns out to be the only one
    n_select = 0
    last_selectable = 0
    for indx in range(len(self._widgets)):
        wid = self._widgets[indx]
        if wid.is_selectable and wid.is_visible():  # Considers frame
            n_select += 1
            last_selectable = indx

    # Any widget is selected
    if n_select == 0:
        self._index = -1

    # Select the unique selectable option
    elif n_select == 1:
        self._select(last_selectable, 0, SELECT_REMOVE, False)

    # There is at least 1 option to select from
    elif n_select > 1:
        if index == -1:  # Index was hidden
            self._select(self._index + 1, 1, SELECT_REMOVE, False)
        elif self._index > index:  # If the selected widget was after this
            self._select(self._index - 1, -1, SELECT_REMOVE, False)
        else:
            self._select(self._index, 1, SELECT_REMOVE, False)
    self._update_widget_position()
    if update_surface:
        # If added on execution time forces the update of the surface
        self._widgets_surface = None
def _back(self) -> None:
"""
Go to previous Menu or close if the top Menu is currently displayed.
"""
if self._top._prev is not None:
self.reset(1)
else:
self._close()
def _update_selection_if_hidden(self) -> None:
"""
Updates the Menu widget selection if a widget was hidden.
"""
if len(self._widgets) > 0:
if self._index != -1:
selected_widget = self._widgets[self._index % len(self._widgets)]
if not selected_widget.is_visible(): # Considers frame
selected_widget.select(False) # Unselect
self._update_after_remove_or_hidden(-1, update_surface=False)
else:
self._update_after_remove_or_hidden(0, update_surface=False)
def _update_widget_position(self) -> None:
    """
    Update the position of each widget. Also checks widget consistency
    (unique selection, row/column capacity, and column sizing).
    """
    # Column widgets
    self._widget_columns = {}
    for i in range(self._columns):
        self._widget_columns[i] = []

    # Set the column widths (minimum values), safe for certain widgets that
    # request the width on rendering
    self._column_widths = []
    column_widths = [self._column_min_width[i] for i in range(self._columns)]

    # Set column/row of each widget and compute maximum width of each column if None
    self._used_columns = 0
    max_elements_msg = \
        f'total visible/non-floating widgets ([widg]) cannot exceed columns*rows' \
        f'({self._max_row_column_elements} elements). Menu position update failed.' \
        f' If using frames, please pack before adding new widgets'
    i_index = 0
    has_frame = False

    # Checks for widget selection consistency
    has_selected_widget = False
    invalid_selection_widgets: List[str] = []
    selected_widget = None

    for index in range(len(self._widgets)):
        widget = self._widgets[index]

        # Check widget selection; only the first selected widget (by index)
        # keeps its selection, any other is unselected and reported below
        if widget.is_selected():
            if not has_selected_widget:
                has_selected_widget = True
                selected_widget = widget.get_class_id()
                self._index = index
            else:
                widget.select(False)
                invalid_selection_widgets.append(widget.get_class_id())

        # If widget is frame, update its internal layout first
        if isinstance(widget, Frame):
            try:
                widget.update_position()
            except:
                # Warn with the failing frame's ID, then propagate unchanged
                warn(f'{widget.get_class_id()} failed to update')
                raise
            has_frame = True

        # If not visible, or within frame, continue to the next widget
        if not widget.is_visible() or widget.get_frame() is not None:
            widget.set_col_row_index(-1, -1, index)
            continue

        # Check if the maximum number of elements was reached, if so raise an exception
        # If menu has frames, this check is disabled
        if not has_frame and not i_index < self._max_row_column_elements:
            raise _MenuWidgetOverflow(max_elements_msg.replace('[widg]', str(i_index)))

        # Set the widget column/row position
        row = i_index
        col = 0
        max_rows = 0
        for col in range(self._columns):  # Find which column it belongs to
            max_rows += self._rows[col]
            if i_index < max_rows:
                break
            row -= self._rows[col]  # Subtract the number of rows of such column

        # Important before getting widget width as some widgets require the
        # column max width
        widget.set_col_row_index(col, row, index)
        self._widget_columns[col].append(widget)

        # Update used columns
        self._used_columns = max(self._used_columns, col + 1)

        # Get the next widget; if it doesn't exist, use the same
        next_widget = widget
        if index < len(self._widgets) - 1:
            next_widget = self._widgets[index + 1]

        # If widget is floating don't update the next
        if not (next_widget.is_floating() and next_widget.get_frame() is None):
            i_index += 1
        # If floating, don't contribute to the column width
        else:
            continue

        column_widths[col] = max(
            column_widths[col],
            widget.get_width(apply_selection=True)  # This forces rendering
        )

    if len(invalid_selection_widgets) > 0:
        self._index = -1
        # NOTE: the join expression uses double quotes; nesting the same quote
        # type inside an f-string is a SyntaxError before Python 3.12 (PEP 701)
        raise _MenuMultipleSelectedWidgetsException(
            f'several widgets are selected at the same time, current selected '
            f'(sorted by index): {selected_widget}, but the following are also'
            f' selected: {", ".join(invalid_selection_widgets)}. If widget is'
            f' selected outside the menu, use widget.select(update_menu=True)'
        )

    # Apply max width column limit
    for col in range(self._used_columns):
        if self._column_max_width[col] is not None:
            column_widths[col] = min(column_widths[col], self._column_max_width[col])

    # If some columns were not used, set these widths to zero
    for col in range(self._used_columns, self._columns):
        column_widths.pop()
        del self._widget_columns[col]

    # If the total weight is less than the window width (so there's no horizontal
    # scroll), scale the columns. Only None column_max_widths and columns less
    # than the maximum are scaled
    sum_width_columns = sum(column_widths)
    max_width = self.get_width(inner=True)
    if 0 <= sum_width_columns < max_width and len(self._widgets) > 0:

        # First, scale columns to its maximum
        sum_contrib = []
        for col in range(self._used_columns):
            if self._column_max_width[col] is None:
                sum_contrib.append(0)
            elif column_widths[col] < self._column_max_width[col]:
                sum_contrib.append(self._column_max_width[col] - column_widths[col])
            else:
                sum_contrib.append(0)

        delta = max_width - sum(sum_contrib) - sum_width_columns
        if delta < 0:  # Scale contrib back
            scale = (max_width - sum_width_columns) / sum(sum_contrib)
            sum_contrib = [sum_contrib[i] * scale for i in range(len(sum_contrib))]

        # Increase to its maximums
        for col in range(self._used_columns):
            if sum_contrib[col] > 0:
                column_widths[col] += sum_contrib[col]

        # Scale column widths if None
        sum_width_columns = sum(column_widths)
        sum_contrib = []
        for col in range(self._used_columns):
            if self._column_max_width[col] is None:
                sum_contrib.append(column_widths[col])
            else:
                sum_contrib.append(0)
        delta = max_width - sum_width_columns
        if delta > 0:
            for col in range(self._used_columns):
                if sum_contrib[col] > 0:
                    column_widths[col] += delta * sum_contrib[col] / sum(sum_contrib)

        # Re-compute sum
        sum_width_columns = sum(column_widths)

        # If column width still 0, set all the column the same width (only used)
        # This only can happen if column_min_width was not set
        if sum_width_columns < max_width and self._used_columns >= 1:

            # The width it would be added for each column
            mod_width = max_width  # Available left width for non-max columns
            non_max = self._used_columns

            # First fill all maximum width columns
            for col in range(self._used_columns):
                if self._column_max_width[col] is not None:
                    column_widths[col] = min(self._column_max_width[col],
                                             max_width / self._used_columns)
                    mod_width -= column_widths[col]
                    non_max -= 1

            # Now, update the rest (non-maximum set)
            if non_max > 0:
                for col in range(self._used_columns):
                    if self._column_max_width[col] is None:
                        column_widths[col] = mod_width / non_max

    # Cast to int
    for col in range(self._used_columns):
        column_widths[col] = int(math.ceil(column_widths[col]))

    # Final column width
    total_col_width = sum(column_widths)
    if self._used_columns > 1:
        # Calculate column width scale (weights)
        column_weights = tuple(
            float(column_widths[i]) / max(total_col_width, 1) for i in range(self._used_columns))

        # Calculate the position of each column
        self._column_pos_x = []
        cumulative = 0
        for i in range(self._used_columns):
            w = column_weights[i]
            self._column_pos_x.append(int(total_col_width * (cumulative + 0.5 * w)))
            cumulative += w
    else:
        self._column_pos_x = [total_col_width * 0.5]
        column_widths = [total_col_width]

    # Now updates the column width's
    self._column_widths = column_widths

    # Update title position
    self._menubar.set_position(*self.get_position())

    # Widget max/min position
    min_max_updated = False
    max_x, max_y = -1e8, -1e8
    min_x, min_y = 1e8, 1e8

    # Cache rects
    rects_cache: Dict[str, 'pygame.Rect'] = {}

    def get_rect(wid: 'Widget') -> 'pygame.Rect':
        """
        Get rect cache from widget.

        :param wid: Widget
        :return: Rect cache
        """
        try:
            return rects_cache[wid.get_id()]
        except KeyError:
            rects_cache[wid.get_id()] = wid.get_rect(render=True)
        return rects_cache[wid.get_id()]

    # Get menubar height, if fixed then move all widgets within area
    menubar_height = self._menubar.get_height() if self._menubar.fixed else 0

    # Update appended widgets
    for index in range(len(self._widgets)):
        widget = self._widgets[index]
        align = widget.get_alignment()
        margin = widget.get_margin()
        padding = widget.get_padding()
        selection_effect_margin = widget.get_selection_effect().get_margin()
        width = get_rect(widget).width
        if not widget.is_visible():
            widget.set_position(0, 0)
            continue

        # If widget within frame update col/row position
        if widget.get_frame() is not None:
            # noinspection PyProtectedMember
            widget._set_position_relative_to_frame(index)
            continue

        # Get column and row position
        col, row, _ = widget.get_col_row_index()

        # Calculate X position
        column_width = self._column_widths[col]
        selection_margin = 0
        dx = 0
        sm_left, sm_right = selection_effect_margin[1], selection_effect_margin[3]
        if align == ALIGN_CENTER:
            dx = -(width + sm_right - sm_left) / 2
        elif align == ALIGN_LEFT:
            selection_margin = sm_left
            dx = -column_width / 2 + selection_margin
        elif align == ALIGN_RIGHT:
            selection_margin = sm_right
            dx = column_width / 2 - width - selection_margin
        d_border = int(math.ceil(widget.get_border()[1] / 2))

        # self._column_pos_x points at the middle of each column
        x_coord = self._column_pos_x[col] + dx + margin[0] + padding[3]
        x_coord = max(selection_margin, x_coord)
        x_coord += max(0, self._widget_offset[0]) + d_border

        # Check if widget width exceeds column max width
        max_column_width = self._column_max_width[col]
        if max_column_width is not None and width > max_column_width:
            raise _MenuSizingException(
                f'{widget.get_class_id()} widget width ({width}) exceeds column {col + 1} max width ({max_column_width})'
            )

        # Calculate Y position
        y_sum = 1  # Compute the total height from the current row position to the top of the column
        for r_widget in self._widget_columns[col]:
            _, r, _ = r_widget.get_col_row_index()
            if r >= row:
                break
            # Equivalent to the former "not ... is not None" double negative
            if r_widget.is_visible() and \
                    not r_widget.is_floating() and \
                    r_widget.get_frame() is None:
                y_sum += get_rect(r_widget).height  # Height
                y_sum += r_widget.get_margin()[1]  # Vertical margin (bottom)

                # If no widget is before add the selection effect
                y_sel_h = r_widget.get_selection_effect().get_margin()[0]
                if r == 0 and self._widget_offset[1] <= y_sel_h:
                    if r_widget.is_selectable:
                        y_sum += y_sel_h - self._widget_offset[1]

        # If the widget offset is zero, then add the selection effect to the height
        # of the widget to avoid visual glitches
        y_sel_h = widget.get_selection_effect().get_margin()[0]
        if y_sum == 1 and self._widget_offset[1] <= y_sel_h:  # No widget is before
            if widget.is_selectable:  # Add top margin
                y_sum += y_sel_h - self._widget_offset[1]

        y_coord = max(0, self._widget_offset[1]) + y_sum + padding[0] + menubar_height

        # If the widget is floating and has origin-position
        # noinspection PyProtectedMember
        if widget.is_floating() and widget._floating_origin_position:
            widget.set_position(
                x=max(0, self._widget_offset[0]) + padding[3],
                y=menubar_height + padding[0] + d_border)
            continue

        # Update the position of the widget
        widget.set_position(x_coord, y_coord)

        # Add the widget translation to the widget for computing the min/max position. This
        # feature does not work as intended as there's edge cases not covered, and centering makes
        # the translation more difficult
        # tx, ty = widget.get_translate()
        tx, ty = 0, 0

        # Update max/min position, minus padding
        min_max_updated = True
        max_x = max(max_x, x_coord + width - padding[1] + tx + sm_right)  # minus right padding
        max_y = max(max_y, y_coord + get_rect(widget).height - padding[2] + ty)  # minus bottom padding
        min_x = min(min_x, x_coord - padding[3] - sm_left)
        min_y = min(min_y, y_coord - padding[0])

    # Update position
    if min_max_updated:
        self._widget_max_position = (max_x, max_y)
        self._widget_min_position = (min_x, min_y)
    else:
        self._widget_max_position = (0, 0)
        self._widget_min_position = (0, 0)

    self._stats.position_update += 1
def _build_widget_surface(self) -> None:
    """
    Create the surface used to draw widgets according the required width and
    height.
    """
    self._stats.build_surface += 1
    t0 = time.time()

    # Update internals
    self._update_selection_if_hidden()
    self._update_widget_position()

    # A floating (non-fixed) menubar consumes part of the available height
    menubar_height = self._menubar.get_height() if not self._menubar.fixed else 0
    max_x, max_y = self._widget_max_position

    # Get scrollbars size
    sx, sy = self._get_scrollbar_thickness()

    # Remove the thick of the scrollbar to avoid displaying a horizontal one
    # If overflow on both axis
    if max_x > self._width - sy and max_y > self._height - sx - menubar_height:
        width, height = max_x, max_y
        if not self._mouse_visible:
            self._mouse_visible = True

    # If horizontal overflow
    elif max_x > self._width - sy:
        width, height = max_x, self._height - menubar_height - sx
        self._mouse_visible = self._mouse_visible_default

    # If vertical overflow
    elif max_y > self._height - sx - menubar_height:
        width, height = self._width - sy, max_y
        if not self._mouse_visible:
            self._mouse_visible = True

    # No overflow
    else:
        width, height = self._width, self._height - menubar_height
        self._mouse_visible = self._mouse_visible_default

    # Checks overflow; if overflow is disabled on an axis, clamp to menu size
    if not self._overflow[0]:
        width = self._width
    if not self._overflow[1]:
        height = self._height - menubar_height

    # Adds ScrollArea margin
    width += self._scrollarea_margin[0]
    height += self._scrollarea_margin[1]

    # Cast to int
    width = int(width)
    height = int(height)

    # Get the previous surface if the width/height is the same
    if width == self._widgets_surface_last[0] and \
            height == self._widgets_surface_last[1]:
        self._widgets_surface = self._widgets_surface_last[2]
    else:
        self._widgets_surface = make_surface(width, height)
        self._widgets_surface_last = (width, height, self._widgets_surface)

    # Set position
    self._scrollarea.set_world(self._widgets_surface)
    self._scrollarea.set_position(*self.get_position())

    # Check if the scrollbars changed; re-render once at most (the counter
    # guard prevents an infinite render loop if thickness keeps toggling)
    sx, sy = self._get_scrollbar_thickness()
    if (sx, sy) != self._last_scroll_thickness[0] and \
            self._last_scroll_thickness[1] == 0:
        self._last_scroll_thickness[0] = (sx, sy)
        self._last_scroll_thickness[1] += 1
        self._widgets_surface_need_update = True
        self._render()
    else:
        self._last_scroll_thickness[1] = 0

    # Update times
    dt = time.time() - t0
    self._stats.total_building_time += dt
    self._stats.last_build_surface_time = dt
def _check_id_duplicated(self, widget_id: str) -> None:
"""
Check if widget ID is duplicated. Throws ``IndexError`` if the index is
duplicated.
:param widget_id: New widget ID
"""
assert isinstance(widget_id, str)
for widget in self._widgets:
if widget.get_id() == widget_id:
raise IndexError(
f'widget id "{widget_id}" already exists on the current menu ({widget.get_class_id()})'
)
def _close(self) -> bool:
    """
    Run the ``onclose`` action and disable the Menu. Nothing happens when
    ``onclose`` is ``None`` or :py:mod:`pygame_menu.events.NONE`.

    :return: ``True`` if the ``onclose`` callback/event was executed
    """
    onclose = self._onclose
    if onclose is None or onclose == _events.NONE:
        return False

    # Closing always disables the Menu
    self.disable()

    if _events.is_event(onclose):
        # Dispatch the supported closing events
        if onclose == _events.BACK:
            self.reset(1)
        elif onclose == _events.CLOSE:
            pass
        elif onclose == _events.EXIT:
            self._exit()
        elif onclose == _events.RESET:
            self.full_reset()
    elif callable(onclose):
        # The callback may accept the menu instance or no arguments at all
        try:
            onclose(self)
        except TypeError:
            onclose()
    return True
def close(self) -> bool:
    """
    Close the **current** Menu by firing its ``onclose`` callback. If
    ``callback=None`` this method does nothing.

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().reset(...)``.

    :return: ``True`` if the Menu executed the ``onclose`` callback
    """
    if not self.is_enabled():
        self._current._runtime_errors.throw(
            self._current._runtime_errors.close, 'menu already closed'
        )
    return self._current._close()
def _get_depth(self) -> int:
"""
Return the Menu depth.
:return: Menu depth
"""
prev = self._top._prev
depth = 0
if prev is not None:
while True:
if prev is not None:
prev = prev[0]
depth += 1
else:
break
return depth
def disable(self) -> 'Menu':
    """
    Disable the Menu: it no longer checks events nor draws onto the surface.

    .. note::

        The ``onclose`` callback is not fired; use ``Menu.close()`` for that.

    :return: Self reference
    """
    # Clear any pending widget mouseleave state before turning the menu off
    check_widget_mouseleave(force=True)
    self._top._enabled = False
    return self
def set_absolute_position(self, position_x: NumberType, position_y: NumberType) -> 'Menu':
    """
    Place the Menu at an absolute window position.

    .. note::

        Applied only to the base Menu (not the currently displayed one stored
        in the ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param position_x: Left position of the window
    :param position_y: Top position of the window
    :return: Self reference
    """
    assert isinstance(position_x, NumberInstance)
    assert isinstance(position_y, NumberInstance)
    self._position = (position_x, position_y)
    self._widgets_surface = None  # Forces an update of the widgets
    return self
def set_relative_position(self, position_x: NumberType, position_y: NumberType) -> 'Menu':
    """
    Place the Menu relative to the window.

    .. note::

        - Left position (x) must be within ``0``..``100``: ``0`` pins the
          Menu to the left of the window, ``100`` to the right.
        - Top position (y) must be within ``0``..``100``: ``0`` pins the
          Menu to the top of the window, ``100`` to the bottom.

    .. note::

        Applied only to the base Menu (not the currently displayed one stored
        in the ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param position_x: Left position of the window
    :param position_y: Top position of the window
    :return: Self reference
    """
    assert isinstance(position_x, NumberInstance)
    assert isinstance(position_y, NumberInstance)
    assert 0 <= position_x <= 100
    assert 0 <= position_y <= 100
    # Convert percentages to fractions of the remaining free space
    fx = float(position_x) / 100
    fy = float(position_y) / 100
    win_w, win_h = self._window_size
    self._position = (int((win_w - self._width) * fx),
                      int((win_h - self._height) * fy))
    self._widgets_surface = None  # Forces an update of the widgets
    return self
def center_content(self) -> 'Menu':
    """
    Vertically center the Menu content; rewrites ``widget_offset``.

    .. note::

        If the widgets are taller than the Menu, the drawing region covers
        the whole inner surface instead.

    .. note::

        Applied only to the base Menu (not the currently displayed one stored
        in the ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Self reference
    """
    self._stats.center_content += 1
    if not self._widgets:
        # With no widgets get_widget_max would return an immense value
        self._widget_offset[1] = 0
        return self
    if self._widgets_surface is None:
        self._update_widget_position()  # Needed for the max/min positions
    available = self.get_height(inner=True)
    widget_height = self.get_height(widget=True)
    if widget_height >= available:
        # Nothing to center; drop any previous offset
        if self._widget_offset[1] != 0:
            self._widgets_surface = None
            self._widget_offset[1] = 0
        return self
    new_offset = int(max(float(available - widget_height) / 2, 0))
    if abs(new_offset - self._widget_offset[1]) > 1:
        self._widget_offset[1] = new_offset
        self._widgets_surface = None  # Rebuild on the next draw
    return self
def _get_scrollbar_thickness(self) -> Tuple2IntType:
    """
    Return the scrollbar thickness for the x-axis and y-axis (horizontal
    and vertical scrollbars).

    :return: Scrollbar thickness in px
    """
    area = self._scrollarea
    return (area.get_scrollbar_thickness(ORIENTATION_HORIZONTAL),
            area.get_scrollbar_thickness(ORIENTATION_VERTICAL))
def get_width(self, inner: bool = False, widget: bool = False, border: bool = False) -> int:
    """
    Get the Menu width.

    .. note::

        Applied only to the base Menu (not the currently displayed one stored
        in the ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param inner: If ``True`` returns the available width (Menu width minus the vertical scrollbar if visible)
    :param widget: If ``True`` returns the total width used by the widgets
    :param border: If ``True`` adds the Menu border width; only used if both ``inner`` and ``widget`` are ``False``
    :return: Width in px
    """
    if widget:
        return int(self._widget_max_position[0] - self._widget_min_position[0])
    if inner:
        return int(self._width - self._get_scrollbar_thickness()[1])
    extra = 2 * self._scrollarea.get_border_size()[0] if border else 0
    return int(self._width) + extra
def get_height(self, inner: bool = False, widget: bool = False, border: bool = False) -> int:
    """
    Get the Menu height.

    .. note::

        Applied only to the base Menu (not the currently displayed one stored
        in the ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param inner: If ``True`` returns the available height (Menu height minus menubar and horizontal scrollbar)
    :param widget: If ``True`` returns the total height used by the widgets
    :param border: If ``True`` adds the Menu border height; only used if both ``inner`` and ``widget`` are ``False``
    :return: Height in px
    """
    if widget:
        return int(self._widget_max_position[1] - self._widget_min_position[1])
    if inner:
        return int(self._height - self._menubar.get_height() - self._get_scrollbar_thickness()[0])
    extra = 2 * self._scrollarea.get_border_size()[1] if border else 0
    return int(self._height) + extra
def get_size(self, inner: bool = False, widget: bool = False, border: bool = False) -> Vector2IntType:
    """
    Return the Menu size as a ``(width, height)`` tuple in px.

    .. note::

        Applied only to the base Menu (not the currently displayed one stored
        in the ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param inner: If ``True`` returns the available size (minus scrollbars and menubar)
    :param widget: If ``True`` returns the total size used by the widgets
    :param border: If ``True`` adds the border size; only used if both ``inner`` and ``widget`` are ``False``
    :return: Tuple of (width, height) in px
    """
    w = self.get_width(inner=inner, widget=widget, border=border)
    h = self.get_height(inner=inner, widget=widget, border=border)
    return w, h
def render(self) -> 'Menu':
    """
    Force the **current** Menu to render; useful to force a widget update.

    .. note::

        Should not be called while the Menu is being drawn, as
        :py:meth:`pygame_menu.menu.Menu.draw` already invokes it.

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().render(...)``.

    :return: Self reference **(current)**
    """
    current = self._current
    current._widgets_surface = None  # Invalidate to force a rebuild
    current._render()
    current._stats.render_public += 1
    return self
def _render(self) -> bool:
"""
Menu rendering.
:return: ``True`` if the surface has changed (if it was ``None``)
"""
t0 = time.time()
changed = False
if self._widgets_surface_need_update:
self._widgets_surface = None
if self._widgets_surface is None:
self._widgets_surface_need_update = False
if self._auto_centering:
self.center_content()
self._build_widget_surface()
self._stats.render_private += 1
changed = True
self._stats.total_rendering_time += time.time() - t0
return changed
def draw(self, surface: 'pygame.Surface', clear_surface: bool = False) -> 'Menu':
    """
    Draw the **current** Menu into the given surface.

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().draw(...)``.

    :param surface: Pygame surface to draw the Menu
    :param clear_surface: Clear surface using theme ``surface_clear_color``
    :return: Self reference **(current)**
    """
    assert isinstance(surface, pygame.Surface)
    assert isinstance(clear_surface, bool)
    if not self.is_enabled():
        self._current._runtime_errors.throw(self._current._runtime_errors.draw, 'menu is not enabled')
        return self._current
    if self._current._disable_draw:
        return self._current

    # Render menu; if True, the surface widget has changed, thus cache should
    # change if enabled
    render = self._current._render()

    # Updates title
    if self._current._theme.title_updates_pygame_display and \
            pygame.display.get_caption()[0] != self._current.get_title():
        pygame.display.set_caption(self._current.get_title())

    # Clear surface
    if clear_surface:
        surface.fill(self._current._theme.surface_clear_color)

    # Call background function (set from mainloop)
    if self._top._background_function[1] is not None:
        if self._top._background_function[0]:
            self._top._background_function[1](self._current)
        else:
            self._top._background_function[1]()

    # Draw the prev decorator
    self._current._decorator.draw_prev(surface)

    # Draw widgets, update cache if enabled
    if not self._current._widget_surface_cache_enabled or \
            (render or self._current._widget_surface_cache_need_update):
        # This should be updated before drawing widgets. As widget draw may
        # trigger surface cache updating. Don't move this line or unexpected
        # errors may occur
        self._current._widget_surface_cache_need_update = False

        # Fill the scrolling surface (clear previous state)
        self._current._widgets_surface.fill((255, 255, 255, 0))

        # Call scrollarea draw decorator. This must be done before filling the
        # surface. ScrollArea post decorator is drawn on _scroll.draw(surface) call
        scrollarea_decorator = self._current._scrollarea.get_decorator()
        scrollarea_decorator.force_cache_update()
        scrollarea_decorator.draw_prev(self._current._widgets_surface)

        # Iterate through widgets and draw them
        selected_widget_draw: Tuple[Optional['Widget'], Optional['pygame.Surface']] = (None, None)
        for widget in self._current._widgets:
            # Widgets within frames are not drawn as it's frame draw these widgets
            if widget.get_frame() is not None:
                continue
            if widget.is_selected():
                selected_widget_draw = widget, self._current._widgets_surface
            widget.draw(self._current._widgets_surface)
            if isinstance(widget, Frame):
                f_selected_widget = widget.selected_widget_draw
                if f_selected_widget[0] is not None:
                    selected_widget_draw = f_selected_widget

        if selected_widget_draw[0] is not None:
            selected_widget_draw[0].draw_after_if_selected(selected_widget_draw[1])

        self._current._stats.draw_update_cached += 1

    self._current._scrollarea.draw(surface)
    self._current._menubar.draw(surface)

    # Draw focus on selected if the widget is active
    self._current._draw_focus_widget(surface, self._current.get_selected_widget())
    self._current._decorator.draw_post(surface)
    self._current._stats.draw += 1

    # Update cursor if not mainloop
    # Fixed: the condition was inverted ("if self._current._mainloop"),
    # contradicting the comment above; mouseleave polling is only required
    # when draw() is called manually, outside of mainloop()
    if not self._current._mainloop:
        check_widget_mouseleave()

    return self._current
def _draw_focus_widget(
        self,
        surface: 'pygame.Surface',
        widget: Optional['Widget'],
        force: bool = False
) -> Optional[Dict[int, Tuple4Tuple2IntType]]:
    """
    Draw the focus background from a given widget. Widget must be selectable,
    active, selected. Not all widgets requests the active status, then focus
    may not be drawn.

    :param surface: Pygame surface to draw the Menu
    :param widget: Focused widget
    :param force: If ``True`` forces focus without any checks
    :return: The focus region, ``None`` if the focus could not be possible
    """
    assert isinstance(surface, pygame.Surface)
    assert isinstance(widget, (Widget, type(None)))

    # Widgets flagged with force_menu_draw_focus are focused even without
    # the motion-selection checks below
    force = force or (widget is not None and widget.active and widget.force_menu_draw_focus)
    if not force and (widget is None
                      or not widget.active
                      or not widget.is_selectable
                      or not widget.is_selected()
                      or not (self._mouse_motion_selection or self._touchscreen_motion_selection)
                      or not widget.is_visible()):
        return
    window_width, window_height = self._window_size

    self._render()  # Surface may be none, then update the positioning
    rect = widget.get_focus_rect()

    # Apply selection effect
    rect = widget.get_selection_effect().inflate(rect)
    if rect.width == 0 or rect.height == 0:
        return

    # Flatten the rect into its four corner coordinates
    x1, y1, x2, y2 = rect.topleft + rect.bottomright
    x1 = int(x1)
    y1 = int(y1)
    x2 = int(x2)
    y2 = int(y2)

    coords = {}
    if abs(y1 - y2) <= 4 or abs(x1 - x2) <= 4:
        # If the area of the selected widget is too small, draw focus over the entire menu
        # .------------------.
        # |                  |
        # |        1         |
        # |                  |
        # .------------------.
        coords[1] = (0, 0), (window_width, 0), (window_width, window_height), (0, window_height)
    else:
        # Draw 4 areas around the widget rect:
        # .------------------.
        # |________1_________|
        # |  2  |******|  3  |
        # |_____|******|_____|
        # |        4         |
        # .------------------.
        coords[1] = (0, 0), (window_width, 0), (window_width, y1 - 1), (0, y1 - 1)
        coords[2] = (0, y1), (x1 - 1, y1), (x1 - 1, y2 - 1), (0, y2 - 1)
        coords[3] = (x2, y1), (window_width, y1), (window_width, y2 - 1), (x2, y2 - 1)
        coords[4] = (0, y2), (window_width, y2), (window_width, window_height), (0, window_height)

    for area in coords:
        gfxdraw.filled_polygon(surface, coords[area], self._theme.focus_background_color)
    return coords
def set_controller(self, controller: 'Controller') -> 'Menu':
    """
    Set a new controller object for the Menu.

    .. note::

        Applied only to the base Menu (not the currently displayed one stored
        in the ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param controller: Controller
    :return: Self reference
    """
    self._ctrl = controller
    return self
def enable(self) -> 'Menu':
    """
    Enable the Menu so it can check events and be drawn.

    .. note::

        Applied only to the base Menu (not the currently displayed one stored
        in the ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Self reference
    """
    self._top._enabled = True
    return self
def toggle(self) -> 'Menu':
    """
    Switch the Menu between enabled and disabled.

    .. note::

        Applied only to the base Menu (not the currently displayed one stored
        in the ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Self reference
    """
    self._top._enabled = not self._top._enabled
    return self
def _exit(self) -> None:
    """
    Internal exit function: disables the Menu, quits pygame and terminates
    the process. Does nothing if ``_disable_exit`` is set.
    """
    if self._disable_exit:
        return
    self.disable()
    pygame.quit()
    try:
        sys.exit(0)
    except SystemExit:
        # sys.exit raises SystemExit; fall back to a hard process exit so
        # termination happens even when SystemExit is intercepted upstream
        # noinspection PyUnresolvedReferences,PyProtectedMember
        os._exit(1)
    # This should be unreachable
    exit(0)
def is_enabled(self) -> bool:
    """
    Return whether the Menu is enabled.

    .. note::

        Applied only to the base Menu (not the currently displayed one stored
        in the ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Menu enabled status
    """
    return self._top._enabled
def _sort_update_frames(self) -> None:
    """
    Sort the update frames (frames that receive updates): first by depth
    (deepest first), then — for frames of equal depth — by their index
    within the Menu widget list.
    """
    if len(self._update_frames) <= 1:
        return

    # Pair each frame with its negated depth so deeper frames sort first
    by_depth: List[Tuple[int, 'Frame']] = []
    for frame in self._update_frames:
        assert isinstance(frame, Frame)
        by_depth.append((-frame.get_frame_depth(), frame))
    by_depth.sort(key=lambda pair: pair[0])

    # Group frames of equal depth, keyed by the (negated) depth; insertion
    # order of the groups follows the depth sort above
    groups: Dict[int, List[Tuple[int, 'Frame']]] = {}
    for depth, frame in by_depth:
        bucket = groups.setdefault(depth, [])
        index = self._widgets.index(frame) if frame in self._widgets else 0
        bucket.append((index, frame))

    # Rebuild the update list, each group ordered by widget index
    self._update_frames = []
    for depth in groups:
        groups[depth].sort(key=lambda pair: pair[0])
        for _, frame in groups[depth]:
            self._update_frames.append(frame)
def _move_selected_left_right(self, pos: int, apply_sound: bool = False) -> bool:
    """
    Move the selected widget to the left/right column (column support).

    :param pos: ``+1`` selects the right column, ``-1`` the left one
    :param apply_sound: Apply sound on widget selection
    :return: ``True`` if the selected widget changed
    :raises ValueError: If ``pos`` is not ``+1`` or ``-1``
    """
    if pos not in (1, -1):
        raise ValueError('pos must be +1 or -1')

    def _fallback() -> bool:
        # Select the first widget (moving left) or the last one (moving right)
        if pos == -1:
            return self._select(0, 1, SELECT_KEY, apply_sound)
        return self._select(-1, -1, SELECT_KEY, apply_sound)

    if self._used_columns <= 1:
        return _fallback()

    # No widget selected yet
    sel_widget = self.get_selected_widget()
    if sel_widget is None:
        return _fallback()

    # Compute the target column from the current one
    col, row, _ = sel_widget.get_col_row_index()
    col = (col + pos) % self._used_columns

    # Prefer the widget sharing the same row in the target column
    for candidate in self._widget_columns[col]:
        _, r, i = candidate.get_col_row_index()
        if r == row:
            return self._select(i, pos, SELECT_KEY, apply_sound)

    # Target column holds no widget at all
    if not self._widget_columns[col]:
        return _fallback()

    # Fewer rows in the target column: pick its first widget
    _, _, i = self._widget_columns[col][0].get_col_row_index()
    return self._select(i, pos, SELECT_KEY, apply_sound)
def _handle_joy_event(self, apply_sound: bool = False) -> bool:
    """
    Handle the stored joy event flags, moving the selection accordingly.

    :param apply_sound: Apply sound on widget selection
    :return: ``True`` if widget changed
    """
    if self._joy_event & JOY_EVENT_UP:
        return self._select(self._index - 1, -1, SELECT_KEY, apply_sound)
    if self._joy_event & JOY_EVENT_DOWN:
        return self._select(self._index + 1, 1, SELECT_KEY, apply_sound)
    if self._joy_event & JOY_EVENT_LEFT:
        return self._move_selected_left_right(-1, apply_sound)
    if self._joy_event & JOY_EVENT_RIGHT:
        return self._move_selected_left_right(1, apply_sound)
    # Fixed: previously fell through returning an implicit None despite the
    # declared bool return type; return an explicit False (same truthiness)
    return False
def _up(self, apply_sound: bool = False) -> bool:
    """
    Process up key event.

    :param apply_sound: Apply selection sound
    :return: ``True`` if widget selected
    """
    # Play the key sound only when apply_sound is False
    if not apply_sound:
        self._sound.play_key_add()
    # NOTE(review): "up" advances the index (+1) while _down decreases it;
    # this mirrors the menu's widget ordering — confirm before changing
    return self._select(self._index + 1, 1, SELECT_KEY, apply_sound)
def _down(self, apply_sound: bool = False) -> bool:
    """
    Process down key event.

    :param apply_sound: Apply selection sound
    :return: ``True`` if widget selected
    """
    # Play the key sound only when apply_sound is False
    if not apply_sound:
        self._sound.play_key_add()
    # NOTE(review): "down" decreases the index (-1) while _up advances it;
    # this mirrors the menu's widget ordering — confirm before changing
    return self._select(self._index - 1, -1, SELECT_KEY, apply_sound)
def _left(self, apply_sound: bool = False) -> bool:
    """
    Process left key event.

    :param apply_sound: Apply selection sound
    :return: ``True`` if widget selected
    """
    # Play the key sound only when apply_sound is False
    if not apply_sound:
        self._sound.play_key_add()

    # Get frame properties: whether the selection lives in a horizontal
    # frame, and whether it is the first widget of that frame
    selected_widget = self.get_selected_widget()
    selected_widget_in_frame_horizontal = selected_widget is not None and \
        selected_widget.get_frame() is not None and \
        selected_widget.get_frame().horizontal
    selected_widget_first_in_frame = selected_widget_in_frame_horizontal and \
        selected_widget.get_frame().first_index == self._index

    # If current selected in within a horizontal frame, step back one index;
    # otherwise fall back to column movement when several columns are used
    if selected_widget_in_frame_horizontal and not selected_widget_first_in_frame:
        return self._current._select(self._current._index - 1, -1, SELECT_KEY, False)
    elif self._current._used_columns > 1:
        return self._current._move_selected_left_right(-1)
    return False
def _right(self, apply_sound: bool = False) -> bool:
    """
    Process right key event.

    :param apply_sound: Apply selection sound
    :return: ``True`` if widget selected
    """
    # Play the key sound only when apply_sound is False
    if not apply_sound:
        self._sound.play_key_add()

    # Get frame properties: whether the selection lives in a horizontal
    # frame, and whether it is the last widget of that frame
    selected_widget = self.get_selected_widget()
    selected_in_frame_horizontal = selected_widget is not None and \
        selected_widget.get_frame() is not None and \
        selected_widget.get_frame().horizontal
    selected_last_in_frame = selected_in_frame_horizontal and \
        selected_widget.get_frame().last_index == self._current._index

    # If current selected in within a horizontal frame, advance one index;
    # otherwise fall back to column movement when several columns are used
    if selected_in_frame_horizontal and not selected_last_in_frame:
        return self._current._select(self._current._index + 1, 1, SELECT_KEY, False)
    elif self._current._used_columns > 1:
        return self._current._move_selected_left_right(1)
    return False
def get_last_update_mode(self) -> List[str]:
    """
    Return the last update mode of the **current** Menu.

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().update(...)``.

    :return: Strings representing the update status (see ``pygame_menu.events``); some entries also carry the updated widget as ``EVENT_NAME#widget_id``
    """
    modes = self._current._last_update_mode
    return modes if modes else [_events.MENU_LAST_NONE]
def update(self, events: EventVectorType) -> bool:
    """
    Update the status of the Menu using external events. The update event is
    applied only on the **current** Menu.

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().update(...)``.

    :param events: List of pygame events
    :return: ``True`` if the menu updated (or a widget)
    """
    # Check events
    assert isinstance(events, list)
    self._current._last_update_mode = []

    # If menu is not enabled
    if not self.is_enabled():
        self._current._runtime_errors.throw(self._current._runtime_errors.update,
                                            'menu is not enabled')
    self._current._stats.update += 1

    # Call onupdate callback; it may accept (events, menu) or no arguments
    if self._current._onupdate is not None:
        try:
            self._current._onupdate(events, self._current)
        except TypeError:
            self._current._onupdate()
    if self._current._disable_update:
        self._current._last_update_mode.append(_events.MENU_LAST_DISABLE_UPDATE)
        return False

    # If any widget status changes, set the status as True
    updated = False

    # Update mouse
    pygame.mouse.set_visible(self._current._mouse_visible)
    mouse_motion_event = None

    selected_widget = self._current.get_selected_widget()
    selected_widget_disable_frame_update = \
        (False if selected_widget is None else selected_widget.active) and \
        self._current._mouse_motion_selection or \
        selected_widget is not None and selected_widget.active and \
        selected_widget.force_menu_draw_focus
    selected_widget_scrollarea = None if selected_widget is None else selected_widget.get_scrollarea()

    # First, check update frames
    frames_updated = False
    if not selected_widget_disable_frame_update:
        for frame in self._current._update_frames:
            frames_updated = frames_updated or frame.update(events)

    # Update widgets on update list
    for widget in self._current._update_widgets:
        widget.update(events)

    # Frames have updated
    if frames_updated:
        self._current._last_update_mode.append(_events.MENU_LAST_FRAMES)
        updated = True

    # Update scroll bars
    elif not selected_widget_disable_frame_update and self._current._scrollarea.update(events):
        self._current._last_update_mode.append(_events.MENU_LAST_SCROLL_AREA)
        updated = True

    # Update the menubar, it may change the status of the widget because
    # of the button back/close
    elif self._current._menubar.update(events):
        self._current._last_update_mode.append(_events.MENU_LAST_MENUBAR)
        updated = True

    # Check selected widget
    elif selected_widget is not None and self._current._widget_selected_update and \
            selected_widget.update(events):
        self._current._last_update_mode.append(
            f'{_events.MENU_LAST_SELECTED_WIDGET_EVENT}#{selected_widget.get_id()}'
        )
        updated = True

    # Check others
    else:
        # If mouse motion enabled, add the current mouse position to the events list
        if self._current._mouse and self._current._mouse_motion_selection:
            events.append(mouse_motion_current_mouse_position())

        for event in events:
            # User closes window
            close_altf4 = event.type == pygame.KEYDOWN and event.key == pygame.K_F4 and (
                event.mod == pygame.KMOD_LALT or event.mod == pygame.KMOD_RALT)
            if event.type == _events.PYGAME_QUIT or close_altf4 or event.type == _events.PYGAME_WINDOWCLOSE:
                self._current._last_update_mode.append(_events.MENU_LAST_QUIT)
                self._current._exit()
                return True

            # User press key
            elif event.type == pygame.KEYDOWN and self._current._keyboard:
                # Check key event is valid
                if self._keyboard_ignore_nonphysical and not check_key_pressed_valid(event):
                    continue

                if self._ctrl.move_down(event, self):
                    if self._current._down(apply_sound=True):
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_DOWN)
                        updated = True
                        break

                elif self._ctrl.move_up(event, self):
                    if self._current._up(apply_sound=True):
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_UP)
                        updated = True
                        break

                elif self._ctrl.left(event, self):
                    if self._current._left(apply_sound=True):
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_LEFT)
                        updated = True
                        break

                elif self._ctrl.right(event, self):
                    if self._current._right(apply_sound=True):
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_RIGHT)
                        updated = True
                        break

                elif self._ctrl.back(event, self) and self._top._prev is not None:
                    self._current._sound.play_close_menu()
                    self.reset(1)  # public, do not use _current
                    self._current._last_update_mode.append(_events.MENU_LAST_MENU_BACK)
                    updated = True

                elif self._ctrl.close_menu(event, self):
                    self._current._sound.play_close_menu()
                    if self._current._close():
                        self._current._last_update_mode.append(_events.MENU_LAST_MENU_CLOSE)
                        updated = True

            # User moves hat joystick
            elif event.type == pygame.JOYHATMOTION and self._current._joystick:
                if self._ctrl.joy_up(event, self):
                    if self._current._down(apply_sound=True):
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_DOWN)
                        updated = True
                        break

                elif self._ctrl.joy_down(event, self):
                    if self._current._up(apply_sound=True):
                        # Fixed: was an assignment which replaced the mode
                        # list with a bare string; append like the branches above
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_UP)
                        updated = True
                        break

                elif self._ctrl.joy_left(event, self):
                    if self._current._left(apply_sound=True):
                        # Fixed: append instead of assignment (see above)
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_LEFT)
                        updated = True
                        break

                elif self._ctrl.joy_right(event, self):
                    if self._current._right(apply_sound=True):
                        # Fixed: append instead of assignment (see above)
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_RIGHT)
                        updated = True
                        break

            # User moves joy axis motion
            elif event.type == pygame.JOYAXISMOTION and self._current._joystick and \
                    hasattr(event, 'axis'):
                prev = self._current._joy_event
                self._current._joy_event = 0

                if self._ctrl.joy_axis_y_up(event, self):
                    self._current._joy_event |= JOY_EVENT_UP
                elif self._ctrl.joy_axis_y_down(event, self):
                    self._current._joy_event |= JOY_EVENT_DOWN
                elif self._ctrl.joy_axis_x_left(event, self) and self._current._used_columns > 1:
                    self._current._joy_event |= JOY_EVENT_LEFT
                elif self._ctrl.joy_axis_x_right(event, self) and self._current._used_columns > 1:
                    self._current._joy_event |= JOY_EVENT_RIGHT

                if self._current._joy_event:
                    sel = self._current._handle_joy_event(True)
                    # Repeated axis event keeps the faster repeat timer
                    if self._current._joy_event == prev:
                        pygame.time.set_timer(self._current._joy_event_repeat, self._ctrl.joy_repeat)
                    else:
                        pygame.time.set_timer(self._current._joy_event_repeat, self._ctrl.joy_delay)
                    if sel:
                        self._current._last_update_mode.append(_events.MENU_LAST_JOY_REPEAT)
                        updated = True
                        break
                else:
                    pygame.time.set_timer(self._current._joy_event_repeat, 0)

            # User repeats previous joy event input
            elif event.type == self._current._joy_event_repeat:
                if self._current._joy_event:
                    sel = self._current._handle_joy_event(True)
                    pygame.time.set_timer(self._current._joy_event_repeat, self._ctrl.joy_repeat)
                    if sel:
                        self._current._last_update_mode.append(_events.MENU_LAST_JOY_REPEAT)
                        updated = True
                        break
                else:
                    pygame.time.set_timer(self._current._joy_event_repeat, 0)

            # Select widget by clicking
            elif event.type == pygame.MOUSEBUTTONDOWN and self._current._mouse and \
                    event.button in (1, 2, 3):  # Don't consider the mouse wheel (button 4 & 5)

                # If the mouse motion selection is disabled then select a widget by clicking
                if not self._current._mouse_motion_selection:
                    sel = False
                    for index in range(len(self._current._widgets)):
                        widget = self._current._widgets[index]
                        if isinstance(widget, Frame):  # Frame does not accept click
                            continue
                        if widget.is_selectable and widget.is_visible() and \
                                widget.get_scrollarea().collide(widget, event):
                            sel = self._current._select(index, 1, SELECT_MOUSE_BUTTON_DOWN, True)
                            break
                    if sel:
                        self._current._last_update_mode.append(
                            f'{_events.MENU_LAST_WIDGET_SELECT}#{self._current.get_selected_widget().get_id()}'
                        )
                        updated = True
                        break

                # If mouse motion selection, clicking will disable the active state
                # only if the user clicked outside the widget
                else:
                    if selected_widget is not None and selected_widget.active:
                        focus_rect = selected_widget.get_focus_rect()
                        if not selected_widget_scrollarea.collide(focus_rect, event):
                            selected_widget.active = False
                            selected_widget.render()  # Some widgets need to be rendered
                            self._current._last_update_mode.append(
                                f'{_events.MENU_LAST_WIDGET_DISABLE_ACTIVE_STATE}#{selected_widget.get_id()}'
                            )
                            updated = True
                            break

            # Mouse enters or leaves the window
            elif event.type == pygame.ACTIVEEVENT and hasattr(event, 'gain'):
                if event.gain == 1:  # Enter
                    if self._current._onwindowmouseover is not None:
                        try:
                            self._current._onwindowmouseover(self._current)
                        except TypeError:
                            self._current._onwindowmouseover()
                    check_widget_mouseleave()
                    self._current._last_update_mode.append(_events.MENU_LAST_MOUSE_ENTER_WINDOW)
                else:  # Leave
                    if self._current._onwindowmouseleave is not None:
                        try:
                            self._current._onwindowmouseleave(self._current)
                        except TypeError:
                            self._current._onwindowmouseleave()
                    if self._current._mouseover:
                        self._current._mouseover = False
                        if self._current._onmouseleave is not None:
                            try:
                                self._current._onmouseleave(self._current, event)
                            except TypeError:
                                self._current._onmouseleave()
                    check_widget_mouseleave(force=True)
                    self._current._last_update_mode.append(_events.MENU_LAST_MOUSE_LEAVE_WINDOW)

            # Mouse motion. It changes the cursor of the mouse if enabled
            elif event.type == pygame.MOUSEMOTION and self._current._mouse:
                mouse_motion_event = event

                # Check if mouse over menu
                if not self._current._mouseover:
                    if self._current.collide(event):
                        self._current._mouseover = True
                        if self._current._onmouseover is not None:
                            try:
                                self._current._onmouseover(self._current, event)
                            except TypeError:
                                self._current._onmouseover()
                        self._current._last_update_mode.append(_events.MENU_LAST_MOUSE_ENTER_MENU)
                else:
                    if not self._current.collide(event):
                        self._current._mouseover = False
                        if self._current._onmouseleave is not None:
                            try:
                                self._current._onmouseleave(self._current, event)
                            except TypeError:
                                self._current._onmouseleave()
                        mouse_motion_event = None
                        check_widget_mouseleave(force=True)
                        self._current._last_update_mode.append(_events.MENU_LAST_MOUSE_LEAVE_MENU)

                # If selected widget is active then motion should not select
                # or change mouseover widget
                if self._current._mouse_motion_selection and \
                        selected_widget is not None and selected_widget.active:
                    continue

                # Check if "rel" exists within the event
                if not hasattr(event, 'rel'):
                    continue

                # Select if mouse motion
                sel = False  # Widget has been selected
                for index in range(len(self._current._widgets)):
                    widget = self._current._widgets[index]
                    if widget.is_visible() and widget.get_scrollarea().collide(widget, event):
                        if self._current._mouse_motion_selection and \
                                widget.is_selectable and \
                                not isinstance(widget, Frame):
                            sel = self._current._select(index, 1, SELECT_MOUSE_MOTION, True)
                        # noinspection PyProtectedMember
                        widget._check_mouseover(event)
                    if sel:
                        break
                if sel:
                    self._current._last_update_mode.append(
                        f'{_events.MENU_LAST_WIDGET_SELECT_MOTION}#{self._current.get_selected_widget().get_id()}'
                    )
                    updated = True
                    break

            # Mouse events in selected widget; don't consider the mouse wheel (button 4 & 5)
            elif event.type == pygame.MOUSEBUTTONUP and self._current._mouse and \
                    selected_widget is not None and event.button in (1, 2, 3):
                self._current._sound.play_click_mouse()
                if selected_widget_scrollarea.collide(selected_widget, event):
                    updated = selected_widget.update([event])
                    if updated:
                        self._current._last_update_mode.append(
                            f'{_events.MENU_LAST_SELECTED_WIDGET_BUTTON_UP}#{selected_widget.get_id()}'
                        )
                    break

            # Touchscreen event:
            elif event.type == FINGERDOWN and self._current._touchscreen:
                # If the touchscreen motion selection is disabled then select
                # a widget by clicking
                if not self._current._touchscreen_motion_selection:
                    sel = False
                    for index in range(len(self._current._widgets)):
                        widget = self._current._widgets[index]
                        if isinstance(widget, Frame):  # Frame does not accept touch
                            continue
                        if widget.is_selectable and widget.is_visible() and \
                                widget.get_scrollarea().collide(widget, event):
                            sel = self._current._select(index, 1, SELECT_TOUCH, True)
                            if not isinstance(widget, Frame):
                                break
                    if sel:
                        self._current._last_update_mode.append(
                            f'{_events.MENU_LAST_WIDGET_SELECT}#{self._current.get_selected_widget().get_id()}'
                        )
                        updated = True
                        break

                # If touchscreen motion selection, clicking will disable the
                # active state only if the user clicked outside the widget
                else:
                    if selected_widget is not None and selected_widget.active:
                        if not selected_widget_scrollarea.collide(selected_widget, event):
                            selected_widget.active = False
                            selected_widget.render()  # Some widgets need to be rendered
                            self._current._last_update_mode.append(
                                f'{_events.MENU_LAST_WIDGET_DISABLE_ACTIVE_STATE}#{selected_widget.get_id()}'
                            )
                            updated = True
                            break

            # Touchscreen events in selected widget
            elif event.type == FINGERUP and self._current._touchscreen and \
                    selected_widget is not None:
                self._current._sound.play_click_touch()
                if selected_widget_scrollarea.collide(selected_widget, event):
                    updated = selected_widget.update([event])
                    if updated:
                        self._current._last_update_mode.append(
                            f'{_events.MENU_LAST_SELECTED_WIDGET_FINGER_UP}#{selected_widget.get_id()}'
                        )
                    break

            # Select widgets by touchscreen motion, this is valid only if the
            # current selected widget is not active and the pointed widget is
            # selectable
            elif event.type == FINGERMOTION and self._current._touchscreen_motion_selection:
                # If selected widget is active then motion should not select
                # any widget
                if selected_widget is not None and selected_widget.active:
                    continue

                sel = False
                for index in range(len(self._current._widgets)):
                    widget = self._current._widgets[index]
                    if isinstance(widget, Frame):  # Frame does not accept touch
                        continue
                    if widget.is_selectable and widget.is_visible() and \
                            widget.get_scrollarea().collide(widget, event):
                        sel = self._current._select(index, 1, SELECT_TOUCH, True)
                        if not isinstance(widget, Frame):
                            break
                if sel:
                    self._current._last_update_mode.append(
                        f'{_events.MENU_LAST_WIDGET_SELECT_MOTION}#{self._current.get_selected_widget().get_id()}'
                    )
                    updated = True
                    break

    if mouse_motion_event is not None:
        check_widget_mouseleave(event=mouse_motion_event)

    # If cache is enabled, always force a rendering (user may have changed any status)
    if self._current._widget_surface_cache_enabled and updated:
        self._current._widget_surface_cache_need_update = True

    # A widget has closed the Menu
    if not self.is_enabled():
        updated = True

    return updated
def collide(self, event: EventType) -> bool:
    """
    Check whether the given user event position collides with the Menu rect.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param event: Pygame event
    :return: ``True`` if collide
    """
    event_pos = get_finger_pos(self, event)
    menu_rect = self.get_rect()
    return bool(menu_rect.collidepoint(*event_pos))
def mainloop(
        self,
        surface: 'pygame.Surface',
        bgfun: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]] = None,
        **kwargs
) -> 'Menu':
    """
    Main loop of the **current** Menu. In this function, the Menu handle
    exceptions and draw. The Menu pauses the application and checks :py:mod:`pygame`
    events itself.

    This method returns until the Menu is updated (a widget status has changed).
    The execution of the mainloop is at the current Menu level.

    .. code-block:: python

        menu = pygame_menu.Menu(...)
        menu.mainloop(surface)

    The ``bgfun`` callable (if not None) can receive 1 argument maximum, if so,
    the Menu instance is provided:

    .. code-block:: python

        draw(...):
            bgfun(menu) <or> bgfun()

    Finally, mainloop can be disabled externally if menu.disable() is called.

    kwargs (Optional)
        - ``clear_surface``     (bool) – If ``True`` surface is cleared using ``theme.surface_clear_color``
        - ``disable_loop``      (bool) – If ``True`` the mainloop only runs once. Use for running draw and update in a single call
        - ``fps_limit``         (int) – Maximum FPS of the loop. Default equals to ``theme.fps``. If ``0`` there's no limit
        - ``wait_for_event``    (bool) – Holds the loop until an event is provided, useful to save CPU power

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().mainloop(...)``.

    :param surface: Pygame surface to draw the Menu
    :param bgfun: Background function called on each loop iteration before drawing the Menu
    :param kwargs: Optional keyword arguments
    :return: Self reference **(current)**
    """
    # Unpack kwargs
    clear_surface = kwargs.get('clear_surface', True)
    disable_loop = kwargs.get('disable_loop', False)
    fps_limit = kwargs.get('fps_limit', self._theme.fps)
    wait_for_event = kwargs.get('wait_for_event', False)

    assert isinstance(clear_surface, bool)
    assert isinstance(disable_loop, bool)
    assert isinstance(fps_limit, NumberInstance)
    assert isinstance(surface, pygame.Surface)
    assert isinstance(wait_for_event, bool)

    assert fps_limit >= 0, 'fps limit cannot be negative'

    # NOTE: For Menu accessor, use only _current, as the Menu pointer can
    # change through the execution
    if not self.is_enabled():
        self._current._runtime_errors.throw(
            self._current._runtime_errors.mainloop, 'menu is not enabled'
        )
        return self._current

    # Check background function; probe-call it once to discover whether it
    # accepts the Menu instance as argument (TypeError means zero-arg callable)
    bgfun_accept_menu = False
    if bgfun:
        assert callable(bgfun), \
            'background function must be callable (function-type) object'
        try:
            bgfun(self._current)
            bgfun_accept_menu = True
        except TypeError:
            pass
    self._current._background_function = (bgfun_accept_menu, bgfun)

    # Change state
    self._current._mainloop = True

    # Force rendering before loop
    self._current._widgets_surface = None

    # Start loop
    while True:
        self._current._stats.loop += 1
        self._current._clock.tick(fps_limit)

        # Draw the menu
        self.draw(surface=surface, clear_surface=clear_surface)

        # Gather events by Menu; pygame.event.wait() blocks until at least
        # one event arrives, saving CPU when wait_for_event is requested
        if wait_for_event:
            self.update([pygame.event.wait()])
        if (not wait_for_event or pygame.event.peek()) and self.is_enabled():
            self.update(pygame.event.get())

        # Flip contents to screen
        pygame.display.flip()

        # Menu closed or disabled
        if not self.is_enabled() or disable_loop:
            self._current._mainloop = False
            check_widget_mouseleave(force=True)
            return self._current
def get_input_data(self, recursive: bool = False) -> Dict[str, Any]:
    """
    Collect the current values of the Menu widgets into a dict keyed by
    widget ID. With ``recursive=True`` values of all sub-menu widgets are
    collected as well.

    .. note::

        This is applied only to the base Menu (not the currently displayed),
        for such behaviour apply to :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param recursive: Look in Menu and sub-menus
    :return: Input dict e.g.: ``{'id1': value, 'id2': value, ...}``
    """
    assert isinstance(recursive, bool)
    # Delegate to the recursive worker starting at depth 0
    return self._get_input_data(recursive, depth=0)
def _get_input_data(self, recursive: bool, depth: int) -> Dict[str, Any]:
    """
    Recursive worker of :py:meth:`pygame_menu.menu.Menu.get_input_data`.
    Collects widget values keyed by widget ID; raises if two widgets share
    the same ID across menus.

    :param recursive: Look in Menu and sub-menus
    :param depth: Current recursion depth (used in the collision message)
    :return: Input dict e.g.: ``{'id1': value, 'id2': value, ...}``
    """
    data: Dict[str, Any] = {}
    for widget in self._widgets:
        try:
            data[widget.get_id()] = widget.get_value()
        except ValueError:
            # Widget does not provide a value
            continue
    if not recursive:
        return data
    depth += 1
    for menu in self._submenus.keys():
        # noinspection PyProtectedMember
        sub_data = menu._get_input_data(recursive=recursive, depth=depth)
        # Detect ID collisions before merging
        colliding = [k for k in sub_data.keys() if k in data]
        if colliding:
            raise ValueError(f'collision between widget data ID="{colliding[0]}" at depth={depth}')
        data.update(sub_data)
    return data
def get_rect(self) -> 'pygame.Rect':
    """
    Return the :py:class:`pygame.Rect` object of the Menu.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Rect
    """
    pos_x, pos_y = self.get_position()
    return pygame.Rect(pos_x, pos_y, int(self._width), int(self._height))
def set_sound(self, sound: Optional['Sound'], recursive: bool = False) -> 'Menu':
    """
    Add a sound engine to the Menu. If ``recursive=True``, the sound is
    applied to all submenus.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param sound: Sound object; ``None`` installs a default (muted) engine
    :param recursive: Set the sound engine to all submenus
    :return: Self reference
    """
    assert isinstance(sound, (type(self._sound), type(None))), \
        'sound must be pygame_menu.Sound type or None'
    # A None sound is replaced by a fresh default engine
    self._sound = Sound() if sound is None else sound
    for widget in self._widgets:
        widget.set_sound(self._sound)
    if recursive:
        for submenu in self._submenus.keys():
            submenu.set_sound(self._sound, recursive=True)
    return self
def get_title(self) -> str:
    """
    Return the title of the Menu (as held by its menubar widget).

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Menu title
    """
    return self._menubar.get_title()
def set_title(self, title: Any, offset: Optional[Vector2NumberType] = None) -> 'Menu':
    """
    Set the title of the Menu.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param title: New menu title
    :param offset: If ``None`` uses theme offset, else it defines the title offset on x-axis and y-axis (x, y)
    :return: Self reference
    """
    if offset is not None:
        assert_vector(offset, 2)
    else:
        # Fall back to the theme-configured title offset
        offset = self._theme.title_offset
    self._menubar.set_title(title, offsetx=offset[0], offsety=offset[1])
    return self
def full_reset(self) -> 'Menu':
    """
    Reset the Menu back to the first opened Menu by unwinding the whole
    navigation history.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Self reference
    """
    current_depth = self._get_depth()
    if current_depth > 0:
        self.reset(current_depth)
    return self
def clear(self, reset: bool = True) -> 'Menu':
    """
    Remove every widget and submenu reference from the Menu.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param reset: If ``True`` the menu full-resets
    :return: Self reference
    """
    if reset:
        self.full_reset()
    # Iterate over a snapshot; remove_widget mutates the widget list
    for widget in tuple(self._widgets):
        self.remove_widget(widget)
    del self._widgets[:]
    del self._submenus
    self._submenus = {}
    self._index = -1
    self._stats.clear += 1
    self._render()
    return self
def _open(self, menu: 'Menu') -> None:
    """
    Open the given Menu, pushing the current one onto the navigation history.

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().reset(...)``.

    :param menu: Menu object
    """
    current = self

    # Update pointers; the previous chain is stored as a nested 2-element
    # list [older_prev, menu], effectively a linked list of visited menus
    menu._top = self._top
    self._top._current = menu._current
    self._top._prev = [self._top._prev, current]

    # Call event
    if menu._onbeforeopen is not None:
        menu._onbeforeopen(current, menu)

    # Select the first widget; selection is programmatic so the mouse
    # position is deliberately not updated
    self._current._select(0, 1, SELECT_OPEN, False, update_mouse_position=False)

    # Re-render menu
    check_widget_mouseleave(force=True)
    self._render()
def reset(self, total: int) -> 'Menu':
    """
    Go back in Menu history a certain number of times from the **current** Menu.
    This method operates through the **current** Menu pointer.

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().reset(...)``.

    :param total: How many menus to go back
    :return: Self reference **(current)**
    """
    assert isinstance(total, int)
    assert total > 0, 'total must be greater than zero'

    i = 0
    if self._top._prev is not None:
        # Unwind the [older_prev, menu] linked list built by _open; stops
        # early if the chain is exhausted before `total` steps
        while True:
            if self._top._prev is not None:
                prev = self._top._prev
                self._top._current = prev[1]  # This changes the "current" pointer
                self._top._prev = prev[0]  # Eventually will reach None
                i += 1
                if i == total:
                    break
            else:
                break

    # Execute onreset callback; the callback may accept the Menu instance
    # or no arguments at all (TypeError fallback)
    if self._current._onreset is not None:
        try:
            self._current._onreset(self._current)
        except TypeError:
            self._current._onreset()

    self._current._widgets_surface = None
    check_widget_mouseleave(force=True)
    self._current._select(self._top._current._index, 1, SELECT_RESET, False,
                          update_mouse_position=False)
    self._current._stats.reset += 1
    return self._current
def _select(
        self,
        new_index: int,
        dwidget: int,
        select_type: str,
        apply_sound: bool,
        **kwargs
) -> bool:
    """
    Select the widget at the given index and unselect others. Selection forces
    rendering of the widget. Also play widget selection sound. This is applied
    to the base Menu pointer.

    kwargs (Optional)
        - ``last_index``                (int) – Last index in recursive call on Frames
        - ``update_mouse_position``     (bool) – Update mouse position

    :param new_index: Widget index
    :param dwidget: Direction to search if ``new_index`` widget is non-selectable
    :param select_type: Select type identifier
    :param apply_sound: Apply widget sound if selected
    :param kwargs: Optional keyword arguments
    :return: ``True`` if the widget changed
    """
    self._stats.select += 1
    self._last_selected_type = select_type
    if len(self._widgets) == 0:
        return False

    # This stores +/-1 if the index increases or decreases, used by non-selectable selection
    if dwidget == 0:
        if new_index < self._index:
            dwidget = -1
        else:
            dwidget = 1

    # Limit the index to the length (wraps around the widget list)
    new_index %= len(self._widgets)

    # Get both widgets
    if self._index >= len(self._widgets):  # Menu length changed during execution time
        for i in range(len(self._widgets)):  # Unselect all possible candidates
            self._widgets[i].select(False)
        self._index = 0

    old_widget = self._widgets[self._index]
    new_widget = self._widgets[new_index]
    if old_widget == new_widget and self._index != -1 and old_widget.is_selected():
        return False

    # If new widget is not selectable or visible
    if not new_widget.is_selectable or not new_widget.is_visible():

        # If it is a frame, select the first selectable object within it
        # (first or last index depending on traversal direction)
        if isinstance(new_widget, Frame):
            if dwidget == 1:
                min_index = new_widget.first_index
            else:
                min_index = new_widget.last_index
            current_frame = self._widgets[self._index].get_frame()
            same_frame = current_frame is not None and current_frame == new_widget  # Ignore cycles

            # Check if recursive but same index as before; skip past it to
            # avoid an infinite recursion on the same candidate
            last_index = kwargs.get('last_index', -1)
            if select_type == SELECT_RECURSIVE and last_index == min_index:
                min_index += 2 * dwidget

            # A selectable widget has been found within frame
            if min_index != -1 and not same_frame and min_index != self._index:
                kwargs['last_index'] = new_index
                return self._select(min_index, dwidget, SELECT_RECURSIVE,
                                    apply_sound, **kwargs)

        # There's at least 1 selectable option; keep walking in direction dwidget
        if self._index >= 0:
            kwargs['last_index'] = new_index
            return self._select(new_index + dwidget, dwidget, SELECT_RECURSIVE,
                                apply_sound, **kwargs)

        # No selectable options, quit
        else:
            return False

    # Selecting widgets forces rendering
    old_widget.select(False)
    self._index = new_index  # Update selected index
    new_widget.select()
    self.scroll_to_widget(new_widget)

    # Play widget selection sound
    if old_widget != new_widget and apply_sound:
        self._sound.play_widget_selection()

    # Update mouse position if selected using keys
    if select_type in (SELECT_KEY, SELECT_RECURSIVE) and \
            self._mouse_motion_selection and \
            not self._disable_widget_update_mousepos_mouseselection and \
            not new_widget.is_floating() and \
            self._mouseover and \
            kwargs.get('update_mouse_position', True):
        pygame.mouse.set_pos(new_widget.get_rect(to_real_position=True).center)

    return True
def scroll_to_widget(
        self,
        widget: Optional['Widget'],
        scroll_parent: bool = True
) -> 'Menu':
    """
    Scroll the Menu to the given widget.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param widget: Widget to request scroll. If ``None`` scrolls to the selected widget
    :param scroll_parent: If ``True`` parent scroll also scrolls to rect
    :return: Self reference
    """
    if widget is None:
        widget = self.get_selected_widget()
        if widget is None:  # No widget is selected, scroll to top
            self.get_scrollarea().scroll_to(ORIENTATION_VERTICAL, 0)
            self.get_scrollarea().scroll_to(ORIENTATION_HORIZONTAL, 0)
            return self
    assert isinstance(widget, Widget), \
        'widget to scroll from must be a Widget class, not None'
    widget_scroll = widget.get_scrollarea()
    if widget_scroll is None:
        warn(f'{widget.get_class_id()} scrollarea is None, thus, scroll to widget cannot be performed')
        return self

    # Scroll to rect
    rect = widget.get_rect()
    widget_frame = widget.get_frame()
    widget_border = widget.get_border()[1]

    # Compute margin depending on widget position; when the widget sits near
    # the top of the view and the menubar is fixed, offset by the bar height
    # so the widget is not hidden behind it
    _, ry = widget_scroll.get_widget_position_relative_to_view_rect(widget)
    mx = 0
    my = 0
    if ry < 0.15 and self._menubar.fixed:
        my = -self._menubar.get_height() - widget_border

    # Call scroll parent container
    if widget_frame is not None and widget_frame.is_scrollable:
        widget_frame.scroll_to_widget((mx, my), scroll_parent)

    # Intentionally called twice:
    # The first set the scrolls
    widget_scroll.scroll_to_rect(rect, (mx, my), scroll_parent)
    # The latter updates to active object
    widget_scroll.scroll_to_rect(rect, (mx, my), scroll_parent)

    return self
def get_window_size(self) -> Tuple2IntType:
    """
    Return the stored window size as a (width, height) tuple in px.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Window size in px
    """
    return self._window_size
def get_submenus(self, recursive: bool = False) -> Tuple['Menu', ...]:
    """
    Return the Menu submenus as a tuple.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param recursive: If ``True`` return all submenus in a recursive fashion
    :return: Submenus tuple
    """
    assert isinstance(recursive, bool)
    direct = tuple(self._submenus.keys())
    if not recursive:
        return direct
    # Breadth-style accumulation preserving first-seen order, no duplicates
    collected = list(direct)
    for submenu in direct:
        for nested in submenu.get_submenus(recursive=True):
            if nested not in collected:
                collected.append(nested)
    return tuple(collected)
def get_menubar(self) -> 'MenuBar':
    """
    Return the menubar widget of this Menu.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: MenuBar widget
    """
    return self._menubar
def get_scrollarea(self) -> 'ScrollArea':
    """
    Return the ScrollArea that holds the Menu widgets.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: ScrollArea object
    """
    return self._scrollarea
def get_widget(
        self,
        widget_id: str,
        recursive: bool = False
) -> Optional['Widget']:
    """
    Return a widget by a given ID from the Menu; with ``recursive=True``
    sub-menus are searched as well.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    .. note::

        ``None`` is returned if no widget is found.

    :param widget_id: Widget ID
    :param recursive: Look in Menu and submenus
    :return: Widget object
    """
    assert isinstance(widget_id, str)
    assert isinstance(recursive, bool)
    # Direct widgets first
    for widget in self._widgets:
        if widget.get_id() == widget_id:
            return widget
    if not recursive:
        return None
    # Then descend into submenus
    for submenu in self._submenus.keys():
        found = submenu.get_widget(widget_id, recursive)
        if found:
            return found
    return None
def get_widgets_column(self, col: int) -> Tuple['Widget', ...]:
    """
    Return all the visible widgets placed within the given column.

    :param col: Column number (start from zero)
    :return: Widget list
    """
    return tuple(self._widget_columns[col])
def get_widgets(self, ids: Optional[Union[List[str], Tuple[str, ...]]] = None) -> Tuple['Widget', ...]:
    """
    Return the Menu widgets as a tuple.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param ids: Widget id list. If ``None``, return all the widgets, otherwise, return the widgets from that list
    :return: Widgets tuple
    """
    if not ids:
        return tuple(self._widgets)
    # Resolve each requested ID (recursively, through submenus)
    return tuple(self.get_widget(w_id, recursive=True) for w_id in ids)
def reset_value(self, recursive: bool = False) -> 'Menu':
    """
    Reset every widget value back to its default.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param recursive: Set value recursively
    :return: Self reference
    """
    for current_widget in self._widgets:
        current_widget.reset_value()
    if recursive:
        for submenu in self._submenus.keys():
            submenu.reset_value(recursive)
    return self
def in_submenu(self, menu: 'Menu', recursive: bool = False) -> bool:
    """
    Return ``True`` if ``menu`` is a submenu of the Menu.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param menu: Menu to check
    :param recursive: Check recursively
    :return: ``True`` if ``menu`` is in the submenus
    """
    if menu in self._submenus.keys():
        return True
    if not recursive:
        return False
    return any(sub.in_submenu(menu, recursive) for sub in self._submenus.keys())
def _remove_submenu(
        self,
        menu: 'Menu',
        hook: 'Widget',
        recursive: bool = False
) -> bool:
    """
    Removes Menu from submenu if ``menu`` is a submenu of the Menu. A submenu
    entry is only dropped once its last hooking widget has been removed.

    :param menu: Menu to remove
    :param hook: Widget associated with the menu
    :param recursive: Check recursively
    :return: ``True`` if ``menu`` was removed
    """
    assert isinstance(menu, Menu)
    assert isinstance(hook, Widget)
    if menu in self._submenus.keys():
        # Remove hook if in list; also detach the widget's back-reference
        if hook in self._submenus[menu]:
            self._submenus[menu].remove(hook)
            hook._menu_hook = None
        # If total hooks are empty, remove the menu
        if len(self._submenus[menu]) == 0:
            del self._submenus[menu]
        self._update_after_remove_or_hidden(self._index)
        return True
    if recursive:
        for sm in self._submenus:
            if sm._remove_submenu(menu, hook, recursive):
                return True
    return False
def get_theme(self) -> 'Theme':
    """
    Return the Menu theme.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    .. warning::

        Use with caution, changing the theme may affect other menus or
        widgets if not properly copied.

    :return: Menu theme
    """
    return self._theme
def get_clock(self) -> 'pygame.time.Clock':
    """
    Return the pygame clock used to time this Menu.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Pygame clock object
    """
    return self._clock
def get_index(self) -> int:
    """
    Return the index of the currently selected widget of the Menu.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Selected widget index
    """
    return self._index
def get_mouseover_widget(self, filter_appended: bool = True) -> Optional['Widget']:
    """
    Return the widget currently under the mouse pointer on the Menu.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param filter_appended: If ``True`` return the widget only if it's appended to the base Menu
    :return: Widget object, ``None`` if no widget is mouseover
    """
    candidate = WIDGET_MOUSEOVER[0]
    if candidate is None:
        return None
    if filter_appended and candidate.get_menu() != self:
        return None
    return candidate
def get_selected_widget(self) -> Optional['Widget']:
    """
    Return the selected widget on the Menu.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Widget object, ``None`` if no widget is selected
    """
    # Repair a corrupted (non-int) index and report no selection
    if not isinstance(self._index, int):
        self._index = 0
        return None
    if self._index < 0:
        return None
    total = len(self._widgets)
    if total == 0:  # Avoid modulo by zero on an empty widget list
        return None
    return self._widgets[self._index % total]
def get_decorator(self) -> 'Decorator':
    """
    Return the Menu decorator API.

    .. note::

        ``prev`` menu decorator may not draw because :py:class:`pygame_menu.widgets.MenuBar`
        and :py:class:`pygame_menu._scrollarea.ScrollArea` objects draw over
        it. If it's desired to draw a decorator behind widgets, use the ScrollArea
        decorator, for example: :py:data:`menu.get_scrollarea().get_decorator()`.

        The menu drawing order is:

        1. Menu background color/image
        2. Menu ``prev`` decorator
        3. Menu ScrollArea ``prev`` decorator
        4. Menu ScrollArea widgets
        5. Menu ScrollArea ``post`` decorator
        6. Menu title
        7. Menu ``post`` decorator

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Decorator API
    """
    return self._decorator
def _test_widgets_status(self) -> Tuple[Tuple[Any, ...], ...]:
    """
    Collect the status of each widget (position, indices, values, etc.)
    after forcing a render. Used for testing.

    :return: Widget status
    """
    self.render()
    # noinspection PyProtectedMember
    return tuple(w._get_status() for w in self._widgets)
# noinspection PyProtectedMember
def move_widget_index(
        self,
        widget: Optional['Widget'],
        index: Optional[Union['Widget', int]] = None,
        render: bool = True,
        **kwargs
) -> Optional[Tuple2IntType]:
    """
    Move a given widget to a certain index. ``index`` can be another widget,
    a numerical position, or ``None``; if ``None`` the widget is pushed to
    the last widget list position.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param widget: Widget to move. If ``None`` the widgets are flipped or reversed and returns ``None``
    :param index: Target index. It can be a widget, a numerical index, or ``None``; if ``None`` the widget is pushed to the last position
    :param render: Force menu rendering after update
    :param kwargs: Optional keyword arguments (``depth``, ``swap_search``, ``update_selected_index`` — internal)
    :return: The new indices of the widget and the previous index element
    """
    depth = kwargs.get('depth', 0)

    # Update only selected index
    if kwargs.get('update_selected_index', False):
        self._index = -1
        has_selected = False
        invalid_w: List[str] = []
        selected = None
        for w in self._widgets:
            if w.is_selected():
                if not has_selected:
                    self._select(self._widgets.index(w), 1, SELECT_MOVE, False)
                    has_selected = True
                    selected = w.get_class_id()
                else:
                    w.select(False)
                    invalid_w.append(w.get_class_id())
        if len(invalid_w) > 0:
            # FIX: the join quotes must differ from the f-string delimiter,
            # otherwise this is a SyntaxError on Python < 3.12
            raise _MenuMultipleSelectedWidgetsException(
                f'several widgets are selected at the same time, current '
                f'selected (sorted by index): {selected}, but the following '
                f'are also selected: {", ".join(invalid_w)}'
            )
        return

    selected_widget = self.get_selected_widget()

    # Reverse widgets
    if widget is None:
        new_widgets = []
        lw = len(self._widgets)
        j_limit = -1  # Last position containing non frame
        for i in range(lw):
            j = lw - 1 - i
            if self._widgets[j].get_frame() is None:
                new_widgets.append(self._widgets[j])
                if j_limit != -1:
                    for k in range(j + 1, j_limit + 1):
                        new_widgets.append(self._widgets[k])
                    j_limit = -1
            else:
                if j_limit == -1:
                    j_limit = j
        if j_limit != -1:
            # FIX: inclusive upper bound (j_limit + 1); range(j_limit) dropped
            # the widget at index j_limit, losing a widget on reversal
            for k in range(j_limit + 1):
                new_widgets.append(self._widgets[k])
        self._widgets = new_widgets
        if selected_widget is not None:
            selected_widget.select(False)
            self._select(self._widgets.index(selected_widget), 1, SELECT_MOVE, False)
        if len(self._update_frames) > 0:
            self._update_frames[0]._sort_menu_update_frames()
        if render:
            self._widgets_surface = None
            self._render()
        check_widget_mouseleave()
        return

    # Asserts
    assert len(self._widgets) >= 2, \
        'menu must contain at least 2 widgets to perform this task'
    try:
        widget_index = self._widgets.index(widget)
    except ValueError:
        raise ValueError(f'{widget.get_class_id()} widget is not on widgets list')
    assert widget in self._widgets, \
        f'{widget.get_class_id()} does not exist on current menu widgets list'
    assert isinstance(index, (Widget, int, type(None)))
    if isinstance(index, Widget):
        assert index in self._widgets, \
            f'{index.get_class_id()} does not exist on current menu widgets list'
        index = self._widgets.index(index)
    elif isinstance(index, int):
        assert 0 <= index < len(self._widgets), \
            f'index {index} must be between 0 and the number of widgets ({len(self._widgets)})'
    elif index is None:
        index = len(self._widgets) - 1
    else:
        raise ValueError('index must be a widget, int, or None')
    assert widget_index != index, \
        f'target index must be different than the current widget index ({index})'
    target_index = index
    target_widget = self._widgets[target_index]

    # If target widget is frame, find the latest index
    both_frames = isinstance(target_widget, Frame) and isinstance(widget, Frame)
    check_if_last = both_frames and self._validate_frame_widgetmove and target_index != 0
    if check_if_last:
        w_last = target_widget
        while True:
            target_index = w_last.last_index
            w_last = self._widgets[w_last.last_index]
            target_widget = w_last
            if not (isinstance(w_last, Frame) and w_last.get_indices() != (-1, -1)) or \
                    w_last.get_menu() is None:
                break
    to_last_position = target_index == len(self._widgets) - 1
    if not to_last_position and check_if_last:
        target_index = index
        target_widget = self._widgets[target_index]
        if both_frames and self._validate_frame_widgetmove and \
                not kwargs.get('swap_search', False):
            # Swap search: retry with widget/target exchanged (guarded by
            # swap_search to prevent infinite mutual recursion)
            return self.move_widget_index(
                target_widget, widget, render=render, swap_search=True, depth=depth + 1
            )

    # Check both widgets are within frame if widget to move is frame
    if self._validate_frame_widgetmove and not to_last_position and not both_frames:
        assert widget.get_frame() == target_widget.get_frame(), \
            'both widgets must be within same frame'

    self._widgets.pop(widget_index)
    self._widgets.insert(target_index, widget)
    new_widget_index = self._widgets.index(widget)
    assert new_widget_index != widget_index, 'widget index has not changed'
    assert widget != target_widget, 'widget must be different than target'

    # If frame is moved, move all sub-elements
    if self._validate_frame_widgetmove:
        if isinstance(widget, Frame):
            # Disable validation while recursively relocating children
            self._validate_frame_widgetmove = False
            for w in widget.get_widgets(unpack_subframes_include_frame=True,
                                        reverse=not to_last_position):
                if w.get_menu() is None:
                    continue
                if not to_last_position:
                    self.move_widget_index(
                        w, self._widgets.index(widget) + 1, render=False, depth=depth + 1
                    )
                else:
                    self.move_widget_index(w, render=False, depth=depth + 1)
            self._validate_frame_widgetmove = True

        # Sort frame widget list
        if widget.get_frame() is not None:
            prev_frame_widgs = widget.get_frame().get_widgets(unpack_subframes=False)

            # Get none-menu widgets for ordering; each run of menu-less widgets
            # is keyed by the menu widget that precedes it
            none_menu_widgs: Dict[Optional['Widget'], List['Widget']] = {}
            prev_wig: Optional['Widget'] = None
            for i in range(len(prev_frame_widgs)):
                if prev_frame_widgs[i].get_menu() is None:
                    if prev_wig not in none_menu_widgs.keys():
                        none_menu_widgs[prev_wig] = []
                    none_menu_widgs[prev_wig].append(prev_frame_widgs[i])
                else:
                    prev_wig = prev_frame_widgs[i]
            for i in none_menu_widgs.keys():
                none_menu_widgs[i].reverse()

            # Get all widgets within given frame
            new_list = []
            for w in self._widgets:
                if w.get_frame() == widget.get_frame():
                    new_list.append(w)

            # Create new list considering non-menu widgets
            new_list_non_menu = []
            if None in none_menu_widgs.keys():
                for w in none_menu_widgs[None]:
                    new_list_non_menu.append(w)
            for w in new_list:
                new_list_non_menu.append(w)
                if w in none_menu_widgs.keys():
                    for ww in none_menu_widgs[w]:
                        new_list_non_menu.append(ww)

            # Make dict and update frame widgets dict
            new_dict = {}
            for w in new_list_non_menu:
                new_dict[w.get_id()] = w
            widget.get_frame()._widgets = new_dict

    # Update selected widget
    if selected_widget is not None and selected_widget.is_selectable and \
            self._validate_frame_widgetmove:
        self._index = -1
        selected_widget.select(False)
        self._select(self._widgets.index(selected_widget), 1, SELECT_MOVE, False)

    if render:
        self._widgets_surface = None
        self._render()

    if self._validate_frame_widgetmove:
        if isinstance(widget, Frame) or isinstance(target_widget, Frame):
            if isinstance(widget, Frame):
                widget._sort_menu_update_frames()
            else:
                target_widget._sort_menu_update_frames()

    check_widget_mouseleave()
    return new_widget_index, target_index
def _test_print_widgets(self) -> None:
    """
    Print the widget ordering/structure of the Menu (testing helper).
    """
    print_menu_widget_structure(self._widgets, self._index)
def _copy_theme(self) -> None:
    """
    Replace the theme reference with an owned copy, so later mutations do
    not leak into other menus sharing the same Theme object.
    """
    self._theme = self._theme.copy()
class _MenuStats(object):
"""
Menu stats.
"""
def __init__(self) -> None:
# Widget update
self.added_widgets = 0
self.removed_widgets = 0
# Widget position
self.build_surface = 0
self.position_update = 0
self.center_content = 0
# Render
self.last_build_surface_time = 0
self.render_private = 0
self.render_public = 0
self.total_building_time = 0
self.total_rendering_time = 0
# Other
self.clear = 0
self.draw = 0
self.draw_update_cached = 0
self.loop = 0
self.reset = 0
self.select = 0
self.update = 0
class _MenuCopyException(Exception):
"""
If user tries to copy a Menu.
"""
pass
class _MenuRuntimeErrorConfig(object):
"""
Controls the runtime errors of the Menu.
"""
def __init__(self) -> None:
self.close = True
self.draw = True
self.mainloop = True
self.update = True # It should be True, as non-active Menus SHOULD NOT receive updates
@staticmethod
def throw(throw_runtime: bool, msg: str) -> None:
"""
Throws an error, if ``throw_runtime=True`` throws a ``RuntimeError``, otherwise
only a warning.
:param throw_runtime: If error is raised
:param msg: Message
"""
if throw_runtime:
raise RuntimeError(msg)
warn(msg)
class _MenuSizingException(Exception):
"""
Exception thrown if widget exceeds maximum size of column/row layout.
"""
pass
class _MenuWidgetOverflow(Exception):
    """
    Exception thrown if adding more widgets than the Menu can contain in its
    row/column layout.
    """
    pass
class _MenuMultipleSelectedWidgetsException(Exception):
    """
    Exception thrown if multiple widgets are selected at the same time.
    """
    pass
| """
pygame-menu
https://github.com/ppizarror/pygame-menu
MENU
Menu class.
"""
# File constants no. 0
__all__ = ['Menu']
import math
import os
import sys
import time
import pygame
import pygame.gfxdraw as gfxdraw
import pygame_menu.events as _events
from pygame_menu._base import Base
from pygame_menu._decorator import Decorator
from pygame_menu._widgetmanager import WidgetManager
from pygame_menu.controls import Controller
from pygame_menu.locals import ALIGN_CENTER, ALIGN_LEFT, ALIGN_RIGHT, \
ORIENTATION_HORIZONTAL, ORIENTATION_VERTICAL, FINGERDOWN, FINGERUP, FINGERMOTION
from pygame_menu._scrollarea import ScrollArea, get_scrollbars_from_position
from pygame_menu.sound import Sound
from pygame_menu.themes import Theme, THEME_DEFAULT
from pygame_menu.utils import assert_vector, make_surface, warn, \
check_key_pressed_valid, mouse_motion_current_mouse_position, get_finger_pos, \
print_menu_widget_structure
from pygame_menu.widgets import Frame, Widget, MenuBar
from pygame_menu.widgets.core.widget import check_widget_mouseleave, WIDGET_MOUSEOVER
# Import types
from pygame_menu._types import Callable, Any, Dict, NumberType, VectorType, \
Vector2NumberType, Union, Tuple, List, Vector2IntType, Vector2BoolType, \
Tuple4Tuple2IntType, Tuple2IntType, MenuColumnMaxWidthType, MenuColumnMinWidthType, \
MenuRowsType, Optional, Tuple2BoolType, NumberInstance, VectorInstance, EventType, \
EventVectorType, EventListType, CallableNoArgsType
# Joy events (power-of-two bit-flag values)
JOY_EVENT_LEFT = 1
JOY_EVENT_RIGHT = 2
JOY_EVENT_UP = 4
JOY_EVENT_DOWN = 8
# Select types: identify which input mechanism performed the last widget
# selection (stored in Menu._last_selected_type, used for test purposes)
SELECT_KEY = 'key'
SELECT_MOUSE_BUTTON_DOWN = 'mouse_button_down'
SELECT_MOUSE_MOTION = 'mouse_motion'
SELECT_MOVE = 'move'
SELECT_OPEN = 'open'
SELECT_RECURSIVE = 'recursive'
SELECT_REMOVE = 'remove'
SELECT_RESET = 'reset'
SELECT_TOUCH = 'touch'
SELECT_WIDGET = 'widget'
class Menu(Base):
"""
Menu object.
Menu can receive many callbacks; callbacks ``onclose`` and ``onreset`` are fired
(if them are callable-type). They can only receive 1 argument maximum, if so,
the Menu instance is provided
.. code-block:: python
onclose(menu) <or> onclose()
onreset(menu) <or> onreset()
.. note::
Menu cannot be copied or deep-copied.
:param title: Title of the Menu
:param width: Width of the Menu in px
:param height: Height of the Menu in px
:param center_content: Auto centers the Menu on the vertical position after a widget is added/deleted
:param column_max_width: List/Tuple representing the maximum width of each column in px, ``None`` equals no limit. For example ``column_max_width=500`` (each column width can be 500px max), or ``column_max_width=(400,500)`` (first column 400px, second 500). If ``0`` uses the Menu width. This method does not resize the widgets, only determines the dynamic width of the column layout
:param column_min_width: List/Tuple representing the minimum width of each column in px. For example ``column_min_width=500`` (each column width is 500px min), or ``column_max_width=(400,500)`` (first column 400px, second 500). Negative values are not accepted
:param columns: Number of columns
:param enabled: Menu is enabled. If ``False`` the Menu cannot be drawn or updated
:param joystick_enabled: Enable/disable joystick events on the Menu
:param keyboard_enabled: Enable/disable keyboard events on the Menu
:param keyboard_ignore_nonphysical: Ignores non-physical keyboard buttons pressed
:param menu_id: ID of the Menu
:param mouse_enabled: Enable/disable mouse click inside the Menu
:param mouse_motion_selection: Select widgets using mouse motion. If ``True`` menu draws a ``focus`` on the selected widget
:param mouse_visible: Set mouse visible on Menu
:param onclose: Event or function executed when closing the Menu. If not ``None`` the menu disables and executes the event or function it points to. If a function (callable) is provided it can be both non-argument or single argument (Menu instance)
:param onreset: Function executed when resetting the Menu. The function must be non-argument or single argument (Menu instance)
:param overflow: Enables overflow on x/y axes. If ``False`` then scrollbars will not work and the maximum width/height of the scrollarea is the same as the Menu container. Style: (overflow_x, overflow_y). If ``False`` or ``True`` the value will be set on both axis
:param position: Position on x-axis and y-axis. If the value is only 2 elements, the position is relative to the window width (thus, values must be 0-100%); else, the third element defines if the position is relative or not. If ``(x, y, False)`` the values of ``(x, y)`` are in px
:param rows: Number of rows of each column, if there's only 1 column ``None`` can be used for no-limit. Also, a tuple can be provided for defining different number of rows for each column, for example ``rows=10`` (each column can have a maximum 10 widgets), or ``rows=[2, 3, 5]`` (first column has 2 widgets, second 3, and third 5)
:param screen_dimension: List/Tuple representing the dimensions the Menu should reference for sizing/positioning (width, height), if ``None`` pygame is queried for the display mode. This value defines the ``window_size`` of the Menu
:param theme: Menu theme
:param touchscreen: Enable/disable touch action inside the Menu. Only available on pygame 2
:param touchscreen_motion_selection: Select widgets using touchscreen motion. If ``True`` menu draws a ``focus`` on the selected widget
"""
_auto_centering: bool
_background_function: Tuple[bool, Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]]]
_clock: 'pygame.time.Clock'
_column_max_width: VectorType
_column_max_width_zero: List[bool]
_column_min_width: VectorType
_column_pos_x: List[NumberType]
_column_widths: List[NumberType]
_columns: int
_ctrl: 'Controller'
_current: 'Menu'
_decorator: 'Decorator'
_disable_draw: bool
_disable_exit: bool
_disable_update: bool
_enabled: bool
_height: int
_index: int
_joy_event: int
_joy_event_repeat: int
_joystick: bool
_keyboard: bool
_keyboard_ignore_nonphysical: bool
_last_scroll_thickness: List[Union[Tuple2IntType, int]]
_last_selected_type: str
_last_update_mode: List[str]
_mainloop: bool
_max_row_column_elements: int
_menubar: 'MenuBar'
_mouse: bool
_mouse_motion_selection: bool
_mouse_visible: bool
_mouse_visible_default: bool
_mouseover: bool
_onbeforeopen: Optional[Callable[['Menu', 'Menu'], Any]]
_onclose: Optional[Union['_events.MenuAction', Callable[['Menu'], Any], CallableNoArgsType]]
_onmouseleave: Optional[Union[Callable[['Menu', EventType], Any], CallableNoArgsType]]
_onmouseover: Optional[Union[Callable[['Menu', EventType], Any], CallableNoArgsType]]
_onreset: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]]
_onupdate: Optional[Union[Callable[[EventListType, 'Menu'], Any], CallableNoArgsType]]
_onwidgetchange: Optional[Callable[['Menu', 'Widget'], Any]]
_onwindowmouseleave: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]]
_onwindowmouseover: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]]
_overflow: Tuple2BoolType
_position: Tuple2IntType
_position_default: Tuple2IntType
_position_relative: bool
_prev: Optional[List[Union['Menu', List['Menu']]]]
_runtime_errors: '_MenuRuntimeErrorConfig'
_scrollarea: 'ScrollArea'
_scrollarea_margin: List[int]
_sound: 'Sound'
_stats: '_MenuStats'
_submenus: Dict['Menu', List['Widget']]
_theme: 'Theme'
_top: 'Menu'
_touchscreen: bool
_touchscreen_motion_selection: bool
_translate: Tuple2IntType
_update_frames: List['Frame'] # Stores the reference of scrollable frames to check inputs
_update_widgets: List['Widget'] # Stores widgets which should always update
_used_columns: int
_validate_frame_widgetmove: bool
_widget_columns: Dict[int, List['Widget']]
_widget_max_position: Tuple2IntType
_widget_min_position: Tuple2IntType
_widget_offset: List[int]
_widget_selected_update: bool # Selected widget receives updates
_widget_surface_cache_enabled: bool
_widget_surface_cache_need_update: bool
_widgets: List['Widget']
_widgets_surface: Optional['pygame.Surface']
_widgets_surface_last: Tuple[int, int, Optional['pygame.Surface']]
_widgets_surface_need_update: bool
_width: int
_window_size: Tuple2IntType
add: 'WidgetManager'
def __init__(
self,
title: str,
width: NumberType,
height: NumberType,
center_content: bool = True,
column_max_width: MenuColumnMaxWidthType = None,
column_min_width: MenuColumnMinWidthType = 0,
columns: int = 1,
enabled: bool = True,
joystick_enabled: bool = True,
keyboard_enabled: bool = True,
keyboard_ignore_nonphysical: bool = True,
menu_id: str = '',
mouse_enabled: bool = True,
mouse_motion_selection: bool = False,
mouse_visible: bool = True,
onclose: Optional[Union['_events.MenuAction', Callable[['Menu'], Any], CallableNoArgsType]] = None,
onreset: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]] = None,
overflow: Union[Vector2BoolType, bool] = (True, True),
position: Union[Vector2NumberType, Tuple[NumberType, NumberType, bool]] = (50, 50, True),
rows: MenuRowsType = None,
screen_dimension: Optional[Vector2IntType] = None,
theme: 'Theme' = THEME_DEFAULT.copy(),
touchscreen: bool = False,
touchscreen_motion_selection: bool = False
) -> None:
super(Menu, self).__init__(object_id=menu_id)
assert isinstance(center_content, bool)
assert isinstance(column_max_width, (VectorInstance, type(None), NumberInstance))
assert isinstance(column_min_width, (VectorInstance, NumberInstance))
assert isinstance(columns, int)
assert isinstance(enabled, bool)
assert isinstance(joystick_enabled, bool)
assert isinstance(keyboard_enabled, bool)
assert isinstance(mouse_enabled, bool)
assert isinstance(mouse_motion_selection, bool)
assert isinstance(mouse_visible, bool)
assert isinstance(overflow, (VectorInstance, bool))
assert isinstance(rows, (int, type(None), VectorInstance))
assert isinstance(theme, Theme), \
'theme bust be a pygame_menu.themes.Theme object instance'
assert isinstance(touchscreen, bool)
assert isinstance(touchscreen_motion_selection, bool)
# Assert theme
theme.validate()
# Assert pygame was initialized
assert not hasattr(pygame, 'get_init') or pygame.get_init(), \
'pygame is not initialized'
# Assert python version is greater than 3.6
assert sys.version_info >= (3, 6, 0), \
'pygame-menu only supports python equal or greater than version 3.6.0'
# Column/row asserts
assert columns >= 1, \
f'the number of columns must be equal or greater than 1 (current={columns})'
if columns > 1:
assert rows is not None, \
'rows cannot be None if the number of columns is greater than 1'
if isinstance(rows, int):
assert rows >= 1, \
f'if number of columns is greater than 1 (current={columns}) then the ' \
f'number of rows must be equal or greater than 1 (current={rows})'
rows = [rows for _ in range(columns)]
assert isinstance(rows, VectorInstance), \
'if rows is not an integer it must be a tuple/list'
assert len(rows) == columns, \
f'the length of the rows vector must be the same as the number of' \
f' columns (current={rows}, expected={columns})'
for i in rows:
assert isinstance(i, int), \
'each item of rows tuple/list must be an integer'
assert i >= 1, \
'each item of the rows tuple/list must be equal or greater than one'
else:
if rows is None:
rows = 10000000 # Set rows as a big number
else:
assert isinstance(rows, int), \
'rows cannot be a tuple/list as there\'s only 1 column'
assert rows >= 1, \
'number of rows must be equal or greater than 1. If there is ' \
'no limit rows must be None'
rows = [rows]
# Set column min width
if isinstance(column_min_width, NumberInstance):
assert column_min_width >= 0, \
'column_min_width must be equal or greater than zero'
if columns != 1:
if column_min_width > 0: # Ignore the default value
warn(
f'column_min_width can be a single number if there is only '
f'1 column, but there is {columns} columns. Thus, column_min_width '
f'should be a vector of {columns} items. By default a vector has '
f'been created using the same value for each column'
)
column_min_width = [column_min_width for _ in range(columns)]
else:
column_min_width = [column_min_width]
assert len(column_min_width) == columns, \
f'column_min_width length must be the same as the number of columns, ' \
f'but size is different {len(column_min_width)}!={columns}'
for i in column_min_width:
assert isinstance(i, NumberInstance), \
'each item of column_min_width must be an integer/float'
assert i >= 0, \
'each item of column_min_width must be equal or greater than zero'
# Set column max width
if column_max_width is not None:
if isinstance(column_max_width, NumberInstance):
assert column_max_width >= 0, \
'column_max_width must be equal or greater than zero'
if columns != 1:
column_max_width = [column_max_width for _ in range(columns)]
else:
column_max_width = [column_max_width]
assert len(column_max_width) == columns, \
f'column_max_width length must be the same as the number of columns, ' \
f'but size is different {len(column_max_width)}!={columns}'
for i in column_max_width:
assert isinstance(i, type(None)) or isinstance(i, NumberInstance), \
'each item of column_max_width can be None (no limit) or an ' \
'integer/float'
assert i is None or i >= 0, \
'each item of column_max_width must be equal or greater than' \
' zero or None'
else:
column_max_width = [None for _ in range(columns)]
# Check that every column max width is equal or greater than minimum width
for i in range(len(column_max_width)):
if column_max_width[i] is not None:
assert column_max_width[i] >= column_min_width[i], \
f'item {i} of column_max_width ({column_max_width[i]}) must be equal or greater ' \
f'than column_min_width ({column_min_width[i]})'
# Element size and position asserts
if len(position) == 3:
# noinspection PyTypeChecker
self._position_relative = position[2]
position = position[0:2]
else:
self._position_relative = True
assert_vector(position, 2)
# Assert overflow
if isinstance(overflow, bool): # If single value
overflow = overflow, overflow
assert len(overflow) == 2, \
'overflow must be a 2-item tuple/list of booleans (x-axis, y-axis)'
assert isinstance(overflow[0], bool), \
'overflow on x-axis must be a boolean object'
assert isinstance(overflow[1], bool), \
'overflow on y-axis must be a boolean object'
# General properties of the Menu
self._auto_centering = center_content
self._background_function = (False, None) # Accept menu as argument, callable object
self._clock = pygame.time.Clock()
self._decorator = Decorator(self)
self._enabled = enabled # Menu is enabled or not. If disabled menu can't update or draw
self._index = -1 # Selected index, if -1 the widget does not have been selected yet
self._last_scroll_thickness = [(0, 0), 0] # scroll and the number of recursive states
self._last_selected_type = '' # Last type selection, used for test purposes
self._mainloop = False # Menu is in mainloop state
self._onclose = None # Function or event called on Menu close
self._sound = Sound()
self._stats = _MenuStats()
self._submenus = {}
self._theme = theme
# Set callbacks
self.set_onclose(onclose)
self.set_onreset(onreset)
self._onbeforeopen = None
self._onmouseleave = None
self._onmouseover = None
self._onupdate = None
self._onwidgetchange = None
self._onwindowmouseleave = None
self._onwindowmouseover = None
# Menu links (pointer to previous and next menus in nested submenus),
# for public methods accessing, self should be used through "_current",
# because user can move through submenus and self pointer should target
# the current Menu object. Private methods access through self
# (not _current) because these methods are called by public (_current) or
# by themselves. _top is only used when moving through menus (open, reset)
self._current = self # Current Menu
# Prev stores a list of Menu pointers, when accessing a submenu, prev grows
# as prev = [prev, new_pointer]
self._prev = None
# Top is the same for the menus and submenus if the user moves through them
self._top = self
# Menu widgets, it should not be accessed outside the object as strange
# issues can occur
self.add = WidgetManager(self)
self._widget_selected_update = True
self._widgets = [] # This list may change during execution (replaced by a new one)
# Stores the frames which receive update events, updated and managed only
# by the Frame class
self._update_frames = []
# Stores the widgets which receive update even if not selected or events
# is empty
self._update_widgets = []
# Widget surface
self._widgets_surface = None
self._widgets_surface_need_update = False
self._widgets_surface_last = (0, 0, None)
# Precache widgets surface draw, this method dramatically increases the
# performance of the menu rendering
self._widget_surface_cache_enabled = True
# This boolean variable, if True, forces the cache to be updated, after
# updating, _widget_surface_cache_need_update goes back again to False,
# thus, the state only is used once
self._widget_surface_cache_need_update = True
# Columns and rows
self._column_max_width_zero = []
for i in range(len(column_max_width)):
if column_max_width[i] == 0:
self._column_max_width_zero.append(True)
else:
self._column_max_width_zero.append(False)
self._column_max_width = column_max_width
self._column_min_width = column_min_width
self._column_pos_x = [] # Stores the center x position of each column
self._column_widths = []
self._columns = columns
self._max_row_column_elements = 0
self._rows = rows
self._used_columns = 0 # Total columns used in widget positioning
self._widget_columns = {}
self._widget_max_position = (0, 0)
self._widget_min_position = (0, 0)
for r in self._rows:
self._max_row_column_elements += r
# Position of Menu
self._position_default = position
self._position = (0, 0)
self._translate = (0, 0)
# Set the size
self.resize(
width=width,
height=height,
screen_dimension=screen_dimension
)
# Setups controller
self._ctrl = Controller()
# Init joystick
self._joystick = joystick_enabled
if self._joystick:
if not pygame.joystick.get_init():
pygame.joystick.init()
for i in range(pygame.joystick.get_count()):
pygame.joystick.Joystick(i).init()
self._joy_event = 0
self._joy_event_repeat = pygame.NUMEVENTS - 1
# Init keyboard
self._keyboard = keyboard_enabled
self._keyboard_ignore_nonphysical = keyboard_ignore_nonphysical
# Init mouse
if mouse_motion_selection:
assert mouse_enabled, \
'mouse motion selection cannot be enabled if mouse is disabled'
assert mouse_visible, \
'mouse motion cannot be enabled if mouse is not visible'
assert hasattr(pygame, 'MOUSEMOTION'), \
'pygame MOUSEMOTION does not exist, thus, mouse motion selection' \
' cannot be enabled'
self._mouse = mouse_enabled and mouse_visible
self._mouseover = False
self._mouse_motion_selection = mouse_motion_selection
self._mouse_visible = mouse_visible
self._mouse_visible_default = mouse_visible
# Init touchscreen
if touchscreen_motion_selection:
assert touchscreen, \
'touchscreen motion selection cannot be enabled if touchscreen is disabled'
self._touchscreen = touchscreen
self._touchscreen_motion_selection = touchscreen_motion_selection
# Create menubar (title)
self._menubar = MenuBar(
back_box=theme.title_close_button,
back_box_background_color=theme.title_close_button_background_color,
background_color=self._theme.title_background_color,
mode=self._theme.title_bar_style,
modify_scrollarea=self._theme.title_bar_modify_scrollarea,
offsetx=theme.title_offset[0],
offsety=theme.title_offset[1],
onreturn=self._back,
title=title,
width=self._width
)
self._menubar.set_menu(self)
self._menubar.set_font(
antialias=self._theme.title_font_antialias,
background_color=None,
color=self._theme.title_font_color,
font=self._theme.title_font,
font_size=self._theme.title_font_size,
readonly_color=self._theme.readonly_color,
readonly_selected_color=self._theme.readonly_selected_color,
selected_color=self._theme.title_font_color
)
self._menubar.set_cursor(self._theme.title_close_button_cursor)
self._menubar.set_font_shadow(
color=self._theme.title_font_shadow_color,
enabled=self._theme.title_font_shadow,
offset=self._theme.title_font_shadow_offset,
position=self._theme.title_font_shadow_position
)
self._menubar.set_controls(
joystick=self._joystick,
mouse=self._mouse,
touchscreen=self._touchscreen,
keyboard=self._keyboard
)
self._menubar.set_position(*self.get_position())
if self._theme.title_floating:
self._menubar.set_float()
if not self._theme.title:
self._menubar.hide()
self._menubar.configured = True
self._menubar.fixed = self._theme.title_fixed
# Scrolling area
menubar_height = self._menubar.get_height()
if self._height - menubar_height <= 0:
raise ValueError(f'menubar is higher than menu height ({menubar_height} > {self._height})')
extend_y = 0 if self._theme.title_fixed else menubar_height
self._scrollarea = ScrollArea(
area_color=self._theme.background_color,
area_height=self._height - extend_y,
area_width=self._width,
border_color=self._theme.border_color,
border_width=self._theme.border_width,
controls_joystick=self._joystick,
controls_keyboard=self._keyboard,
controls_mouse=self._mouse,
controls_touchscreen=self._touchscreen,
extend_y=extend_y,
menubar=self._menubar,
scrollbar_color=self._theme.scrollbar_color,
scrollbar_cursor=self._theme.scrollbar_cursor,
scrollbar_slider_color=self._theme.scrollbar_slider_color,
scrollbar_slider_hover_color=self._theme.scrollbar_slider_hover_color,
scrollbar_slider_pad=self._theme.scrollbar_slider_pad,
scrollbar_thick=self._theme.scrollbar_thick,
scrollbars=get_scrollbars_from_position(self._theme.scrollarea_position),
shadow=self._theme.scrollbar_shadow,
shadow_color=self._theme.scrollbar_shadow_color,
shadow_offset=self._theme.scrollbar_shadow_offset,
shadow_position=self._theme.scrollbar_shadow_position
)
self._scrollarea.set_menu(self)
self._scrollarea.set_position(*self.get_position())
self._overflow = tuple(overflow)
# Controls the behaviour of runtime errors
self._runtime_errors = _MenuRuntimeErrorConfig()
# Stores the last update
self._last_update_mode = []
# These can be changed without any major problem
self._disable_exit = False
self._disable_draw = False
self._disable_widget_update_mousepos_mouseselection = False
self._disable_update = False
self._validate_frame_widgetmove = True
    def resize(
        self,
        width: NumberType,
        height: NumberType,
        screen_dimension: Optional[Vector2IntType] = None,
        position: Optional[Union[Vector2NumberType, Tuple[NumberType, NumberType, bool]]] = None
    ) -> 'Menu':
        """
        Resize the menu to another width/height

        :param width: Menu width (px)
        :param height: Menu height (px)
        :param screen_dimension: List/Tuple representing the dimensions the Menu should reference for sizing/positioning (width, height), if ``None`` pygame is queried for the display mode. This value defines the ``window_size`` of the Menu
        :param position: Position on x-axis and y-axis. If the value is only 2 elements, the position is relative to the window width (thus, values must be 0-100%); else, the third element defines if the position is relative or not. If ``(x, y, False)`` the values of ``(x, y)`` are in px. If ``None`` use the default from the menu constructor
        :return: Self reference
        :raises RuntimeError: If no pygame display surface exists and ``screen_dimension`` was not given
        :raises ValueError: If the menubar is higher than the new menu height
        """
        assert isinstance(width, NumberInstance)
        assert isinstance(height, NumberInstance)
        assert width > 0 and height > 0, \
            'menu width and height must be greater than zero'

        # Convert to int
        width, height = int(width), int(height)

        # Get window size if not given explicitly
        if screen_dimension is not None:
            assert_vector(screen_dimension, 2)
            assert screen_dimension[0] > 0, 'screen width must be higher than zero'
            assert screen_dimension[1] > 0, 'screen height must be higher than zero'
            self._window_size = screen_dimension
        else:
            surface = pygame.display.get_surface()
            if surface is None:
                raise RuntimeError('pygame surface could not be retrieved, check '
                                   'if pygame.display.set_mode() was called')
            self._window_size = surface.get_size()
        self._window_size = (int(self._window_size[0]), int(self._window_size[1]))

        # Check menu sizing
        window_width, window_height = self._window_size
        assert width <= window_width and height <= window_height, \
            f'menu size ({width}x{height}) must be lower or equal than the size of the ' \
            f'window ({window_width}x{window_height})'

        # Store width and height
        self._height = height
        self._width = width

        # Compute widget offset. Theme offsets whose absolute value is lower
        # than 1 are interpreted as a fraction of the menu width/height
        self._widget_offset = [self._theme.widget_offset[0], self._theme.widget_offset[1]]
        if abs(self._widget_offset[0]) < 1:
            self._widget_offset[0] *= self._width
        if abs(self._widget_offset[1]) < 1:
            self._widget_offset[1] *= self._height

        # Cast to int offset
        self._widget_offset[0] = int(self._widget_offset[0])
        self._widget_offset[1] = int(self._widget_offset[1])

        # If centering is enabled, but widget offset in the vertical is different
        # from zero a warning is raised
        if self._auto_centering and self._widget_offset[1] != 0:
            warn(
                f'menu is vertically centered (center_content=True), but widget '
                f'offset (from theme) is different than zero ({self._widget_offset[1]}px). '
                f'Auto-centering has been disabled'
            )
            self._auto_centering = False

        # Scroll area outer margin. As with widget offsets, absolute values
        # lower than 1 are treated as a fraction of the menu width/height
        self._scrollarea_margin = [self._theme.scrollarea_outer_margin[0],
                                   self._theme.scrollarea_outer_margin[1]]
        if abs(self._scrollarea_margin[0]) < 1:
            self._scrollarea_margin[0] *= self._width
        if abs(self._scrollarea_margin[1]) < 1:
            self._scrollarea_margin[1] *= self._height
        self._scrollarea_margin[0] = int(self._scrollarea_margin[0])
        self._scrollarea_margin[1] = int(self._scrollarea_margin[1])

        # If centering is enabled, but ScrollArea margin in the vertical is
        # different from zero a warning is raised
        if self._auto_centering and self._scrollarea_margin[1] != 0:
            warn(
                f'menu is vertically centered (center_content=True), but '
                f'ScrollArea outer margin (from theme) is different than zero '
                f'({round(self._scrollarea_margin[1], 3)}px). Auto-centering has been disabled'
            )
            self._auto_centering = False

        # Configure menubar. The hasattr guards are required because resize()
        # is also called from the constructor, before the menubar/scrollarea
        # attributes exist
        extend_y = 0
        if hasattr(self, '_menubar'):
            self._menubar._width = self._width
            menubar_height = self._menubar.get_height()
            if self._height - menubar_height <= 0:
                raise ValueError(f'menubar is higher than menu height ({menubar_height} > {self._height})')
            extend_y = 0 if self._theme.title_fixed else menubar_height

        # Configure scrollbar
        if hasattr(self, '_scrollarea'):
            self._scrollarea.create_rect(self._width, self._height - extend_y)

        # Update column max width: columns configured with max width 0 track
        # the (new) menu width
        for i in range(len(self._column_max_width)):
            if self._column_max_width_zero[i]:
                self._column_max_width[i] = self._width

        # Force the rendering
        if self._widgets_surface is not None:
            self._widgets_surface_need_update = True

        # Update the menu position
        if position is None:
            position = self._position_default
        else:
            if len(position) == 3:
                # noinspection PyTypeChecker
                self._position_relative = position[2]
            else:
                self._position_relative = True
        if self._position_relative:
            self.set_relative_position(position[0], position[1])
        else:
            self.set_absolute_position(position[0], position[1])

        return self
    def __copy__(self) -> 'Menu':
        """
        Copy method. Menus cannot be copied (widgets hold back-references to
        their Menu), so this always raises.

        :return: Raises copy exception
        """
        raise _MenuCopyException('Menu class cannot be copied')
    def __deepcopy__(self, memodict: Dict) -> 'Menu':
        """
        Deep-copy method. Menus cannot be deep-copied, so this always raises.

        :param memodict: Memo dict
        :return: Raises copy exception
        """
        raise _MenuCopyException('Menu class cannot be deep-copied')
    def force_surface_update(self) -> 'Menu':
        """
        Forces current Menu surface update after next rendering call.

        .. note::

            This method is expensive, as menu surface update forces re-rendering
            of all widgets (because them can change in size, position, etc...).

        .. warning::

            This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
            for example, ``menu.get_current().update(...)``.

        :return: Self reference
        """
        # Only sets the flag; the actual re-render happens on the next render
        self._current._widgets_surface_need_update = True
        return self
    def force_surface_cache_update(self) -> 'Menu':
        """
        Forces current Menu surface cache to update after next drawing call.
        Also invalidates the Decorator cache.

        .. note::

            This method only updates the surface cache, without forcing re-rendering
            of all Menu widgets.

        .. warning::

            This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
            for example, ``menu.get_current().update(...)``.

        :return: Self reference
        """
        self._current._widget_surface_cache_need_update = True
        self._current._decorator.force_cache_update()
        return self
def set_onbeforeopen(
self,
onbeforeopen: Optional[Callable[['Menu', 'Menu'], Any]]
) -> 'Menu':
"""
Set ``onbeforeopen`` callback. Callback is executed before opening the
Menu, it receives the current Menu and the next Menu:
.. code-block:: python
onbeforeopen(current Menu <from>, next Menu <to>)
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param onbeforeopen: Onbeforeopen callback, it can be a function or None
:return: Self reference
"""
assert callable(onbeforeopen) or onbeforeopen is None, \
'onbeforeopen must be callable (function-type) or None'
self._onbeforeopen = onbeforeopen
return self
def set_onupdate(
self,
onupdate: Optional[Union[Callable[[EventListType, 'Menu'], Any], CallableNoArgsType]]
) -> 'Menu':
"""
Set ``onupdate`` callback. Callback is executed before updating the Menu,
it receives the event list and the Menu reference; also, ``onupdate`` can
receive zero arguments:
.. code-block:: python
onupdate(event_list, menu) <or> onupdate()
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param onupdate: Onupdate callback, it can be a function or None
:return: Self reference
"""
assert callable(onupdate) or onupdate is None, \
'onupdate must be a callable (function-type) or None'
self._onupdate = onupdate
return self
def set_onclose(
self,
onclose: Optional[Union['_events.MenuAction', Callable[['Menu'], Any], CallableNoArgsType]]
) -> 'Menu':
"""
Set ``onclose`` callback. Callback can only receive 1 argument maximum
(if not ``None``), if so, the Menu instance is provided:
.. code-block:: python
onclose(menu) <or> onclose()
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param onclose: Onclose callback, it can be a function, a pygame-menu event, or None
:return: Self reference
"""
assert callable(onclose) or _events.is_event(onclose) or onclose is None, \
'onclose must be a MenuAction (event), callable (function-type), or None'
if onclose == _events.NONE:
onclose = None
self._onclose = onclose
return self
def set_onreset(
self,
onreset: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]]
) -> 'Menu':
"""
Set ``onreset`` callback. Callback can only receive 1 argument maximum
(if not ``None``), if so, the Menu instance is provided:
.. code-block:: python
onreset(menu) <or> onreset()
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param onreset: Onreset callback, it can be a function or None
:return: Self reference
"""
assert callable(onreset) or onreset is None, \
'onreset must be a callable (function-type) or None'
self._onreset = onreset
return self
def set_onwindowmouseover(
self,
onwindowmouseover: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]]
) -> 'Menu':
"""
Set ``onwindowmouseover`` callback. This method is executed in
:py:meth:`pygame_menu.menu.Menu.update` method. The callback function
receives the following arguments:
.. code-block:: python
onwindowmouseover(menu) <or> onwindowmouseover()
:param onwindowmouseover: Callback executed if user enters the window with the mouse; it can be a function or None
:return: Self reference
"""
if onwindowmouseover is not None:
assert callable(onwindowmouseover), \
'onwindowmouseover must be callable (function-type) or None'
self._onwindowmouseover = onwindowmouseover
return self
def set_onwindowmouseleave(
        self,
        onwindowmouseleave: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]]
) -> 'Menu':
    """
    Set the ``onwindowmouseleave`` callback, executed within
    :py:meth:`pygame_menu.menu.Menu.update` when the mouse leaves the window.
    The callback receives the Menu instance, or nothing:

    .. code-block:: python

        onwindowmouseleave(menu) <or> onwindowmouseleave()

    :param onwindowmouseleave: Callback executed if user leaves the window with the mouse; it can be a function or None
    :return: Self reference
    """
    assert onwindowmouseleave is None or callable(onwindowmouseleave), \
        'onwindowmouseleave must be callable (function-type) or None'
    self._onwindowmouseleave = onwindowmouseleave
    return self
def set_onwidgetchange(
        self,
        onwidgetchange: Optional[Callable[['Menu', 'Widget'], Any]]
) -> 'Menu':
    """
    Set the ``onwidgetchange`` callback, executed whenever any appended widget
    changes its value. The callback receives the Menu and the widget:

    .. code-block:: python

        onwidgetchange(menu, widget)

    :param onwidgetchange: Callback executed if an appended widget changes its value
    :return: Self reference
    """
    assert onwidgetchange is None or callable(onwidgetchange), \
        'onwidgetchange must be callable (function-type) or None'
    self._onwidgetchange = onwidgetchange
    return self
def set_onmouseover(
        self,
        onmouseover: Optional[Union[Callable[['Menu', EventType], Any], CallableNoArgsType]]
) -> 'Menu':
    """
    Set the ``onmouseover`` callback, executed within
    :py:meth:`pygame_menu.menu.Menu.update` when the mouse enters the Menu.
    The callback receives the Menu and the triggering event, or nothing:

    .. code-block:: python

        onmouseover(menu, event) <or> onmouseover()

    :param onmouseover: Callback executed if user enters the Menu with the mouse; it can be a function or None
    :return: Self reference
    """
    assert onmouseover is None or callable(onmouseover), \
        'onmouseover must be callable (function-type) or None'
    self._onmouseover = onmouseover
    return self
def set_onmouseleave(
        self,
        onmouseleave: Optional[Union[Callable[['Menu', EventType], Any], CallableNoArgsType]]
) -> 'Menu':
    """
    Set the ``onmouseleave`` callback, executed within
    :py:meth:`pygame_menu.menu.Menu.update` when the mouse leaves the Menu.
    The callback receives the Menu and the triggering event, or nothing:

    .. code-block:: python

        onmouseleave(menu, event) <or> onmouseleave()

    :param onmouseleave: Callback executed if user leaves the Menu with the mouse; it can be a function or None
    :return: Self reference
    """
    assert onmouseleave is None or callable(onmouseleave), \
        'onmouseleave must be callable (function-type) or None'
    self._onmouseleave = onmouseleave
    return self
def get_current(self) -> 'Menu':
    """
    Get the **current** active Menu. If the user has not opened any submenu
    the returned pointer is the base Menu itself; otherwise it is the Menu
    currently being displayed.

    :return: Menu object **(current)**
    """
    active = self._current
    return active
def translate(self, x: NumberType, y: NumberType) -> 'Menu':
    """
    Translate the Menu to (+x, +y) relative to its default position.

    .. note::

        To revert changes, only set to ``(0, 0)``.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param x: +X in px
    :param y: +Y in px
    :return: Self reference
    """
    assert isinstance(x, NumberInstance) and isinstance(y, NumberInstance)
    self._translate = (int(x), int(y))
    # Drop the widget surface so it is rebuilt at the new position
    self._widgets_surface = None
    self._render()
    return self
def get_translate(self) -> Tuple2IntType:
    """
    Get the Menu translation on x-axis and y-axis (x, y) in px.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Translation on both axis
    """
    translation = self._translate
    return translation
def get_position(self) -> Tuple2IntType:
    """
    Return the Menu position (constructor position plus translation).

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Position on x-axis and y-axis (x,y) in px
    """
    pos_x, pos_y = self._position
    off_x, off_y = self._translate
    return pos_x + off_x, pos_y + off_y
def select_widget(self, widget: Optional[Union['Widget', str]]) -> 'Menu':
    """
    Select a widget from the Menu. If ``None`` unselect the current one.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param widget: Widget to be selected or Widget ID. If ``None`` unselect the current
    :return: Self reference
    """
    # ``None`` unselects every widget and clears the selection index
    if widget is None:
        for appended in self._widgets:
            appended.select(False)
        self._index = -1
        return self

    # A widget ID is accepted in place of a Widget instance
    if isinstance(widget, str):
        widget = self.get_widget(widget)
    assert isinstance(widget, Widget)

    if not widget.is_selectable:
        raise ValueError(f'{widget.get_class_id()} is not selectable')
    if not widget.is_visible():  # Considers frame
        raise ValueError(f'{widget.get_class_id()} is not visible')

    try:
        index = self._widgets.index(widget)  # If not exists this raises ValueError
    except ValueError:
        raise ValueError(f'{widget.get_class_id()} is not in Menu, check if exists on the current '
                         f'with menu.get_current().remove_widget(widget)')
    self._select(index, 1, SELECT_WIDGET, False)
    self.force_surface_cache_update()
    return self
def unselect_widget(self) -> 'Menu':
    """
    Unselect the currently selected widget.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Self reference
    """
    # Delegates to select_widget, whose ``None`` branch clears the selection
    return self.select_widget(None)
def remove_widget(self, widget: Union['Widget', str]) -> 'Menu':
    """
    Remove the ``widget`` from the Menu. If widget not exists on Menu this
    method raises a ``ValueError`` exception.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param widget: Widget object or Widget ID
    :raises ValueError: If the widget is not appended to this Menu
    :return: Self reference
    """
    # Resolve a widget ID into the actual Widget instance
    if isinstance(widget, str):
        widget = self.get_widget(widget)
    assert isinstance(widget, Widget)
    try:
        index = self._widgets.index(widget)  # If not exists this raises ValueError
    except ValueError:
        raise ValueError('widget is not in Menu, check if exists on the current '
                         'with menu.get_current().remove_widget(widget)')
    self._widgets.pop(index)
    self._update_after_remove_or_hidden(index)  # Forces surface update
    self._stats.removed_widgets += 1
    # If widget is within a frame, remove from frame
    frame = widget.get_frame()
    if frame is not None:
        frame.unpack(widget)
    # If widget points to a hook (e.g. a button that opens a submenu), remove
    # the submenu reference from this Menu as well
    # noinspection PyProtectedMember
    menu_hook = widget._menu_hook
    if menu_hook in self._submenus.keys():
        self._remove_submenu(menu_hook, widget)
    widget._menu_hook = None
    # Let the widget run its own removal callback before detaching it
    widget.on_remove_from_menu()
    # Removes Menu reference from widget. If Frame, it removes from _update_frames
    widget.set_menu(None)
    # Remove widget from update lists
    if widget in self._update_widgets:
        self._update_widgets.remove(widget)
    check_widget_mouseleave()
    return self
def get_sound(self) -> 'Sound':
    """
    Return the sound engine used by the Menu.

    :return: Sound API
    """
    engine = self._sound
    return engine
def _update_after_remove_or_hidden(
self,
index: int,
update_surface: bool = True
) -> None:
"""
Update widgets after removal or hidden.
:param index: Removed index, if ``-1`` then select next index, if equal to ``self._index`` select the same
:param update_surface: Updates Menu surface
"""
# Check if there's more selectable widgets
n_select = 0
last_selectable = 0
for indx in range(len(self._widgets)):
wid = self._widgets[indx]
if wid.is_selectable and wid.is_visible(): # Considers frame
n_select += 1
last_selectable = indx
# Any widget is selected
if n_select == 0:
self._index = -1
# Select the unique selectable option
elif n_select == 1:
self._select(last_selectable, 0, SELECT_REMOVE, False)
# There is at least 1 option to select from
elif n_select > 1:
if index == -1: # Index was hidden
self._select(self._index + 1, 1, SELECT_REMOVE, False)
elif self._index > index: # If the selected widget was after this
self._select(self._index - 1, -1, SELECT_REMOVE, False)
else:
self._select(self._index, 1, SELECT_REMOVE, False)
self._update_widget_position()
if update_surface:
# If added on execution time forces the update of the surface
self._widgets_surface = None
def _back(self) -> None:
"""
Go to previous Menu or close if the top Menu is currently displayed.
"""
if self._top._prev is not None:
self.reset(1)
else:
self._close()
def _update_selection_if_hidden(self) -> None:
"""
Updates the Menu widget selection if a widget was hidden.
"""
if len(self._widgets) > 0:
if self._index != -1:
selected_widget = self._widgets[self._index % len(self._widgets)]
if not selected_widget.is_visible(): # Considers frame
selected_widget.select(False) # Unselect
self._update_after_remove_or_hidden(-1, update_surface=False)
else:
self._update_after_remove_or_hidden(0, update_surface=False)
def _update_widget_position(self) -> None:
    """
    Update the position of each widget. Also checks widget consistency.

    The method works in several passes: (1) assign each visible, non-framed
    widget to a (column, row) slot and gather minimum column widths;
    (2) scale column widths to fill the available inner width; (3) compute
    the final (x, y) pixel position of every widget and the overall
    min/max widget bounds used for sizing the scrollable surface.
    """
    # Column widgets
    self._widget_columns = {}
    for i in range(self._columns):
        self._widget_columns[i] = []

    # Set the column widths (minimum values), safe for certain widgets that
    # request the width on rendering
    self._column_widths = []
    column_widths = [self._column_min_width[i] for i in range(self._columns)]

    # Set column/row of each widget and compute maximum width of each column if None
    self._used_columns = 0
    max_elements_msg = \
        f'total visible/non-floating widgets ([widg]) cannot exceed columns*rows' \
        f'({self._max_row_column_elements} elements). Menu position update failed.' \
        f' If using frames, please pack before adding new widgets'
    i_index = 0
    has_frame = False

    # Checks for widget selection consistency: exactly one widget may be
    # selected; extra selections are cleared and reported at the end
    has_selected_widget = False
    invalid_selection_widgets: List[str] = []
    selected_widget = None

    for index in range(len(self._widgets)):
        widget = self._widgets[index]

        # Check widget selection
        if widget.is_selected():
            if not has_selected_widget:
                has_selected_widget = True
                selected_widget = widget.get_class_id()
                self._index = index
            else:
                widget.select(False)
                invalid_selection_widgets.append(widget.get_class_id())

        # If widget is frame, position its packed children first
        if isinstance(widget, Frame):
            try:
                widget.update_position()
            except:  # Bare except is intentional: warn with context, then re-raise
                warn(f'{widget.get_class_id()} failed to update')
                raise
            has_frame = True

        # If not visible, or within frame, continue to the next widget
        if not widget.is_visible() or widget.get_frame() is not None:
            widget.set_col_row_index(-1, -1, index)
            continue

        # Check if the maximum number of elements was reached, if so raise an exception
        # If menu has frames, this check is disabled
        if not has_frame and not i_index < self._max_row_column_elements:
            raise _MenuWidgetOverflow(max_elements_msg.replace('[widg]', str(i_index)))

        # Set the widget column/row position
        row = i_index
        col = 0
        max_rows = 0
        for col in range(self._columns):  # Find which column it belongs to
            max_rows += self._rows[col]
            if i_index < max_rows:
                break
            row -= self._rows[col]  # Subtract the number of rows of such column

        # Important before getting widget width as some widgets require the
        # column max width
        widget.set_col_row_index(col, row, index)
        self._widget_columns[col].append(widget)

        # Update used columns
        self._used_columns = max(self._used_columns, col + 1)

        # Get the next widget; if it doesn't exist, use the same
        next_widget = widget
        if index < len(self._widgets) - 1:
            next_widget = self._widgets[index + 1]

        # If widget is floating don't update the next
        if not (next_widget.is_floating() and next_widget.get_frame() is None):
            i_index += 1
        # If floating, don't contribute to the column width
        else:
            continue

        column_widths[col] = max(
            column_widths[col],
            widget.get_width(apply_selection=True)  # This forces rendering
        )

    # More than one widget was selected: fail loudly so the caller can fix it
    if len(invalid_selection_widgets) > 0:
        self._index = -1
        raise _MenuMultipleSelectedWidgetsException(
            f'several widgets are selected at the same time, current selected '
            f'(sorted by index): {selected_widget}, but the following are also'
            f' selected: {", ".join(invalid_selection_widgets)}. If widget is'
            f' selected outside the menu, use widget.select(update_menu=True)'
        )

    # Apply max width column limit
    for col in range(self._used_columns):
        if self._column_max_width[col] is not None:
            column_widths[col] = min(column_widths[col], self._column_max_width[col])

    # If some columns were not used, set these widths to zero
    for col in range(self._used_columns, self._columns):
        column_widths.pop()
        del self._widget_columns[col]

    # If the total weight is less than the window width (so there's no horizontal
    # scroll), scale the columns. Only None column_max_widths and columns less
    # than the maximum are scaled
    sum_width_columns = sum(column_widths)
    max_width = self.get_width(inner=True)
    if 0 <= sum_width_columns < max_width and len(self._widgets) > 0:
        # First, scale columns to its maximum
        sum_contrib = []
        for col in range(self._used_columns):
            if self._column_max_width[col] is None:
                sum_contrib.append(0)
            elif column_widths[col] < self._column_max_width[col]:
                sum_contrib.append(self._column_max_width[col] - column_widths[col])
            else:
                sum_contrib.append(0)

        delta = max_width - sum(sum_contrib) - sum_width_columns
        if delta < 0:  # Scale contrib back
            scale = (max_width - sum_width_columns) / sum(sum_contrib)
            sum_contrib = [sum_contrib[i] * scale for i in range(len(sum_contrib))]

        # Increase to its maximums
        for col in range(self._used_columns):
            if sum_contrib[col] > 0:
                column_widths[col] += sum_contrib[col]

        # Scale column widths if None (unbounded columns absorb leftover width
        # proportionally to their current size)
        sum_width_columns = sum(column_widths)
        sum_contrib = []
        for col in range(self._used_columns):
            if self._column_max_width[col] is None:
                sum_contrib.append(column_widths[col])
            else:
                sum_contrib.append(0)
        delta = max_width - sum_width_columns
        if delta > 0:
            for col in range(self._used_columns):
                if sum_contrib[col] > 0:
                    column_widths[col] += delta * sum_contrib[col] / sum(sum_contrib)

        # Re-compute sum
        sum_width_columns = sum(column_widths)

        # If column width still 0, set all the column the same width (only used)
        # This only can happen if column_min_width was not set
        if sum_width_columns < max_width and self._used_columns >= 1:
            # The width it would be added for each column
            mod_width = max_width  # Available left width for non-max columns
            non_max = self._used_columns

            # First fill all maximum width columns
            for col in range(self._used_columns):
                if self._column_max_width[col] is not None:
                    column_widths[col] = min(self._column_max_width[col],
                                             max_width / self._used_columns)
                    mod_width -= column_widths[col]
                    non_max -= 1

            # Now, update the rest (non-maximum set)
            if non_max > 0:
                for col in range(self._used_columns):
                    if self._column_max_width[col] is None:
                        column_widths[col] = mod_width / non_max

    # Cast to int
    for col in range(self._used_columns):
        column_widths[col] = int(math.ceil(column_widths[col]))

    # Final column width
    total_col_width = sum(column_widths)
    if self._used_columns > 1:
        # Calculate column width scale (weights)
        column_weights = tuple(
            float(column_widths[i]) / max(total_col_width, 1) for i in range(self._used_columns))

        # Calculate the position of each column (center of each column span)
        self._column_pos_x = []
        cumulative = 0
        for i in range(self._used_columns):
            w = column_weights[i]
            self._column_pos_x.append(int(total_col_width * (cumulative + 0.5 * w)))
            cumulative += w
    else:
        self._column_pos_x = [total_col_width * 0.5]
        column_widths = [total_col_width]

    # Now updates the column width's
    self._column_widths = column_widths

    # Update title position
    self._menubar.set_position(*self.get_position())

    # Widget max/min position
    min_max_updated = False
    max_x, max_y = -1e8, -1e8
    min_x, min_y = 1e8, 1e8

    # Cache rects so each widget's rect is computed (rendered) at most once
    rects_cache: Dict[str, 'pygame.Rect'] = {}

    def get_rect(wid: 'Widget') -> 'pygame.Rect':
        """
        Get rect cache from widget.

        :param wid: Widget
        :return: Rect cache
        """
        try:
            return rects_cache[wid.get_id()]
        except KeyError:
            rects_cache[wid.get_id()] = wid.get_rect(render=True)
        return rects_cache[wid.get_id()]

    # Get menubar height, if fixed then move all widgets within area
    menubar_height = self._menubar.get_height() if self._menubar.fixed else 0

    # Update appended widgets
    for index in range(len(self._widgets)):
        widget = self._widgets[index]
        align = widget.get_alignment()
        margin = widget.get_margin()
        padding = widget.get_padding()
        selection_effect_margin = widget.get_selection_effect().get_margin()
        width = get_rect(widget).width

        if not widget.is_visible():
            widget.set_position(0, 0)
            continue

        # If widget within frame update col/row position
        if widget.get_frame() is not None:
            # noinspection PyProtectedMember
            widget._set_position_relative_to_frame(index)
            continue

        # Get column and row position
        col, row, _ = widget.get_col_row_index()

        # Calculate X position
        column_width = self._column_widths[col]
        selection_margin = 0
        dx = 0
        sm_left, sm_right = selection_effect_margin[1], selection_effect_margin[3]
        if align == ALIGN_CENTER:
            dx = -(width + sm_right - sm_left) / 2
        elif align == ALIGN_LEFT:
            selection_margin = sm_left
            dx = -column_width / 2 + selection_margin
        elif align == ALIGN_RIGHT:
            selection_margin = sm_right
            dx = column_width / 2 - width - selection_margin

        d_border = int(math.ceil(widget.get_border()[1] / 2))

        # self._column_pos_x points at the middle of each column
        x_coord = self._column_pos_x[col] + dx + margin[0] + padding[3]
        x_coord = max(selection_margin, x_coord)
        x_coord += max(0, self._widget_offset[0]) + d_border

        # Check if widget width exceeds column max width
        max_column_width = self._column_max_width[col]
        if max_column_width is not None and width > max_column_width:
            raise _MenuSizingException(
                f'{widget.get_class_id()} widget width ({width}) exceeds column {col + 1} max width ({max_column_width})'
            )

        # Calculate Y position
        y_sum = 1  # Compute the total height from the current row position to the top of the column
        for r_widget in self._widget_columns[col]:
            _, r, _ = r_widget.get_col_row_index()
            if r >= row:
                break
            # NOTE: ``not r_widget.get_frame() is not None`` parses as
            # ``not (r_widget.get_frame() is not None)``, i.e. "not in a frame"
            if r_widget.is_visible() and \
                    not r_widget.is_floating() and \
                    not r_widget.get_frame() is not None:
                y_sum += get_rect(r_widget).height  # Height
                y_sum += r_widget.get_margin()[1]  # Vertical margin (bottom)

                # If no widget is before add the selection effect
                y_sel_h = r_widget.get_selection_effect().get_margin()[0]
                if r == 0 and self._widget_offset[1] <= y_sel_h:
                    if r_widget.is_selectable:
                        y_sum += y_sel_h - self._widget_offset[1]

        # If the widget offset is zero, then add the selection effect to the height
        # of the widget to avoid visual glitches
        y_sel_h = widget.get_selection_effect().get_margin()[0]
        if y_sum == 1 and self._widget_offset[1] <= y_sel_h:  # No widget is before
            if widget.is_selectable:  # Add top margin
                y_sum += y_sel_h - self._widget_offset[1]

        y_coord = max(0, self._widget_offset[1]) + y_sum + padding[0] + menubar_height

        # If the widget is floating and has origin-position
        # noinspection PyProtectedMember
        if widget.is_floating() and widget._floating_origin_position:
            widget.set_position(
                x=max(0, self._widget_offset[0]) + padding[3],
                y=menubar_height + padding[0] + d_border)
            continue

        # Update the position of the widget
        widget.set_position(x_coord, y_coord)

        # Add the widget translation to the widget for computing the min/max position. This
        # feature does not work as intended as there's edge cases not covered, and centering makes
        # the translation more difficult
        # tx, ty = widget.get_translate()
        tx, ty = 0, 0

        # Update max/min position, minus padding
        min_max_updated = True
        max_x = max(max_x, x_coord + width - padding[1] + tx + sm_right)  # minus right padding
        max_y = max(max_y, y_coord + get_rect(widget).height - padding[2] + ty)  # minus bottom padding
        min_x = min(min_x, x_coord - padding[3] - sm_left)
        min_y = min(min_y, y_coord - padding[0])

    # Update position
    if min_max_updated:
        self._widget_max_position = (max_x, max_y)
        self._widget_min_position = (min_x, min_y)
    else:
        self._widget_max_position = (0, 0)
        self._widget_min_position = (0, 0)

    self._stats.position_update += 1
def _build_widget_surface(self) -> None:
    """
    Create the surface used to draw widgets according the required width and
    height.

    The surface dimensions depend on whether the widgets overflow the Menu on
    either axis; scrollbar thickness is subtracted so a scrollbar on one axis
    does not needlessly trigger one on the other.
    """
    self._stats.build_surface += 1
    t0 = time.time()

    # Update internals
    self._update_selection_if_hidden()
    self._update_widget_position()

    # A non-fixed menubar consumes part of the menu height
    menubar_height = self._menubar.get_height() if not self._menubar.fixed else 0
    max_x, max_y = self._widget_max_position

    # Get scrollbars size
    sx, sy = self._get_scrollbar_thickness()

    # Remove the thick of the scrollbar to avoid displaying a horizontal one
    # If overflow on both axis
    if max_x > self._width - sy and max_y > self._height - sx - menubar_height:
        width, height = max_x, max_y
        if not self._mouse_visible:
            self._mouse_visible = True
    # If horizontal overflow
    elif max_x > self._width - sy:
        width, height = max_x, self._height - menubar_height - sx
        self._mouse_visible = self._mouse_visible_default
    # If vertical overflow
    elif max_y > self._height - sx - menubar_height:
        width, height = self._width - sy, max_y
        if not self._mouse_visible:
            self._mouse_visible = True
    # No overflow
    else:
        width, height = self._width, self._height - menubar_height
        self._mouse_visible = self._mouse_visible_default

    # Checks overflow: if overflow is disabled on an axis, clamp to menu size
    if not self._overflow[0]:
        width = self._width
    if not self._overflow[1]:
        height = self._height - menubar_height

    # Adds ScrollArea margin
    width += self._scrollarea_margin[0]
    height += self._scrollarea_margin[1]

    # Cast to int
    width = int(width)
    height = int(height)

    # Get the previous surface if the width/height is the same (avoids
    # re-allocating an identical surface on every rebuild)
    if width == self._widgets_surface_last[0] and \
            height == self._widgets_surface_last[1]:
        self._widgets_surface = self._widgets_surface_last[2]
    else:
        self._widgets_surface = make_surface(width, height)
        self._widgets_surface_last = (width, height, self._widgets_surface)

    # Set position
    self._scrollarea.set_world(self._widgets_surface)
    self._scrollarea.set_position(*self.get_position())

    # Check if the scrollbars changed; if so, re-render once. The counter in
    # _last_scroll_thickness[1] prevents an endless rebuild loop
    sx, sy = self._get_scrollbar_thickness()
    if (sx, sy) != self._last_scroll_thickness[0] and \
            self._last_scroll_thickness[1] == 0:
        self._last_scroll_thickness[0] = (sx, sy)
        self._last_scroll_thickness[1] += 1
        self._widgets_surface_need_update = True
        self._render()
    else:
        self._last_scroll_thickness[1] = 0

    # Update times
    dt = time.time() - t0
    self._stats.total_building_time += dt
    self._stats.last_build_surface_time = dt
def _check_id_duplicated(self, widget_id: str) -> None:
"""
Check if widget ID is duplicated. Throws ``IndexError`` if the index is
duplicated.
:param widget_id: New widget ID
"""
assert isinstance(widget_id, str)
for widget in self._widgets:
if widget.get_id() == widget_id:
raise IndexError(
f'widget id "{widget_id}" already exists on the current menu ({widget.get_class_id()})'
)
def _close(self) -> bool:
    """
    Execute close callbacks and disable the Menu, only if ``onclose`` is not
    None (or :py:mod:`pygame_menu.events.NONE`).

    :return: ``True`` if the Menu has executed the ``onclose`` callback
    """
    onclose = self._onclose

    # Nothing to do when no close action was configured
    if onclose is None or onclose == _events.NONE:
        return False

    # Closing disables the Menu
    self.disable()

    # If action is an event, dispatch on its type
    if _events.is_event(onclose):
        if onclose == _events.BACK:
            self.reset(1)
        elif onclose == _events.CLOSE:
            pass
        elif onclose == _events.EXIT:
            self._exit()
        elif onclose == _events.RESET:
            self.full_reset()
    # If action is callable (function); try the 1-arg form first, fall back
    # to the no-arg form
    elif callable(onclose):
        try:
            onclose(self)
        except TypeError:
            onclose()
    return True
def close(self) -> bool:
    """
    Closes the **current** Menu firing ``onclose`` callback. If ``callback=None``
    this method does nothing.

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().reset(...)``.

    :return: ``True`` if the Menu has executed the ``onclose`` callback
    """
    current = self._current
    if not self.is_enabled():
        current._runtime_errors.throw(
            current._runtime_errors.close, 'menu already closed'
        )
    return current._close()
def _get_depth(self) -> int:
"""
Return the Menu depth.
:return: Menu depth
"""
prev = self._top._prev
depth = 0
if prev is not None:
while True:
if prev is not None:
prev = prev[0]
depth += 1
else:
break
return depth
def disable(self) -> 'Menu':
    """
    Disables the Menu *(doesn't check events and draw on the surface)*.

    .. note::

        This method does not fire ``onclose`` callback. Use ``Menu.close()``
        instead.

    :return: Self reference
    """
    # Force pending widget mouseleave events before the menu stops updating
    check_widget_mouseleave(force=True)
    # The enabled flag lives on the top-level Menu of the chain
    self._top._enabled = False
    return self
def set_absolute_position(self, position_x: NumberType, position_y: NumberType) -> 'Menu':
    """
    Set the absolute Menu position in the window.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param position_x: Left position of the window
    :param position_y: Top position of the window
    :return: Self reference
    """
    assert isinstance(position_x, NumberInstance) and isinstance(position_y, NumberInstance)
    self._position = (position_x, position_y)
    self._widgets_surface = None  # This forces an update of the widgets
    return self
def set_relative_position(self, position_x: NumberType, position_y: NumberType) -> 'Menu':
    """
    Set the Menu position relative to the window, as percentages.

    .. note::

        - Menu left position (x) must be between ``0`` and ``100``, if ``0``
          the margin is at the left of the window, if ``100`` the Menu is at
          the right of the window.
        - Menu top position (y) must be between ``0`` and ``100``, if ``0``
          the margin is at the top of the window, if ``100`` the margin is at
          the bottom of the window.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param position_x: Left position of the window
    :param position_y: Top position of the window
    :return: Self reference
    """
    assert isinstance(position_x, NumberInstance) and isinstance(position_y, NumberInstance)
    assert 0 <= position_x <= 100 and 0 <= position_y <= 100

    # Convert percentage to a fraction of the free space on each axis
    fraction_x = float(position_x) / 100
    fraction_y = float(position_y) / 100
    window_width, window_height = self._window_size
    self._position = (int((window_width - self._width) * fraction_x),
                      int((window_height - self._height) * fraction_y))
    self._widgets_surface = None  # This forces an update of the widgets
    return self
def center_content(self) -> 'Menu':
    """
    Centers the content of the Menu vertically. This action rewrites ``widget_offset``.

    .. note::

        If the height of the widgets is greater than the height of the Menu,
        the drawing region will cover all Menu inner surface.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Self reference
    """
    self._stats.center_content += 1

    # Empty menu: get_widget_max would return an immense value, so bail out
    if not self._widgets:
        self._widget_offset[1] = 0
        return self
    if self._widgets_surface is None:
        self._update_widget_position()  # For position (max/min)

    available = self.get_height(inner=True)
    widget_height = self.get_height(widget=True)

    # Widgets taller than the menu: there's nothing to center
    if widget_height >= available:
        if self._widget_offset[1] != 0:
            self._widgets_surface = None
        self._widget_offset[1] = 0
        return self

    new_offset = int(max(float(available - widget_height) / 2, 0))
    # Only rebuild when the offset meaningfully changed (> 1 px)
    if abs(new_offset - self._widget_offset[1]) > 1:
        self._widget_offset[1] = new_offset
        self._widgets_surface = None  # Rebuild on the next draw
    return self
def _get_scrollbar_thickness(self) -> Tuple2IntType:
    """
    Return the scrollbar thickness on the x-axis and the y-axis
    (horizontal and vertical scrollbars).

    :return: Scrollbar thickness in px
    """
    horizontal = self._scrollarea.get_scrollbar_thickness(ORIENTATION_HORIZONTAL)
    vertical = self._scrollarea.get_scrollbar_thickness(ORIENTATION_VERTICAL)
    return horizontal, vertical
def get_width(self, inner: bool = False, widget: bool = False, border: bool = False) -> int:
    """
    Get the Menu width.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param inner: If ``True`` returns the available width (menu width minus scroll if visible)
    :param widget: If ``True`` returns the total width used by the widgets
    :param border: If ``True`` add the menu border width. Only applied if both ``inner`` and ``widget`` are ``False``
    :return: Width in px
    """
    if widget:
        return int(self._widget_max_position[0] - self._widget_min_position[0])
    if inner:
        # Subtract the vertical scrollbar thickness
        return int(self._width - self._get_scrollbar_thickness()[1])
    bw = 2 * self._scrollarea.get_border_size()[0] if border else 0
    return int(self._width) + bw
def get_height(self, inner: bool = False, widget: bool = False, border: bool = False) -> int:
    """
    Get the Menu height.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param inner: If ``True`` returns the available height (menu height minus scroll and menubar)
    :param widget: If ``True`` returns the total height used by the widgets
    :param border: If ``True`` add the menu border height. Only applied if both ``inner`` and ``widget`` are ``False``
    :return: Height in px
    """
    if widget:
        return int(self._widget_max_position[1] - self._widget_min_position[1])
    if inner:
        # Subtract the menubar and the horizontal scrollbar thickness
        return int(self._height - self._menubar.get_height() - self._get_scrollbar_thickness()[0])
    bh = 2 * self._scrollarea.get_border_size()[1] if border else 0
    return int(self._height) + bh
def get_size(self, inner: bool = False, widget: bool = False, border: bool = False) -> Vector2IntType:
    """
    Return the Menu size as a tuple of (width, height) in px.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param inner: If ``True`` returns the available size (width, height) (menu height minus scroll and menubar)
    :param widget: If ``True`` returns the total (width, height) used by the widgets
    :param border: If ``True`` add the border size to the dimensions (width, height). Only applied if both ``inner`` and ``widget`` are ``False``
    :return: Tuple of (width, height) in px
    """
    width = self.get_width(inner=inner, widget=widget, border=border)
    height = self.get_height(inner=inner, widget=widget, border=border)
    return width, height
def render(self) -> 'Menu':
    """
    Force the **current** Menu to render. Useful to force widget update.

    .. note::

        This method should not be called if the Menu is being drawn as this
        method is called by :py:meth:`pygame_menu.menu.Menu.draw`

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().render(...)``

    :return: Self reference **(current)**
    """
    current = self._current
    # Invalidate the widget surface so _render rebuilds it
    current._widgets_surface = None
    current._render()
    current._stats.render_public += 1
    return self
def _render(self) -> bool:
"""
Menu rendering.
:return: ``True`` if the surface has changed (if it was ``None``)
"""
t0 = time.time()
changed = False
if self._widgets_surface_need_update:
self._widgets_surface = None
if self._widgets_surface is None:
self._widgets_surface_need_update = False
if self._auto_centering:
self.center_content()
self._build_widget_surface()
self._stats.render_private += 1
changed = True
self._stats.total_rendering_time += time.time() - t0
return changed
def draw(self, surface: 'pygame.Surface', clear_surface: bool = False) -> 'Menu':
    """
    Draw the **current** Menu into the given surface.

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().draw(...)``

    :param surface: Pygame surface to draw the Menu
    :param clear_surface: Clear surface using theme ``surface_clear_color``
    :return: Self reference **(current)**
    """
    assert isinstance(surface, pygame.Surface)
    assert isinstance(clear_surface, bool)

    # Drawing a disabled Menu is a runtime error (may be downgraded to a no-op
    # depending on the _runtime_errors policy)
    if not self.is_enabled():
        self._current._runtime_errors.throw(self._current._runtime_errors.draw, 'menu is not enabled')
        return self._current
    if self._current._disable_draw:
        return self._current

    # Render menu; if True, the surface widget has changed, thus cache should
    # change if enabled
    render = self._current._render()

    # Updates title of the pygame window if the theme requests it
    if self._current._theme.title_updates_pygame_display and \
            pygame.display.get_caption()[0] != self._current.get_title():
        pygame.display.set_caption(self._current.get_title())

    # Clear surface
    if clear_surface:
        surface.fill(self._current._theme.surface_clear_color)

    # Call background function (set from mainloop); the first tuple item says
    # whether the callable accepts the Menu as argument
    if self._top._background_function[1] is not None:
        if self._top._background_function[0]:
            self._top._background_function[1](self._current)
        else:
            self._top._background_function[1]()

    # Draw the prev decorator
    self._current._decorator.draw_prev(surface)

    # Draw widgets, update cache if enabled. Widgets are redrawn only when the
    # cache is disabled, the render changed, or the cache was invalidated
    if not self._current._widget_surface_cache_enabled or \
            (render or self._current._widget_surface_cache_need_update):

        # This should be updated before drawing widgets. As widget
        # draw may trigger surface cache updating. Don't move this
        # line or unexpected errors may occur
        self._current._widget_surface_cache_need_update = False

        # Fill the scrolling surface (clear previous state)
        self._current._widgets_surface.fill((255, 255, 255, 0))

        # Call scrollarea draw decorator. This must be done before filling the
        # surface. ScrollArea post decorator is drawn on _scroll.draw(surface) call
        scrollarea_decorator = self._current._scrollarea.get_decorator()
        scrollarea_decorator.force_cache_update()
        scrollarea_decorator.draw_prev(self._current._widgets_surface)

        # Iterate through widgets and draw them. The selected widget is kept
        # aside so its "after" pass can be drawn on top of everything else
        selected_widget_draw: Tuple[Optional['Widget'], Optional['pygame.Surface']] = (None, None)
        for widget in self._current._widgets:
            # Widgets within frames are not drawn as it's frame draw these widgets
            if widget.get_frame() is not None:
                continue
            if widget.is_selected():
                selected_widget_draw = widget, self._current._widgets_surface
            widget.draw(self._current._widgets_surface)
            if isinstance(widget, Frame):
                # A frame may report the selected widget it contains
                f_selected_widget = widget.selected_widget_draw
                if f_selected_widget[0] is not None:
                    selected_widget_draw = f_selected_widget

        if selected_widget_draw[0] is not None:
            selected_widget_draw[0].draw_after_if_selected(selected_widget_draw[1])

        self._current._stats.draw_update_cached += 1

    self._current._scrollarea.draw(surface)
    self._current._menubar.draw(surface)

    # Draw focus on selected if the widget is active
    self._current._draw_focus_widget(surface, self._current.get_selected_widget())
    self._current._decorator.draw_post(surface)
    self._current._stats.draw += 1

    # Update widget mouseover status while the mainloop is running
    if self._current._mainloop:
        check_widget_mouseleave()

    return self._current
def _draw_focus_widget(
        self,
        surface: 'pygame.Surface',
        widget: Optional['Widget'],
        force: bool = False
) -> Optional[Dict[int, Tuple4Tuple2IntType]]:
    """
    Draw the focus background from a given widget. Widget must be selectable,
    active, selected. Not all widgets requests the active status, then focus
    may not be drawn.

    :param surface: Pygame surface to draw the Menu
    :param widget: Focused widget
    :param force: If ``True`` forces focus without any checks
    :return: The focus region, ``None`` if the focus could not be possible
    """
    assert isinstance(surface, pygame.Surface)
    assert isinstance(widget, (Widget, type(None)))

    # A widget may demand the focus itself while active
    force = force or (widget is not None and widget.active and widget.force_menu_draw_focus)
    if not force and (widget is None
                      or not widget.active
                      or not widget.is_selectable
                      or not widget.is_selected()
                      or not (self._mouse_motion_selection or self._touchscreen_motion_selection)
                      or not widget.is_visible()):
        return
    window_width, window_height = self._window_size

    self._render()  # Surface may be none, then update the positioning
    rect = widget.get_focus_rect()

    # Apply selection effect padding around the focus rect
    rect = widget.get_selection_effect().inflate(rect)
    if rect.width == 0 or rect.height == 0:
        return

    # Corner coordinates of the focused region
    x1, y1, x2, y2 = rect.topleft + rect.bottomright
    x1 = int(x1)
    y1 = int(y1)
    x2 = int(x2)
    y2 = int(y2)

    coords = {}
    if abs(y1 - y2) <= 4 or abs(x1 - x2) <= 4:
        # If the area of the selected widget is too small, draw focus over the entire menu
        # .------------------.
        # |                  |
        # |        1         |
        # |                  |
        # .------------------.
        coords[1] = (0, 0), (window_width, 0), (window_width, window_height), (0, window_height)
    else:
        # Draw 4 areas around the focused rect (the hole in the middle):
        # .------------------.
        # |________1_________|
        # |  2  |******|  3  |
        # |_____|******|_____|
        # |        4         |
        # .------------------.
        coords[1] = (0, 0), (window_width, 0), (window_width, y1 - 1), (0, y1 - 1)
        coords[2] = (0, y1), (x1 - 1, y1), (x1 - 1, y2 - 1), (0, y2 - 1)
        coords[3] = (x2, y1), (window_width, y1), (window_width, y2 - 1), (x2, y2 - 1)
        coords[4] = (0, y2), (window_width, y2), (window_width, window_height), (0, window_height)

    # Fill each region with the theme focus color
    for area in coords:
        gfxdraw.filled_polygon(surface, coords[area], self._theme.focus_background_color)
    return coords
def set_controller(self, controller: 'Controller') -> 'Menu':
    """
    Replace the controller object used by this Menu.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param controller: Controller
    :return: Self reference
    """
    self._ctrl = controller
    return self
def enable(self) -> 'Menu':
    """
    Enable the Menu so it can check events and be drawn.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Self reference
    """
    self._top._enabled = True
    return self
def toggle(self) -> 'Menu':
    """
    Flip the Menu between enabled and disabled.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Self reference
    """
    top = self._top
    top._enabled = not top._enabled
    return self
def _exit(self) -> None:
    """
    Internal exit function: disables the Menu, quits pygame and terminates
    the interpreter. Does nothing if ``self._disable_exit`` is set.
    """
    if self._disable_exit:
        return
    self.disable()
    pygame.quit()
    try:
        sys.exit(0)
    except SystemExit:
        # sys.exit raises SystemExit which could be swallowed upstream;
        # os._exit terminates the process immediately without cleanup
        # noinspection PyUnresolvedReferences,PyProtectedMember
        os._exit(1)
    # This should be unreachable
    exit(0)
def is_enabled(self) -> bool:
    """
    Return whether the Menu is currently enabled.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :return: Menu enabled status
    """
    return self._top._enabled
def _sort_update_frames(self) -> None:
"""
Sort the update frames (frames which receive updates).
"""
if len(self._update_frames) <= 1:
return
# Sort frames by depth
widgets: List[Tuple[int, 'Frame']] = []
for w in self._update_frames:
assert isinstance(w, Frame)
widgets.append((-w.get_frame_depth(), w))
widgets.sort(key=lambda x: x[0])
# Sort frames with same depth by index
frame_depths: Dict[int, List[Tuple[int, 'Frame']]] = {}
for w in widgets:
w_depth = w[0]
if w_depth not in frame_depths.keys():
frame_depths[w_depth] = []
if w[1] in self._widgets:
frame_depths[w_depth].append((self._widgets.index(w[1]), w[1]))
else:
frame_depths[w_depth].append((0, w[1]))
self._update_frames = []
for d in frame_depths.keys():
frame_depths[d].sort(key=lambda x: x[0])
for w in frame_depths[d]:
self._update_frames.append(w[1])
def _move_selected_left_right(self, pos: int, apply_sound: bool = False) -> bool:
"""
Move the selected widget index to left/right position (column support).
:param pos: If ``+1`` selects right column, ``-1`` left column
:param apply_sound: Apply sound on widget selection
:return: ``True`` if the widget changed
"""
if not (pos == 1 or pos == -1):
raise ValueError('pos must be +1 or -1')
def _default() -> bool:
if pos == -1:
return self._select(0, 1, SELECT_KEY, apply_sound)
return self._select(-1, -1, SELECT_KEY, apply_sound)
if self._used_columns > 1:
# Get current widget
sel_widget = self.get_selected_widget()
# No widget is selected
if sel_widget is None:
return _default()
# Get column row position
col, row, _ = sel_widget.get_col_row_index()
# Move column to position
col = (col + pos) % self._used_columns
# Get the first similar row in that column, if no widget is found
# then select the first widget
for widget in self._widget_columns[col]:
c, r, i = widget.get_col_row_index()
if r == row:
return self._select(i, pos, SELECT_KEY, apply_sound)
# If no widget is in that column
if len(self._widget_columns[col]) == 0:
return _default()
# If the number of rows in that column is less than current,
# select the first one
first_widget = self._widget_columns[col][0]
_, _, i = first_widget.get_col_row_index()
return self._select(i, pos, SELECT_KEY, apply_sound)
else:
return _default()
def _handle_joy_event(self, apply_sound: bool = False) -> bool:
    """
    Handle the stored joystick event bitmask (``self._joy_event``), moving the
    widget selection in the corresponding direction.

    :param apply_sound: Apply sound on widget selection
    :return: ``True`` if widget changed
    """
    if self._joy_event & JOY_EVENT_UP:
        return self._select(self._index - 1, -1, SELECT_KEY, apply_sound)
    if self._joy_event & JOY_EVENT_DOWN:
        return self._select(self._index + 1, 1, SELECT_KEY, apply_sound)
    if self._joy_event & JOY_EVENT_LEFT:
        return self._move_selected_left_right(-1, apply_sound)
    if self._joy_event & JOY_EVENT_RIGHT:
        return self._move_selected_left_right(1, apply_sound)
    # Fix: previously fell through returning None; honor the declared bool
    # return type when no joy-event bit is set
    return False
def _up(self, apply_sound: bool = False) -> bool:
    """
    Process up key event, moving the selection to the next widget index.

    :param apply_sound: Apply selection sound within ``_select``
    :return: ``True`` if widget selected
    """
    if not apply_sound:
        self._sound.play_key_add()
    target_index = self._index + 1
    return self._select(target_index, 1, SELECT_KEY, apply_sound)
def _down(self, apply_sound: bool = False) -> bool:
    """
    Process down key event, moving the selection to the previous widget index.

    :param apply_sound: Apply selection sound within ``_select``
    :return: ``True`` if widget selected
    """
    if not apply_sound:
        self._sound.play_key_add()
    target_index = self._index - 1
    return self._select(target_index, -1, SELECT_KEY, apply_sound)
def _left(self, apply_sound: bool = False) -> bool:
    """
    Process left key event.

    :param apply_sound: Apply selection sound
    :return: ``True`` if widget selected
    """
    if not apply_sound:
        self._sound.play_key_add()

    # Get frame properties; short-circuit `and` guards the None cases
    selected_widget = self.get_selected_widget()
    selected_widget_in_frame_horizontal = selected_widget is not None and \
        selected_widget.get_frame() is not None and \
        selected_widget.get_frame().horizontal
    selected_widget_first_in_frame = selected_widget_in_frame_horizontal and \
        selected_widget.get_frame().first_index == self._index

    # NOTE(review): this method mixes ``self`` and ``self._current`` —
    # confirm both point to the same Menu whenever it is invoked

    # If the selection is inside a horizontal frame and not at its first
    # widget, move one position left within the frame
    if selected_widget_in_frame_horizontal and not selected_widget_first_in_frame:
        return self._current._select(self._current._index - 1, -1, SELECT_KEY, False)
    # Otherwise move to the previous column, if the Menu has several
    elif self._current._used_columns > 1:
        return self._current._move_selected_left_right(-1)
    return False
def _right(self, apply_sound: bool = False) -> bool:
    """
    Process right key event.

    :param apply_sound: Apply selection sound
    :return: ``True`` if widget selected
    """
    if not apply_sound:
        self._sound.play_key_add()

    # Get frame properties; short-circuit `and` guards the None cases
    selected_widget = self.get_selected_widget()
    selected_in_frame_horizontal = selected_widget is not None and \
        selected_widget.get_frame() is not None and \
        selected_widget.get_frame().horizontal
    selected_last_in_frame = selected_in_frame_horizontal and \
        selected_widget.get_frame().last_index == self._current._index

    # NOTE(review): this method mixes ``self`` and ``self._current`` —
    # confirm both point to the same Menu whenever it is invoked

    # If the selection is inside a horizontal frame and not at its last
    # widget, move one position right within the frame
    if selected_in_frame_horizontal and not selected_last_in_frame:
        return self._current._select(self._current._index + 1, 1, SELECT_KEY, False)
    # Otherwise move to the next column, if the Menu has several
    elif self._current._used_columns > 1:
        return self._current._move_selected_left_right(1)
    return False
def get_last_update_mode(self) -> List[str]:
    """
    Return the update mode of the **current** Menu.

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().update(...)``.

    :return: Returns a string that represents the update status, see ``pygame_menu.events``. Some also indicate which widget updated in the format ``EVENT_NAME#widget_id``
    """
    modes = self._current._last_update_mode
    if not modes:
        return [_events.MENU_LAST_NONE]
    return modes
def update(self, events: EventVectorType) -> bool:
    """
    Update the status of the Menu using external events. The update event is
    applied only on the **current** Menu.

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().update(...)``.

    :param events: List of pygame events
    :return: ``True`` if the menu updated (or a widget)
    """
    # Check events
    assert isinstance(events, list)
    self._current._last_update_mode = []

    # If menu is not enabled
    if not self.is_enabled():
        self._current._runtime_errors.throw(self._current._runtime_errors.update,
                                            'menu is not enabled')
    self._current._stats.update += 1

    # Call onupdate callback; the callable may accept (events, menu) or nothing
    if self._current._onupdate is not None:
        try:
            self._current._onupdate(events, self._current)
        except TypeError:
            self._current._onupdate()
    if self._current._disable_update:
        self._current._last_update_mode.append(_events.MENU_LAST_DISABLE_UPDATE)
        return False

    # If any widget status changes, set the status as True
    updated = False

    # Update mouse
    pygame.mouse.set_visible(self._current._mouse_visible)

    mouse_motion_event = None

    # Frame updates are skipped while the selected widget is active and wants
    # the focus (motion selection, or explicit force_menu_draw_focus)
    selected_widget = self._current.get_selected_widget()
    selected_widget_disable_frame_update = \
        (False if selected_widget is None else selected_widget.active) and \
        self._current._mouse_motion_selection or \
        selected_widget is not None and selected_widget.active and \
        selected_widget.force_menu_draw_focus
    selected_widget_scrollarea = None if selected_widget is None else selected_widget.get_scrollarea()

    # First, check update frames
    frames_updated = False
    if not selected_widget_disable_frame_update:
        for frame in self._current._update_frames:
            frames_updated = frames_updated or frame.update(events)

    # Update widgets on update list
    for widget in self._current._update_widgets:
        widget.update(events)

    # Frames have updated
    if frames_updated:
        self._current._last_update_mode.append(_events.MENU_LAST_FRAMES)
        updated = True

    # Update scroll bars
    elif not selected_widget_disable_frame_update and self._current._scrollarea.update(events):
        self._current._last_update_mode.append(_events.MENU_LAST_SCROLL_AREA)
        updated = True

    # Update the menubar, it may change the status of the widget because
    # of the button back/close
    elif self._current._menubar.update(events):
        self._current._last_update_mode.append(_events.MENU_LAST_MENUBAR)
        updated = True

    # Check selected widget
    elif selected_widget is not None and self._current._widget_selected_update and \
            selected_widget.update(events):
        self._current._last_update_mode.append(
            f'{_events.MENU_LAST_SELECTED_WIDGET_EVENT}#{selected_widget.get_id()}'
        )
        updated = True

    # Check others
    else:
        # If mouse motion enabled, add the current mouse position to the events list
        if self._current._mouse and self._current._mouse_motion_selection:
            events.append(mouse_motion_current_mouse_position())

        for event in events:
            # User closes window
            close_altf4 = event.type == pygame.KEYDOWN and event.key == pygame.K_F4 and (
                event.mod == pygame.KMOD_LALT or event.mod == pygame.KMOD_RALT)
            if event.type == _events.PYGAME_QUIT or close_altf4 or event.type == _events.PYGAME_WINDOWCLOSE:
                self._current._last_update_mode.append(_events.MENU_LAST_QUIT)
                self._current._exit()
                return True

            # User press key
            elif event.type == pygame.KEYDOWN and self._current._keyboard:
                # Check key event is valid
                if self._keyboard_ignore_nonphysical and not check_key_pressed_valid(event):
                    continue

                if self._ctrl.move_down(event, self):
                    if self._current._down(apply_sound=True):
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_DOWN)
                        updated = True
                        break
                elif self._ctrl.move_up(event, self):
                    if self._current._up(apply_sound=True):
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_UP)
                        updated = True
                        break
                elif self._ctrl.left(event, self):
                    if self._current._left(apply_sound=True):
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_LEFT)
                        updated = True
                        break
                elif self._ctrl.right(event, self):
                    if self._current._right(apply_sound=True):
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_RIGHT)
                        updated = True
                        break
                elif self._ctrl.back(event, self) and self._top._prev is not None:
                    self._current._sound.play_close_menu()
                    self.reset(1)  # public, do not use _current
                    self._current._last_update_mode.append(_events.MENU_LAST_MENU_BACK)
                    updated = True
                elif self._ctrl.close_menu(event, self):
                    self._current._sound.play_close_menu()
                    if self._current._close():
                        self._current._last_update_mode.append(_events.MENU_LAST_MENU_CLOSE)
                        updated = True

            # User moves hat joystick
            elif event.type == pygame.JOYHATMOTION and self._current._joystick:
                if self._ctrl.joy_up(event, self):
                    if self._current._down(apply_sound=True):
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_DOWN)
                        updated = True
                        break
                elif self._ctrl.joy_down(event, self):
                    if self._current._up(apply_sound=True):
                        # Fixed: was assigning a bare string to _last_update_mode,
                        # clobbering the list; append keeps the List[str] contract
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_UP)
                        updated = True
                        break
                elif self._ctrl.joy_left(event, self):
                    if self._current._left(apply_sound=True):
                        # Fixed: append instead of assignment (see above)
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_LEFT)
                        updated = True
                        break
                elif self._ctrl.joy_right(event, self):
                    if self._current._right(apply_sound=True):
                        # Fixed: append instead of assignment (see above)
                        self._current._last_update_mode.append(_events.MENU_LAST_MOVE_RIGHT)
                        updated = True
                        break

            # User moves joy axis motion
            elif event.type == pygame.JOYAXISMOTION and self._current._joystick and \
                    hasattr(event, 'axis'):
                prev = self._current._joy_event
                self._current._joy_event = 0
                if self._ctrl.joy_axis_y_up(event, self):
                    self._current._joy_event |= JOY_EVENT_UP
                elif self._ctrl.joy_axis_y_down(event, self):
                    self._current._joy_event |= JOY_EVENT_DOWN
                elif self._ctrl.joy_axis_x_left(event, self) and self._current._used_columns > 1:
                    self._current._joy_event |= JOY_EVENT_LEFT
                elif self._ctrl.joy_axis_x_right(event, self) and self._current._used_columns > 1:
                    self._current._joy_event |= JOY_EVENT_RIGHT
                if self._current._joy_event:
                    sel = self._current._handle_joy_event(True)
                    # Repeating event uses the repeat delay; a new one waits longer
                    if self._current._joy_event == prev:
                        pygame.time.set_timer(self._current._joy_event_repeat, self._ctrl.joy_repeat)
                    else:
                        pygame.time.set_timer(self._current._joy_event_repeat, self._ctrl.joy_delay)
                    if sel:
                        self._current._last_update_mode.append(_events.MENU_LAST_JOY_REPEAT)
                        updated = True
                        break
                else:
                    pygame.time.set_timer(self._current._joy_event_repeat, 0)

            # User repeats previous joy event input
            elif event.type == self._current._joy_event_repeat:
                if self._current._joy_event:
                    sel = self._current._handle_joy_event(True)
                    pygame.time.set_timer(self._current._joy_event_repeat, self._ctrl.joy_repeat)
                    if sel:
                        self._current._last_update_mode.append(_events.MENU_LAST_JOY_REPEAT)
                        updated = True
                        break
                else:
                    pygame.time.set_timer(self._current._joy_event_repeat, 0)

            # Select widget by clicking
            elif event.type == pygame.MOUSEBUTTONDOWN and self._current._mouse and \
                    event.button in (1, 2, 3):  # Don't consider the mouse wheel (button 4 & 5)

                # If the mouse motion selection is disabled then select a widget by clicking
                if not self._current._mouse_motion_selection:
                    sel = False
                    for index in range(len(self._current._widgets)):
                        widget = self._current._widgets[index]
                        if isinstance(widget, Frame):  # Frame does not accept click
                            continue
                        if widget.is_selectable and widget.is_visible() and \
                                widget.get_scrollarea().collide(widget, event):
                            sel = self._current._select(index, 1, SELECT_MOUSE_BUTTON_DOWN, True)
                            break
                    if sel:
                        self._current._last_update_mode.append(
                            f'{_events.MENU_LAST_WIDGET_SELECT}#{self._current.get_selected_widget().get_id()}'
                        )
                        updated = True
                        break

                # If mouse motion selection, clicking will disable the active state
                # only if the user clicked outside the widget
                else:
                    if selected_widget is not None and selected_widget.active:
                        focus_rect = selected_widget.get_focus_rect()
                        if not selected_widget_scrollarea.collide(focus_rect, event):
                            selected_widget.active = False
                            selected_widget.render()  # Some widgets need to be rendered
                            self._current._last_update_mode.append(
                                f'{_events.MENU_LAST_WIDGET_DISABLE_ACTIVE_STATE}#{selected_widget.get_id()}'
                            )
                            updated = True
                            break

            # Mouse enters or leaves the window
            elif event.type == pygame.ACTIVEEVENT and hasattr(event, 'gain'):
                if event.gain == 1:  # Enter
                    if self._current._onwindowmouseover is not None:
                        try:
                            self._current._onwindowmouseover(self._current)
                        except TypeError:
                            self._current._onwindowmouseover()
                    check_widget_mouseleave()
                    self._current._last_update_mode.append(_events.MENU_LAST_MOUSE_ENTER_WINDOW)
                else:  # Leave
                    if self._current._onwindowmouseleave is not None:
                        try:
                            self._current._onwindowmouseleave(self._current)
                        except TypeError:
                            self._current._onwindowmouseleave()
                    if self._current._mouseover:
                        self._current._mouseover = False
                        if self._current._onmouseleave is not None:
                            try:
                                self._current._onmouseleave(self._current, event)
                            except TypeError:
                                self._current._onmouseleave()
                    check_widget_mouseleave(force=True)
                    self._current._last_update_mode.append(_events.MENU_LAST_MOUSE_LEAVE_WINDOW)

            # Mouse motion. It changes the cursor of the mouse if enabled
            elif event.type == pygame.MOUSEMOTION and self._current._mouse:
                mouse_motion_event = event

                # Check if mouse over menu
                if not self._current._mouseover:
                    if self._current.collide(event):
                        self._current._mouseover = True
                        if self._current._onmouseover is not None:
                            try:
                                self._current._onmouseover(self._current, event)
                            except TypeError:
                                self._current._onmouseover()
                        self._current._last_update_mode.append(_events.MENU_LAST_MOUSE_ENTER_MENU)
                else:
                    if not self._current.collide(event):
                        self._current._mouseover = False
                        if self._current._onmouseleave is not None:
                            try:
                                self._current._onmouseleave(self._current, event)
                            except TypeError:
                                self._current._onmouseleave()
                        mouse_motion_event = None
                        check_widget_mouseleave(force=True)
                        self._current._last_update_mode.append(_events.MENU_LAST_MOUSE_LEAVE_MENU)

                # If selected widget is active then motion should not select
                # or change mouseover widget
                if self._current._mouse_motion_selection and \
                        selected_widget is not None and selected_widget.active:
                    continue

                # Check if "rel" exists within the event
                if not hasattr(event, 'rel'):
                    continue

                # Select if mouse motion
                sel = False  # Widget has been selected
                for index in range(len(self._current._widgets)):
                    widget = self._current._widgets[index]
                    if widget.is_visible() and widget.get_scrollarea().collide(widget, event):
                        if self._current._mouse_motion_selection and \
                                widget.is_selectable and \
                                not isinstance(widget, Frame):
                            sel = self._current._select(index, 1, SELECT_MOUSE_MOTION, True)
                        # noinspection PyProtectedMember
                        widget._check_mouseover(event)
                        if sel:
                            break
                if sel:
                    self._current._last_update_mode.append(
                        f'{_events.MENU_LAST_WIDGET_SELECT_MOTION}#{self._current.get_selected_widget().get_id()}'
                    )
                    updated = True
                    break

            # Mouse events in selected widget; don't consider the mouse wheel (button 4 & 5)
            elif event.type == pygame.MOUSEBUTTONUP and self._current._mouse and \
                    selected_widget is not None and event.button in (1, 2, 3):
                self._current._sound.play_click_mouse()
                if selected_widget_scrollarea.collide(selected_widget, event):
                    updated = selected_widget.update([event])
                    if updated:
                        self._current._last_update_mode.append(
                            f'{_events.MENU_LAST_SELECTED_WIDGET_BUTTON_UP}#{selected_widget.get_id()}'
                        )
                    break

            # Touchscreen event:
            elif event.type == FINGERDOWN and self._current._touchscreen:
                # If the touchscreen motion selection is disabled then select
                # a widget by clicking
                if not self._current._touchscreen_motion_selection:
                    sel = False
                    for index in range(len(self._current._widgets)):
                        widget = self._current._widgets[index]
                        if isinstance(widget, Frame):  # Frame does not accept touch
                            continue
                        if widget.is_selectable and widget.is_visible() and \
                                widget.get_scrollarea().collide(widget, event):
                            sel = self._current._select(index, 1, SELECT_TOUCH, True)
                            if not isinstance(widget, Frame):
                                break
                    if sel:
                        self._current._last_update_mode.append(
                            f'{_events.MENU_LAST_WIDGET_SELECT}#{self._current.get_selected_widget().get_id()}'
                        )
                        updated = True
                        break

                # If touchscreen motion selection, clicking will disable the
                # active state only if the user clicked outside the widget
                else:
                    if selected_widget is not None and selected_widget.active:
                        if not selected_widget_scrollarea.collide(selected_widget, event):
                            selected_widget.active = False
                            selected_widget.render()  # Some widgets need to be rendered
                            self._current._last_update_mode.append(
                                f'{_events.MENU_LAST_WIDGET_DISABLE_ACTIVE_STATE}#{selected_widget.get_id()}'
                            )
                            updated = True
                            break

            # Touchscreen events in selected widget
            elif event.type == FINGERUP and self._current._touchscreen and \
                    selected_widget is not None:
                self._current._sound.play_click_touch()
                if selected_widget_scrollarea.collide(selected_widget, event):
                    updated = selected_widget.update([event])
                    if updated:
                        self._current._last_update_mode.append(
                            f'{_events.MENU_LAST_SELECTED_WIDGET_FINGER_UP}#{selected_widget.get_id()}'
                        )
                    break

            # Select widgets by touchscreen motion, this is valid only if the
            # current selected widget is not active and the pointed widget is
            # selectable
            elif event.type == FINGERMOTION and self._current._touchscreen_motion_selection:
                # If selected widget is active then motion should not select
                # any widget
                if selected_widget is not None and selected_widget.active:
                    continue
                sel = False
                for index in range(len(self._current._widgets)):
                    widget = self._current._widgets[index]
                    if isinstance(widget, Frame):  # Frame does not accept touch
                        continue
                    if widget.is_selectable and widget.is_visible() and \
                            widget.get_scrollarea().collide(widget, event):
                        sel = self._current._select(index, 1, SELECT_TOUCH, True)
                        if not isinstance(widget, Frame):
                            break
                if sel:
                    self._current._last_update_mode.append(
                        f'{_events.MENU_LAST_WIDGET_SELECT_MOTION}#{self._current.get_selected_widget().get_id()}'
                    )
                    updated = True
                    break

    if mouse_motion_event is not None:
        check_widget_mouseleave(event=mouse_motion_event)

    # If cache is enabled, always force a rendering (user may have changed any status)
    if self._current._widget_surface_cache_enabled and updated:
        self._current._widget_surface_cache_need_update = True

    # A widget has closed the Menu
    if not self.is_enabled():
        updated = True

    return updated
def collide(self, event: EventType) -> bool:
    """
    Check whether a user event collides with the Menu rect.

    .. note::

        This is applied only to the base Menu (not the currently displayed,
        stored in ``_current`` pointer); for such behaviour apply to
        :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param event: Pygame event
    :return: ``True`` if collide
    """
    pos = get_finger_pos(self, event)
    return bool(self.get_rect().collidepoint(*pos))
def mainloop(
        self,
        surface: 'pygame.Surface',
        bgfun: Optional[Union[Callable[['Menu'], Any], CallableNoArgsType]] = None,
        **kwargs
) -> 'Menu':
    """
    Main loop of the **current** Menu. In this function, the Menu handle
    exceptions and draw. The Menu pauses the application and checks :py:mod:`pygame`
    events itself.

    This method returns until the Menu is updated (a widget status has changed).

    The execution of the mainloop is at the current Menu level.

    .. code-block:: python

        menu = pygame_menu.Menu(...)
        menu.mainloop(surface)

    The ``bgfun`` callable (if not None) can receive 1 argument maximum, if so,
    the Menu instance is provided:

    .. code-block:: python

        draw(...):
            bgfun(menu) <or> bgfun()

    Finally, mainloop can be disabled externally if menu.disable() is called.

    kwargs (Optional)
        - ``clear_surface``     (bool) – If ``True`` surface is cleared using ``theme.surface_clear_color``
        - ``disable_loop``      (bool) – If ``True`` the mainloop only runs once. Use for running draw and update in a single call
        - ``fps_limit``         (int) – Maximum FPS of the loop. Default equals to ``theme.fps``. If ``0`` there's no limit
        - ``wait_for_event``    (bool) – Holds the loop until an event is provided, useful to save CPU power

    .. warning::

        This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
        for example, ``menu.get_current().mainloop(...)``.

    :param surface: Pygame surface to draw the Menu
    :param bgfun: Background function called on each loop iteration before drawing the Menu
    :param kwargs: Optional keyword arguments
    :return: Self reference **(current)**
    """
    # Unpack kwargs
    clear_surface = kwargs.get('clear_surface', True)
    disable_loop = kwargs.get('disable_loop', False)
    fps_limit = kwargs.get('fps_limit', self._theme.fps)
    wait_for_event = kwargs.get('wait_for_event', False)

    assert isinstance(clear_surface, bool)
    assert isinstance(disable_loop, bool)
    assert isinstance(fps_limit, NumberInstance)
    assert isinstance(surface, pygame.Surface)
    assert isinstance(wait_for_event, bool)

    assert fps_limit >= 0, 'fps limit cannot be negative'

    # NOTE: For Menu accessor, use only _current, as the Menu pointer can
    # change through the execution
    if not self.is_enabled():
        self._current._runtime_errors.throw(
            self._current._runtime_errors.mainloop, 'menu is not enabled'
        )
        return self._current

    # Check background function; probe it once to discover whether it accepts
    # the Menu as its single argument
    bgfun_accept_menu = False
    if bgfun:
        assert callable(bgfun), \
            'background function must be callable (function-type) object'
        try:
            bgfun(self._current)
            bgfun_accept_menu = True
        except TypeError:
            pass
    self._current._background_function = (bgfun_accept_menu, bgfun)

    # Change state
    self._current._mainloop = True

    # Force rendering before loop
    self._current._widgets_surface = None

    # Start loop
    while True:
        self._current._stats.loop += 1
        self._current._clock.tick(fps_limit)

        # Draw the menu
        self.draw(surface=surface, clear_surface=clear_surface)

        # Gather events by Menu; wait_for_event blocks on the first event to
        # save CPU, then drains any remaining queued events
        if wait_for_event:
            self.update([pygame.event.wait()])
        if (not wait_for_event or pygame.event.peek()) and self.is_enabled():
            self.update(pygame.event.get())

        # Flip contents to screen
        pygame.display.flip()

        # Menu closed or disabled
        if not self.is_enabled() or disable_loop:
            self._current._mainloop = False
            check_widget_mouseleave(force=True)
            return self._current
def get_input_data(self, recursive: bool = False) -> Dict[str, Any]:
    """
    Return input data from a Menu. The results are given as a dict object
    whose keys are the ID of each element. With ``recursive=True`` it also
    collects data inside all sub-menus.

    .. note::

        This is applied only to the base Menu (not the currently displayed),
        for such behaviour apply to :py:meth:`pygame_menu.menu.Menu.get_current` object.

    :param recursive: Look in Menu and sub-menus
    :return: Input dict e.g.: ``{'id1': value, 'id2': value, ...}``
    """
    assert isinstance(recursive, bool)
    return self._get_input_data(recursive, depth=0)
def _get_input_data(self, recursive: bool, depth: int) -> Dict[str, Any]:
"""
Return input data from a Menu. The results are given as a dict object.
The keys are the ID of each element.
With ``recursive=True``: it collects also data inside the all sub-menus.
:param recursive: Look in Menu and sub-menus
:param depth: Depth of the input data
:return: Input dict e.g.: ``{'id1': value, 'id2': value, ...}``
"""
data = {}
for widget in self._widgets:
try:
data[widget.get_id()] = widget.get_value()
except ValueError: # Widget does not return data
pass
if recursive:
depth += 1
for menu in self._submenus.keys():
# noinspection PyProtectedMember
data_submenu = menu._get_input_data(recursive=recursive, depth=depth)
# Check if there is a collision between keys
data_keys = data.keys()
sub_data_keys = data_submenu.keys()
for key in sub_data_keys:
if key in data_keys:
raise ValueError(f'collision between widget data ID="{key}" at depth={depth}')
# Update data
data.update(data_submenu)
return data
def get_rect(self) -> 'pygame.Rect':
"""
Return the :py:class:`pygame.Rect` object of the Menu.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:return: Rect
"""
x, y = self.get_position()
return pygame.Rect(x, y, int(self._width), int(self._height))
def set_sound(self, sound: Optional['Sound'], recursive: bool = False) -> 'Menu':
"""
Add a sound engine to the Menu. If ``recursive=True``, the sound is
applied to all submenus.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param sound: Sound object
:param recursive: Set the sound engine to all submenus
:return: Self reference
"""
assert isinstance(sound, (type(self._sound), type(None))), \
'sound must be pygame_menu.Sound type or None'
if sound is None:
sound = Sound()
self._sound = sound
for widget in self._widgets:
widget.set_sound(sound)
if recursive:
for menu in self._submenus.keys():
menu.set_sound(sound, recursive=True)
return self
def get_title(self) -> str:
"""
Return the title of the Menu.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:return: Menu title
"""
return self._menubar.get_title()
def set_title(self, title: Any, offset: Optional[Vector2NumberType] = None) -> 'Menu':
"""
Set the title of the Menu.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param title: New menu title
:param offset: If ``None`` uses theme offset, else it defines the title offset on x-axis and y-axis (x, y)
:return: Self reference
"""
if offset is None:
offset = self._theme.title_offset
else:
assert_vector(offset, 2)
self._menubar.set_title(title, offsetx=offset[0], offsety=offset[1])
return self
def full_reset(self) -> 'Menu':
"""
Reset the Menu back to the first opened Menu.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:return: Self reference
"""
depth = self._get_depth()
if depth > 0:
self.reset(depth)
return self
def clear(self, reset: bool = True) -> 'Menu':
"""
Clears all widgets.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param reset: If ``True`` the menu full-resets
:return: Self reference
"""
if reset:
self.full_reset()
for w in self._widgets.copy():
self.remove_widget(w)
del self._widgets[:]
del self._submenus
self._submenus = {}
self._index = -1
self._stats.clear += 1
self._render()
return self
def _open(self, menu: 'Menu') -> None:
"""
Open the given Menu.
.. warning::
This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
for example, ``menu.get_current().reset(...)``.
:param menu: Menu object
"""
current = self
# Update pointers
menu._top = self._top
self._top._current = menu._current
self._top._prev = [self._top._prev, current]
# Call event
if menu._onbeforeopen is not None:
menu._onbeforeopen(current, menu)
# Select the first widget
self._current._select(0, 1, SELECT_OPEN, False, update_mouse_position=False)
# Re-render menu
check_widget_mouseleave(force=True)
self._render()
def reset(self, total: int) -> 'Menu':
"""
Go back in Menu history a certain number of times from the **current** Menu.
This method operates through the **current** Menu pointer.
.. warning::
This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
for example, ``menu.get_current().reset(...)``.
:param total: How many menus to go back
:return: Self reference **(current)**
"""
assert isinstance(total, int)
assert total > 0, 'total must be greater than zero'
i = 0
if self._top._prev is not None:
while True:
if self._top._prev is not None:
prev = self._top._prev
self._top._current = prev[1] # This changes the "current" pointer
self._top._prev = prev[0] # Eventually will reach None
i += 1
if i == total:
break
else:
break
# Execute onreset callback
if self._current._onreset is not None:
try:
self._current._onreset(self._current)
except TypeError:
self._current._onreset()
self._current._widgets_surface = None
check_widget_mouseleave(force=True)
self._current._select(self._top._current._index, 1, SELECT_RESET, False,
update_mouse_position=False)
self._current._stats.reset += 1
return self._current
def _select(
self,
new_index: int,
dwidget: int,
select_type: str,
apply_sound: bool,
**kwargs
) -> bool:
"""
Select the widget at the given index and unselect others. Selection forces
rendering of the widget. Also play widget selection sound. This is applied
to the base Menu pointer.
kwargs (Optional)
- ``last_index`` (int) – Last index in recursive call on Frames
- ``update_mouse_position`` (bool) – Update mouse position
:param new_index: Widget index
:param dwidget: Direction to search if ``new_index`` widget is non-selectable
:param select_type: Select type identifier
:param apply_sound: Apply widget sound if selected
:param kwargs: Optional keyword arguments
:return: ``True`` if the widget changed
"""
self._stats.select += 1
self._last_selected_type = select_type
if len(self._widgets) == 0:
return False
# This stores +/-1 if the index increases or decreases, used by non-selectable selection
if dwidget == 0:
if new_index < self._index:
dwidget = -1
else:
dwidget = 1
# Limit the index to the length
new_index %= len(self._widgets)
# Get both widgets
if self._index >= len(self._widgets): # Menu length changed during execution time
for i in range(len(self._widgets)): # Unselect all possible candidates
self._widgets[i].select(False)
self._index = 0
old_widget = self._widgets[self._index]
new_widget = self._widgets[new_index]
if old_widget == new_widget and self._index != -1 and old_widget.is_selected():
return False
# If new widget is not selectable or visible
if not new_widget.is_selectable or not new_widget.is_visible():
# If it is a frame, select the first selectable object
if isinstance(new_widget, Frame):
if dwidget == 1:
min_index = new_widget.first_index
else:
min_index = new_widget.last_index
current_frame = self._widgets[self._index].get_frame()
same_frame = current_frame is not None and current_frame == new_widget # Ignore cycles
# Check if recursive but same index as before
last_index = kwargs.get('last_index', -1)
if select_type == SELECT_RECURSIVE and last_index == min_index:
min_index += 2 * dwidget
# A selectable widget has been found within frame
if min_index != -1 and not same_frame and min_index != self._index:
kwargs['last_index'] = new_index
return self._select(min_index, dwidget, SELECT_RECURSIVE,
apply_sound, **kwargs)
# There's at least 1 selectable option
if self._index >= 0:
kwargs['last_index'] = new_index
return self._select(new_index + dwidget, dwidget, SELECT_RECURSIVE,
apply_sound, **kwargs)
# No selectable options, quit
else:
return False
# Selecting widgets forces rendering
old_widget.select(False)
self._index = new_index # Update selected index
new_widget.select()
self.scroll_to_widget(new_widget)
# Play widget selection sound
if old_widget != new_widget and apply_sound:
self._sound.play_widget_selection()
# Update mouse position if selected using keys
if select_type in (SELECT_KEY, SELECT_RECURSIVE) and \
self._mouse_motion_selection and \
not self._disable_widget_update_mousepos_mouseselection and \
not new_widget.is_floating() and \
self._mouseover and \
kwargs.get('update_mouse_position', True):
pygame.mouse.set_pos(new_widget.get_rect(to_real_position=True).center)
return True
def scroll_to_widget(
self,
widget: Optional['Widget'],
scroll_parent: bool = True
) -> 'Menu':
"""
Scroll the Menu to the given widget.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param widget: Widget to request scroll. If ``None`` scrolls to the selected widget
:param scroll_parent: If ``True`` parent scroll also scrolls to rect
:return: Self reference
"""
if widget is None:
widget = self.get_selected_widget()
if widget is None: # No widget is selected, scroll to top
self.get_scrollarea().scroll_to(ORIENTATION_VERTICAL, 0)
self.get_scrollarea().scroll_to(ORIENTATION_HORIZONTAL, 0)
return self
assert isinstance(widget, Widget), \
'widget to scroll from must be a Widget class, not None'
widget_scroll = widget.get_scrollarea()
if widget_scroll is None:
warn(f'{widget.get_class_id()} scrollarea is None, thus, scroll to widget cannot be performed')
return self
# Scroll to rect
rect = widget.get_rect()
widget_frame = widget.get_frame()
widget_border = widget.get_border()[1]
# Compute margin depending on widget position
_, ry = widget_scroll.get_widget_position_relative_to_view_rect(widget)
mx = 0
my = 0
if ry < 0.15 and self._menubar.fixed:
my = -self._menubar.get_height() - widget_border
# Call scroll parent container
if widget_frame is not None and widget_frame.is_scrollable:
widget_frame.scroll_to_widget((mx, my), scroll_parent)
# The first set the scrolls
widget_scroll.scroll_to_rect(rect, (mx, my), scroll_parent)
# The latter updates to active object
widget_scroll.scroll_to_rect(rect, (mx, my), scroll_parent)
return self
def get_window_size(self) -> Tuple2IntType:
"""
Return the window size as a tuple of (width, height).
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:return: Window size in px
"""
return self._window_size
def get_submenus(self, recursive: bool = False) -> Tuple['Menu', ...]:
"""
Return the Menu submenus as a tuple.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param recursive: If ``True`` return all submenus in a recursive fashion
:return: Submenus tuple
"""
assert isinstance(recursive, bool)
if not recursive:
return tuple(self._submenus.keys())
sm = list(self._submenus.keys())
for m in self._submenus:
m_sm = m.get_submenus(recursive=recursive)
for i in m_sm:
if i not in sm:
sm.append(i)
return tuple(sm)
def get_menubar(self) -> 'MenuBar':
"""
Return menubar widget.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:return: MenuBar widget
"""
return self._menubar
def get_scrollarea(self) -> 'ScrollArea':
"""
Return the Menu ScrollArea.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:return: ScrollArea object
"""
return self._scrollarea
def get_widget(
self,
widget_id: str,
recursive: bool = False
) -> Optional['Widget']:
"""
Return a widget by a given ID from the Menu.
With ``recursive=True`` it looks for a widget in the Menu and all sub-menus.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
.. note::
``None`` is returned if no widget is found.
:param widget_id: Widget ID
:param recursive: Look in Menu and submenus
:return: Widget object
"""
assert isinstance(widget_id, str)
assert isinstance(recursive, bool)
for widget in self._widgets:
if widget.get_id() == widget_id:
return widget
if recursive:
for menu in self._submenus.keys():
widget = menu.get_widget(widget_id, recursive)
if widget:
return widget
return None
def get_widgets_column(self, col: int) -> Tuple['Widget', ...]:
"""
Return all the widgets within column which are visible.
:param col: Column number (start from zero)
:return: Widget list
"""
return tuple(self._widget_columns[col])
def get_widgets(self, ids: Optional[Union[List[str], Tuple[str, ...]]] = None) -> Tuple['Widget', ...]:
"""
Return the Menu widgets as a tuple.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param ids: Widget id list. If ``None``, return all the widgets, otherwise, return the widgets from that list
:return: Widgets tuple
"""
if not ids:
return tuple(self._widgets)
widgets = []
for i in ids:
widgets.append(self.get_widget(i, recursive=True))
return tuple(widgets)
def reset_value(self, recursive: bool = False) -> 'Menu':
"""
Reset all widget values to default.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param recursive: Set value recursively
:return: Self reference
"""
for widget in self._widgets:
widget.reset_value()
if recursive:
for sm in self._submenus.keys():
sm.reset_value(recursive)
return self
def in_submenu(self, menu: 'Menu', recursive: bool = False) -> bool:
"""
Return ``True`` if ``menu`` is a submenu of the Menu.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param menu: Menu to check
:param recursive: Check recursively
:return: ``True`` if ``menu`` is in the submenus
"""
if menu in self._submenus.keys():
return True
if recursive:
for sm in self._submenus.keys():
if sm.in_submenu(menu, recursive):
return True
return False
def _remove_submenu(
self,
menu: 'Menu',
hook: 'Widget',
recursive: bool = False
) -> bool:
"""
Removes Menu from submenu if ``menu`` is a submenu of the Menu.
:param menu: Menu to remove
:param hook: Widget associated with the menu
:param recursive: Check recursively
:return: ``True`` if ``menu`` was removed
"""
assert isinstance(menu, Menu)
assert isinstance(hook, Widget)
if menu in self._submenus.keys():
# Remove hook if in list
if hook in self._submenus[menu]:
self._submenus[menu].remove(hook)
hook._menu_hook = None
# If total hooks are empty, remove the menu
if len(self._submenus[menu]) == 0:
del self._submenus[menu]
self._update_after_remove_or_hidden(self._index)
return True
if recursive:
for sm in self._submenus:
if sm._remove_submenu(menu, hook, recursive):
return True
return False
def get_theme(self) -> 'Theme':
"""
Return the Menu theme.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
.. warning::
Use with caution, changing the theme may affect other menus or
widgets if not properly copied.
:return: Menu theme
"""
return self._theme
def get_clock(self) -> 'pygame.time.Clock':
"""
Return the pygame Menu timer.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:return: Pygame clock object
"""
return self._clock
def get_index(self) -> int:
"""
Get selected widget index from the Menu.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:return: Selected widget index
"""
return self._index
def get_mouseover_widget(self, filter_appended: bool = True) -> Optional['Widget']:
"""
Return the mouseover widget on the Menu.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param filter_appended: If ``True`` return the widget only if it's appended to the base Menu
:return: Widget object, ``None`` if no widget is mouseover
"""
widget = WIDGET_MOUSEOVER[0]
if widget is None or filter_appended and widget.get_menu() != self:
return
return widget
def get_selected_widget(self) -> Optional['Widget']:
"""
Return the selected widget on the Menu.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:return: Widget object, ``None`` if no widget is selected
"""
if not isinstance(self._index, int):
self._index = 0
return None
if self._index < 0:
return None
try:
return self._widgets[self._index % len(self._widgets)]
except (IndexError, ZeroDivisionError):
return None
def get_decorator(self) -> 'Decorator':
"""
Return the Menu decorator API.
.. note::
``prev`` menu decorator may not draw because :py:class:`pygame_menu.widgets.MenuBar`
and :py:class:`pygame_menu._scrollarea.ScrollArea` objects draw over
it. If it's desired to draw a decorator behind widgets, use the ScrollArea
decorator, for example: :py:data:`menu.get_scrollarea().get_decorator()`.
The menu drawing order is:
1. Menu background color/image
2. Menu ``prev`` decorator
3. Menu ScrollArea ``prev`` decorator
4. Menu ScrollArea widgets
5. Menu ScrollArea ``post`` decorator
6. Menu title
7. Menu ``post`` decorator
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:return: Decorator API
"""
return self._decorator
def _test_widgets_status(self) -> Tuple[Tuple[Any, ...], ...]:
"""
Get the status of each widget as a tuple (position, indices, values, etc.).
:return: Widget status
"""
self.render()
data = []
for w in self._widgets:
# noinspection PyProtectedMember
data.append(w._get_status())
return tuple(data)
# noinspection PyProtectedMember
def move_widget_index(
self,
widget: Optional['Widget'],
index: Optional[Union['Widget', int]] = None,
render: bool = True,
**kwargs
) -> Optional[Tuple2IntType]:
"""
Move a given widget to a certain index. ``index`` can be another widget,
a numerical position, or ``None``; if ``None`` the widget is pushed to
the last widget list position.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param widget: Widget to move. If ``None`` the widgets are flipped or reversed and returns ``None``
:param index: Target index. It can be a widget, a numerical index, or ``None``; if ``None`` the widget is pushed to the last position
:param render: Force menu rendering after update
:param kwargs: Optional keyword arguments
:return: The new indices of the widget and the previous index element
"""
depth = kwargs.get('depth', 0)
# Update only selected index
if kwargs.get('update_selected_index', False):
self._index = -1
has_selected = False
invalid_w: List[str] = []
selected = None
for w in self._widgets:
if w.is_selected():
if not has_selected:
self._select(self._widgets.index(w), 1, SELECT_MOVE, False)
has_selected = True
selected = w.get_class_id()
else:
w.select(False)
invalid_w.append(w.get_class_id())
if len(invalid_w) > 0:
raise _MenuMultipleSelectedWidgetsException(
f'several widgets are selected at the same time, current '
f'selected (sorted by index): {selected}, but the following '
f'are also selected: {", ".join(invalid_w)}'
)
return
selected_widget = self.get_selected_widget()
# Reverse widgets
if widget is None:
new_widgets = []
lw = len(self._widgets)
j_limit = -1 # Last position containing non frame
for i in range(lw):
j = lw - 1 - i
if self._widgets[j].get_frame() is None:
new_widgets.append(self._widgets[j])
if j_limit != -1:
for k in range(j + 1, j_limit + 1):
new_widgets.append(self._widgets[k])
j_limit = -1
else:
if j_limit == -1:
j_limit = j
if j_limit != -1:
for k in range(j_limit):
new_widgets.append(self._widgets[k])
self._widgets = new_widgets
if selected_widget is not None:
selected_widget.select(False)
self._select(self._widgets.index(selected_widget), 1, SELECT_MOVE, False)
if len(self._update_frames) > 0:
self._update_frames[0]._sort_menu_update_frames()
if render:
self._widgets_surface = None
self._render()
check_widget_mouseleave()
return
# Asserts
assert len(self._widgets) >= 2, \
'menu must contain at least 2 widgets to perform this task'
try:
widget_index = self._widgets.index(widget)
except ValueError:
raise ValueError(f'{widget.get_class_id()} widget is not on widgets list')
assert widget in self._widgets, \
f'{widget.get_class_id()} does not exist on current menu widgets list'
assert isinstance(index, (Widget, int, type(None)))
if isinstance(index, Widget):
assert index in self._widgets, \
f'{index.get_class_id()} does not exist on current menu widgets list'
index = self._widgets.index(index)
elif isinstance(index, int):
assert 0 <= index < len(self._widgets), \
f'index {index} must be between 0 and the number of widgets ({len(self._widgets)})'
elif index is None:
index = len(self._widgets) - 1
else:
raise ValueError('index must be a widget, int, or None')
assert widget_index != index, \
f'target index must be different than the current widget index ({index})'
target_index = index
target_widget = self._widgets[target_index]
# If target widget is frame, find the latest index
both_frames = isinstance(target_widget, Frame) and isinstance(widget, Frame)
check_if_last = both_frames and self._validate_frame_widgetmove and target_index != 0
if check_if_last:
w_last = target_widget
while True:
target_index = w_last.last_index
w_last = self._widgets[w_last.last_index]
target_widget = w_last
if not (isinstance(w_last, Frame) and w_last.get_indices() != (-1, -1)) or \
w_last.get_menu() is None:
break
to_last_position = target_index == len(self._widgets) - 1
if not to_last_position and check_if_last:
target_index = index
target_widget = self._widgets[target_index]
if both_frames and self._validate_frame_widgetmove and \
not kwargs.get('swap_search', False):
return self.move_widget_index(
target_widget, widget, render=render, swap_search=True, depth=depth + 1
)
# Check both widgets are within frame if widget to move is frame
if self._validate_frame_widgetmove and not to_last_position and not both_frames:
assert widget.get_frame() == target_widget.get_frame(), \
'both widgets must be within same frame'
self._widgets.pop(widget_index)
self._widgets.insert(target_index, widget)
new_widget_index = self._widgets.index(widget)
assert new_widget_index != widget_index, 'widget index has not changed'
assert widget != target_widget, 'widget must be different than target'
# If frame is moved, move all sub-elements
if self._validate_frame_widgetmove:
if isinstance(widget, Frame):
self._validate_frame_widgetmove = False
for w in widget.get_widgets(unpack_subframes_include_frame=True,
reverse=not to_last_position):
if w.get_menu() is None:
continue
if not to_last_position:
self.move_widget_index(
w, self._widgets.index(widget) + 1, render=False, depth=depth + 1
)
else:
self.move_widget_index(w, render=False, depth=depth + 1)
self._validate_frame_widgetmove = True
# Sort frame widget list
if widget.get_frame() is not None:
prev_frame_widgs = widget.get_frame().get_widgets(unpack_subframes=False)
# Get none-menu widgets for ordering
none_menu_widgs: Dict[Optional['Widget'], List['Widget']] = {}
prev_wig: Optional['Widget'] = None
for i in range(len(prev_frame_widgs)):
if prev_frame_widgs[i].get_menu() is None:
if prev_wig not in none_menu_widgs.keys():
none_menu_widgs[prev_wig] = []
none_menu_widgs[prev_wig].append(prev_frame_widgs[i])
else:
prev_wig = prev_frame_widgs[i]
for i in none_menu_widgs.keys():
none_menu_widgs[i].reverse()
# Get all widgets within given frame
new_list = []
for w in self._widgets:
if w.get_frame() == widget.get_frame():
new_list.append(w)
# Create new list considering non-menu widgets
new_list_non_menu = []
if None in none_menu_widgs.keys():
for w in none_menu_widgs[None]:
new_list_non_menu.append(w)
for w in new_list:
new_list_non_menu.append(w)
if w in none_menu_widgs.keys():
for ww in none_menu_widgs[w]:
new_list_non_menu.append(ww)
# Make dict and update frame widgets dict
new_dict = {}
for w in new_list_non_menu:
new_dict[w.get_id()] = w
widget.get_frame()._widgets = new_dict
# Update selected widget
if selected_widget is not None and selected_widget.is_selectable and \
self._validate_frame_widgetmove:
self._index = -1
selected_widget.select(False)
self._select(self._widgets.index(selected_widget), 1, SELECT_MOVE, False)
if render:
self._widgets_surface = None
self._render()
if self._validate_frame_widgetmove:
if isinstance(widget, Frame) or isinstance(target_widget, Frame):
if isinstance(widget, Frame):
widget._sort_menu_update_frames()
else:
target_widget._sort_menu_update_frames()
check_widget_mouseleave()
return new_widget_index, target_index
def _test_print_widgets(self) -> None:
"""
Test printing widgets order.
"""
print_menu_widget_structure(self._widgets, self._index)
def _copy_theme(self) -> None:
"""
Updates theme reference with a copied one.
"""
self._theme = self._theme.copy()
class _MenuStats(object):
"""
Menu stats.
"""
def __init__(self) -> None:
# Widget update
self.added_widgets = 0
self.removed_widgets = 0
# Widget position
self.build_surface = 0
self.position_update = 0
self.center_content = 0
# Render
self.last_build_surface_time = 0
self.render_private = 0
self.render_public = 0
self.total_building_time = 0
self.total_rendering_time = 0
# Other
self.clear = 0
self.draw = 0
self.draw_update_cached = 0
self.loop = 0
self.reset = 0
self.select = 0
self.update = 0
class _MenuCopyException(Exception):
"""
If user tries to copy a Menu.
"""
pass
class _MenuRuntimeErrorConfig(object):
"""
Controls the runtime errors of the Menu.
"""
def __init__(self) -> None:
self.close = True
self.draw = True
self.mainloop = True
self.update = True # It should be True, as non-active Menus SHOULD NOT receive updates
@staticmethod
def throw(throw_runtime: bool, msg: str) -> None:
"""
Throws an error, if ``throw_runtime=True`` throws a ``RuntimeError``, otherwise
only a warning.
:param throw_runtime: If error is raised
:param msg: Message
"""
if throw_runtime:
raise RuntimeError(msg)
warn(msg)
class _MenuSizingException(Exception):
"""
Exception thrown if widget exceeds maximum size of column/row layout.
"""
pass
class _MenuWidgetOverflow(Exception):
"""
Exception thrown if adding more widgets than menu can contain on row/column layout.
"""
pass
class _MenuMultipleSelectedWidgetsException(Exception):
"""
Exception thrown if multiple widgets are selected at the same time.
"""
pass
|
dados_pessoa: dict = dict()
lista_pessoas: list = list()
media: list = list()
mulheres: list = list()
while True:
dados_pessoa['nome'] = str(input('Nome: ').strip().upper())
dados_pessoa['idade'] = int(input('Idade: '))
media.append(dados_pessoa['idade'])
sexo = ' '
while sexo not in 'MmFf':
sexo = str(input('Sexo[M/F]: ').strip())[0]
if sexo in 'MmFf':
dados_pessoa['sexo'] = sexo.upper()
if sexo in 'Ff':
mulheres.append(dados_pessoa['nome'])
lista_pessoas.append(dados_pessoa.copy())
dados_pessoa.clear()
flag = ' '
while flag not in 'SsNn':
flag = str(input('Deseja continuar: ').strip())[0]
print('--' * 15)
if flag in 'Nn':
break
print(f' -Foram cadastradas {len(lista_pessoas)} pessoas')
print(f' -A media de idade é de {sum(media) / len(lista_pessoas):.2f} anos.')
print(f' -As mulheres cadastradas foram: ', end=' ')
# cm = cada mulher
for cm in mulheres:
print(f'[{cm}]', end=' ')
print()
print('Lista de pessoas acima da média: ')
print('--' * 16)
# cp = cada pessoa
for cp in lista_pessoas:
if cp['idade'] > sum(media) / len(lista_pessoas):
print(f'{cp['nome']} com {cp['idade']} anos, sexo = {cp['sexo']}')
print('--' * 15)
print('<< encerrado >>')
| dados_pessoa: dict = dict()
lista_pessoas: list = list()
media: list = list()
mulheres: list = list()
while True:
dados_pessoa['nome'] = str(input('Nome: ').strip().upper())
dados_pessoa['idade'] = int(input('Idade: '))
media.append(dados_pessoa['idade'])
sexo = ' '
while sexo not in 'MmFf':
sexo = str(input('Sexo[M/F]: ').strip())[0]
if sexo in 'MmFf':
dados_pessoa['sexo'] = sexo.upper()
if sexo in 'Ff':
mulheres.append(dados_pessoa['nome'])
lista_pessoas.append(dados_pessoa.copy())
dados_pessoa.clear()
flag = ' '
while flag not in 'SsNn':
flag = str(input('Deseja continuar: ').strip())[0]
print('--' * 15)
if flag in 'Nn':
break
print(f' -Foram cadastradas {len(lista_pessoas)} pessoas')
print(f' -A media de idade é de {sum(media) / len(lista_pessoas):.2f} anos.')
print(f' -As mulheres cadastradas foram: ', end=' ')
# cm = cada mulher
for cm in mulheres:
print(f'[{cm}]', end=' ')
print()
print('Lista de pessoas acima da média: ')
print('--' * 16)
# cp = cada pessoa
for cp in lista_pessoas:
if cp['idade'] > sum(media) / len(lista_pessoas):
print(f'{cp["nome"]} com {cp["idade"]} anos, sexo = {cp["sexo"]}')
print('--' * 15)
print('<< encerrado >>')
|
# Third-party
from astropy.io import fits
import numpy as np
from tqdm.auto import tqdm
# Joaquin
from .logger import logger
from .features import get_phot_features, get_lsf_features, get_spec_features
def get_aspcapstar_path(config, star):
filename = f"aspcapStar-{config.apogee_reduction}-{star["APOGEE_ID"]}.fits"
local_path = (config.apogee_cache_path /
config.apogee_dr /
star['TELESCOPE'] /
star['FIELD'].strip() /
filename)
return local_path
def get_aspcapstar(config, star):
local_path = get_aspcapstar_path(config, star)
with fits.open(local_path) as hdul:
pix = np.arange(hdul[1].header['NAXIS1'])
wvln = 10 ** (hdul[1].header['CRVAL1'] +
pix * hdul[1].header['CDELT1'])
flux = hdul[1].data
err = hdul[2].data
return wvln, flux, err
def get_lsf_path(config, star):
if star['TELESCOPE'] == 'apo25m':
sorp = 'p'
elif star['TELESCOPE'] == 'lco25m':
sorp = 's'
else:
raise NotImplementedError()
filename = f"a{sorp}StarLSF-{star["APOGEE_ID"]}.fits"
local_path = (config.apogee_cache_path /
config.apogee_dr /
star['TELESCOPE'] /
star['FIELD'].strip() /
filename)
return local_path
def get_lsf(config, star):
local_path = get_lsf_path(config, star)
with fits.open(local_path) as hdul:
if config.apogee_dr == 'dr17':
lsf = hdul[0].data[:, 7]
else:
lsf = hdul[1].data[7]
pix = np.arange(len(lsf))
return pix, lsf
def make_apogee_X(config, stars, progress=True, X_dtype=np.float32,
spec_fill_value=0.):
if progress:
iter_ = tqdm
else:
iter_ = iter
if stars is None:
raise ValueError(
"Input `stars` is None! You must pass a table of allStar data "
"using the `stars` argument to the initializer")
# First, figure out how many features we have:
for star in stars:
try:
wvln, flux, err = get_aspcapstar(config, star)
pix, lsf = get_lsf(config, star)
phot_f = get_phot_features(star, config.phot_names)
lsf_f = get_lsf_features(lsf)
spec_f, mask = get_spec_features(wvln, flux, err,
fill_value=spec_fill_value)
except Exception: # noqa
continue
Nlsf = len(lsf_f)
Nphot = len(phot_f)
Nspec = len(spec_f)
break
else:
raise RuntimeError("Failed to determine number of features")
Nstars = len(stars)
Nfeatures = Nphot + Nlsf + Nspec
X = np.full((Nstars, Nfeatures), np.nan, dtype=X_dtype)
spec_bad_masks = np.full((Nstars, Nspec), True, dtype=bool)
for i, star in enumerate(iter_(stars)):
try:
wvln, flux, err = get_aspcapstar(config, star)
pix, lsf = get_lsf(config, star)
except Exception as e:
logger.log(1,
"failed to get aspcapStar or apStarLSF data for "
f"star {i}\n{e}")
continue
try:
phot_f = get_phot_features(star, config.phot_names)
lsf_f = get_lsf_features(lsf)
spec_f, spec_mask = get_spec_features(wvln, flux, err,
fill_value=spec_fill_value)
except Exception as e:
logger.log(1, f"failed to get features for star {i}\n{e}")
continue
phot_idx = np.arange(Nphot, dtype=int)
last = phot_idx[-1] + 1
lsf_idx = np.arange(last, last + Nlsf, dtype=int)
last = lsf_idx[-1] + 1
spec_idx = np.arange(last, last + Nspec, dtype=int)
X[i] = np.concatenate((phot_f, lsf_f, spec_f))
spec_bad_masks[i] = spec_mask
idx_map = {
'phot': phot_idx,
'lsf': lsf_idx,
'spec': spec_idx
}
return X, idx_map, wvln, spec_bad_masks
| # Third-party
from astropy.io import fits
import numpy as np
from tqdm.auto import tqdm
# Joaquin
from .logger import logger
from .features import get_phot_features, get_lsf_features, get_spec_features
def get_aspcapstar_path(config, star):
    """Return the local cache path of a star's aspcapStar FITS file."""
    fname = (f"aspcapStar-{config.apogee_reduction}-"
             f"{star['APOGEE_ID']}.fits")
    base = config.apogee_cache_path / config.apogee_dr
    return base / star['TELESCOPE'] / star['FIELD'].strip() / fname
def get_aspcapstar(config, star):
    """Load a star's aspcapStar spectrum: wavelength grid, flux, error."""
    path = get_aspcapstar_path(config, star)
    with fits.open(path) as hdul:
        hdr = hdul[1].header
        pix = np.arange(hdr['NAXIS1'])
        # Wavelength grid is log-spaced: log10(wvln) = CRVAL1 + pix * CDELT1
        wvln = 10 ** (hdr['CRVAL1'] + pix * hdr['CDELT1'])
        flux = hdul[1].data
        err = hdul[2].data
    return wvln, flux, err
def get_lsf_path(config, star):
    """Return the local cache path of a star's a[ps]StarLSF FITS file."""
    # 'p' = APO (north), 's' = LCO (south); other telescopes unsupported
    telescope_codes = {'apo25m': 'p', 'lco25m': 's'}
    telescope = star['TELESCOPE']
    if telescope not in telescope_codes:
        raise NotImplementedError()
    fname = (f"a{telescope_codes[telescope]}StarLSF-"
             f"{star['APOGEE_ID']}.fits")
    return (config.apogee_cache_path / config.apogee_dr /
            telescope / star['FIELD'].strip() / fname)
def get_lsf(config, star):
    """Load a star's LSF coefficient array and its pixel grid."""
    path = get_lsf_path(config, star)
    with fits.open(path) as hdul:
        # DR17 stores the LSF in the primary HDU (transposed layout);
        # earlier releases keep it in extension 1.
        if config.apogee_dr == 'dr17':
            lsf = hdul[0].data[:, 7]
        else:
            lsf = hdul[1].data[7]
        pix = np.arange(len(lsf))
    return pix, lsf
def make_apogee_X(config, stars, progress=True, X_dtype=np.float32,
                  spec_fill_value=0.):
    """Construct the feature matrix ``X`` for a table of APOGEE stars.

    Parameters
    ----------
    config : object
        Configuration providing the APOGEE cache path, data release, and
        the photometry column names (``phot_names``).
    stars : table-like
        Rows of allStar data; each row must support item access by
        column name (e.g. ``star['APOGEE_ID']``).
    progress : bool, optional
        If True (default), wrap the star iteration in a tqdm progress bar.
    X_dtype : numpy dtype, optional
        dtype of the returned feature matrix.
    spec_fill_value : float, optional
        Fill value used for masked spectral pixels.

    Returns
    -------
    X : numpy.ndarray
        ``(Nstars, Nfeatures)`` matrix; rows for stars whose data could
        not be loaded are left as NaN.
    idx_map : dict
        Maps ``'phot'``, ``'lsf'``, ``'spec'`` to the column indices of
        each feature group in ``X``.
    wvln : numpy.ndarray
        Wavelength grid of the last successfully loaded spectrum.
    spec_bad_masks : numpy.ndarray
        ``(Nstars, Nspec)`` boolean mask of bad spectral pixels.

    Raises
    ------
    ValueError
        If ``stars`` is None.
    RuntimeError
        If no star's data files load cleanly, so the number of features
        cannot be determined.
    """
    iter_ = tqdm if progress else iter

    if stars is None:
        raise ValueError(
            "Input `stars` is None! You must pass a table of allStar data "
            "using the `stars` argument to the initializer")

    # First, figure out how many features we have by finding one star
    # whose data files load cleanly:
    for star in stars:
        try:
            wvln, flux, err = get_aspcapstar(config, star)
            pix, lsf = get_lsf(config, star)

            phot_f = get_phot_features(star, config.phot_names)
            lsf_f = get_lsf_features(lsf)
            spec_f, mask = get_spec_features(wvln, flux, err,
                                             fill_value=spec_fill_value)
        except Exception:  # noqa
            continue

        Nlsf = len(lsf_f)
        Nphot = len(phot_f)
        Nspec = len(spec_f)
        break
    else:
        raise RuntimeError("Failed to determine number of features")

    Nstars = len(stars)
    Nfeatures = Nphot + Nlsf + Nspec

    # The column layout is star-independent, so compute it once up front.
    # (Previously this was computed inside the loop below, which left
    # `idx_map` undefined when every star failed to load, and indexed
    # `phot_idx[-1]`, which raised IndexError for zero photometry
    # features.)
    phot_idx = np.arange(Nphot, dtype=int)
    lsf_idx = np.arange(Nphot, Nphot + Nlsf, dtype=int)
    spec_idx = np.arange(Nphot + Nlsf, Nfeatures, dtype=int)
    idx_map = {
        'phot': phot_idx,
        'lsf': lsf_idx,
        'spec': spec_idx
    }

    X = np.full((Nstars, Nfeatures), np.nan, dtype=X_dtype)
    spec_bad_masks = np.full((Nstars, Nspec), True, dtype=bool)
    for i, star in enumerate(iter_(stars)):
        try:
            wvln, flux, err = get_aspcapstar(config, star)
            pix, lsf = get_lsf(config, star)
        except Exception as e:
            logger.log(1,
                       "failed to get aspcapStar or apStarLSF data for "
                       f"star {i}\n{e}")
            continue

        try:
            phot_f = get_phot_features(star, config.phot_names)
            lsf_f = get_lsf_features(lsf)
            spec_f, spec_mask = get_spec_features(wvln, flux, err,
                                                  fill_value=spec_fill_value)
        except Exception as e:
            logger.log(1, f"failed to get features for star {i}\n{e}")
            continue

        X[i] = np.concatenate((phot_f, lsf_f, spec_f))
        spec_bad_masks[i] = spec_mask

    return X, idx_map, wvln, spec_bad_masks
|
"""Custom expansions of :mod:`sklearn` functionalities.
Note
----
This module provides custom expansions of some :mod:`sklearn` classes and
functions which are necessary to fit the purposes for the desired
functionalities of the :ref:`MLR module <api.esmvaltool.diag_scripts.mlr>`. As
long-term goal we would like to include these functionalities to the
:mod:`sklearn` package since we believe these additions might be helpful for
everyone. This module serves as interim solution. To ensure that all features
are properly working this module is also covered by extensive tests.
Parts of this code have been copied from :mod:`sklearn`.
License: BSD 3-Clause License
Copyright (c) 2007-2020 The scikit-learn developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# pylint: disable=arguments-differ
# pylint: disable=attribute-defined-outside-init
# pylint: disable=protected-access
# pylint: disable=super-init-not-called
# pylint: disable=too-many-arguments
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-locals
# pylint: disable=too-many-return-statements
import itertools
import logging
import numbers
import os
import warnings
from contextlib import suppress
from copy import deepcopy
from inspect import getfullargspec
from traceback import format_exc
import numpy as np
import scipy.sparse as sp
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.compose import ColumnTransformer, TransformedTargetRegressor
from sklearn.exceptions import FitFailedWarning, NotFittedError
from sklearn.feature_selection import RFE, SelectorMixin
from sklearn.linear_model import LinearRegression
from sklearn.metrics import check_scoring
from sklearn.model_selection import check_cv
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils import check_array, check_X_y, indexable, safe_sqr
from sklearn.utils.fixes import np_version, parse_version
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.utils.validation import check_is_fitted
from esmvaltool.diag_scripts import mlr
logger = logging.getLogger(os.path.basename(__file__))
# Default tag values returned by :func:`_safe_tags` for estimators that do
# not define ``_get_tags`` themselves (mirrors sklearn's internal
# ``_DEFAULT_TAGS``).
_DEFAULT_TAGS = {
    'non_deterministic': False,
    'requires_positive_X': False,
    'requires_positive_y': False,
    'X_types': ['2darray'],
    'poor_score': False,
    'no_validation': False,
    'multioutput': False,
    "allow_nan": False,
    'stateless': False,
    'multilabel': False,
    '_skip_test': False,
    '_xfail_checks': False,
    'multioutput_only': False,
    'binary_only': False,
    'requires_fit': True,
    'preserves_dtype': [np.float64],
    'requires_y': False,
    'pairwise': False,
}
def _determine_key_type(key, accept_slice=True):
"""Determine the data type of key."""
err_msg = ("No valid specification of the columns. Only a scalar, list or "
"slice of all integers or all strings, or boolean mask is "
"allowed")
dtype_to_str = {int: 'int', str: 'str', bool: 'bool', np.bool_: 'bool'}
array_dtype_to_str = {'i': 'int', 'u': 'int', 'b': 'bool', 'O': 'str',
'U': 'str', 'S': 'str'}
if key is None:
return None
if isinstance(key, tuple(dtype_to_str.keys())):
try:
return dtype_to_str[type(key)]
except KeyError:
raise ValueError(err_msg)
if isinstance(key, slice):
if not accept_slice:
raise TypeError(
'Only array-like or scalar are supported. '
'A Python slice was given.'
)
if key.start is None and key.stop is None:
return None
key_start_type = _determine_key_type(key.start)
key_stop_type = _determine_key_type(key.stop)
if key_start_type is not None and key_stop_type is not None:
if key_start_type != key_stop_type:
raise ValueError(err_msg)
if key_start_type is not None:
return key_start_type
return key_stop_type
if isinstance(key, (list, tuple)):
unique_key = set(key)
key_type = {_determine_key_type(elt) for elt in unique_key}
if not key_type:
return None
if len(key_type) != 1:
raise ValueError(err_msg)
return key_type.pop()
if hasattr(key, 'dtype'):
try:
return array_dtype_to_str[key.dtype.kind]
except KeyError:
raise ValueError(err_msg)
raise ValueError(err_msg)
def _array_indexing(array, key, key_dtype, axis):
    """Index an array or scipy.sparse consistently across numpy version."""
    # Old numpy and sparse matrices require boolean masks as ndarrays
    old_numpy = np_version < parse_version('1.12')
    if old_numpy or sp.issparse(array):
        if key_dtype == 'bool':
            key = np.asarray(key)
    if isinstance(key, tuple):
        key = list(key)
    if axis == 0:
        return array[key]
    return array[:, key]
def _list_indexing(x_data, key, key_dtype):
"""Index a python list."""
if np.isscalar(key) or isinstance(key, slice):
# key is a slice or a scalar
return x_data[key]
if key_dtype == 'bool':
# key is a boolean array-like
return list(itertools.compress(x_data, key))
# key is a integer array-like of key
return [x_data[idx] for idx in key]
def _pandas_indexing(x_data, key, key_dtype, axis):
"""Index a pandas dataframe or a series."""
if hasattr(key, 'shape'):
key = np.asarray(key)
key = key if key.flags.writeable else key.copy()
elif isinstance(key, tuple):
key = list(key)
# check whether we should index with loc or iloc
indexer = x_data.iloc if key_dtype == 'int' else x_data.loc
return indexer[:, key] if axis else indexer[key]
def _safe_indexing(x_data, indices, *_, axis=0):
    """Return rows, items or columns of x_data using indices.

    Validates the requested axis/key combination, then dispatches to the
    pandas-, array- or list-specific indexing helper depending on the
    container type.
    """
    if indices is None:
        return x_data

    if axis not in (0, 1):
        raise ValueError(
            "'axis' should be either 0 (to index rows) or 1 (to index "
            "column). Got {} instead.".format(axis)
        )

    key_dtype = _determine_key_type(indices)

    if axis == 0 and key_dtype == 'str':
        raise ValueError(
            "String indexing is not supported with 'axis=0'"
        )

    if axis == 1:
        if x_data.ndim != 2:
            raise ValueError(
                "'x_data' should be a 2D NumPy array, 2D sparse matrix or "
                "pandas dataframe when indexing the columns (i.e. 'axis=1'). "
                "Got {} instead with {} dimension(s).".format(type(x_data),
                                                              x_data.ndim)
            )
        if key_dtype == 'str' and not hasattr(x_data, 'loc'):
            raise ValueError(
                "Specifying the columns using strings is only supported "
                "for pandas DataFrames"
            )

    # Dispatch on container type: pandas first, then numpy/sparse, then list
    if hasattr(x_data, "iloc"):
        return _pandas_indexing(x_data, indices, key_dtype, axis=axis)
    if hasattr(x_data, "shape"):
        return _array_indexing(x_data, indices, key_dtype, axis=axis)
    return _list_indexing(x_data, indices, key_dtype)
def _is_arraylike(input_array):
"""Check whether the input is array-like."""
return (hasattr(input_array, '__len__') or
hasattr(input_array, 'shape') or
hasattr(input_array, '__array__'))
def _make_indexable(iterable):
"""Ensure iterable supports indexing or convert to an indexable variant."""
if sp.issparse(iterable):
return iterable.tocsr()
if hasattr(iterable, "__getitem__") or hasattr(iterable, "iloc"):
return iterable
if iterable is None:
return iterable
return np.array(iterable)
def _num_samples(x_data):
"""Return number of samples in array-like x_data."""
message = 'Expected sequence or array-like, got %s' % type(x_data)
if hasattr(x_data, 'fit') and callable(x_data.fit):
# Don't get num_samples from an ensembles length!
raise TypeError(message)
if not hasattr(x_data, '__len__') and not hasattr(x_data, 'shape'):
if hasattr(x_data, '__array__'):
x_data = np.asarray(x_data)
else:
raise TypeError(message)
if hasattr(x_data, 'shape') and x_data.shape is not None:
if len(x_data.shape) == 0:
raise TypeError("Singleton array %r cannot be considered a valid "
"collection." % x_data)
# Check that shape is returning an integer or default to len
# Dask dataframes may not return numeric shape[0] value
if isinstance(x_data.shape[0], numbers.Integral):
return x_data.shape[0]
try:
return len(x_data)
except TypeError as type_error:
raise TypeError(message) from type_error
def _check_fit_params(x_data, fit_params, indices=None):
    """Check and validate the parameters passed during ``fit``."""
    validated = {}
    for (name, value) in fit_params.items():
        matches_samples = (_is_arraylike(value) and
                           _num_samples(value) == _num_samples(x_data))
        if not matches_samples:
            # Non-indexable pass-through (for now for backward-compatibility).
            # https://github.com/scikit-learn/scikit-learn/issues/15805
            validated[name] = value
        else:
            # Any other fit_params should support indexing
            # (e.g. for cross-validation).
            validated[name] = _safe_indexing(_make_indexable(value), indices)
    return validated
def _safe_tags(estimator, key=None):
"""Safely get estimator tags."""
if hasattr(estimator, "_get_tags"):
tags_provider = "_get_tags()"
tags = estimator._get_tags()
elif hasattr(estimator, "_more_tags"):
tags_provider = "_more_tags()"
tags = {**_DEFAULT_TAGS, **estimator._more_tags()}
else:
tags_provider = "_DEFAULT_TAGS"
tags = _DEFAULT_TAGS
if key is not None:
if key not in tags:
raise ValueError(
f"The key {key} is not defined in {tags_provider} for the "
f"class {estimator.__class__.__name__}."
)
return tags[key]
return tags
def _is_pairwise(estimator):
    """Return ``True`` if estimator is pairwise."""
    with warnings.catch_warnings():
        # Accessing the legacy `_pairwise` attribute can emit a
        # FutureWarning in recent sklearn versions; silence it here.
        warnings.filterwarnings('ignore', category=FutureWarning)
        has_attribute = hasattr(estimator, '_pairwise')
        attribute_value = getattr(estimator, '_pairwise', False)
    tag_value = _safe_tags(estimator, key="pairwise")
    if not has_attribute:
        # Use pairwise tag when the attribute is not present
        return tag_value
    if attribute_value != tag_value:
        warnings.warn(
            "_pairwise attribute is inconsistent with tags. Set the "
            "estimator tags of your estimator instead", FutureWarning,
        )
    return attribute_value
def _safe_split(estimator, x_data, y_data, indices, train_indices=None):
    """Create subset of dataset and properly handle kernels.

    For pairwise estimators ``x_data`` is a precomputed square kernel
    matrix and must be sliced along both axes; for all other estimators
    only the sample axis is indexed.

    Parameters
    ----------
    estimator : estimator object
        Estimator used to decide whether kernel slicing is needed.
    x_data : array-like
        Feature matrix or precomputed kernel matrix.
    y_data : array-like or None
        Target values (None e.g. for unsupervised fits).
    indices : array-like
        Sample indices of the subset to extract.
    train_indices : array-like, optional
        For pairwise estimators: column (training-sample) indices of the
        kernel sub-matrix; defaults to ``indices``.

    Returns
    -------
    tuple
        ``(x_subset, y_subset)``; ``y_subset`` is None if ``y_data`` is
        None.
    """
    if _is_pairwise(estimator):
        if not hasattr(x_data, "shape"):
            raise ValueError("Precomputed kernels or affinity matrices have "
                             "to be passed as arrays or sparse matrices.")
        # x_data is a precomputed square kernel matrix
        if x_data.shape[0] != x_data.shape[1]:
            raise ValueError("x_data should be a square kernel matrix")
        if train_indices is None:
            x_subset = x_data[np.ix_(indices, indices)]
        else:
            x_subset = x_data[np.ix_(indices, train_indices)]
    else:
        x_subset = _safe_indexing(x_data, indices)
    if y_data is not None:
        y_subset = _safe_indexing(y_data, indices)
    else:
        y_subset = None
    return (x_subset, y_subset)
def _fit_and_score_weighted(estimator, x_data, y_data, scorer, train, test,
                            parameters, fit_params, error_score=np.nan,
                            sample_weights=None):
    """Expand :func:`sklearn.model_selection._validation._fit_and_score`.

    Fit ``estimator`` on the ``train`` subset and return its (possibly
    sample-weighted) score on the ``test`` subset. In contrast to the
    sklearn original, per-sample weights can be passed for scoring via
    ``sample_weights``.
    """
    # Adjust length of sample weights
    fit_params = fit_params if fit_params is not None else {}
    fit_params = _check_fit_params(x_data, fit_params, train)
    if parameters is not None:
        # clone after setting parameters in case any parameters
        # are estimators (like pipeline steps)
        # because pipeline doesn't clone steps in fit
        cloned_parameters = {}
        for (key, val) in parameters.items():
            cloned_parameters[key] = clone(val, safe=False)
        estimator = estimator.set_params(**cloned_parameters)
    (x_train, y_train) = _safe_split(estimator, x_data, y_data, train)
    (x_test, y_test) = _safe_split(estimator, x_data, y_data, test, train)
    # Only the test fold's weights are needed for scoring
    if sample_weights is not None:
        sample_weights_test = sample_weights[test]
    else:
        sample_weights_test = None
    try:
        if y_train is None:
            estimator.fit(x_train, **fit_params)
        else:
            estimator.fit(x_train, y_train, **fit_params)
    except Exception:
        # Fit failed: either re-raise or fall back to `error_score`
        if error_score == 'raise':
            raise
        if isinstance(error_score, numbers.Number):
            test_score = error_score
            warnings.warn("Estimator fit failed. The score on this train-test "
                          "partition for these parameters will be set to %f. "
                          "Details: \n%s" % (error_score, format_exc()),
                          FitFailedWarning)
        else:
            raise ValueError("error_score must be the string 'raise' or a "
                             "numeric value. (Hint: if using 'raise', please "
                             "make sure that it has been spelled correctly.)")
    else:
        test_score = _score_weighted(estimator, x_test, y_test, scorer,
                                     sample_weights=sample_weights_test)
    return test_score
def _get_fit_parameters(fit_kwargs, steps, cls):
"""Retrieve fit parameters from ``fit_kwargs``."""
params = {name: {} for (name, step) in steps if step is not None}
step_names = list(params.keys())
for (param_name, param_val) in fit_kwargs.items():
param_split = param_name.split('__', 1)
if len(param_split) != 2:
raise ValueError(
f"Fit parameters for {cls} have to be given in the form "
f"'s__p', where 's' is the name of the step and 'p' the name "
f"of the parameter, got '{param_name}'")
try:
params[param_split[0]][param_split[1]] = param_val
except KeyError:
raise ValueError(
f"Expected one of {step_names} for step of fit parameter, got "
f"'{param_split[0]}' for parameter '{param_name}'")
return params
def _score_weighted(estimator, x_test, y_test, scorer, sample_weights=None):
"""Expand :func:`sklearn.model_selection._validation._score`."""
if y_test is None:
score = scorer(estimator, x_test, sample_weight=sample_weights)
else:
score = scorer(estimator, x_test, y_test, sample_weight=sample_weights)
error_msg = ("Scoring must return a number, got %s (%s) instead. "
"(scorer=%s)")
if hasattr(score, 'item'):
with suppress(ValueError):
# e.g. unwrap memmapped scalars
score = score.item()
if not isinstance(score, numbers.Number):
raise ValueError(error_msg % (score, type(score), scorer))
return score
def _split_fit_kwargs(fit_kwargs, train_idx, test_idx):
"""Get split fit kwargs for single CV step."""
fit_kwargs_train = {}
fit_kwargs_test = {}
for (key, val) in fit_kwargs.items():
if 'sample_weight' in key and 'sample_weight_eval_set' not in key:
fit_kwargs_train[key] = deepcopy(val)[train_idx]
fit_kwargs_test[key] = deepcopy(val)[test_idx]
else:
fit_kwargs_train[key] = deepcopy(val)
fit_kwargs_test[key] = deepcopy(val)
return (fit_kwargs_train, fit_kwargs_test)
def _rfe_single_fit(rfe, estimator, x_data, y_data, train, test, scorer,
                    **fit_kwargs):
    """Return the score for a fit across one fold.

    Runs a full recursive feature elimination on the training subset and
    scores every elimination step on the test subset; returns the list
    of per-step scores (``scores_``).
    """
    (x_train, y_train) = _safe_split(estimator, x_data, y_data, train)
    (x_test, y_test) = _safe_split(estimator, x_data, y_data, test, train)
    (fit_kwargs_train, fit_kwargs_test) = _split_fit_kwargs(fit_kwargs, train,
                                                            test)
    # _score_weighted expects the keyword 'sample_weights' (plural)
    if 'sample_weight' in fit_kwargs_test:
        fit_kwargs_test['sample_weights'] = fit_kwargs_test.pop(
            'sample_weight')

    def step_score(estimator, features):
        """Score for a single step in the recursive feature elimination."""
        return _score_weighted(estimator, x_test[:, features], y_test, scorer,
                               **fit_kwargs_test)

    return rfe._fit(x_train, y_train, step_score=step_score,
                    **fit_kwargs_train).scores_
def _map_features(features, support):
"""Map old features indices to new ones using boolean mask."""
feature_mapping = {}
new_idx = 0
for (old_idx, supported) in enumerate(support):
if supported:
val = new_idx
new_idx += 1
else:
val = None
feature_mapping[old_idx] = val
new_features = []
for feature in features:
new_feature = feature_mapping[feature]
if new_feature is not None:
new_features.append(new_feature)
return new_features
def _update_transformers_param(estimator, support):
    """Update ``transformers`` argument of ``ColumnTransformer`` steps.

    After feature elimination, the column indices passed to any
    ``ColumnTransformer`` inside ``estimator`` are remapped so that they
    refer to the reduced feature set described by the boolean mask
    ``support``. The estimator is modified in place via ``set_params``.
    """
    all_params = estimator.get_params()
    params = []
    for key in all_params:
        if key.endswith('transformers'):
            params.append(key)
            if isinstance(estimator, (Pipeline, AdvancedPipeline)):
                # 'transformers' params only make sense on ColumnTransformer
                # steps of a pipeline
                step = estimator.named_steps[key.split('__')[0]]
                if not isinstance(step, ColumnTransformer):
                    raise TypeError(
                        f"Found 'transformers' parameter ('{key}'), but the "
                        f"corresponding pipeline step is not a "
                        f"ColumnTransformer (got '{type(step)}')")
            else:
                raise TypeError(
                    f"Found 'transformers' parameter ('{key}'), but the "
                    f"corresponding estimator is not a Pipeline or "
                    f"AdvancedPipeline")
    new_params = {}
    for param in params:
        new_transformers = []
        for transformer in all_params[param]:
            # Each entry is a (name, transformer, columns) triple; only
            # the column indices need remapping
            new_columns = _map_features(transformer[2], support)
            new_transformers.append(
                (transformer[0], transformer[1], new_columns))
        new_params[param] = new_transformers
    estimator.set_params(**new_params)
def cross_val_score_weighted(estimator, x_data, y_data=None, groups=None,
                             scoring=None, cv=None, n_jobs=None, verbose=0,
                             fit_params=None, pre_dispatch='2*n_jobs',
                             error_score=np.nan, sample_weights=None):
    """Expand :func:`sklearn.model_selection.cross_val_score`.

    Identical to the sklearn original except that per-sample weights
    (``sample_weights``) are honored when scoring each CV fold.

    Returns
    -------
    numpy.ndarray
        Score of each CV fold.
    """
    scorer = check_scoring(estimator, scoring=scoring)
    (x_data, y_data, groups) = indexable(x_data, y_data, groups)
    cv = check_cv(cv, y_data, classifier=is_classifier(estimator))

    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    scores = parallel(
        delayed(_fit_and_score_weighted)(
            clone(estimator), x_data, y_data, scorer, train, test, None,
            fit_params, error_score=error_score, sample_weights=sample_weights)
        for train, test in cv.split(x_data, y_data, groups))
    return np.array(scores)
def get_rfecv_transformer(rfecv_estimator):
    """Get transformer step of RFECV estimator.

    Parameters
    ----------
    rfecv_estimator : RFECV-like
        Fitted RFECV instance providing ``grid_scores_``,
        ``n_features_``, ``ranking_`` and ``support_``.

    Returns
    -------
    FeatureSelectionTransformer
        Transformer applying the feature selection found by the RFECV
        estimator.

    Raises
    ------
    NotFittedError
        If the RFECV instance is not fitted yet.
    """
    try:
        check_is_fitted(rfecv_estimator)
    except NotFittedError as exc:
        # Chain the original sklearn error for easier debugging
        raise NotFittedError(
            "RFECV instance used to initialize FeatureSelectionTransformer "
            "must be fitted") from exc
    transformer = FeatureSelectionTransformer(
        grid_scores=rfecv_estimator.grid_scores_,
        n_features=rfecv_estimator.n_features_,
        ranking=rfecv_estimator.ranking_,
        support=rfecv_estimator.support_,
    )
    return transformer
def perform_efecv(estimator, x_data, y_data, **kwargs):
    """Perform exhaustive feature selection.

    Evaluates the estimator with cross-validation on every non-empty
    subset of the features (2^n - 1 combinations) and refits it on the
    best-scoring subset. ``kwargs`` are forwarded to
    :func:`cross_val_score_weighted`; ``kwargs['fit_params']`` (if
    present) is used for the final fit.

    Returns
    -------
    tuple
        ``(best_estimator, transformer)`` where ``transformer`` is a
        FeatureSelectionTransformer selecting the best feature subset.
    """
    x_data, y_data = check_X_y(
        x_data, y_data, ensure_min_features=2, force_all_finite='allow-nan')
    n_all_features = x_data.shape[1]

    # Iterate over all possible feature combinations
    supports = list(itertools.product([False, True], repeat=n_all_features))
    supports.remove(tuple([False] * n_all_features))
    logger.info(
        "Testing all %i possible feature combinations for exhaustive feature "
        "selection", len(supports))
    grid_scores = []
    for support in supports:
        support = np.array(support)
        features = np.arange(n_all_features)[support]

        # Evaluate estimator on new subset of features
        new_estimator = clone(estimator)
        _update_transformers_param(new_estimator, support)
        scores = cross_val_score_weighted(new_estimator, x_data[:, features],
                                          y_data, **kwargs)
        grid_scores.append(np.mean(scores))
        logger.debug("Fitted estimator with %i features, CV score was %.5f",
                     support.sum(), np.mean(scores))

    # Final parameters
    grid_scores = np.array(grid_scores)
    best_idx = np.argmax(grid_scores)
    support = np.array(supports[best_idx])
    features = np.arange(n_all_features)[support]
    n_features = support.sum()
    ranking = np.where(support, 1, 2)
    transformer = FeatureSelectionTransformer(
        grid_scores=grid_scores, n_features=n_features, ranking=ranking,
        support=support)

    # Get final estimator
    best_estimator = clone(estimator)
    _update_transformers_param(best_estimator, support)
    best_estimator.fit(x_data[:, features], y_data,
                       **kwargs.get('fit_params', {}))
    logger.info("Found optimal score %.5f for %i features",
                grid_scores[best_idx], n_features)
    return (best_estimator, transformer)
class AdvancedPipeline(Pipeline):
    """Expand :class:`sklearn.pipeline.Pipeline`.

    Adds convenience accessors for the final regressor's coefficients /
    feature importances and methods to fit or apply only the
    transformation steps, including the target transformer of the final
    :class:`AdvancedTransformedTargetRegressor` step.
    """

    @property
    def coef_(self):
        """numpy.ndarray: Model coefficients."""
        # Delegate to the final estimator of the pipeline
        return self.steps[-1][1].coef_

    @property
    def feature_importances_(self):
        """numpy.ndarray: Feature importances."""
        # Delegate to the final estimator of the pipeline
        return self.steps[-1][1].feature_importances_

    def _check_final_step(self):
        """Check type of final step of pipeline."""
        final_step = self.steps[-1][1]
        if not isinstance(final_step, AdvancedTransformedTargetRegressor):
            raise TypeError(
                f"Expected estimator of type "
                f"{AdvancedTransformedTargetRegressor} for final step of "
                f"pipeline, got {final_step.__class__}")

    def fit_target_transformer_only(self, y_data, **fit_kwargs):
        """Fit only ``transform`` step of target regressor."""
        self._check_final_step()
        reg = self.steps[-1][1]
        # Extract the fit parameters addressed to the final step
        fit_params = _get_fit_parameters(fit_kwargs, self.steps,
                                         self.__class__)
        reg_fit_params = fit_params[self.steps[-1][0]]
        reg.fit_transformer_only(y_data, **reg_fit_params)

    def fit_transformers_only(self, x_data, y_data, **fit_kwargs):
        """Fit only ``transform`` steps of Pipeline."""
        fit_params = _get_fit_parameters(fit_kwargs, self.steps,
                                         self.__class__)
        return self._fit(x_data, y_data, **fit_params)

    def transform_only(self, x_data):
        """Only perform ``transform`` steps of Pipeline."""
        # Apply every step except the final estimator
        for (_, transformer) in self.steps[:-1]:
            x_data = transformer.transform(x_data)
        return x_data

    def transform_target_only(self, y_data):
        """Only perform ``transform`` steps of target regressor."""
        self._check_final_step()
        reg = self.steps[-1][1]
        if not hasattr(reg, 'transformer_'):
            raise NotFittedError(
                "Transforming target not possible, final regressor is not "
                "fitted yet, call fit() or fit_target_transformer_only() "
                "first")
        # Transformers expect 2D input; restore 1D output afterwards
        if y_data.ndim == 1:
            y_data = y_data.reshape(-1, 1)
        y_trans = reg.transformer_.transform(y_data)
        if y_trans.ndim == 2 and y_trans.shape[1] == 1:
            y_trans = y_trans.squeeze(axis=1)
        return y_trans
class AdvancedRFE(RFE):
    """Expand :class:`sklearn.feature_selection.RFE`.

    In contrast to sklearn's RFE, :meth:`fit` and :meth:`predict` accept
    arbitrary keyword arguments that are forwarded to the wrapped
    estimator, and ``ColumnTransformer`` steps inside the estimator are
    kept consistent while features are eliminated.
    """

    def fit(self, x_data, y_data, **fit_kwargs):
        """Expand :meth:`fit` to accept kwargs."""
        return self._fit(x_data, y_data, **fit_kwargs)

    def _fit(self, x_data, y_data, step_score=None, **fit_kwargs):
        """Expand :meth:`_fit` to accept kwargs."""
        # Parameter step_score controls the calculation of self.scores_
        # step_score is not exposed to users
        # and is used when implementing AdvancedRFECV
        # self.scores_ will not be calculated when calling _fit through fit
        x_data, y_data = check_X_y(x_data, y_data, "csc",
                                   ensure_min_features=2,
                                   force_all_finite=False)

        # Initialization
        n_features = x_data.shape[1]
        if self.n_features_to_select is None:
            n_features_to_select = n_features // 2
        else:
            n_features_to_select = self.n_features_to_select
        if 0.0 < self.step < 1.0:
            step = int(max(1, self.step * n_features))
        else:
            step = int(self.step)
        if step <= 0:
            raise ValueError("Step must be >0")

        support_ = np.ones(n_features, dtype=bool)
        ranking_ = np.ones(n_features, dtype=int)

        if step_score:
            self.scores_ = []

        # Elimination
        while np.sum(support_) > n_features_to_select:
            # Remaining features
            features = np.arange(n_features)[support_]

            # Rank the remaining features
            estimator = clone(self.estimator)
            if self.verbose > 0:
                print("Fitting estimator with %d features." % np.sum(support_))
            _update_transformers_param(estimator, support_)
            estimator.fit(x_data[:, features], y_data, **fit_kwargs)

            # Get coefs (hasattr(estimator, 'coef_') raises a KeyError for
            # XGBRegressor models
            try:
                coefs = estimator.coef_
            except (AttributeError, KeyError):
                coefs = getattr(estimator, 'feature_importances_', None)
            if coefs is None:
                raise RuntimeError("The classifier does not expose "
                                   "'coef_' or 'feature_importances_' "
                                   "attributes")

            # Get ranks
            if coefs.ndim > 1:
                ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
            else:
                ranks = np.argsort(safe_sqr(coefs))

            # Transformer steps that reduce number of features is not supported
            # (Note: the message below is a single concatenated string; a
            # stray comma previously split it into two exception arguments.)
            if len(ranks) != len(features):
                raise NotImplementedError(
                    f"Estimators that contain transforming steps that reduce "
                    f"the number of features are not supported in "
                    f"{self.__class__}, got {len(features):d} features for "
                    f"fit(), but only {len(ranks):d} elements for 'coefs_' / "
                    f"'feature_importances_' are provided. Estimator:\n"
                    f"{estimator}")

            # for sparse case ranks is matrix
            ranks = np.ravel(ranks)

            # Eliminate the worse features
            threshold = min(step, np.sum(support_) - n_features_to_select)

            # Compute step score on the previous selection iteration
            # because 'estimator' must use features
            # that have not been eliminated yet
            if step_score:
                self.scores_.append(step_score(estimator, features))
            support_[features[ranks][:threshold]] = False
            ranking_[np.logical_not(support_)] += 1

        # Set final attributes
        features = np.arange(n_features)[support_]
        self.estimator_ = clone(self.estimator)
        _update_transformers_param(self.estimator_, support_)
        self.estimator_.fit(x_data[:, features], y_data, **fit_kwargs)

        # Compute step score when only n_features_to_select features left
        if step_score:
            self.scores_.append(step_score(self.estimator_, features))
        self.n_features_ = support_.sum()
        self.support_ = support_
        self.ranking_ = ranking_
        return self

    @if_delegate_has_method(delegate='estimator')
    def predict(self, x_data, **predict_kwargs):
        """Expand :meth:`predict()` to accept kwargs."""
        check_is_fitted(self)
        return self.estimator_.predict(self.transform(x_data),
                                       **predict_kwargs)
class AdvancedRFECV(AdvancedRFE):
    """Expand :class:`sklearn.feature_selection.RFECV`.

    Recursive feature elimination with cross-validation that forwards
    arbitrary fit keyword arguments (e.g. sample weights) to the wrapped
    estimator.
    """

    def __init__(self, estimator, step=1, min_features_to_select=1, cv=None,
                 scoring=None, verbose=0, n_jobs=None):
        """Original constructor of :class:`sklearn.feature_selection.RFECV`."""
        self.estimator = estimator
        self.step = step
        self.min_features_to_select = min_features_to_select
        self.cv = cv
        self.scoring = scoring
        self.verbose = verbose
        self.n_jobs = n_jobs

    def fit(self, x_data, y_data, groups=None, **fit_kwargs):
        """Expand :meth:`fit` to accept kwargs.

        Parameters
        ----------
        x_data, y_data : array-like
            Training data and target values.
        groups : array-like, optional
            Group labels for group-aware CV splitters.
        **fit_kwargs
            Forwarded to the wrapped estimator's ``fit``.
        """
        x_data, y_data = check_X_y(
            x_data, y_data, "csr", ensure_min_features=2,
            force_all_finite=False)

        # Initialization
        cv = check_cv(self.cv, y_data,
                      classifier=is_classifier(self.estimator))
        scorer = check_scoring(self.estimator, scoring=self.scoring)
        n_features = x_data.shape[1]

        if 0.0 < self.step < 1.0:
            step = int(max(1, self.step * n_features))
        else:
            step = int(self.step)
        if step <= 0:
            raise ValueError("Step must be >0")

        # Build an AdvancedRFE object, which will evaluate and score each
        # possible feature count, down to self.min_features_to_select
        rfe = AdvancedRFE(estimator=self.estimator,
                          n_features_to_select=self.min_features_to_select,
                          step=self.step, verbose=self.verbose)

        # Determine the number of subsets of features by fitting across
        # the train folds and choosing the "features_to_select" parameter
        # that gives the least averaged error across all folds.

        # Note that joblib raises a non-picklable error for bound methods
        # even if n_jobs is set to 1 with the default multiprocessing
        # backend.
        # This branching is done so that to
        # make sure that user code that sets n_jobs to 1
        # and provides bound methods as scorers is not broken with the
        # addition of n_jobs parameter.
        if effective_n_jobs(self.n_jobs) == 1:
            (parallel, func) = (list, _rfe_single_fit)
        else:
            parallel = Parallel(n_jobs=self.n_jobs)
            func = delayed(_rfe_single_fit)

        scores = parallel(
            func(rfe, self.estimator, x_data, y_data, train, test, scorer,
                 **fit_kwargs)
            for train, test in cv.split(x_data, y_data, groups))

        # Sum per-step scores over folds; the reversed argmax picks the
        # SMALLEST feature count among ties
        scores = np.sum(scores, axis=0)
        scores_rev = scores[::-1]
        argmax_idx = len(scores) - np.argmax(scores_rev) - 1
        n_features_to_select = max(
            n_features - (argmax_idx * step),
            self.min_features_to_select)

        # Re-execute an elimination with best_k over the whole set
        rfe = AdvancedRFE(estimator=self.estimator,
                          n_features_to_select=n_features_to_select,
                          step=self.step, verbose=self.verbose)
        rfe.fit(x_data, y_data, **fit_kwargs)

        # Set final attributes
        self.support_ = rfe.support_
        self.n_features_ = rfe.n_features_
        self.ranking_ = rfe.ranking_
        self.estimator_ = clone(self.estimator)
        _update_transformers_param(self.estimator_, self.support_)
        self.estimator_.fit(self.transform(x_data), y_data, **fit_kwargs)

        # Fixing a normalization error, n is equal to
        # get_n_splits(x_data, y_data) - 1 here, the scores are normalized by
        # get_n_splits(x_data, y_data)
        self.grid_scores_ = scores[::-1] / cv.get_n_splits(x_data, y_data,
                                                           groups)
        return self
class AdvancedTransformedTargetRegressor(TransformedTargetRegressor):
"""Expand :class:`sklearn.compose.TransformedTargetRegressor`."""
    @property
    def coef_(self):
        """numpy.ndarray: Model coefficients."""
        # Delegate to the fitted inner regressor
        return self.regressor_.coef_

    @property
    def feature_importances_(self):
        """numpy.ndarray: Feature importances."""
        # Delegate to the fitted inner regressor
        return self.regressor_.feature_importances_
def fit(self, x_data, y_data, **fit_kwargs):
"""Expand :meth:`fit` to accept kwargs."""
(y_2d,
regressor_kwargs) = self.fit_transformer_only(y_data, **fit_kwargs)
# Transform y and convert back to 1d array if necessary
y_trans = self.transformer_.transform(y_2d)
if y_trans.ndim == 2 and y_trans.shape[1] == 1:
y_trans = y_trans.squeeze(axis=1)
# Perform linear regression if regressor is not given
if self.regressor is None:
self.regressor_ = LinearRegression()
else:
self.regressor_ = clone(self.regressor)
# Fit regressor with kwargs
self.regressor_.fit(x_data, y_trans, **regressor_kwargs)
return self
def fit_transformer_only(self, y_data, **fit_kwargs):
"""Fit only ``transformer`` step."""
y_data = check_array(y_data,
accept_sparse=False,
force_all_finite=True,
ensure_2d=False,
dtype='numeric')
self._training_dim = y_data.ndim
# Process kwargs
(_, regressor_kwargs) = self._get_fit_params(fit_kwargs)
# Transformers are designed to modify X which is 2D, modify y_data
# FIXME: Transformer does NOT use transformer_kwargs
if y_data.ndim == 1:
y_2d = y_data.reshape(-1, 1)
else:
y_2d = y_data
self._fit_transformer(y_2d)
return (y_2d, regressor_kwargs)
def predict(self, x_data, always_return_1d=True, **predict_kwargs):
"""Expand :meth:`predict()` to accept kwargs."""
check_is_fitted(self)
if not hasattr(self, 'regressor_'):
raise NotFittedError(
f"Regressor of {self.__class__} is not fitted yet, call fit() "
f"first")
# Kwargs for returning variance or covariance
if ('return_std' in predict_kwargs and 'return_std' in getfullargspec(
self.regressor_.predict).args):
raise NotImplementedError(
f"Using keyword argument 'return_std' for final regressor "
f"{self.regressor_.__class__} is not supported yet, only "
f"'return_var' is allowed. Expand the regressor to accept "
f"'return_var' instead (see 'esmvaltool/diag_scripts/mlr"
f"/models/gpr_sklearn.py' for an example)")
mlr.check_predict_kwargs(predict_kwargs)
return_var = predict_kwargs.get('return_var', False)
return_cov = predict_kwargs.get('return_cov', False)
# Prediction
prediction = self.regressor_.predict(x_data, **predict_kwargs)
if return_var or return_cov:
pred = prediction[0]
else:
pred = prediction
if pred.ndim == 1:
pred_trans = self.transformer_.inverse_transform(
pred.reshape(-1, 1))
else:
pred_trans = self.transformer_.inverse_transform(pred)
if self._to_be_squeezed(pred_trans, always_return_1d=always_return_1d):
pred_trans = pred_trans.squeeze(axis=1)
if not (return_var or return_cov):
return pred_trans
# Return scaled variance or covariance if desired
err = prediction[1]
if not hasattr(self.transformer_, 'scale_'):
raise NotImplementedError(
f"Transforming of additional prediction output (e.g. by "
f"'return_var' or 'return_cov') is not supported for "
f"transformer {self.transformer_.__class__} yet, the "
f"necessary attribute 'scale_' is missing")
scale = self.transformer_.scale_
if scale is not None:
err *= scale**2
if self._to_be_squeezed(err, always_return_1d=always_return_1d):
err = err.squeeze(axis=1)
return (pred_trans, err)
def _get_fit_params(self, fit_kwargs):
"""Separate ``transformer`` and ``regressor`` kwargs."""
steps = [
('transformer', self.transformer),
('regressor', self.regressor),
]
fit_params = _get_fit_parameters(fit_kwargs, steps, self.__class__)
fit_params.setdefault('transformer', {})
fit_params.setdefault('regressor', {})
# FIXME
if fit_params['transformer']:
raise NotImplementedError(
f"Fit parameters {fit_params["transformer"]} for transformer "
f"{self.transformer.__class__} of {self.__class__} are not "
f"supported at the moment")
return (fit_params['transformer'], fit_params['regressor'])
def _fit_transformer(self, y_data):
"""Check transformer and fit transformer."""
if (self.transformer is not None and
(self.func is not None or self.inverse_func is not None)):
raise ValueError("'transformer' and functions 'func'/"
"'inverse_func' cannot both be set.")
if self.transformer is not None:
self.transformer_ = clone(self.transformer)
else:
if self.func is not None and self.inverse_func is None:
raise ValueError(
"When 'func' is provided, 'inverse_func' must also be "
"provided")
self.transformer_ = FunctionTransformer(
func=self.func, inverse_func=self.inverse_func, validate=True,
check_inverse=self.check_inverse)
self.transformer_.fit(y_data)
if self.check_inverse:
idx_selected = slice(None, None, max(1, y_data.shape[0] // 10))
y_sel = _safe_indexing(y_data, idx_selected)
y_sel_t = self.transformer_.transform(y_sel)
if not np.allclose(y_sel,
self.transformer_.inverse_transform(y_sel_t)):
warnings.warn("The provided functions or transformer are "
"not strictly inverse of each other. If "
"you are sure you want to proceed regardless, "
"set 'check_inverse=False'", UserWarning)
def _to_be_squeezed(self, array, always_return_1d=True):
"""Check if ``array`` should be squeezed or not."""
squeeze = array.ndim == 2 and array.shape[1] == 1
if not always_return_1d:
squeeze = squeeze and self._training_dim == 1
return squeeze
class FeatureSelectionTransformer(BaseEstimator, SelectorMixin):
    """Transformer step of a feature selection estimator.

    Stores the attributes of an already-performed feature selection
    (e.g. of a fitted RFECV instance) so the selection can be applied as a
    regular transformer step.
    """
    def __init__(self, grid_scores, n_features, ranking, support):
        """Initialize feature selection transformer.

        Parameters
        ----------
        grid_scores : numpy.ndarray
            Cross-validation scores of the feature selection.
        n_features : int
            Number of selected features.
        ranking : numpy.ndarray
            Feature ranking (selected features are ranked 1).
        support : numpy.ndarray
            Boolean mask of selected features.
        """
        self.grid_scores = grid_scores
        self.n_features = n_features
        self.ranking = ranking
        self.support = support
    def fit(self, *_, **__):
        """Empty method (no-op); all state is set in ``__init__``."""
        return self
    def _get_support_mask(self):
        """Get support mask (boolean mask of selected features)."""
        return self.support
    def _more_tags(self):
        """Additional estimator tags."""
        # Deep copy so callers cannot mutate the module-level defaults
        more_tags = deepcopy(_DEFAULT_TAGS)
        more_tags['allow_nan'] = True
        return more_tags
"""Custom expansions of :mod:`sklearn` functionalities.
Note
----
This module provides custom expansions of some :mod:`sklearn` classes and
functions which are necessary to fit the purposes for the desired
functionalities of the :ref:`MLR module <api.esmvaltool.diag_scripts.mlr>`. As
long-term goal we would like to include these functionalities to the
:mod:`sklearn` package since we believe these additions might be helpful for
everyone. This module serves as interim solution. To ensure that all features
are properly working this module is also covered by extensive tests.
Parts of this code have been copied from :mod:`sklearn`.
License: BSD 3-Clause License
Copyright (c) 2007-2020 The scikit-learn developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# pylint: disable=arguments-differ
# pylint: disable=attribute-defined-outside-init
# pylint: disable=protected-access
# pylint: disable=super-init-not-called
# pylint: disable=too-many-arguments
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-locals
# pylint: disable=too-many-return-statements
import itertools
import logging
import numbers
import os
import warnings
from contextlib import suppress
from copy import deepcopy
from inspect import getfullargspec
from traceback import format_exc
import numpy as np
import scipy.sparse as sp
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.compose import ColumnTransformer, TransformedTargetRegressor
from sklearn.exceptions import FitFailedWarning, NotFittedError
from sklearn.feature_selection import RFE, SelectorMixin
from sklearn.linear_model import LinearRegression
from sklearn.metrics import check_scoring
from sklearn.model_selection import check_cv
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils import check_array, check_X_y, indexable, safe_sqr
from sklearn.utils.fixes import np_version, parse_version
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.utils.validation import check_is_fitted
from esmvaltool.diag_scripts import mlr
logger = logging.getLogger(os.path.basename(__file__))
# Default estimator tags, used by _safe_tags() as a fallback for estimators
# that implement neither _get_tags() nor _more_tags() (mirrors the defaults
# of sklearn.base.BaseEstimator).
_DEFAULT_TAGS = {
    'non_deterministic': False,
    'requires_positive_X': False,
    'requires_positive_y': False,
    'X_types': ['2darray'],
    'poor_score': False,
    'no_validation': False,
    'multioutput': False,
    "allow_nan": False,
    'stateless': False,
    'multilabel': False,
    '_skip_test': False,
    '_xfail_checks': False,
    'multioutput_only': False,
    'binary_only': False,
    'requires_fit': True,
    'preserves_dtype': [np.float64],
    'requires_y': False,
    'pairwise': False,
}
def _determine_key_type(key, accept_slice=True):
"""Determine the data type of key."""
err_msg = ("No valid specification of the columns. Only a scalar, list or "
"slice of all integers or all strings, or boolean mask is "
"allowed")
dtype_to_str = {int: 'int', str: 'str', bool: 'bool', np.bool_: 'bool'}
array_dtype_to_str = {'i': 'int', 'u': 'int', 'b': 'bool', 'O': 'str',
'U': 'str', 'S': 'str'}
if key is None:
return None
if isinstance(key, tuple(dtype_to_str.keys())):
try:
return dtype_to_str[type(key)]
except KeyError:
raise ValueError(err_msg)
if isinstance(key, slice):
if not accept_slice:
raise TypeError(
'Only array-like or scalar are supported. '
'A Python slice was given.'
)
if key.start is None and key.stop is None:
return None
key_start_type = _determine_key_type(key.start)
key_stop_type = _determine_key_type(key.stop)
if key_start_type is not None and key_stop_type is not None:
if key_start_type != key_stop_type:
raise ValueError(err_msg)
if key_start_type is not None:
return key_start_type
return key_stop_type
if isinstance(key, (list, tuple)):
unique_key = set(key)
key_type = {_determine_key_type(elt) for elt in unique_key}
if not key_type:
return None
if len(key_type) != 1:
raise ValueError(err_msg)
return key_type.pop()
if hasattr(key, 'dtype'):
try:
return array_dtype_to_str[key.dtype.kind]
except KeyError:
raise ValueError(err_msg)
raise ValueError(err_msg)
def _array_indexing(array, key, key_dtype, axis):
    """Index an array or scipy.sparse consistently across numpy version."""
    # Old numpy versions and sparse matrices require boolean masks to be
    # proper ndarrays instead of plain lists
    needs_cast = sp.issparse(array) or np_version < parse_version('1.12')
    if needs_cast and key_dtype == 'bool':
        key = np.asarray(key)
    if isinstance(key, tuple):
        key = list(key)
    if axis == 0:
        return array[key]
    return array[:, key]
def _list_indexing(x_data, key, key_dtype):
"""Index a python list."""
if np.isscalar(key) or isinstance(key, slice):
# key is a slice or a scalar
return x_data[key]
if key_dtype == 'bool':
# key is a boolean array-like
return list(itertools.compress(x_data, key))
# key is a integer array-like of key
return [x_data[idx] for idx in key]
def _pandas_indexing(x_data, key, key_dtype, axis):
"""Index a pandas dataframe or a series."""
if hasattr(key, 'shape'):
key = np.asarray(key)
key = key if key.flags.writeable else key.copy()
elif isinstance(key, tuple):
key = list(key)
# check whether we should index with loc or iloc
indexer = x_data.iloc if key_dtype == 'int' else x_data.loc
return indexer[:, key] if axis else indexer[key]
def _safe_indexing(x_data, indices, *_, axis=0):
    """Return rows, items or columns of x_data using indices."""
    if indices is None:
        return x_data
    if axis not in (0, 1):
        raise ValueError(
            "'axis' should be either 0 (to index rows) or 1 (to index "
            "column). Got {} instead.".format(axis)
        )
    key_kind = _determine_key_type(indices)
    if axis == 0 and key_kind == 'str':
        raise ValueError(
            "String indexing is not supported with 'axis=0'"
        )
    if axis == 1:
        # Column indexing only makes sense for 2D containers
        if x_data.ndim != 2:
            raise ValueError(
                "'x_data' should be a 2D NumPy array, 2D sparse matrix or pandas "
                "dataframe when indexing the columns (i.e. 'axis=1'). "
                "Got {} instead with {} dimension(s).".format(type(x_data),
                                                              x_data.ndim)
            )
        if key_kind == 'str' and not hasattr(x_data, 'loc'):
            raise ValueError(
                "Specifying the columns using strings is only supported for "
                "pandas DataFrames"
            )
    # Dispatch on container type: pandas first, then ndarray/sparse, lists
    if hasattr(x_data, "iloc"):
        return _pandas_indexing(x_data, indices, key_kind, axis=axis)
    if hasattr(x_data, "shape"):
        return _array_indexing(x_data, indices, key_kind, axis=axis)
    return _list_indexing(x_data, indices, key_kind)
def _is_arraylike(input_array):
"""Check whether the input is array-like."""
return (hasattr(input_array, '__len__') or
hasattr(input_array, 'shape') or
hasattr(input_array, '__array__'))
def _make_indexable(iterable):
"""Ensure iterable supports indexing or convert to an indexable variant."""
if sp.issparse(iterable):
return iterable.tocsr()
if hasattr(iterable, "__getitem__") or hasattr(iterable, "iloc"):
return iterable
if iterable is None:
return iterable
return np.array(iterable)
def _num_samples(x_data):
"""Return number of samples in array-like x_data."""
message = 'Expected sequence or array-like, got %s' % type(x_data)
if hasattr(x_data, 'fit') and callable(x_data.fit):
# Don't get num_samples from an ensembles length!
raise TypeError(message)
if not hasattr(x_data, '__len__') and not hasattr(x_data, 'shape'):
if hasattr(x_data, '__array__'):
x_data = np.asarray(x_data)
else:
raise TypeError(message)
if hasattr(x_data, 'shape') and x_data.shape is not None:
if len(x_data.shape) == 0:
raise TypeError("Singleton array %r cannot be considered a valid "
"collection." % x_data)
# Check that shape is returning an integer or default to len
# Dask dataframes may not return numeric shape[0] value
if isinstance(x_data.shape[0], numbers.Integral):
return x_data.shape[0]
try:
return len(x_data)
except TypeError as type_error:
raise TypeError(message) from type_error
def _check_fit_params(x_data, fit_params, indices=None):
    """Check and validate the parameters passed during ``fit``."""
    validated = {}
    for (name, value) in fit_params.items():
        indexable_param = (_is_arraylike(value) and
                           _num_samples(value) == _num_samples(x_data))
        if not indexable_param:
            # Non-indexable pass-through (for now for backward-compatibility).
            # https://github.com/scikit-learn/scikit-learn/issues/15805
            validated[name] = value
        else:
            # Any other fit_params should support indexing
            # (e.g. for cross-validation).
            value = _make_indexable(value)
            validated[name] = _safe_indexing(value, indices)
    return validated
def _safe_tags(estimator, key=None):
    """Safely get estimator tags."""
    # Prefer the estimator's own tag machinery, fall back to defaults
    if hasattr(estimator, "_get_tags"):
        tags = estimator._get_tags()
        tags_provider = "_get_tags()"
    elif hasattr(estimator, "_more_tags"):
        tags = {**_DEFAULT_TAGS, **estimator._more_tags()}
        tags_provider = "_more_tags()"
    else:
        tags = _DEFAULT_TAGS
        tags_provider = "_DEFAULT_TAGS"
    if key is None:
        return tags
    if key not in tags:
        raise ValueError(
            f"The key {key} is not defined in {tags_provider} for the "
            f"class {estimator.__class__.__name__}."
        )
    return tags[key]
def _is_pairwise(estimator):
    """Return ``True`` if estimator is pairwise."""
    with warnings.catch_warnings():
        # Accessing the deprecated _pairwise attribute emits FutureWarning
        warnings.filterwarnings('ignore', category=FutureWarning)
        has_pairwise_attribute = hasattr(estimator, '_pairwise')
        pairwise_attribute = getattr(estimator, '_pairwise', False)
    pairwise_tag = _safe_tags(estimator, key="pairwise")
    if not has_pairwise_attribute:
        # Use pairwise tag when the attribute is not present
        return pairwise_tag
    if pairwise_attribute != pairwise_tag:
        warnings.warn(
            "_pairwise attribute is inconsistent with tags. Set the "
            "estimator tags of your estimator instead", FutureWarning,
        )
    return pairwise_attribute
def _safe_split(estimator, x_data, y_data, indices, train_indices=None):
    """Create subset of dataset and properly handle kernels."""
    if not _is_pairwise(estimator):
        x_subset = _safe_indexing(x_data, indices)
    else:
        # x_data is a precomputed square kernel matrix
        if not hasattr(x_data, "shape"):
            raise ValueError("Precomputed kernels or affinity matrices have "
                             "to be passed as arrays or sparse matrices.")
        if x_data.shape[0] != x_data.shape[1]:
            raise ValueError("x_data should be a square kernel matrix")
        # Columns come from the training fold (if given), rows from 'indices'
        col_indices = indices if train_indices is None else train_indices
        x_subset = x_data[np.ix_(indices, col_indices)]
    y_subset = None if y_data is None else _safe_indexing(y_data, indices)
    return (x_subset, y_subset)
def _fit_and_score_weighted(estimator, x_data, y_data, scorer, train, test,
                            parameters, fit_params, error_score=np.nan,
                            sample_weights=None):
    """Expand :func:`sklearn.model_selection._validation._fit_and_score`.

    Fit a clone-ready ``estimator`` on the ``train`` indices and return its
    score on the ``test`` indices, forwarding ``sample_weights`` (subset to
    the test fold) to the scorer.  Only the test score is returned.
    """
    # Adjust length of sample weights
    fit_params = fit_params if fit_params is not None else {}
    fit_params = _check_fit_params(x_data, fit_params, train)
    if parameters is not None:
        # clone after setting parameters in case any parameters
        # are estimators (like pipeline steps)
        # because pipeline doesn't clone steps in fit
        cloned_parameters = {}
        for (key, val) in parameters.items():
            cloned_parameters[key] = clone(val, safe=False)
        estimator = estimator.set_params(**cloned_parameters)
    (x_train, y_train) = _safe_split(estimator, x_data, y_data, train)
    (x_test, y_test) = _safe_split(estimator, x_data, y_data, test, train)
    # Only the test-fold portion of the sample weights is used for scoring
    if sample_weights is not None:
        sample_weights_test = sample_weights[test]
    else:
        sample_weights_test = None
    try:
        if y_train is None:
            estimator.fit(x_train, **fit_params)
        else:
            estimator.fit(x_train, y_train, **fit_params)
    except Exception:
        # A failed fit either propagates (error_score='raise') or yields the
        # given numeric error_score together with a FitFailedWarning
        if error_score == 'raise':
            raise
        if isinstance(error_score, numbers.Number):
            test_score = error_score
            warnings.warn("Estimator fit failed. The score on this train-test "
                          "partition for these parameters will be set to %f. "
                          "Details: \n%s" % (error_score, format_exc()),
                          FitFailedWarning)
        else:
            raise ValueError("error_score must be the string 'raise' or a "
                             "numeric value. (Hint: if using 'raise', please "
                             "make sure that it has been spelled correctly.)")
    else:
        test_score = _score_weighted(estimator, x_test, y_test, scorer,
                                     sample_weights=sample_weights_test)
    return test_score
def _get_fit_parameters(fit_kwargs, steps, cls):
"""Retrieve fit parameters from ``fit_kwargs``."""
params = {name: {} for (name, step) in steps if step is not None}
step_names = list(params.keys())
for (param_name, param_val) in fit_kwargs.items():
param_split = param_name.split('__', 1)
if len(param_split) != 2:
raise ValueError(
f"Fit parameters for {cls} have to be given in the form "
f"'s__p', where 's' is the name of the step and 'p' the name "
f"of the parameter, got '{param_name}'")
try:
params[param_split[0]][param_split[1]] = param_val
except KeyError:
raise ValueError(
f"Expected one of {step_names} for step of fit parameter, got "
f"'{param_split[0]}' for parameter '{param_name}'")
return params
def _score_weighted(estimator, x_test, y_test, scorer, sample_weights=None):
"""Expand :func:`sklearn.model_selection._validation._score`."""
if y_test is None:
score = scorer(estimator, x_test, sample_weight=sample_weights)
else:
score = scorer(estimator, x_test, y_test, sample_weight=sample_weights)
error_msg = ("Scoring must return a number, got %s (%s) instead. "
"(scorer=%s)")
if hasattr(score, 'item'):
with suppress(ValueError):
# e.g. unwrap memmapped scalars
score = score.item()
if not isinstance(score, numbers.Number):
raise ValueError(error_msg % (score, type(score), scorer))
return score
def _split_fit_kwargs(fit_kwargs, train_idx, test_idx):
"""Get split fit kwargs for single CV step."""
fit_kwargs_train = {}
fit_kwargs_test = {}
for (key, val) in fit_kwargs.items():
if 'sample_weight' in key and 'sample_weight_eval_set' not in key:
fit_kwargs_train[key] = deepcopy(val)[train_idx]
fit_kwargs_test[key] = deepcopy(val)[test_idx]
else:
fit_kwargs_train[key] = deepcopy(val)
fit_kwargs_test[key] = deepcopy(val)
return (fit_kwargs_train, fit_kwargs_test)
def _rfe_single_fit(rfe, estimator, x_data, y_data, train, test, scorer,
                    **fit_kwargs):
    """Return the score for a fit across one fold."""
    (x_train, y_train) = _safe_split(estimator, x_data, y_data, train)
    (x_test, y_test) = _safe_split(estimator, x_data, y_data, test, train)
    (train_kwargs, test_kwargs) = _split_fit_kwargs(fit_kwargs, train, test)
    # _score_weighted() expects the keyword 'sample_weights' (plural)
    if 'sample_weight' in test_kwargs:
        test_kwargs['sample_weights'] = test_kwargs.pop('sample_weight')

    def step_score(estimator, features):
        """Score for a single step in the recursive feature elimination."""
        return _score_weighted(estimator, x_test[:, features], y_test,
                               scorer, **test_kwargs)

    fitted_rfe = rfe._fit(x_train, y_train, step_score=step_score,
                          **train_kwargs)
    return fitted_rfe.scores_
def _map_features(features, support):
"""Map old features indices to new ones using boolean mask."""
feature_mapping = {}
new_idx = 0
for (old_idx, supported) in enumerate(support):
if supported:
val = new_idx
new_idx += 1
else:
val = None
feature_mapping[old_idx] = val
new_features = []
for feature in features:
new_feature = feature_mapping[feature]
if new_feature is not None:
new_features.append(new_feature)
return new_features
def _update_transformers_param(estimator, support):
    """Update ``transformers`` argument of ``ColumnTransformer`` steps."""
    all_params = estimator.get_params()
    transformers_keys = []
    for key in all_params:
        if not key.endswith('transformers'):
            continue
        transformers_keys.append(key)
        # Only ColumnTransformer steps inside (Advanced)Pipelines carry a
        # 'transformers' parameter that has to be remapped
        if not isinstance(estimator, (Pipeline, AdvancedPipeline)):
            raise TypeError(
                f"Found 'transformers' parameter ('{key}'), but the "
                f"corresponding estimator is not a Pipeline or "
                f"AdvancedPipeline")
        step = estimator.named_steps[key.split('__')[0]]
        if not isinstance(step, ColumnTransformer):
            raise TypeError(
                f"Found 'transformers' parameter ('{key}'), but the "
                f"corresponding pipeline step is not a "
                f"ColumnTransformer (got '{type(step)}')")
    new_params = {}
    for key in transformers_keys:
        updated_transformers = []
        for (name, transformer, columns) in all_params[key]:
            # Remap column indices to the reduced feature space
            updated_transformers.append(
                (name, transformer, _map_features(columns, support)))
        new_params[key] = updated_transformers
    estimator.set_params(**new_params)
def cross_val_score_weighted(estimator, x_data, y_data=None, groups=None,
                             scoring=None, cv=None, n_jobs=None, verbose=0,
                             fit_params=None, pre_dispatch='2*n_jobs',
                             error_score=np.nan, sample_weights=None):
    """Expand :func:`sklearn.model_selection.cross_val_score`."""
    scorer = check_scoring(estimator, scoring=scoring)
    (x_data, y_data, groups) = indexable(x_data, y_data, groups)
    cv = check_cv(cv, y_data, classifier=is_classifier(estimator))
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    # Clone the estimator per fold so that all folds are independent and
    # pickle-able
    jobs = (
        delayed(_fit_and_score_weighted)(
            clone(estimator), x_data, y_data, scorer, train, test, None,
            fit_params, error_score=error_score,
            sample_weights=sample_weights)
        for (train, test) in cv.split(x_data, y_data, groups))
    return np.array(parallel(jobs))
def get_rfecv_transformer(rfecv_estimator):
    """Get transformer step of RFECV estimator.

    Parameters
    ----------
    rfecv_estimator
        Fitted RFECV-like estimator exposing ``grid_scores_``,
        ``n_features_``, ``ranking_`` and ``support_``.

    Returns
    -------
    FeatureSelectionTransformer
        Frozen feature selection transformer.

    Raises
    ------
    NotFittedError
        If ``rfecv_estimator`` is not fitted (original exception chained).
    """
    try:
        check_is_fitted(rfecv_estimator)
    except NotFittedError as exc:
        raise NotFittedError(
            "RFECV instance used to initialize FeatureSelectionTransformer "
            "must be fitted") from exc
    transformer = FeatureSelectionTransformer(
        grid_scores=rfecv_estimator.grid_scores_,
        n_features=rfecv_estimator.n_features_,
        ranking=rfecv_estimator.ranking_,
        support=rfecv_estimator.support_,
    )
    return transformer
def perform_efecv(estimator, x_data, y_data, **kwargs):
    """Perform exhaustive feature selection.

    Evaluate ``estimator`` via cross_val_score_weighted() on every non-empty
    subset of features (2^n - 1 candidates) and refit it on the best subset.

    Returns
    -------
    tuple
        ``(best_estimator, transformer)`` where ``transformer`` is a
        :class:`FeatureSelectionTransformer` for the best feature subset.
    """
    x_data, y_data = check_X_y(
        x_data, y_data, ensure_min_features=2, force_all_finite='allow-nan')
    n_all_features = x_data.shape[1]
    # Iterate over all possible feature combinations (minus the empty set)
    supports = list(itertools.product([False, True], repeat=n_all_features))
    supports.remove(tuple([False] * n_all_features))
    logger.info(
        "Testing all %i possible feature combinations for exhaustive feature "
        "selection", len(supports))
    grid_scores = []
    for support in supports:
        support = np.array(support)
        features = np.arange(n_all_features)[support]
        # Evaluate estimator on new subset of features
        new_estimator = clone(estimator)
        _update_transformers_param(new_estimator, support)
        scores = cross_val_score_weighted(new_estimator, x_data[:, features],
                                          y_data, **kwargs)
        grid_scores.append(np.mean(scores))
        logger.debug("Fitted estimator with %i features, CV score was %.5f",
                     support.sum(), np.mean(scores))
    # Final parameters: pick the subset with the highest mean CV score
    grid_scores = np.array(grid_scores)
    best_idx = np.argmax(grid_scores)
    support = np.array(supports[best_idx])
    features = np.arange(n_all_features)[support]
    n_features = support.sum()
    # Selected features are ranked 1, eliminated ones 2
    ranking = np.where(support, 1, 2)
    transformer = FeatureSelectionTransformer(
        grid_scores=grid_scores, n_features=n_features, ranking=ranking,
        support=support)
    # Get final estimator refitted on the best feature subset
    best_estimator = clone(estimator)
    _update_transformers_param(best_estimator, support)
    best_estimator.fit(x_data[:, features], y_data,
                       **kwargs.get('fit_params', {}))
    logger.info("Found optimal score %.5f for %i features",
                grid_scores[best_idx], n_features)
    return (best_estimator, transformer)
class AdvancedPipeline(Pipeline):
    """Expand :class:`sklearn.pipeline.Pipeline`.

    Adds convenience accessors for the final regressor's coefficients and
    methods to run only the transformer steps of the pipeline or of the
    final target regressor.
    """
    @property
    def coef_(self):
        """numpy.ndarray: Model coefficients."""
        return self.steps[-1][1].coef_
    @property
    def feature_importances_(self):
        """numpy.ndarray: Feature importances."""
        return self.steps[-1][1].feature_importances_
    def _check_final_step(self):
        """Check type of final step of pipeline.

        Raises TypeError unless the final step is an
        AdvancedTransformedTargetRegressor.
        """
        final_step = self.steps[-1][1]
        if not isinstance(final_step, AdvancedTransformedTargetRegressor):
            raise TypeError(
                f"Expected estimator of type "
                f"{AdvancedTransformedTargetRegressor} for final step of "
                f"pipeline, got {final_step.__class__}")
    def fit_target_transformer_only(self, y_data, **fit_kwargs):
        """Fit only ``transform`` step of target regressor."""
        self._check_final_step()
        reg = self.steps[-1][1]
        # Extract only the fit parameters addressed to the final step
        fit_params = _get_fit_parameters(fit_kwargs, self.steps,
                                         self.__class__)
        reg_fit_params = fit_params[self.steps[-1][0]]
        reg.fit_transformer_only(y_data, **reg_fit_params)
    def fit_transformers_only(self, x_data, y_data, **fit_kwargs):
        """Fit only ``transform`` steps of Pipeline."""
        fit_params = _get_fit_parameters(fit_kwargs, self.steps,
                                         self.__class__)
        return self._fit(x_data, y_data, **fit_params)
    def transform_only(self, x_data):
        """Only perform ``transform`` steps of Pipeline."""
        # Apply every step except the final estimator
        for (_, transformer) in self.steps[:-1]:
            x_data = transformer.transform(x_data)
        return x_data
    def transform_target_only(self, y_data):
        """Only perform ``transform`` steps of target regressor."""
        self._check_final_step()
        reg = self.steps[-1][1]
        if not hasattr(reg, 'transformer_'):
            raise NotFittedError(
                "Transforming target not possible, final regressor is not "
                "fitted yet, call fit() or fit_target_transformer_only() "
                "first")
        # Transformers expect 2D input; restore 1D shape afterwards
        if y_data.ndim == 1:
            y_data = y_data.reshape(-1, 1)
        y_trans = reg.transformer_.transform(y_data)
        if y_trans.ndim == 2 and y_trans.shape[1] == 1:
            y_trans = y_trans.squeeze(axis=1)
        return y_trans
class AdvancedRFE(RFE):
    """Expand :class:`sklearn.feature_selection.RFE`.

    Adds support for fit parameters (``**fit_kwargs``) and keeps the
    ``transformers`` arguments of ColumnTransformer steps consistent with
    the shrinking feature set via ``_update_transformers_param``.
    """

    def fit(self, x_data, y_data, **fit_kwargs):
        """Expand :meth:`fit` to accept kwargs."""
        return self._fit(x_data, y_data, **fit_kwargs)

    def _fit(self, x_data, y_data, step_score=None, **fit_kwargs):
        """Expand :meth:`_fit` to accept kwargs."""
        # Parameter step_score controls the calculation of self.scores_
        # step_score is not exposed to users
        # and is used when implementing AdvancedRFECV
        # self.scores_ will not be calculated when calling _fit through fit
        x_data, y_data = check_X_y(x_data, y_data, "csc",
                                   ensure_min_features=2,
                                   force_all_finite=False)

        # Initialization
        n_features = x_data.shape[1]
        if self.n_features_to_select is None:
            n_features_to_select = n_features // 2
        else:
            n_features_to_select = self.n_features_to_select
        # A fractional step is interpreted relative to the number of features
        if 0.0 < self.step < 1.0:
            step = int(max(1, self.step * n_features))
        else:
            step = int(self.step)
        if step <= 0:
            raise ValueError("Step must be >0")
        support_ = np.ones(n_features, dtype=bool)
        ranking_ = np.ones(n_features, dtype=int)
        if step_score:
            self.scores_ = []

        # Elimination
        while np.sum(support_) > n_features_to_select:
            # Remaining features
            features = np.arange(n_features)[support_]

            # Rank the remaining features
            estimator = clone(self.estimator)
            if self.verbose > 0:
                print("Fitting estimator with %d features." % np.sum(support_))
            _update_transformers_param(estimator, support_)
            estimator.fit(x_data[:, features], y_data, **fit_kwargs)

            # Get coefs (hasattr(estimator, 'coef_') raises a KeyError for
            # XGBRegressor models
            try:
                coefs = estimator.coef_
            except (AttributeError, KeyError):
                coefs = getattr(estimator, 'feature_importances_', None)
            if coefs is None:
                raise RuntimeError("The classifier does not expose "
                                   "'coef_' or 'feature_importances_' "
                                   "attributes")

            # Get ranks
            if coefs.ndim > 1:
                ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
            else:
                ranks = np.argsort(safe_sqr(coefs))

            # Transformer steps that reduce number of features is not supported
            # Bug fix: a stray trailing comma previously split this message
            # into two separate exception arguments; it is a single string now.
            if len(ranks) != len(features):
                raise NotImplementedError(
                    f"Estimators that contain transforming steps that reduce "
                    f"the number of features are not supported in "
                    f"{self.__class__}, got {len(features):d} features for "
                    f"fit(), but only {len(ranks):d} elements for 'coefs_' / "
                    f"'feature_importances_' are provided. Estimator:\n"
                    f"{estimator}")

            # for sparse case ranks is matrix
            ranks = np.ravel(ranks)

            # Eliminate the worse features
            threshold = min(step, np.sum(support_) - n_features_to_select)

            # Compute step score on the previous selection iteration
            # because 'estimator' must use features
            # that have not been eliminated yet
            if step_score:
                self.scores_.append(step_score(estimator, features))
            support_[features[ranks][:threshold]] = False
            ranking_[np.logical_not(support_)] += 1

        # Set final attributes
        features = np.arange(n_features)[support_]
        self.estimator_ = clone(self.estimator)
        _update_transformers_param(self.estimator_, support_)
        self.estimator_.fit(x_data[:, features], y_data, **fit_kwargs)

        # Compute step score when only n_features_to_select features left
        if step_score:
            self.scores_.append(step_score(self.estimator_, features))
        self.n_features_ = support_.sum()
        self.support_ = support_
        self.ranking_ = ranking_
        return self

    @if_delegate_has_method(delegate='estimator')
    def predict(self, x_data, **predict_kwargs):
        """Expand :meth:`predict()` to accept kwargs."""
        check_is_fitted(self)
        return self.estimator_.predict(self.transform(x_data),
                                       **predict_kwargs)
class AdvancedRFECV(AdvancedRFE):
"""Expand :class:`sklearn.feature_selection.RFECV`."""
    def __init__(self, estimator, step=1, min_features_to_select=1, cv=None,
                 scoring=None, verbose=0, n_jobs=None):
        """Original constructor of :class:`sklearn.feature_selection.RFECV`.

        Parameters are stored unchanged (sklearn convention); all
        validation happens in ``fit()``.
        """
        self.estimator = estimator
        self.step = step
        self.min_features_to_select = min_features_to_select
        self.cv = cv
        self.scoring = scoring
        self.verbose = verbose
        self.n_jobs = n_jobs
def fit(self, x_data, y_data, groups=None, **fit_kwargs):
"""Expand :meth:`fit` to accept kwargs."""
x_data, y_data = check_X_y(
x_data, y_data, "csr", ensure_min_features=2,
force_all_finite=False)
# Initialization
cv = check_cv(self.cv, y_data,
classifier=is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = x_data.shape[1]
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
# Build an AdvancedRFE object, which will evaluate and score each
# possible feature count, down to self.min_features_to_select
rfe = AdvancedRFE(estimator=self.estimator,
n_features_to_select=self.min_features_to_select,
step=self.step, verbose=self.verbose)
# Determine the number of subsets of features by fitting across
# the train folds and choosing the "features_to_select" parameter
# that gives the least averaged error across all folds.
# Note that joblib raises a non-picklable error for bound methods
# even if n_jobs is set to 1 with the default multiprocessing
# backend.
# This branching is done so that to
# make sure that user code that sets n_jobs to 1
# and provides bound methods as scorers is not broken with the
# addition of n_jobs parameter.
if effective_n_jobs(self.n_jobs) == 1:
(parallel, func) = (list, _rfe_single_fit)
else:
parallel = Parallel(n_jobs=self.n_jobs)
func = delayed(_rfe_single_fit)
scores = parallel(
func(rfe, self.estimator, x_data, y_data, train, test, scorer,
**fit_kwargs)
for train, test in cv.split(x_data, y_data, groups))
scores = np.sum(scores, axis=0)
scores_rev = scores[::-1]
argmax_idx = len(scores) - np.argmax(scores_rev) - 1
n_features_to_select = max(
n_features - (argmax_idx * step),
self.min_features_to_select)
# Re-execute an elimination with best_k over the whole set
rfe = AdvancedRFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, verbose=self.verbose)
rfe.fit(x_data, y_data, **fit_kwargs)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
_update_transformers_param(self.estimator_, self.support_)
self.estimator_.fit(self.transform(x_data), y_data, **fit_kwargs)
# Fixing a normalization error, n is equal to
# get_n_splits(x_data, y_data) - 1 here, the scores are normalized by
# get_n_splits(x_data, y_data)
self.grid_scores_ = scores[::-1] / cv.get_n_splits(x_data, y_data,
groups)
return self
class AdvancedTransformedTargetRegressor(TransformedTargetRegressor):
"""Expand :class:`sklearn.compose.TransformedTargetRegressor`."""
@property
def coef_(self):
"""numpy.ndarray: Model coefficients."""
return self.regressor_.coef_
@property
def feature_importances_(self):
"""numpy.ndarray: Feature importances."""
return self.regressor_.feature_importances_
def fit(self, x_data, y_data, **fit_kwargs):
"""Expand :meth:`fit` to accept kwargs."""
(y_2d,
regressor_kwargs) = self.fit_transformer_only(y_data, **fit_kwargs)
# Transform y and convert back to 1d array if necessary
y_trans = self.transformer_.transform(y_2d)
if y_trans.ndim == 2 and y_trans.shape[1] == 1:
y_trans = y_trans.squeeze(axis=1)
# Perform linear regression if regressor is not given
if self.regressor is None:
self.regressor_ = LinearRegression()
else:
self.regressor_ = clone(self.regressor)
# Fit regressor with kwargs
self.regressor_.fit(x_data, y_trans, **regressor_kwargs)
return self
def fit_transformer_only(self, y_data, **fit_kwargs):
"""Fit only ``transformer`` step."""
y_data = check_array(y_data,
accept_sparse=False,
force_all_finite=True,
ensure_2d=False,
dtype='numeric')
self._training_dim = y_data.ndim
# Process kwargs
(_, regressor_kwargs) = self._get_fit_params(fit_kwargs)
# Transformers are designed to modify X which is 2D, modify y_data
# FIXME: Transformer does NOT use transformer_kwargs
if y_data.ndim == 1:
y_2d = y_data.reshape(-1, 1)
else:
y_2d = y_data
self._fit_transformer(y_2d)
return (y_2d, regressor_kwargs)
def predict(self, x_data, always_return_1d=True, **predict_kwargs):
"""Expand :meth:`predict()` to accept kwargs."""
check_is_fitted(self)
if not hasattr(self, 'regressor_'):
raise NotFittedError(
f"Regressor of {self.__class__} is not fitted yet, call fit() "
f"first")
# Kwargs for returning variance or covariance
if ('return_std' in predict_kwargs and 'return_std' in getfullargspec(
self.regressor_.predict).args):
raise NotImplementedError(
f"Using keyword argument 'return_std' for final regressor "
f"{self.regressor_.__class__} is not supported yet, only "
f"'return_var' is allowed. Expand the regressor to accept "
f"'return_var' instead (see 'esmvaltool/diag_scripts/mlr"
f"/models/gpr_sklearn.py' for an example)")
mlr.check_predict_kwargs(predict_kwargs)
return_var = predict_kwargs.get('return_var', False)
return_cov = predict_kwargs.get('return_cov', False)
# Prediction
prediction = self.regressor_.predict(x_data, **predict_kwargs)
if return_var or return_cov:
pred = prediction[0]
else:
pred = prediction
if pred.ndim == 1:
pred_trans = self.transformer_.inverse_transform(
pred.reshape(-1, 1))
else:
pred_trans = self.transformer_.inverse_transform(pred)
if self._to_be_squeezed(pred_trans, always_return_1d=always_return_1d):
pred_trans = pred_trans.squeeze(axis=1)
if not (return_var or return_cov):
return pred_trans
# Return scaled variance or covariance if desired
err = prediction[1]
if not hasattr(self.transformer_, 'scale_'):
raise NotImplementedError(
f"Transforming of additional prediction output (e.g. by "
f"'return_var' or 'return_cov') is not supported for "
f"transformer {self.transformer_.__class__} yet, the "
f"necessary attribute 'scale_' is missing")
scale = self.transformer_.scale_
if scale is not None:
err *= scale**2
if self._to_be_squeezed(err, always_return_1d=always_return_1d):
err = err.squeeze(axis=1)
return (pred_trans, err)
def _get_fit_params(self, fit_kwargs):
"""Separate ``transformer`` and ``regressor`` kwargs."""
steps = [
('transformer', self.transformer),
('regressor', self.regressor),
]
fit_params = _get_fit_parameters(fit_kwargs, steps, self.__class__)
fit_params.setdefault('transformer', {})
fit_params.setdefault('regressor', {})
# FIXME
if fit_params['transformer']:
raise NotImplementedError(
f"Fit parameters {fit_params['transformer']} for transformer "
f"{self.transformer.__class__} of {self.__class__} are not "
f"supported at the moment")
return (fit_params['transformer'], fit_params['regressor'])
def _fit_transformer(self, y_data):
"""Check transformer and fit transformer."""
if (self.transformer is not None and
(self.func is not None or self.inverse_func is not None)):
raise ValueError("'transformer' and functions 'func'/"
"'inverse_func' cannot both be set.")
if self.transformer is not None:
self.transformer_ = clone(self.transformer)
else:
if self.func is not None and self.inverse_func is None:
raise ValueError(
"When 'func' is provided, 'inverse_func' must also be "
"provided")
self.transformer_ = FunctionTransformer(
func=self.func, inverse_func=self.inverse_func, validate=True,
check_inverse=self.check_inverse)
self.transformer_.fit(y_data)
if self.check_inverse:
idx_selected = slice(None, None, max(1, y_data.shape[0] // 10))
y_sel = _safe_indexing(y_data, idx_selected)
y_sel_t = self.transformer_.transform(y_sel)
if not np.allclose(y_sel,
self.transformer_.inverse_transform(y_sel_t)):
warnings.warn("The provided functions or transformer are "
"not strictly inverse of each other. If "
"you are sure you want to proceed regardless, "
"set 'check_inverse=False'", UserWarning)
def _to_be_squeezed(self, array, always_return_1d=True):
"""Check if ``array`` should be squeezed or not."""
squeeze = array.ndim == 2 and array.shape[1] == 1
if not always_return_1d:
squeeze = squeeze and self._training_dim == 1
return squeeze
class FeatureSelectionTransformer(BaseEstimator, SelectorMixin):
"""Transformer step of a feature selection estimator."""
def __init__(self, grid_scores, n_features, ranking, support):
"""Initialize feature selection transformer."""
self.grid_scores = grid_scores
self.n_features = n_features
self.ranking = ranking
self.support = support
def fit(self, *_, **__):
"""Empty method."""
return self
def _get_support_mask(self):
"""Get support mask."""
return self.support
def _more_tags(self):
"""Additional estimator tags."""
more_tags = deepcopy(_DEFAULT_TAGS)
more_tags['allow_nan'] = True
return more_tags
|
import argparse, os
import pandas as pd
def main():
parser = argparse.ArgumentParser(
description="Get evaluation result for sentiment analysis task",
)
parser.add_argument(
"--data",
type=str,
required=True,
default="manifest/slue-voxceleb",
help="Root directory containing voxceleb1_slue data files,"
"This dir should contain audio/ voxceleb1_slue_{finetune,dev,test} folders ",
)
parser.add_argument(
"--pred-data",
type=str,
required=True,
default="datasets/slue-voxceleb/preds/vc1/w2v2-large-lv60k-ft-slue-vc1-12h-lr1e-5-s1-mt800000-8gpu-update280000",
help="Root directory containing voxceleb1_slue data files,"
"This dir should contain audio/ voxceleb1_slue_{finetune,dev,test} folders ",
)
args, _ = parser.parse_known_args()
for subset in ["dev", "test"]:
pred_csv = os.path.join(args.pred_data, f"{subset}.asr-pred.tsv")
data = pd.read_csv(pred_csv, delimiter="\t")
manifest_tsv = os.path.join(args.data, subset) + ".tsv"
output_tsv = os.path.join(args.data, subset) + ".pred.wrd"
try:
fid = open(output_tsv, "w")
for line in open(manifest_tsv).readlines()[1:]:
fileid, _ = line.strip().split("\t")
fileid = (
f"{fileid.split(".flac")[0]}-1.flac" # temp. need to delete future
)
fid.write(f"{list(data.pred_text[data.filename==fileid])[0]}\n")
fid.close()
print(f"Successfully generated file at {output_tsv}")
except:
print(f"something wrong when generating {output_tsv}")
return
if __name__ == "__main__":
main()
| import argparse, os
import pandas as pd
def main():
parser = argparse.ArgumentParser(
description="Get evaluation result for sentiment analysis task",
)
parser.add_argument(
"--data",
type=str,
required=True,
default="manifest/slue-voxceleb",
help="Root directory containing voxceleb1_slue data files,"
"This dir should contain audio/ voxceleb1_slue_{finetune,dev,test} folders ",
)
parser.add_argument(
"--pred-data",
type=str,
required=True,
default="datasets/slue-voxceleb/preds/vc1/w2v2-large-lv60k-ft-slue-vc1-12h-lr1e-5-s1-mt800000-8gpu-update280000",
help="Root directory containing voxceleb1_slue data files,"
"This dir should contain audio/ voxceleb1_slue_{finetune,dev,test} folders ",
)
args, _ = parser.parse_known_args()
for subset in ["dev", "test"]:
pred_csv = os.path.join(args.pred_data, f"{subset}.asr-pred.tsv")
data = pd.read_csv(pred_csv, delimiter="\t")
manifest_tsv = os.path.join(args.data, subset) + ".tsv"
output_tsv = os.path.join(args.data, subset) + ".pred.wrd"
try:
fid = open(output_tsv, "w")
for line in open(manifest_tsv).readlines()[1:]:
fileid, _ = line.strip().split("\t")
fileid = (
f"{fileid.split('.flac')[0]}-1.flac" # temp. need to delete future
)
fid.write(f"{list(data.pred_text[data.filename==fileid])[0]}\n")
fid.close()
print(f"Successfully generated file at {output_tsv}")
except:
print(f"something wrong when generating {output_tsv}")
return
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# run.py
#
# Copyright(c) Exequiel Ceasar Navarrete <esnavarrete1@up.edu.ph>
# Licensed under MIT
# Version 1.0.0
import os
import sys
import cv2
import click
import traceback
from app import create_silhouette, SUBTRACTION_METHODS
# show version
def print_version(ctx, param, value): # pylint: disable=W0613
if not value or ctx.resilient_parsing:
return
click.echo('Version 1.0.0')
ctx.exit()
@click.command()
@click.argument('video', type=click.Path(exists=True))
@click.option('--frame-difference', default=15,
help='Positive/Negative integer value to specify the difference of frames between moving objects.')
@click.option('--method', type=click.Choice(SUBTRACTION_METHODS), default="mog",
help='The method that will be used in removing background.')
@click.option('--multithreaded', default=False, is_flag=True,
help="""
Enables multithreading to improve processing and rendering performance. This is dependent on how much logical CPUs you have on your computer.
""")
@click.option('--show-video/--no-show-video', default=True,
help='Shows video in a window.')
@click.option('--save-to-file', type=click.Path(writable=True), help='Path where the output file should be saved.')
@click.option('--disable-silhouette', default=False, is_flag=True, help="Disable silhouette generation.")
@click.option('--bg-learning-rate', default=None, type=click.FLOAT,
help="Background Subtraction Algorithm learning rate.")
@click.option('--bg-history', default=None, type=click.INT, help="Background Subtraction Algorithm length of history.")
@click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True,
help='Show the version of the program.')
def main(video, frame_difference, method, multithreaded, show_video, save_to_file,
disable_silhouette, bg_learning_rate, bg_history): # pylint: disable=R0915
try:
# read the video twice
normal_video = cv2.VideoCapture(video)
adjusted_video = None
# video again if the silhouette generation is enabled
if disable_silhouette is False:
adjusted_video = cv2.VideoCapture(video)
video_writer = None
if save_to_file is not None and isinstance(save_to_file, str):
splitted = os.path.splitext(save_to_file)
formats_available = ['.mp4', '.avi']
# check if the format is supported
if splitted[1] not in formats_available:
raise ValueError(f"Unsupported format. Supported formats are{", ".join(formats_available)}.")
else:
# get the width and height of the video
width = normal_video.get(cv2.CAP_PROP_FRAME_WIDTH)
height = normal_video.get(cv2.CAP_PROP_FRAME_HEIGHT)
# set default fourcc for mp4
fourcc = 0x21
if splitted[1] == '.avi':
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# create the video writer
video_writer = cv2.VideoWriter(save_to_file, fourcc, 20.0, (int(width), int(height)), False)
# disable multithreading if the number of logical CPUs is less than 2
num_threads = cv2.getNumberOfCPUs()
if num_threads < 2 and multithreaded is True:
multithreaded = False
thread_message = "Cannot run in multithreaded mode. Reverting to single-threaded mode since the "
thread_message += "number of logical CPUs is less than 2."
click.echo(thread_message)
# show notice about performance when running single-threaded mode on methods other than absdiff
if multithreaded is False and method != 'absdiff' and num_threads >= 2:
thread_message = f"Running on single-threaded mode using \"{method}\". If you are experiencing some jank/lag, "
thread_message += "re-run the program with --multithreaded flag present."
click.echo(thread_message)
intro_message = "To quit: press 'ctrl + c' when focused on the command line."
intro_message += " When focused on the video window press, 'q'."
# show message on how to quit
click.echo(intro_message)
# create silhouette of the video
result = create_silhouette(normal_video, adjusted_video,
frame_difference=frame_difference,
method=method,
multithreaded=multithreaded,
debug=show_video,
video_writer=video_writer,
no_silhouette=disable_silhouette,
bg_segm_history=bg_history,
bg_segm_lr=bg_learning_rate)
# show message to the user
if video_writer is not None and result is True:
click.echo(f"File has been saved to: {save_to_file}")
# delete the output file
if video_writer is not None and result is False:
os.remove(save_to_file)
except KeyboardInterrupt:
click.echo("Shutdown requested. Cleaning up resources.")
# delete the output file
if video_writer is not None:
os.remove(save_to_file)
except (Exception, ValueError): # pylint: disable=W0703
traceback.print_exc(file=sys.stdout)
# delete the output file
if video_writer is not None:
os.remove(save_to_file)
# release the resources used
normal_video.release()
if adjusted_video is not None:
adjusted_video.release()
# release the output file writer
if video_writer is not None:
video_writer.release()
# remove existing windows not yet closed
cv2.destroyAllWindows()
# execute the main function
if __name__ == "__main__":
main() # pylint: disable=E1120
| #!/usr/bin/env python
# run.py
#
# Copyright(c) Exequiel Ceasar Navarrete <esnavarrete1@up.edu.ph>
# Licensed under MIT
# Version 1.0.0
import os
import sys
import cv2
import click
import traceback
from app import create_silhouette, SUBTRACTION_METHODS
# show version
def print_version(ctx, param, value): # pylint: disable=W0613
if not value or ctx.resilient_parsing:
return
click.echo('Version 1.0.0')
ctx.exit()
@click.command()
@click.argument('video', type=click.Path(exists=True))
@click.option('--frame-difference', default=15,
help='Positive/Negative integer value to specify the difference of frames between moving objects.')
@click.option('--method', type=click.Choice(SUBTRACTION_METHODS), default="mog",
help='The method that will be used in removing background.')
@click.option('--multithreaded', default=False, is_flag=True,
help="""
Enables multithreading to improve processing and rendering performance. This is dependent on how much logical CPUs you have on your computer.
""")
@click.option('--show-video/--no-show-video', default=True,
help='Shows video in a window.')
@click.option('--save-to-file', type=click.Path(writable=True), help='Path where the output file should be saved.')
@click.option('--disable-silhouette', default=False, is_flag=True, help="Disable silhouette generation.")
@click.option('--bg-learning-rate', default=None, type=click.FLOAT,
help="Background Subtraction Algorithm learning rate.")
@click.option('--bg-history', default=None, type=click.INT, help="Background Subtraction Algorithm length of history.")
@click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True,
help='Show the version of the program.')
def main(video, frame_difference, method, multithreaded, show_video, save_to_file,
disable_silhouette, bg_learning_rate, bg_history): # pylint: disable=R0915
try:
# read the video twice
normal_video = cv2.VideoCapture(video)
adjusted_video = None
# video again if the silhouette generation is enabled
if disable_silhouette is False:
adjusted_video = cv2.VideoCapture(video)
video_writer = None
if save_to_file is not None and isinstance(save_to_file, str):
splitted = os.path.splitext(save_to_file)
formats_available = ['.mp4', '.avi']
# check if the format is supported
if splitted[1] not in formats_available:
raise ValueError(f"Unsupported format. Supported formats are{', '.join(formats_available)}.")
else:
# get the width and height of the video
width = normal_video.get(cv2.CAP_PROP_FRAME_WIDTH)
height = normal_video.get(cv2.CAP_PROP_FRAME_HEIGHT)
# set default fourcc for mp4
fourcc = 0x21
if splitted[1] == '.avi':
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# create the video writer
video_writer = cv2.VideoWriter(save_to_file, fourcc, 20.0, (int(width), int(height)), False)
# disable multithreading if the number of logical CPUs is less than 2
num_threads = cv2.getNumberOfCPUs()
if num_threads < 2 and multithreaded is True:
multithreaded = False
thread_message = "Cannot run in multithreaded mode. Reverting to single-threaded mode since the "
thread_message += "number of logical CPUs is less than 2."
click.echo(thread_message)
# show notice about performance when running single-threaded mode on methods other than absdiff
if multithreaded is False and method != 'absdiff' and num_threads >= 2:
thread_message = f"Running on single-threaded mode using \"{method}\". If you are experiencing some jank/lag, "
thread_message += "re-run the program with --multithreaded flag present."
click.echo(thread_message)
intro_message = "To quit: press 'ctrl + c' when focused on the command line."
intro_message += " When focused on the video window press, 'q'."
# show message on how to quit
click.echo(intro_message)
# create silhouette of the video
result = create_silhouette(normal_video, adjusted_video,
frame_difference=frame_difference,
method=method,
multithreaded=multithreaded,
debug=show_video,
video_writer=video_writer,
no_silhouette=disable_silhouette,
bg_segm_history=bg_history,
bg_segm_lr=bg_learning_rate)
# show message to the user
if video_writer is not None and result is True:
click.echo(f"File has been saved to: {save_to_file}")
# delete the output file
if video_writer is not None and result is False:
os.remove(save_to_file)
except KeyboardInterrupt:
click.echo("Shutdown requested. Cleaning up resources.")
# delete the output file
if video_writer is not None:
os.remove(save_to_file)
except (Exception, ValueError): # pylint: disable=W0703
traceback.print_exc(file=sys.stdout)
# delete the output file
if video_writer is not None:
os.remove(save_to_file)
# release the resources used
normal_video.release()
if adjusted_video is not None:
adjusted_video.release()
# release the output file writer
if video_writer is not None:
video_writer.release()
# remove existing windows not yet closed
cv2.destroyAllWindows()
# execute the main function
if __name__ == "__main__":
main() # pylint: disable=E1120
|
import json
import os
import uuid
from fastapi import Depends, FastAPI, Request, Response
from ..db.session_table import delete_session, get_session, save_session
from .encryption import get_fernet
from .login import current_user, router as login_router, User
from .metadata import router as metadata_router
from .SessionMiddleware import SessionMiddleware
app = FastAPI(
description="Web service for interaction with operational metadata records",
root_path="/ws/secure",
title="Geomagnetism Metadata Web Service",
)
# NOTE: database used for sessions is started by ..app.app,
# which mounts this application at /ws/secure
app.add_middleware(
middleware_class=SessionMiddleware,
delete_session_callback=delete_session,
get_session_callback=get_session,
save_session_callback=save_session,
encryption=get_fernet(
os.getenv("SECRET_KEY", uuid.uuid4().hex),
os.getenv("SECRET_SALT", "secret_salt"),
),
path="/ws/secure",
session_cookie="PHPSESSID",
)
# include login routes to manage user
app.include_router(login_router)
app.include_router(metadata_router)
@app.get("/")
async def index(request: Request, user: User = Depends(current_user)):
"""Route to demo user login."""
if user:
link = f"""
Logged in as {user.email}<br/>
<a href="{request.url_for("logout")}">Logout</a>
"""
else:
link = f'<a href="{request.url_for('login')}">Login</a>'
return Response(
f"""<!doctype html>
<html>
<body>
{link}
<pre>{json.dumps(request.session, indent=2)}</pre>
</body>
</html>""",
media_type="text/html",
headers={"Cache-control": "no-cache"},
)
| import json
import os
import uuid
from fastapi import Depends, FastAPI, Request, Response
from ..db.session_table import delete_session, get_session, save_session
from .encryption import get_fernet
from .login import current_user, router as login_router, User
from .metadata import router as metadata_router
from .SessionMiddleware import SessionMiddleware
app = FastAPI(
description="Web service for interaction with operational metadata records",
root_path="/ws/secure",
title="Geomagnetism Metadata Web Service",
)
# NOTE: database used for sessions is started by ..app.app,
# which mounts this application at /ws/secure
app.add_middleware(
middleware_class=SessionMiddleware,
delete_session_callback=delete_session,
get_session_callback=get_session,
save_session_callback=save_session,
encryption=get_fernet(
os.getenv("SECRET_KEY", uuid.uuid4().hex),
os.getenv("SECRET_SALT", "secret_salt"),
),
path="/ws/secure",
session_cookie="PHPSESSID",
)
# include login routes to manage user
app.include_router(login_router)
app.include_router(metadata_router)
@app.get("/")
async def index(request: Request, user: User = Depends(current_user)):
"""Route to demo user login."""
if user:
link = f"""
Logged in as {user.email}<br/>
<a href="{request.url_for("logout")}">Logout</a>
"""
else:
link = f'<a href="{request.url_for("login")}">Login</a>'
return Response(
f"""<!doctype html>
<html>
<body>
{link}
<pre>{json.dumps(request.session, indent=2)}</pre>
</body>
</html>""",
media_type="text/html",
headers={"Cache-control": "no-cache"},
)
|
import argparse
import collections
import itertools
import json
import os
import re
import subprocess as sp
import sys
DEFAULT_TIME_LIMIT = 2
TEST_RESULT_TYPE = {
'pass': 'OK ',
'file_not_found': " ? ",
'compile_error': 'CE ',
'wrong_answer': 'WA ',
'runtime_error': 'RE ',
'time_limit_exceed': 'TLE',
'memory_limit_exceed': 'MLE',
}
MEMORY_LIMIT = 512
COMPILE_OUT = 'a.out'
RUN_OUT = 'b.out'
ERROR_OUT = 'e.out'
CPP_COMPILE_CMD = ['g++', '-O2', '-static', '-o', COMPILE_OUT, '']
RUN_CMD = {
'cpp': ['/usr/bin/time', '--verbose', './' + COMPILE_OUT],
'py': ['/usr/bin/time', '--verbose', 'python3', '', '<', '']
}
COMPARE_CMD = ['diff', '-Z', RUN_OUT, '']
TEST_HARNESS_CMD = ['python3', '', '', RUN_OUT, '']
FAIL_SCRIPT = 'Test Failed.\n Error Code: {}\n'
class _TestEntry:
__slots__ = {'setid', 'caseid', 'fin', 'fout', 'rtype', 'time', 'memory'}
def __init__(self, setid, caseid):
self.setid = setid
self.caseid = caseid
self.fin = self.fout = self.rtype = self.time = self.memory = None
def __lt__(self, other):
if self.setid != other.setid:
return self.setid < other.setid
return self.caseid < other.caseid
def __repr__(self):
rep = {
'setid': self.setid,
'caseid': self.caseid,
'fin': self.fin,
'fout': self.fout,
'rtype': self.rtype,
'time': self.time,
'memory': self.memory
}
return json.dumps(rep, separators=(',', ':'), indent=2)
def find_test_cases(folder):
files = os.listdir(folder)
entry_dict = {}
for f in files:
name = f
if f.endswith('.in'):
out = False
f = f[:-3]
elif f.endswith('.out'):
out = True
f = f[:-4]
else:
continue
if f in entry_dict:
if not entry_dict[f].fout:
entry_dict[f].fout = os.path.join(folder, name)
else:
entry_dict[f].fin = os.path.join(folder, name)
else:
i = f.find('.')
f = f[i+1:]
if '.' in f:
i = f.find('.')
setid, caseid = f[:i], f[i+1:]
elif '-' in f:
i = f.find('-')
setid, caseid = f[:i], f[i+1:]
elif f.startswith('sample'):
setid, caseid = 'sample', f[6:]
if not caseid:
caseid = 1
elif f.startswith('samp'):
setid, caseid = 'sample', f[4:]
if not caseid:
caseid = 1
elif re.match(r'(\d+)([a-z])', f):
setid, caseid = f[:-1], ord(f[-1]) - ord('a') + 1
else:
setid, caseid = f, 1
setid = int(setid) if setid != 'sample' else 0
caseid = int(caseid) if caseid != 'sample' else 1
entry = _TestEntry(setid, caseid)
if out:
entry.fout = os.path.join(folder, name)
else:
entry.fin = os.path.join(folder, name)
entry_dict[name[:name.rfind('.')]] = entry
entries = sorted(list(entry_dict.values()))
group, setid = [], ''
for entry in entries:
if entry.setid != setid:
group.append({
'setid': entry.setid,
'cases': []
})
setid = entry.setid
group[-1]['cases'].append(entry)
return group
def compile(src):
if src.endswith('.py'):
RUN_CMD['py'][-3] = src
return True, []
CPP_COMPILE_CMD[-1] = src
proc = sp.Popen(CPP_COMPILE_CMD, stderr=sp.PIPE)
err = [line.decode('utf-8').strip() for line in proc.stderr]
success = os.path.isfile(COMPILE_OUT)
return success, err
def run(case_group, lang, limit):
total = passed = 0
cmd = RUN_CMD[lang]
for group in case_group:
setid = 'sample' if group['setid'] == 0 else group['setid']
print(f'Test Set {setid}')
success = True
for case in group['cases']:
if args.early_stop and not success:
print(f'Case #{case.caseid:02d}: -- | Runtime: --, Memory: --')
continue
stats = {}
proc = None
with open(case.fin, 'r') as fin, open(RUN_OUT, 'w') as fout, open(ERROR_OUT, 'w') as ferr:
try:
if lang == 'py':
cmd[-1] = case.fin
proc = sp.run(cmd, stdin=fin, stdout=fout, stderr=ferr, timeout=limit+1)
except sp.TimeoutExpired:
stats['time'] = limit+1
stats['memory'] = -1
with open(ERROR_OUT, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('User time'):
stats['time'] = float(line[line.rfind(':')+1:])
elif line.startswith('Maximum resident set size'):
stats['memory'] = float(line[line.rfind(':')+1:]) / 1000
case.time = stats['time']
case.memory = stats['memory']
if proc is not None and proc.returncode != 0:
if proc.stderr:
print(proc.stderr.decode('utf-8'))
if proc.stdout:
print(proc.stderr.decode('utf-8'))
case.rtype = TEST_RESULT_TYPE['runtime_error']
elif stats['time'] > limit:
case.rtype = TEST_RESULT_TYPE['time_limit_exceed']
elif stats['memory'] > MEMORY_LIMIT:
case.rtype = TEST_RESULT_TYPE['memory_limit_exceed']
else:
ok = test(case)
case.rtype = TEST_RESULT_TYPE['pass'] if ok else TEST_RESULT_TYPE['wrong_answer']
ok = case.rtype == TEST_RESULT_TYPE['pass']
success &= ok
if case.rtype == TEST_RESULT_TYPE['time_limit_exceed']:
time = f'{limit:.2f}+'
memory = ' -- '
else:
time = f'{case.time:.2f}s'
memory = f'{case.memory:.3f}'
print(f'Case #{case.caseid:02d}: {case.rtype} | Runtime: {time}, Memory: {memory}MB')
print()
group['status'] = 'Pass' if success else 'Fail'
if setid != 'sample':
total += 1
if success:
passed += 1
print(f'Total {passed}/{total} test sets passed.')
def test(case):
fout = case.fout
if args.tol != -1:
return _test_num_close(RUN_OUT, fout)
elif args.harness:
return _test_harness(args.harness, case.fin, RUN_OUT, fout)
else:
return _test_diff(fout)
def _test_diff(sol):
COMPARE_CMD[-1] = sol
proc = sp.run(COMPARE_CMD, stdout=sp.PIPE, stderr=sp.PIPE)
if proc.stdout:
print(proc.stdout.decode('utf-8'))
if proc.stderr:
print(proc.stderr.decode('utf-8'))
return not proc.stderr and not proc.stdout
def _test_num_close(ans, sol):
with open(ans, 'r') as fans, open(sol, 'r') as fsol:
for la, ls in itertools.zip_longest(iter(fans), iter(fsol), fillvalue=''):
if not la or not ls:
return False
try:
la, ls = float(la), float(ls)
if abs(la - ls) > args.tol:
return False
except Exception:
return False
return True
def _test_harness(test_file, input_file, ans, sol):
TEST_HARNESS_CMD[1] = test_file
TEST_HARNESS_CMD[2] = input_file
TEST_HARNESS_CMD[-1] = sol
proc = sp.run(TEST_HARNESS_CMD, stdout=sp.PIPE, stderr=sp.PIPE)
if proc.stdout:
print(proc.stdout.decode('utf-8'))
if proc.stderr:
print(proc.stderr.decode('utf-8'))
return not proc.stderr and not proc.stdout
def print_result(case_group):
# not used
total = ok = 0
for group in case_group:
setid = 'Sample' if group['setid'] == 0 else group['setid']
print(f'Test Set {setid}: {group['status']}')
if setid != 'Sample':
total += 1
if (group['status'] == 'Pass'):
ok += 1
for case in group['cases']:
print(f'Case #{case.caseid}: {case.rtype} | Runtime: {case.time:.2f}s, Memory: {case.memory:.3f}MB')
print()
print(f'Total {ok}/{total} test sets passed.')
def cleanup(compile_file=True):
if compile_file and os.path.exists(COMPILE_OUT):
os.remove(COMPILE_OUT)
if os.path.exists(RUN_OUT):
os.remove(RUN_OUT)
if os.path.exists(ERROR_OUT):
os.remove(ERROR_OUT)
def check(src, test_folder, lang, limit):
# cleanup old files
cleanup()
# file src file
if not os.path.isfile(src):
print(f"File {os.path.abspath(src)} does not exist.")
# list test cases
if not os.path.isdir(test_folder):
print(f"Path {os.path.abspath(test_folder)} does not exist or is not a folder.")
case_group = find_test_cases(test_folder)
# compile src if needed
success, error = compile(src)
if error:
for line in error:
print(line)
if not success:
print(FAIL_SCRIPT.format(TEST_RESULT_TYPE['compile_error']))
sys.exit(1)
# run src on test cases
run(case_group, lang, limit)
# cleanup, keep compile files in case manual debugging is needed
cleanup(args.cleanup)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--src', '-s', type=str, help='source program file', required=True)
parser.add_argument('--data', '-d', type=str, help='test folder', required=True)
parser.add_argument('--lang', '-l', type=str, help='language used. Now support cpp and py', required=True)
parser.add_argument('--tol', '-t', type=float, help='Threshold to numeric answers.', required=False, default=-1)
parser.add_argument('--harness', '-H', type=str, help='program to test correctness', required=False)
parser.add_argument('--timeout', '-T', type=float, help='timeout limit on each test case.', required=False, default=2)
parser.add_argument('--cleanup', '-c', type=str, help='delete compiled file after finishing the testing', required=False, default='')
parser.add_argument('--early-stop', '-e', type=str, help='stop current test set when one case if failed', default='')
args = parser.parse_args()
if args.lang not in {'cpp', 'py'}:
print('Language only support cpp or py now.')
sys.exit(1)
if args.tol != -1 and args.harness:
print('tol and harness cannot be set together.')
sys.exit(1)
check(args.src, args.data, args.lang, args.timeout)
| import argparse
import collections
import itertools
import json
import os
import re
import subprocess as sp
import sys
# Default per-case time limit in seconds (the CLI --timeout overrides this).
DEFAULT_TIME_LIMIT = 2
# Verdict codes, padded to 3 characters so the report columns line up.
TEST_RESULT_TYPE = {
    'pass': 'OK ',
    'file_not_found': " ? ",
    'compile_error': 'CE ',
    'wrong_answer': 'WA ',
    'runtime_error': 'RE ',
    'time_limit_exceed': 'TLE',
    'memory_limit_exceed': 'MLE',
}
# Memory limit in MB, compared against the max RSS reported by /usr/bin/time.
MEMORY_LIMIT = 512
# Scratch files: compiled binary, program stdout, /usr/bin/time stderr output.
COMPILE_OUT = 'a.out'
RUN_OUT = 'b.out'
ERROR_OUT = 'e.out'
# The last (empty) slot is patched with the source path by compile().
CPP_COMPILE_CMD = ['g++', '-O2', '-static', '-o', COMPILE_OUT, '']
# Run commands; empty slots are patched in later (script path, input file).
# NOTE(review): '<' is passed as a literal argv element (sp.run uses no shell);
# stdin is actually supplied via sp.run(stdin=...) — confirm it is intentional.
RUN_CMD = {
    'cpp': ['/usr/bin/time', '--verbose', './' + COMPILE_OUT],
    'py': ['/usr/bin/time', '--verbose', 'python3', '', '<', '']
}
# Last slot is the expected-output file, patched in by _test_diff().
COMPARE_CMD = ['diff', '-Z', RUN_OUT, '']
# Slots: harness script, input file, produced output, expected output.
TEST_HARNESS_CMD = ['python3', '', '', RUN_OUT, '']
FAIL_SCRIPT = 'Test Failed.\n Error Code: {}\n'
class _TestEntry:
__slots__ = {'setid', 'caseid', 'fin', 'fout', 'rtype', 'time', 'memory'}
def __init__(self, setid, caseid):
self.setid = setid
self.caseid = caseid
self.fin = self.fout = self.rtype = self.time = self.memory = None
def __lt__(self, other):
if self.setid != other.setid:
return self.setid < other.setid
return self.caseid < other.caseid
def __repr__(self):
rep = {
'setid': self.setid,
'caseid': self.caseid,
'fin': self.fin,
'fout': self.fout,
'rtype': self.rtype,
'time': self.time,
'memory': self.memory
}
return json.dumps(rep, separators=(',', ':'), indent=2)
def find_test_cases(folder):
    """Scan *folder* for `<stem>.in` / `<stem>.out` pairs and group them by set.

    Supported case-name schemes (after dropping an optional "<prefix>." part):
    `<set>.<case>`, `<set>-<case>`, `sample<case>` / `samp<case>`,
    `<set><letter>` (letter maps to case 1, 2, ...), or a bare `<set>`.

    Returns a list of ``{'setid': int, 'cases': [_TestEntry, ...]}`` dicts,
    sorted by set id, with the sample set mapped to set id 0.
    """
    files = os.listdir(folder)
    entry_dict = {}  # case stem -> _TestEntry
    for f in files:
        name = f  # keep the full file name for path building below
        if f.endswith('.in'):
            out = False
            f = f[:-3]
        elif f.endswith('.out'):
            out = True
            f = f[:-4]
        else:
            continue  # unrelated file
        if f in entry_dict:
            # Second file of an existing pair; fill whichever slot is free.
            # NOTE(review): this assigns by "fout not yet set" rather than by
            # extension — relies on exactly one .in and one .out per stem.
            if not entry_dict[f].fout:
                entry_dict[f].fout = os.path.join(folder, name)
            else:
                entry_dict[f].fin = os.path.join(folder, name)
        else:
            # First file of the pair: parse set and case ids from the stem.
            i = f.find('.')
            f = f[i+1:]  # drop a "<prefix>." part (no-op when there is no '.')
            if '.' in f:
                i = f.find('.')
                setid, caseid = f[:i], f[i+1:]
            elif '-' in f:
                i = f.find('-')
                setid, caseid = f[:i], f[i+1:]
            elif f.startswith('sample'):
                setid, caseid = 'sample', f[6:]
                if not caseid:
                    caseid = 1
            elif f.startswith('samp'):
                setid, caseid = 'sample', f[4:]
                if not caseid:
                    caseid = 1
            elif re.match(r'(\d+)([a-z])', f):
                # e.g. "3b" -> set 3, case 2 (letters map to 1-based case ids)
                setid, caseid = f[:-1], ord(f[-1]) - ord('a') + 1
            else:
                setid, caseid = f, 1
            # The sample set sorts first as set 0.
            # NOTE(review): int(setid) raises for non-numeric stems like
            # "foo.in" — assumes set names are numeric or 'sample'.
            setid = int(setid) if setid != 'sample' else 0
            caseid = int(caseid) if caseid != 'sample' else 1
            entry = _TestEntry(setid, caseid)
            if out:
                entry.fout = os.path.join(folder, name)
            else:
                entry.fin = os.path.join(folder, name)
            entry_dict[name[:name.rfind('.')]] = entry
    # Sort entries and bucket consecutive runs sharing the same set id.
    entries = sorted(list(entry_dict.values()))
    group, setid = [], ''
    for entry in entries:
        if entry.setid != setid:
            group.append({
                'setid': entry.setid,
                'cases': []
            })
            setid = entry.setid
        group[-1]['cases'].append(entry)
    return group
def compile(src):
    """Compile *src* when necessary; return ``(success, error_lines)``.

    NOTE: intentionally shadows the built-in ``compile()``.
    """
    if src.endswith('.py'):
        # Python needs no compilation; patch the script path into the
        # module-level run command instead.
        RUN_CMD['py'][-3] = src
        return True, []
    CPP_COMPILE_CMD[-1] = src
    proc = sp.Popen(CPP_COMPILE_CMD, stderr=sp.PIPE)
    # Reading stderr to EOF also waits for g++ to finish writing diagnostics.
    err = [line.decode('utf-8').strip() for line in proc.stderr]
    # Success is judged by the presence of the output binary.
    # NOTE(review): no proc.wait() before the isfile() check — assumes g++
    # closes stderr only at exit (usually true); confirm.
    success = os.path.isfile(COMPILE_OUT)
    return success, err
def run(case_group, lang, limit):
    """Execute the program on every test case and print a verdict per case.

    Reads the module globals RUN_CMD / RUN_OUT / ERROR_OUT and `args`.
    """
    total = passed = 0
    cmd = RUN_CMD[lang]
    for group in case_group:
        setid = 'sample' if group['setid'] == 0 else group['setid']
        print(f'Test Set {setid}')
        success = True
        for case in group['cases']:
            # With --early-stop, skip the rest of a set after one failure.
            if args.early_stop and not success:
                print(f'Case #{case.caseid:02d}: -- | Runtime: --, Memory: --')
                continue
            stats = {}
            proc = None
            with open(case.fin, 'r') as fin, open(RUN_OUT, 'w') as fout, open(ERROR_OUT, 'w') as ferr:
                try:
                    if lang == 'py':
                        cmd[-1] = case.fin
                    proc = sp.run(cmd, stdin=fin, stdout=fout, stderr=ferr, timeout=limit+1)
                except sp.TimeoutExpired:
                    # /usr/bin/time was killed, so no stats were written.
                    stats['time'] = limit+1
                    stats['memory'] = -1
            # Parse the `/usr/bin/time --verbose` report from stderr.
            with open(ERROR_OUT, 'r') as f:
                for line in f:
                    line = line.strip()
                    if line.startswith('User time'):
                        stats['time'] = float(line[line.rfind(':')+1:])
                    elif line.startswith('Maximum resident set size'):
                        stats['memory'] = float(line[line.rfind(':')+1:]) / 1000  # KB -> MB
            case.time = stats['time']
            case.memory = stats['memory']
            if proc is not None and proc.returncode != 0:
                # NOTE(review): both streams were redirected to files above, so
                # proc.stdout/proc.stderr are normally None here — confirm.
                if proc.stderr:
                    print(proc.stderr.decode('utf-8'))
                if proc.stdout:
                    # Fix: the original printed proc.stderr here as well,
                    # so stdout was never shown.
                    print(proc.stdout.decode('utf-8'))
                case.rtype = TEST_RESULT_TYPE['runtime_error']
            elif stats['time'] > limit:
                case.rtype = TEST_RESULT_TYPE['time_limit_exceed']
            elif stats['memory'] > MEMORY_LIMIT:
                case.rtype = TEST_RESULT_TYPE['memory_limit_exceed']
            else:
                ok = test(case)
                case.rtype = TEST_RESULT_TYPE['pass'] if ok else TEST_RESULT_TYPE['wrong_answer']
            ok = case.rtype == TEST_RESULT_TYPE['pass']
            success &= ok
            if case.rtype == TEST_RESULT_TYPE['time_limit_exceed']:
                time = f'{limit:.2f}+'
                memory = ' -- '
            else:
                time = f'{case.time:.2f}s'
                memory = f'{case.memory:.3f}'
            print(f'Case #{case.caseid:02d}: {case.rtype} | Runtime: {time}, Memory: {memory}MB')
        print()
        group['status'] = 'Pass' if success else 'Fail'
        # The sample set does not count toward the pass/fail summary.
        if setid != 'sample':
            total += 1
            if success:
                passed += 1
    print(f'Total {passed}/{total} test sets passed.')
def test(case):
    """Compare the produced output (RUN_OUT) against the expected output.

    The comparison mode is chosen from the global CLI `args`:
    numeric tolerance (--tol), external checker (--harness), or plain diff.
    """
    fout = case.fout
    if args.tol != -1:
        # Numeric comparison with absolute tolerance --tol.
        return _test_num_close(RUN_OUT, fout)
    elif args.harness:
        # Custom checker program supplied via --harness.
        return _test_harness(args.harness, case.fin, RUN_OUT, fout)
    else:
        # Default: textual diff (diff -Z ignores trailing whitespace).
        return _test_diff(fout)
def _test_diff(sol):
    """Diff the produced output against *sol*; True when they match."""
    COMPARE_CMD[-1] = sol
    result = sp.run(COMPARE_CMD, stdout=sp.PIPE, stderr=sp.PIPE)
    out, err = result.stdout, result.stderr
    if out:
        print(out.decode('utf-8'))
    if err:
        print(err.decode('utf-8'))
    # Silent diff (no output on either stream) means the files are equal.
    return not (err or out)
def _test_num_close(ans, sol):
    """Line-by-line float comparison with absolute tolerance ``args.tol``."""
    with open(ans, 'r') as fans, open(sol, 'r') as fsol:
        for got, want in itertools.zip_longest(fans, fsol, fillvalue=''):
            # A missing line on either side (zip_longest fills '') is a fail.
            if not got or not want:
                return False
            try:
                # Non-numeric lines also count as a mismatch.
                if abs(float(got) - float(want)) > args.tol:
                    return False
            except Exception:
                return False
    return True
def _test_harness(test_file, input_file, ans, sol):
    """Run the external checker: ``python3 test_file input_file RUN_OUT sol``.

    The checker signals failure by printing anything; silence means pass.
    NOTE(review): the *ans* parameter is unused — RUN_OUT is baked into
    TEST_HARNESS_CMD; confirm that is intended.
    """
    TEST_HARNESS_CMD[1] = test_file
    TEST_HARNESS_CMD[2] = input_file
    TEST_HARNESS_CMD[-1] = sol
    proc = sp.run(TEST_HARNESS_CMD, stdout=sp.PIPE, stderr=sp.PIPE)
    if proc.stdout:
        print(proc.stdout.decode('utf-8'))
    if proc.stderr:
        print(proc.stderr.decode('utf-8'))
    return not proc.stderr and not proc.stdout
def print_result(case_group):
    """Pretty-print per-case verdicts and a pass/fail summary (currently unused)."""
    total = ok = 0
    for group in case_group:
        label = 'Sample' if group['setid'] == 0 else group['setid']
        print(f'Test Set {label}: {group["status"]}')
        # The sample set is excluded from the summary counters.
        if label != 'Sample':
            total += 1
            ok += 1 if group['status'] == 'Pass' else 0
        for entry in group['cases']:
            print(f'Case #{entry.caseid}: {entry.rtype} | Runtime: {entry.time:.2f}s, Memory: {entry.memory:.3f}MB')
        print()
    print(f'Total {ok}/{total} test sets passed.')
def cleanup(compile_file=True):
    """Delete scratch files; the compiled binary only when *compile_file* is truthy."""
    targets = [RUN_OUT, ERROR_OUT]
    if compile_file:
        targets.insert(0, COMPILE_OUT)
    for path in targets:
        if os.path.exists(path):
            os.remove(path)
def check(src, test_folder, lang, limit):
    """Judge *src* against every test case found under *test_folder*.

    Exits the process with status 1 on missing inputs or compile errors.
    """
    # Remove stale scratch files from a previous run.
    cleanup()
    if not os.path.isfile(src):
        print(f"File {os.path.abspath(src)} does not exist.")
        sys.exit(1)  # fix: previously only printed and fell through
    if not os.path.isdir(test_folder):
        print(f"Path {os.path.abspath(test_folder)} does not exist or is not a folder.")
        sys.exit(1)  # fix: os.listdir() on a missing folder raised an unhandled error
    case_group = find_test_cases(test_folder)
    # Compile src if needed (a no-op for Python sources).
    success, error = compile(src)
    if error:
        for line in error:
            print(line)
    if not success:
        print(FAIL_SCRIPT.format(TEST_RESULT_TYPE['compile_error']))
        sys.exit(1)
    # Run src on every test case and print per-case verdicts.
    run(case_group, lang, limit)
    # Keep the compiled binary unless --cleanup was given (manual debugging).
    cleanup(args.cleanup)
if __name__ == "__main__":
    # CLI entry point: judge --src against the test cases under --data.
    parser = argparse.ArgumentParser()
    parser.add_argument('--src', '-s', type=str, help='source program file', required=True)
    parser.add_argument('--data', '-d', type=str, help='test folder', required=True)
    parser.add_argument('--lang', '-l', type=str, help='language used. Now support cpp and py', required=True)
    parser.add_argument('--tol', '-t', type=float, help='Threshold to numeric answers.', required=False, default=-1)
    parser.add_argument('--harness', '-H', type=str, help='program to test correctness', required=False)
    parser.add_argument('--timeout', '-T', type=float, help='timeout limit on each test case.', required=False, default=2)
    parser.add_argument('--cleanup', '-c', type=str, help='delete compiled file after finishing the testing', required=False, default='')
    parser.add_argument('--early-stop', '-e', type=str, help='stop current test set when one case if failed', default='')
    # NOTE: `args` is read as a module-level global by run(), test() and check().
    args = parser.parse_args()
    if args.lang not in {'cpp', 'py'}:
        print('Language only support cpp or py now.')
        sys.exit(1)
    # --tol and --harness select conflicting result-comparison modes.
    if args.tol != -1 and args.harness:
        print('tol and harness cannot be set together.')
        sys.exit(1)
    check(args.src, args.data, args.lang, args.timeout)
|
"""Load an action in Blender."""
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
from avalon import api, blender
import bpy
import pype.hosts.blender.api.plugin
logger = logging.getLogger("pype").getChild("blender").getChild("load_action")
class BlendActionLoader(pype.hosts.blender.api.plugin.AssetLoader):
"""Load action from a .blend file.
Warning:
Loading the same asset more then once is not properly supported at the
moment.
"""
families = ["action"]
representations = ["blend"]
label = "Link Action"
icon = "code-fork"
color = "orange"
    def process_asset(
        self, context: dict, name: str, namespace: Optional[str] = None,
        options: Optional[Dict] = None
    ) -> Optional[List]:
        """Link the action collection from the library file into the scene.

        Arguments:
            name: Use pre-defined name
            namespace: Use pre-defined namespace
            context: Full parenthood of representation to load
            options: Additional settings dictionary
        """
        libpath = self.fname
        asset = context["asset"]["name"]
        subset = context["subset"]["name"]
        # Collection names: the library-side name and the namespaced scene name.
        lib_container = pype.hosts.blender.api.plugin.asset_name(asset, subset)
        container_name = pype.hosts.blender.api.plugin.asset_name(
            asset, subset, namespace
        )
        # Create a scene collection and register it as an Avalon container.
        container = bpy.data.collections.new(lib_container)
        container.name = container_name
        blender.pipeline.containerise_existing(
            container,
            name,
            namespace,
            context,
            self.__class__.__name__,
        )
        container_metadata = container.get(
            blender.pipeline.AVALON_PROPERTY)
        container_metadata["libpath"] = libpath
        container_metadata["lib_container"] = lib_container
        # Link the collection from the library .blend file into the scene.
        relative = bpy.context.preferences.filepaths.use_relative_paths
        with bpy.data.libraries.load(
            libpath, link=True, relative=relative
        ) as (_, data_to):
            data_to.collections = [lib_container]
        collection = bpy.context.scene.collection
        collection.children.link(bpy.data.collections[lib_container])
        # Make the linked collection local so its action can be edited.
        animation_container = collection.children[lib_container].make_local()
        objects_list = []
        # Link meshes first, then armatures.
        # The armature is unparented for all the non-local meshes,
        # when it is made local.
        for obj in animation_container.objects:
            obj = obj.make_local()
            anim_data = obj.animation_data
            if anim_data is not None and anim_data.action is not None:
                anim_data.action.make_local()
            # Tag each object with the container it belongs to.
            if not obj.get(blender.pipeline.AVALON_PROPERTY):
                obj[blender.pipeline.AVALON_PROPERTY] = dict()
            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
            avalon_info.update({"container_name": container_name})
            objects_list.append(obj)
        animation_container.pop(blender.pipeline.AVALON_PROPERTY)
        # Save the list of objects in the metadata container
        container_metadata["objects"] = objects_list
        bpy.ops.object.select_all(action='DESELECT')
        nodes = list(container.objects)
        nodes.append(container)
        self[:] = nodes
        return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
logger.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container["objectName"]}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in pype.hosts.blender.api.plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
logger.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
logger.info("Library already loaded, not updating...")
return
strips = []
for obj in list(collection_metadata["objects"]):
# Get all the strips that use the action
arm_objs = [
arm for arm in bpy.data.objects if arm.type == 'ARMATURE']
for armature_obj in arm_objs:
if armature_obj.animation_data is not None:
for track in armature_obj.animation_data.nla_tracks:
for strip in track.strips:
if strip.action == obj.animation_data.action:
strips.append(strip)
bpy.data.actions.remove(obj.animation_data.action)
bpy.data.objects.remove(obj)
lib_container = collection_metadata["lib_container"]
bpy.data.collections.remove(bpy.data.collections[lib_container])
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
str(libpath), link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
scene.collection.children.link(bpy.data.collections[lib_container])
anim_container = scene.collection.children[lib_container].make_local()
objects_list = []
# Link meshes first, then armatures.
# The armature is unparented for all the non-local meshes,
# when it is made local.
for obj in anim_container.objects:
obj = obj.make_local()
anim_data = obj.animation_data
if anim_data is not None and anim_data.action is not None:
anim_data.action.make_local()
for strip in strips:
strip.action = anim_data.action
strip.action_frame_end = anim_data.action.frame_range[1]
if not obj.get(blender.pipeline.AVALON_PROPERTY):
obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": collection.name})
objects_list.append(obj)
anim_container.pop(blender.pipeline.AVALON_PROPERTY)
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(representation["_id"])
bpy.ops.object.select_all(action='DESELECT')
    def remove(self, container: Dict) -> bool:
        """Remove an existing container from a Blender scene.

        Arguments:
            container (avalon-core:container-1.0): Container to remove,
                from `host.ls()`.

        Returns:
            bool: Whether the container was deleted.

        Warning:
            No nested collections are supported at the moment!
        """
        collection = bpy.data.collections.get(
            container["objectName"]
        )
        if not collection:
            return False
        assert not (collection.children), (
            "Nested collections are not supported."
        )
        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)
        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]
        for obj in list(objects):
            # Remove every NLA strip that references this object's action
            # before deleting the action itself.
            arm_objs = [
                arm for arm in bpy.data.objects if arm.type == 'ARMATURE']
            for armature_obj in arm_objs:
                if armature_obj.animation_data is not None:
                    for track in armature_obj.animation_data.nla_tracks:
                        for strip in track.strips:
                            if strip.action == obj.animation_data.action:
                                track.strips.remove(strip)
            bpy.data.actions.remove(obj.animation_data.action)
            bpy.data.objects.remove(obj)
        # Drop both the library collection and the container collection.
        bpy.data.collections.remove(bpy.data.collections[lib_container])
        bpy.data.collections.remove(collection)
        return True
| """Load an action in Blender."""
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
from avalon import api, blender
import bpy
import pype.hosts.blender.api.plugin
logger = logging.getLogger("pype").getChild("blender").getChild("load_action")
class BlendActionLoader(pype.hosts.blender.api.plugin.AssetLoader):
"""Load action from a .blend file.
Warning:
Loading the same asset more then once is not properly supported at the
moment.
"""
families = ["action"]
representations = ["blend"]
label = "Link Action"
icon = "code-fork"
color = "orange"
    def process_asset(
        self, context: dict, name: str, namespace: Optional[str] = None,
        options: Optional[Dict] = None
    ) -> Optional[List]:
        """Link the action collection from the library file into the scene.

        Arguments:
            name: Use pre-defined name
            namespace: Use pre-defined namespace
            context: Full parenthood of representation to load
            options: Additional settings dictionary
        """
        libpath = self.fname
        asset = context["asset"]["name"]
        subset = context["subset"]["name"]
        # Collection names: the library-side name and the namespaced scene name.
        lib_container = pype.hosts.blender.api.plugin.asset_name(asset, subset)
        container_name = pype.hosts.blender.api.plugin.asset_name(
            asset, subset, namespace
        )
        # Create a scene collection and register it as an Avalon container.
        container = bpy.data.collections.new(lib_container)
        container.name = container_name
        blender.pipeline.containerise_existing(
            container,
            name,
            namespace,
            context,
            self.__class__.__name__,
        )
        container_metadata = container.get(
            blender.pipeline.AVALON_PROPERTY)
        container_metadata["libpath"] = libpath
        container_metadata["lib_container"] = lib_container
        # Link the collection from the library .blend file into the scene.
        relative = bpy.context.preferences.filepaths.use_relative_paths
        with bpy.data.libraries.load(
            libpath, link=True, relative=relative
        ) as (_, data_to):
            data_to.collections = [lib_container]
        collection = bpy.context.scene.collection
        collection.children.link(bpy.data.collections[lib_container])
        # Make the linked collection local so its action can be edited.
        animation_container = collection.children[lib_container].make_local()
        objects_list = []
        # Link meshes first, then armatures.
        # The armature is unparented for all the non-local meshes,
        # when it is made local.
        for obj in animation_container.objects:
            obj = obj.make_local()
            anim_data = obj.animation_data
            if anim_data is not None and anim_data.action is not None:
                anim_data.action.make_local()
            # Tag each object with the container it belongs to.
            if not obj.get(blender.pipeline.AVALON_PROPERTY):
                obj[blender.pipeline.AVALON_PROPERTY] = dict()
            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
            avalon_info.update({"container_name": container_name})
            objects_list.append(obj)
        animation_container.pop(blender.pipeline.AVALON_PROPERTY)
        # Save the list of objects in the metadata container
        container_metadata["objects"] = objects_list
        bpy.ops.object.select_all(action='DESELECT')
        nodes = list(container.objects)
        nodes.append(container)
        self[:] = nodes
        return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
logger.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in pype.hosts.blender.api.plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
logger.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
logger.info("Library already loaded, not updating...")
return
strips = []
for obj in list(collection_metadata["objects"]):
# Get all the strips that use the action
arm_objs = [
arm for arm in bpy.data.objects if arm.type == 'ARMATURE']
for armature_obj in arm_objs:
if armature_obj.animation_data is not None:
for track in armature_obj.animation_data.nla_tracks:
for strip in track.strips:
if strip.action == obj.animation_data.action:
strips.append(strip)
bpy.data.actions.remove(obj.animation_data.action)
bpy.data.objects.remove(obj)
lib_container = collection_metadata["lib_container"]
bpy.data.collections.remove(bpy.data.collections[lib_container])
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
str(libpath), link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
scene.collection.children.link(bpy.data.collections[lib_container])
anim_container = scene.collection.children[lib_container].make_local()
objects_list = []
# Link meshes first, then armatures.
# The armature is unparented for all the non-local meshes,
# when it is made local.
for obj in anim_container.objects:
obj = obj.make_local()
anim_data = obj.animation_data
if anim_data is not None and anim_data.action is not None:
anim_data.action.make_local()
for strip in strips:
strip.action = anim_data.action
strip.action_frame_end = anim_data.action.frame_range[1]
if not obj.get(blender.pipeline.AVALON_PROPERTY):
obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": collection.name})
objects_list.append(obj)
anim_container.pop(blender.pipeline.AVALON_PROPERTY)
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(representation["_id"])
bpy.ops.object.select_all(action='DESELECT')
    def remove(self, container: Dict) -> bool:
        """Remove an existing container from a Blender scene.

        Arguments:
            container (avalon-core:container-1.0): Container to remove,
                from `host.ls()`.

        Returns:
            bool: Whether the container was deleted.

        Warning:
            No nested collections are supported at the moment!
        """
        collection = bpy.data.collections.get(
            container["objectName"]
        )
        if not collection:
            return False
        assert not (collection.children), (
            "Nested collections are not supported."
        )
        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)
        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]
        for obj in list(objects):
            # Remove every NLA strip that references this object's action
            # before deleting the action itself.
            arm_objs = [
                arm for arm in bpy.data.objects if arm.type == 'ARMATURE']
            for armature_obj in arm_objs:
                if armature_obj.animation_data is not None:
                    for track in armature_obj.animation_data.nla_tracks:
                        for strip in track.strips:
                            if strip.action == obj.animation_data.action:
                                track.strips.remove(strip)
            bpy.data.actions.remove(obj.animation_data.action)
            bpy.data.objects.remove(obj)
        # Drop both the library collection and the container collection.
        bpy.data.collections.remove(bpy.data.collections[lib_container])
        bpy.data.collections.remove(collection)
        return True
|
# PyTorch utils
import logging
import math
import os
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision
try:
import thop # for FLOPS computation
except ImportError:
thop = None
logger = logging.getLogger(__name__)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Context manager making all processes in distributed training wait for the
    local master (rank 0 / -1) to do something first.
    """
    # Non-master ranks block here until rank 0 reaches the second barrier.
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()
    yield
    # Rank 0 arrives here after doing the work, releasing the waiting ranks.
    if local_rank == 0:
        torch.distributed.barrier()
def init_torch_seeds(seed=0):
    # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
    torch.manual_seed(seed)
    # seed 0 => deterministic cuDNN (slower); any other seed => benchmark mode.
    reproducible = seed == 0
    cudnn.benchmark = not reproducible
    cudnn.deterministic = reproducible
def git_describe():
    # Human-readable git description, e.g. v5.0-5-g3e25f1e
    # https://git-scm.com/docs/git-describe
    if not Path('.git').exists():
        return ''
    out = subprocess.check_output('git describe --tags --long --always', shell=True)
    return out.decode('utf-8')[:-1]  # drop the trailing newline
def select_device(device='', batch_size=None):
    """Resolve a device descriptor ('', 'cpu', '0', '0,1,2,3') to a torch.device."""
    s = f'YOLOv5 {git_describe()} torch {torch.__version__} '  # report string
    cpu = device.lower() == 'cpu'
    if cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
    elif device:  # non-cpu device requested
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability
    cuda = not cpu and torch.cuda.is_available()
    if cuda:
        n = torch.cuda.device_count()
        if n > 1 and batch_size:  # check that batch_size is compatible with device_count
            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
        space = ' ' * len(s)
        for i, d in enumerate(device.split(',') if device else range(n)):
            p = torch.cuda.get_device_properties(i)
            # Fix: the original nested double quotes inside a double-quoted
            # f-string — a SyntaxError on Python < 3.12.
            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n"  # bytes to MB
    else:
        s += 'CPU\n'
    #logger.info(s)  # skip a line
    return torch.device('cuda:0' if cuda else 'cpu')
def time_synchronized():
    """Return time.time() after draining pending CUDA work, for accurate timing."""
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # wait for queued kernels before reading the clock
    return time.time()
def profile(x, ops, n=100, device=None):
    """Profile speed and FLOPS of a module or list of modules.

    Example:
        x = torch.randn(16, 3, 640, 640)  # input
        m1 = lambda x: x * torch.sigmoid(x)
        m2 = nn.SiLU()
        profile(x, [m1, m2], n=100)  # profile speed over 100 iterations
    """
    device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    x = x.to(device)
    x.requires_grad = True
    print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
    # Fix: the original nested double quotes inside a double-quoted f-string —
    # a SyntaxError on Python < 3.12.
    print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
    for m in ops if isinstance(ops, list) else [ops]:
        m = m.to(device) if hasattr(m, 'to') else m  # device
        m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m  # type
        dtf, dtb, t = 0., 0., [0., 0., 0.]  # dt forward, backward
        try:
            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPS
        except Exception:  # fix: bare except also swallowed SystemExit/KeyboardInterrupt
            flops = 0
        for _ in range(n):
            t[0] = time_synchronized()
            y = m(x)
            t[1] = time_synchronized()
            try:
                _ = y.sum().backward()
                t[2] = time_synchronized()
            except Exception:  # no backward method
                t[2] = float('nan')
            dtf += (t[1] - t[0]) * 1000 / n  # ms per op forward
            dtb += (t[2] - t[1]) * 1000 / n  # ms per op backward
        s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
        s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
        p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0  # parameters
        print(f'{p:12.4g}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
def is_parallel(model):
    """True when *model* is exactly a DataParallel/DistributedDataParallel wrapper."""
    parallel_types = (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
    # Exact type membership (not isinstance) — subclasses are deliberately excluded.
    return type(model) in parallel_types
def intersect_dicts(da, db, exclude=()):
    """Entries of *da* whose key is in *db* with a matching .shape.

    Keys containing any substring from *exclude* are dropped. Values come
    from *da* (typically used to intersect model state_dicts).
    """
    out = {}
    for key, value in da.items():
        if key not in db or value.shape != db[key].shape:
            continue
        if any(pattern in key for pattern in exclude):
            continue
        out[key] = value
    return out
def initialize_weights(model):
    """Apply the default YOLOv5 init tweaks to every submodule of *model*."""
    for module in model.modules():
        cls = type(module)
        if cls is nn.Conv2d:
            # Keep PyTorch's default conv init (kaiming init intentionally disabled).
            pass
        elif cls is nn.BatchNorm2d:
            module.eps = 1e-3
            module.momentum = 0.03
        elif cls in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6):
            module.inplace = True
def find_modules(model, mclass=nn.Conv2d):
    """Return the indices of layers in model.module_list matching *mclass*."""
    hits = []
    for idx, mod in enumerate(model.module_list):
        if isinstance(mod, mclass):
            hits.append(idx)
    return hits
def sparsity(model):
    """Global fraction of zero-valued parameters in *model*."""
    n_total, n_zero = 0., 0.
    for p in model.parameters():
        n_total += p.numel()
        n_zero += (p == 0).sum()
    return n_zero / n_total
def prune(model, amount=0.3):
    # Prune model to requested global sparsity (L1-unstructured, conv weights only).
    import torch.nn.utils.prune as prune  # local import; shadows this function's own name inside the body
    print('Pruning model... ', end='')
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            prune.l1_unstructured(m, name='weight', amount=amount)  # prune
            prune.remove(m, 'weight')  # make permanent (bakes the mask into the weight)
    print(' %.3g global sparsity' % sparsity(model))
def fuse_conv_and_bn(conv, bn):
    """Fuse a Conv2d and its following BatchNorm2d into a single Conv2d.

    https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    Uses the BN running statistics, so the result is valid for inference only.
    """
    fusedconv = nn.Conv2d(conv.in_channels,
                          conv.out_channels,
                          kernel_size=conv.kernel_size,
                          stride=conv.stride,
                          padding=conv.padding,
                          groups=conv.groups,
                          bias=True).requires_grad_(False).to(conv.weight.device)
    # prepare filters: W_fused = diag(gamma / sqrt(var + eps)) @ W_conv
    w_conv = conv.weight.clone().view(conv.out_channels, -1)
    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
    # prepare spatial bias: b_fused = W_bn @ b_conv + (beta - gamma*mean/sqrt(var+eps))
    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
    return fusedconv
def model_info(model, verbose=False, img_size=640):
    """Report model statistics. img_size may be int or list, i.e. 640 or [640, 320]."""
    n_p = sum(x.numel() for x in model.parameters())  # number parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
    if verbose:
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
    try:  # FLOPS estimate via thop, best-effort
        from thop import profile
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
        img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device)  # input
        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPS
        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]  # expand if int/float
        fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride)  # 640x640 GFLOPS
    except Exception:  # fix: '(ImportError, Exception)' was redundant — Exception already covers ImportError
        fs = ''
    #logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
def load_classifier(name='resnet101', n=2):
    # Loads a pretrained torchvision model reshaped to n-class output.
    # NOTE: downloads pretrained weights on first use (network access required).
    model = torchvision.models.__dict__[name](pretrained=True)
    # ResNet model properties
    # input_size = [3, 224, 224]
    # input_space = 'RGB'
    # input_range = [0, 1]
    # mean = [0.485, 0.456, 0.406]
    # std = [0.229, 0.224, 0.225]
    # Reshape output to n classes: replace the final fully-connected layer's
    # parameters in place (zero-initialized), keeping the input feature width.
    filters = model.fc.weight.shape[1]
    model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
    model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
    model.fc.out_features = n
    return model
def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
    """Scale an NCHW image batch by *ratio*; pad H/W to gs-multiples unless same_shape."""
    if ratio == 1.0:
        return img
    h, w = img.shape[2:]
    new_size = (int(h * ratio), int(w * ratio))
    img = F.interpolate(img, size=new_size, mode='bilinear', align_corners=False)  # resize
    if not same_shape:
        # Round the padded target up to the next multiple of the grid size.
        h, w = (math.ceil(dim * ratio / gs) * gs for dim in (h, w))
    return F.pad(img, [0, w - new_size[1], 0, h - new_size[0]], value=0.447)  # value = imagenet mean
def copy_attr(a, b, include=(), exclude=()):
    """Copy instance attributes from b onto a; optional include whitelist, exclude blacklist.

    Private names (leading underscore) are always skipped.
    """
    for key, value in b.__dict__.items():
        wanted = not include or key in include
        if wanted and not key.startswith('_') and key not in exclude:
            setattr(a, key, value)
class ModelEMA:
    """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
    Keep a moving average of everything in the model state_dict (parameters and buffers).
    This is intended to allow functionality like
    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    A smoothed version of the weights is necessary for some training schemes to perform well.
    This class is sensitive where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
    """

    def __init__(self, model, decay=0.9999, updates=0):
        # Create EMA
        # deepcopy so EMA weights evolve independently of the live model; unwrap DP/DDP first
        self.ema = deepcopy(model.module if is_parallel(model) else model).eval()  # FP32 EMA
        # if next(model.parameters()).device.type != 'cpu':
        #     self.ema.half()  # FP16 EMA
        self.updates = updates  # number of EMA updates
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)
        for p in self.ema.parameters():
            p.requires_grad_(False)  # EMA weights are never trained directly

    def update(self, model):
        # Update EMA parameters
        with torch.no_grad():
            self.updates += 1
            d = self.decay(self.updates)

            msd = model.module.state_dict() if is_parallel(model) else model.state_dict()  # model state_dict
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:
                    # in-place blend: ema = d * ema + (1 - d) * model
                    v *= d
                    v += (1. - d) * msd[k].detach()

    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
        # Update EMA attributes (non-tensor metadata is copied verbatim, not averaged)
        copy_attr(self.ema, model, include, exclude)
| # PyTorch utils
import logging
import math
import os
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision
try:
import thop # for FLOPS computation
except ImportError:
thop = None
logger = logging.getLogger(__name__)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Decorator to make all processes in distributed training wait for each local_master to do something.
    """
    # Worker ranks (> 0) block here until rank 0 reaches its barrier below,
    # so rank 0 runs the guarded block (e.g. dataset download/caching) first.
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()
    yield
    # Rank 0 hits its barrier only after finishing the guarded block, releasing the workers.
    # local_rank == -1 (non-distributed run) never touches a barrier.
    if local_rank == 0:
        torch.distributed.barrier()
def init_torch_seeds(seed=0):
    """Seed torch and pick the cuDNN speed/reproducibility trade-off.

    seed == 0 favours reproducibility (deterministic kernels, no autotune);
    any other seed favours speed (benchmark autotuning on).
    https://pytorch.org/docs/stable/notes/randomness.html
    """
    torch.manual_seed(seed)
    reproducible = seed == 0
    cudnn.benchmark = not reproducible
    cudnn.deterministic = reproducible
def git_describe():
    """Return a human-readable git description (e.g. v5.0-5-g3e25f1e), or '' outside a repo.

    https://git-scm.com/docs/git-describe
    """
    if not Path('.git').exists():
        return ''
    out = subprocess.check_output('git describe --tags --long --always', shell=True)
    return out.decode('utf-8')[:-1]  # strip trailing newline
def select_device(device='', batch_size=None):
    # device = 'cpu' or '0' or '0,1,2,3'
    # Returns a torch.device; side effect: sets CUDA_VISIBLE_DEVICES to honour the request.
    s = f'YOLOv5 {git_describe()} torch {torch.__version__} '  # string
    cpu = device.lower() == 'cpu'
    if cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
    elif device:  # non-cpu device requested
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability

    cuda = not cpu and torch.cuda.is_available()
    if cuda:
        n = torch.cuda.device_count()
        if n > 1 and batch_size:  # check that batch_size is compatible with device_count
            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
        space = ' ' * len(s)  # align multi-GPU lines under the banner
        for i, d in enumerate(device.split(',') if device else range(n)):
            p = torch.cuda.get_device_properties(i)
            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n"  # bytes to MB
    else:
        s += 'CPU\n'

    #logger.info(s)  # skip a line
    # Always device 0 after CUDA_VISIBLE_DEVICES remapping
    return torch.device('cuda:0' if cuda else 'cpu')
def time_synchronized():
    """Wall-clock time that is accurate for GPU work: flush pending CUDA kernels first."""
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # CUDA calls are async; wait so timings reflect real work
    return time.time()
def profile(x, ops, n=100, device=None):
    """Benchmark forward/backward speed (and FLOPS via thop, if installed) of module(s) `ops` on input `x`.

    Example:
        x = torch.randn(16, 3, 640, 640)        # input
        m1 = lambda x: x * torch.sigmoid(x)
        m2 = nn.SiLU()
        profile(x, [m1, m2], n=100)             # profile speed over 100 iterations
    """
    device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    x = x.to(device)
    x.requires_grad = True  # so backward timing is possible
    print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
    print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
    for m in ops if isinstance(ops, list) else [ops]:
        m = m.to(device) if hasattr(m, 'to') else m  # move module when possible (lambdas have no .to)
        m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m  # match dtype
        dtf, dtb, t = 0., 0., [0., 0., 0.]  # forward ms, backward ms, timestamps
        try:
            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPS
        except Exception:  # was a bare `except:` -- FLOPS is best-effort (thop may be None or op unsupported)
            flops = 0
        for _ in range(n):
            t[0] = time_synchronized()
            y = m(x)
            t[1] = time_synchronized()
            try:
                _ = y.sum().backward()
                t[2] = time_synchronized()
            except Exception:  # no backward method (was a bare `except:`)
                t[2] = float('nan')
            dtf += (t[1] - t[0]) * 1000 / n  # ms per op forward
            dtb += (t[2] - t[1]) * 1000 / n  # ms per op backward
        s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
        s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
        p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0  # parameter count
        print(f'{p:12.4g}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
def is_parallel(model):
    """True when `model` is wrapped in DataParallel or DistributedDataParallel."""
    parallel_types = (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
    return type(model) in parallel_types
def intersect_dicts(da, db, exclude=()):
    """Keys present in both dicts with matching tensor shapes and no exclude substring; values taken from da."""
    out = {}
    for k, v in da.items():
        if k not in db or any(pattern in k for pattern in exclude):
            continue
        if v.shape == db[k].shape:
            out[k] = v
    return out
def initialize_weights(model):
    """Apply YOLOv5 module defaults: tuned BatchNorm eps/momentum, in-place activations.

    Conv2d layers are intentionally left at PyTorch's default (kaiming) init.
    """
    for module in model.modules():
        kind = type(module)
        if kind is nn.BatchNorm2d:
            module.eps = 1e-3
            module.momentum = 0.03
        elif kind in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6):
            module.inplace = True  # save activation memory
def find_modules(model, mclass=nn.Conv2d):
    """Indices into model.module_list whose entries are instances of `mclass`."""
    return [idx for idx, layer in enumerate(model.module_list) if isinstance(layer, mclass)]
def sparsity(model):
    """Fraction of model parameters that are exactly zero (global sparsity, returned as a tensor)."""
    total = sum(p.numel() for p in model.parameters())
    zeros = sum((p == 0).sum() for p in model.parameters())
    return zeros / total
def prune(model, amount=0.3):
    """Prune all Conv2d weights to the requested global sparsity (L1 unstructured, made permanent)."""
    # Alias the import: the original `import ... as prune` shadowed this function's own name.
    import torch.nn.utils.prune as torch_prune
    print('Pruning model... ', end='')
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            torch_prune.l1_unstructured(m, name='weight', amount=amount)  # zero smallest-|w| fraction
            torch_prune.remove(m, 'weight')  # fold the pruning mask back into the weight (permanent)
    print(' %.3g global sparsity' % sparsity(model))
def fuse_conv_and_bn(conv, bn):
    """Fold a BatchNorm2d into the preceding Conv2d, returning a single equivalent conv.

    https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    """
    fused = nn.Conv2d(conv.in_channels,
                      conv.out_channels,
                      kernel_size=conv.kernel_size,
                      stride=conv.stride,
                      padding=conv.padding,
                      groups=conv.groups,
                      bias=True).requires_grad_(False).to(conv.weight.device)

    # Fold BN scale into the weights: W' = diag(gamma / sqrt(var + eps)) @ W
    scale = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    conv_w = conv.weight.clone().view(conv.out_channels, -1)
    fused.weight.copy_(torch.mm(scale, conv_w).view(fused.weight.size()))

    # Fold BN shift into the bias: b' = gamma * (b - mean) / sqrt(var + eps) + beta
    conv_b = conv.bias if conv.bias is not None else torch.zeros(conv.weight.size(0), device=conv.weight.device)
    bn_b = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fused.bias.copy_(torch.mm(scale, conv_b.reshape(-1, 1)).reshape(-1) + bn_b)

    return fused
def model_info(model, verbose=False, img_size=640):
    """Print a parameter/gradient summary of `model`.

    img_size may be int or list, i.e. img_size=640 or img_size=[640, 320].
    FLOPS are computed only when the optional `thop` package is importable.
    """
    n_p = sum(x.numel() for x in model.parameters())  # number parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
    if verbose:
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))

    try:  # FLOPS
        from thop import profile
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
        img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device)  # input
        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPS
        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]  # expand if int/float
        fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride)  # 640x640 GFLOPS
    except Exception:  # was `except (ImportError, Exception)` -- ImportError is already an Exception
        fs = ''  # FLOPS reporting is best-effort

    #logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
def load_classifier(name='resnet101', n=2):
    # Loads a pretrained model reshaped to n-class output
    # `name` must be an attribute of torchvision.models (e.g. 'resnet101', 'resnet50')
    model = torchvision.models.__dict__[name](pretrained=True)

    # ResNet model properties
    # input_size = [3, 224, 224]
    # input_space = 'RGB'
    # input_range = [0, 1]
    # mean = [0.485, 0.456, 0.406]
    # std = [0.229, 0.224, 0.225]

    # Reshape output to n classes
    # New head is zero-initialized; pretrained backbone weights are kept as-is
    filters = model.fc.weight.shape[1]
    model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
    model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
    model.fc.out_features = n
    return model
def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
    # scales img(bs,3,y,x) by ratio constrained to gs-multiple
    # Returns the input unchanged (same object) when ratio == 1.0.
    if ratio == 1.0:
        return img
    else:
        h, w = img.shape[2:]
        s = (int(h * ratio), int(w * ratio))  # new size
        img = F.interpolate(img, size=s, mode='bilinear', align_corners=False)  # resize
        if not same_shape:  # pad/crop img
            # round target dims up to the nearest gs multiple so strides divide evenly
            h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
        return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447)  # value = imagenet mean
def copy_attr(a, b, include=(), exclude=()):
    # Copy attributes from b to a, options to only include [...] and to exclude [...]
    # Private names (leading underscore) are always skipped.
    for k, v in b.__dict__.items():
        if (len(include) and k not in include) or k.startswith('_') or k in exclude:
            continue
        else:
            setattr(a, k, v)
class ModelEMA:
    """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
    Keep a moving average of everything in the model state_dict (parameters and buffers).
    This is intended to allow functionality like
    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    A smoothed version of the weights is necessary for some training schemes to perform well.
    This class is sensitive where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
    """

    def __init__(self, model, decay=0.9999, updates=0):
        # Create EMA
        # Shadow copy is detached from the training graph (deepcopy + eval + no grads).
        self.ema = deepcopy(model.module if is_parallel(model) else model).eval()  # FP32 EMA
        # if next(model.parameters()).device.type != 'cpu':
        #     self.ema.half()  # FP16 EMA
        self.updates = updates  # number of EMA updates
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)
        for p in self.ema.parameters():
            p.requires_grad_(False)

    def update(self, model):
        # Update EMA parameters
        with torch.no_grad():
            self.updates += 1
            d = self.decay(self.updates)  # effective decay grows toward `decay` as updates accumulate

            msd = model.module.state_dict() if is_parallel(model) else model.state_dict()  # model state_dict
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:
                    # v = d * v + (1 - d) * model_value, done in place on the EMA tensors
                    v *= d
                    v += (1. - d) * msd[k].detach()

    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
        # Update EMA attributes
        copy_attr(self.ema, model, include, exclude)
|
# Register people (name/sex/age) until the user stops, then print summary stats.
dados = dict()
dados1 = list()  # accumulated records
somaidade = 0  # running sum of ages
while True:
    dados['nome'] = str(input('Nome: '))
    while True:
        # accept only the first letter, case-insensitive, and require F or M
        dados['sexo'] = str(input('Sexo: [F/M] ')).upper()[0]
        if dados['sexo'] in 'MF':
            break
        print('Error!! Digite novamente.')
    dados['idade'] = int(input('Idade: '))
    dados1.append(dados.copy())  # copy() so the next iteration doesn't alias this record
    while True:
        cont = str(input('Deseja continuar [S/N] ')).upper()[0]
        if cont in 'SN':
            break
        print('Erro! Responda apenas S ou N.')
    somaidade += dados['idade']
    if cont == 'N':
        break
media = somaidade / len(dados1)
print('=-' * 26)
print(f'A) Foi cadastrado {len(dados1)} pessoas')
print(f'B) A media de idade é de {media:.2f} anos.')
print('C) As mulheres cadastradas foram: ', end='')
for c in dados1:
    if c['sexo'] == 'F':
        print(c['nome'], end=' ')
print()
print('D) As idades acima da media foram: ')
for c in dados1:
    if c['idade'] > media:
        # double-quoted keys inside the single-quoted f-string: same-quote nesting
        # (f'...{c['nome']}...') is a SyntaxError on every Python before 3.12
        print(f'    nome = {c["nome"]}; sexo = {c["sexo"]}; idade = {c["idade"]}')
print(f'{"<<Encerrado>>":^30}')
'''
galera = list()
pessoa = dict()
soma = media = 0
while True:
    pessoa.clear()
    pessoa['nome'] = str(input('Nome: '))
    while True:
        pessoa['sexo'] = str(input('Sexo [M/F] ')).upper()[0]
        if pessoa['sexo'] in 'MF':
            break
        print('Error! Por favor, digite apenas M ou F.')
    pessoa['idade'] = int(input('Idade: '))
    soma += pessoa['idade']
    galera.append(pessoa.copy())
    while True:
        resp = str(input('Quer continuar? [S/N] ')).upper()[0]
        if resp in 'SN':
            break
        print('Error! Responda apenas S ou N.')
    if resp == 'N':
        break
print('-=' * 30)
print(f'A) Ao todo temos {len(galera)} pessoas cadastradas.')
media = soma / len(galera)
print(f'B) A media de idade é de {media:.2f} anos.')
print('C) As mulheres cadastradas foram ', end='')
for p in galera:
    if p['sexo'] in 'Ff':
        print(f'{p['nome']} ', end='')
print()
print('D) Lista das pessoas que estão acima da media: ')
for p in galera:
    if p['idade'] >= media:
        print(' ')
        for k, v in p.items():
            print(f'{k} = {v}; ', end='')
        print()
print('<< ENCERRADO >>')'''
# Register people (name/sex/age) until the user answers 'N', then print summary stats.
dados = dict()
dados1 = list()  # accumulated records
somaidade = 0  # running sum of ages for the average
while True:
    dados['nome'] = str(input('Nome: '))
    while True:
        # keep asking until the first letter is F or M (case-insensitive)
        dados['sexo'] = str(input('Sexo: [F/M] ')).upper()[0]
        if dados['sexo'] in 'MF':
            break
        print('Error!! Digite novamente.')
    dados['idade'] = int(input('Idade: '))
    dados1.append(dados.copy())  # copy() so later input doesn't mutate the stored record
    while True:
        cont = str(input('Deseja continuar [S/N] ')).upper()[0]
        if cont in 'SN':
            break
        print('Erro! Responda apenas S ou N.')
    somaidade += dados['idade']
    if cont == 'N':
        break
media = somaidade / len(dados1)
print('=-' * 26)
print(f'A) Foi cadastrado {len(dados1)} pessoas')
print(f'B) A media de idade é de {media:.2f} anos.')
print('C) As mulheres cadastradas foram: ', end='')
for c in dados1:
    if c['sexo'] == 'F':
        print(c['nome'], end=' ')
print()
print('D) As idades acima da media foram: ')
for c in dados1:
    if c['idade'] > media:
        print(f'    nome = {c["nome"]}; sexo = {c["sexo"]}; idade = {c["idade"]}')
print(f'{"<<Encerrado>>":^30}')
# The triple-quoted block below is an alternative solution kept as dead text, not executed.
'''
galera = list()
pessoa = dict()
soma = media = 0
while True:
    pessoa.clear()
    pessoa['nome'] = str(input('Nome: '))
    while True:
        pessoa['sexo'] = str(input('Sexo [M/F] ')).upper()[0]
        if pessoa['sexo'] in 'MF':
            break
        print('Error! Por favor, digite apenas M ou F.')
    pessoa['idade'] = int(input('Idade: '))
    soma += pessoa['idade']
    galera.append(pessoa.copy())
    while True:
        resp = str(input('Quer continuar? [S/N] ')).upper()[0]
        if resp in 'SN':
            break
        print('Error! Responda apenas S ou N.')
    if resp == 'N':
        break
print('-=' * 30)
print(f'A) Ao todo temos {len(galera)} pessoas cadastradas.')
media = soma / len(galera)
print(f'B) A media de idade é de {media:.2f} anos.')
print('C) As mulheres cadastradas foram ', end='')
for p in galera:
    if p['sexo'] in 'Ff':
        print(f'{p["nome"]} ', end='')
print()
print('D) Lista das pessoas que estão acima da media: ')
for p in galera:
    if p['idade'] >= media:
        print(' ')
        for k, v in p.items():
            print(f'{k} = {v}; ', end='')
        print()
print('<< ENCERRADO >>')'''
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2022 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
from datetime import datetime
import io
import json
import logging
from multiprocessing import dummy
import os
from pygeoapi.util import DATETIME_FORMAT, JobStatus
LOGGER = logging.getLogger(__name__)
class BaseManager:
    """generic Manager ABC"""

    def __init__(self, manager_def):
        """
        Initialize object

        :param manager_def: manager definition

        :returns: `pygeoapi.process.manager.base.BaseManager`
        """
        self.name = manager_def['name']
        self.is_async = False
        self.connection = manager_def.get('connection', None)
        self.output_dir = manager_def.get('output_dir', None)

    def get_jobs(self, status=None):
        """
        Get process jobs, optionally filtered by status

        :param status: job status (accepted, running, successful,
                       failed, results) (default is all)

        :returns: `list` of jobs (identifier, status, process identifier)
        """
        raise NotImplementedError()

    def add_job(self, job_metadata):
        """
        Add a job

        :param job_metadata: `dict` of job metadata

        :returns: `str` added job identifier
        """
        raise NotImplementedError()

    def update_job(self, job_id, update_dict):
        """
        Updates a job

        :param job_id: job identifier
        :param update_dict: `dict` of property updates

        :returns: `bool` of status result
        """
        raise NotImplementedError()

    def get_job(self, job_id):
        """
        Get a single job's metadata

        :param job_id: job identifier

        :returns: `dict` of job result
        """
        raise NotImplementedError()

    def get_job_result(self, job_id):
        """
        Returns the actual output from a completed process

        :param job_id: job identifier

        :returns: `tuple` of mimetype and raw output
        """
        raise NotImplementedError()

    def delete_job(self, job_id):
        """
        Deletes a job and associated results/outputs

        :param job_id: job identifier

        :returns: `bool` of status result
        """
        raise NotImplementedError()

    def _execute_handler_async(self, p, job_id, data_dict):
        """
        This private execution handler executes a process in a background
        thread using `multiprocessing.dummy`

        https://docs.python.org/3/library/multiprocessing.html#module-multiprocessing.dummy  # noqa

        :param p: `pygeoapi.process` object
        :param job_id: job identifier
        :param data_dict: `dict` of data parameters

        :returns: tuple of None (i.e. initial response payload)
                  and JobStatus.accepted (i.e. initial job status)
        """
        # dummy.Process is a thread, not a real process; job state is shared in-process
        _process = dummy.Process(
            target=self._execute_handler_sync,
            args=(p, job_id, data_dict)
        )
        _process.start()
        return 'application/json', None, JobStatus.accepted

    def _execute_handler_sync(self, p, job_id, data_dict):
        """
        Synchronous execution handler

        If the manager has defined `output_dir`, then the result
        will be written to disk
        output store. There is no clean-up of old process outputs.

        :param p: `pygeoapi.process` object
        :param job_id: job identifier
        :param data_dict: `dict` of data parameters

        :returns: tuple of MIME type, response payload and status
        """
        process_id = p.metadata['id']
        current_status = JobStatus.accepted

        job_metadata = {
            'identifier': job_id,
            'process_id': process_id,
            'job_start_datetime': datetime.utcnow().strftime(
                DATETIME_FORMAT),
            'job_end_datetime': None,
            'status': current_status.value,
            'location': None,
            'mimetype': None,
            'message': 'Job accepted and ready for execution',
            'progress': 5
        }

        self.add_job(job_metadata)

        try:
            if self.output_dir is not None:
                filename = '{}-{}'.format(p.metadata['id'], job_id)
                job_filename = os.path.join(self.output_dir, filename)
            else:
                job_filename = None

            current_status = JobStatus.running
            jfmt, outputs = p.execute(data_dict)

            self.update_job(job_id, {
                'status': current_status.value,
                'message': 'Writing job output',
                'progress': 95
            })

            if self.output_dir is not None:
                LOGGER.debug('writing output to {}'.format(job_filename))
                # NOTE(review): outputs that are neither dict nor bytes leave
                # mode/data/encoding unbound (NameError, handled below as a
                # failed job) -- confirm processes only return those two types
                if isinstance(outputs, dict):
                    mode = 'w'
                    data = json.dumps(outputs, sort_keys=True, indent=4)
                    encoding = 'utf-8'
                elif isinstance(outputs, bytes):
                    mode = 'wb'
                    data = outputs
                    encoding = None
                with io.open(job_filename, mode, encoding=encoding) as fh:
                    fh.write(data)

            current_status = JobStatus.successful

            job_update_metadata = {
                'job_end_datetime': datetime.utcnow().strftime(
                    DATETIME_FORMAT),
                'status': current_status.value,
                'location': job_filename,
                'mimetype': jfmt,
                'message': 'Job complete',
                'progress': 100
            }

            self.update_job(job_id, job_update_metadata)

        except Exception as err:
            # TODO assess correct exception type and description to help users
            # NOTE, the /results endpoint should return the error HTTP status
            # for jobs that failed, ths specification says that failing jobs
            # must still be able to be retrieved with their error message
            # intact, and the correct HTTP error status at the /results
            # endpoint, even if the /result endpoint correctly returns the
            # failure information (i.e. what one might assume is a 200
            # response).
            current_status = JobStatus.failed
            code = 'InvalidParameterValue'
            outputs = {
                'code': code,
                'description': 'Error updating job'
            }
            LOGGER.error(err)
            job_metadata = {
                'job_end_datetime': datetime.utcnow().strftime(
                    DATETIME_FORMAT),
                'status': current_status.value,
                'location': None,
                'mimetype': None,
                # double quotes for the inner key: same-quote nesting inside an
                # f-string is a SyntaxError on Python < 3.12
                'message': f'{code}: {outputs["description"]}'
            }
            jfmt = 'application/json'

            self.update_job(job_id, job_metadata)

        return jfmt, outputs, current_status

    def execute_process(self, p, job_id, data_dict, is_async=False):
        """
        Default process execution handler

        :param p: `pygeoapi.process` object
        :param job_id: job identifier
        :param data_dict: `dict` of data parameters
        :param is_async: `bool` specifying sync or async processing.

        :returns: tuple of MIME type, response payload and status
        """
        if not is_async:
            LOGGER.debug('Synchronous execution')
            return self._execute_handler_sync(p, job_id, data_dict)
        else:
            LOGGER.debug('Asynchronous execution')
            return self._execute_handler_async(p, job_id, data_dict)

    def __repr__(self):
        return '<BaseManager> {}'.format(self.name)
| # =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2022 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
from datetime import datetime
import io
import json
import logging
from multiprocessing import dummy
import os
from pygeoapi.util import DATETIME_FORMAT, JobStatus
LOGGER = logging.getLogger(__name__)
class BaseManager:
    """generic Manager ABC"""

    def __init__(self, manager_def):
        """
        Initialize object

        :param manager_def: manager definition

        :returns: `pygeoapi.process.manager.base.BaseManager`
        """
        self.name = manager_def['name']
        self.is_async = False  # subclasses that support async set this True
        self.connection = manager_def.get('connection', None)
        self.output_dir = manager_def.get('output_dir', None)

    def get_jobs(self, status=None):
        """
        Get process jobs, optionally filtered by status

        :param status: job status (accepted, running, successful,
                       failed, results) (default is all)

        :returns: `list` of jobs (identifier, status, process identifier)
        """
        raise NotImplementedError()

    def add_job(self, job_metadata):
        """
        Add a job

        :param job_metadata: `dict` of job metadata

        :returns: `str` added job identifier
        """
        raise NotImplementedError()

    def update_job(self, job_id, update_dict):
        """
        Updates a job

        :param job_id: job identifier
        :param update_dict: `dict` of property updates

        :returns: `bool` of status result
        """
        raise NotImplementedError()

    def get_job(self, job_id):
        """
        Get a job (!)

        :param job_id: job identifier

        :returns: `dict` of job result
        """
        raise NotImplementedError()

    def get_job_result(self, job_id):
        """
        Returns the actual output from a completed process

        :param job_id: job identifier

        :returns: `tuple` of mimetype and raw output
        """
        raise NotImplementedError()

    def delete_job(self, job_id):
        """
        Deletes a job and associated results/outputs

        :param job_id: job identifier

        :returns: `bool` of status result
        """
        raise NotImplementedError()

    def _execute_handler_async(self, p, job_id, data_dict):
        """
        This private execution handler executes a process in a background
        thread using `multiprocessing.dummy`

        https://docs.python.org/3/library/multiprocessing.html#module-multiprocessing.dummy  # noqa

        :param p: `pygeoapi.process` object
        :param job_id: job identifier
        :param data_dict: `dict` of data parameters

        :returns: tuple of None (i.e. initial response payload)
                  and JobStatus.accepted (i.e. initial job status)
        """
        # multiprocessing.dummy.Process is a thread wrapper, not a real process
        _process = dummy.Process(
            target=self._execute_handler_sync,
            args=(p, job_id, data_dict)
        )
        _process.start()
        return 'application/json', None, JobStatus.accepted

    def _execute_handler_sync(self, p, job_id, data_dict):
        """
        Synchronous execution handler

        If the manager has defined `output_dir`, then the result
        will be written to disk
        output store. There is no clean-up of old process outputs.

        :param p: `pygeoapi.process` object
        :param job_id: job identifier
        :param data_dict: `dict` of data parameters

        :returns: tuple of MIME type, response payload and status
        """
        process_id = p.metadata['id']
        current_status = JobStatus.accepted

        # initial job record: accepted, 5% progress
        job_metadata = {
            'identifier': job_id,
            'process_id': process_id,
            'job_start_datetime': datetime.utcnow().strftime(
                DATETIME_FORMAT),
            'job_end_datetime': None,
            'status': current_status.value,
            'location': None,
            'mimetype': None,
            'message': 'Job accepted and ready for execution',
            'progress': 5
        }

        self.add_job(job_metadata)

        try:
            if self.output_dir is not None:
                filename = '{}-{}'.format(p.metadata['id'], job_id)
                job_filename = os.path.join(self.output_dir, filename)
            else:
                job_filename = None

            current_status = JobStatus.running
            jfmt, outputs = p.execute(data_dict)

            self.update_job(job_id, {
                'status': current_status.value,
                'message': 'Writing job output',
                'progress': 95
            })

            if self.output_dir is not None:
                LOGGER.debug('writing output to {}'.format(job_filename))
                # NOTE(review): an `outputs` that is neither dict nor bytes
                # leaves mode/data/encoding unbound -- the resulting NameError
                # is caught below and marks the job failed; confirm intended
                if isinstance(outputs, dict):
                    mode = 'w'
                    data = json.dumps(outputs, sort_keys=True, indent=4)
                    encoding = 'utf-8'
                elif isinstance(outputs, bytes):
                    mode = 'wb'
                    data = outputs
                    encoding = None
                with io.open(job_filename, mode, encoding=encoding) as fh:
                    fh.write(data)

            current_status = JobStatus.successful

            job_update_metadata = {
                'job_end_datetime': datetime.utcnow().strftime(
                    DATETIME_FORMAT),
                'status': current_status.value,
                'location': job_filename,
                'mimetype': jfmt,
                'message': 'Job complete',
                'progress': 100
            }

            self.update_job(job_id, job_update_metadata)

        except Exception as err:
            # TODO assess correct exception type and description to help users
            # NOTE, the /results endpoint should return the error HTTP status
            # for jobs that failed, ths specification says that failing jobs
            # must still be able to be retrieved with their error message
            # intact, and the correct HTTP error status at the /results
            # endpoint, even if the /result endpoint correctly returns the
            # failure information (i.e. what one might assume is a 200
            # response).
            current_status = JobStatus.failed
            code = 'InvalidParameterValue'
            outputs = {
                'code': code,
                'description': 'Error updating job'
            }
            LOGGER.error(err)
            job_metadata = {
                'job_end_datetime': datetime.utcnow().strftime(
                    DATETIME_FORMAT),
                'status': current_status.value,
                'location': None,
                'mimetype': None,
                'message': f'{code}: {outputs["description"]}'
            }
            jfmt = 'application/json'

            self.update_job(job_id, job_metadata)

        return jfmt, outputs, current_status

    def execute_process(self, p, job_id, data_dict, is_async=False):
        """
        Default process execution handler

        :param p: `pygeoapi.process` object
        :param job_id: job identifier
        :param data_dict: `dict` of data parameters
        :param is_async: `bool` specifying sync or async processing.

        :returns: tuple of MIME type, response payload and status
        """
        if not is_async:
            LOGGER.debug('Synchronous execution')
            return self._execute_handler_sync(p, job_id, data_dict)
        else:
            LOGGER.debug('Asynchronous execution')
            return self._execute_handler_async(p, job_id, data_dict)

    def __repr__(self):
        return '<BaseManager> {}'.format(self.name)
|
import requests
import sys
from urllib.parse import urljoin
import arrow
import config
import db_utils
import slack_utils
BASE_URL = "https://cdn-api.co-vin.in"
def make_covin_request(request_url, params=None) -> requests.Response:
    """GET `request_url` from the Co-WIN API with browser-like headers.

    Retries up to 3 times on a non-2xx response, then raises
    requests.HTTPError via raise_for_status().

    :param request_url: absolute URL to fetch
    :param params: optional dict of query-string parameters
    :returns: successful `requests.Response`
    """
    headers = {
        "Accept": "application/json",
        "Accept-Language": "en-US",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36"
    }
    num_retries = 3
    response = None
    for _ in range(num_retries):
        # BUGFIX: the original issued the request once before the loop and then
        # raised on the first failed check, so num_retries never retried anything.
        # Re-issue the request on each attempt.
        response = requests.get(
            request_url,
            params=params,
            headers=headers
        )
        if response.ok:
            return response
    response.raise_for_status()  # surface the final failure to the caller
    return response
def get_all_states():
    """Fetch every state known to Co-WIN and print an `id ->> name` table."""
    request_url = urljoin(BASE_URL, "api/v2/admin/location/states")
    payload = make_covin_request(request_url).json()
    # keyed by state_id; dict preserves API order
    states_dict = {entry["state_id"]: entry["state_name"] for entry in payload["states"]}
    print("-------- STATES MAP --------")
    for state_id, state_name in states_dict.items():
        print(f"{state_id} ->> {state_name}")
def get_all_districts_for_state(state_id):
    """Fetch the districts of one state from Co-WIN and print an `id ->> name` table."""
    request_url = urljoin(BASE_URL, f"api/v2/admin/location/districts/{state_id}")
    payload = make_covin_request(request_url).json()
    # keyed by district_id; dict preserves API order
    districts_dict = {entry["district_id"]: entry["district_name"] for entry in payload["districts"]}
    print("-------- DISTRICTS MAP --------")
    for district_id, district_name in districts_dict.items():
        print(f"{district_id} ->> {district_name}")
def parse_slot_results(response: requests.Response):
    """Scan a calendar API response for open sessions and fire notifications.

    Keeps sessions with dose-1/dose-2 capacity per the config filters, sends at
    most one notification per enabled age bracket (18+/45+), and exits the
    process once every enabled bracket has been notified.
    """
    centers = response.json().get("centers")
    if not centers:
        print("No centers found")
        return

    def _has_capacity(session):
        # a session qualifies when any *enabled* dose filter shows free capacity
        return ((config.CHECK_FOR_DOSE1 and session["available_capacity_dose1"] > 0) or
                (config.CHECK_FOR_DOSE2 and session["available_capacity_dose2"] > 0))

    open_sessions = []
    for center in centers:
        for session in center.get("sessions") or []:
            if _has_capacity(session):
                open_sessions.append({
                    "center_id": center["center_id"],
                    "name": center["name"],
                    "pincode": center["pincode"],
                    "session_id": session["session_id"],
                    "available_capacity": session["available_capacity"],
                    "slot_date": session["date"],
                    "min_age_limit": session["min_age_limit"],
                    "vaccine": session["vaccine"],
                })

    if not config.NOTIFIED_FOR_18_PLUS and config.CHECK_FOR_18_YRS:
        bracket_hits = [s for s in open_sessions if int(s["min_age_limit"]) == 18]
        if bracket_hits:
            notify(bracket_hits)
            config.NOTIFIED_FOR_18_PLUS = True

    if not config.NOTIFIED_FOR_45_PLUS and config.CHECK_FOR_45_YRS:
        bracket_hits = [s for s in open_sessions if int(s["min_age_limit"]) == 45]
        if bracket_hits:
            notify(bracket_hits)
            config.NOTIFIED_FOR_45_PLUS = True

    if config.NOTIFIED_FOR_18_PLUS and config.NOTIFIED_FOR_45_PLUS:
        sys.exit(0)  # both brackets notified -- nothing left to watch
def check_slot_availability_by_district(district_id):
    """Query this week's calendar for a district and process any open slots."""
    today_ist = arrow.utcnow().to("Asia/Kolkata").strftime("%d-%m-%Y")
    endpoint = urljoin(BASE_URL, "api/v2/appointment/sessions/public/calendarByDistrict")
    response = make_covin_request(
        request_url=endpoint,
        params={"district_id": district_id, "date": today_ist},
    )
    parse_slot_results(response)
def check_slot_availability_by_pincode(pincode: str):
    """Query this week's calendar for a pincode and process any open slots."""
    today_ist = arrow.utcnow().to("Asia/Kolkata").strftime("%d-%m-%Y")
    endpoint = urljoin(BASE_URL, "api/v2/appointment/sessions/public/calendarByPin")
    response = make_covin_request(
        request_url=endpoint,
        params={"pincode": pincode, "date": today_ist},
    )
    parse_slot_results(response)
def notify(sessions_list):
    # Single notification entry point; currently delegates straight to Slack.
    send_message_for_vaccine_slots(sessions_list)
def send_message_for_vaccine_slots(sessions_list):
    """Send a Slack alert for the given open vaccination sessions.

    Rate-limits per session (skips a session once it has been sent more than
    5 times inside a rolling hour) using send state persisted via db_utils,
    and optionally fires a second alert for whitelisted "preferred" centers.

    Args:
        sessions_list (list[dict]): session dicts built by parse_slot_results;
            all entries share the same min_age_limit.
    """
    # FIX: the context-block f-strings below nested double quotes inside a
    # double-quoted f-string — a SyntaxError on Python < 3.12; inner keys now
    # use single quotes, matching the rest of the file.
    print(sessions_list)
    slack_access_token = config.SLACK_ACCESS_TOKEN
    if not slack_access_token:
        print("Skipping slack alert as no access token found in config")
        return
    min_age = sessions_list[0]['min_age_limit']
    send_info = db_utils.get_send_info()
    whitelisted_centers = []
    if config.PREFERRED_CENTER_FILTER:
        whitelisted_centers = [int(i) for i in config.PREFERRED_CENTER_FILTER.split(",")]
    blocks = [
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"Vaccination slots for age {min_age} plus are available in following centers"
            }
        }
    ]
    preferred_center_blocks = [
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"Vaccination slots for age {min_age} plus are available in following centers"
            }
        }
    ]
    for session_info in sessions_list:
        # Initialize send state for this session if it doesn't exist yet.
        session_id = session_info["session_id"]
        if session_id not in send_info:
            send_info[session_id] = {
                "num_sends": 0
            }
        last_send_info = send_info[session_id]
        if last_send_info["num_sends"] > 5:
            last_send_dt = arrow.get(last_send_info["last_send_dt"])
            if arrow.utcnow() < last_send_dt.shift(hours=1):
                # Don't send notifications for a session more than 5 times in a 1 hour window.
                continue
            # Reset num_sends as rate limit doesn't apply here.
            send_info[session_id] = {
                "num_sends": 0
            }
        blocks.append({
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"{session_info['name']}({session_info['pincode']}) -> {session_info['available_capacity']} -> {session_info['vaccine']} -> {session_info['slot_date']}"
                }
            ]
        })
        if whitelisted_centers and int(session_info["center_id"]) in whitelisted_centers:
            preferred_center_blocks.append({
                "type": "context",
                "elements": [
                    {
                        "type": "plain_text",
                        "text": f"{session_info['name']}({session_info['pincode']}) -> {session_info['available_capacity']} -> {session_info['vaccine']} -> {session_info['slot_date']}"
                    }
                ]
            })
        send_info[session_id] = {
            "last_send_dt": str(arrow.utcnow()),
            "num_sends": send_info[session_id]["num_sends"] + 1,
            "center_name": session_info["name"],
        }
    if len(blocks) == 1:
        # Only the header block remains: every session was rate-limited.
        print(f"No new notifications to send for {min_age}. Returning.")
        return
    slack_channel_ids = []
    if config.SLACK_CHANNEL_ID:
        slack_channel_ids = [config.SLACK_CHANNEL_ID]
    slack_user_ids = []
    if config.SLACK_USER_ID:
        slack_user_ids = [config.SLACK_USER_ID]
    slack_utils.send_message(
        access_token=slack_access_token,
        text=f"Vaccination slots for age {min_age} plus are open!",
        blocks=blocks,
        slack_channel_ids=slack_channel_ids,
        slack_user_ids=slack_user_ids,
    )
    if len(preferred_center_blocks) > 1 and config.PREFERRED_CENTER_SLACK_ACCESS_TOKEN:
        slack_utils.send_message(
            access_token=config.PREFERRED_CENTER_SLACK_ACCESS_TOKEN,
            text=f"Vaccination slots for age {min_age} plus are open!",
            blocks=preferred_center_blocks,
            slack_channel_ids=slack_channel_ids,
            slack_user_ids=slack_user_ids,
        )
    db_utils.set_send_info(send_info)
if __name__ == "__main__":
    # FIX: inner "Asia/Kolkata" reused the f-string's double quotes —
    # a SyntaxError before Python 3.12.
    print(f"---------- START {arrow.utcnow().to('Asia/Kolkata')} -----------")
    # ZIPCODE takes precedence over DISTRICT_ID when both are configured.
    if config.ZIPCODE:
        check_slot_availability_by_pincode(config.ZIPCODE)
        sys.exit(0)
    if config.DISTRICT_ID:
        check_slot_availability_by_district(config.DISTRICT_ID)
        sys.exit(0)
    print("One of ZIPCODE or DISTRICT_ID must be specified in config")
    # Gets slot availability for district 392 and notifies if slots are available.
    # check_slot_availability_by_district("392")
    # Get slot availability for pincode 400706 and notifies if slots are available.
    # check_slot_availability_by_pincode("400706")
    # Prints all states (id and name map) - Useful to fetch state_id to list districts
    # get_all_states
    # Prints all districts in state 21 (id and name map)
    # get_all_districts_for_state(21)
| import requests
import sys
from urllib.parse import urljoin
import arrow
import config
import db_utils
import slack_utils
BASE_URL = "https://cdn-api.co-vin.in"
def make_covin_request(request_url, params=None) -> requests.Response:
    """GET a Co-WIN endpoint, retrying a failed request up to 3 times.

    FIX: the original issued the request once *before* its retry loop and
    called raise_for_status() on the first failure, so the "retries" never
    re-requested anything.  The request now runs inside the loop.

    Args:
        request_url (str): absolute URL to fetch.
        params (dict | None): query parameters passed through to requests.

    Returns:
        requests.Response: the first OK response.

    Raises:
        requests.HTTPError: if the request still fails after all retries.
    """
    num_retries = 3
    headers = {
        "Accept": "application/json",
        "Accept-Language": "en-US",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36"
    }
    while True:
        response = requests.get(request_url, params=params, headers=headers)
        if response.ok:
            return response
        num_retries -= 1
        if num_retries <= 0:
            # Out of retries: surface the HTTP error to the caller.
            response.raise_for_status()
            return response
def get_all_states():
    """Fetch and print the state_id -> state_name map from the Co-WIN API."""
    api_url = urljoin(BASE_URL, "api/v2/admin/location/states")
    payload = make_covin_request(api_url).json()
    mapping = {
        entry["state_id"]: entry["state_name"]
        for entry in payload["states"]
    }
    print("-------- STATES MAP --------")
    for state_id, state_name in mapping.items():
        print(f"{state_id} ->> {state_name}")
def get_all_districts_for_state(state_id):
    """Fetch and print the district_id -> district_name map for *state_id*."""
    response = make_covin_request(
        urljoin(BASE_URL, f"api/v2/admin/location/districts/{state_id}")
    )
    # Collapse into a map before printing so each id appears once.
    id_to_name = {
        d["district_id"]: d["district_name"]
        for d in response.json()["districts"]
    }
    print("-------- DISTRICTS MAP --------")
    for district_id, name in id_to_name.items():
        print(f"{district_id} ->> {name}")
def parse_slot_results(response: requests.Response):
    """Extract open vaccination sessions from a calendar response and notify.

    Flattens centers/sessions into per-session dicts, filters by the dose
    capacities enabled in config, and notifies once per age group (18/45)
    per process run.  Exits the process once both groups have been notified.
    """
    resp_json = response.json()
    centers = resp_json.get("centers")
    if not centers:
        print("No centers found")
        return
    # One flat record per open session, keeping the parent center's identity.
    available_sessions_with_center_info = [
        {
            "center_id": center["center_id"],
            "name": center["name"],
            "pincode": center["pincode"],
            "session_id": session["session_id"],
            "available_capacity": session["available_capacity"],
            "slot_date": session["date"],
            "min_age_limit": session["min_age_limit"],
            "vaccine": session["vaccine"],
        }
        for center in centers
        for session in center.get("sessions") or []
        if (
            (config.CHECK_FOR_DOSE1 and session["available_capacity_dose1"] > 0) or
            (config.CHECK_FOR_DOSE2 and session["available_capacity_dose2"] > 0)
        )
    ]
    # NOTIFIED_* flags are attributes mutated on the config module, so each
    # age group alerts at most once for the life of this process.
    if not config.NOTIFIED_FOR_18_PLUS and config.CHECK_FOR_18_YRS:
        sessions_with_slots_for_18_plus = [
            s for s in available_sessions_with_center_info
            if int(s["min_age_limit"]) == 18
        ]
        if sessions_with_slots_for_18_plus:
            notify(sessions_with_slots_for_18_plus)
            config.NOTIFIED_FOR_18_PLUS = True
    if not config.NOTIFIED_FOR_45_PLUS and config.CHECK_FOR_45_YRS:
        sessions_with_slots_for_45_plus = [
            s for s in available_sessions_with_center_info
            if int(s["min_age_limit"]) == 45
        ]
        if sessions_with_slots_for_45_plus:
            notify(sessions_with_slots_for_45_plus)
            config.NOTIFIED_FOR_45_PLUS = True
    # Nothing left to watch for: stop the whole process.
    if config.NOTIFIED_FOR_18_PLUS and config.NOTIFIED_FOR_45_PLUS:
        sys.exit(0)
def check_slot_availability_by_district(district_id):
    """Query this week's calendar for a district and process any open slots."""
    ist_today = arrow.utcnow().to("Asia/Kolkata")
    parse_slot_results(
        make_covin_request(
            request_url=urljoin(
                BASE_URL, "api/v2/appointment/sessions/public/calendarByDistrict"
            ),
            params={
                "district_id": district_id,
                "date": ist_today.strftime("%d-%m-%Y"),
            },
        )
    )
def check_slot_availability_by_pincode(pincode: str):
    """Query this week's calendar for a pincode and process any open slots."""
    ist_today = arrow.utcnow().to("Asia/Kolkata")
    parse_slot_results(
        make_covin_request(
            request_url=urljoin(
                BASE_URL, "api/v2/appointment/sessions/public/calendarByPin"
            ),
            params={
                "pincode": pincode,
                "date": ist_today.strftime("%d-%m-%Y"),
            },
        )
    )
def notify(sessions_list):
    # Single notification entry point; currently delegates straight to Slack.
    send_message_for_vaccine_slots(sessions_list)
def send_message_for_vaccine_slots(sessions_list):
    """Send a Slack alert for the given open vaccination sessions.

    Rate-limits per session (skips a session once it has been sent more than
    5 times inside a rolling hour) using send state persisted via db_utils,
    and optionally fires a second alert for whitelisted "preferred" centers.

    Args:
        sessions_list (list[dict]): session dicts built by parse_slot_results;
            all entries share the same min_age_limit.
    """
    print(sessions_list)
    slack_access_token = config.SLACK_ACCESS_TOKEN
    if not slack_access_token:
        print("Skipping slack alert as no access token found in config")
        return
    min_age = sessions_list[0]['min_age_limit']
    send_info = db_utils.get_send_info()
    whitelisted_centers = []
    if config.PREFERRED_CENTER_FILTER:
        whitelisted_centers = [int(i) for i in config.PREFERRED_CENTER_FILTER.split(",")]
    blocks = [
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"Vaccination slots for age {min_age} plus are available in following centers"
            }
        }
    ]
    preferred_center_blocks = [
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"Vaccination slots for age {min_age} plus are available in following centers"
            }
        }
    ]
    for session_info in sessions_list:
        # Initialize send state for this session if it doesn't exist yet.
        session_id = session_info["session_id"]
        if session_id not in send_info:
            send_info[session_id] = {
                "num_sends": 0
            }
        last_send_info = send_info[session_id]
        if last_send_info["num_sends"] > 5:
            last_send_dt = arrow.get(last_send_info["last_send_dt"])
            if arrow.utcnow() < last_send_dt.shift(hours=1):
                # Don't send notifications for a session more than 5 times in a 1 hour window.
                continue
            # Reset num_sends as rate limit doesn't apply here.
            send_info[session_id] = {
                "num_sends": 0
            }
        blocks.append({
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"{session_info['name']}({session_info['pincode']}) -> {session_info['available_capacity']} -> {session_info['vaccine']} -> {session_info['slot_date']}"
                }
            ]
        })
        if whitelisted_centers and int(session_info["center_id"]) in whitelisted_centers:
            preferred_center_blocks.append({
                "type": "context",
                "elements": [
                    {
                        "type": "plain_text",
                        "text": f"{session_info['name']}({session_info['pincode']}) -> {session_info['available_capacity']} -> {session_info['vaccine']} -> {session_info['slot_date']}"
                    }
                ]
            })
        send_info[session_id] = {
            "last_send_dt": str(arrow.utcnow()),
            "num_sends": send_info[session_id]["num_sends"] + 1,
            "center_name": session_info["name"],
        }
    # Only the header block remains: every session was rate-limited.
    if len(blocks) == 1:
        print(f"No new notifications to send for {min_age}. Returning.")
        return
    slack_channel_ids = []
    if config.SLACK_CHANNEL_ID:
        slack_channel_ids = [config.SLACK_CHANNEL_ID]
    slack_user_ids = []
    if config.SLACK_USER_ID:
        slack_user_ids = [config.SLACK_USER_ID]
    slack_utils.send_message(
        access_token=slack_access_token,
        text=f"Vaccination slots for age {min_age} plus are open!",
        blocks=blocks,
        slack_channel_ids=slack_channel_ids,
        slack_user_ids=slack_user_ids,
    )
    if len(preferred_center_blocks) > 1 and config.PREFERRED_CENTER_SLACK_ACCESS_TOKEN:
        slack_utils.send_message(
            access_token=config.PREFERRED_CENTER_SLACK_ACCESS_TOKEN,
            text=f"Vaccination slots for age {min_age} plus are open!",
            blocks=preferred_center_blocks,
            slack_channel_ids=slack_channel_ids,
            slack_user_ids=slack_user_ids,
        )
    db_utils.set_send_info(send_info)
if __name__ == "__main__":
    print(f"---------- START {arrow.utcnow().to('Asia/Kolkata')} -----------")
    # ZIPCODE takes precedence over DISTRICT_ID when both are configured.
    if config.ZIPCODE:
        check_slot_availability_by_pincode(config.ZIPCODE)
        sys.exit(0)
    if config.DISTRICT_ID:
        check_slot_availability_by_district(config.DISTRICT_ID)
        sys.exit(0)
    print("One of ZIPCODE or DISTRICT_ID must be specified in config")
    # Gets slot availability for district 392 and notifies if slots are available.
    # check_slot_availability_by_district("392")
    # Get slot availability for pincode 400706 and notifies if slots are available.
    # check_slot_availability_by_pincode("400706")
    # Prints all states (id and name map) - Useful to fetch state_id to list districts
    # get_all_states
    # Prints all districts in state 21 (id and name map)
    # get_all_districts_for_state(21)
|
from collections import defaultdict
# Console "social media" simulator: reads commands until "drop the media",
# then prints each post's stats and comments.
# FIX: the output f-strings nested single quotes inside single-quoted
# f-strings — a SyntaxError on Python < 3.12; inner keys now use double
# quotes, matching the sibling copy of this script.
posts = defaultdict(dict)
while True:
    inp = input()
    if inp == 'drop the media':
        break
    inp = inp.split(' ')
    command = inp[0]
    rest_input = inp[1:]
    if command == 'post':
        posts[rest_input[0]] = {'likes': 0, 'dislikes': 0, 'comments': []}
    elif command == 'like':
        posts[rest_input[0]]['likes'] += 1
    elif command == 'dislike':
        posts[rest_input[0]]['dislikes'] += 1
    elif command == 'comment':
        # comment <post> <author> <free text...>
        post_name, commentator, *content = rest_input
        posts[post_name]['comments'].append({'commentator': commentator, 'content': ' '.join(content)})
for post_name, data in posts.items():
    print(f'Post: {post_name} | Likes: {data["likes"]} | Dislikes: {data["dislikes"]}')
    print('Comments:')
    if data['comments']:
        for comment in data['comments']:
            print(f'* {comment["commentator"]}: {comment["content"]}')
    else:
        print('None')
| from collections import defaultdict
# Console "social media" simulator: reads commands until "drop the media",
# then prints each post's stats and comments.
posts = defaultdict(dict)
while True:
    line = input()
    if line == 'drop the media':
        break
    command, *args = line.split(' ')
    if command == 'post':
        posts[args[0]] = {'likes': 0, 'dislikes': 0, 'comments': []}
    elif command == 'like':
        posts[args[0]]['likes'] += 1
    elif command == 'dislike':
        posts[args[0]]['dislikes'] += 1
    elif command == 'comment':
        # comment <post> <author> <free text...>
        name, author, *words = args
        posts[name]['comments'].append({'commentator': author, 'content': ' '.join(words)})
for name, info in posts.items():
    print(f'Post: {name} | Likes: {info["likes"]} | Dislikes: {info["dislikes"]}')
    print('Comments:')
    if not info['comments']:
        print('None')
    else:
        for entry in info['comments']:
            print(f'* {entry["commentator"]}: {entry["content"]}')
|
from datetime import datetime
def salvar(nomeArquivoEntrada):
    """Copy the input file into a new '<aluno>_<atividade>_<timestamp>.txt'.

    FIXES: the timestamp f-string nested single quotes inside a single-quoted
    f-string (SyntaxError < 3.12) and recomputed the timestamp instead of
    using the already-built dataHoraAtual; neither file handle was closed.
    """
    with open(nomeArquivoEntrada, 'r') as arquivoEntrada:
        nomeAluno = str(input('Qual é o seu nome? ')).strip()
        nomeAtividade = str(input('Qual é a atividade? ')).strip()
        if '.py' in nomeAtividade:
            nomeAtividade = nomeAtividade.replace('.py', '')
        dataHoraAtual = datetime.now().strftime('%d%m%Y%H%M%S')
        nomeArquivoSaida = f'{nomeAluno}_{nomeAtividade}_{dataHoraAtual}.txt'
        with open(nomeArquivoSaida, 'w') as arquivoSaida:
            for linha in arquivoEntrada:
                arquivoSaida.write(linha)
if __name__ == '__main__':
    # Prompt for the source file, then copy it under a timestamped name.
    arquivoEntrada = str(input('Qual o arquivo a ser salvo? '))
    salvar(arquivoEntrada)
print('Arquivo salvo com sucesso') | from datetime import datetime
def salvar(nomeArquivoEntrada):
    """Copy the input file into a new '<aluno>_<atividade>_<timestamp>.txt'.

    FIXES: neither file handle was ever closed (now handled by `with`), and
    dataHoraAtual was computed but ignored — the timestamp was recomputed
    inside the filename f-string.
    """
    with open(nomeArquivoEntrada, 'r') as arquivoEntrada:
        nomeAluno = str(input('Qual é o seu nome? ')).strip()
        nomeAtividade = str(input('Qual é a atividade? ')).strip()
        if '.py' in nomeAtividade:
            nomeAtividade = nomeAtividade.replace('.py', '')
        dataHoraAtual = datetime.now().strftime('%d%m%Y%H%M%S')
        nomeArquivoSaida = f'{nomeAluno}_{nomeAtividade}_{dataHoraAtual}.txt'
        with open(nomeArquivoSaida, 'w') as arquivoSaida:
            for linha in arquivoEntrada:
                arquivoSaida.write(linha)
if __name__ == '__main__':
    # Prompt for the source file, then copy it under a timestamped name.
    arquivoEntrada = str(input('Qual o arquivo a ser salvo? '))
    salvar(arquivoEntrada)
    print('Arquivo salvo com sucesso')
import base64
from ocs_ci.framework import config
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs import constants
from ocs_ci.helpers.helpers import storagecluster_independent_check
class RGW(object):
    """
    Wrapper class for interaction with a cluster's RGW service.

    FIX: the endpoint f-string nested double quotes inside a double-quoted
    f-string — a SyntaxError on Python < 3.12; inner keys now use single
    quotes, matching the sibling copy of this class.
    """

    def __init__(self, namespace=None):
        # Fall back to the configured cluster namespace when none is given.
        self.namespace = (
            namespace if namespace else config.ENV_DATA["cluster_namespace"]
        )
        # External-mode clusters expose RGW through a different storageclass.
        if storagecluster_independent_check():
            sc_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RGW
        else:
            sc_name = constants.DEFAULT_STORAGECLASS_RGW
        self.storageclass = OCP(
            kind="storageclass", namespace=namespace, resource_name=sc_name
        )
        self.s3_internal_endpoint = (
            self.storageclass.get().get("parameters").get("endpoint")
        )
        self.region = self.storageclass.get().get("parameters").get("region")
        # Todo: Implement retrieval in cases where CephObjectStoreUser is available
        self.key_id = None
        self.secret_key = None
        self.s3_resource = None

    def get_credentials(self, secret_name=constants.NOOBAA_OBJECTSTOREUSER_SECRET):
        """
        Get Endpoint, Access key and Secret key from OCS secret. Endpoint is
        taken from rgw exposed service. Use rgw_endpoint fixture in test to get
        it exposed.

        Args:
            secret_name (str): Name of secret to be used
                for getting RGW credentials

        Returns:
            tuple: Endpoint, Access key, Secret key
        """
        if (
            secret_name == constants.NOOBAA_OBJECTSTOREUSER_SECRET
            and storagecluster_independent_check()
        ):
            secret_name = constants.EXTERNAL_MODE_NOOBAA_OBJECTSTOREUSER_SECRET
        secret_ocp_obj = OCP(kind=constants.SECRET, namespace=self.namespace)
        route_ocp_obj = OCP(
            kind=constants.ROUTE, namespace=config.ENV_DATA["cluster_namespace"]
        )
        creds_secret_obj = secret_ocp_obj.get(secret_name)
        # The RGW route name differs between internal and external deployments.
        if config.DEPLOYMENT["external_mode"]:
            endpoint = route_ocp_obj.get(
                resource_name=constants.RGW_SERVICE_EXTERNAL_MODE
            )
        else:
            endpoint = route_ocp_obj.get(
                resource_name=constants.RGW_SERVICE_INTERNAL_MODE
            )
        endpoint = f"http://{endpoint['status']['ingress'][0]['host']}"
        # Secret values are stored base64-encoded in the OCP secret.
        access_key = base64.b64decode(
            creds_secret_obj.get("data").get("AccessKey")
        ).decode("utf-8")
        secret_key = base64.b64decode(
            creds_secret_obj.get("data").get("SecretKey")
        ).decode("utf-8")
        return (endpoint, access_key, secret_key)
| import base64
from ocs_ci.framework import config
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs import constants
from ocs_ci.helpers.helpers import storagecluster_independent_check
class RGW(object):
    """
    Wrapper class for interaction with a cluster's RGW service
    """

    def __init__(self, namespace=None):
        # Fall back to the configured cluster namespace when none is given.
        self.namespace = (
            namespace if namespace else config.ENV_DATA["cluster_namespace"]
        )
        # External-mode clusters expose RGW through a different storageclass.
        if storagecluster_independent_check():
            sc_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RGW
        else:
            sc_name = constants.DEFAULT_STORAGECLASS_RGW
        self.storageclass = OCP(
            kind="storageclass", namespace=namespace, resource_name=sc_name
        )
        self.s3_internal_endpoint = (
            self.storageclass.get().get("parameters").get("endpoint")
        )
        self.region = self.storageclass.get().get("parameters").get("region")
        # Todo: Implement retrieval in cases where CephObjectStoreUser is available
        self.key_id = None
        self.secret_key = None
        self.s3_resource = None

    def get_credentials(self, secret_name=constants.NOOBAA_OBJECTSTOREUSER_SECRET):
        """
        Get Endpoint, Access key and Secret key from OCS secret. Endpoint is
        taken from rgw exposed service. Use rgw_endpoint fixture in test to get
        it exposed.

        Args:
            secret_name (str): Name of secret to be used
                for getting RGW credentials

        Returns:
            tuple: Endpoint, Access key, Secret key
        """
        if (
            secret_name == constants.NOOBAA_OBJECTSTOREUSER_SECRET
            and storagecluster_independent_check()
        ):
            secret_name = constants.EXTERNAL_MODE_NOOBAA_OBJECTSTOREUSER_SECRET
        secret_ocp_obj = OCP(kind=constants.SECRET, namespace=self.namespace)
        route_ocp_obj = OCP(
            kind=constants.ROUTE, namespace=config.ENV_DATA["cluster_namespace"]
        )
        creds_secret_obj = secret_ocp_obj.get(secret_name)
        # The RGW route name differs between internal and external deployments.
        if config.DEPLOYMENT["external_mode"]:
            endpoint = route_ocp_obj.get(
                resource_name=constants.RGW_SERVICE_EXTERNAL_MODE
            )
        else:
            endpoint = route_ocp_obj.get(
                resource_name=constants.RGW_SERVICE_INTERNAL_MODE
            )
        endpoint = f"http://{endpoint['status']['ingress'][0]['host']}"
        # Secret values are stored base64-encoded in the OCP secret.
        access_key = base64.b64decode(
            creds_secret_obj.get("data").get("AccessKey")
        ).decode("utf-8")
        secret_key = base64.b64decode(
            creds_secret_obj.get("data").get("SecretKey")
        ).decode("utf-8")
        return (endpoint, access_key, secret_key)
|
import random
import time
import string
import requests
import logging
from threading import Thread
import time
import datetime
import random
# Import Detection From Stealth
from .stealth import stealth
from .get_acrawler import get_acrawler
from playwright import sync_playwright
# Lazily-started module-wide playwright instance (shared by all browsers).
playwright = None


def get_playwright():
    """Start the sync playwright driver on first use and cache it globally.

    FIX: removed a no-op ``try/except Exception as e: raise e`` wrapper that
    only re-raised the original exception unchanged.
    """
    global playwright
    if playwright is None:
        playwright = sync_playwright().start()
    return playwright
class browser:
    """Headless WebKit session used to generate signed TikTok request URLs.

    FIXES applied in this copy:
    - ``gen_verifyFp`` returned an f-string that reused its own quote
      character inside a replacement field — a SyntaxError on Python < 3.12.
    - ``__init__`` had an unreachable ``logging.critical`` after a bare
      re-raise; the pointless try/except is removed.
    - ``sign_url`` computed a random verifyFp that was immediately
      overwritten on every path; the dead assignment is removed.
    """

    def __init__(
        self,
        **kwargs,
    ):
        self.debug = kwargs.get("debug", False)
        self.proxy = kwargs.get("proxy", None)
        self.api_url = kwargs.get("api_url", None)
        self.referrer = kwargs.get("referer", "https://www.tiktok.com/")
        self.language = kwargs.get("language", "en")
        self.executablePath = kwargs.get("executablePath", None)
        self.did = kwargs.get("custom_did", None)
        find_redirect = kwargs.get("find_redirect", False)  # accepted but currently unused
        args = kwargs.get("browser_args", [])
        options = kwargs.get("browser_options", {})
        if len(args) == 0:
            self.args = []
        else:
            self.args = args
        self.options = {
            "headless": True,
            "handleSIGINT": True,
            "handleSIGTERM": True,
            "handleSIGHUP": True,
        }
        if self.proxy is not None:
            if "@" in self.proxy:
                # scheme://user:pass@host:port -> playwright proxy dict
                server_prefix = self.proxy.split("://")[0]
                address = self.proxy.split("@")[1]
                self.options["proxy"] = {
                    "server": server_prefix + "://" + address,
                    "username": self.proxy.split("://")[1].split(":")[0],
                    "password": self.proxy.split("://")[1].split("@")[0].split(":")[1],
                }
            else:
                self.options["proxy"] = {"server": self.proxy}
        self.options.update(options)
        if self.executablePath is not None:
            self.options["executablePath"] = self.executablePath
        # Launch directly: the old try/except only re-raised, and its
        # logging call sat unreachably after the raise.
        self.browser = get_playwright().webkit.launch(
            args=self.args, **self.options
        )
        context = self.create_context(set_useragent=True)
        page = context.newPage()
        self.get_params(page)
        context.close()

    def get_params(self, page) -> None:
        """Capture environment parameters from the page (mostly stubbed out)."""
        # self.browser_language = await self.page.evaluate("""() => { return
        # navigator.language || navigator.userLanguage; }""")
        self.browser_language = ""
        # self.timezone_name = await self.page.evaluate("""() => { return
        # Intl.DateTimeFormat().resolvedOptions().timeZone; }""")
        self.timezone_name = ""
        # self.browser_platform = await self.page.evaluate("""() => { return window.navigator.platform; }""")
        self.browser_platform = ""
        # self.browser_name = await self.page.evaluate("""() => { return window.navigator.appCodeName; }""")
        self.browser_name = ""
        # self.browser_version = await self.page.evaluate("""() => { return window.navigator.appVersion; }""")
        self.browser_version = ""
        self.width = page.evaluate("""() => { return screen.width; }""")
        self.height = page.evaluate("""() => { return screen.height; }""")

    def create_context(self, set_useragent=False):
        """Open a new browser context with randomized iPhone-like settings."""
        iphone = playwright.devices["iPhone 11 Pro"]
        iphone["viewport"] = {
            "width": random.randint(320, 1920),
            "height": random.randint(320, 1920),
        }
        iphone["deviceScaleFactor"] = random.randint(1, 3)
        iphone["isMobile"] = random.randint(1, 2) == 1
        iphone["hasTouch"] = random.randint(1, 2) == 1
        context = self.browser.newContext(**iphone)
        if set_useragent:
            self.userAgent = iphone["userAgent"]
        return context

    def base36encode(self, number, alphabet="0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
        """Converts an integer to a base36 string."""
        base36 = ""
        sign = ""
        if number < 0:
            sign = "-"
            number = -number
        if 0 <= number < len(alphabet):
            return sign + alphabet[number]
        while number != 0:
            number, i = divmod(number, len(alphabet))
            base36 = alphabet[i] + base36
        return sign + base36

    def gen_verifyFp(self):
        """Generate a verifyFp token: base36 millis timestamp + UUID-like tail."""
        chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"[:]
        chars_len = len(chars)
        scenario_title = self.base36encode(int(time.time() * 1000))
        uuid = [0] * 36
        uuid[8] = "_"
        uuid[13] = "_"
        uuid[18] = "_"
        uuid[23] = "_"
        uuid[14] = "4"
        for i in range(36):
            if uuid[i] != 0:
                continue
            r = int(random.random() * chars_len)
            uuid[i] = chars[int((3 & r) | 8 if i == 19 else r)]
        # FIX: outer/inner quote characters now differ (SyntaxError < 3.12).
        return f"verify_{scenario_title.lower()}_{''.join(uuid)}"

    def sign_url(self, **kwargs):
        """Sign *url* with TikTok's acrawler script; returns (verifyFp, did, token)."""
        url = kwargs.get("url", None)
        if url is None:
            raise Exception("sign_url required a url parameter")
        context = self.create_context()
        page = context.newPage()
        if kwargs.get("gen_new_verifyFp", False):
            verifyFp = self.gen_verifyFp()
        else:
            verifyFp = kwargs.get(
                "custom_verifyFp",
                "verify_khgp4f49_V12d4mRX_MdCO_4Wzt_Ar0k_z4RCQC9pUDpX",
            )
        if kwargs.get("custom_did") is not None:
            did = kwargs.get("custom_did", None)
        elif self.did is None:
            did = str(random.randint(10000, 999999999))
        else:
            did = self.did
        page.setContent("<script> " + get_acrawler() + " </script>")
        evaluatedPage = page.evaluate(
            '''() => {
    var url = "'''
            + url
            + "&verifyFp="
            + verifyFp
            + """&did="""
            + did
            + """"
    var token = window.byted_acrawler.sign({url: url});
    return token;
}"""
        )
        context.close()
        return (
            verifyFp,
            did,
            evaluatedPage,
        )

    def clean_up(self):
        """Best-effort shutdown of the launched browser."""
        try:
            self.browser.close()
        except Exception:
            logging.info("cleanup failed")
        # playwright.stop()

    def find_redirect(self, url):
        """Load *url* and record where it finally redirects to."""
        self.page.goto(url, {"waitUntil": "load"})
        self.redirect_url = self.page.url

    def __format_proxy(self, proxy):
        # requests-style proxies dict, or None when no proxy configured.
        if proxy is not None:
            return {"http": proxy, "https": proxy}
        else:
            return None

    def __get_js(self):
        # Fetch TikTok's signing script (alternative to the bundled copy).
        return requests.get(
            "https://sf16-muse-va.ibytedtos.com/obj/rc-web-sdk-gcs/acrawler.js",
            proxies=self.__format_proxy(self.proxy),
        ).text
| import random
import time
import string
import requests
import logging
from threading import Thread
import time
import datetime
import random
# Import Detection From Stealth
from .stealth import stealth
from .get_acrawler import get_acrawler
from playwright import sync_playwright
# Lazily-started module-wide playwright instance (shared by all browsers).
playwright = None


def get_playwright():
    """Start the sync playwright driver on first use and cache it globally.

    FIX: removed a no-op ``try/except Exception as e: raise e`` wrapper that
    only re-raised the original exception unchanged.
    """
    global playwright
    if playwright is None:
        playwright = sync_playwright().start()
    return playwright
class browser:
    """Headless WebKit session used to generate signed TikTok request URLs."""

    def __init__(
        self,
        **kwargs,
    ):
        self.debug = kwargs.get("debug", False)
        self.proxy = kwargs.get("proxy", None)
        self.api_url = kwargs.get("api_url", None)
        self.referrer = kwargs.get("referer", "https://www.tiktok.com/")
        self.language = kwargs.get("language", "en")
        self.executablePath = kwargs.get("executablePath", None)
        self.did = kwargs.get("custom_did", None)
        # NOTE(review): read but never used afterwards.
        find_redirect = kwargs.get("find_redirect", False)
        args = kwargs.get("browser_args", [])
        options = kwargs.get("browser_options", {})
        if len(args) == 0:
            self.args = []
        else:
            self.args = args
        self.options = {
            "headless": True,
            "handleSIGINT": True,
            "handleSIGTERM": True,
            "handleSIGHUP": True,
        }
        if self.proxy is not None:
            if "@" in self.proxy:
                # scheme://user:pass@host:port -> playwright proxy dict
                server_prefix = self.proxy.split("://")[0]
                address = self.proxy.split("@")[1]
                self.options["proxy"] = {
                    "server": server_prefix + "://" + address,
                    "username": self.proxy.split("://")[1].split(":")[0],
                    "password": self.proxy.split("://")[1].split("@")[0].split(":")[1],
                }
            else:
                self.options["proxy"] = {"server": self.proxy}
        self.options.update(options)
        if self.executablePath is not None:
            self.options["executablePath"] = self.executablePath
        try:
            self.browser = get_playwright().webkit.launch(
                args=self.args, **self.options
            )
        except Exception as e:
            raise e
            # NOTE(review): unreachable — the raise above always exits first.
            logging.critical(e)
        context = self.create_context(set_useragent=True)
        page = context.newPage()
        self.get_params(page)
        context.close()

    def get_params(self, page) -> None:
        """Capture environment parameters from the page (mostly stubbed out)."""
        # self.browser_language = await self.page.evaluate("""() => { return
        # navigator.language || navigator.userLanguage; }""")
        self.browser_language = ""
        # self.timezone_name = await self.page.evaluate("""() => { return
        # Intl.DateTimeFormat().resolvedOptions().timeZone; }""")
        self.timezone_name = ""
        # self.browser_platform = await self.page.evaluate("""() => { return window.navigator.platform; }""")
        self.browser_platform = ""
        # self.browser_name = await self.page.evaluate("""() => { return window.navigator.appCodeName; }""")
        self.browser_name = ""
        # self.browser_version = await self.page.evaluate("""() => { return window.navigator.appVersion; }""")
        self.browser_version = ""
        self.width = page.evaluate("""() => { return screen.width; }""")
        self.height = page.evaluate("""() => { return screen.height; }""")

    def create_context(self, set_useragent=False):
        """Open a new browser context with randomized iPhone-like settings."""
        iphone = playwright.devices["iPhone 11 Pro"]
        iphone["viewport"] = {
            "width": random.randint(320, 1920),
            "height": random.randint(320, 1920),
        }
        iphone["deviceScaleFactor"] = random.randint(1, 3)
        iphone["isMobile"] = random.randint(1, 2) == 1
        iphone["hasTouch"] = random.randint(1, 2) == 1
        context = self.browser.newContext(**iphone)
        if set_useragent:
            self.userAgent = iphone["userAgent"]
        return context

    def base36encode(self, number, alphabet="0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
        """Converts an integer to a base36 string."""
        base36 = ""
        sign = ""
        if number < 0:
            sign = "-"
            number = -number
        if 0 <= number < len(alphabet):
            return sign + alphabet[number]
        while number != 0:
            number, i = divmod(number, len(alphabet))
            base36 = alphabet[i] + base36
        return sign + base36

    def gen_verifyFp(self):
        """Generate a verifyFp token: base36 millis timestamp + UUID-like tail."""
        chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"[:]
        chars_len = len(chars)
        scenario_title = self.base36encode(int(time.time() * 1000))
        uuid = [0] * 36
        uuid[8] = "_"
        uuid[13] = "_"
        uuid[18] = "_"
        uuid[23] = "_"
        uuid[14] = "4"
        for i in range(36):
            if uuid[i] != 0:
                continue
            r = int(random.random() * chars_len)
            uuid[i] = chars[int((3 & r) | 8 if i == 19 else r)]
        return f'verify_{scenario_title.lower()}_{"".join(uuid)}'

    def sign_url(self, **kwargs):
        """Sign *url* with TikTok's acrawler script; returns (verifyFp, did, token)."""
        url = kwargs.get("url", None)
        if url is None:
            raise Exception("sign_url required a url parameter")
        context = self.create_context()
        page = context.newPage()
        # NOTE(review): this random verifyFp is overwritten on every branch below.
        verifyFp = "".join(
            random.choice(
                string.ascii_lowercase + string.ascii_uppercase + string.digits
            )
            for i in range(16)
        )
        if kwargs.get("gen_new_verifyFp", False):
            verifyFp = self.gen_verifyFp()
        else:
            verifyFp = kwargs.get(
                "custom_verifyFp",
                "verify_khgp4f49_V12d4mRX_MdCO_4Wzt_Ar0k_z4RCQC9pUDpX",
            )
        if kwargs.get("custom_did") is not None:
            did = kwargs.get("custom_did", None)
        elif self.did is None:
            did = str(random.randint(10000, 999999999))
        else:
            did = self.did
        page.setContent("<script> " + get_acrawler() + " </script>")
        evaluatedPage = page.evaluate(
            '''() => {
    var url = "'''
            + url
            + "&verifyFp="
            + verifyFp
            + """&did="""
            + did
            + """"
    var token = window.byted_acrawler.sign({url: url});
    return token;
}"""
        )
        context.close()
        return (
            verifyFp,
            did,
            evaluatedPage,
        )

    def clean_up(self):
        """Best-effort shutdown of the launched browser."""
        try:
            self.browser.close()
        except Exception:
            logging.info("cleanup failed")
        # playwright.stop()

    def find_redirect(self, url):
        """Load *url* and record where it finally redirects to."""
        self.page.goto(url, {"waitUntil": "load"})
        self.redirect_url = self.page.url

    def __format_proxy(self, proxy):
        # requests-style proxies dict, or None when no proxy configured.
        if proxy is not None:
            return {"http": proxy, "https": proxy}
        else:
            return None

    def __get_js(self):
        # Fetch TikTok's signing script (alternative to the bundled copy).
        return requests.get(
            "https://sf16-muse-va.ibytedtos.com/obj/rc-web-sdk-gcs/acrawler.js",
            proxies=self.__format_proxy(self.proxy),
        ).text
|
# ***************************************************************
# Copyright (c) 2022 Jittor. All Rights Reserved.
# Maintainers: Dun Liang <randonlang@gmail.com>.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import subprocess as sp
import os
import re
import sys
import glob
import inspect
import datetime
import threading
import platform
import ctypes
import platform
from ctypes import cdll
from ctypes.util import find_library
import jittor_utils as jit_utils
from jittor_utils import LOG, run_cmd, find_exe, cc_path, cc_type, cache_path
from . import pyjt_compiler
from jittor_utils import lock
from jittor_utils import install_cuda
from jittor import __version__
import hashlib
def find_jittor_path():
    """Return the directory that contains this jittor package module."""
    return os.path.split(__file__)[0]
def make_cache_dir(cache_path):
    """Create *cache_path* (and log it) unless it already exists."""
    if os.path.isdir(cache_path):
        return
    LOG.i(f"Create cache dir: {cache_path}")
    os.mkdir(cache_path)
def shsplit(s):
    """Split *s* on single spaces, re-joining pieces inside quoted spans.

    Tracks the running count of quote characters; while the count is odd we
    are inside a quoted span, so the token is appended to the previous one.
    """
    pieces = []
    quote_count = 0
    for tok in s.split(' '):
        tok_quotes = tok.count('"') + tok.count("'")
        if quote_count & 1:
            quote_count += tok_quotes
            pieces[-1] = pieces[-1] + " " + tok
        else:
            quote_count = tok_quotes
            pieces.append(tok)
    return pieces
def remove_flags(flags, rm_flags):
    """Drop every flag that starts or ends with an entry of *rm_flags*."""
    def _matches(flag):
        # Compare with quotes stripped so quoted flags still match.
        bare = flag.replace('"', '')
        return any(bare.startswith(rm) or bare.endswith(rm) for rm in rm_flags)
    return " ".join(flag for flag in shsplit(flags) if not _matches(flag))
def moveback_flags(flags, rm_flags):
    """Reorder *flags* so that entries matching *rm_flags* come last.

    Relative order within the kept and the moved groups is preserved.
    """
    kept, moved = [], []
    for flag in shsplit(flags):
        bare = flag.replace('"', '')
        if any(bare.startswith(rm) or bare.endswith(rm) for rm in rm_flags):
            moved.append(flag)
        else:
            kept.append(flag)
    return " ".join(kept + moved)
def map_flags(flags, func):
    """Apply *func* to each flag token and re-join the results with spaces."""
    return " ".join(func(tok) for tok in shsplit(flags))
def compile(compiler, flags, inputs, output, combind_build=False, cuda_flags="", obj_dirname="obj_files"):
    """Compile *inputs* with *compiler*/*flags* into *output*.

    Sources are normally compiled one object file at a time (cached via
    ``jit_utils.cc`` when available) and then linked; with a single
    input or ``combind_build`` a single combined command is issued.
    ``.cu`` files are routed to nvcc when CUDA is available, otherwise
    skipped.  Returns the result of the final (link) command.

    FIX: the join expressions below used double quotes inside
    double-quoted f-string replacement fields, which is a SyntaxError
    before Python 3.12 (PEP 701); they now use single quotes.
    """
    def do_compile(cmd):
        # Prefer the C++ cache_compile helper (skips unchanged work);
        # fall back to a plain shell invocation.
        if jit_utils.cc:
            return jit_utils.cc.cache_compile(cmd, cache_path, jittor_path)
        else:
            run_cmd(cmd)
            return True
    base_output = os.path.basename(output).split('.')[0]
    if os.name == 'nt':
        # windows do not combind build, need gen def
        combind_build = False
        # windows need xxxx.lib
        afile = output.rsplit('.', 1)[0] + ".lib"
        afile = os.path.join(cache_path, afile)
        if cc_type != 'cl':
            # initialize order in windows seems reversed
            inputs = list(inputs[::-1])
            # NOTE(review): `link` is not defined anywhere in this
            # module's visible scope, so this branch (Windows, non-MSVC)
            # would raise NameError -- confirm whether this should read
            # `flags = flags + ...`.
            link = link + f' -Wl,--export-all-symbols,--out-implib,"{afile}" '
    if not os.path.isabs(output):
        output = os.path.join(cache_path, output)
    # don't recompile object file in inputs
    obj_files = []
    ex_obj_files = []
    new_inputs = []
    obj_dir = os.path.join(cache_path, obj_dirname)
    os.makedirs(obj_dir, exist_ok=True)
    for name in inputs:
        if name[-1] in 'oab':
            # pre-built artifacts (.o/.lib/.a) are linked as-is
            ex_obj_files.append(name)
        else:
            new_inputs.append(os.path.join(jittor_path, name))
            obj_files.append(os.path.join(
                obj_dir, os.path.basename(name)+".o"))
    inputs = new_inputs
    cm = lambda s: f"\"{s}\""
    cms = lambda arr: [f"\"{s}\"" for s in arr ]
    if len(inputs) == 1 or combind_build:
        cmd = f"\"{compiler}\" {' '.join(cms(inputs))} {flags} -o {cm(output)}"
        return do_compile(fix_cl_flags(cmd))
    # split compile object file and link
    # remove -l -L flags when compile object files
    oflags = remove_flags(flags, ['-l', '-L', '-Wl,', '.lib', '-shared'])
    cmds = []
    for input, obj_file in zip(inputs, obj_files):
        cc = compiler
        nflags = oflags
        cmd = f"{cm(input)} {nflags} {lto_flags} -c -o {cm(obj_file)}"
        if input.endswith(".cu"):
            if has_cuda:
                cmd = f"\"{nvcc_path}\" {cuda_flags} {cmd}"
                cmd = convert_nvcc_flags(fix_cl_flags(cmd))
            else:
                continue
        else:
            cmd = f"\"{cc}\" {cmd}"
            cmd = fix_cl_flags(cmd)
        if "nan_checker" in input:
            # nan checker needs to disable fast_math
            cmd = cmd.replace("--use_fast_math", "")
            cmd = cmd.replace("-Ofast", "-O2")
        cmds.append(cmd)
    jit_utils.run_cmds(cmds, cache_path, jittor_path, "Compiling "+base_output)
    obj_files += ex_obj_files
    if os.name == 'nt':
        # MSVC linking needs an explicit .def export list
        dumpdef_path = os.path.join(jittor_path, "utils", "dumpdef.py")
        cmd = f"\"{sys.executable}\" \"{dumpdef_path}\" {' '.join(cms(obj_files))} -Fo: \"{output}.def\""
        do_compile(fix_cl_flags(cmd))
    cmd = f"\"{compiler}\" {' '.join(cms(obj_files))} -o {cm(output)} {flags} {lto_flags}"
    return do_compile(fix_cl_flags(cmd))
def gen_jit_tests():
    """Scan all C++ sources under src/ for JIT_TEST(name) definitions
    and write gen/jit_tests.h, which declares each test and wraps it in
    a pyjt-exported ``tests`` submodule."""
    all_src = glob.glob(jittor_path+"/src/**/*.cc", recursive=True)
    jit_declares = []
    # Matches JIT_TEST(name); group 1 captures the test name.
    re_def = re.compile("JIT_TEST\\((.*?)\\)")
    names = set()
    test_defs = []
    for src_name in all_src:
        with open(src_name, 'rb') as f:
            src = f.read().decode('utf8')
        defs = re_def.findall(src)
        for name in defs:
            LOG.vv(f"Find test {name} from {src_name}")
            # Test names must be unique across the whole source tree.
            assert name not in names, f"Conflict test name {name}"
            names.add(name)
            jit_declares.append(f"JIT_TEST({name});")
            test_defs.append(f"""
/* From {src_name} */
// @pyjt({name})
static inline void test_{name}() {{ jit_test_{name}(); }}
""")
    jit_declares = "\n ".join(jit_declares)
    # Assemble the generated header; the text below is emitted verbatim
    # (with substitutions) into gen/jit_tests.h.
    jit_src = f"""
#pragma once
#include "common.h"
void expect_error(std::function<void()> func) {{
try {{ func(); }}
catch (...) {{ return; }}
CHECK(0) << "Missing error";
}}
namespace jittor {{
{jit_declares}
// @pyjt(tests)
// @attrs(submodule)
namespace tests {{
{"".join(test_defs)}
}}
}} // jittor
"""
    LOG.vvvv(jit_src)
    with open(os.path.join(cache_path, "gen", "jit_tests.h"), 'w', encoding='utf8') as f:
        f.write(jit_src)
def gen_jit_flags():
    """Scan C++ sources for DEFINE_FLAG declarations and write
    gen/jit_flags.h, exposing each flag as pyjt getter/setter methods on
    the generated ``Flags`` struct."""
    all_src = glob.glob(jittor_path+"/src/**/*.cc", recursive=True)
    jit_declares = []
    re_def = re.compile("DEFINE_FLAG(_WITH_SETTER)?\\((.*?)\\);", re.DOTALL)
    flags_defs = []
    # Dedup map: a flag defined in several sources is emitted once.
    visit = {}
    for src_name in all_src:
        with open(src_name, 'rb') as f:
            src = f.read().decode("utf8")
        defs = re_def.findall(src)
        for _, args in defs:
            args = args.split(",")
            # NOTE: `type` here shadows the builtin; it is the C++ type
            # string of the flag (e.g. "int").
            type = args[0].strip()
            name = args[1].strip()
            if not has_cuda and "cuda" in name and name!="use_cuda":
                continue
            default = args[2].strip()
            doc = ",".join(args[3:])
            # The doc text is a C++ string literal; eval turns it into a
            # Python string (sources are trusted, part of this repo).
            doc = eval(f"({doc})")
            LOG.vv(f"Find define {name} from {src_name}")
            if name in visit:
                continue
            visit[name] = 1
            jit_declares.append(f"DECLARE_FLAG({type}, {name});")
            # Some flags are also reachable under alias names.
            alias = []
            if name == "use_cuda":
                alias = ["use_device", "use_acl"]
            elif name == "auto_mixed_precision_level":
                alias = ["amp_level"]
            get_names = ",".join(["__get__"+a for a in [name]+alias])
            set_names = ",".join(["__set__"+a for a in [name]+alias])
            flags_defs.append(f"""
    /* {name}(type:{type}, default:{default}): {doc} */
    // @pyjt({get_names})
    {type} _get_{name}() {{ return {name}; }}
    // @pyjt({set_names})
    void _set_{name}({type} v) {{ set_{name}(v); }}
    {f'''// @pyjt({set_names})
    void _set_{name}(bool v) {{ set_{name}(v); }}
    ''' if type=="int" else ""}
""")
    jit_declares = "\n ".join(jit_declares)
    jit_src = f"""
#include "utils/flags.h"
namespace jittor {{
{jit_declares}
// @pyjt(Flags)
struct _Flags {{
    // @pyjt(__init__)
    _Flags() {{}}
    {"".join(flags_defs)}
}};
}} // jittor
"""
    LOG.vvvv(jit_src)
    with open(os.path.join(cache_path, "gen", "jit_flags.h"), 'w', encoding='utf8') as f:
        f.write(jit_src)
def gen_jit_op_maker(op_headers, export=False, extra_flags=""):
    """Generate the C++ "op maker" glue source for every op header.

    For each ``xxx_xxx_op.h`` this emits ``make_*`` factory functions,
    pyjt-annotated python bindings (including ``__iop__``/``__rop__``
    variants for arithmetic ops and per-functor aliases for
    binary/unary/reduce ops), plus an ``initer`` that registers each op.
    Returns the full generated source as one string.

    FIX: two f-strings below reused the enclosing quote character inside
    replacement fields; the ``op_registe`` line did not parse at all and
    the ``make_vh_vector`` line required Python 3.12+ (PEP 701).  Both
    now use the opposite quote style; the generated text is unchanged.
    """
    def add_src(
        cc_func_name,
        cc_args,
        op_name,
        op_args,
        src,
        pybind_name,
        py_args,
        jit_cc_src,
        doc_string,
        attrs
    ):
        # Ops in has_ir also get in-place (__iadd__) and reflected
        # (__radd__) python binding variants.
        has_ir = set(["add", "sub", "mul", "matmul", "truediv", "floordiv", "mod", "divmod", "pow", "lshift", "rshift", "and", "xor", "or"])
        pybind_names = [ s.strip() for s in pybind_name.split(",")]
        cc_make_args = [ arg.replace("VarHolder*", "Var*") for arg in cc_args ]
        op_make_args = [ arg.replace("->var", "") for arg in op_args ]
        py_args = [ arg.replace("Var*", "VarHolder*") for arg in py_args ]
        op_args = []
        cc_args_with_default = []
        for i, arg in enumerate(cc_args):
            pre_arg = arg.split()[-1].split('=')[0]
            op_arg = None
            if arg.startswith("VarHolder*"):
                op_arg = pre_arg+"->var"
            elif arg.startswith("vector<VarHolder*>"):
                op_arg = f"convert({pre_arg})"
            if "&&" in arg:
                if op_arg == None:
                    op_arg = "move("+pre_arg+")"
                op_make_args[i] = "move("+pre_arg+")"
            if op_arg==None: op_arg = pre_arg
            op_args.append(op_arg)
            py_arg = py_args[i]
            if "_a=" not in py_arg:
                cc_args_with_default.append(arg)
                continue
            py_arg = py_arg.split("_a=")[1]
            cc_args_with_default.append(arg + "=" + py_arg)
        cc_args = cc_args_with_default
        # steps of Op creation:
        # 1. new op
        # 2. new output var (create_output in op constructor)
        # 3. take over op's output VarPtr from outputs_holder
        # 4. set op's output
        # 5. set op's input
        # 6. infer shape(op->init())
        if "multiple_outputs" not in attrs:
            jit_cc_src.append(f"""
VarPtr make_{cc_func_name}({", ".join(cc_make_args)}) {{
    auto _op = new {op_name}({", ".join(op_make_args)});
    if (_op->outputs_holder.size() != 1) {{
        delete _op;
        LOGf << "Wrong output size of" << \"{op_name}\";
    }}
    if (_op->flags.get(NodeFlags::_forwarded)) {{
        VarPtr _out(move(_op->outputs_holder[0]));
        delete _op;
        return _out;
    }}
    _op->outputs_holder[0]->set_inputs({{_op}});
    VarPtr _out(move(_op->outputs_holder[0]));
    {src.replace("->var","")};
    _op->init();
    return _out;
}}
""")
        else:
            jit_cc_src.append(f"""
vector<VarPtr> make_{cc_func_name}({", ".join(cc_make_args)}) {{
    auto _op = new {op_name}({", ".join(op_make_args)});
    if (_op->flags.get(NodeFlags::_forwarded)) {{
        vector<VarPtr> _outs = move(_op->outputs_holder);
        delete _op;
        return _outs;
    }}
    vector<VarPtr> _outs = move(_op->outputs_holder);
    for (uint i=0; i<_outs.size(); i++)
        _outs[i]->set_inputs({{_op}});
    {src.replace("->var","")};
    _op->init();
    return _outs;
}}
""")
        if pybind_name == 'None':
            return
        pyjt_names = []
        for pybind_name in pybind_names:
            if pybind_name.startswith("__"):
                pyjt_names.append("Var."+pybind_name)
            else:
                pyjt_names.append(pybind_name)
                if len(cc_args)>0 and cc_args[0].startswith("VarHolder* "):
                    pyjt_names.append("Var."+pybind_name)
        if "multiple_outputs" in attrs:
            jit_cc_src.append(f"""
/*{doc_string}*/
// @pyjt({",".join(pyjt_names)})
vector_to_tuple<VarHolder*> {cc_func_name}({", ".join(cc_args)}) {{
    { f'return make_vh_vector(make_{cc_func_name}({", ".join(op_args)}));'
    if "replace_outputs" not in attrs else
    f'''auto rt = make_vh_vector(make_{cc_func_name}({", ".join(op_args)}));
    ASSERT(rt.size() == outputs.size());
    for (int i=0; i<outputs.size(); i++)
        outputs[i]->assign(rt[i]);
    return rt;
    '''}
}}
""")
        else:
            jit_cc_src.append(f"""
/*{doc_string}*/
// @pyjt({",".join(pyjt_names)})
VarHolder* {cc_func_name}({", ".join(cc_args)}) {{
    return new VarHolder(make_{cc_func_name}({", ".join(op_args)}));
}}
""")
        need_ir_define = False
        ir_name = None
        for pybind_name in pybind_names:
            if pybind_name.startswith("__") and pybind_name[2:-2] in has_ir:
                need_ir_define = True
                assert ir_name is None
                ir_name = pybind_name[2:-2]
        if need_ir_define:
            assert len(cc_args)>0 and cc_args[0].startswith("VarHolder* ")
            this = cc_args[0].split()[-1]
            jit_cc_src.append(f"""
// @pyjt(Var.__i{ir_name}__)
// @attrs(return_self)
VarHolder* i{cc_func_name}({", ".join(cc_args)}) {{
    *{this} = make_{cc_func_name}({", ".join(op_args)});
    return {this};
}}
""")
            assert len(cc_args)>1 and cc_args[1].startswith("VarHolder* "), cc_args
            r_cc_args = [cc_args[1], cc_args[0]] + cc_args[2:]
            r_py_args = [py_args[1], py_args[0]] + py_args[2:]
            jit_cc_src.append(f"""
VarHolder* r{cc_func_name}({", ".join(r_cc_args)}) {{
    return new VarHolder(make_{cc_func_name}({", ".join(op_args)}));
}}
""")
    jit_cc_src = []
    jit_headers = ""
    initer = []
    pybind_reg = '(/\\*(.*?)\\*/\\s*)?(//\\s*@pybind\\(([^\\n]*)\\)\\s*)?'
    pybind_attrs_reg = pybind_reg + '(//\\s*@attrs\\(([^\\n]*)\\)\\s*)?'
    for header in op_headers:
        # xxx_xxx_op
        name = os.path.basename(header)
        name = os.path.splitext(name)[0]
        # xxx_xxx
        assert name.endswith("_op")
        func_name = name[:-3]
        # XxxXxxOp
        name2 = map(lambda s:s[:1].upper() + s[1:], name.split('_'))
        name2 = "".join(name2)
        with open(header, encoding='utf8') as f:
            src = f.read()
        # XxxXxxOp(args)
        res = re.findall(pybind_attrs_reg + '[^~]('+name2+"\\([^\\n]*\\))", src, re.S)
        assert len(res) >= 1, "Wrong op args in " + header
        # registe op
        cc_name = header[:-2] + ".cc"
        constructors = []
        for i in range(len(res)):
            name = 'make_'+func_name+'_'*i
            constructors.append(f"{{ &typeid(&{name}), (void*)&{name} }}")
        constructors = ",".join(constructors)
        var_member_reg = r"\n\s*Var\b(.*);"
        var_member_match = re.findall(var_member_reg, src)
        var_member_match = " ".join(var_member_match)
        for c in "*,": var_member_match = var_member_match.replace(c, " ")
        var_member = var_member_match.split()
        LOG.vv("var_member_match "+var_member_match)
        LOG.vv("var_member "+str(var_member))
        var_member_src = [ f"VAR_MEMBER_NAME_AND_OFFSET({name}, {name2})" for name in var_member ]
        var_member_src = ",".join(var_member_src)
        # FIX: restore double quotes inside the generated C++ string --
        # the previous single-quoted form broke the f-string itself.
        initer.append(f'\n op_registe({{ "{func_name}", R"({cc_name})", extra_flags, {{{constructors}}}, {{{var_member_src}}} }});')
        for hid, h_def in enumerate(res):
            h_def = list(h_def)
            # // @attrs(...)
            attrs = {}
            if h_def[4] != "":
                attrs = pyjt_compiler.parse_attrs(h_def[5])
            del h_def[4:6]
            # /* doc_string */
            # // @pybind(bind_name)
            # XxxXxxOp(args_def)
            doc_string = h_def[1].strip()
            h_def = h_def[2:]
            args_def = h_def[2][len(name2)+1:-1]
            bind_name = h_def[1]
            if bind_name == "":
                bind_name = func_name
            if args_def=="":
                args = []
            else:
                args = list(map(lambda s: s.split()[-1].split('=')[0], args_def.split(',')))
            # py_args: "arg"_a=default
            py_args = []
            new_args_def = []
            new_args = []
            # source of convert VarHolder* to Var*
            vh2v_src = []
            more_src = []
            for arg, arg_def in zip(args, args_def.split(',')):
                py_arg = f'"{arg}"_a'
                if '=' in arg_def:
                    py_arg += "=" + arg_def.split('=')[-1]
                    arg_def = arg_def.split('=')[0]
                py_args.append(py_arg)
                arg_type = arg_def[:-(len(arg)+1)].strip()
                if arg_type == "Var*":
                    new_args_def.append("VarHolder* " + arg)
                    vh2v_src.append(arg + "->var")
                    new_args.append(arg + "->var")
                elif arg_type.startswith("vector<Var*>"):
                    new_args_def.append(
                        arg_type.replace("Var", "VarHolder")+' '+arg)
                    new_args.append(arg)
                    more_src.append(f"_op->add_inputs({arg});")
                elif arg_type.startswith("VarSlices"):
                    new_args_def.append(arg_def)
                    new_args.append(arg)
                    more_src.append(f"""
                    vector<Var*> svars;
                    for (int i=0; i<_op->vs.n; i++)
                        if (_op->vs.slices[i].is_var())
                            svars.push_back(_op->vs.slices[i].var);
                    _op->add_inputs(svars);""")
                else:
                    new_args_def.append(arg_def)
                    new_args.append(arg)
            vh2v_src = "_op->set_inputs({" + ", ".join(vh2v_src) + "});" + \
                "".join(more_src)
            LOG.vvvv(f"Find op: {name2} args: {new_args}")
            # if header.startswith("src/"):
            #     jit_headers += f"#include \"{header[4:]}\"\n"
            # else:
            jit_headers += f"#include \"{header}\"\n"
            add_src(
                func_name+'_'*hid,
                new_args_def,
                name2,
                new_args,
                vh2v_src,
                bind_name,
                py_args,
                jit_cc_src,
                doc_string,
                attrs
            )
            if func_name in ["binary", "unary", "reduce"]:
                # generate binary op alias
                with open(os.path.join(jittor_path, f"src/ops/{func_name}_op.cc"), encoding="utf-8") as f:
                    src = f.read()
                src = src.split(f"unordered_set<string> {func_name}_ops = ""{")[1].split("};")[0]
                match_result = re.findall(pybind_reg + "\"([a-z_A-Z0-9]*)\"", src, re.S)
                # remove /* doc_string */ pattern
                res2 = [ (_[3], _[4]) for _ in match_result ]
                LOG.vvvv(f"All supported {func_name} ops: {res2}")
                # remove op args
                if func_name == "reduce":
                    args_def = new_args_def[:1] + new_args_def[2:]
                    py_args_s = py_args[:1] + py_args[2:]
                else:
                    args_def = new_args_def[:-1]
                    py_args_s = py_args[:-1]
                # find the last type id(float64)
                # add "_" suffix for all function
                if func_name == "unary":
                    last_tid = res2.index(("","float64"))
                # for each functor
                for tid, (bind_name, func_name2) in enumerate(res2):
                    # get certain op doc_string
                    doc_string2 = match_result[tid][1].strip()
                    if len(doc_string2) == 0:
                        doc_string2 = doc_string
                    # add _ for types
                    if func_name == "unary" and tid <= last_tid:
                        func_name3 = func_name2 + "_"
                    elif func_name == "reduce":
                        func_name4 = func_name2
                        func_name2 = "reduce_" + func_name2
                        func_name3 = func_name2
                    else:
                        func_name3 = func_name2
                    if len(bind_name) == 0:
                        bind_name = func_name2
                    if func_name == "reduce":
                        args = new_args[:1] + [f'ns_{func_name4}'] + new_args[2:]
                    else:
                        args = new_args[:-1] + [f'ns_{func_name2}']
                    add_src(
                        func_name3+'_'*hid,
                        args_def,
                        name2,
                        args,
                        vh2v_src,
                        bind_name,
                        py_args_s,
                        jit_cc_src,
                        doc_string2,
                        attrs
                    )
    jit_src = f"""
#pragma once
#include "pyjt/py_obj_holder.h"
#include "var.h"
#include "var_holder.h"
#include "ops/op_register.h"
{jit_headers}
namespace jittor {{
// fix make_array(py::array) undefine reference
#pragma GCC visibility push(default)
#define JIT_NAMESPACE {export+"_maker" if export else "jit_op_maker"}
// @pyjt(ops)
// @attrs(submodule{",core_name="+export if export else ""})
namespace JIT_NAMESPACE {{
{"".join(jit_cc_src)}
void initer() {{
string extra_flags = R"({extra_flags})";
{"".join(initer)}
}}
int caller = (initer(), 0);
}} // JIT_NAMESPACE
}} // jittor
{f'''
namespace jittor {{
extern void pyjt_def_{export}(PyObject*);
}}
static void init_module(PyModuleDef* mdef, PyObject* m) {{
mdef->m_doc = "User defined custom ops";
jittor::pyjt_def_{export}(m);
}}
PYJT_MODULE_INIT({export});
''' if export else ""}
"""
    return jit_src
@lock.lock_scope()
def compile_custom_op(header, source, op_name, warp=True):
    """Compile a single custom op and return its python binding.

    header: code of op header, not path
    source: code of op source, not path
    op_name: op_name of this op, it will used for
        generation of header and source files, if the
        type name of op is XxxXxxOp, op_name should be
        xxx_xxx
    warp: if true, wrap the given snippets in the standard includes and
        the ``jittor`` namespace before writing them out
    """
    if warp:
        header = f"""
#pragma once
#include "op.h"
#include "var.h"
namespace jittor {{
{header}
}}
"""
        source = f"""
#include "{op_name}_op.h"
namespace jittor {{
{source}
}}
"""
    # Write the snippets into the cache as real files, then reuse the
    # multi-op compile path for a single header/source pair.
    cops_dir = os.path.join(cache_path, "custom_ops")
    make_cache_dir(cops_dir)
    hname = os.path.join(cops_dir, op_name+"_op.h")
    ccname = os.path.join(cops_dir, op_name+"_op.cc")
    with open(hname, 'w', encoding='utf8') as f:
        f.write(header)
    with open(ccname, 'w', encoding='utf8') as f:
        f.write(source)
    m = compile_custom_ops([hname, ccname])
    # Return just this op's callable from the compiled module.
    return getattr(m, op_name)
@lock.lock_scope()
def compile_custom_ops(
    filenames,
    extra_flags="",
    return_module=False,
    dlopen_flags=None,
    gen_name_ = ""):
    """Compile custom ops
    filenames: path of op source files, filenames must be
        pairs of xxx_xxx_op.cc and xxx_xxx_op.h, and the
        type name of op must be XxxXxxOp.
    extra_flags: extra compile flags
    return_module: return module rather than ops(default: False)
    gen_name_: override the auto-generated module name
    return: compiled ops
    """
    if dlopen_flags is None:
        dlopen_flags = os.RTLD_GLOBAL | os.RTLD_NOW
        if platform.system() == 'Linux':
            dlopen_flags |= os.RTLD_DEEPBIND
    # Partition inputs: sources to build, headers per op, extra include
    # dirs, and headers containing @pyjt annotations that need their own
    # generated binding source.
    srcs = {}
    headers = {}
    builds = []
    includes = []
    pyjt_includes = []
    for name in filenames:
        name = os.path.realpath(name)
        if name.endswith(".cc") or name.endswith(".cpp") or name.endswith(".cu"):
            builds.append(name)
        if name.endswith(".h"):
            dirname = os.path.dirname(name)
            if dirname.endswith("inc"):
                includes.append(dirname)
            with open(name, "r", encoding='utf8') as f:
                if "@pyjt" in f.read():
                    pyjt_includes.append(name)
        bname = os.path.basename(name)
        bname = os.path.splitext(bname)[0]
        if bname.endswith("_op"):
            bname = bname[:-3]
            if name.endswith(".cc"):
                srcs[bname] = name
            elif name.endswith(".h"):
                includes.append(os.path.dirname(name))
                headers[bname] = name
    assert len(srcs) == len(headers), "Source and header names not match"
    for name in srcs:
        assert name in headers, f"Header of op {name} not found"
    # Module name derives from the op names; long names are truncated
    # and disambiguated with a hash.
    gen_name = "gen_ops_" + "_".join(headers.keys())
    if gen_name_ != "":
        gen_name = gen_name_
    if len(gen_name) > 50:
        gen_name = gen_name[:50] + "___hash" + hashlib.md5(gen_name.encode()).hexdigest()[:6]
    includes = sorted(list(set(includes)))
    includes = "".join(map(lambda x: f" -I\"{x}\" ", includes))
    LOG.vvvv(f"Include flags:{includes}")
    op_extra_flags = includes + extra_flags
    lib_path = os.path.join(cache_path, "custom_ops")
    make_cache_dir(lib_path)
    gen_src_fname = os.path.join(lib_path, gen_name+".cc")
    gen_head_fname = os.path.join(lib_path, gen_name+".h")
    gen_lib = os.path.join(lib_path, gen_name+extension_suffix)
    libname = gen_name + lib_suffix
    op_extra_flags += f" -L\"{lib_path}\" -l\"{libname}\" "
    gen_src = gen_jit_op_maker(headers.values(), export=gen_name, extra_flags=op_extra_flags)
    pyjt_compiler.compile_single(gen_head_fname, gen_src_fname, src=gen_src)
    # gen src initialize first
    builds.insert(0, gen_src_fname)
    def insert_anchor(gen_src, anchor_str, insert_str):
        # insert insert_str after anchor_str into gen_src
        return gen_src.replace(anchor_str, anchor_str+insert_str, 1)
    # For headers with @pyjt annotations, generate binding sources and
    # hook their module-init calls into the generated source.
    for name in pyjt_includes:
        LOG.v("handle pyjt_include ", name)
        bname = os.path.basename(name).split(".")[0]
        gen_src_fname = os.path.join(cache_path, "custom_ops", gen_name+"_"+bname+".cc")
        pyjt_compiler.compile_single(name, gen_src_fname)
        builds.insert(1, gen_src_fname)
        gen_src = insert_anchor(gen_src,
            "namespace jittor {",
            f"extern void pyjt_def_{bname}(PyObject* m);")
        gen_src = insert_anchor(gen_src,
            "init_module(PyModuleDef* mdef, PyObject* m) {",
            f"jittor::pyjt_def_{bname}(m);")
    with open(gen_head_fname, "w", encoding='utf8') as f:
        f.write(gen_src)
    LOG.vvv(f"Build custum ops lib:{gen_lib}")
    LOG.vvvv(f"Build sources:{builds}")
    compile(cc_path, extra_flags+cc_flags+opt_flags+includes, builds, gen_lib)
    # add python path and import
    LOG.vvv(f"Import custum ops lib:{gen_lib}")
    lib_path = os.path.join(cache_path, "custom_ops")
    if lib_path not in os.sys.path:
        os.sys.path.append(lib_path)
    # unlock scope when initialize
    with lock.unlock_scope():
        with jit_utils.import_scope(dlopen_flags):
            exec(f"import {gen_name}")
    mod = locals()[gen_name]
    if return_module:
        return mod
    return mod.ops
def get_full_path_of_executable(name):
    """Resolve *name* to an absolute, symlink-free path of an executable
    file; if the direct resolution is not runnable, search PATH via
    ``find_exe`` and retry."""
    candidate = os.path.abspath(name)
    while os.path.islink(candidate):
        candidate = os.path.realpath(candidate)
    if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
        return candidate
    # Not directly runnable under this name: locate it on PATH instead.
    return get_full_path_of_executable(find_exe(name))
def compile_extern():
    """Compile the bundled LLVM pass plugins (clang on Linux only) and
    append the working ``-Xclang -load`` flags to kernel_opt_flags."""
    # compile llvm passes
    if cc_type != "clang" or platform.system() != 'Linux':
        return
    global kernel_opt_flags
    cache_path_llvm = os.path.join(cache_path, "llvm")
    jittor_path_llvm = os.path.join(jittor_path, "extern", "llvm")
    clang_dir = os.path.dirname(get_full_path_of_executable(cc_path))
    assert clang_dir.endswith("bin") and "llvm" in clang_dir, f"Wrong clang_dir: {clang_dir}"
    llvm_include = os.path.abspath(os.path.join(clang_dir, "..", "include"))
    assert os.path.isdir(llvm_include), "LLVM include path not found"
    make_cache_dir(cache_path_llvm)
    files = os.listdir(jittor_path_llvm)
    # test_pass.cc is used for test link problem of llvm pass plugin
    test_pass_path = os.path.join(cache_path_llvm, "test_pass.cc")
    with open(test_pass_path, 'w', encoding='utf8') as f:
        f.write("int main() {return 0;}")
    # -fno-rtti fix link error
    # -Wl,-znodelete fix segfault
    # https://github.com/sampsyo/llvm-pass-skeleton/issues/7#issuecomment-401834287
    # -D_GLIBCXX_USE_CXX11_ABI=0 fix undefined symbol: createPrinterPass
    # https://stackoverflow.com/questions/37366291/undefined-symbol-for-self-built-llvm-opt
    # try different flags
    try_flags = [
        " -Wl,-znodelete -D_GLIBCXX_USE_CXX11_ABI=0 ",
        " -Wl,-znodelete ",
    ]
    # Index of the flag combination known to link, -1 until discovered;
    # once found, only that combination is used for remaining passes.
    found_flags_id = -1
    for fname in files:
        for i, flag in enumerate(try_flags):
            if found_flags_id != -1 and found_flags_id != i:
                continue
            so_name = os.path.join(cache_path_llvm, os.path.splitext(fname)[0]+f".{i}.so")
            compile(
                cc_path,
                f"{cc_flags} {opt_flags} {flag} -I'{llvm_include}'",
                [os.path.join(jittor_path_llvm, fname)],
                so_name
            )
            # if not found available flags, we test it.
            if found_flags_id == -1:
                try:
                    s = run_cmd(
                        f"{cc_path} {cc_flags} -Xclang -load -Xclang '{so_name}' {test_pass_path}",
                        cache_path_llvm,
                        print_error=False
                    )
                except Exception as e:
                    LOG.v(f"Try flag {flag} failed: {e}")
                    continue
                found_flags_id = i
            kernel_opt_flags += f" -Xclang -load -Xclang '{so_name}' "
            break
        else:
            # Every flag combination failed for this pass: give up.
            LOG.w("Clang is used, but LLVM pass plugin is unable to link.")
            break
    LOG.vv(f"Compile extern llvm passes: {str(files)}")
def check_cuda():
    """Locate the CUDA toolkit next to ``nvcc_path``, extend the global
    compile flags, dlopen the CUDA runtime, and set ``has_cuda = 1``."""
    if not nvcc_path:
        return
    global cc_flags, has_cuda, core_link_flags, cuda_dir, cuda_lib, cuda_include, cuda_home, cuda_bin
    cuda_dir = os.path.dirname(get_full_path_of_executable(nvcc_path))
    cuda_bin = cuda_dir
    cuda_home = os.path.abspath(os.path.join(cuda_dir, ".."))
    # try default nvidia-cuda-toolkit in Ubuntu 20.04
    # assert cuda_dir.endswith("bin") and "cuda" in cuda_dir.lower(), f"Wrong cuda_dir: {cuda_dir}"
    cuda_include = os.path.abspath(os.path.join(cuda_dir, "..", "include"))
    cuda_lib = os.path.abspath(os.path.join(cuda_dir, "..", "lib64"))
    if nvcc_path == "/usr/bin/nvcc":
        # this nvcc is install by package manager
        cuda_lib = "/usr/lib/x86_64-linux-gnu"
    cuda_include2 = os.path.join(jittor_path, "extern","cuda","inc")
    cc_flags += f" -DHAS_CUDA -DIS_CUDA -I\"{cuda_include}\" -I\"{cuda_include2}\" "
    if os.name == 'nt':
        cuda_lib = os.path.abspath(os.path.join(cuda_dir, "..", "lib", "x64"))
        # cc_flags += f" \"{cuda_lib}\\cudart.lib\" "
        cuda_lib_path = glob.glob(cuda_bin+"/cudart64*")[0]
        cc_flags += f" -lcudart -L\"{cuda_lib}\" -L\"{cuda_bin}\" "
        dll = ctypes.CDLL(cuda_lib_path, dlopen_flags)
        # Smoke-test the runtime before committing to CUDA support.
        ret = dll.cudaDeviceSynchronize()
        assert ret == 0
    else:
        cc_flags += f" -lcudart -L\"{cuda_lib}\" "
        # ctypes.CDLL(cuda_lib+"/libcudart.so", import_flags)
        ctypes.CDLL(cuda_lib+"/libcudart.so", dlopen_flags)
    has_cuda = 1
def check_cache_compile():
    """Build (or rebuild) the ``jit_utils_core`` helper extension that
    provides cached compilation; exit so the user can rerun if it was
    rebuilt while already loaded, otherwise import it."""
    files = [
        "src/utils/cache_compile.cc",
        "src/utils/log.cc",
        "src/utils/tracer.cc",
        "src/utils/jit_utils.cc",
        "src/utils/str_utils.cc",
    ]
    if os.name == 'nt':
        files = [ x.replace('/', '\\') for x in files ]
    global jit_utils_core_files
    jit_utils_core_files = files
    recompile = compile(cc_path, cc_flags+f" {opt_flags} ", files, jit_utils.cache_path+'/jit_utils_core'+extension_suffix, True)
    if recompile and jit_utils.cc:
        # The already-imported helper is stale; a fresh process is
        # required to pick up the new build.
        LOG.e("jit_utils updated, please rerun your command.")
        sys.exit(0)
    if not jit_utils.cc:
        with jit_utils.import_scope(import_flags):
            jit_utils.try_import_jit_utils_core()
        assert jit_utils.cc
        # recompile, generate cache key
        compile(cc_path, cc_flags+f" {opt_flags} ", files, jit_utils.cache_path+'/jit_utils_core'+extension_suffix, True)
def env_or_try_find(name, bname):
    """Prefer an explicit path from environment variable *name*;
    otherwise search PATH for executable *bname* ("" when absent)."""
    path = os.environ.get(name, "")
    if path != "":
        version = jit_utils.get_version(path)
        LOG.i(f"Found {bname}{version} at {path}")
        return path
    return try_find_exe(bname)
def try_find_exe(*args):
    """Like ``find_exe`` but return "" instead of raising when the
    executable cannot be found.

    FIX: narrowed the bare ``except:`` to ``except Exception`` so that
    SystemExit/KeyboardInterrupt are no longer silently swallowed.
    """
    try:
        return find_exe(*args)
    except Exception:
        LOG.v(f"{args[0]} not found.")
        return ""
def check_pybt(gdb_path, python_path):
    """Heuristically report whether gdb's ``py-bt`` can be used: both a
    gdb and a python executable path must be known (non-empty)."""
    return gdb_path != '' and python_path != ''
    # TODO: prev we use below code to check has py-bt or nor
    # but it is too slow, so we comment it,
    # find a better way to check py-bt exist
    # ret = sp.getoutput(f"{gdb_path} --batch {python_path} -ex 'help py-bt'")
    # if 'python frame' in ret:
    #     LOG.v("py-bt found in gdb.")
    #     return True
    # return False
def check_debug_flags():
    """Derive the global ``is_debug`` switch from the ``debug`` env var;
    in debug mode, extend ``cc_flags`` with symbols and memory checks."""
    global is_debug, cc_flags
    is_debug = 0
    if os.environ.get("debug") == "1":
        is_debug = 1
        cc_flags += " -g -DNODE_MEMCHECK "
# --- Module bootstrap: dlopen flags, jittor/cache paths, python and
# --- CUDA toolchain discovery, and the per-CUDA-version cache key.
cc_flags = " "
# os.RTLD_NOW | os.RTLD_GLOBAL cause segfault when import torch first
import_flags = os.RTLD_NOW | os.RTLD_GLOBAL
if platform.system() == 'Linux':
    import_flags |= os.RTLD_DEEPBIND
# if cc_type=="icc":
#     # weird link problem, icc omp library may conflict and cause segfault
#     import_flags = os.RTLD_NOW | os.RTLD_GLOBAL
dlopen_flags = os.RTLD_NOW | os.RTLD_GLOBAL
if platform.system() == 'Linux':
    # NOTE(review): this mutates import_flags again (a no-op, it is
    # already set above) -- presumably `dlopen_flags` was intended here;
    # confirm before changing.
    import_flags |= os.RTLD_DEEPBIND
with jit_utils.import_scope(import_flags):
    jit_utils.try_import_jit_utils_core()
jittor_path = find_jittor_path()
if os.name == 'nt':
    # prevent windows recompile
    jittor_path = jittor_path.lower()
check_debug_flags()
sys.path.append(cache_path)
LOG.i(f"Jittor({__version__}) src: {jittor_path}")
LOG.i(f"{jit_utils.cc_type} at {jit_utils.cc_path}{jit_utils.get_version(jit_utils.cc_path)}")
LOG.i(f"cache_path: {cache_path}")
with jit_utils.import_scope(import_flags):
    jit_utils.try_import_jit_utils_core()
python_path = sys.executable
# sometime python do not return the correct sys executable
# this will happend when multiple python version installed
ex_python_path = python_path + '.' + str(sys.version_info.minor)
if os.path.isfile(ex_python_path):
    python_path = ex_python_path
# if jtcuda is already installed
nvcc_path = None
if install_cuda.has_installation() or os.name == 'nt':
    nvcc_path = install_cuda.install_cuda()
    if nvcc_path:
        nvcc_path = try_find_exe(nvcc_path)
# check system installed cuda
if not nvcc_path:
    nvcc_path = env_or_try_find('nvcc_path', 'nvcc') or \
        try_find_exe('/usr/local/cuda/bin/nvcc') or \
        try_find_exe('/usr/bin/nvcc') or \
        try_find_exe('/opt/cuda/bin/nvcc')
# if system has no cuda, install jtcuda
if not nvcc_path:
    nvcc_path = install_cuda.install_cuda()
    if nvcc_path:
        nvcc_path = try_find_exe(nvcc_path)
if nvcc_path is None:
    nvcc_path = ""
# An explicit nvcc_path env var always wins.
if "nvcc_path" in os.environ:
    nvcc_path = os.environ["nvcc_path"]
gdb_path = env_or_try_find('gdb_path', 'gdb')
addr2line_path = try_find_exe('addr2line')
has_pybt = check_pybt(gdb_path, python_path)
if nvcc_path:
    # gen cuda key for cache_path
    cu = "cu"
    v = jit_utils.get_version(nvcc_path)[1:-1]
    nvcc_version = list(map(int,v.split('.')))
    cu += v
    try:
        r, s = sp.getstatusoutput(f"log_v=0 {sys.executable} -m jittor_utils.query_cuda_cc")
        if r==0:
            s = sorted(list(set(s.strip().split())))
            cu += "_sm_" + "_".join(s)
            if "cuda_arch" not in os.environ:
                # NOTE(review): " ".join(cu) space-separates every
                # character of the key string; " ".join(s) (the compute
                # capabilities) looks like the intent -- confirm.
                os.environ["cuda_arch"] = " ".join(cu)
            cu = cu.replace(":", "").replace(" ", "")
    except:
        pass
    LOG.i("cuda key:", cu)
    # Separate cache directory per CUDA version / SM combination.
    cache_path = os.path.join(cache_path, cu)
    sys.path.append(cache_path)
def check_clang_latest_supported_cpu():
    """Query clang for its supported CPU list and return the newest
    ``apple-aN`` target name (used for -mcpu on Apple Silicon)."""
    listing = run_cmd('clang --print-supported-cpus')
    ids = [int(line.strip()[7:]) for line in listing.split('\n') if 'apple-a' in line]
    return f'apple-a{max(ids)}'
# --- Base compiler-flag setup: warnings, arch tuning, include paths,
# --- user overrides, link flags, and OpenMP kernel flags.
# cc_flags += " -Wall -Werror -Wno-unknown-pragmas -std=c++14 -fPIC "
cc_flags += " -Wall -Wno-unknown-pragmas -std=c++14 -fPIC "
# 1. Arch/CPU specific optimization
if platform.machine() in ["x86_64", "AMD64"]:
    cc_flags += " -march=native "
elif platform.machine() == 'arm64' and platform.system() == "Darwin":
    cc_flags += f" -mcpu={check_clang_latest_supported_cpu()} "
cc_flags += " -fdiagnostics-color=always "
# 2. Non standard include path
if platform.system() == 'Darwin' and platform.machine() == 'arm64':
    cc_flags += " -I/opt/homebrew/include "
# 3. User specified flags
if "cc_flags" in os.environ:
    cc_flags += os.environ["cc_flags"] + ' '
cc_flags += " -lstdc++ -ldl -shared "
if platform.system() == 'Darwin':
    # TODO: if not using apple clang, there is no need to add -lomp
    cc_flags += "-undefined dynamic_lookup -lomp "
    if os.environ.get('CONDA_PREFIX', None):
        # FIX: use single quotes inside the replacement field -- double
        # quotes inside a double-quoted f-string are a SyntaxError
        # before Python 3.12 (PEP 701).
        cc_flags += f" -L{os.path.join(os.environ['CONDA_PREFIX'], 'lib')} "
    if platform.machine() == "arm64":
        cc_flags += " -L/opt/homebrew/lib "
opt_flags = ""
py_include = jit_utils.get_py3_include_path()
LOG.v(f"py_include: {py_include}")
extension_suffix = jit_utils.get_py3_extension_suffix()
lib_suffix = extension_suffix.rsplit(".", 1)[0]
LOG.v(f"extension_suffix: {extension_suffix}")
so = ".so" if os.name != 'nt' else ".dll"
kernel_opt_flags = os.environ.get("kernel_flags", "") + opt_flags
if platform.system() == 'Darwin':
    # TODO: if not using apple clang, cannot add -Xpreprocessor
    kernel_opt_flags += " -Xpreprocessor -fopenmp "
elif cc_type != 'cl':
    kernel_opt_flags += " -fopenmp "
def fix_cl_flags(cmd):
    """Normalize linker tokens for non-MSVC toolchains.

    ``-l`` entries naming cpython/lib* libraries are resolved to a
    concrete shared object on macOS (searching previously seen -L dirs,
    newest first) or rewritten to the ``-l:name.so`` form elsewhere;
    every ``-L`` path additionally gets an rpath entry.
    """
    seen_lib_dirs = []
    rewritten = []
    for tok in shsplit(cmd):
        if tok.startswith("-l") and ("cpython" in tok or "lib" in tok):
            if platform.system() == 'Darwin':
                wanted = tok[2:] + ".so"
                hit = None
                for d in reversed(seen_lib_dirs):
                    candidate = os.path.join(d, wanted).replace("\"", "")
                    if os.path.isfile(candidate):
                        hit = candidate
                        break
                rewritten.append(tok if hit is None else hit)
            else:
                rewritten.append(f"-l:{tok[2:]}.so")
        elif tok.startswith("-L"):
            seen_lib_dirs.append(tok[2:])
            rewritten.append(f"{tok} -Wl,-rpath,{tok[2:]}")
        else:
            rewritten.append(tok)
    return " ".join(rewritten)
# --- Windows toolchain adaptation: for MSVC (cl), rewrite the gcc-style
# --- flags accumulated above and install an MSVC-aware fix_cl_flags.
if os.name == 'nt':
    if cc_type == 'g++':
        pass
    elif cc_type == 'cl':
        py3_link_path = os.path.join(
            os.path.dirname(sys.executable),
            "libs",
        )
        # Strip gcc-only -f*/-m* options and remap the rest to their
        # MSVC equivalents.
        cc_flags = remove_flags(cc_flags, ["-f", "-m"])
        cc_flags = cc_flags.replace("-std=c++14", "-std=c++17")
        cc_flags = cc_flags.replace("-lstdc++", "")
        cc_flags = cc_flags.replace("-ldl", "")
        cc_flags += f" -L\"{py3_link_path}\" -lpython3{sys.version_info.minor} "
        cc_flags += " -EHa -MD -utf-8 "
        import jittor_utils
        if jittor_utils.msvc_path:
            mp = jittor_utils.msvc_path
            cc_flags += f' -nologo -I"{mp}\\VC\\include" -I"{mp}\\win10_kits\\include\\ucrt" -I"{mp}\\win10_kits\\include\\shared" -I"{mp}\\win10_kits\\include\\um" -DNOMINMAX '
            cc_flags += f' -L"{mp}\\VC\\lib" -L"{mp}\\win10_kits\\lib\\um\\x64" -L"{mp}\\win10_kits\\lib\\ucrt\\x64" '
        # Library directories already registered with the DLL loader.
        win_libpaths = {}
        def fix_cl_flags(cmd):
            # Translate a gcc-style command line into MSVC cl syntax:
            # .o -> .obj, -o -> -Fe:/-Fo:, -l/-L -> .lib/-LIBPATH, and
            # collect linker-only options behind a single -link.
            cmd = cmd.replace(".o ", ".obj ")
            cmd = cmd.replace(".o\"", ".obj\"")
            if cmd.endswith(".o"): cmd += "bj"
            if " -o " in cmd:
                if " -shared " in cmd:
                    cmd = cmd.replace(" -o ", " -Fe: ")
                    output = shsplit(cmd.split("-Fe:")[1].strip())[0]
                    base_output = os.path.basename(output).split('.')[0]
                    cmd += f" -DEF:\"{output}.def\" -IGNORE:4102 -IGNORE:4197 -IGNORE:4217 "
                elif " -c -o " in cmd:
                    cmd = cmd.replace(" -c -o ", " -c -Fo: ")
            flags = shsplit(cmd)
            output = []
            output2 = []
            for f in flags:
                if f.startswith("-link"):
                    pass
                elif f.startswith("-l"):
                    output2.append(f[2:]+".lib")
                elif f.startswith("-LIB"):
                    output2.append(f)
                elif f.startswith("-LD"):
                    output.append(f)
                elif f.startswith("-L"):
                    path = f[2:].replace("\"", "")
                    if path not in win_libpaths:
                        # Make the DLLs in this path loadable at runtime.
                        win_libpaths[path] = 1
                        os.add_dll_directory(path)
                        os.environ["PATH"] = f";{path};" + os.environ["PATH"]
                    output2.append("-LIBPATH:"+f[2:])
                elif ".lib" in f:
                    output2.append(f)
                elif f.startswith("-DEF:"):
                    output2.append(f)
                elif f.startswith("-W") or f.startswith("-f"):
                    pass
                elif f.startswith("-std="):
                    output.append(f.replace("=", ":"))
                else:
                    output.append(f)
            cmd = " ".join(output)
            if len(output2):
                cmd += " -link " + " ".join(output2)
            cmd = cmd.replace("-include", "-FI")
            cmd = cmd.replace("-shared", "-LD")
            return cmd
# --- Optimization/LTO flags, cache directory layout, and the first
# --- builds: jit_utils_core and CUDA detection.
if ' -O' not in cc_flags:
    if os.environ.get("debug", "0") == "1":
        opt_flags += " -O0 "
    else:
        opt_flags += " -O2 "
    kernel_opt_flags += " -Ofast "
lto_flags = ""
if os.environ.get("enable_lto") == "1":
    if cc_type == "icc":
        lto_flags = " -flto -ipo -ipo-c "
    elif cc_type == "g++":
        lto_flags = " -flto -fuse-linker-plugin "
    else:
        lto_flags = " -flto "
make_cache_dir(cache_path)
make_cache_dir(os.path.join(cache_path, "jit"))
make_cache_dir(os.path.join(cache_path, "obj_files"))
make_cache_dir(os.path.join(cache_path, "gen"))
make_cache_dir(os.path.join(cache_path, "tmp"))
ck_path = os.path.join(cache_path, "checkpoints")
make_cache_dir(ck_path)
# build cache_compile
# FIX: quote 'src' with single quotes -- double quotes inside this
# double-quoted f-string's replacement field need Python 3.12+ (PEP 701).
cc_flags += f" -I\"{os.path.join(jittor_path, 'src')}\" "
cc_flags += py_include
check_cache_compile()
LOG.v(f"Get cache_compile: {jit_utils.cc}")
# check cuda
has_cuda = 0
check_cuda()
nvcc_flags = os.environ.get("nvcc_flags", "")
if has_cuda:
    nvcc_flags += cc_flags
def convert_nvcc_flags(nvcc_flags):
    """Translate host-compiler flags into flags nvcc understands.

    On Windows, MSVC-only options are forwarded through -Xcompiler and
    MSVC spellings (-Fo:, -LD, -LIBPATH:, ...) are mapped back to their
    GCC-style equivalents; .lib paths become -L/-l pairs. On every
    platform, flags nvcc rejects (-Wall, -fopenmp, ...) are stripped or
    forwarded, and the common CUDA options are appended.
    """
    if os.name == 'nt':
        # Ordered substitutions: later patterns must not match the output
        # of earlier ones, so keep this sequence stable.
        msvc_pairs = (
            ("-fp:", "-Xcompiler -fp:"),
            ("-EH", "-Xcompiler -EH"),
            ("-M", "-Xcompiler -M"),
            ("-utf", "-Xcompiler -utf"),
            ("-nologo", ""),
            ("-std:", "-std="),
            ("-Fo:", "-o"),
            ("-LD", "-shared"),
            ("-LIBPATH:", "-L"),
            ("-link", ""),
        )
        for old, new in msvc_pairs:
            nvcc_flags = nvcc_flags.replace(old, new)

        def _lib_to_link_pair(tok):
            # rewrite "path/to/foo.lib" into -L"path/to" -lfoo
            if ".lib" not in tok:
                return tok
            tok = tok.replace('"', '')
            folder = os.path.dirname(tok)
            fname = os.path.basename(tok)
            if not fname.endswith(".lib"):
                return tok
            return f'-L"{folder}" -l{fname[:-4]}'

        nvcc_flags = map_flags(nvcc_flags, _lib_to_link_pair)
        if nvcc_version >= [11, 4]:
            nvcc_flags = nvcc_flags.replace("-std=c++17", "-std=c++14 -Xcompiler -std:c++14")
        else:
            nvcc_flags = nvcc_flags.replace("-std=c++17", "")
    # Flags nvcc rejects outright are dropped; GCC codegen options are
    # forwarded to the host compiler via -Xcompiler.
    generic_pairs = (
        ("-Wall", ""),
        ("-Wno-unknown-pragmas", ""),
        ("-fopenmp", ""),
        ("-march", "-Xcompiler -march"),
        ("-Werror", ""),
        ("-fPIC", "-Xcompiler -fPIC"),
        ("-fdiagnostics", "-Xcompiler -fdiagnostics"),
    )
    for old, new in generic_pairs:
        nvcc_flags = nvcc_flags.replace(old, new)
    nvcc_flags += f' -x cu --cudart=shared -ccbin="{cc_path}" --use_fast_math '
    # nvcc warning output is mostly noise
    nvcc_flags += " -w "
    cuda_inc = os.path.join(jittor_path, "extern/cuda/inc")
    nvcc_flags += f' -I"{cuda_inc}" '
    if os.environ.get("cuda_debug", "0") == "1":
        nvcc_flags += " -G "
    return nvcc_flags
nvcc_flags = convert_nvcc_flags(nvcc_flags)

# Register alternative accelerator backends (e.g. Huawei ACL) and pick the
# first one whose check() reports it is usable on this machine.
# from .acl_compiler import check_acl
from .extern.acl import acl_compiler
jit_utils.add_backend(acl_compiler)
for mod in jit_utils.backends:
    if mod.check():
        break
# build core
# Generate the flag/test/op-maker headers, then compile the whole C++ core
# into the jittor_core extension module.
gen_jit_flags()
gen_jit_tests()
op_headers = glob.glob(jittor_path+"/src/ops/**/*op.h", recursive=True)
jit_src = gen_jit_op_maker(op_headers)
LOG.vvvv(jit_src)
with open(os.path.join(cache_path, "gen", "jit_op_maker.h"), 'w', encoding='utf8') as f:
    f.write(jit_src)
cc_flags += f' -I\"{cache_path}\" -L\"{cache_path}\" -L\"{jit_utils.cache_path}\" '
# gen pyjt
pyjt_gen_src = pyjt_compiler.compile(cache_path, jittor_path)

# initialize order:
# 1. registers
# 2. generate source
# 3. op_utils
# 4. other
files2 = pyjt_gen_src
ext_args = 'c[cu]' if has_cuda else 'cc'
files4 = glob.glob(jittor_path+"/src/**/*."+ext_args, recursive=True)
files4 = [ f[len(jittor_path)+1:] for f in files4 ]
# files4 = run_cmd('find -L src | grep '+grep_args, jittor_path).splitlines()
# Files whose static initializers must run first / last: move them to the
# head / tail of the compile (and therefore link) order.
at_beginning = [
    "src/ops/op_utils.cc",
    "src/ops/op_register.cc",
    "src/init.cc",
    "src/event_queue.cc",
    "src/mem/allocator/sfrl_allocator.cc",
    "src/mem/allocator.cc",
    "src/misc/nano_string.cc",
]
at_last = [
    "src/profiler/profiler.cc",
    "src/executor.cc",
]
if os.name == 'nt':
    at_beginning = [ x.replace('/','\\') for x in at_beginning ]
    at_last = [ x.replace('/','\\') for x in at_last ]
for i in range(len(at_beginning)):
    files4.remove(at_beginning[i])
    files4.insert(i, at_beginning[i])
for v in at_last:
    files4.remove(v)
    files4.append(v)
registers = [ name for name in files4 if "register" in name ]
for name in registers: files4.remove(name)
files = registers + files2 + files4
# The jit_utils core sources were already compiled by check_cache_compile().
for file in jit_utils_core_files:
    files.remove(file)
LOG.vv("compile order:", files)

# Preload the OpenMP runtime matching the compiler so the core can link it.
if platform.system() == 'Linux':
    libname = {"clang":"omp", "icc":"iomp5", "g++":"gomp"}[cc_type]
    libname = ctypes.util.find_library(libname)
    assert libname is not None, "openmp library not found"
    ctypes.CDLL(libname, os.RTLD_NOW | os.RTLD_GLOBAL)

if platform.machine()=='sw_64':
    # Sunway platform: relax HTTPS certificate checks for downloads.
    import ssl
    ssl._create_default_https_context = ssl._create_unverified_context

# Optional pre-generated data blob (utils/data.gz): decompress, compile to
# data.o once per content hash, and link it instead of the __data__ sources.
data_gz_path = os.path.join(jittor_path, "utils", "data.gz")
use_data_gz = os.path.isfile(data_gz_path)
if os.environ.get("use_data_gz", "1") == "0":
    use_data_gz = False
if use_data_gz:
    import gzip
    with gzip.open(data_gz_path, 'rb') as f:
        data = f.read()
    md5 = hashlib.md5(data).hexdigest()
    target_md5 = None
    data_gz_md5_path = os.path.join(cache_path, "data.md5")
    if os.path.isfile(data_gz_md5_path):
        with open(data_gz_md5_path, 'r') as f:
            target_md5 = f.read()
    data_o_path = os.path.join(cache_path, "data.o")
    if target_md5 != md5:
        # cache miss: recompile the blob into an object file
        data_s_path = os.path.join(cache_path, "data.cc")
        with open(data_s_path, "w") as f:
            f.write(data.decode("utf8"))
        dflags = (cc_flags+opt_flags)\
            .replace("-Wall", "") \
            .replace("-Werror", "") \
            .replace("-shared", "")
        vdp = os.path.join(jittor_path, "src", "utils", "vdp")
        run_cmd(fix_cl_flags(f"{cc_path} {dflags} -include \"{vdp}\" \"{data_s_path}\" -c -o \"{data_o_path}\""))
        os.remove(data_s_path)
        with open(data_gz_md5_path, 'w') as f:
            f.write(md5)
    files.append(data_o_path)
    files = [f for f in files if "__data__" not in f]

cc_flags += f" -l\"jit_utils_core{lib_suffix}\" "
compile(cc_path, cc_flags+opt_flags, files, 'jittor_core'+extension_suffix)
cc_flags += f" -l\"jittor_core{lib_suffix}\" "

# TODO: move to compile_extern.py
# compile_extern()

with jit_utils.import_scope(import_flags):
    import jittor_core as core

flags = core.Flags()

if has_cuda:
    # Rebuild nvcc_flags from scratch now that cc_flags is final.
    nvcc_flags = " " + os.environ.get("nvcc_flags", "") + " "
    nvcc_flags += convert_nvcc_flags(cc_flags)
    nvcc_version = list(jit_utils.get_int_version(nvcc_path))
    # Clamp requested CUDA archs to what this nvcc release supports.
    max_arch = 1000
    if nvcc_version < [11,]:
        max_arch = 75
    elif nvcc_version < [11,1]:
        max_arch = 80
    if len(flags.cuda_archs):
        min_arch = 30
        archs = []
        for arch in flags.cuda_archs:
            if arch<min_arch:
                LOG.w(f"CUDA arch({arch})<{min_arch} is not supported")
                continue
            if arch>max_arch:
                LOG.w(f"CUDA arch({arch})>{max_arch} will be backward-compatible")
                arch = max_arch
            archs.append(arch)
        flags.cuda_archs = archs
        nvcc_flags += f" -arch=compute_{min(archs)} "
        nvcc_flags += ''.join(map(lambda x:f' -code=sm_{x} ', archs))

# Publish the final toolchain configuration to the C++ core.
flags.cc_path = cc_path
flags.cc_type = cc_type
flags.cc_flags = cc_flags + kernel_opt_flags
flags.nvcc_path = nvcc_path
flags.nvcc_flags = nvcc_flags
flags.python_path = python_path
flags.cache_path = cache_path
flags.jittor_path = jittor_path
flags.gdb_path = gdb_path
flags.addr2line_path = addr2line_path
flags.has_pybt = has_pybt
core.set_lock_path(lock.lock_path)
| # ***************************************************************
# Copyright (c) 2022 Jittor. All Rights Reserved.
# Maintainers: Dun Liang <randonlang@gmail.com>.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import subprocess as sp
import os
import re
import sys
import glob
import inspect
import datetime
import threading
import platform
import ctypes
import platform
from ctypes import cdll
from ctypes.util import find_library
import jittor_utils as jit_utils
from jittor_utils import LOG, run_cmd, find_exe, cc_path, cc_type, cache_path
from . import pyjt_compiler
from jittor_utils import lock
from jittor_utils import install_cuda
from jittor import __version__
import hashlib
def find_jittor_path():
    """Return the directory containing this module (the jittor source root)."""
    return os.path.split(__file__)[0]
def make_cache_dir(cache_path):
    """Create *cache_path* if it does not exist yet (non-recursive mkdir)."""
    if os.path.isdir(cache_path):
        return
    LOG.i(f"Create cache dir: {cache_path}")
    os.mkdir(cache_path)
def shsplit(s):
    """Split *s* on single spaces, shell-style: pieces inside an unclosed
    quoted region (counting both " and ') are glued back onto the previous
    piece, so quoted arguments containing spaces survive as one token."""
    pieces = []
    quote_count = 0
    for part in s.split(' '):
        n_quotes = part.count('"') + part.count("'")
        if quote_count & 1:
            # odd number of quotes so far: still inside a quoted region
            quote_count += n_quotes
            pieces[-1] = pieces[-1] + " " + part
        else:
            quote_count = n_quotes
            pieces.append(part)
    return pieces
def remove_flags(flags, rm_flags):
    """Drop every token of *flags* whose unquoted form starts or ends with
    any entry of *rm_flags*; return the remaining tokens space-joined."""
    kept = []
    for tok in shsplit(flags):
        bare = tok.replace("\"", "")
        if not any(bare.startswith(p) or bare.endswith(p) for p in rm_flags):
            kept.append(tok)
    return " ".join(kept)
def moveback_flags(flags, rm_flags):
    """Move every token matching *rm_flags* (by unquoted prefix or suffix)
    to the end of the flag string, preserving relative order in each group."""
    front = []
    back = []
    for tok in shsplit(flags):
        bare = tok.replace("\"", "")
        matched = any(bare.startswith(p) or bare.endswith(p) for p in rm_flags)
        (back if matched else front).append(tok)
    return " ".join(front + back)
def map_flags(flags, func):
    """Apply *func* to every shell-split token of *flags* and rejoin them."""
    return " ".join(func(tok) for tok in shsplit(flags))
def compile(compiler, flags, inputs, output, combind_build=False, cuda_flags="", obj_dirname="obj_files"):
    """Compile *inputs* into *output* (a shared library or executable).

    Sources are compiled to per-file objects (cached via cache_compile when
    available) and then linked; with combind_build=True (or a single input)
    everything is compiled and linked in one command. .cu files go through
    nvcc; pre-built .o/.a/.lib inputs are passed straight to the link step.
    Returns the result of the final (cached) compile command.
    """
    def do_compile(cmd):
        # Route through the C++ cache_compile when available so unchanged
        # commands are skipped; fall back to a plain shell invocation.
        if jit_utils.cc:
            return jit_utils.cc.cache_compile(cmd, cache_path, jittor_path)
        else:
            run_cmd(cmd)
            return True
    base_output = os.path.basename(output).split('.')[0]
    if os.name == 'nt':
        # windows do not combind build, need gen def
        combind_build = False
        # windows need xxxx.lib
        afile = output.rsplit('.', 1)[0] + ".lib"
        afile = os.path.join(cache_path, afile)
        if cc_type != 'cl':
            # initialize order in windows seems reversed
            inputs = list(inputs[::-1])
            # NOTE(review): `link` is not assigned anywhere visible in this
            # function or chunk — this branch (Windows + non-cl compiler)
            # looks like it would raise NameError; confirm against the full
            # file whether `link` is a module-level global.
            link = link + f' -Wl,--export-all-symbols,--out-implib,"{afile}" '
    if not os.path.isabs(output):
        output = os.path.join(cache_path, output)
    # don't recompile object file in inputs
    obj_files = []
    ex_obj_files = []
    new_inputs = []
    obj_dir = os.path.join(cache_path, obj_dirname)
    os.makedirs(obj_dir, exist_ok=True)
    for name in inputs:
        # inputs ending in o/a/b (.o, .a, .lib) are pre-built artifacts
        if name[-1] in 'oab':
            ex_obj_files.append(name)
        else:
            new_inputs.append(os.path.join(jittor_path, name))
            obj_files.append(os.path.join(
                obj_dir, os.path.basename(name)+".o"))
    inputs = new_inputs
    # cm/cms: quote a path / a list of paths for the shell
    cm = lambda s: f"\"{s}\""
    cms = lambda arr: [f"\"{s}\"" for s in arr ]

    if len(inputs) == 1 or combind_build:
        cmd = f"\"{compiler}\" {' '.join(cms(inputs))} {flags} -o {cm(output)}"
        return do_compile(fix_cl_flags(cmd))
    # split compile object file and link
    # remove -l -L flags when compile object files
    oflags = remove_flags(flags, ['-l', '-L', '-Wl,', '.lib', '-shared'])
    cmds = []
    for input, obj_file in zip(inputs, obj_files):
        cc = compiler
        nflags = oflags
        cmd = f"{cm(input)} {nflags} {lto_flags} -c -o {cm(obj_file)}"
        if input.endswith(".cu"):
            if has_cuda:
                cmd = f"\"{nvcc_path}\" {cuda_flags} {cmd}"
                cmd = convert_nvcc_flags(fix_cl_flags(cmd))
            else:
                # no CUDA toolchain: silently skip .cu sources
                continue
        else:
            cmd = f"\"{cc}\" {cmd}"
            cmd = fix_cl_flags(cmd)
        if "nan_checker" in input:
            # nan checker needs to disable fast_math
            cmd = cmd.replace("--use_fast_math", "")
            cmd = cmd.replace("-Ofast", "-O2")
        cmds.append(cmd)
    jit_utils.run_cmds(cmds, cache_path, jittor_path, "Compiling "+base_output)
    obj_files += ex_obj_files
    if os.name == 'nt':
        # generate the .def export file required for a Windows DLL link
        dumpdef_path = os.path.join(jittor_path, "utils", "dumpdef.py")
        cmd = f"\"{sys.executable}\" \"{dumpdef_path}\" {' '.join(cms(obj_files))} -Fo: \"{output}.def\""
        do_compile(fix_cl_flags(cmd))
    cmd = f"\"{compiler}\" {' '.join(cms(obj_files))} -o {cm(output)} {flags} {lto_flags}"
    return do_compile(fix_cl_flags(cmd))
def gen_jit_tests():
    """Scan all core .cc sources for JIT_TEST(name) markers and generate
    gen/jit_tests.h, which declares each test and exposes it to Python
    through a @pyjt(tests) submodule."""
    all_src = glob.glob(jittor_path+"/src/**/*.cc", recursive=True)
    jit_declares = []
    re_def = re.compile("JIT_TEST\\((.*?)\\)")
    names = set()
    test_defs = []

    for src_name in all_src:
        with open(src_name, 'rb') as f:
            src = f.read().decode('utf8')
        defs = re_def.findall(src)
        for name in defs:
            LOG.vv(f"Find test {name} from {src_name}")
            # test names must be globally unique across all source files
            assert name not in names, f"Conflict test name {name}"
            names.add(name)
            jit_declares.append(f"JIT_TEST({name});")
            test_defs.append(f"""
/* From {src_name} */
// @pyjt({name})
static inline void test_{name}() {{ jit_test_{name}(); }}
""")

    jit_declares = "\n ".join(jit_declares)
    # Generated C++ header: declarations + a tests submodule wrapper.
    jit_src = f"""
#pragma once
#include "common.h"
void expect_error(std::function<void()> func) {{
try {{ func(); }}
catch (...) {{ return; }}
CHECK(0) << "Missing error";
}}
namespace jittor {{
{jit_declares}
// @pyjt(tests)
// @attrs(submodule)
namespace tests {{
{"".join(test_defs)}
}}
}} // jittor
"""
    LOG.vvvv(jit_src)
    with open(os.path.join(cache_path, "gen", "jit_tests.h"), 'w', encoding='utf8') as f:
        f.write(jit_src)
def gen_jit_flags():
    """Scan all core .cc sources for DEFINE_FLAG declarations and generate
    gen/jit_flags.h: a _Flags struct whose getters/setters are exported to
    Python via @pyjt annotations (with aliases for a few flags)."""
    all_src = glob.glob(jittor_path+"/src/**/*.cc", recursive=True)
    jit_declares = []
    re_def = re.compile("DEFINE_FLAG(_WITH_SETTER)?\\((.*?)\\);", re.DOTALL)
    flags_defs = []
    visit = {}

    for src_name in all_src:
        with open(src_name, 'rb') as f:
            src = f.read().decode("utf8")
        defs = re_def.findall(src)
        for _, args in defs:
            args = args.split(",")
            type = args[0].strip()
            name = args[1].strip()
            # skip cuda-only flags when CUDA is unavailable (keep use_cuda)
            if not has_cuda and "cuda" in name and name!="use_cuda":
                continue
            default = args[2].strip()
            doc = ",".join(args[3:])
            # NOTE(review): eval() on text extracted from our own C++ sources;
            # trusted input here, but worth keeping in mind.
            doc = eval(f"({doc})")
            LOG.vv(f"Find define {name} from {src_name}")
            if name in visit:
                continue
            visit[name] = 1
            jit_declares.append(f"DECLARE_FLAG({type}, {name});")
            # some flags are exposed under additional Python names
            alias = []
            if name == "use_cuda":
                alias = ["use_device", "use_acl"]
            elif name == "auto_mixed_precision_level":
                alias = ["amp_level"]
            get_names = ",".join(["__get__"+a for a in [name]+alias])
            set_names = ",".join(["__set__"+a for a in [name]+alias])
            flags_defs.append(f"""
/* {name}(type:{type}, default:{default}): {doc} */
// @pyjt({get_names})
{type} _get_{name}() {{ return {name}; }}
// @pyjt({set_names})
void _set_{name}({type} v) {{ set_{name}(v); }}
{f'''// @pyjt({set_names})
void _set_{name}(bool v) {{ set_{name}(v); }}
''' if type=="int" else ""}
""")

    jit_declares = "\n ".join(jit_declares)
    jit_src = f"""
#include "utils/flags.h"
namespace jittor {{
{jit_declares}
// @pyjt(Flags)
struct _Flags {{
// @pyjt(__init__)
_Flags() {{}}
{"".join(flags_defs)}
}};
}} // jittor
"""
    LOG.vvvv(jit_src)
    with open(os.path.join(cache_path, "gen", "jit_flags.h"), 'w', encoding='utf8') as f:
        f.write(jit_src)
def gen_jit_op_maker(op_headers, export=False, extra_flags=""):
    """Generate the C++ source for the op-maker module from op headers.

    For every XxxXxxOp constructor found in *op_headers*, emit a
    make_xxx_xxx() factory returning VarPtr(s), a Python-facing wrapper
    (via @pyjt), in-place/right-hand variants for operators, and the op
    registration code. When *export* is set, the generated source becomes
    a standalone Python extension module named *export*.
    Returns the generated C++ source as a string.
    """
    def add_src(
        cc_func_name,
        cc_args,
        op_name,
        op_args,
        src,
        pybind_name,
        py_args,
        jit_cc_src,
        doc_string,
        attrs
    ):
        # Append the maker + wrapper functions for one op constructor to
        # jit_cc_src. has_ir lists dunder names that get __ixxx__/__rxxx__.
        has_ir = set(["add", "sub", "mul", "matmul", "truediv", "floordiv", "mod", "divmod", "pow", "lshift", "rshift", "and", "xor", "or"])
        pybind_names = [ s.strip() for s in pybind_name.split(",")]
        cc_make_args = [ arg.replace("VarHolder*", "Var*") for arg in cc_args ]
        op_make_args = [ arg.replace("->var", "") for arg in op_args ]
        py_args = [ arg.replace("Var*", "VarHolder*") for arg in py_args ]
        op_args = []
        cc_args_with_default = []
        for i, arg in enumerate(cc_args):
            # pre_arg: bare parameter name without type or default value
            pre_arg = arg.split()[-1].split('=')[0]
            op_arg = None
            if arg.startswith("VarHolder*"):
                op_arg = pre_arg+"->var"
            elif arg.startswith("vector<VarHolder*>"):
                op_arg = f"convert({pre_arg})"
            if "&&" in arg:
                # rvalue-reference parameters must be std::move-d through
                if op_arg == None:
                    op_arg = "move("+pre_arg+")"
                op_make_args[i] = "move("+pre_arg+")"
            if op_arg==None: op_arg = pre_arg
            op_args.append(op_arg)
            py_arg = py_args[i]
            if "_a=" not in py_arg:
                cc_args_with_default.append(arg)
                continue
            # propagate the pybind default value into the C++ signature
            py_arg = py_arg.split("_a=")[1]
            cc_args_with_default.append(arg + "=" + py_arg)
        cc_args = cc_args_with_default
        # steps of Op creation:
        # 1. new op
        # 2. new output var (create_output in op constructor)
        # 3. take over op's output VarPtr from outputs_holder
        # 4. set op's output
        # 5. set op's input
        # 6. infer shape(op->init())
        if "multiple_outputs" not in attrs:
            jit_cc_src.append(f"""
VarPtr make_{cc_func_name}({", ".join(cc_make_args)}) {{
auto _op = new {op_name}({", ".join(op_make_args)});
if (_op->outputs_holder.size() != 1) {{
delete _op;
LOGf << "Wrong output size of" << \"{op_name}\";
}}
if (_op->flags.get(NodeFlags::_forwarded)) {{
VarPtr _out(move(_op->outputs_holder[0]));
delete _op;
return _out;
}}
_op->outputs_holder[0]->set_inputs({{_op}});
VarPtr _out(move(_op->outputs_holder[0]));
{src.replace("->var","")};
_op->init();
return _out;
}}
""")
        else:
            jit_cc_src.append(f"""
vector<VarPtr> make_{cc_func_name}({", ".join(cc_make_args)}) {{
auto _op = new {op_name}({", ".join(op_make_args)});
if (_op->flags.get(NodeFlags::_forwarded)) {{
vector<VarPtr> _outs = move(_op->outputs_holder);
delete _op;
return _outs;
}}
vector<VarPtr> _outs = move(_op->outputs_holder);
for (uint i=0; i<_outs.size(); i++)
_outs[i]->set_inputs({{_op}});
{src.replace("->var","")};
_op->init();
return _outs;
}}
""")
        if pybind_name == 'None':
            # maker only, no Python binding requested
            return
        pyjt_names = []
        for pybind_name in pybind_names:
            if pybind_name.startswith("__"):
                # dunder names are methods of Var only
                pyjt_names.append("Var."+pybind_name)
            else:
                pyjt_names.append(pybind_name)
                if len(cc_args)>0 and cc_args[0].startswith("VarHolder* "):
                    # first arg is a Var: also expose as a Var method
                    pyjt_names.append("Var."+pybind_name)
        if "multiple_outputs" in attrs:
            jit_cc_src.append(f"""
/*{doc_string}*/
// @pyjt({",".join(pyjt_names)})
vector_to_tuple<VarHolder*> {cc_func_name}({", ".join(cc_args)}) {{
{ f'return make_vh_vector(make_{cc_func_name}({", ".join(op_args)}));'
if "replace_outputs" not in attrs else
f'''auto rt = make_vh_vector(make_{cc_func_name}({", ".join(op_args)}));
ASSERT(rt.size() == outputs.size());
for (int i=0; i<outputs.size(); i++)
outputs[i]->assign(rt[i]);
return rt;
'''}
}}
""")
        else:
            jit_cc_src.append(f"""
/*{doc_string}*/
// @pyjt({",".join(pyjt_names)})
VarHolder* {cc_func_name}({", ".join(cc_args)}) {{
return new VarHolder(make_{cc_func_name}({", ".join(op_args)}));
}}
""")
        # operators in has_ir additionally get in-place (__iadd__) and
        # reflected (__radd__) variants
        need_ir_define = False
        ir_name = None
        for pybind_name in pybind_names:
            if pybind_name.startswith("__") and pybind_name[2:-2] in has_ir:
                need_ir_define = True
                assert ir_name is None
                ir_name = pybind_name[2:-2]
        if need_ir_define:
            assert len(cc_args)>0 and cc_args[0].startswith("VarHolder* ")
            this = cc_args[0].split()[-1]
            jit_cc_src.append(f"""
// @pyjt(Var.__i{ir_name}__)
// @attrs(return_self)
VarHolder* i{cc_func_name}({", ".join(cc_args)}) {{
*{this} = make_{cc_func_name}({", ".join(op_args)});
return {this};
}}
""")
            assert len(cc_args)>1 and cc_args[1].startswith("VarHolder* "), cc_args
            r_cc_args = [cc_args[1], cc_args[0]] + cc_args[2:]
            r_py_args = [py_args[1], py_args[0]] + py_args[2:]
            jit_cc_src.append(f"""
VarHolder* r{cc_func_name}({", ".join(r_cc_args)}) {{
return new VarHolder(make_{cc_func_name}({", ".join(op_args)}));
}}
""")
    jit_cc_src = []
    jit_headers = ""
    initer = []
    # regexes for the /* doc */ // @pybind(...) // @attrs(...) annotations
    pybind_reg = '(/\\*(.*?)\\*/\\s*)?(//\\s*@pybind\\(([^\\n]*)\\)\\s*)?'
    pybind_attrs_reg = pybind_reg + '(//\\s*@attrs\\(([^\\n]*)\\)\\s*)?'
    for header in op_headers:
        # xxx_xxx_op
        name = os.path.basename(header)
        name = os.path.splitext(name)[0]
        # xxx_xxx
        assert name.endswith("_op")
        func_name = name[:-3]
        # XxxXxxOp
        name2 = map(lambda s:s[:1].upper() + s[1:], name.split('_'))
        name2 = "".join(name2)
        with open(header, encoding='utf8') as f:
            src = f.read()
        # XxxXxxOp(args)
        res = re.findall(pybind_attrs_reg + '[^~]('+name2+"\\([^\\n]*\\))", src, re.S)
        assert len(res) >= 1, "Wrong op args in " + header
        # registe op
        cc_name = header[:-2] + ".cc"
        constructors = []
        for i in range(len(res)):
            # overloads are distinguished by trailing underscores
            name = 'make_'+func_name+'_'*i
            constructors.append(f"{{ &typeid(&{name}), (void*)&{name} }}")
        constructors = ",".join(constructors)
        # collect Var* member names for offset registration
        var_member_reg = r"\n\s*Var\b(.*);"
        var_member_match = re.findall(var_member_reg, src)
        var_member_match = " ".join(var_member_match)
        for c in "*,": var_member_match = var_member_match.replace(c, " ")
        var_member = var_member_match.split()
        LOG.vv("var_member_match "+var_member_match)
        LOG.vv("var_member "+str(var_member))
        var_member_src = [ f"VAR_MEMBER_NAME_AND_OFFSET({name}, {name2})" for name in var_member ]
        var_member_src = ",".join(var_member_src)

        initer.append(f'\n op_registe({{ "{func_name}", R"({cc_name})", extra_flags, {{{constructors}}}, {{{var_member_src}}} }});')
        for hid, h_def in enumerate(res):
            h_def = list(h_def)
            # // @attrs(...)
            attrs = {}
            if h_def[4] != "":
                attrs = pyjt_compiler.parse_attrs(h_def[5])
            del h_def[4:6]
            # /* doc_string */
            # // @pybind(bind_name)
            # XxxXxxOp(args_def)
            doc_string = h_def[1].strip()
            h_def = h_def[2:]
            args_def = h_def[2][len(name2)+1:-1]
            bind_name = h_def[1]
            if bind_name == "":
                bind_name = func_name
            if args_def=="":
                args = []
            else:
                args = list(map(lambda s: s.split()[-1].split('=')[0], args_def.split(',')))
            # py_args: "arg"_a=default
            py_args = []
            new_args_def = []
            new_args = []
            # source of convert VarHolder* to Var*
            vh2v_src = []
            more_src = []
            for arg, arg_def in zip(args, args_def.split(',')):
                py_arg = f'"{arg}"_a'
                if '=' in arg_def:
                    py_arg += "=" + arg_def.split('=')[-1]
                    arg_def = arg_def.split('=')[0]
                py_args.append(py_arg)
                arg_type = arg_def[:-(len(arg)+1)].strip()
                if arg_type == "Var*":
                    new_args_def.append("VarHolder* " + arg)
                    vh2v_src.append(arg + "->var")
                    new_args.append(arg + "->var")
                elif arg_type.startswith("vector<Var*>"):
                    new_args_def.append(
                        arg_type.replace("Var", "VarHolder")+' '+arg)
                    new_args.append(arg)
                    more_src.append(f"_op->add_inputs({arg});")
                elif arg_type.startswith("VarSlices"):
                    new_args_def.append(arg_def)
                    new_args.append(arg)
                    # slices may contain Vars; register them as inputs too
                    more_src.append(f"""
vector<Var*> svars;
for (int i=0; i<_op->vs.n; i++)
if (_op->vs.slices[i].is_var())
svars.push_back(_op->vs.slices[i].var);
_op->add_inputs(svars);""")
                else:
                    new_args_def.append(arg_def)
                    new_args.append(arg)
            vh2v_src = "_op->set_inputs({" + ", ".join(vh2v_src) + "});" + \
                "".join(more_src)
            LOG.vvvv(f"Find op: {name2} args: {new_args}")
            # if header.startswith("src/"):
            #     jit_headers += f"#include \"{header[4:]}\"\n"
            # else:
            jit_headers += f"#include \"{header}\"\n"
            add_src(
                func_name+'_'*hid,
                new_args_def,
                name2,
                new_args,
                vh2v_src,
                bind_name,
                py_args,
                jit_cc_src,
                doc_string,
                attrs
            )
            if func_name in ["binary", "unary", "reduce"]:
                # generate binary op alias
                with open(os.path.join(jittor_path, f"src/ops/{func_name}_op.cc"), encoding="utf-8") as f:
                    src = f.read()
                src = src.split(f"unordered_set<string> {func_name}_ops = ""{")[1].split("};")[0]
                match_result = re.findall(pybind_reg + "\"([a-z_A-Z0-9]*)\"", src, re.S)
                # remove /* doc_string */ pattern
                res2 = [ (_[3], _[4]) for _ in match_result ]
                LOG.vvvv(f"All supported {func_name} ops: {res2}")
                # remove op args
                if func_name == "reduce":
                    args_def = new_args_def[:1] + new_args_def[2:]
                    py_args_s = py_args[:1] + py_args[2:]
                else:
                    args_def = new_args_def[:-1]
                    py_args_s = py_args[:-1]
                # find the last type id(float64)
                # add "_" suffix for all function
                if func_name == "unary":
                    last_tid = res2.index(("","float64"))
                # for each functor
                for tid, (bind_name, func_name2) in enumerate(res2):
                    # get certain op doc_string
                    doc_string2 = match_result[tid][1].strip()
                    if len(doc_string2) == 0:
                        doc_string2 = doc_string
                    # add _ for types
                    if func_name == "unary" and tid <= last_tid:
                        func_name3 = func_name2 + "_"
                    elif func_name == "reduce":
                        func_name4 = func_name2
                        func_name2 = "reduce_" + func_name2
                        func_name3 = func_name2
                    else:
                        func_name3 = func_name2
                    if len(bind_name) == 0:
                        bind_name = func_name2
                    if func_name == "reduce":
                        args = new_args[:1] + [f'ns_{func_name4}'] + new_args[2:]
                    else:
                        args = new_args[:-1] + [f'ns_{func_name2}']
                    add_src(
                        func_name3+'_'*hid,
                        args_def,
                        name2,
                        args,
                        vh2v_src,
                        bind_name,
                        py_args_s,
                        jit_cc_src,
                        doc_string2,
                        attrs
                    )
    # Final generated source: all makers/wrappers plus an initer() that
    # registers every op; when export is set, wrap as a Python module.
    jit_src = f"""
#pragma once
#include "pyjt/py_obj_holder.h"
#include "var.h"
#include "var_holder.h"
#include "ops/op_register.h"
{jit_headers}
namespace jittor {{
// fix make_array(py::array) undefine reference
#pragma GCC visibility push(default)
#define JIT_NAMESPACE {export+"_maker" if export else "jit_op_maker"}
// @pyjt(ops)
// @attrs(submodule{",core_name="+export if export else ""})
namespace JIT_NAMESPACE {{
{"".join(jit_cc_src)}
void initer() {{
string extra_flags = R"({extra_flags})";
{"".join(initer)}
}}
int caller = (initer(), 0);
}} // JIT_NAMESPACE
}} // jittor
{f'''
namespace jittor {{
extern void pyjt_def_{export}(PyObject*);
}}
static void init_module(PyModuleDef* mdef, PyObject* m) {{
mdef->m_doc = "User defined custom ops";
jittor::pyjt_def_{export}(m);
}}
PYJT_MODULE_INIT({export});
''' if export else ""}
"""
    return jit_src
@lock.lock_scope()
def compile_custom_op(header, source, op_name, warp=True):
    """Compile a single custom op
    header: code of op header, not path
    source: code of op source, not path
    op_name: op_name of this op, it will used for
        generation of header and source files, if the
        type name of op is XxxXxxOp, op_name should be
        xxx_xxx
    warp: if true, warp a snippet for header and source

    Returns the compiled op callable (attribute *op_name* of the
    generated module).
    """
    if warp:
        # wrap the user snippets in the standard includes + jittor namespace
        header = f"""
#pragma once
#include "op.h"
#include "var.h"
namespace jittor {{
{header}
}}
"""
        source = f"""
#include "{op_name}_op.h"
namespace jittor {{
{source}
}}
"""
    # write the snippets to the custom_ops cache dir, then reuse the
    # multi-op compilation path
    cops_dir = os.path.join(cache_path, "custom_ops")
    make_cache_dir(cops_dir)
    hname = os.path.join(cops_dir, op_name+"_op.h")
    ccname = os.path.join(cops_dir, op_name+"_op.cc")
    with open(hname, 'w', encoding='utf8') as f:
        f.write(header)
    with open(ccname, 'w', encoding='utf8') as f:
        f.write(source)
    m = compile_custom_ops([hname, ccname])
    return getattr(m, op_name)
@lock.lock_scope()
def compile_custom_ops(
    filenames,
    extra_flags="",
    return_module=False,
    dlopen_flags=None,
    gen_name_ = ""):
    """Compile custom ops
    filenames: path of op source files, filenames must be
        pairs of xxx_xxx_op.cc and xxx_xxx_op.h, and the
        type name of op must be XxxXxxOp.
    extra_flags: extra compile flags
    return_module: return module rather than ops(default: False)
    return: compiled ops
    """
    if dlopen_flags is None:
        dlopen_flags = os.RTLD_GLOBAL | os.RTLD_NOW
        if platform.system() == 'Linux':
            # DEEPBIND keeps symbols of different custom-op libs separate
            dlopen_flags |= os.RTLD_DEEPBIND

    # Classify the input files into sources, headers, and include dirs.
    srcs = {}
    headers = {}
    builds = []
    includes = []
    pyjt_includes = []
    for name in filenames:
        name = os.path.realpath(name)
        if name.endswith(".cc") or name.endswith(".cpp") or name.endswith(".cu"):
            builds.append(name)
        if name.endswith(".h"):
            dirname = os.path.dirname(name)
            if dirname.endswith("inc"):
                includes.append(dirname)
            with open(name, "r", encoding='utf8') as f:
                # headers with @pyjt annotations need pyjt code generation
                if "@pyjt" in f.read():
                    pyjt_includes.append(name)
        bname = os.path.basename(name)
        bname = os.path.splitext(bname)[0]
        if bname.endswith("_op"):
            bname = bname[:-3]
            if name.endswith(".cc"):
                srcs[bname] = name
            elif name.endswith(".h"):
                includes.append(os.path.dirname(name))
                headers[bname] = name
    assert len(srcs) == len(headers), "Source and header names not match"
    for name in srcs:
        assert name in headers, f"Header of op {name} not found"
    # Derive a deterministic module name from the op names (hashed if too
    # long for a filename), unless an explicit gen_name_ was given.
    gen_name = "gen_ops_" + "_".join(headers.keys())
    if gen_name_ != "":
        gen_name = gen_name_
    if len(gen_name) > 50:
        gen_name = gen_name[:50] + "___hash" + hashlib.md5(gen_name.encode()).hexdigest()[:6]

    includes = sorted(list(set(includes)))
    includes = "".join(map(lambda x: f" -I\"{x}\" ", includes))
    LOG.vvvv(f"Include flags:{includes}")
    op_extra_flags = includes + extra_flags

    lib_path = os.path.join(cache_path, "custom_ops")
    make_cache_dir(lib_path)
    gen_src_fname = os.path.join(lib_path, gen_name+".cc")
    gen_head_fname = os.path.join(lib_path, gen_name+".h")
    gen_lib = os.path.join(lib_path, gen_name+extension_suffix)
    libname = gen_name + lib_suffix
    op_extra_flags += f" -L\"{lib_path}\" -l\"{libname}\" "

    gen_src = gen_jit_op_maker(headers.values(), export=gen_name, extra_flags=op_extra_flags)
    pyjt_compiler.compile_single(gen_head_fname, gen_src_fname, src=gen_src)
    # gen src initialize first
    builds.insert(0, gen_src_fname)

    def insert_anchor(gen_src, anchor_str, insert_str):
        # insert insert_str after anchor_str into gen_src
        return gen_src.replace(anchor_str, anchor_str+insert_str, 1)

    # Run pyjt codegen for annotated headers and splice their module-init
    # hooks into the generated source.
    for name in pyjt_includes:
        LOG.v("handle pyjt_include ", name)
        bname = os.path.basename(name).split(".")[0]
        gen_src_fname = os.path.join(cache_path, "custom_ops", gen_name+"_"+bname+".cc")
        pyjt_compiler.compile_single(name, gen_src_fname)
        builds.insert(1, gen_src_fname)
        gen_src = insert_anchor(gen_src,
            "namespace jittor {",
            f"extern void pyjt_def_{bname}(PyObject* m);")
        gen_src = insert_anchor(gen_src,
            "init_module(PyModuleDef* mdef, PyObject* m) {",
            f"jittor::pyjt_def_{bname}(m);")

    with open(gen_head_fname, "w", encoding='utf8') as f:
        f.write(gen_src)

    LOG.vvv(f"Build custum ops lib:{gen_lib}")
    LOG.vvvv(f"Build sources:{builds}")
    compile(cc_path, extra_flags+cc_flags+opt_flags+includes, builds, gen_lib)

    # add python path and import
    LOG.vvv(f"Import custum ops lib:{gen_lib}")
    lib_path = os.path.join(cache_path, "custom_ops")
    if lib_path not in os.sys.path:
        os.sys.path.append(lib_path)
    # unlock scope when initialize
    with lock.unlock_scope():
        with jit_utils.import_scope(dlopen_flags):
            exec(f"import {gen_name}")
    mod = locals()[gen_name]
    if return_module:
        return mod
    return mod.ops
def get_full_path_of_executable(name):
    """Resolve *name* to an absolute, symlink-free path of an executable
    file; if *name* itself is not runnable, look it up on PATH via
    find_exe and retry."""
    path = os.path.abspath(name)
    while os.path.islink(path):
        path = os.path.realpath(path)
    if os.path.isfile(path) and os.access(path, os.X_OK):
        return path
    # not directly runnable from here: search PATH and resolve that hit
    return get_full_path_of_executable(find_exe(name))
def compile_extern():
    """Compile the LLVM pass plugins shipped in extern/llvm (clang/Linux
    only) and, if one links cleanly, add it to kernel_opt_flags."""
    # compile llvm passes
    if cc_type != "clang" or platform.system() != 'Linux':
        return
    global kernel_opt_flags
    cache_path_llvm = os.path.join(cache_path, "llvm")
    jittor_path_llvm = os.path.join(jittor_path, "extern", "llvm")
    clang_dir = os.path.dirname(get_full_path_of_executable(cc_path))
    assert clang_dir.endswith("bin") and "llvm" in clang_dir, f"Wrong clang_dir: {clang_dir}"
    llvm_include = os.path.abspath(os.path.join(clang_dir, "..", "include"))
    assert os.path.isdir(llvm_include), "LLVM include path not found"
    make_cache_dir(cache_path_llvm)
    files = os.listdir(jittor_path_llvm)
    # test_pass.cc is used for test link problem of llvm pass plugin
    test_pass_path = os.path.join(cache_path_llvm, "test_pass.cc")
    with open(test_pass_path, 'w', encoding='utf8') as f:
        f.write("int main() {return 0;}")
    # -fno-rtti fix link error
    # -Wl,-znodelete fix segfault
    # https://github.com/sampsyo/llvm-pass-skeleton/issues/7#issuecomment-401834287
    # -D_GLIBCXX_USE_CXX11_ABI=0 fix undefined symbol: createPrinterPass
    # https://stackoverflow.com/questions/37366291/undefined-symbol-for-self-built-llvm-opt
    # try different flags
    try_flags = [
        " -Wl,-znodelete -D_GLIBCXX_USE_CXX11_ABI=0 ",
        " -Wl,-znodelete ",
    ]
    found_flags_id = -1
    for fname in files:
        for i, flag in enumerate(try_flags):
            # once a flag combination is known to work, only use that one
            if found_flags_id != -1 and found_flags_id != i:
                continue
            so_name = os.path.join(cache_path_llvm, os.path.splitext(fname)[0]+f".{i}.so")
            compile(
                cc_path,
                f"{cc_flags} {opt_flags} {flag} -I'{llvm_include}'",
                [os.path.join(jittor_path_llvm, fname)],
                so_name
            )
            # if not found available flags, we test it.
            if found_flags_id == -1:
                try:
                    s = run_cmd(
                        f"{cc_path} {cc_flags} -Xclang -load -Xclang '{so_name}' {test_pass_path}",
                        cache_path_llvm,
                        print_error=False
                    )
                except Exception as e:
                    LOG.v(f"Try flag {flag} failed: {e}")
                    continue
                found_flags_id = i
            kernel_opt_flags += f" -Xclang -load -Xclang '{so_name}' "
            break
        else:
            # no flag combination linked: give up on LLVM passes entirely
            LOG.w("Clang is used, but LLVM pass plugin is unable to link.")
            break
    LOG.vv(f"Compile extern llvm passes: {str(files)}")
def check_cuda():
    """Detect the CUDA toolkit via nvcc_path; on success, set the cuda_*
    globals, extend cc_flags with CUDA include/link flags, preload the
    cudart runtime, and set has_cuda = 1. No-op when nvcc is missing."""
    if not nvcc_path:
        return
    global cc_flags, has_cuda, core_link_flags, cuda_dir, cuda_lib, cuda_include, cuda_home, cuda_bin
    cuda_dir = os.path.dirname(get_full_path_of_executable(nvcc_path))
    cuda_bin = cuda_dir
    cuda_home = os.path.abspath(os.path.join(cuda_dir, ".."))
    # try default nvidia-cuda-toolkit in Ubuntu 20.04
    # assert cuda_dir.endswith("bin") and "cuda" in cuda_dir.lower(), f"Wrong cuda_dir: {cuda_dir}"
    cuda_include = os.path.abspath(os.path.join(cuda_dir, "..", "include"))
    cuda_lib = os.path.abspath(os.path.join(cuda_dir, "..", "lib64"))
    if nvcc_path == "/usr/bin/nvcc":
        # this nvcc is install by package manager
        cuda_lib = "/usr/lib/x86_64-linux-gnu"
    cuda_include2 = os.path.join(jittor_path, "extern","cuda","inc")
    cc_flags += f" -DHAS_CUDA -DIS_CUDA -I\"{cuda_include}\" -I\"{cuda_include2}\" "
    if os.name == 'nt':
        cuda_lib = os.path.abspath(os.path.join(cuda_dir, "..", "lib", "x64"))
        # cc_flags += f" \"{cuda_lib}\\cudart.lib\" "
        cuda_lib_path = glob.glob(cuda_bin+"/cudart64*")[0]
        cc_flags += f" -lcudart -L\"{cuda_lib}\" -L\"{cuda_bin}\" "
        # sanity check: the runtime must load and respond
        dll = ctypes.CDLL(cuda_lib_path, dlopen_flags)
        ret = dll.cudaDeviceSynchronize()
        assert ret == 0
    else:
        cc_flags += f" -lcudart -L\"{cuda_lib}\" "
        # ctypes.CDLL(cuda_lib+"/libcudart.so", import_flags)
        ctypes.CDLL(cuda_lib+"/libcudart.so", dlopen_flags)
    has_cuda = 1
def check_cache_compile():
    """Build (or rebuild) the jit_utils_core helper extension and make sure
    it is importable; exits the process when a rebuild invalidates the
    already-loaded copy so the user can rerun."""
    files = [
        "src/utils/cache_compile.cc",
        "src/utils/log.cc",
        "src/utils/tracer.cc",
        "src/utils/jit_utils.cc",
        "src/utils/str_utils.cc",
    ]
    if os.name == 'nt':
        files = [ x.replace('/', '\\') for x in files ]
    # remembered so the core build can exclude these already-compiled files
    global jit_utils_core_files
    jit_utils_core_files = files
    recompile = compile(cc_path, cc_flags+f" {opt_flags} ", files, jit_utils.cache_path+'/jit_utils_core'+extension_suffix, True)
    if recompile and jit_utils.cc:
        # the loaded jit_utils_core is stale; a fresh process is required
        LOG.e("jit_utils updated, please rerun your command.")
        sys.exit(0)
    if not jit_utils.cc:
        with jit_utils.import_scope(import_flags):
            jit_utils.try_import_jit_utils_core()
        assert jit_utils.cc
        # recompile, generate cache key
        compile(cc_path, cc_flags+f" {opt_flags} ", files, jit_utils.cache_path+'/jit_utils_core'+extension_suffix, True)
def env_or_try_find(name, bname):
    """Resolve an executable path.

    A non-empty environment variable ``name`` wins (its version is logged);
    otherwise fall back to searching for the binary ``bname``.
    """
    path = os.environ.get(name, "")
    if path:
        version = jit_utils.get_version(path)
        LOG.i(f"Found {bname}{version} at {path}")
        return path
    return try_find_exe(bname)
def try_find_exe(*args):
    """Like ``find_exe`` but returns "" instead of raising when not found.

    Note: previously used a bare ``except:``, which also swallowed
    SystemExit/KeyboardInterrupt; narrowed to ``Exception``.
    """
    try:
        return find_exe(*args)
    except Exception:
        LOG.v(f"{args[0]} not found.")
        return ""
def check_pybt(gdb_path, python_path):
    """Report whether gdb-based Python backtraces can be attempted.

    Only requires both a gdb and a python executable to be known; the real
    ``py-bt`` probe is intentionally skipped (see note below).
    """
    return gdb_path != '' and python_path != ''
    # TODO: prev we use below code to check has py-bt or nor
    # but it is too slow, so we comment it,
    # find a better way to check py-bt exist
    # ret = sp.getoutput(f"{gdb_path} --batch {python_path} -ex 'help py-bt'")
    # if 'python frame' in ret:
    #     LOG.v("py-bt found in gdb.")
    #     return True
    # return False
def check_debug_flags():
    """Set the module-global ``is_debug`` flag from the ``debug`` env var,
    extending ``cc_flags`` with debug/memcheck options when enabled."""
    global is_debug, cc_flags
    is_debug = 0
    if os.environ.get("debug") == "1":
        is_debug = 1
        cc_flags += " -g -DNODE_MEMCHECK "
cc_flags = " "
# os.RTLD_NOW | os.RTLD_GLOBAL cause segfault when import torch first
import_flags = os.RTLD_NOW | os.RTLD_GLOBAL
if platform.system() == 'Linux':
    import_flags |= os.RTLD_DEEPBIND
# if cc_type=="icc":
#     # weird link problem, icc omp library may conflict and cause segfault
#     import_flags = os.RTLD_NOW | os.RTLD_GLOBAL
dlopen_flags = os.RTLD_NOW | os.RTLD_GLOBAL
if platform.system() == 'Linux':
    # BUG FIX: this branch previously OR-ed RTLD_DEEPBIND into import_flags
    # a second time, leaving dlopen_flags without deep binding on Linux.
    dlopen_flags |= os.RTLD_DEEPBIND
with jit_utils.import_scope(import_flags):
    jit_utils.try_import_jit_utils_core()
# Locate the jittor source tree and finish the basic environment setup.
jittor_path = find_jittor_path()
if os.name == 'nt':
    # prevent windows recompile (presumably because Windows paths are
    # case-insensitive, so normalizing case keeps cache keys stable)
    jittor_path = jittor_path.lower()
check_debug_flags()
sys.path.append(cache_path)
LOG.i(f"Jittor({__version__}) src: {jittor_path}")
LOG.i(f"{jit_utils.cc_type} at {jit_utils.cc_path}{jit_utils.get_version(jit_utils.cc_path)}")
LOG.i(f"cache_path: {cache_path}")
with jit_utils.import_scope(import_flags):
    jit_utils.try_import_jit_utils_core()
python_path = sys.executable
# sys.executable is not always the exact interpreter binary when multiple
# python versions are installed; prefer the minor-versioned binary
# (e.g. ".../python3.8") when it exists on disk.
ex_python_path = python_path + '.' + str(sys.version_info.minor)
if os.path.isfile(ex_python_path):
    python_path = ex_python_path
# Resolve the CUDA compiler (nvcc). Search order:
#   1. a jittor-managed CUDA installation (jtcuda); always tried on Windows
#   2. the system CUDA: `nvcc_path`/PATH, then common install locations
#   3. install jtcuda as a last resort
# An explicit `nvcc_path` environment variable overrides everything.
# if jtcuda is already installed
nvcc_path = None
if install_cuda.has_installation() or os.name == 'nt':
    nvcc_path = install_cuda.install_cuda()
    if nvcc_path:
        nvcc_path = try_find_exe(nvcc_path)
# check system installed cuda
if not nvcc_path:
    nvcc_path = env_or_try_find('nvcc_path', 'nvcc') or \
        try_find_exe('/usr/local/cuda/bin/nvcc') or \
        try_find_exe('/usr/bin/nvcc') or \
        try_find_exe('/opt/cuda/bin/nvcc')
# if system has no cuda, install jtcuda
if not nvcc_path:
    nvcc_path = install_cuda.install_cuda()
    if nvcc_path:
        nvcc_path = try_find_exe(nvcc_path)
if nvcc_path is None:
    nvcc_path = ""
# explicit user override beats every probe above
if "nvcc_path" in os.environ:
    nvcc_path = os.environ["nvcc_path"]
gdb_path = env_or_try_find('gdb_path', 'gdb')
addr2line_path = try_find_exe('addr2line')
has_pybt = check_pybt(gdb_path, python_path)
if nvcc_path:
    # gen cuda key for cache_path, e.g. "cu11.2_sm_61_75"
    cu = "cu"
    v = jit_utils.get_version(nvcc_path)[1:-1]
    nvcc_version = list(map(int,v.split('.')))
    cu += v
    try:
        # best-effort query of the compute capabilities of visible devices
        # (output like "61 75"); failures are ignored on purpose
        r, s = sp.getstatusoutput(f"log_v=0 {sys.executable} -m jittor_utils.query_cuda_cc")
        if r==0:
            s = sorted(list(set(s.strip().split())))
            cu += "_sm_" + "_".join(s)
            if "cuda_arch" not in os.environ:
                # BUG FIX: previously `" ".join(cu)` joined the *string* cu
                # character by character ("c u 1 1 ..."); cuda_arch must be
                # the space-separated arch list, which is `s`.
                os.environ["cuda_arch"] = " ".join(s)
            cu = cu.replace(":", "").replace(" ", "")
    except Exception:
        pass
    LOG.i("cuda key:", cu)
    cache_path = os.path.join(cache_path, cu)
    sys.path.append(cache_path)
def check_clang_latest_supported_cpu():
    """Return the newest ``apple-a<N>`` CPU name this clang can target."""
    listing = run_cmd('clang --print-supported-cpus')
    ids = [int(line.strip()[7:]) for line in listing.split('\n') if 'apple-a' in line]
    return f'apple-a{max(ids)}'
# Baseline warning/standard/PIC flags (-Werror variant kept for reference).
# cc_flags += " -Wall -Werror -Wno-unknown-pragmas -std=c++14 -fPIC "
cc_flags += " -Wall -Wno-unknown-pragmas -std=c++14 -fPIC "
# 1. Arch/CPU specific optimization
if platform.machine() in ["x86_64", "AMD64"]:
    cc_flags += " -march=native "
elif platform.machine() == 'arm64' and platform.system() == "Darwin":
    # Apple silicon: target the newest apple-a CPU clang supports
    cc_flags += f" -mcpu={check_clang_latest_supported_cpu()} "
cc_flags += " -fdiagnostics-color=always "
# 2. Non standard include path
if platform.system() == 'Darwin' and platform.machine() == 'arm64':
    cc_flags += " -I/opt/homebrew/include "
# 3. User specified flags
if "cc_flags" in os.environ:
    cc_flags += os.environ["cc_flags"] + ' '
cc_flags += " -lstdc++ -ldl -shared "
if platform.system() == 'Darwin':
    # TODO: if not using apple clang, there is no need to add -lomp
    cc_flags += "-undefined dynamic_lookup -lomp "
    if os.environ.get('CONDA_PREFIX', None):
        cc_flags += f" -L{os.path.join(os.environ['CONDA_PREFIX'], 'lib')} "
    if platform.machine() == "arm64":
        cc_flags += " -L/opt/homebrew/lib "
opt_flags = ""
# Python extension build metadata: include dir and ABI-specific suffix.
py_include = jit_utils.get_py3_include_path()
LOG.v(f"py_include: {py_include}")
extension_suffix = jit_utils.get_py3_extension_suffix()
lib_suffix = extension_suffix.rsplit(".", 1)[0]
LOG.v(f"extension_suffix: {extension_suffix}")
so = ".so" if os.name != 'nt' else ".dll"
# Flags for compiling generated JIT kernels (OpenMP where supported).
kernel_opt_flags = os.environ.get("kernel_flags", "") + opt_flags
if platform.system() == 'Darwin':
    # TODO: if not using apple clang, cannot add -Xpreprocessor
    kernel_opt_flags += " -Xpreprocessor -fopenmp "
elif cc_type != 'cl':
    kernel_opt_flags += " -fopenmp "
def fix_cl_flags(cmd):
    """Rewrite a gcc/clang style command line for the current platform.

    - ``-l<name>`` flags that name cpython/lib* libraries are resolved to an
      explicit ``.so`` file on macOS (searched in the ``-L`` paths seen so
      far, most recent first), or rewritten to ``-l:<name>.so`` elsewhere.
    - Every ``-L<path>`` also gets a matching ``-Wl,-rpath,<path>`` so the
      produced binary can locate its libraries at runtime.
    """
    output = shsplit(cmd)
    output2 = []
    libpaths = []  # -L paths encountered so far, in order of appearance
    for s in output:
        if s.startswith("-l") and ("cpython" in s or "lib" in s):
            if platform.system() == 'Darwin':
                fname = s[2:] + ".so"
                for path in reversed(libpaths):
                    full = os.path.join(path, fname).replace("\"", "")
                    if os.path.isfile(full):
                        output2.append(full)
                        break
                else:
                    # file not found in any -L path: keep the flag as-is
                    output2.append(s)
            else:
                output2.append(f"-l:{s[2:]}.so")
        elif s.startswith("-L"):
            libpaths.append(s[2:])
            output2.append(f"{s} -Wl,-rpath,{s[2:]}")
        else:
            output2.append(s)
    return " ".join(output2)
if os.name == 'nt':
    if cc_type == 'g++':
        pass
    elif cc_type == 'cl':
        # MSVC: translate the gcc-style conventions used throughout this
        # file into cl.exe equivalents and point at the python import libs.
        py3_link_path = os.path.join(
            os.path.dirname(sys.executable),
            "libs",
        )
        cc_flags = remove_flags(cc_flags, ["-f", "-m"])
        cc_flags = cc_flags.replace("-std=c++14", "-std=c++17")
        cc_flags = cc_flags.replace("-lstdc++", "")
        cc_flags = cc_flags.replace("-ldl", "")
        cc_flags += f" -L\"{py3_link_path}\" -lpython3{sys.version_info.minor} "
        cc_flags += " -EHa -MD -utf-8 "
        import jittor_utils
        if jittor_utils.msvc_path:
            # bundled MSVC toolchain: add its include and lib directories
            mp = jittor_utils.msvc_path
            cc_flags += f' -nologo -I"{mp}\\VC\\include" -I"{mp}\\win10_kits\\include\\ucrt" -I"{mp}\\win10_kits\\include\\shared" -I"{mp}\\win10_kits\\include\\um" -DNOMINMAX '
            cc_flags += f' -L"{mp}\\VC\\lib" -L"{mp}\\win10_kits\\lib\\um\\x64" -L"{mp}\\win10_kits\\lib\\ucrt\\x64" '
        win_libpaths = {}
        def fix_cl_flags(cmd):
            """MSVC replacement for the posix ``fix_cl_flags`` above.

            Rewrites .o/-o/-l/-L/-shared/-include/-std flags into cl.exe
            syntax and collects all linker inputs behind one ``-link`` group.
            Newly seen -L paths are also registered as DLL search paths.
            """
            cmd = cmd.replace(".o ", ".obj ")
            cmd = cmd.replace(".o\"", ".obj\"")
            if cmd.endswith(".o"): cmd += "bj"
            if " -o " in cmd:
                if " -shared " in cmd:
                    cmd = cmd.replace(" -o ", " -Fe: ")
                    output = shsplit(cmd.split("-Fe:")[1].strip())[0]
                    base_output = os.path.basename(output).split('.')[0]
                    cmd += f" -DEF:\"{output}.def\" -IGNORE:4102 -IGNORE:4197 -IGNORE:4217 "
                elif " -c -o " in cmd:
                    cmd = cmd.replace(" -c -o ", " -c -Fo: ")
            flags = shsplit(cmd)
            output = []
            output2 = []
            for f in flags:
                if f.startswith("-link"):
                    pass
                elif f.startswith("-l"):
                    output2.append(f[2:]+".lib")
                elif f.startswith("-LIB"):
                    output2.append(f)
                elif f.startswith("-LD"):
                    output.append(f)
                elif f.startswith("-L"):
                    path = f[2:].replace("\"", "")
                    if path not in win_libpaths:
                        # first time this lib path is seen: make its DLLs loadable
                        win_libpaths[path] = 1
                        os.add_dll_directory(path)
                        os.environ["PATH"] = f";{path};" + os.environ["PATH"]
                    output2.append("-LIBPATH:"+f[2:])
                elif ".lib" in f:
                    output2.append(f)
                elif f.startswith("-DEF:"):
                    output2.append(f)
                elif f.startswith("-W") or f.startswith("-f"):
                    pass
                elif f.startswith("-std="):
                    output.append(f.replace("=", ":"))
                else:
                    output.append(f)
            cmd = " ".join(output)
            if len(output2):
                cmd += " -link " + " ".join(output2)
            cmd = cmd.replace("-include", "-FI")
            cmd = cmd.replace("-shared", "-LD")
            return cmd
# Optimization level: -O0 in debug builds, -O2 (+ -Ofast for kernels) otherwise,
# unless the user already supplied an -O flag via cc_flags.
if ' -O' not in cc_flags:
    if os.environ.get("debug", "0") == "1":
        opt_flags += " -O0 "
    else:
        opt_flags += " -O2 "
        kernel_opt_flags += " -Ofast "
# Optional link-time optimization, opt-in via enable_lto=1.
lto_flags = ""
if os.environ.get("enable_lto") == "1":
    if cc_type == "icc":
        lto_flags = " -flto -ipo -ipo-c "
    elif cc_type == "g++":
        lto_flags = " -flto -fuse-linker-plugin "
    else:
        lto_flags = " -flto "
# Create the cache directory layout used by the rest of the build.
make_cache_dir(cache_path)
make_cache_dir(os.path.join(cache_path, "jit"))
make_cache_dir(os.path.join(cache_path, "obj_files"))
make_cache_dir(os.path.join(cache_path, "gen"))
make_cache_dir(os.path.join(cache_path, "tmp"))
ck_path = os.path.join(cache_path, "checkpoints")
make_cache_dir(ck_path)
# build cache_compile
cc_flags += f" -I\"{os.path.join(jittor_path, 'src')}\" "
cc_flags += py_include
check_cache_compile()
LOG.v(f"Get cache_compile: {jit_utils.cc}")
# check cuda
has_cuda = 0
check_cuda()
nvcc_flags = os.environ.get("nvcc_flags", "")
if has_cuda:
    nvcc_flags += cc_flags
def convert_nvcc_flags(nvcc_flags):
    """Translate host-compiler flags into a command line nvcc accepts.

    Host-only options are wrapped in ``-Xcompiler`` or dropped, MSVC-style
    options are converted back to their gcc-style spellings on Windows, and
    nvcc-specific options (fast math, ccbin, cuda include dir, optional -G
    device debug) are appended.
    """
    # nvcc don't support -Wall option
    if os.name == 'nt':
        # undo/adapt the MSVC translations applied by fix_cl_flags above
        nvcc_flags = nvcc_flags.replace("-fp:", "-Xcompiler -fp:")
        nvcc_flags = nvcc_flags.replace("-EH", "-Xcompiler -EH")
        nvcc_flags = nvcc_flags.replace("-M", "-Xcompiler -M")
        nvcc_flags = nvcc_flags.replace("-utf", "-Xcompiler -utf")
        nvcc_flags = nvcc_flags.replace("-nologo", "")
        nvcc_flags = nvcc_flags.replace("-std:", "-std=")
        nvcc_flags = nvcc_flags.replace("-Fo:", "-o")
        nvcc_flags = nvcc_flags.replace("-LD", "-shared")
        nvcc_flags = nvcc_flags.replace("-LIBPATH:", "-L")
        nvcc_flags = nvcc_flags.replace("-link", "")
        def func(x):
            # turn bare "path/foo.lib" tokens into -L"path" -lfoo
            if ".lib" not in x: return x
            x = x.replace("\"", "")
            a = os.path.dirname(x)
            b = os.path.basename(x)
            if not b.endswith(".lib"):
                return x
            return f"-L\"{a}\" -l{b[:-4]}"
        nvcc_flags = map_flags(nvcc_flags, func)
        if nvcc_version >= [11,4]:
            nvcc_flags = nvcc_flags.replace("-std=c++17", "-std=c++14 -Xcompiler -std:c++14")
        else:
            nvcc_flags = nvcc_flags.replace("-std=c++17", "")
    nvcc_flags = nvcc_flags.replace("-Wall", "")
    nvcc_flags = nvcc_flags.replace("-Wno-unknown-pragmas", "")
    nvcc_flags = nvcc_flags.replace("-fopenmp", "")
    nvcc_flags = nvcc_flags.replace("-march", "-Xcompiler -march")
    nvcc_flags = nvcc_flags.replace("-Werror", "")
    nvcc_flags = nvcc_flags.replace("-fPIC", "-Xcompiler -fPIC")
    nvcc_flags = nvcc_flags.replace("-fdiagnostics", "-Xcompiler -fdiagnostics")
    nvcc_flags += f" -x cu --cudart=shared -ccbin=\"{cc_path}\" --use_fast_math "
    # nvcc warning is noise
    nvcc_flags += " -w "
    nvcc_flags += f" -I\"{os.path.join(jittor_path, 'extern/cuda/inc')}\" "
    if os.environ.get("cuda_debug", "0") == "1":
        nvcc_flags += " -G "
    return nvcc_flags
nvcc_flags = convert_nvcc_flags(nvcc_flags)
# from .acl_compiler import check_acl
from .extern.acl import acl_compiler
jit_utils.add_backend(acl_compiler)
# activate the first backend whose check() succeeds
for mod in jit_utils.backends:
    if mod.check():
        break
# build core
gen_jit_flags()
gen_jit_tests()
op_headers = glob.glob(jittor_path+"/src/ops/**/*op.h", recursive=True)
jit_src = gen_jit_op_maker(op_headers)
LOG.vvvv(jit_src)
with open(os.path.join(cache_path, "gen", "jit_op_maker.h"), 'w', encoding='utf8') as f:
    f.write(jit_src)
cc_flags += f' -I\"{cache_path}\" -L\"{cache_path}\" -L\"{jit_utils.cache_path}\" '
# gen pyjt
pyjt_gen_src = pyjt_compiler.compile(cache_path, jittor_path)
# initialize order:
# 1. registers
# 2. generate source
# 3. op_utils
# 4. other
files2 = pyjt_gen_src
# include .cu sources only when cuda is available
ext_args = 'c[cu]' if has_cuda else 'cc'
files4 = glob.glob(jittor_path+"/src/**/*."+ext_args, recursive=True)
files4 = [ f[len(jittor_path)+1:] for f in files4 ]
# files4 = run_cmd('find -L src | grep '+grep_args, jittor_path).splitlines()
# Files whose static initializers must run first / last (link order matters).
at_beginning = [
    "src/ops/op_utils.cc",
    "src/ops/op_register.cc",
    "src/init.cc",
    "src/event_queue.cc",
    "src/mem/allocator/sfrl_allocator.cc",
    "src/mem/allocator.cc",
    "src/misc/nano_string.cc",
]
at_last = [
    "src/profiler/profiler.cc",
    "src/executor.cc",
]
if os.name == 'nt':
    at_beginning = [ x.replace('/','\\') for x in at_beginning ]
    at_last = [ x.replace('/','\\') for x in at_last ]
# move the pinned files to the front (preserving their listed order) ...
for i in range(len(at_beginning)):
    files4.remove(at_beginning[i])
    files4.insert(i, at_beginning[i])
# ... and the late ones to the back
for v in at_last:
    files4.remove(v)
    files4.append(v)
# register* sources compile before everything else
registers = [ name for name in files4 if "register" in name ]
for name in registers: files4.remove(name)
files = registers + files2 + files4
# jit_utils core sources are already built into their own extension
for file in jit_utils_core_files:
    files.remove(file)
LOG.vv("compile order:", files)
# Preload the OpenMP runtime matching the host compiler so the core
# extension resolves its symbols at dlopen time.
if platform.system() == 'Linux':
    libname = {"clang":"omp", "icc":"iomp5", "g++":"gomp"}[cc_type]
    libname = ctypes.util.find_library(libname)
    assert libname is not None, "openmp library not found"
    ctypes.CDLL(libname, os.RTLD_NOW | os.RTLD_GLOBAL)
if platform.machine()=='sw_64':
    # NOTE(review): disables HTTPS certificate verification globally on
    # sw_64 — presumably a workaround for missing CA bundles on that platform
    import ssl
    ssl._create_default_https_context = ssl._create_unverified_context
# Optional pre-packaged source bundle (utils/data.gz): compile it into a
# data.o object once per content hash and link it instead of __data__ files.
data_gz_path = os.path.join(jittor_path, "utils", "data.gz")
use_data_gz = os.path.isfile(data_gz_path)
if os.environ.get("use_data_gz", "1") == "0":
    use_data_gz = False
if use_data_gz:
    import gzip
    with gzip.open(data_gz_path, 'rb') as f:
        data = f.read()
    md5 = hashlib.md5(data).hexdigest()
    target_md5 = None
    data_gz_md5_path = os.path.join(cache_path, "data.md5")
    if os.path.isfile(data_gz_md5_path):
        with open(data_gz_md5_path, 'r') as f:
            target_md5 = f.read()
    data_o_path = os.path.join(cache_path, "data.o")
    if target_md5 != md5:
        # content changed (or first build): recompile the bundled source
        data_s_path = os.path.join(cache_path, "data.cc")
        with open(data_s_path, "w") as f:
            f.write(data.decode("utf8"))
        dflags = (cc_flags+opt_flags)\
            .replace("-Wall", "") \
            .replace("-Werror", "") \
            .replace("-shared", "")
        vdp = os.path.join(jittor_path, "src", "utils", "vdp")
        run_cmd(fix_cl_flags(f"{cc_path} {dflags} -include \"{vdp}\" \"{data_s_path}\" -c -o \"{data_o_path}\""))
        os.remove(data_s_path)
        with open(data_gz_md5_path, 'w') as f:
            f.write(md5)
    files.append(data_o_path)
    files = [f for f in files if "__data__" not in f]
# Compile and load the jittor core extension.
cc_flags += f" -l\"jit_utils_core{lib_suffix}\" "
compile(cc_path, cc_flags+opt_flags, files, 'jittor_core'+extension_suffix)
cc_flags += f" -l\"jittor_core{lib_suffix}\" "
# TODO: move to compile_extern.py
# compile_extern()
with jit_utils.import_scope(import_flags):
    import jittor_core as core
flags = core.Flags()
if has_cuda:
    # Rebuild nvcc_flags from the final cc_flags and clamp the requested
    # CUDA architectures to what this nvcc release supports.
    nvcc_flags = " " + os.environ.get("nvcc_flags", "") + " "
    nvcc_flags += convert_nvcc_flags(cc_flags)
    nvcc_version = list(jit_utils.get_int_version(nvcc_path))
    max_arch = 1000
    if nvcc_version < [11,]:
        max_arch = 75
    elif nvcc_version < [11,1]:
        max_arch = 80
    if len(flags.cuda_archs):
        min_arch = 30
        archs = []
        for arch in flags.cuda_archs:
            if arch<min_arch:
                LOG.w(f"CUDA arch({arch})<{min_arch} is not supported")
                continue
            if arch>max_arch:
                LOG.w(f"CUDA arch({arch})>{max_arch} will be backward-compatible")
                arch = max_arch
            archs.append(arch)
        flags.cuda_archs = archs
        nvcc_flags += f" -arch=compute_{min(archs)} "
        nvcc_flags += ''.join(map(lambda x:f' -code=sm_{x} ', archs))
# Publish the final toolchain configuration to the core.
flags.cc_path = cc_path
flags.cc_type = cc_type
flags.cc_flags = cc_flags + kernel_opt_flags
flags.nvcc_path = nvcc_path
flags.nvcc_flags = nvcc_flags
flags.python_path = python_path
flags.cache_path = cache_path
flags.jittor_path = jittor_path
flags.gdb_path = gdb_path
flags.addr2line_path = addr2line_path
flags.has_pybt = has_pybt
core.set_lock_path(lock.lock_path)
|
import os.path as osp
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from torch.utils.data import Dataset
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .pipelines import Compose
from .coco import CocoDataset
from pycocotools.coco import COCO
@DATASETS.register_module()
class Bdd100kDataset(CocoDataset):
    """Custom dataset for detection.
    The annotation format is shown as follows. The `ann` field is optional for
    testing.
    .. code-block:: none
        [
            {
                'filename': 'a.jpg',
                'width': 1280,
                'height': 720,
                'ann': {
                    'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order.
                    'labels': <np.ndarray> (n, ),
                    'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
                    'labels_ignore': <np.ndarray> (k, 4) (optional field)
                }
            },
            ...
        ]
    Args:
        ann_file (str): Annotation file path.
        pipeline (list[dict]): Processing pipeline.
        classes (str | Sequence[str], optional): Specify classes to load.
            If is None, ``cls.CLASSES`` will be used. Default: None.
        data_root (str, optional): Data root for ``ann_file``,
            ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
        test_mode (bool, optional): If set True, annotation will not be loaded.
        filter_empty_gt (bool, optional): If set true, images without bounding
            boxes of the dataset's classes will be filtered out. This option
            only works when `test_mode=False`, i.e., we never filter images
            during tests.
    """
    CLASSES =('person','rider','car','bus','truck','bike','motor','traffic light','traffic sign','train')

    def __init__(self,
                 ann_file,
                 pipeline,
                 classes=None,
                 data_root=None,
                 img_prefix='',
                 seg_prefix=None,
                 proposal_file=None,
                 test_mode=False,
                 filter_empty_gt=True):
        self.ann_file = ann_file
        self.data_root = data_root
        self.img_prefix = img_prefix
        self.seg_prefix = seg_prefix
        self.proposal_file = proposal_file
        self.test_mode = test_mode
        self.filter_empty_gt = filter_empty_gt
        self.CLASSES = self.get_classes(classes)
        # join paths if data_root is specified
        if self.data_root is not None:
            if not osp.isabs(self.ann_file):
                self.ann_file = osp.join(self.data_root, self.ann_file)
            if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
                self.img_prefix = osp.join(self.data_root, self.img_prefix)
            if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
                self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
            if not (self.proposal_file is None
                    or osp.isabs(self.proposal_file)):
                self.proposal_file = osp.join(self.data_root,
                                              self.proposal_file)
        # load annotations (and proposals)
        self.data_infos = self.load_annotations(self.ann_file)
        if self.proposal_file is not None:
            self.proposals = self.load_proposals(self.proposal_file)
        else:
            self.proposals = None
        # filter images too small and containing no annotations
        if not test_mode:
            valid_inds = self._filter_imgs()
            self.data_infos = [self.data_infos[i] for i in valid_inds]
            if self.proposals is not None:
                self.proposals = [self.proposals[i] for i in valid_inds]
            # set group flag for the sampler
            self._set_group_flag()
        # processing pipeline
        self.pipeline = Compose(pipeline)

    def __len__(self):
        """Total number of samples of data."""
        return len(self.data_infos)

    def load_annotations(self, ann_file):
        """Load annotation from annotation file."""
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        total_ann_ids = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            info['filename'] = info['file_name']
            data_infos.append(info)
            ann_ids = self.coco.get_ann_ids(img_ids=[i])
            total_ann_ids.extend(ann_ids)
        assert len(set(total_ann_ids)) == len(
            total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
        return data_infos

    def load_proposals(self, proposal_file):
        """Load proposal from proposal file."""
        return mmcv.load(proposal_file)

    def get_ann_info(self, idx):
        """Get COCO annotation by index.
        Args:
            idx (int): Index of data.
        Returns:
            dict: Annotation info of specified index.
        """
        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return self._parse_ann_info(self.data_infos[idx], ann_info)

    def get_cat_ids(self, idx):
        """Get category ids by index.
        Args:
            idx (int): Index of data.
        Returns:
            list[int]: All categories in the image of specified index.
        """
        # BUG FIX: np.int is deprecated since NumPy 1.20 and removed in 1.24;
        # use the explicit np.int64 instead.
        return self.data_infos[idx]['ann']['labels'].astype(np.int64).tolist()

    def pre_pipeline(self, results):
        """Prepare results dict for pipeline."""
        results['img_prefix'] = self.img_prefix
        results['seg_prefix'] = self.seg_prefix
        results['proposal_file'] = self.proposal_file
        results['bbox_fields'] = []
        results['mask_fields'] = []
        results['seg_fields'] = []

    def _filter_imgs(self, min_size=32):
        """Filter images too small."""
        if self.filter_empty_gt:
            warnings.warn(
                'CustomDataset does not support filtering empty gt images.')
        valid_inds = []
        for i, img_info in enumerate(self.data_infos):
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
        return valid_inds

    def _set_group_flag(self):
        """Set flag according to image aspect ratio.
        Images with aspect ratio greater than 1 will be set as group 1,
        otherwise group 0.
        """
        self.flag = np.zeros(len(self), dtype=np.uint8)
        for i in range(len(self)):
            img_info = self.data_infos[i]
            if img_info['width'] / img_info['height'] > 1:
                self.flag[i] = 1

    def _rand_another(self, idx):
        """Get another random index from the same group as the given index."""
        pool = np.where(self.flag == self.flag[idx])[0]
        return np.random.choice(pool)

    def __getitem__(self, idx):
        """Get training/test data after pipeline.
        Args:
            idx (int): Index of data.
        Returns:
            dict: Training/test data (with annotation if `test_mode` is set \
                True).
        """
        if self.test_mode:
            return self.prepare_test_img(idx)
        while True:
            data = self.prepare_train_img(idx)
            if data is None:
                idx = self._rand_another(idx)
                continue
            return data

    def prepare_train_img(self, idx):
        """Get training data and annotations after pipeline.
        Args:
            idx (int): Index of data.
        Returns:
            dict: Training data and annotation after pipeline with new keys \
                introduced by pipeline.
        """
        img_info = self.data_infos[idx]
        ann_info = self.get_ann_info(idx)
        results = dict(img_info=img_info, ann_info=ann_info)
        if self.proposals is not None:
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, idx):
        """Get testing data after pipeline.
        Args:
            idx (int): Index of data.
        Returns:
            dict: Testing data after pipeline with new keys introduced by \
                pipeline.
        """
        img_info = self.data_infos[idx]
        results = dict(img_info=img_info)
        if self.proposals is not None:
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    @classmethod
    def get_classes(cls, classes=None):
        """Get class names of current dataset.
        Args:
            classes (Sequence[str] | str | None): If classes is None, use
                default CLASSES defined by builtin dataset. If classes is a
                string, take it as a file name. The file contains the name of
                classes where each line contains one class name. If classes is
                a tuple or list, override the CLASSES defined by the dataset.
        Returns:
            tuple[str] or list[str]: Names of categories of the dataset.
        """
        if classes is None:
            return cls.CLASSES
        if isinstance(classes, str):
            # take it as a file path
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')
        return class_names

    def format_results(self, results, **kwargs):
        """Place holder to format result to dataset specific output."""
        pass

    def evaluate(self,
                 results,
                 metric='mAP',
                 logger=None,
                 proposal_nums=(100, 300, 1000),
                 iou_thr=0.5,
                 scale_ranges=None):
        """Evaluate the dataset.
        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. Default: 0.5.
            scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
                Default: None.
        """
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
        if metric == 'mAP':
            assert isinstance(iou_thrs, list)
            mean_aps = []
            for iou_thr in iou_thrs:
                # BUG FIX: the f-string reused single quotes inside a
                # single-quoted literal, which is a SyntaxError before
                # Python 3.12; use double quotes for the inner literal.
                print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
                mean_ap, _ = eval_map(
                    results,
                    annotations,
                    scale_ranges=scale_ranges,
                    iou_thr=iou_thr,
                    dataset=self.CLASSES,
                    logger=logger)
                mean_aps.append(mean_ap)
                eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
            eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            recalls = eval_recalls(
                gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thrs):
                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results
| import os.path as osp
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from torch.utils.data import Dataset
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .pipelines import Compose
from .coco import CocoDataset
from pycocotools.coco import COCO
@DATASETS.register_module()
class Bdd100kDataset(CocoDataset):
"""Custom dataset for detection.
The annotation format is shown as follows. The `ann` field is optional for
testing.
.. code-block:: none
[
{
'filename': 'a.jpg',
'width': 1280,
'height': 720,
'ann': {
'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order.
'labels': <np.ndarray> (n, ),
'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
'labels_ignore': <np.ndarray> (k, 4) (optional field)
}
},
...
]
Args:
ann_file (str): Annotation file path.
pipeline (list[dict]): Processing pipeline.
classes (str | Sequence[str], optional): Specify classes to load.
If is None, ``cls.CLASSES`` will be used. Default: None.
data_root (str, optional): Data root for ``ann_file``,
``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
test_mode (bool, optional): If set True, annotation will not be loaded.
filter_empty_gt (bool, optional): If set true, images without bounding
boxes of the dataset's classes will be filtered out. This option
only works when `test_mode=False`, i.e., we never filter images
during tests.
"""
CLASSES =('person','rider','car','bus','truck','bike','motor','traffic light','traffic sign','train')
def __init__(self,
ann_file,
pipeline,
classes=None,
data_root=None,
img_prefix='',
seg_prefix=None,
proposal_file=None,
test_mode=False,
filter_empty_gt=True):
self.ann_file = ann_file
self.data_root = data_root
self.img_prefix = img_prefix
self.seg_prefix = seg_prefix
self.proposal_file = proposal_file
self.test_mode = test_mode
self.filter_empty_gt = filter_empty_gt
self.CLASSES = self.get_classes(classes)
# join paths if data_root is specified
if self.data_root is not None:
if not osp.isabs(self.ann_file):
self.ann_file = osp.join(self.data_root, self.ann_file)
if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
self.img_prefix = osp.join(self.data_root, self.img_prefix)
if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
if not (self.proposal_file is None
or osp.isabs(self.proposal_file)):
self.proposal_file = osp.join(self.data_root,
self.proposal_file)
# load annotations (and proposals)
self.data_infos = self.load_annotations(self.ann_file)
if self.proposal_file is not None:
self.proposals = self.load_proposals(self.proposal_file)
else:
self.proposals = None
# filter images too small and containing no annotations
if not test_mode:
valid_inds = self._filter_imgs()
self.data_infos = [self.data_infos[i] for i in valid_inds]
if self.proposals is not None:
self.proposals = [self.proposals[i] for i in valid_inds]
# set group flag for the sampler
self._set_group_flag()
# processing pipeline
self.pipeline = Compose(pipeline)
def __len__(self):
"""Total number of samples of data."""
return len(self.data_infos)
def load_annotations(self, ann_file):
"""Load annotation from annotation file."""
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
ann_ids = self.coco.get_ann_ids(img_ids=[i])
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def load_proposals(self, proposal_file):
"""Load proposal from proposal file."""
return mmcv.load(proposal_file)
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist()
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['img_prefix'] = self.img_prefix
results['seg_prefix'] = self.seg_prefix
results['proposal_file'] = self.proposal_file
results['bbox_fields'] = []
results['mask_fields'] = []
results['seg_fields'] = []
def _filter_imgs(self, min_size=32):
"""Filter images too small."""
if self.filter_empty_gt:
warnings.warn(
'CustomDataset does not support filtering empty gt images.')
valid_inds = []
for i, img_info in enumerate(self.data_infos):
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _set_group_flag(self):
"""Set flag according to image aspect ratio.
Images with aspect ratio greater than 1 will be set as group 1,
otherwise group 0.
"""
self.flag = np.zeros(len(self), dtype=np.uint8)
for i in range(len(self)):
img_info = self.data_infos[i]
if img_info['width'] / img_info['height'] > 1:
self.flag[i] = 1
def _rand_another(self, idx):
"""Get another random index from the same group as the given index."""
pool = np.where(self.flag == self.flag[idx])[0]
return np.random.choice(pool)
def __getitem__(self, idx):
"""Get training/test data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training/test data (with annotation if `test_mode` is set \
True).
"""
if self.test_mode:
return self.prepare_test_img(idx)
while True:
data = self.prepare_train_img(idx)
if data is None:
idx = self._rand_another(idx)
continue
return data
def prepare_train_img(self, idx):
"""Get training data and annotations after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training data and annotation after pipeline with new keys \
introduced by pipeline.
"""
img_info = self.data_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, idx):
"""Get testing data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Testing data after pipeline with new keys intorduced by \
piepline.
"""
img_info = self.data_infos[idx]
results = dict(img_info=img_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
@classmethod
def get_classes(cls, classes=None):
"""Get class names of current dataset.
Args:
classes (Sequence[str] | str | None): If classes is None, use
default CLASSES defined by builtin dataset. If classes is a
string, take it as a file name. The file contains the name of
classes where each line contains one class name. If classes is
a tuple or list, override the CLASSES defined by the dataset.
Returns:
tuple[str] or list[str]: Names of categories of the dataset.
"""
if classes is None:
return cls.CLASSES
if isinstance(classes, str):
# take it as a file path
class_names = mmcv.list_from_file(classes)
elif isinstance(classes, (tuple, list)):
class_names = classes
else:
raise ValueError(f'Unsupported type {type(classes)} of classes.')
return class_names
def format_results(self, results, **kwargs):
    """Placeholder hook; subclasses override it to convert ``results``
    into their dataset-specific submission format."""
def evaluate(self,
             results,
             metric='mAP',
             logger=None,
             proposal_nums=(100, 300, 1000),
             iou_thr=0.5,
             scale_ranges=None):
    """Evaluate the dataset.

    Args:
        results (list): Testing results of the dataset.
        metric (str | list[str]): Metrics to be evaluated. Only one metric
            per call is supported ('mAP' or 'recall').
        logger (logging.Logger | None | str): Logger used for printing
            related information during evaluation. Default: None.
        proposal_nums (Sequence[int]): Proposal number used for evaluating
            recalls, such as recall@100, recall@1000.
            Default: (100, 300, 1000).
        iou_thr (float | list[float]): IoU threshold. Default: 0.5.
        scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
            Default: None.

    Returns:
        OrderedDict: per-threshold AP entries plus 'mAP', or
            'recall@num@iou' entries (and 'AR@num' averages).
    """
    # A list metric is accepted only if it holds exactly one name.
    if not isinstance(metric, str):
        assert len(metric) == 1
        metric = metric[0]
    allowed_metrics = ['mAP', 'recall']
    if metric not in allowed_metrics:
        raise KeyError(f'metric {metric} is not supported')
    annotations = [self.get_ann_info(i) for i in range(len(self))]
    eval_results = OrderedDict()
    # Normalise a single float threshold to a list so mAP can be
    # averaged over several thresholds uniformly.
    iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
    if metric == 'mAP':
        assert isinstance(iou_thrs, list)
        mean_aps = []
        for iou_thr in iou_thrs:
            print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
            mean_ap, _ = eval_map(
                results,
                annotations,
                scale_ranges=scale_ranges,
                iou_thr=iou_thr,
                dataset=self.CLASSES,
                logger=logger)
            mean_aps.append(mean_ap)
            # e.g. key 'AP50' for iou_thr == 0.5.
            eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
        eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
    elif metric == 'recall':
        gt_bboxes = [ann['bboxes'] for ann in annotations]
        recalls = eval_recalls(
            gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
        for i, num in enumerate(proposal_nums):
            for j, iou in enumerate(iou_thrs):
                eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
        # Average recall across thresholds when more than one was given.
        if recalls.shape[1] > 1:
            ar = recalls.mean(axis=1)
            for i, num in enumerate(proposal_nums):
                eval_results[f'AR@{num}'] = ar[i]
    return eval_results
|
#!/usr/bin/env python3
# nexfil -- find social media profiles for a username across many sites.
version = '1.0.0'
# ANSI escape codes for coloured terminal output.
R = '\033[31m'  # red
G = '\033[32m'  # green
C = '\033[36m'  # cyan
W = '\033[0m'   # white
Y = '\033[33m'  # yellow
import argparse
parser = argparse.ArgumentParser(description=f'nexfil - Find social media profiles on the web | v{version}')
parser.add_argument('-u', help='Specify username', type=str)
parser.add_argument('-d', help='Specify DNS Servers [Default : 1.1.1.1]', type=str, nargs='+')
parser.add_argument('-f', help='Specify a file containing username list', type=str)
parser.add_argument('-l', help='Specify multiple comma separated usernames', type=str)
parser.add_argument('-t', help='Specify timeout [Default : 20]', type=int)
parser.add_argument('-v', help='Prints version', action='store_true')
parser.set_defaults(
    d=['1.1.1.1'],
    t=20,
    v=False
)
args = parser.parse_args()
uname = args.u
dns = args.d
ulist = args.l
fname = args.f
tout = args.t
vers = args.v
if vers == True:
    # -v: dump debug info plus the version string and quit.
    print(dns, type(dns))
    print(uname, type(uname))
    print(version)
    exit()
# Exactly one of -u / -l / -f must be supplied.
if uname == None and ulist == None and fname == None:
    print(
        f'{R}[-] {C}Please provide {Y}one {C}of the following : \n\t{C}* {Y}username [-u]\n\t{C}* {Y}comma separated usernames [-l]\n\t{C}* {Y}file containing list of usernames [-f]{W}')
    exit()
# Derive the run mode from whichever input flag was given (priority: -u, -f, -l).
if uname != None:
    mode = 'single'
    if len(uname) > 0:
        if uname.isspace():
            print(f'{R}[-] {C}Username Missing!{W}')
            exit()
        else:
            pass
    else:
        print(f'{R}[-] {C}Username Missing!{W}')
        exit()
elif fname != None:
    mode = 'file'
elif ulist != None:
    mode = 'list'
    tmp = ulist
    # -l input must be a comma-separated list.
    if ',' not in tmp:
        print(f'{R}[-] {C}Invalid Format!{W}')
        exit()
    else:
        ulist = tmp.split(',')
else:
    pass
print(f'{G}[+] {C}Importing Modules...{W}')
import socket
import asyncio
import aiohttp
import tldextract
from json import loads
from datetime import datetime
from requests import get, exceptions
from os import getenv, path, makedirs
gh_version = ''
twitter_url = ''
discord_url = ''
found = []
codes = [200, 301, 302, 403, 405, 410, 418, 500]
home = getenv('HOME')
loc_data = home + '/.local/share/nexfil/dumps/'
def fetch_meta():
    """Populate gh_version / twitter_url / discord_url.

    Tries the online metadata.json on GitHub first; on a non-200 response
    or any exception, falls back to the local metadata.json copy.
    The original duplicated the parse-and-assign logic three times; it is
    factored into one helper here.
    """
    global gh_version, twitter_url, discord_url

    def _load(raw):
        # Parse a metadata.json payload into the module-level globals.
        global gh_version, twitter_url, discord_url
        json_data = loads(raw)
        gh_version = json_data['version']
        twitter_url = json_data['twitter']
        discord_url = json_data['discord']

    try:
        rqst = get('https://raw.githubusercontent.com/thewhiteh4t/nexfil/master/metadata.json', timeout=5)
        if rqst.status_code == 200:
            _load(rqst.text)
        else:
            with open('metadata.json', 'r') as metadata:
                _load(metadata.read())
    except Exception as exc:
        print(f'\n{R}[-] {C}Exception : {W}{str(exc)}')
        with open('metadata.json', 'r') as metadata:
            _load(metadata.read())
def banner():
    """Print the ASCII-art banner with author, social links and version."""
    # Local renamed from 'banner' to avoid shadowing the function name.
    art = r'''
 __ _ _____ _ _ _____ _____ _
 | \ | |____ \___/ |____ | |
 | \_| |____ _/ \_ | __|__ |_____'''
    print(f'{G}{art}{W}\n')
    print(f'{G}[>] {C}Created By : {W}thewhiteh4t')
    print(f'{G} |---> {C}Twitter : {W}{twitter_url}')
    print(f'{G} |---> {C}Discord : {W}{discord_url}')
    print(f'{G}[>] {C}Version : {W}{version}\n')
async def clout(url):
    """Record a found profile URL in the global list and print it with the
    registered domain highlighted."""
    global found
    found.append(url)
    url = str(url)
    ext = tldextract.extract(url)
    dom = str(ext.domain)
    suf = str(ext.suffix)
    orig = f'{dom}.{suf}'
    # Re-colour just the domain.suffix portion inside the printed URL.
    cl_dom = f'{Y}{dom}.{suf}{C}'
    url = url.replace(orig, cl_dom)
    print(f'{G}[+] {C}{url}{W}')
async def query(session, url, test, data, uname):
    """Dispatch the detection strategy for one site.

    ``test`` selects how profile existence is decided: 'method' (GET
    fallback), 'string' (marker text absent from body), 'redirect' (no
    Location header), 'api' (JSON search endpoint), 'alt' (alternate URL),
    or 'url'/'subdomain'/None (HEAD request plus status/URL checks).
    """
    try:
        if test == 'method':
            await test_method(session, url)
        elif test == 'string':
            await test_string(session, url, data)
        elif test == 'redirect':
            await test_redirect(session, url)
        elif test == 'api':
            # 'data' holds the site's API endpoint template.
            data = data.format(uname)
            await test_api(session, url, data)
        elif test == 'alt':
            data = data.format(uname)
            await test_alt(session, url, data)
        else:
            response = await session.head(url, allow_redirects=True)
            if response.status in codes:
                if test is None:
                    await clout(response.url)
                elif test == 'url':
                    await test_url(response.url)
                elif test == 'subdomain':
                    await test_sub(url, response.url)
                else:
                    pass
            elif response.status == 404 and test == 'method':
                # NOTE(review): unreachable -- test == 'method' is already
                # handled by the first branch above; kept as-is.
                await test_method(session, url)
            elif response.status != 404:
                print(f'{R}[-] {Y}[{url}] {W}[{response.status}]')
            else:
                pass
    except asyncio.TimeoutError:
        print(f'{Y}[!] Timeout :{C} {url}{W}')
    except Exception as exc:
        print(f'{Y}[!] Exception [query] [{url}] :{W} {str(exc)}')
async def test_method(session, url):
    """Fallback existence check: a GET that is not 404 counts as a hit."""
    try:
        resp = await session.get(url, allow_redirects=True)
        if resp.status != 404:
            await clout(resp.url)
    except asyncio.TimeoutError:
        print(f'{Y}[!] Timeout :{C} {url}{W}')
    except Exception as exc:
        print(f'{Y}[!] Exception [test_method] [{url}] :{W} {exc}')
        return
async def test_url(url):
    """Report a hit when the final URL differs from the site's base URL.

    Equality is tested ignoring a single trailing slash on either side,
    which collapses the original four-way slash-combination branching.
    """
    url = str(url)
    proto = url.split('://')[0]
    ext = tldextract.extract(url)
    if ext.subdomain != '':
        base_url = proto + '://' + ext.subdomain + '.' + ext.registered_domain
    else:
        base_url = proto + '://' + ext.registered_domain

    def _norm(u):
        # Strip at most one trailing '/' so 'a/' and 'a' compare equal.
        return u[:-1] if u.endswith('/') else u

    if _norm(url) != _norm(base_url):
        await clout(url)
async def test_sub(url, resp_url):
    """Report a hit only when no redirect happened (final URL == probe URL)."""
    if url == str(resp_url):
        await clout(url)
async def test_string(session, url, data):
    """Profile exists when marker text ``data`` is absent from the page body."""
    try:
        response = await session.get(url)
        if response.status == 404:
            pass
        elif response.status not in codes:
            print(f'{R}[-] {Y}[{url}] {W}[{response.status}]')
        else:
            resp_body = await response.text()
            # 'data' is the site's "user not found" marker string; its
            # presence means the profile does NOT exist.
            if data in resp_body:
                pass
            else:
                await clout(response.url)
    except asyncio.TimeoutError:
        print(f'{Y}[!] Timeout :{C} {url}{W}')
        return
    except Exception as exc:
        print(f'{Y}[!] Exception [test_string] [{url}] :{W} {exc}')
        return
async def test_api(session, url, endpoint):
    """Query a site's search API; a non-empty result set means the
    profile exists and the public ``url`` is reported."""
    try:
        response = await session.get(endpoint)
        if response.status != 404:
            resp_body = loads(await response.text())
            if len(resp_body) != 0:
                # Different sites nest their results under different keys;
                # try each known key in turn.
                tmp_vars = ['results', 'users', 'username']
                for var in tmp_vars:
                    try:
                        if resp_body.get(var) != None:
                            if len(resp_body[var]) != 0:
                                await clout(url)
                                return
                            else:
                                pass
                        else:
                            pass
                    except:
                        # Value may not support len() / .get(); skip key.
                        pass
            else:
                pass
        else:
            pass
    except Exception as exc:
        print(f'{Y}[!] Exception [test_api] [{url}] :{W} {exc}')
        return
async def test_alt(session, url, alt_url):
    """Probe ``alt_url`` without following redirects; an exact HTTP 200
    means the profile exists and the public ``url`` is reported."""
    try:
        resp = await session.get(alt_url, allow_redirects=False)
        if resp.status == 200:
            await clout(url)
    except Exception as exc:
        print(f'{Y}[!] Exception [test_alt] [{url}] :{W} {str(exc)}')
        return
async def test_redirect(session, url):
    """Profile exists when the site does NOT redirect away from the URL."""
    try:
        response = await session.head(url, allow_redirects=False)
    except asyncio.TimeoutError:
        print(f'{Y}[!] Timeout :{C} {url}{W}')
        return
    except Exception as exc:
        print(f'{Y}[!] Exception [test_redirect] [{url}] :{W} {str(exc)}')
        return
    try:
        location = response.headers['Location']
        # A Location equal to the probed URL still counts as a hit.
        if url != location:
            pass
        else:
            await clout(url)
    except KeyError:
        # No Location header => no redirect => the profile page was served.
        await clout(url)
def autosave(uname, ulist, mode, found, start_time, end_time):
    """Write the discovered profile URLs to a timestamped report file.

    Args:
        uname (str): single username (mode == 'single').
        ulist (list): usernames (mode 'list' / 'file').
        mode (str): 'single', 'list' or 'file'.
        found (list): profile URLs collected during the run.
        start_time / end_time (datetime): wall-clock run boundaries.
    """
    if not path.exists(loc_data):
        makedirs(loc_data)
    if mode == 'single':
        filename = f'{uname}_{str(int(datetime.now().timestamp()))}.txt'
        username = uname
    elif mode == 'list' or mode == 'file':
        filename = f'session_{str(int(datetime.now().timestamp()))}.txt'
        username = ulist
    with open(loc_data + filename, 'w') as outfile:
        outfile.write(f'nexfil v{version}\n')
        outfile.write(f'Username : {username}\n')
        # BUGFIX: the strftime argument must use a quote style different
        # from the enclosing f-string -- nesting the same quote type is a
        # SyntaxError on Python < 3.12.
        outfile.write(f'Start Time : {start_time.strftime("%c")}\n')
        outfile.write(f'End Time : {end_time.strftime("%c")}\n')
        outfile.write(f'Total Profiles Found : {len(found)}\n\n')
        outfile.write('URLs : \n\n')
        for url in found:
            outfile.write(f'{url}\n')
        outfile.write(f'{"-" * 40}\n')
    print(f'{G}[+] {C}Saved : {W}{loc_data + filename}')
async def main(uname):
    """Fan out one concurrent existence check per site in urls_json for *uname*."""
    tasks = []
    print(f'\n{G}[+] {C}Target :{W} {uname}\n')
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux i686; rv:88.0) Gecko/20100101 Firefox/88.0'
    }
    # Custom resolver so the user-supplied DNS servers (-d) are honoured.
    resolver = aiohttp.AsyncResolver(nameservers=dns)
    timeout = aiohttp.ClientTimeout(total=tout)
    # limit=0 removes the per-host connection cap; TLS verification is
    # disabled (ssl=False) to avoid failures on sites with odd certs.
    conn = aiohttp.TCPConnector(
        limit=0,
        family=socket.AF_INET,
        ssl=False,
        resolver=resolver
    )
    print(f'{Y}[!] Finding Profiles...{W}\n')
    async with aiohttp.ClientSession(connector=conn, headers=headers, timeout=timeout) as session:
        # One task per site entry; each entry supplies a URL template,
        # a detection method ('test') and optional extra data.
        for block in urls_json:
            curr_url = block['url'].format(uname)
            test = block['test']
            data = block['data']
            task = asyncio.create_task(query(session, curr_url, test, data, uname))
            tasks.append(task)
        await asyncio.gather(*tasks)
def netcheck():
    """Verify outbound connectivity by hitting github.com; exit on failure."""
    print(f'\n{G}[+] {C}Checking Connectivity...{W}')
    try:
        rqst = get('https://github.com/', timeout=5)
    except exceptions.ConnectionError:
        print(f'{R}[-] {C}Connection Error! Exiting.{W}')
        exit()
    if rqst.status_code != 200:
        print(f'{Y}[!] {C}Status : {W}{rqst.status_code}')
def launch(uname):
    """Run one full lookup for *uname* on a fresh event loop.

    asyncio.run() replaces the manual new_event_loop / run_until_complete /
    close dance: it also cancels leftover tasks and shuts down async
    generators before closing, which the original skipped.
    """
    asyncio.run(main(uname))
# ---- script entry point: load site list, run lookups, report, autosave ----
try:
    netcheck()
    fetch_meta()
    banner()
    print(f'{Y}[!] Loading URLs...{W}')
    # url_store.json holds one entry per supported site (URL template,
    # detection method, optional data).
    with open('url_store.json', 'r') as url_store:
        raw_data = url_store.read()
    urls_json = loads(raw_data)
    print(f'{G}[+] {W}{len(urls_json)} {C}URLs Loaded!{W}')
    print(f'{G}[+] {C}Timeout : {W}{tout} secs')
    print(f'{G}[+] {C}DNS Servers : {W}{dns}')
    start_time = datetime.now()
    if mode == 'single':
        launch(uname)
    elif mode == 'list':
        # Strip whitespace around each comma-separated name, then run it.
        for uname in ulist:
            ulist[ulist.index(uname)] = uname.strip()
            launch(uname)
    elif mode == 'file':
        ulist = []
        try:
            # One username per line in the supplied file.
            with open(fname, 'r') as wdlist:
                tmp = wdlist.readlines()
            for user in tmp:
                ulist.append(user.strip())
            for uname in ulist:
                uname = uname.strip()
                launch(uname)
        except Exception as exc:
            print(f'{Y}[!] Exception [file] :{W} {str(exc)}')
            exit()
    else:
        pass
    end_time = datetime.now()
    delta = end_time - start_time
    if mode == 'single':
        print(f'\n{G}[+] {C}Lookup for {Y}{uname} {C}completed in {W}{delta}')
        print(f'\n{G}[+] {Y}{len(found)} {C}Possible Profiles Found for {Y}{uname}{W}')
    elif mode == 'list' or mode == 'file':
        print(f'\n{G}[+] {C}Lookup for {Y}{ulist} {C}completed in {W}{delta}')
        print(f'\n{G}[+] {Y}{len(found)} {C}Possible Profiles Found for {Y}{ulist}{W}')
    if len(found) != 0:
        autosave(uname, ulist, mode, found, start_time, end_time)
    else:
        pass
except KeyboardInterrupt:
    print(f'{R}[-] {C}Keyboard Interrupt.{W}')
    exit()
| #!/usr/bin/env python3
version = '1.0.0'
R = '\033[31m' # red
G = '\033[32m' # green
C = '\033[36m' # cyan
W = '\033[0m' # white
Y = '\033[33m' # yellow
import argparse
parser = argparse.ArgumentParser(description=f'nexfil - Find social media profiles on the web | v{version}')
parser.add_argument('-u', help='Specify username', type=str)
parser.add_argument('-d', help='Specify DNS Servers [Default : 1.1.1.1]', type=str, nargs='+')
parser.add_argument('-f', help='Specify a file containing username list', type=str)
parser.add_argument('-l', help='Specify multiple comma separated usernames', type=str)
parser.add_argument('-t', help='Specify timeout [Default : 20]', type=int)
parser.add_argument('-v', help='Prints version', action='store_true')
parser.set_defaults(
d=['1.1.1.1'],
t=20,
v=False
)
args = parser.parse_args()
uname = args.u
dns = args.d
ulist = args.l
fname = args.f
tout = args.t
vers = args.v
if vers == True:
print(dns, type(dns))
print(uname, type(uname))
print(version)
exit()
if uname == None and ulist == None and fname == None:
print(
f'{R}[-] {C}Please provide {Y}one {C}of the following : \n\t{C}* {Y}username [-u]\n\t{C}* {Y}comma separated usernames [-l]\n\t{C}* {Y}file containing list of usernames [-f]{W}')
exit()
if uname != None:
mode = 'single'
if len(uname) > 0:
if uname.isspace():
print(f'{R}[-] {C}Username Missing!{W}')
exit()
else:
pass
else:
print(f'{R}[-] {C}Username Missing!{W}')
exit()
elif fname != None:
mode = 'file'
elif ulist != None:
mode = 'list'
tmp = ulist
if ',' not in tmp:
print(f'{R}[-] {C}Invalid Format!{W}')
exit()
else:
ulist = tmp.split(',')
else:
pass
print(f'{G}[+] {C}Importing Modules...{W}')
import socket
import asyncio
import aiohttp
import tldextract
from json import loads
from datetime import datetime
from requests import get, exceptions
from os import getenv, path, makedirs
gh_version = ''
twitter_url = ''
discord_url = ''
found = []
codes = [200, 301, 302, 403, 405, 410, 418, 500]
home = getenv('HOME')
loc_data = home + '/.local/share/nexfil/dumps/'
def fetch_meta():
global gh_version, twitter_url, discord_url
try:
rqst = get('https://raw.githubusercontent.com/thewhiteh4t/nexfil/master/metadata.json', timeout=5)
sc = rqst.status_code
if sc == 200:
metadata = rqst.text
json_data = loads(metadata)
gh_version = json_data['version']
twitter_url = json_data['twitter']
discord_url = json_data['discord']
else:
with open('metadata.json', 'r') as metadata:
json_data = loads(metadata.read())
gh_version = json_data['version']
twitter_url = json_data['twitter']
discord_url = json_data['discord']
except Exception as exc:
print(f'\n{R}[-] {C}Exception : {W}{str(exc)}')
with open('metadata.json', 'r') as metadata:
json_data = loads(metadata.read())
gh_version = json_data['version']
twitter_url = json_data['twitter']
discord_url = json_data['discord']
def banner():
banner = r'''
__ _ _____ _ _ _____ _____ _
| \ | |____ \___/ |____ | |
| \_| |____ _/ \_ | __|__ |_____'''
print(f'{G}{banner}{W}\n')
print(f'{G}[>] {C}Created By : {W}thewhiteh4t')
print(f'{G} |---> {C}Twitter : {W}{twitter_url}')
print(f'{G} |---> {C}Discord : {W}{discord_url}')
print(f'{G}[>] {C}Version : {W}{version}\n')
async def clout(url):
global found
found.append(url)
url = str(url)
ext = tldextract.extract(url)
dom = str(ext.domain)
suf = str(ext.suffix)
orig = f'{dom}.{suf}'
cl_dom = f'{Y}{dom}.{suf}{C}'
url = url.replace(orig, cl_dom)
print(f'{G}[+] {C}{url}{W}')
async def query(session, url, test, data, uname):
try:
if test == 'method':
await test_method(session, url)
elif test == 'string':
await test_string(session, url, data)
elif test == 'redirect':
await test_redirect(session, url)
elif test == 'api':
data = data.format(uname)
await test_api(session, url, data)
elif test == 'alt':
data = data.format(uname)
await test_alt(session, url, data)
else:
response = await session.head(url, allow_redirects=True)
if response.status in codes:
if test is None:
await clout(response.url)
elif test == 'url':
await test_url(response.url)
elif test == 'subdomain':
await test_sub(url, response.url)
else:
pass
elif response.status == 404 and test == 'method':
await test_method(session, url)
elif response.status != 404:
print(f'{R}[-] {Y}[{url}] {W}[{response.status}]')
else:
pass
except asyncio.TimeoutError:
print(f'{Y}[!] Timeout :{C} {url}{W}')
except Exception as exc:
print(f'{Y}[!] Exception [query] [{url}] :{W} {str(exc)}')
async def test_method(session, url):
try:
response = await session.get(url, allow_redirects=True)
if response.status != 404:
await clout(response.url)
else:
pass
except asyncio.TimeoutError:
print(f'{Y}[!] Timeout :{C} {url}{W}')
except Exception as exc:
print(f'{Y}[!] Exception [test_method] [{url}] :{W} {exc}')
return
async def test_url(url):
url = str(url)
proto = url.split('://')[0]
ext = tldextract.extract(url)
subd = ext.subdomain
if subd != '':
base_url = proto + '://' + subd + '.' + ext.registered_domain
else:
base_url = proto + '://' + ext.registered_domain
if url.endswith('/') == False and base_url.endswith('/') == True:
if url + '/' != base_url:
await clout(url)
else:
pass
elif url.endswith('/') == True and base_url.endswith('/') == False:
if url != base_url + '/':
await clout(url)
else:
pass
elif url != base_url:
await clout(url)
else:
pass
async def test_sub(url, resp_url):
if url == str(resp_url):
await clout(url)
else:
pass
async def test_string(session, url, data):
try:
response = await session.get(url)
if response.status == 404:
pass
elif response.status not in codes:
print(f'{R}[-] {Y}[{url}] {W}[{response.status}]')
else:
resp_body = await response.text()
if data in resp_body:
pass
else:
await clout(response.url)
except asyncio.TimeoutError:
print(f'{Y}[!] Timeout :{C} {url}{W}')
return
except Exception as exc:
print(f'{Y}[!] Exception [test_string] [{url}] :{W} {exc}')
return
async def test_api(session, url, endpoint):
try:
response = await session.get(endpoint)
if response.status != 404:
resp_body = loads(await response.text())
if len(resp_body) != 0:
tmp_vars = ['results', 'users', 'username']
for var in tmp_vars:
try:
if resp_body.get(var) != None:
if len(resp_body[var]) != 0:
await clout(url)
return
else:
pass
else:
pass
except:
pass
else:
pass
else:
pass
except Exception as exc:
print(f'{Y}[!] Exception [test_api] [{url}] :{W} {exc}')
return
async def test_alt(session, url, alt_url):
try:
response = await session.get(alt_url, allow_redirects=False)
if response.status != 200:
pass
else:
await clout(url)
except Exception as exc:
print(f'{Y}[!] Exception [test_alt] [{url}] :{W} {str(exc)}')
return
async def test_redirect(session, url):
try:
response = await session.head(url, allow_redirects=False)
except asyncio.TimeoutError:
print(f'{Y}[!] Timeout :{C} {url}{W}')
return
except Exception as exc:
print(f'{Y}[!] Exception [test_redirect] [{url}] :{W} {str(exc)}')
return
try:
location = response.headers['Location']
if url != location:
pass
else:
await clout(url)
except KeyError:
await clout(url)
def autosave(uname, ulist, mode, found, start_time, end_time):
    """Write the discovered profile URLs to a timestamped report file
    under loc_data.

    Args:
        uname (str): single username (mode == 'single').
        ulist (list): usernames (mode 'list' / 'file').
        mode (str): 'single', 'list' or 'file'.
        found (list): profile URLs collected during the run.
        start_time / end_time (datetime): wall-clock run boundaries.
    """
    if not path.exists(loc_data):
        makedirs(loc_data)
    else:
        pass
    if mode == 'single':
        filename = f'{uname}_{str(int(datetime.now().timestamp()))}.txt'
        username = uname
    elif mode == 'list' or mode == 'file':
        filename = f'session_{str(int(datetime.now().timestamp()))}.txt'
        username = ulist
    else:
        pass
    with open(loc_data + filename, 'w') as outfile:
        outfile.write(f'nexfil v{version}\n')
        outfile.write(f'Username : {username}\n')
        outfile.write(f'Start Time : {start_time.strftime("%c")}\n')
        outfile.write(f'End Time : {end_time.strftime("%c")}\n')
        outfile.write(f'Total Profiles Found : {len(found)}\n\n')
        outfile.write(f'URLs : \n\n')
        for url in found:
            outfile.write(f'{url}\n')
        outfile.write(f'{"-" * 40}\n')
    print(f'{G}[+] {C}Saved : {W}{loc_data + filename}')
async def main(uname):
tasks = []
print(f'\n{G}[+] {C}Target :{W} {uname}\n')
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux i686; rv:88.0) Gecko/20100101 Firefox/88.0'
}
resolver = aiohttp.AsyncResolver(nameservers=dns)
timeout = aiohttp.ClientTimeout(total=tout)
conn = aiohttp.TCPConnector(
limit=0,
family=socket.AF_INET,
ssl=False,
resolver=resolver
)
print(f'{Y}[!] Finding Profiles...{W}\n')
async with aiohttp.ClientSession(connector=conn, headers=headers, timeout=timeout) as session:
for block in urls_json:
curr_url = block['url'].format(uname)
test = block['test']
data = block['data']
task = asyncio.create_task(query(session, curr_url, test, data, uname))
tasks.append(task)
await asyncio.gather(*tasks)
def netcheck():
print(f'\n{G}[+] {C}Checking Connectivity...{W}')
try:
rqst = get('https://github.com/', timeout=5)
if rqst.status_code == 200:
pass
else:
print(f'{Y}[!] {C}Status : {W}{rqst.status_code}')
except exceptions.ConnectionError:
print(f'{R}[-] {C}Connection Error! Exiting.{W}')
exit()
def launch(uname):
    """Run one full lookup for *uname* on a fresh event loop.

    asyncio.run() replaces the manual new_event_loop / run_until_complete /
    close dance: it also cancels leftover tasks and shuts down async
    generators before closing, which the original skipped.
    """
    asyncio.run(main(uname))
try:
netcheck()
fetch_meta()
banner()
print(f'{Y}[!] Loading URLs...{W}')
with open('url_store.json', 'r') as url_store:
raw_data = url_store.read()
urls_json = loads(raw_data)
print(f'{G}[+] {W}{len(urls_json)} {C}URLs Loaded!{W}')
print(f'{G}[+] {C}Timeout : {W}{tout} secs')
print(f'{G}[+] {C}DNS Servers : {W}{dns}')
start_time = datetime.now()
if mode == 'single':
launch(uname)
elif mode == 'list':
for uname in ulist:
ulist[ulist.index(uname)] = uname.strip()
launch(uname)
elif mode == 'file':
ulist = []
try:
with open(fname, 'r') as wdlist:
tmp = wdlist.readlines()
for user in tmp:
ulist.append(user.strip())
for uname in ulist:
uname = uname.strip()
launch(uname)
except Exception as exc:
print(f'{Y}[!] Exception [file] :{W} {str(exc)}')
exit()
else:
pass
end_time = datetime.now()
delta = end_time - start_time
if mode == 'single':
print(f'\n{G}[+] {C}Lookup for {Y}{uname} {C}completed in {W}{delta}')
print(f'\n{G}[+] {Y}{len(found)} {C}Possible Profiles Found for {Y}{uname}{W}')
elif mode == 'list' or mode == 'file':
print(f'\n{G}[+] {C}Lookup for {Y}{ulist} {C}completed in {W}{delta}')
print(f'\n{G}[+] {Y}{len(found)} {C}Possible Profiles Found for {Y}{ulist}{W}')
if len(found) != 0:
autosave(uname, ulist, mode, found, start_time, end_time)
else:
pass
except KeyboardInterrupt:
print(f'{R}[-] {C}Keyboard Interrupt.{W}')
exit()
|
from scripts.python.data_loader import CORE50
import torch
from tqdm import tqdm
from faiss_knn import knn
import cv2 as cv
import numpy as np
from torchvision import transforms
from sklearn.metrics import confusion_matrix
import os
import pathlib
import wandb
from umap import UMAP
import matplotlib.pyplot as plt
import seaborn as sns
import time
# Training-time augmentation pipeline (used when cfg['augmentation'] is on).
aug_transforms = transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomAdjustSharpness(
        sharpness_factor=2),
    transforms.RandomAutocontrast(),
    transforms.RandomResizedCrop(
        scale=(0.16, 1), ratio=(0.75, 1.33), size=224),
    transforms.RandomHorizontalFlip(0.5),
    transforms.RandomVerticalFlip(0.5),
    transforms.Resize(224),
    transforms.ToTensor(),
    # ImageNet normalisation statistics.
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# Deterministic pipeline for the test set (and non-augmented training).
std_transforms = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# Inference only -- gradients are never needed anywhere in this script.
torch.set_grad_enabled(False)
def load_features_for_testing(fe, test_x, features_size, batch_size=32, postfix=None):
    """Extract (or load cached) feature vectors for the fixed test set.

    Args:
        fe: feature-extractor model, called on a CUDA batch.
        test_x (np.ndarray): raw test images -- assumed (N, H, W, C) uint8-
            convertible; TODO confirm against the CORE50 loader.
        features_size (int): expected embedding dimensionality.
        batch_size (int): minibatch size for extraction.
        postfix (str | None): cache-file name suffix (e.g. model name).

    Returns:
        torch.Tensor: (N, features_size) float32 feature matrix.
    """
    cache_name = f'test_features{postfix}.pth'
    if pathlib.Path(cache_name).exists():
        print('Found saved features')
        saved_feats = torch.load(cache_name)
        # Re-extract when the cached embedding size doesn't match the model.
        if saved_feats.shape[1] == features_size:
            return saved_feats
    features = torch.empty((0, features_size), dtype=torch.float32)
    for i in tqdm(range(test_x.shape[0] // batch_size + 1)):
        x_minibatch = test_x[i * batch_size: (i + 1) * batch_size]
        if len(x_minibatch) == 0:
            # BUGFIX: when N % batch_size == 0 the final slice is empty and
            # torch.stack([]) raises RuntimeError -- skip the empty tail.
            continue
        x = [std_transforms(el) for el in x_minibatch.astype(np.uint8)]
        x = torch.stack(x)
        feats = fe(x.cuda()).cpu()
        features = torch.cat((features, feats))
    torch.save(features, cache_name)
    return features
def visualize_features(x_data, y_data, folder='./visualizations', return_array=False, iter=0):
    """Project features to 2-D with UMAP and save a scatter plot.

    Args:
        x_data: feature matrix (squeezed before fitting).
        y_data: per-sample labels used for point colouring.
        folder (str): output directory for the PNG (created if missing).
        return_array (bool): if True, also return the saved plot re-read
            as a 640x480 image array (for wandb logging).
        iter (int): iteration index embedded in the output file name.
    """
    # time_start = time.time()
    umap = UMAP()
    results = umap.fit_transform(x_data.squeeze())
    # print('visualization done! Time elapsed: {} seconds'.format(
    #     time.time()-time_start))
    fig = plt.figure(figsize=(16, 10))
    sns.scatterplot(
        x=results[:, 0], y=results[:, 1],
        hue=y_data,
        palette=sns.color_palette("hls", len(set(y_data))),
        legend=None,
    )
    path = pathlib.Path(folder)
    path.mkdir(parents=True, exist_ok=True)
    plt.savefig(f'{folder}/{iter}_iteration.png')
    if return_array:
        # Read back the file just written as a BGR array.
        im = cv.imread(f'{folder}/{iter}_iteration.png')
        im = cv.resize(im, (640, 480))
        return im
def run_experiment(run, cfg=None):
    """Run one CORe50 NICv2-391 continual-learning experiment: DINO
    features + incremental kNN, logged to wandb.

    Args:
        run (int): CORe50 run index (selects the batch ordering).
        cfg (dict): settings -- feature_extractor_model, embedding_size,
            N_neighbours, runs, augmentation, pca.
    """
    dataset = CORE50(root='core50_dataset/core50_128x128',
                     scenario="nicv2_391", preload=False, run=run)
    # BUGFIX: inner subscripts must use a different quote style from the
    # enclosing f-string -- same-quote nesting is a SyntaxError < 3.12.
    name = f"{cfg['feature_extractor_model']}_{cfg['embedding_size']}_{cfg['N_neighbours']}n_{cfg['runs']}r"
    if cfg['augmentation']:
        name += '_aug'
    if cfg['pca']:
        name += f"_{cfg['pca'].split('_')[2]}pca"
    print(name)
    wandb.init(project='core50_DINO_knn', reinit=True,
               name=name + '_' + str(run), config=cfg)
    transforms = aug_transforms if cfg['augmentation'] else std_transforms
    # Fixed test set shared by every incremental step.
    test_x, test_y = dataset.get_test_set()
    fe = torch.hub.load('facebookresearch/dino:main',
                        cfg['feature_extractor_model'])
    fe.cuda()
    fe.eval()
    classifier = knn('core50.pth', resume=False, knn_size=cfg['N_neighbours'])
    batch_size = 512
    test_x = load_features_for_testing(
        fe, test_x, cfg['embedding_size'], batch_size=batch_size,
        postfix=f"_{cfg['feature_extractor_model']}")
    # Optionally project features through a pre-fitted PCA model.
    if cfg['pca']:
        pca_trained_arch = cfg['pca'].split('_')[-1][:-4]
        print(pca_trained_arch)
        pca = torch.load(cfg['pca'])
        # The PCA must have been fitted on the same backbone family.
        assert cfg['feature_extractor_model'].split('_')[-1] == pca_trained_arch
        test_x = pca.transform(test_x.numpy().astype(np.float32))
    # Loop over the incremental training batches.
    total_pbar = tqdm(enumerate(dataset), total=dataset.nbatch[dataset.scenario])
    for iteration_step, train_batch in total_pbar:
        # WARNING: train_batch is one whole incremental batch, not a mini-batch.
        train_x, train_y = train_batch
        # Train stage: extract features and add them to the kNN index.
        for i in range(train_x.shape[0] // batch_size + 1):
            x_minibatch = train_x[i * batch_size: (i + 1) * batch_size]
            y_minibatch = train_y[i * batch_size: (i + 1) * batch_size]
            if len(x_minibatch) == 0:
                # BUGFIX: empty tail slice when N % batch_size == 0 would
                # make torch.stack raise.
                continue
            x = [transforms(el) for el in x_minibatch.astype(np.uint8)]
            x = torch.stack(x)
            feats = fe(x.cuda()).cpu()
            if cfg['pca']:
                feats = pca.transform(feats.numpy()).astype(np.float32)
            classifier.add_points(feats, y_minibatch)
        # Test stage: classify the fixed test set in large chunks.
        preds = np.empty((0))
        test_batch_size = 4096 * 8
        start_time = time.time()
        for i in tqdm(range(test_x.shape[0] // test_batch_size + 1), desc='test'):
            x_minibatch = test_x[i * test_batch_size: (i + 1) * test_batch_size]
            y_minibatch = test_y[i * test_batch_size: (i + 1) * test_batch_size]
            clss, confs, dists = classifier.classify(x_minibatch)
            preds = np.concatenate((preds, clss))
        duration = time.time() - start_time
        # Per-class accuracy from the confusion-matrix diagonal.
        M = confusion_matrix(test_y, preds)
        accs = M.diagonal() / M.sum(axis=1)
        total_pbar.set_description(f'{iteration_step}, acc: {accs.mean():.3f}')
        logs_keys = ['accs/mean', 'accs/std', 'time to test kNN', 'data size']
        logs_vals = [accs.mean(), accs.std(), duration, len(classifier.x_data)]
        logs_dict = dict(zip(logs_keys, logs_vals))
        wandb.log(logs_dict, step=iteration_step)
        # # save features visualization to WanDB
        # plot = visualize_features(
        #     classifier.x_data, classifier.y_data, return_array=True, iter=iteration_step)
        # wandb.log({"2D visualization": wandb.Image(plot)},
        #           step=iteration_step)
if __name__ == "__main__":
# archs = ['dino_vitb8', #'dino_xcit_small_12_p8'
# ]
fs = os.listdir('pca')
fs.sort(key=lambda x: int(x.split('_')[2]))
for pca in fs[-4:]:
cfg = {
'feature_extractor_model': 'dino_vits16',
'embedding_size': 384,
'N_neighbours': 10,
'runs': 1,
'augmentation': False,
'pca': 'pca/'+ pca
}
for run in range(cfg['runs']):
run_experiment(run, cfg)
| from scripts.python.data_loader import CORE50
import torch
from tqdm import tqdm
from faiss_knn import knn
import cv2 as cv
import numpy as np
from torchvision import transforms
from sklearn.metrics import confusion_matrix
import os
import pathlib
import wandb
from umap import UMAP
import matplotlib.pyplot as plt
import seaborn as sns
import time
aug_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomAdjustSharpness(
sharpness_factor=2),
transforms.RandomAutocontrast(),
transforms.RandomResizedCrop(
scale=(0.16, 1), ratio=(0.75, 1.33), size=224),
transforms.RandomHorizontalFlip(0.5),
transforms.RandomVerticalFlip(0.5),
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
std_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
torch.set_grad_enabled(False)
def load_features_for_testing(fe, test_x, features_size, batch_size=32, postfix=None):
    """Extract (or load cached) feature vectors for the fixed test set.

    Args:
        fe: feature-extractor model, called on a CUDA batch.
        test_x (np.ndarray): raw test images -- assumed (N, H, W, C) uint8-
            convertible; TODO confirm against the CORE50 loader.
        features_size (int): expected embedding dimensionality.
        batch_size (int): minibatch size for extraction.
        postfix (str | None): cache-file name suffix (e.g. model name).

    Returns:
        torch.Tensor: (N, features_size) float32 feature matrix.
    """
    cache_name = f'test_features{postfix}.pth'
    if pathlib.Path(cache_name).exists():
        print('Found saved features')
        saved_feats = torch.load(cache_name)
        # Re-extract when the cached embedding size doesn't match the model.
        if saved_feats.shape[1] == features_size:
            return saved_feats
    features = torch.empty((0, features_size), dtype=torch.float32)
    for i in tqdm(range(test_x.shape[0] // batch_size + 1)):
        x_minibatch = test_x[i * batch_size: (i + 1) * batch_size]
        if len(x_minibatch) == 0:
            # BUGFIX: when N % batch_size == 0 the final slice is empty and
            # torch.stack([]) raises RuntimeError -- skip the empty tail.
            continue
        x = [std_transforms(el) for el in x_minibatch.astype(np.uint8)]
        x = torch.stack(x)
        feats = fe(x.cuda()).cpu()
        features = torch.cat((features, feats))
    torch.save(features, cache_name)
    return features
def visualize_features(x_data, y_data, folder='./visualizations', return_array=False, iter=0):
# time_start = time.time()
umap = UMAP()
results = umap.fit_transform(x_data.squeeze())
# print('visualization done! Time elapsed: {} seconds'.format(
# time.time()-time_start))
fig = plt.figure(figsize=(16, 10))
sns.scatterplot(
x=results[:, 0], y=results[:, 1],
hue=y_data,
palette=sns.color_palette("hls", len(set(y_data))),
legend=None,
)
path = pathlib.Path(folder)
path.mkdir(parents=True, exist_ok=True)
plt.savefig(f'{folder}/{iter}_iteration.png')
if return_array:
im = cv.imread(f'{folder}/{iter}_iteration.png')
im = cv.resize(im, (640, 480))
return im
def run_experiment(run, cfg=None):
    """Run one incremental-learning experiment on the CORE50 NICv2-391 scenario.

    A frozen DINO backbone extracts features, a kNN classifier is grown over
    the incremental training batches, and per-step test accuracy is logged to
    Weights & Biases.

    Parameters
    ----------
    run : int
        CORE50 run index (also appended to the wandb run name).
    cfg : dict
        Keys: 'feature_extractor_model', 'embedding_size', 'N_neighbours',
        'runs', 'augmentation', 'pca' (path to a saved PCA, or falsy).
    """
    dataset = CORE50(root='core50_dataset/core50_128x128',
                     scenario="nicv2_391", preload=False, run=run)
    # Build a descriptive experiment name from the configuration.
    name = f"{cfg['feature_extractor_model']}_{cfg['embedding_size']}_{cfg['N_neighbours']}n_{cfg['runs']}r"
    if cfg['augmentation']:
        name += '_aug'
    if cfg['pca']:
        name += f'_{cfg["pca"].split("_")[2]}pca'
    print(name)
    wandb.init(project='core50_DINO_knn', reinit=True,
               name=name + '_' + str(run), config=cfg)
    # Renamed from `transforms` so the torchvision `transforms` module used at
    # module level is not shadowed inside this function.
    batch_transforms = aug_transforms if cfg['augmentation'] else std_transforms
    # Get the fixed test set
    test_x, test_y = dataset.get_test_set()
    fe = torch.hub.load('facebookresearch/dino:main',
                        cfg['feature_extractor_model'])
    fe.cuda()
    fe.eval()
    classifier = knn('core50.pth', resume=False, knn_size=cfg['N_neighbours'])
    batch_size = 512
    test_x = load_features_for_testing(
        fe, test_x, cfg['embedding_size'], batch_size=batch_size, postfix=f'_{cfg["feature_extractor_model"]}')
    # Prepare the (optional) pre-fitted PCA projection.
    if cfg['pca']:
        pca_trained_arch = cfg['pca'].split('_')[-1][:-4]
        print(pca_trained_arch)
        pca = torch.load(cfg['pca'])
        # The PCA must have been fitted on features of the same architecture.
        assert cfg['feature_extractor_model'].split('_')[-1] == pca_trained_arch
        test_x = pca.transform(test_x.numpy().astype(np.float32))
    # Loop over the training incremental batches.
    total_pbar = tqdm(enumerate(dataset), total=dataset.nbatch[dataset.scenario])
    for iteration_step, train_batch in total_pbar:
        # WARNING train_batch is NOT a mini-batch, but one incremental batch!
        train_x, train_y = train_batch
        # Train stage: extract features minibatch-wise and add them to the kNN.
        for i in range(train_x.shape[0] // batch_size + 1):
            x_minibatch = train_x[i*batch_size: (i+1)*batch_size]
            y_minibatch = train_y[i*batch_size: (i+1)*batch_size]
            if len(x_minibatch) == 0:
                # When shape[0] is an exact multiple of batch_size the extra
                # iteration yields an empty slice; torch.stack([]) would raise.
                continue
            x = [batch_transforms(el)
                 for el in x_minibatch.astype(np.uint8)]
            x = torch.stack(x)
            feats = fe(x.cuda()).cpu()
            if cfg['pca']:
                feats = pca.transform(feats.numpy()).astype(np.float32)
            classifier.add_points(feats, y_minibatch)
        # Test stage: classify the cached test features in large chunks.
        preds = np.empty((0))
        test_batch_size = 4096 * 8
        start_time = time.time()
        for i in tqdm(range(test_x.shape[0] // test_batch_size + 1), desc='test'):
            x_minibatch = test_x[i*test_batch_size: (i+1)*test_batch_size]
            y_minibatch = test_y[i*test_batch_size: (i+1)*test_batch_size]
            clss, confs, dists = classifier.classify(x_minibatch)
            preds = np.concatenate((preds, clss))
        duration = time.time() - start_time
        # Mean per-class accuracy from the confusion matrix diagonal.
        M = confusion_matrix(test_y, preds)
        accs = M.diagonal()/M.sum(axis=1)
        total_pbar.set_description(f'{iteration_step}, acc: {accs.mean():.3f}')
        logs_keys = ['accs/mean', 'accs/std', 'time to test kNN', 'data size']
        logs_vals = [accs.mean(), accs.std(), duration, len(classifier.x_data)]
        wandb.log(dict(zip(logs_keys, logs_vals)), step=iteration_step)
if __name__ == "__main__":
# archs = ['dino_vitb8', #'dino_xcit_small_12_p8'
# ]
fs = os.listdir('pca')
fs.sort(key=lambda x: int(x.split('_')[2]))
for pca in fs[-4:]:
cfg = {
'feature_extractor_model': 'dino_vits16',
'embedding_size': 384,
'N_neighbours': 10,
'runs': 1,
'augmentation': False,
'pca': 'pca/'+ pca
}
for run in range(cfg['runs']):
run_experiment(run, cfg)
|
# -*- coding:utf-8 -*-
import sys
from owlmixin import OwlMixin, TOption
from owlmixin.owlcollections import TList
from jumeaux.addons.final import FinalExecutor
from jumeaux.utils import jinja2_format, get_jinja2_format_error, when_optional_filter
from jumeaux.logger import Logger
from jumeaux.models import FinalAddOnPayload, Notifier, FinalAddOnReference, Report
from jumeaux.notification_handlers import create_notification_handler
# Module logger and the prefix shared by all log lines of this add-on.
logger: Logger = Logger(__name__)
LOG_PREFIX = "[final/notify]"
class Notify(OwlMixin):
    """One notification rule: which notifier to use, what to send, and when."""
    # Key of the notifier in the Jumeaux notifiers configuration.
    notifier: str
    # Jinja2 template rendered with the report before sending.
    message: str
    # Optional jinja2 condition; the notification fires only when it holds.
    when: TOption[str]
class Config(OwlMixin):
    """Add-on configuration: the list of notification rules (default: none)."""
    notifies: TList[Notify] = []
def send(message: str, notifier: Notifier) -> TOption[str]:
    """Deliver *message* through *notifier*; returns an error message on failure."""
    logger.info_lv1(notifier.logging_message)
    handler = create_notification_handler(notifier)
    return handler.notify(message)
def need_to_notify(notify: Notify, report: Report) -> bool:
    """Return True when *notify*'s optional `when` condition holds for *report*.

    The decision is logged either way; an unset `when` is shown as "None".
    """
    if when_optional_filter(notify.when, report.to_dict(ignore_none=False)):
        # BUGFIX: the f-strings reused the enclosing double quotes
        # (f"...get_or("None")...") — a SyntaxError before Python 3.12 — and
        # `Don"t` terminated the string early on every Python version.
        logger.info_lv3(
            f"{LOG_PREFIX} Notify by {notify.notifier}. (notify.when => {notify.when.get_or('None')})"
        )
        return True
    else:
        logger.info_lv3(
            f"{LOG_PREFIX} Don't Notify by {notify.notifier}. (notify.when => {notify.when.get_or('None')})"
        )
        return False
class Executor(FinalExecutor):
    """`final/notify` add-on: renders jinja2 messages from the final report
    and sends them through the configured notifiers."""

    def __init__(self, config: dict):
        self.config: Config = Config.from_dict(config or {})
        # Validate every message template up front; abort with clear logs if
        # any template fails to compile.
        errors: TList[str] = self.config.notifies.map(
            lambda x: get_jinja2_format_error(x.message).get()
        ).filter(lambda x: x is not None)
        if errors:
            logger.error(f"{LOG_PREFIX} Illegal format in `notifies[*].message`.")
            logger.error(f"{LOG_PREFIX} Please check your configuration yaml files.")
            logger.error(f"{LOG_PREFIX} --- Error messages ---")
            errors.map(lambda x: logger.error(f"{LOG_PREFIX} * `{x}`"))
            # exit=True makes the logger terminate the process.
            logger.error(f"{LOG_PREFIX} ---------------------", exit=True)

    def exec(self, payload: FinalAddOnPayload, reference: FinalAddOnReference) -> FinalAddOnPayload:
        """Send each notification whose `when` condition holds; exit the
        process when no notifiers are configured or any send fails."""
        if reference.notifiers.is_none():
            logger.error(f"{LOG_PREFIX} There are no notifiers. Please set notifiers in config.")
            logger.error(
                f"{LOG_PREFIX} See https://tadashi-aikawa.github.io/jumeaux/ja/getstarted/configuration/"
            )
            sys.exit(1)
        # Each send() yields TOption[str]: present only when delivery failed.
        errors: TList[TOption[str]] = self.config.notifies.filter(
            lambda n: need_to_notify(n, payload.report)
        ).map(
            lambda x: send(
                jinja2_format(x.message, payload.report.to_dict(ignore_none=False)),
                reference.notifiers.get()
                .get(x.notifier)
                .get(),  # TODO: The case that notifier not found
            )
        )
        if errors.reject(lambda m: m.is_none()):
            errors.map(lambda m: m.map(logger.error))
            sys.exit(1)
        return payload
| # -*- coding:utf-8 -*-
import sys
from owlmixin import OwlMixin, TOption
from owlmixin.owlcollections import TList
from jumeaux.addons.final import FinalExecutor
from jumeaux.utils import jinja2_format, get_jinja2_format_error, when_optional_filter
from jumeaux.logger import Logger
from jumeaux.models import FinalAddOnPayload, Notifier, FinalAddOnReference, Report
from jumeaux.notification_handlers import create_notification_handler
# Module logger and the prefix shared by all log lines of this add-on.
logger: Logger = Logger(__name__)
LOG_PREFIX = "[final/notify]"
class Notify(OwlMixin):
    """One notification rule: which notifier to use, what to send, and when."""
    # Key of the notifier in the Jumeaux notifiers configuration.
    notifier: str
    # Jinja2 template rendered with the report before sending.
    message: str
    # Optional jinja2 condition; the notification fires only when it holds.
    when: TOption[str]
class Config(OwlMixin):
    """Add-on configuration: the list of notification rules (default: none)."""
    notifies: TList[Notify] = []
def send(message: str, notifier: Notifier) -> TOption[str]:
    """Deliver *message* through *notifier*; returns an error message on failure."""
    logger.info_lv1(notifier.logging_message)
    return create_notification_handler(notifier).notify(message)
def need_to_notify(notify: Notify, report: Report) -> bool:
    """Return True when *notify*'s optional `when` condition holds for *report*.

    The decision is logged either way; an unset `when` is shown as "None".
    """
    if when_optional_filter(notify.when, report.to_dict(ignore_none=False)):
        logger.info_lv3(
            f"{LOG_PREFIX} Notify by {notify.notifier}. (notify.when => {notify.when.get_or('None')})"
        )
        return True
    else:
        # BUGFIX: this log message was missing the closing parenthesis that
        # the Notify branch above has.
        logger.info_lv3(
            f"{LOG_PREFIX} Don't Notify by {notify.notifier}. (notify.when => {notify.when.get_or('None')})"
        )
        return False
class Executor(FinalExecutor):
    """`final/notify` add-on: renders jinja2 messages from the final report
    and sends them through the configured notifiers."""

    def __init__(self, config: dict):
        self.config: Config = Config.from_dict(config or {})
        # Validate every message template up front; abort with clear logs if
        # any template fails to compile.
        errors: TList[str] = self.config.notifies.map(
            lambda x: get_jinja2_format_error(x.message).get()
        ).filter(lambda x: x is not None)
        if errors:
            logger.error(f"{LOG_PREFIX} Illegal format in `notifies[*].message`.")
            logger.error(f"{LOG_PREFIX} Please check your configuration yaml files.")
            logger.error(f"{LOG_PREFIX} --- Error messages ---")
            errors.map(lambda x: logger.error(f"{LOG_PREFIX} * `{x}`"))
            # exit=True makes the logger terminate the process.
            logger.error(f"{LOG_PREFIX} ---------------------", exit=True)

    def exec(self, payload: FinalAddOnPayload, reference: FinalAddOnReference) -> FinalAddOnPayload:
        """Send each notification whose `when` condition holds; exit the
        process when no notifiers are configured or any send fails."""
        if reference.notifiers.is_none():
            logger.error(f"{LOG_PREFIX} There are no notifiers. Please set notifiers in config.")
            logger.error(
                f"{LOG_PREFIX} See https://tadashi-aikawa.github.io/jumeaux/ja/getstarted/configuration/"
            )
            sys.exit(1)
        # Each send() yields TOption[str]: present only when delivery failed.
        errors: TList[TOption[str]] = self.config.notifies.filter(
            lambda n: need_to_notify(n, payload.report)
        ).map(
            lambda x: send(
                jinja2_format(x.message, payload.report.to_dict(ignore_none=False)),
                reference.notifiers.get()
                .get(x.notifier)
                .get(),  # TODO: The case that notifier not found
            )
        )
        if errors.reject(lambda m: m.is_none()):
            errors.map(lambda m: m.map(logger.error))
            sys.exit(1)
        return payload
|
# Ch 7 - Knights Problem
# The project for this chapter is to figure out the minimal number of chess knights necessary to attack every square on a
# chess board. This means our chessboard must be at least 3x4 for some number of knights to be able to attack all squares
# on the board because a knight can only attack certain squares relative to its own location:
import random
import unittest
import genetic
from datetime import datetime as dt
class Position:
    """A board square; equality and hashing are by (X, Y) coordinates."""

    X = None
    Y = None

    def __init__(self, x, y):
        self.X = x
        self.Y = y

    def __str__(self):
        return '{},{}'.format(self.X, self.Y)

    def __eq__(self, other):
        return (self.X, self.Y) == (other.X, other.Y)

    def __hash__(self):
        # Collision-free as long as Y < 1000.
        return 1000 * self.X + self.Y
def get_attacks(location, boardWidth, boardHeight):
    """Return the distinct on-board squares a knight at *location* attacks."""
    offsets = [-2, -1, 1, 2]
    targets = set()
    for dx in offsets:
        nx = dx + location.X
        if not (0 <= nx < boardWidth):
            continue
        for dy in offsets:
            ny = dy + location.Y
            # A knight move pairs a 1-offset with a 2-offset, never equal magnitudes.
            if 0 <= ny < boardHeight and abs(dy) != abs(dx):
                targets.add(Position(nx, ny))
    return list(targets)
# Assign a specific number of knights to unique board positions
def create(fnGetRandomPosition, expectedKnights):
    """Build the initial gene list: *expectedKnights* sampled positions."""
    return [fnGetRandomPosition() for _ in range(expectedKnights)]
def mutate(genes, boardWidth, boardHeight, allPositions, nonEdgePositions):
    """Mutate *genes* in place: replace one (occasionally two) knights with a
    position likely to cover a currently unattacked square."""
    # 1-in-11 chance of performing two replacements instead of one.
    count = 2 if random.randint(0, 10) == 0 else 1
    while count > 0:
        count -= 1
        # figure out which knights are attacking which squares. The array in the dictionary has each knight’s gene index
        positionToKnightIndexes = dict((p, []) for p in allPositions)
        for i, knight in enumerate(genes):
            for position in get_attacks(knight, boardWidth, boardHeight):
                positionToKnightIndexes[position].append(i)
        # list of indexes of knights whose attacks are all covered by some other knight
        # and build a list of the squares that are not under attack.
        knightIndexes = set(i for i in range(len(genes)))
        unattacked = []
        for kvp in positionToKnightIndexes.items():
            if len(kvp[1]) > 1:
                continue
            if len(kvp[1]) == 0:
                unattacked.append(kvp[0])
                continue
            # A square with exactly one attacker: that knight is essential.
            for p in kvp[1]:
                if p in knightIndexes:
                    knightIndexes.remove(p)
        # build the list of locations from which the unattacked squares can be attacked.
        potentialKnightPositions = \
            [p for positions in
             map(lambda x: get_attacks(x, boardWidth, boardHeight), unattacked)
             for p in positions if p in nonEdgePositions] \
            if len(unattacked) > 0 else nonEdgePositions
        # choose a gene (knight) to replace.
        geneIndex = random.randrange(0, len(genes)) \
            if len(knightIndexes) == 0 \
            else random.choice([i for i in knightIndexes])
        # replace that knight with one likely to improve fitness
        position = random.choice(potentialKnightPositions)
        genes[geneIndex] = position
class Board:
    """ASCII rendering of knight positions ('N') on a '.'-filled grid."""

    def __init__(self, positions, width, height):
        board = [['.'] * width for _ in range(height)]
        for index in range(len(positions)):
            knightPosition = positions[index]
            board[knightPosition.Y][knightPosition.X] = 'N'
        self._board = board
        self._width = width
        self._height = height

    def print(self):
        # 0,0 prints bottom left.
        # BUGFIX: the f-strings reused the enclosing quote character
        # (f'...{' '.join(...)}') which is a SyntaxError before Python 3.12;
        # use double quotes inside for portability.
        for i in reversed(range(self._height)):
            print(f'{i}\t{" ".join(self._board[i])}')
        print(f' \t{" ".join(map(str, range(self._width)))}')
def display(candidate, startTime, boardWidth, boardHeight):
    """Print the candidate's board, its gene list, fitness and elapsed time."""
    timeDiff = dt.now() - startTime
    board = Board(candidate.Genes, boardWidth, boardHeight)
    board.print()
    print('{0}\n\t{1}\t{2}'.format(
        ' '.join(map(str, candidate.Genes)),
        candidate.Fitness,
        str(timeDiff)
    ))
def get_fitness(genes, boardWidth, boardHeight):
    """Fitness = number of distinct squares attacked by the knights in *genes*."""
    attacked = set()
    for knight in genes:
        attacked.update(get_attacks(knight, boardWidth, boardHeight))
    return len(attacked)
class KnightsTest(unittest.TestCase):
    """Drives the genetic engine until the expected number of knights
    attacks every square of the board."""

    def test_10x10(self):
        width = 10
        height = 10
        # 22 knights are expected to suffice on a 10x10 board.
        self.find_knight_positions(width, height, 22)

    def find_knight_positions(self, boardWidth, boardHeight, expectedKnights):
        startTime = dt.now()
        allPositions = [Position(x, y) for y in range(boardHeight) for x in range(boardWidth)]
        # On boards of at least 6x6, restrict knights to non-edge squares
        # (edge squares attack too few positions to be worth occupying).
        if boardWidth < 6 or boardHeight < 6:
            nonEdgePositions = allPositions
        else:
            nonEdgePositions = [i for i in allPositions if 0 < i.X < boardWidth - 1 and 0 < i.Y < boardHeight - 1]

        def fnDisplay(candidate):
            display(candidate, startTime, boardWidth, boardHeight)

        def fnGetFitness(genes):
            return get_fitness(genes, boardWidth, boardHeight)

        def fnGetRandomPosition():
            return random.choice(nonEdgePositions)

        def fnMutate(genes):
            mutate(genes, boardWidth, boardHeight, allPositions, nonEdgePositions)

        def fnCreate():
            return create(fnGetRandomPosition, expectedKnights)

        # Optimum: every square on the board is attacked.
        optimalFitness = boardWidth * boardHeight
        best = genetic.get_best(fnGetFitness, None, optimalFitness, None, fnDisplay, fnMutate, fnCreate)
        self.assertTrue(not optimalFitness > best.Fitness)
# The project for this chapter is to figure out the minimal number of chess knights necessary to attack every square on a
# chess board. This means our chessboard must be at least 3x4 for some number of knights to be able to attack all squares
# on the board because a knight can only attack certain squares relative to its own location:
import random
import unittest
import genetic
from datetime import datetime as dt
class Position:
    """A board square; equality and hashing are by (X, Y) coordinates."""

    # Class-level defaults, overwritten per instance in __init__.
    X = None
    Y = None

    def __init__(self, x, y):
        self.X = x
        self.Y = y

    def __str__(self):
        return f'{self.X},{self.Y}'

    def __eq__(self, other):
        return self.X == other.X and self.Y == other.Y

    def __hash__(self):
        # Collision-free as long as Y < 1000.
        return self.X * 1000 + self.Y
def get_attacks(location, boardWidth, boardHeight):
    """Return the distinct on-board squares a knight at *location* attacks.

    Knight moves pair a 1-offset with a 2-offset; the `abs(y) != abs(x)`
    guard rejects the equal-magnitude (non-knight) combinations.
    """
    return [i for i in set(
        Position(x + location.X, y + location.Y)
        for x in [-2, -1, 1, 2] if 0 <= x + location.X < boardWidth
        for y in [-2, -1, 1, 2] if 0 <= y + location.Y < boardHeight
        and abs(y) != abs(x)
    )]
# Assign a specific number of knights to unique board positions
def create(fnGetRandomPosition, expectedKnights):
    """Build the initial gene list: *expectedKnights* sampled positions.

    NOTE(review): positions may repeat — uniqueness is not enforced here.
    """
    genes = [fnGetRandomPosition() for _ in range(expectedKnights)]
    return genes
def mutate(genes, boardWidth, boardHeight, allPositions, nonEdgePositions):
    """Mutate *genes* in place: replace one (occasionally two) knights with a
    position likely to cover a currently unattacked square."""
    # 1-in-11 chance of performing two replacements instead of one.
    count = 2 if random.randint(0, 10) == 0 else 1
    while count > 0:
        count -= 1
        # figure out which knights are attacking which squares. The array in the dictionary has each knight’s gene index
        positionToKnightIndexes = dict((p, []) for p in allPositions)
        for i, knight in enumerate(genes):
            for position in get_attacks(knight, boardWidth, boardHeight):
                positionToKnightIndexes[position].append(i)
        # list of indexes of knights whose attacks are all covered by some other knight
        # and build a list of the squares that are not under attack.
        knightIndexes = set(i for i in range(len(genes)))
        unattacked = []
        for kvp in positionToKnightIndexes.items():
            if len(kvp[1]) > 1:
                continue
            if len(kvp[1]) == 0:
                unattacked.append(kvp[0])
                continue
            # A square with exactly one attacker: that knight is essential.
            for p in kvp[1]:
                if p in knightIndexes:
                    knightIndexes.remove(p)
        # build the list of locations from which the unattacked squares can be attacked.
        potentialKnightPositions = \
            [p for positions in
             map(lambda x: get_attacks(x, boardWidth, boardHeight), unattacked)
             for p in positions if p in nonEdgePositions] \
            if len(unattacked) > 0 else nonEdgePositions
        # choose a gene (knight) to replace.
        geneIndex = random.randrange(0, len(genes)) \
            if len(knightIndexes) == 0 \
            else random.choice([i for i in knightIndexes])
        # replace that knight with one likely to improve fitness
        position = random.choice(potentialKnightPositions)
        genes[geneIndex] = position
class Board:
    """ASCII rendering of knight positions ('N') on a '.'-filled grid."""

    def __init__(self, positions, width, height):
        board = [['.'] * width for _ in range(height)]
        for index in range(len(positions)):
            knightPosition = positions[index]
            board[knightPosition.Y][knightPosition.X] = 'N'
        self._board = board
        self._width = width
        self._height = height

    def print(self):
        # 0,0 prints bottom left
        for i in reversed(range(self._height)):
            print(f'{i}\t{" ".join(self._board[i])}')
        print(f' \t{" ".join(map(str, range(self._width)))}')
def display(candidate, startTime, boardWidth, boardHeight):
    """Print the candidate's board, its gene list, fitness and elapsed time."""
    timeDiff = dt.now() - startTime
    board = Board(candidate.Genes, boardWidth, boardHeight)
    board.print()
    print('{0}\n\t{1}\t{2}'.format(
        ' '.join(map(str, candidate.Genes)),
        candidate.Fitness,
        str(timeDiff)
    ))
def get_fitness(genes, boardWidth, boardHeight):
    """Fitness = number of distinct squares attacked by the knights in *genes*."""
    attacked = set(pos
                   for kn in genes
                   for pos in get_attacks(kn, boardWidth, boardHeight))
    return len(attacked)
class KnightsTest(unittest.TestCase):
    """Drives the genetic engine until the expected number of knights
    attacks every square of the board."""

    def test_10x10(self):
        width = 10
        height = 10
        # 22 knights are expected to suffice on a 10x10 board.
        self.find_knight_positions(width, height, 22)

    def find_knight_positions(self, boardWidth, boardHeight, expectedKnights):
        startTime = dt.now()
        allPositions = [Position(x, y) for y in range(boardHeight) for x in range(boardWidth)]
        # On boards of at least 6x6, restrict knights to non-edge squares
        # (edge squares attack too few positions to be worth occupying).
        if boardWidth < 6 or boardHeight < 6:
            nonEdgePositions = allPositions
        else:
            nonEdgePositions = [i for i in allPositions if 0 < i.X < boardWidth - 1 and 0 < i.Y < boardHeight - 1]

        def fnDisplay(candidate):
            display(candidate, startTime, boardWidth, boardHeight)

        def fnGetFitness(genes):
            return get_fitness(genes, boardWidth, boardHeight)

        def fnGetRandomPosition():
            return random.choice(nonEdgePositions)

        def fnMutate(genes):
            mutate(genes, boardWidth, boardHeight, allPositions, nonEdgePositions)

        def fnCreate():
            return create(fnGetRandomPosition, expectedKnights)

        # Optimum: every square on the board is attacked.
        optimalFitness = boardWidth * boardHeight
        best = genetic.get_best(fnGetFitness, None, optimalFitness, None, fnDisplay, fnMutate, fnCreate)
        self.assertTrue(not optimalFitness > best.Fitness)
# v2.0
from toolbox import *
import sklearn.model_selection
import sklearn.metrics
import wandb
import pickle as pkl
import re
import seaborn as sns
#########################
### ###
### LOAD DATA ###
### ###
#########################
def load_goldStandard(args, out, whichSet, verbose=True, forceKeepAll=False):
    """Load a gold-standard PPI table and derived lookup dictionaries.

    Parameters
    ----------
    args : namespace with `species`, `which_goldStandard`,
        `filtering_goldStandard`, `removeHubs_goldStandard`,
        `label_train`, `label_test`.
    out : 'IDdict' or 'enrichedDF' — controls the return shape.
    whichSet : subset of {args.label_train, args.label_test}; rows are
        filtered on the `trainTest` column.
    verbose : print a glance of the built dicts.
    forceKeepAll : for the 'main' gold standard, ignore the hub-removal
        setting and use the 'keepAll' variant.

    Returns
    -------
    (df, None, None) when out == 'enrichedDF', otherwise
    (df, {interactionID: label}, {interactionID: [uniprotID_A, uniprotID_B]}).

    BUGFIX: the f-strings reused the enclosing double quotes
    (f"...{logVersions["goldStandard"]}...") which is a SyntaxError before
    Python 3.12; inner subscripts now use single quotes.
    """
    assert out in ['IDdict', 'enrichedDF']
    assert args.filtering_goldStandard in ['all', 'filtered']
    assert set(whichSet) <= set([args.label_train, args.label_test])
    # Load help files
    cfg = load_cfg(path2dir='../..')
    logVersions = load_LogVersions('../..')
    if args.species == 'human':
        if args.which_goldStandard == 'main':
            pref = f"goldStandard_v{logVersions['goldStandard']}"
        else:
            # Names look like 'otherGoldStandard_<name>' or just '<name>'.
            bar = re.split("[_-]", args.which_goldStandard)
            assert len(bar) in [1, 2]
            if len(bar) == 1:
                pref = f"{args.which_goldStandard}_v{logVersions['otherGoldStandard'][bar[0]]}"
            else:
                assert bar[0] == 'otherGoldStandard'
                pref = f"{args.which_goldStandard}_v{logVersions['otherGoldStandard'][bar[1]]}"
        if out == 'enrichedDF':
            foo = f"_similarityMeasure_v{logVersions['featuresEngineering']['similarityMeasure']}"
        else:
            foo = ''
    else:
        pref = f"{args.which_goldStandard}_v{logVersions['otherGoldStandard'][args.which_goldStandard]}"
        if out == 'enrichedDF':
            foo = f"_similarityMeasure_v{logVersions['featuresEngineering'][args.species]['similarityMeasure']}"
        else:
            foo = ''
    # Load data
    with open(os.path.join(cfg['outputGoldStandard'],
                           f"{pref}{foo}.pkl"),
              'rb') as f:
        GS_dict = pkl.load(f)
    # Select DataFrame of interest (the 'main' pickle is a nested dict).
    if args.which_goldStandard == 'main':
        if forceKeepAll:
            GS = GS_dict[args.filtering_goldStandard]['keepAll']
        else:
            GS = GS_dict[args.filtering_goldStandard][args.removeHubs_goldStandard]
    else:
        GS = GS_dict
    # Add interactionID (concatenation of the two protein IDs).
    GS['interactionID'] = GS.uniprotID_A + GS.uniprotID_B
    # Only keep the requested train/test subset.
    GS_2 = GS.loc[GS.trainTest.isin(whichSet)].reset_index(drop=True)
    if out == 'enrichedDF':
        GS_2.drop(['uniprotID_A', 'uniprotID_B'], axis=1, inplace=True)
        GS_2.set_index('interactionID', inplace=True)
        return GS_2, None, None
    else:
        ### Create dict labels ###
        dict_labels = pd.Series(GS_2.isInteraction.values, index=GS_2.interactionID).to_dict()
        if verbose:
            print("\n === dict_labels \n")
            glance(dict_labels)
        ### Create dict mapping ###
        dict_mappingID = dict(zip(GS_2.interactionID.values.tolist(),
                                  GS_2.loc[:, ['uniprotID_A', 'uniprotID_B']].values.tolist()))
        if verbose:
            print("\n === dict_mappingID \n")
            glance(dict_mappingID)
        return (GS_2, dict_labels, dict_mappingID)
def load_data4prediction(nameDF, out):
    """Load the table of protein pairs to predict on.

    The df with IDs should be in cfg['outputPredictedPPI_IDs'] and has the
    columns ["uniprotID_A","uniprotID_B"], with potentially "isInteraction"
    if we know the labels.

    Returns (df, None, mapping_df) when out == 'enrichedDF', otherwise
    (df, dict_labels_or_None, {interactionID: [uniprotID_A, uniprotID_B]}).
    """
    # Load help files
    cfg = load_cfg(path2dir='../..')
    # Load data
    path_df = os.path.join(cfg['outputPredictedPPI_IDs'],
                           f"PPIs2predict_{nameDF}.pkl")
    print(f"\nLoading predict data from: {path_df}")
    df = pd.read_pickle(path_df)
    print(f"Predicting on {len(df):,} samples\n")
    # Add interactionID
    df['interactionID'] = df.uniprotID_A + df.uniprotID_B
    if out == 'enrichedDF':
        mapping_df = df[['uniprotID_A', 'uniprotID_B', 'interactionID']].copy()
        df.drop(['uniprotID_A', 'uniprotID_B'], axis=1, inplace=True)
        df.set_index('interactionID', inplace=True)
        return df, None, mapping_df
    else:
        # BUGFIX: dict_labels was only bound when "isInteraction" existed,
        # yet returned unconditionally — a NameError for unlabeled data.
        dict_labels = None
        if "isInteraction" in df.columns:
            dict_labels = pd.Series(df.isInteraction.values, index=df.interactionID).to_dict()
        ### Create dict mapping ###
        dict_mappingID = dict(zip(df.interactionID.values.tolist(),
                                  df.loc[:, ['uniprotID_A', 'uniprotID_B']].values.tolist()))
        return df, dict_labels, dict_mappingID
####################################
### ###
### SCIKIT-LEARN HELPERS ###
### ###
####################################
def trainTest_sklearn(pipe, X_train, y_train, X_test, y_test, verbose=True):
    """Fit *pipe* on the training split and score it on the test split.

    Returns a dict with the true test labels, the positive-class
    probabilities, the hard predictions, and the fitted pipeline.
    """
    if verbose:
        print('\tTraining...')
        print(f'(on {len(X_train):,} samples)')
    t_start = time.time()
    pipe.fit(X_train, y_train)
    if verbose:
        print('\t({:.3f}s)'.format(time.time() - t_start))
        print('\tPredict...')
        print(f'(on {len(X_test):,} samples)')
    y_proba = pipe.predict_proba(X_test)[:, 1]
    y_predict = pipe.predict(X_test)
    return {
        "y_true": y_test,
        "y_proba": y_proba,
        "y_predict": y_predict,
        "pipe": pipe,
    }
######################################
### ###
### X VALIDATION FUNCTIONS ###
### ###
######################################
def stratifiedXvalPartitions(
        listIDs, listLabels,
        n_splits, random_state,
        IDorIndex,
        verbose=True
):
    """Build stratified K-fold partitions over paired IDs and labels.

    IDorIndex: 'ID' stores the IDs themselves in each fold, 'index' stores
    positional indices into listIDs.
    Returns a list of {'train': ..., 'validation': ...} dicts, one per fold.
    """
    assert len(listIDs) == len(listLabels)
    skf = sklearn.model_selection.StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state)
    # Only the labels drive the stratification; features are irrelevant here.
    splits_idx = skf.split(np.zeros(len(listLabels)), listLabels)
    cv_partition = []
    for i, (train_index, val_index) in enumerate(splits_idx):
        assert IDorIndex in ['ID', 'index']
        if IDorIndex == 'ID':
            cv_partition.append({
                'train': [listIDs[i] for i in train_index],
                'validation': [listIDs[i] for i in val_index]
            })
        else:
            cv_partition.append({
                'train': train_index,
                'validation': val_index
            })
        if verbose:
            # Positive counts per split (labels assumed 0/1 so sum == count).
            foo = [listLabels[i] for i in train_index]
            bar = [listLabels[i] for i in val_index]
            print('\tFold {}: {:,} positive (/ {:,}) in Train\t{:,} / {:,} + in Val'.format(i + 1,
                                                                                           sum(foo),
                                                                                           len(foo),
                                                                                           sum(bar),
                                                                                           len(bar),
                                                                                           ))
    return cv_partition
def sampleFromXval(cv_partition, train_sampleSize, val_sampleSize, verbose=True):
    """Subsample each CV fold to fixed train/validation sizes (no replacement)."""
    cv_partition_sample = []
    for fold in cv_partition:
        train_ids = np.random.choice(fold['train'],
                                     size=train_sampleSize,
                                     replace=False)
        val_ids = np.random.choice(fold['validation'],
                                   size=val_sampleSize,
                                   replace=False)
        cv_partition_sample.append({'train': train_ids, 'validation': val_ids})
        if verbose:
            print(len(cv_partition_sample[-1]['train']), len(cv_partition_sample[-1]['validation']))
    return cv_partition_sample
def Xval_sklearn(cv, X, y, pipe, verbose=True):
    """Run train/test across pre-built CV folds and pool the predictions.

    cv: list of {'train': indices, 'validation': indices} dicts (positional,
    as built by stratifiedXvalPartitions with IDorIndex='index').
    Returns pooled 'y_true'/'y_proba'/'y_predict' arrays plus the per-fold
    triples under 'foldsResults'.
    """
    yConcat_real = []
    yConcat_proba = []
    yConcat_predict = []
    for i, partition in enumerate(cv):
        if verbose:
            print('- Fold ', i + 1)
        outTT = trainTest_sklearn(pipe=pipe,
                                  X_train=X.iloc[partition['train']],
                                  y_train=y.iloc[partition['train']],
                                  X_test=X.iloc[partition['validation']],
                                  y_test=y.iloc[partition['validation']],
                                  verbose=verbose
                                  )
        yConcat_real.append(outTT['y_true'])
        yConcat_proba.append(outTT['y_proba'])
        yConcat_predict.append(outTT['y_predict'])
    yConcat_real2 = np.concatenate(yConcat_real)
    yConcat_proba2 = np.concatenate(yConcat_proba)
    yConcat_predict2 = np.concatenate(yConcat_predict)
    return ({
        'y_true': yConcat_real2,
        'y_proba': yConcat_proba2,
        'y_predict': yConcat_predict2,
        'foldsResults': list(zip(yConcat_real, yConcat_proba, yConcat_predict))
    })
#######################
### ###
### LOGGING ###
### ###
#######################
def wandb_logging(args, prc, roc, init=True, suffix="/validation"):
    """Log PR and ROC curves (downsampled to 500 points) plus F1/AP
    summaries to Weights & Biases.

    init: start a new wandb run from *args* before logging.
    suffix: appended to every metric name (e.g. '/validation').
    """
    if init:
        wandb.init(
            project=args.wandb_project,
            name=args.wandb_name,
            dir=args.wandbLogs_dir,
            tags=args.wandb_tags,
            config=args
        )
    recall2, precision2 = sample_curve(x=prc['recall'], y=prc['precision'], sample_size=500)
    for x, y in zip(recall2, precision2):
        # FIXME: follow-up on wandb bug: https://github.com/wandb/client/issues/1507
        wandb.log(
            {f'Recall{suffix}': x, f'Precision{suffix}': y},
        )
    fpr2, tpr2 = sample_curve(x=roc['fpr'], y=roc['tpr'], sample_size=500)
    for x, y in zip(fpr2, tpr2):
        wandb.log(
            {f'FPR{suffix}': x, f'TPR{suffix}': y},
        )
    if 'f1' in prc:
        wandb.run.summary[f"F1{suffix}"] = prc['f1']
    if 'ap' in prc:
        wandb.run.summary[f"AP{suffix}"] = prc['ap']
###########################
### ###
### CURVE STUFF ###
### ###
###########################
def sample_curve(x, y, sample_size):
    """Downsample the paired curve (x, y) to at most *sample_size* points,
    evenly spaced by index (first and last points are always kept)."""
    assert len(x) == len(y)
    n_points = min(len(x), sample_size)
    idx = np.linspace(0, len(x) - 1, dtype=int, num=n_points)
    return [x[i] for i in idx], [y[i] for i in idx]
#########################################
### ###
### PRECISION RECAL FUNCTIONS ###
### ###
#########################################
def plotPRCs(prcList, myList=None):
    """Overlay precision-recall curves for several algorithms.

    prcList: iterable of (algorithm_name, prc_dict) pairs where prc_dict is
    (at least partially) the output of precisionRecallCurve.
    myList: optional whitelist of algorithm names to draw.
    """
    sns.set(font_scale=1.3)
    fig = plt.figure(figsize=(14, 8))
    for algo, prc in prcList:
        if myList is None:
            goAhead = True
        elif algo in myList:
            goAhead = True
        else:
            goAhead = False
        if goAhead:
            # A full PRC dict has 'thresholds'; a single-point result without
            # them is drawn as a marker instead of a curve.
            try:
                len(prc['thresholds'])
                plt.plot(prc['recall'],
                         prc['precision'],
                         label='{} (f1= {:0.4f}, auc = {:0.2f}, ap = {:0.2f})'.format(algo,
                                                                                     prc['f1'], prc['auc'], prc['ap']),
                         lw=2)
            except:
                plt.plot(prc['recall'], prc['precision'],
                         marker='o', markersize=4, color="red", label='{} (f1= {:0.4f})'.format(algo, prc['f1']))
    # NOTE(review): the 0.5 baseline assumes balanced classes — confirm.
    noSkill = 0.5
    plt.plot([0, 1], [noSkill, noSkill],
             linestyle='--', color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.ylim([0.4, 1.05])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.legend(loc="upper right")
    plt.title("Comparison PRC curves")
    plt.tight_layout()
def precisionRecallCurve(y_test, y_predict, y_proba, fontScale=1,
                         figsize=(10, 6), doPlot=True, title=None, xlims=[0.45, 1.05]):
    """Compute and (optionally) show a precision-recall curve.

    Returns a dict with the full curve ('precision', 'recall', 'thresholds'),
    a 20-point sample of it ('prt'), summary metrics ('auc', 'ap', 'f1'),
    raw and row-normalised confusion matrices, and the pyplot module.

    NOTE: xlims is a mutable default argument (never mutated here) and is
    currently unused in the plotting below.
    """
    precision, recall, thresholds = sklearn.metrics.precision_recall_curve(y_test, y_proba)
    prc_auc = sklearn.metrics.auc(recall, precision)
    prc_f1 = sklearn.metrics.f1_score(y_test, y_predict)
    prc_ap = sklearn.metrics.average_precision_score(y_test, y_proba)
    df_confusion = pd.crosstab(pd.Series(y_test, name='Actual'),
                               pd.Series(y_predict, name='Predicted'))
    df_conf_norm = df_confusion / df_confusion.sum(axis=1)
    # Evenly spaced 20-point sample of the curve with its thresholds.
    sampleIdx = list(map(int, np.linspace(0, len(thresholds) - 1, 20)))
    prtSample = pd.DataFrame({'precision': precision[sampleIdx],
                              'recall': recall[sampleIdx],
                              'thresholds': thresholds[sampleIdx]})
    sns.set(font_scale=fontScale)
    fig = plt.figure(figsize=figsize)
    plt.plot(recall,
             precision,
             label='PRC: auc = {:0.2f}, ap = {:0.2f}, f1= {:0.2f}'.format(prc_auc, prc_ap, prc_f1))
    # No-skill baseline: a random classifier's precision equals the positive rate.
    noSkill = sum(y_test) / len(y_test)
    plt.plot([0, 1], [noSkill, noSkill], linestyle='--',
             color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.legend(loc="lower left")
    if title is not None:
        plt.title(title)
    else:
        plt.title("Precision-Recall curve")
    plt.tight_layout()
    if doPlot:
        plt.show()
    return ({"precision": precision,
             "recall": recall,
             "thresholds": thresholds,
             "prt": prtSample,
             "auc": prc_auc,
             "ap": prc_ap,
             "f1": prc_f1,
             "confusion": df_confusion,
             "confusion_norm": df_conf_norm,
             'plt': plt,
             })
def cvPCR(output_Xval):
    """Plot per-fold precision-recall curves plus the pooled ("mean") curve
    from the output of Xval_sklearn.

    output_Xval must contain 'foldsResults' (list of (y_true, y_proba,
    y_predict) triples per fold) and the pooled 'y_true'/'y_proba'/'y_predict'.
    """
    sns.set(font_scale=1)
    fig = plt.figure(figsize=(14, 6))
    for i, (labels, y_proba, y_predict) in enumerate(output_Xval['foldsResults']):
        PRCfold = precisionRecallCurve(y_test=labels,
                                       y_predict=y_predict,
                                       y_proba=y_proba,
                                       doPlot=False)
        plt.plot(PRCfold['recall'],
                 PRCfold['precision'],
                 label='PRC fold {} (auc = {:0.2f}, ap = {:0.2f}, f1= {:0.2f})'.format(i + 1,
                                                                                      PRCfold['auc'],
                                                                                      PRCfold['ap'],
                                                                                      PRCfold['f1']))
    # BUGFIX: Xval_sklearn returns the pooled labels under 'y_true' —
    # the previous key 'labels' raised KeyError.
    PRC = precisionRecallCurve(y_test=output_Xval['y_true'],
                               y_predict=output_Xval['y_predict'],
                               y_proba=output_Xval['y_proba'],
                               doPlot=False)
    plt.plot(PRC['recall'],
             PRC['precision'],
             'k--',
             label='mean PRC (auc = {:0.2f}, ap = {:0.2f}, f1= {:0.2f})'.format(PRC['auc'],
                                                                                PRC['ap'],
                                                                                PRC['f1']),
             lw=2)
    # NOTE(review): 0.5 baseline assumes balanced classes — confirm.
    noSkill = .5
    plt.plot([0, 1], [noSkill, noSkill], linestyle='--',
             color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.legend(loc="lower left")
    plt.title("Precision-Recall curve")
    plt.tight_layout()
    plt.show()
# def cvPRC(X, Y, pipe, n_cv=3,
# fontScale=1, doPlot=True, cv=None,
# title=None, figsize=(14, 6),
# randomStateCV = 1
# ):
# if cv is None:
# cv = list(sklearn.model_selection.StratifiedKFold(n_splits=n_cv,
# random_state=randomStateCV).split(X, Y))
# if doPlot:
# sns.set(font_scale=fontScale)
# fig = plt.figure(figsize=figsize)
# yConcat_test = []
# yConcat_proba = []
# yConcat_predict = []
# for i, (train, test) in enumerate(cv):
# outTT = trainTest_sklearn(pipe=pipe,
# X_train=X[train],
# y_train=Y[train],
# X_test=X[test]
# )
# yConcat_test.append(Y[test])
# yConcat_proba.append(outTT['y_proba'])
# yConcat_predict.append(outTT['y_predict'])
# PRCfold = precisionRecallCurve(y_test=Y[test],
# y_predict=outTT['y_predict'],
# y_proba=outTT['y_proba'],
# doPlot=False)
# if doPlot:
# plt.plot(PRCfold['precision'],
# PRCfold['recall'],
# label='PRC fold {} (auc = {:0.2f}, ap = {:0.2f}, f1= {:0.2f})'.format(i+1,
# PRCfold['auc'],
# PRCfold['ap'],
# PRCfold['f1']))
# yConcat_test = np.concatenate(yConcat_test)
# yConcat_proba = np.concatenate(yConcat_proba)
# yConcat_predict = np.concatenate(yConcat_predict)
# PRC = precisionRecallCurve(y_test=yConcat_test,
# y_predict=yConcat_predict,
# y_proba=yConcat_proba,
# doPlot=False)
# sampleIdx = list(map(int, np.linspace(0, len(PRC['thresholds'])-1, 20)))
# prtSample = pd.DataFrame({'precision': PRC['precision'][sampleIdx],
# 'recall': PRC['recall'][sampleIdx],
# 'thresholds': PRC['thresholds'][sampleIdx]})
# if doPlot:
# plt.plot(PRC['precision'],
# PRC['recall'],
# 'k--',
# label='mean PRC (auc = {:0.2f}, ap = {:0.2f}, f1= {:0.2f})'.format(PRC['auc'],
# PRC['ap'],
# PRC['f1']),
# lw=2)
# # no skill
# noSkill = sum(Y) / len(Y)
# plt.plot([0, 1], [noSkill, noSkill], linestyle='--',
# color=(0.6, 0.6, 0.6),
# label='random guessing')
# plt.xlim([-0.05, 1.05])
# plt.ylim([-0.05, 1.05])
# plt.xlabel('Recall')
# plt.ylabel('Precision')
# plt.legend(loc="lower left")
# if title is not None:
# plt.title(title)
# else:
# plt.title("Precision-Recall curve")
# plt.tight_layout()
# # plt.savefig('images/06_10.png', dpi=300)
# plt.show()
# return({"precision": PRC['precision'],
# "recall": PRC['recall'],
# "thresholds": PRC['thresholds'],
# "prt": prtSample,
# "auc": PRC['auc'],
# "ap": PRC['ap'],
# "f1": PRC['f1'],
# "confusion":PRC['confusion'],
# "confusion_norm":PRC['confusion_norm']
# })
def comparePRCs(X, Y, pipes, algorithms, n_cv=3, fontScale=1, title=None,
                figsize=(7, 5), xlims=[-0.05, 1.05],
                figExportPath=None
                ):
    """Overlay cross-validated precision-recall curves of several pipelines.

    X, Y: features and binary labels. pipes/algorithms: parallel lists of
    estimator pipelines and their display names. n_cv: number of stratified
    folds. figExportPath: if given, the figure is also saved there (dpi=2000).
    """
    # BUG FIX: StratifiedKFold raises a ValueError in scikit-learn >= 0.24
    # when random_state is set while shuffle=False; shuffle=True keeps the
    # seeded, reproducible splits the original code intended.
    cv = list(sklearn.model_selection.StratifiedKFold(n_splits=n_cv,
                                                      shuffle=True,
                                                      random_state=1).split(X, Y))
    sns.set(font_scale=fontScale)
    fig = plt.figure(figsize=figsize)
    for pipe, algo in zip(pipes, algorithms):
        # NOTE(review): cvPRC is commented out in this file revision -- this
        # call raises NameError unless cvPRC is defined elsewhere; confirm.
        PRC = cvPRC(X=X, Y=Y, pipe=pipe,
                    doPlot=False, cv=cv)
        plt.plot(PRC['precision'],
                 PRC['recall'],
                 label='{} (auc = {:0.3f}, ap = {:0.3f} , f1= {:0.3f})'.format(algo,
                                                                              PRC['auc'], PRC['ap'], PRC['f1']),
                 lw=2)
    # no-skill baseline: precision equals the positive-class prevalence
    noSkill = sum(Y) / len(Y)
    plt.plot([0, 1], [noSkill, noSkill],
             linestyle='--', color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.xlim(xlims)
    plt.ylim([-0.05, 1.05])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.legend(loc="lower left")
    if title is None:
        plt.title("Comparison")
    else:
        plt.title(title)
    plt.tight_layout()
    if figExportPath is not None:
        plt.savefig(figExportPath, dpi=2000)
    plt.show()
#############################
### ###
### ROC FUNCTIONS ###
### ###
#############################
def receiverOperatingCurve(y_test, y_predict, y_proba,
                           fontScale=1, figsize=(10, 6), doPlot=True, title=None, xlims=[0.45, 1.05]):
    """Compute and (optionally) display the ROC curve of a binary classifier.

    y_test: true labels; y_predict: hard predictions (for accuracy);
    y_proba: positive-class scores (for the curve and AUC).
    Returns a dict with fpr/tpr/thresholds arrays, auc, accuracy, and plt.
    NOTE(review): xlims is accepted but never used here -- confirm intent.
    """
    fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_test, y_proba)
    roc_auc = sklearn.metrics.roc_auc_score(y_test, y_proba)
    roc_accuracy = sklearn.metrics.accuracy_score(y_test, y_predict)
    sns.set(font_scale=fontScale)
    # The figure is always built so the returned plt can be reused by callers;
    # doPlot only controls whether it is shown now.
    fig = plt.figure(figsize=figsize)
    plt.plot(fpr,
             tpr,
             label='ROC: auc = {:0.2f}, acc = {:0.2f}'.format(roc_auc, roc_accuracy))
    # no skill
    plt.plot([0, 1], [0, 1], linestyle='--',
             color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.legend(loc="lower right")
    if title is not None:
        plt.title(title)
    else:
        plt.title("ROC curve")
    plt.tight_layout()
    if doPlot:
        plt.show()
    return ({"fpr": fpr,
             "tpr": tpr,
             "thresholds": thresholds,
             "auc": roc_auc,
             "accuracy": roc_accuracy,
             'plt': plt,
             })
def plotROCs(rocList, myList=None):
    """Overlay precomputed ROC result dicts on one figure.

    rocList: iterable of (algorithm_name, roc_dict) pairs; myList, when given,
    keeps only algorithms whose name is in it. Entries whose 'thresholds' is
    missing or has no len() (a single operating point) are drawn as a red
    marker instead of a curve.
    """
    sns.set(font_scale=1.3)
    fig = plt.figure(figsize=(14, 8))
    for algo, roc in rocList:
        if myList is None:
            goAhead = True
        elif algo in myList:
            goAhead = True
        else:
            goAhead = False
        if goAhead:
            # ROBUSTNESS: the former bare `except:` swallowed every error;
            # narrow it to what the len() probe can actually raise so real
            # bugs still surface.
            try:
                len(roc['thresholds'])
                plt.plot(roc['fpr'],
                         roc['tpr'],
                         label='{}: auc = {:0.2f}, acc = {:0.2f}'.format(algo, roc['auc'], roc['accuracy']), lw=2)
            except (TypeError, KeyError):
                plt.plot(roc['fpr'], roc['tpr'],
                         marker='o', markersize=4, color="red",
                         label='{} (acc = {:0.4f})'.format(algo, roc['accuracy']))
    plt.plot([0, 1], [0, 1], linestyle='--',
             color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.legend(loc="lower right")
    plt.title("Comparison ROC curves")
    plt.tight_layout()
def cvROC(output_Xval):
    """Plot per-fold ROC curves plus the pooled ('mean') ROC from Xval output.

    output_Xval: dict as returned by Xval_sklearn -- pooled predictions under
    'y_true'/'y_proba'/'y_predict' and per-fold triples under 'foldsResults'.
    """
    sns.set(font_scale=1)
    fig = plt.figure(figsize=(14, 6))
    for i, (labels, y_proba, y_predict) in enumerate(output_Xval['foldsResults']):
        ROCfold = receiverOperatingCurve(y_test=labels,
                                         y_predict=y_predict,
                                         y_proba=y_proba,
                                         doPlot=False)
        plt.plot(ROCfold['fpr'],
                 ROCfold['tpr'],
                 label='ROC fold {} (auc = {:0.2f}, acc = {:0.2f})'.format(i + 1,
                                                                           ROCfold['auc'],
                                                                           ROCfold['accuracy']))
    # CONSISTENCY FIX: Xval_sklearn (this file) stores the pooled labels under
    # 'y_true', not 'labels' (which raised KeyError); accept either key.
    pooled_labels = output_Xval['y_true'] if 'y_true' in output_Xval else output_Xval['labels']
    ROC = receiverOperatingCurve(y_test=pooled_labels,
                                 y_predict=output_Xval['y_predict'],
                                 y_proba=output_Xval['y_proba'],
                                 doPlot=False)
    plt.plot(ROC['fpr'],
             ROC['tpr'],
             'k--',
             label='mean ROC (auc = {:0.2f}, acc = {:0.2f})'.format(ROC['auc'],
                                                                    ROC['accuracy']),
             lw=2)
    plt.plot([0, 1], [0, 1], linestyle='--',
             color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.legend(loc="lower right")
    plt.title("ROC curve")
    plt.tight_layout()
    plt.show()
# def plotROC(X, Y, pipe, n_cv = 3, fontScale=1,doPlot=True, cv=None, figsize=(7,5)):
# if cv is None:
# cv = list(sklearn.model_selection.StratifiedKFold(n_splits=n_cv,
# random_state=1).split(X, Y))
# if doPlot:
# sns.set(font_scale=fontScale)
# fig = plt.figure(figsize=figsize)
# mean_tpr = 0.0
# mean_fpr = np.linspace(0, 1, 100)
# all_tpr = []
# for i, (train, test) in enumerate(cv):
# probas = pipe.fit(X[train],
# Y[train]).predict_proba(X[test])
# fpr, tpr, thresholds = sklearn.metrics.roc_curve(Y[test],
# probas[:, 1],
# pos_label=1)
# mean_tpr += interp(mean_fpr, fpr, tpr)
# mean_tpr[0] = 0.0
# roc_auc = sklearn.metrics.auc(fpr, tpr)
# if doPlot:
# plt.plot(fpr,
# tpr,
# label='ROC fold %d (area = %0.2f)'
# % (i+1, roc_auc))
# mean_tpr /= len(cv)
# mean_tpr[-1] = 1.0
# mean_auc = sklearn.metrics.auc(mean_fpr, mean_tpr)
# if doPlot:
# plt.plot(mean_fpr, mean_tpr, 'k--',
# label='mean ROC (area = %0.2f)' % mean_auc, lw=2)
# plt.plot([0, 0, 1],
# [0, 1, 1],
# linestyle=':',
# color='black',
# label='perfect performance')
# plt.plot([0, 1],
# [0, 1],
# linestyle='--',
# color=(0.6, 0.6, 0.6),
# label='random guessing')
# plt.xlim([-0.05, 1.05])
# plt.ylim([-0.05, 1.05])
# plt.xlabel('false positive rate')
# plt.ylabel('true positive rate')
# plt.legend(loc="lower right")
# plt.tight_layout()
# # plt.savefig('images/06_10.png', dpi=300)
# plt.show()
# return({"fpr":mean_fpr,
# "tpr": mean_tpr,
# "auc": mean_auc
# })
def compareROCs(X, Y, pipes, algorithms, n_cv=3, fontScale=1, figsize=(7, 5), exportPath=None):
    """Overlay cross-validated ROC curves of several pipelines on one figure.

    pipes/algorithms are parallel lists; exportPath, when given, saves the figure.
    """
    # BUG FIX: StratifiedKFold raises a ValueError in scikit-learn >= 0.24
    # when random_state is set while shuffle=False; shuffle=True keeps the
    # seeded, reproducible splits the original code intended.
    cv = list(sklearn.model_selection.StratifiedKFold(n_splits=n_cv,
                                                      shuffle=True,
                                                      random_state=1).split(X, Y))
    sns.set(font_scale=fontScale)
    fig = plt.figure(figsize=figsize)
    for pipe, algo in zip(pipes, algorithms):
        # NOTE(review): plotROC is commented out in this file revision -- this
        # call raises NameError unless plotROC is defined elsewhere; confirm.
        ROC = plotROC(X=X, Y=Y, pipe=pipe, doPlot=False, cv=cv)
        plt.plot(ROC['fpr'],
                 ROC['tpr'],
                 label='{} (auc = {:0.3f})'.format(algo, ROC['auc']),
                 lw=2)
    plt.plot([0, 1],
             [0, 1],
             linestyle='--',
             color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.plot([0, 0, 1],
             [0, 1, 1],
             linestyle=':',
             color='black',
             label='perfect performance')
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('false positive rate')
    plt.ylabel('true positive rate')
    plt.legend(loc="lower right")
    plt.tight_layout()
    if exportPath is not None:
        plt.savefig(exportPath)
    plt.show()
if __name__ == '__main__':
    # Smoke-test entry point: load one prediction dataset as ID dictionaries.
    # (Earlier ad-hoc checks of stratifiedXvalPartitions / load_goldStandard
    # lived here as commented-out code; see version-control history.)
    df_pred, labels_pred, id_mapping = load_data4prediction(
        nameDF='test1-filtered-10_v7-0-1',
        out='IDdict'
    )
    print()
|
# v2.0
from toolbox import *
import sklearn.model_selection
import sklearn.metrics
import wandb
import pickle as pkl
import re
import seaborn as sns
#########################
### ###
### LOAD DATA ###
### ###
#########################
def load_goldStandard(args, out, whichSet, verbose=True, forceKeepAll=False):
    """Load the gold-standard interaction table as a DataFrame or as ID dicts.

    Parameters
    ----------
    args : config namespace; fields read here: species, which_goldStandard,
        filtering_goldStandard ('all' or 'filtered'), removeHubs_goldStandard,
        label_train, label_test.
    out : 'enrichedDF' -> (DataFrame indexed by interactionID, None, None);
        'IDdict' -> (DataFrame, {interactionID: label}, {interactionID: [idA, idB]}).
    whichSet : iterable of trainTest split names to keep; must be a subset of
        {args.label_train, args.label_test}.
    verbose : print a glance of the produced dicts ('IDdict' output only).
    forceKeepAll : use the 'keepAll' (hub-unfiltered) variant regardless of
        args.removeHubs_goldStandard (only applies to the 'main' gold standard).
    """
    assert out in ['IDdict' ,'enrichedDF']
    assert args.filtering_goldStandard in ['all' ,'filtered']
    # assert (args.removeHubs_goldStandard == 'keepAll')|(RepresentsInt(args.removeHubs_goldStandard))
    assert set(whichSet) <= set([args.label_train , args.label_test])
    # Load help files
    cfg = load_cfg(path2dir='../..')
    logVersions = load_LogVersions('../..')
    # assert args.which_goldStandard in ['main', 'otherGoldStandard_controlledOverlap']
    # Build the versioned pickle filename: `pref` encodes the gold-standard
    # variant + version, `foo` appends the similarity-measure version when the
    # feature-enriched DataFrame is requested.
    if args.species == 'human':
        if args.which_goldStandard == 'main':
            pref = f"goldStandard_v{logVersions['goldStandard']}"
        else:
            # variant name is either '<name>' or 'otherGoldStandard_<name>'
            bar = re.split("[_-]", args.which_goldStandard)
            assert len(bar) in [1,2]
            if len(bar) == 1:
                pref = f"{args.which_goldStandard}_v{logVersions['otherGoldStandard'][bar[0]]}"
            else:
                assert bar[0] == 'otherGoldStandard'
                pref = f"{args.which_goldStandard}_v{logVersions['otherGoldStandard'][bar[1]]}"
        if out == 'enrichedDF':
            foo = f"_similarityMeasure_v{logVersions['featuresEngineering']['similarityMeasure']}"
        else:
            foo = ''
    else:
        pref = f"{args.which_goldStandard}_v{logVersions['otherGoldStandard'][args.which_goldStandard]}"
        if out == 'enrichedDF':
            foo = f"_similarityMeasure_v{logVersions['featuresEngineering'][args.species]['similarityMeasure']}"
        else:
            foo = ''
    # Load data
    with open(os.path.join(cfg['outputGoldStandard'],
                           f"{pref}{foo}.pkl"),
              'rb') as f:
        GS_dict = pkl.load(f)
    # Select DataFrame of interest
    if args.which_goldStandard == 'main':
        if forceKeepAll:
            GS = GS_dict[args.filtering_goldStandard]['keepAll']
        else:
            GS = GS_dict[args.filtering_goldStandard][args.removeHubs_goldStandard]
    else:
        # non-'main' pickles hold the DataFrame directly (no nested dict)
        GS = GS_dict
    # GS = pd.read_pickle(
    #     os.path.join(
    #         cfg['outputGoldStandard'],
    #         "goldStandard_{}_v{}{}.pkl".format(
    #             args.filtering_goldStandard,
    #             logVersions['goldStandard'],
    #             foo
    #         )
    #     )
    # )
    # Add interactionID: concatenated uniprot IDs, used as key downstream
    GS['interactionID'] = GS.uniprotID_A + GS.uniprotID_B
    # Only keep training set
    GS_2 = GS.loc[GS.trainTest.isin(whichSet)].reset_index(drop=True)
    if out == 'enrichedDF':
        GS_2.drop(['uniprotID_A' ,'uniprotID_B'], axis=1, inplace=True)
        GS_2.set_index('interactionID', inplace=True)
        return GS_2, None, None
    else:
        ### Create dict labels ###
        dict_labels = pd.Series(GS_2.isInteraction.values, index=GS_2.interactionID).to_dict()
        if verbose:
            print("\n === dict_labels \n")
            glance(dict_labels)
        ### Create dict mapping ###
        dict_mappingID = dict(zip(GS_2.interactionID.values.tolist(),
                                  GS_2.loc[:, ['uniprotID_A', 'uniprotID_B']].values.tolist()))
        if verbose:
            print("\n === dict_mappingID \n")
            glance(dict_mappingID)
        return (GS_2, dict_labels, dict_mappingID)
def load_data4prediction(nameDF, out):
    '''
    Load a table of protein pairs to run predictions on.

    The df with IDs should be in cfg['outputPredictedPPI_IDs'] and has the columns ["uniprotID_A","uniprotID_B"]
    With potentially "isInteraction" if we know the labels

    out='enrichedDF' returns (df indexed by interactionID, None, mapping_df);
    out (anything else) returns (df, labels_dict_or_None, mapping_dict).
    '''
    # Load help files
    cfg = load_cfg(path2dir='../..')
    # Load data
    path_df = os.path.join(cfg['outputPredictedPPI_IDs'],
                           f"PPIs2predict_{nameDF}.pkl")
    print(f"\nLoading predict data from: {path_df}")
    df = pd.read_pickle(path_df)
    print(f"Predicting on {len(df):,} samples\n")
    # Unique pair identifier used as key everywhere downstream
    df['interactionID'] = df.uniprotID_A + df.uniprotID_B
    if out == 'enrichedDF':
        mapping_df = df[['uniprotID_A' ,'uniprotID_B' ,'interactionID']].copy()
        df.drop(['uniprotID_A' ,'uniprotID_B'], axis=1, inplace=True)
        df.set_index('interactionID', inplace=True)
        return df, None, mapping_df
    else:
        # BUG FIX: dict_labels was only assigned when the 'isInteraction'
        # column existed, so unlabeled data hit a NameError on the return
        # below; default to None for unlabeled prediction sets.
        dict_labels = None
        if "isInteraction" in df.columns:
            dict_labels = pd.Series(df.isInteraction.values, index=df.interactionID).to_dict()
        ### Create dict mapping ###
        dict_mappingID = dict(zip(df.interactionID.values.tolist(),
                                  df.loc[:, ['uniprotID_A', 'uniprotID_B']].values.tolist()))
        return df, dict_labels, dict_mappingID
####################################
### ###
### SCIKIT-LEARN HELPERS ###
### ###
####################################
def trainTest_sklearn(pipe, X_train, y_train, X_test, y_test, verbose=True):
    '''
    Fit a binary classifier pipeline on the training split and score the test split.

    :param pipe: sklearn-style estimator exposing fit / predict / predict_proba
    :param X_train: training features
    :param y_train: training labels
    :param X_test: test features
    :param y_test: test labels, passed through unchanged as 'y_true'
    :return: dict with y_true, y_proba (positive-class column), y_predict,
        and the fitted pipe
    '''
    if verbose:
        print('\tTraining...')
        print(f'(on {len(X_train):,} samples)')
    start = time.time()
    pipe.fit(X_train, y_train)
    if verbose:
        print('\t({:.3f}s)'.format(time.time() - start))
        print('\tPredict...')
        print(f'(on {len(X_test):,} samples)')
    proba_pos = pipe.predict_proba(X_test)[:, 1]
    predictions = pipe.predict(X_test)
    return {
        "y_true": y_test,
        "y_proba": proba_pos,
        "y_predict": predictions,
        "pipe": pipe,
    }
######################################
### ###
### X VALIDATION FUNCTIONS ###
### ###
######################################
def stratifiedXvalPartitions(
        listIDs, listLabels,
        n_splits, random_state,
        IDorIndex,
        verbose=True
):
    """Build stratified, shuffled K-fold train/validation partitions.

    IDorIndex='ID' stores the actual entries of listIDs in each fold;
    IDorIndex='index' stores the positional indices instead.
    Returns a list of {'train': ..., 'validation': ...} dicts, one per fold.
    """
    assert len(listIDs) == len(listLabels)
    assert IDorIndex in ('ID', 'index')
    splitter = sklearn.model_selection.StratifiedKFold(
        n_splits=n_splits, shuffle=True, random_state=random_state)
    # StratifiedKFold only needs labels for stratification; features are dummy.
    fold_indices = splitter.split(np.zeros(len(listLabels)), listLabels)
    partitions = []
    for fold_num, (train_idx, val_idx) in enumerate(fold_indices, start=1):
        if IDorIndex == 'ID':
            partitions.append({
                'train': [listIDs[j] for j in train_idx],
                'validation': [listIDs[j] for j in val_idx],
            })
        else:
            partitions.append({
                'train': train_idx,
                'validation': val_idx,
            })
        if verbose:
            train_labels = [listLabels[j] for j in train_idx]
            val_labels = [listLabels[j] for j in val_idx]
            print('\tFold {}: {:,} positive (/ {:,}) in Train\t{:,} / {:,} + in Val'.format(
                fold_num,
                sum(train_labels),
                len(train_labels),
                sum(val_labels),
                len(val_labels),
            ))
    return partitions
def sampleFromXval(cv_partition, train_sampleSize, val_sampleSize, verbose=True):
    """Subsample every fold of a CV partition, without replacement.

    cv_partition: list of {'train': ..., 'validation': ...} dicts.
    Returns a parallel list with numpy arrays of the requested sizes.
    """
    sampled = []
    for fold in cv_partition:
        sampled.append({
            'train': np.random.choice(fold['train'],
                                      size=train_sampleSize,
                                      replace=False),
            'validation': np.random.choice(fold['validation'],
                                           size=val_sampleSize,
                                           replace=False),
        })
        if verbose:
            print(len(sampled[-1]['train']), len(sampled[-1]['validation']))
    return sampled
def Xval_sklearn(cv, X, y, pipe, verbose=True):
    """Run trainTest_sklearn on every fold and pool the predictions.

    cv: list of {'train': ..., 'validation': ...} positional-index partitions
    (e.g. from stratifiedXvalPartitions with IDorIndex='index'); X, y are
    pandas objects indexed positionally via .iloc.
    Returns pooled y_true / y_proba / y_predict arrays plus the per-fold
    (y_true, y_proba, y_predict) triples under 'foldsResults'.
    """
    fold_true, fold_proba, fold_pred = [], [], []
    for fold_num, partition in enumerate(cv, start=1):
        if verbose:
            print('- Fold ', fold_num)
        result = trainTest_sklearn(pipe=pipe,
                                   X_train=X.iloc[partition['train']],
                                   y_train=y.iloc[partition['train']],
                                   X_test=X.iloc[partition['validation']],
                                   y_test=y.iloc[partition['validation']],
                                   verbose=verbose
                                   )
        fold_true.append(result['y_true'])
        fold_proba.append(result['y_proba'])
        fold_pred.append(result['y_predict'])
    return {
        'y_true': np.concatenate(fold_true),
        'y_proba': np.concatenate(fold_proba),
        'y_predict': np.concatenate(fold_pred),
        'foldsResults': list(zip(fold_true, fold_proba, fold_pred)),
    }
#######################
### ###
### LOGGING ###
### ###
#######################
def wandb_logging(args, prc, roc, init=True, suffix="/validation"):
    """Log PR and ROC curves (downsampled to <=500 points) plus F1/AP to wandb.

    args : run config; wandb_project / wandb_name / wandbLogs_dir / wandb_tags
        are used by wandb.init when init=True.
    prc : dict with 'recall'/'precision' arrays, optionally 'f1' and 'ap'.
    roc : dict with 'fpr'/'tpr' arrays.
    suffix : appended to every metric name (e.g. "Recall/validation").
    """
    if init:
        wandb.init(
            project=args.wandb_project,
            name=args.wandb_name,
            dir=args.wandb_tags if False else args.wandbLogs_dir,
            tags=args.wandb_tags,
            config=args
        )
    recall2, precision2 = sample_curve(x=prc['recall'], y=prc['precision'], sample_size=500)
    for x, y in zip(recall2, precision2):
        # FIXME: follow-up on wandb bug: https://github.com/wandb/client/issues/1507
        wandb.log(
            {f'Recall{suffix}': x, f'Precision{suffix}': y},
            # commit=False,
        )
    fpr2, tpr2 = sample_curve(x=roc['fpr'], y=roc['tpr'], sample_size=500)
    for x, y in zip(fpr2, tpr2):
        wandb.log(
            {f'FPR{suffix}': x, f'TPR{suffix}': y},
            # commit=False,
        )
    # Scalar summary metrics, only when the PRC dict provides them
    if 'f1' in prc:
        wandb.run.summary[f"F1{suffix}"] = prc['f1']
    if 'ap' in prc:
        wandb.run.summary[f"AP{suffix}"] = prc['ap']
###########################
### ###
### CURVE STUFF ###
### ###
###########################
def sample_curve(x, y, sample_size):
    """Downsample a curve to at most sample_size evenly spaced points.

    Positions come from np.linspace(0, len(x)-1) truncated to int, so both
    endpoints are always kept. Returns plain lists (x2, y2).
    """
    assert len(x) == len(y)
    n_points = min(len(x), sample_size)
    positions = np.linspace(0, len(x) - 1, dtype=int, num=n_points)
    return [x[p] for p in positions], [y[p] for p in positions]
#########################################
### ###
### PRECISION RECAL FUNCTIONS ###
### ###
#########################################
def plotPRCs(prcList, myList=None):
    """Overlay precomputed precision-recall result dicts on one figure.

    prcList: iterable of (algorithm_name, prc_dict) pairs; myList, when given,
    keeps only algorithms whose name is in it. Entries whose 'thresholds' is
    missing or has no len() (a single operating point) are drawn as a red
    marker instead of a curve.
    """
    sns.set(font_scale=1.3)
    fig = plt.figure(figsize=(14, 8))
    for algo, prc in prcList:
        if myList is None:
            goAhead = True
        elif algo in myList:
            goAhead = True
        else:
            goAhead = False
        if goAhead:
            # ROBUSTNESS: the former bare `except:` swallowed every error;
            # narrow it to what the len() probe can actually raise so real
            # bugs still surface.
            try:
                len(prc['thresholds'])
                plt.plot(prc['recall'],
                         prc['precision'],
                         label='{} (f1= {:0.4f}, auc = {:0.2f}, ap = {:0.2f})'.format(algo,
                                                                                     prc['f1'], prc['auc'], prc['ap']),
                         lw=2)
            except (TypeError, KeyError):
                plt.plot(prc['recall'], prc['precision'],
                         marker='o', markersize=4, color="red", label='{} (f1= {:0.4f})'.format(algo, prc['f1']))
    # 0.5 baseline assumes balanced classes
    noSkill = 0.5
    plt.plot([0, 1], [noSkill, noSkill],
             linestyle='--', color=(0.6, 0.6, 0.6),
             label='random guessing')
    # plt.xlim([0.45, 1.05])
    plt.ylim([0.4, 1.05])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.legend(loc="upper right")
    plt.title("Comparison PRC curves")
    plt.tight_layout()
def precisionRecallCurve(y_test, y_predict, y_proba, fontScale=1,
                         figsize=(10, 6), doPlot=True, title=None, xlims=[0.45, 1.05]):
    """Compute and (optionally) display the precision-recall curve.

    Returns the PR arrays, a 20-point precision/recall/threshold sample table
    ('prt'), AUC-PR, average precision, F1 on the hard predictions, the
    confusion matrix and its row-normalized form, plus the plt module.
    NOTE(review): xlims is accepted but never used here -- confirm intent.
    """
    precision, recall, thresholds = sklearn.metrics.precision_recall_curve(y_test, y_proba)
    prc_auc = sklearn.metrics.auc(recall, precision)
    prc_f1 = sklearn.metrics.f1_score(y_test, y_predict)
    prc_ap = sklearn.metrics.average_precision_score(y_test, y_proba)
    df_confusion = pd.crosstab(pd.Series(y_test, name='Actual'),
                               pd.Series(y_predict, name='Predicted'))
    # BUG FIX: `df_confusion / df_confusion.sum(axis=1)` aligns the row-sum
    # Series on the COLUMN labels, so each cell was divided by the wrong
    # total; divide row-wise explicitly instead.
    df_conf_norm = df_confusion.div(df_confusion.sum(axis=1), axis=0)
    # Sample 20 evenly spaced operating points for quick inspection
    sampleIdx = list(map(int, np.linspace(0, len(thresholds) - 1, 20)))
    prtSample = pd.DataFrame({'precision': precision[sampleIdx],
                              'recall': recall[sampleIdx],
                              'thresholds': thresholds[sampleIdx]})
    sns.set(font_scale=fontScale)
    fig = plt.figure(figsize=figsize)
    plt.plot(recall,
             precision,
             label='PRC: auc = {:0.2f}, ap = {:0.2f}, f1= {:0.2f}'.format(prc_auc, prc_ap, prc_f1))
    # no-skill baseline: precision equals the positive-class prevalence
    noSkill = sum(y_test) / len(y_test)
    plt.plot([0, 1], [noSkill, noSkill], linestyle='--',
             color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.legend(loc="lower left")
    if title is not None:
        plt.title(title)
    else:
        plt.title("Precision-Recall curve")
    plt.tight_layout()
    if doPlot:
        plt.show()
    return ({"precision": precision,
             "recall": recall,
             "thresholds": thresholds,
             "prt": prtSample,
             "auc": prc_auc,
             "ap": prc_ap,
             "f1": prc_f1,
             "confusion": df_confusion,
             "confusion_norm": df_conf_norm,
             'plt': plt,
             })
def cvPCR(output_Xval):
    """Plot per-fold precision-recall curves plus the pooled ('mean') PRC.

    output_Xval: dict as returned by Xval_sklearn -- pooled predictions under
    'y_true'/'y_proba'/'y_predict' and per-fold triples under 'foldsResults'.
    (Function name kept as-is -- 'cvPCR' -- for backward compatibility.)
    """
    sns.set(font_scale=1)
    fig = plt.figure(figsize=(14, 6))
    for i, (labels, y_proba, y_predict) in enumerate(output_Xval['foldsResults']):
        PRCfold = precisionRecallCurve(y_test=labels,
                                       y_predict=y_predict,
                                       y_proba=y_proba,
                                       doPlot=False)
        plt.plot(PRCfold['recall'],
                 PRCfold['precision'],
                 label='PRC fold {} (auc = {:0.2f}, ap = {:0.2f}, f1= {:0.2f})'.format(i + 1,
                                                                                       PRCfold['auc'],
                                                                                       PRCfold['ap'],
                                                                                       PRCfold['f1']))
    # CONSISTENCY FIX: Xval_sklearn (this file) stores the pooled labels under
    # 'y_true', not 'labels' (which raised KeyError); accept either key.
    pooled_labels = output_Xval['y_true'] if 'y_true' in output_Xval else output_Xval['labels']
    PRC = precisionRecallCurve(y_test=pooled_labels,
                               y_predict=output_Xval['y_predict'],
                               y_proba=output_Xval['y_proba'],
                               doPlot=False)
    plt.plot(PRC['recall'],
             PRC['precision'],
             'k--',
             label='mean PRC (auc = {:0.2f}, ap = {:0.2f}, f1= {:0.2f})'.format(PRC['auc'],
                                                                                PRC['ap'],
                                                                                PRC['f1']),
             lw=2)
    # 0.5 baseline assumes balanced classes
    noSkill = .5
    plt.plot([0, 1], [noSkill, noSkill], linestyle='--',
             color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.legend(loc="lower left")
    plt.title("Precision-Recall curve")
    plt.tight_layout()
    plt.show()
# def cvPRC(X, Y, pipe, n_cv=3,
# fontScale=1, doPlot=True, cv=None,
# title=None, figsize=(14, 6),
# randomStateCV = 1
# ):
# if cv is None:
# cv = list(sklearn.model_selection.StratifiedKFold(n_splits=n_cv,
# random_state=randomStateCV).split(X, Y))
# if doPlot:
# sns.set(font_scale=fontScale)
# fig = plt.figure(figsize=figsize)
# yConcat_test = []
# yConcat_proba = []
# yConcat_predict = []
# for i, (train, test) in enumerate(cv):
# outTT = trainTest_sklearn(pipe=pipe,
# X_train=X[train],
# y_train=Y[train],
# X_test=X[test]
# )
# yConcat_test.append(Y[test])
# yConcat_proba.append(outTT['y_proba'])
# yConcat_predict.append(outTT['y_predict'])
# PRCfold = precisionRecallCurve(y_test=Y[test],
# y_predict=outTT['y_predict'],
# y_proba=outTT['y_proba'],
# doPlot=False)
# if doPlot:
# plt.plot(PRCfold['precision'],
# PRCfold['recall'],
# label='PRC fold {} (auc = {:0.2f}, ap = {:0.2f}, f1= {:0.2f})'.format(i+1,
# PRCfold['auc'],
# PRCfold['ap'],
# PRCfold['f1']))
# yConcat_test = np.concatenate(yConcat_test)
# yConcat_proba = np.concatenate(yConcat_proba)
# yConcat_predict = np.concatenate(yConcat_predict)
# PRC = precisionRecallCurve(y_test=yConcat_test,
# y_predict=yConcat_predict,
# y_proba=yConcat_proba,
# doPlot=False)
# sampleIdx = list(map(int, np.linspace(0, len(PRC['thresholds'])-1, 20)))
# prtSample = pd.DataFrame({'precision': PRC['precision'][sampleIdx],
# 'recall': PRC['recall'][sampleIdx],
# 'thresholds': PRC['thresholds'][sampleIdx]})
# if doPlot:
# plt.plot(PRC['precision'],
# PRC['recall'],
# 'k--',
# label='mean PRC (auc = {:0.2f}, ap = {:0.2f}, f1= {:0.2f})'.format(PRC['auc'],
# PRC['ap'],
# PRC['f1']),
# lw=2)
# # no skill
# noSkill = sum(Y) / len(Y)
# plt.plot([0, 1], [noSkill, noSkill], linestyle='--',
# color=(0.6, 0.6, 0.6),
# label='random guessing')
# plt.xlim([-0.05, 1.05])
# plt.ylim([-0.05, 1.05])
# plt.xlabel('Recall')
# plt.ylabel('Precision')
# plt.legend(loc="lower left")
# if title is not None:
# plt.title(title)
# else:
# plt.title("Precision-Recall curve")
# plt.tight_layout()
# # plt.savefig('images/06_10.png', dpi=300)
# plt.show()
# return({"precision": PRC['precision'],
# "recall": PRC['recall'],
# "thresholds": PRC['thresholds'],
# "prt": prtSample,
# "auc": PRC['auc'],
# "ap": PRC['ap'],
# "f1": PRC['f1'],
# "confusion":PRC['confusion'],
# "confusion_norm":PRC['confusion_norm']
# })
def comparePRCs(X, Y, pipes, algorithms, n_cv=3, fontScale=1, title=None,
                figsize=(7, 5), xlims=[-0.05, 1.05],
                figExportPath=None
                ):
    """Overlay cross-validated precision-recall curves of several pipelines.

    X, Y: features and binary labels. pipes/algorithms: parallel lists of
    estimator pipelines and their display names. n_cv: number of stratified
    folds. figExportPath: if given, the figure is also saved there (dpi=2000).
    """
    # BUG FIX: StratifiedKFold raises a ValueError in scikit-learn >= 0.24
    # when random_state is set while shuffle=False; shuffle=True keeps the
    # seeded, reproducible splits the original code intended.
    cv = list(sklearn.model_selection.StratifiedKFold(n_splits=n_cv,
                                                      shuffle=True,
                                                      random_state=1).split(X, Y))
    sns.set(font_scale=fontScale)
    fig = plt.figure(figsize=figsize)
    for pipe, algo in zip(pipes, algorithms):
        # NOTE(review): cvPRC is commented out in this file revision -- this
        # call raises NameError unless cvPRC is defined elsewhere; confirm.
        PRC = cvPRC(X=X, Y=Y, pipe=pipe,
                    doPlot=False, cv=cv)
        plt.plot(PRC['precision'],
                 PRC['recall'],
                 label='{} (auc = {:0.3f}, ap = {:0.3f} , f1= {:0.3f})'.format(algo,
                                                                              PRC['auc'], PRC['ap'], PRC['f1']),
                 lw=2)
    # no-skill baseline: precision equals the positive-class prevalence
    noSkill = sum(Y) / len(Y)
    plt.plot([0, 1], [noSkill, noSkill],
             linestyle='--', color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.xlim(xlims)
    plt.ylim([-0.05, 1.05])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.legend(loc="lower left")
    if title is None:
        plt.title("Comparison")
    else:
        plt.title(title)
    plt.tight_layout()
    if figExportPath is not None:
        plt.savefig(figExportPath, dpi=2000)
    plt.show()
#############################
### ###
### ROC FUNCTIONS ###
### ###
#############################
def receiverOperatingCurve(y_test, y_predict, y_proba,
                           fontScale=1, figsize=(10, 6), doPlot=True, title=None, xlims=[0.45, 1.05]):
    """Compute and (optionally) display the ROC curve of a binary classifier.

    y_test: true labels; y_predict: hard predictions (for accuracy);
    y_proba: positive-class scores (for the curve and AUC).
    Returns a dict with fpr/tpr/thresholds arrays, auc, accuracy, and plt.
    NOTE(review): xlims is accepted but never used here -- confirm intent.
    """
    fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_test, y_proba)
    roc_auc = sklearn.metrics.roc_auc_score(y_test, y_proba)
    roc_accuracy = sklearn.metrics.accuracy_score(y_test, y_predict)
    sns.set(font_scale=fontScale)
    # The figure is always built so the returned plt can be reused by callers;
    # doPlot only controls whether it is shown now.
    fig = plt.figure(figsize=figsize)
    plt.plot(fpr,
             tpr,
             label='ROC: auc = {:0.2f}, acc = {:0.2f}'.format(roc_auc, roc_accuracy))
    # no skill
    plt.plot([0, 1], [0, 1], linestyle='--',
             color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.legend(loc="lower right")
    if title is not None:
        plt.title(title)
    else:
        plt.title("ROC curve")
    plt.tight_layout()
    if doPlot:
        plt.show()
    return ({"fpr": fpr,
             "tpr": tpr,
             "thresholds": thresholds,
             "auc": roc_auc,
             "accuracy": roc_accuracy,
             'plt': plt,
             })
def plotROCs(rocList, myList=None):
    """Overlay precomputed ROC result dicts on one figure.

    rocList: iterable of (algorithm_name, roc_dict) pairs; myList, when given,
    keeps only algorithms whose name is in it. Entries whose 'thresholds' is
    missing or has no len() (a single operating point) are drawn as a red
    marker instead of a curve.
    """
    sns.set(font_scale=1.3)
    fig = plt.figure(figsize=(14, 8))
    for algo, roc in rocList:
        if myList is None:
            goAhead = True
        elif algo in myList:
            goAhead = True
        else:
            goAhead = False
        if goAhead:
            # ROBUSTNESS: the former bare `except:` swallowed every error;
            # narrow it to what the len() probe can actually raise so real
            # bugs still surface.
            try:
                len(roc['thresholds'])
                plt.plot(roc['fpr'],
                         roc['tpr'],
                         label='{}: auc = {:0.2f}, acc = {:0.2f}'.format(algo, roc['auc'], roc['accuracy']), lw=2)
            except (TypeError, KeyError):
                plt.plot(roc['fpr'], roc['tpr'],
                         marker='o', markersize=4, color="red",
                         label='{} (acc = {:0.4f})'.format(algo, roc['accuracy']))
    plt.plot([0, 1], [0, 1], linestyle='--',
             color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.legend(loc="lower right")
    plt.title("Comparison ROC curves")
    plt.tight_layout()
def cvROC(output_Xval):
    """Plot per-fold ROC curves plus the pooled ('mean') ROC from Xval output.

    output_Xval: dict as returned by Xval_sklearn -- pooled predictions under
    'y_true'/'y_proba'/'y_predict' and per-fold triples under 'foldsResults'.
    """
    sns.set(font_scale=1)
    fig = plt.figure(figsize=(14, 6))
    for i, (labels, y_proba, y_predict) in enumerate(output_Xval['foldsResults']):
        ROCfold = receiverOperatingCurve(y_test=labels,
                                         y_predict=y_predict,
                                         y_proba=y_proba,
                                         doPlot=False)
        plt.plot(ROCfold['fpr'],
                 ROCfold['tpr'],
                 label='ROC fold {} (auc = {:0.2f}, acc = {:0.2f})'.format(i + 1,
                                                                           ROCfold['auc'],
                                                                           ROCfold['accuracy']))
    # CONSISTENCY FIX: Xval_sklearn (this file) stores the pooled labels under
    # 'y_true', not 'labels' (which raised KeyError); accept either key.
    pooled_labels = output_Xval['y_true'] if 'y_true' in output_Xval else output_Xval['labels']
    ROC = receiverOperatingCurve(y_test=pooled_labels,
                                 y_predict=output_Xval['y_predict'],
                                 y_proba=output_Xval['y_proba'],
                                 doPlot=False)
    plt.plot(ROC['fpr'],
             ROC['tpr'],
             'k--',
             label='mean ROC (auc = {:0.2f}, acc = {:0.2f})'.format(ROC['auc'],
                                                                    ROC['accuracy']),
             lw=2)
    plt.plot([0, 1], [0, 1], linestyle='--',
             color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.legend(loc="lower right")
    plt.title("ROC curve")
    plt.tight_layout()
    plt.show()
# def plotROC(X, Y, pipe, n_cv = 3, fontScale=1,doPlot=True, cv=None, figsize=(7,5)):
# if cv is None:
# cv = list(sklearn.model_selection.StratifiedKFold(n_splits=n_cv,
# random_state=1).split(X, Y))
# if doPlot:
# sns.set(font_scale=fontScale)
# fig = plt.figure(figsize=figsize)
# mean_tpr = 0.0
# mean_fpr = np.linspace(0, 1, 100)
# all_tpr = []
# for i, (train, test) in enumerate(cv):
# probas = pipe.fit(X[train],
# Y[train]).predict_proba(X[test])
# fpr, tpr, thresholds = sklearn.metrics.roc_curve(Y[test],
# probas[:, 1],
# pos_label=1)
# mean_tpr += interp(mean_fpr, fpr, tpr)
# mean_tpr[0] = 0.0
# roc_auc = sklearn.metrics.auc(fpr, tpr)
# if doPlot:
# plt.plot(fpr,
# tpr,
# label='ROC fold %d (area = %0.2f)'
# % (i+1, roc_auc))
# mean_tpr /= len(cv)
# mean_tpr[-1] = 1.0
# mean_auc = sklearn.metrics.auc(mean_fpr, mean_tpr)
# if doPlot:
# plt.plot(mean_fpr, mean_tpr, 'k--',
# label='mean ROC (area = %0.2f)' % mean_auc, lw=2)
# plt.plot([0, 0, 1],
# [0, 1, 1],
# linestyle=':',
# color='black',
# label='perfect performance')
# plt.plot([0, 1],
# [0, 1],
# linestyle='--',
# color=(0.6, 0.6, 0.6),
# label='random guessing')
# plt.xlim([-0.05, 1.05])
# plt.ylim([-0.05, 1.05])
# plt.xlabel('false positive rate')
# plt.ylabel('true positive rate')
# plt.legend(loc="lower right")
# plt.tight_layout()
# # plt.savefig('images/06_10.png', dpi=300)
# plt.show()
# return({"fpr":mean_fpr,
# "tpr": mean_tpr,
# "auc": mean_auc
# })
def compareROCs(X, Y, pipes, algorithms, n_cv=3, fontScale=1, figsize=(7, 5), exportPath=None):
    """Overlay cross-validated ROC curves of several pipelines on one figure.

    pipes/algorithms are parallel lists; exportPath, when given, saves the figure.
    """
    # BUG FIX: StratifiedKFold raises a ValueError in scikit-learn >= 0.24
    # when random_state is set while shuffle=False; shuffle=True keeps the
    # seeded, reproducible splits the original code intended.
    cv = list(sklearn.model_selection.StratifiedKFold(n_splits=n_cv,
                                                      shuffle=True,
                                                      random_state=1).split(X, Y))
    sns.set(font_scale=fontScale)
    fig = plt.figure(figsize=figsize)
    for pipe, algo in zip(pipes, algorithms):
        # NOTE(review): plotROC is commented out in this file revision -- this
        # call raises NameError unless plotROC is defined elsewhere; confirm.
        ROC = plotROC(X=X, Y=Y, pipe=pipe, doPlot=False, cv=cv)
        plt.plot(ROC['fpr'],
                 ROC['tpr'],
                 label='{} (auc = {:0.3f})'.format(algo, ROC['auc']),
                 lw=2)
    plt.plot([0, 1],
             [0, 1],
             linestyle='--',
             color=(0.6, 0.6, 0.6),
             label='random guessing')
    plt.plot([0, 0, 1],
             [0, 1, 1],
             linestyle=':',
             color='black',
             label='perfect performance')
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('false positive rate')
    plt.ylabel('true positive rate')
    plt.legend(loc="lower right")
    plt.tight_layout()
    if exportPath is not None:
        plt.savefig(exportPath)
    plt.show()
if __name__ == '__main__':
    # Smoke-test entry point: load one prediction dataset as ID dictionaries.
    # (Earlier ad-hoc checks of stratifiedXvalPartitions / load_goldStandard
    # lived here as commented-out code; see version-control history.)
    df_pred, labels_pred, id_mapping = load_data4prediction(
        nameDF='test1-filtered-10_v7-0-1',
        out='IDdict'
    )
    print()
|
import inspect
import pprint
import re
from typing import TYPE_CHECKING
import lale.helpers
if TYPE_CHECKING:
from lale.operators import IndividualOp
def _indent(prefix, string, first_prefix=None):
lines = string.splitlines()
if lines:
if first_prefix is None:
first_prefix = prefix
first_indented = (first_prefix + lines[0]).rstrip()
rest_indented = [(prefix + line).rstrip() for line in lines[1:]]
result = first_indented + "\n" + "\n".join(rest_indented)
return result
else:
return ""
def _value_docstring(value):
return pprint.pformat(value, width=10000, compact=True)
def _kind_tag(schema):
if "anyOf" in schema:
return "union type"
elif "allOf" in schema:
return "intersection type"
elif "not" in schema or "laleNot" in schema:
return "negated type"
elif "type" in schema:
if schema["type"] == "object":
return "dict"
elif schema["type"] == "number":
return "float"
elif isinstance(schema["type"], list):
return " *or* ".join(schema["type"])
else:
return schema["type"]
elif "enum" in schema:
values = schema["enum"]
assert len(values) >= 1
if len(values) == 1:
return _value_docstring(values[0])
elif len(values) == 2:
return " *or* ".join([_value_docstring(v) for v in values])
else:
prefix = ", ".join([_value_docstring(v) for v in values[:-1]])
suffix = ", *or* " + _value_docstring(values[-1])
return prefix + suffix
else:
return "any type"
def _schema_docstring(name, schema, required=True, relevant=True):
    """Render one JSON-schema fragment as a reST parameter docstring entry.

    The entry is "<name> : <comma-separated tags>", optionally followed by the
    schema's description and recursively rendered sub-schemas (anyOf / allOf /
    not / array items / object properties) as indented " - " bullets.
    """
    # Collect short tags (type kind, bounds, default, ...) for the header line.
    tags = []
    if "laleType" in schema:
        tags.append(schema["laleType"])
    else:
        tags.append(_kind_tag(schema))
    if "minimum" in schema:
        op = ">" if schema.get("exclusiveMinimum", False) else ">="
        tags.append(op + _value_docstring(schema["minimum"]))
    if "minimumForOptimizer" in schema:
        tags.append(
            ">=" + _value_docstring(schema["minimumForOptimizer"]) + " for optimizer"
        )
    if "maximum" in schema:
        op = "<" if schema.get("exclusiveMaximum", False) else "<="
        tags.append(op + _value_docstring(schema["maximum"]))
    if "laleMaximum" in schema:
        tags.append("<=" + _value_docstring(schema["laleMaximum"]))
    if "maximumForOptimizer" in schema:
        tags.append(
            "<=" + _value_docstring(schema["maximumForOptimizer"]) + " for optimizer"
        )
    if "distribution" in schema:
        tags.append(schema["distribution"] + " distribution")
    if "minItems" in schema:
        tags.append(">=" + _value_docstring(schema["minItems"]) + " items")
    if "minItemsForOptimizer" in schema:
        tags.append(
            ">="
            + _value_docstring(schema["minItemsForOptimizer"])
            + " items for optimizer"
        )
    if "maxItems" in schema:
        tags.append("<=" + _value_docstring(schema["maxItems"]) + " items")
    if "maxItemsForOptimizer" in schema:
        tags.append(
            "<="
            + _value_docstring(schema["maxItemsForOptimizer"])
            + " items for optimizer"
        )
    if not required:
        tags.append("optional")
    if not relevant or schema.get("forOptimizer", True) is False:
        tags.append("not for optimizer")
    if schema.get("transient", False):
        tags.append("transient")
    if "default" in schema:
        tags.append("default " + _value_docstring(schema["default"]))

    def item_docstring(name, item_schema, required=True):
        # Render a sub-schema and turn it into an indented " - " bullet.
        sd = _schema_docstring(name, item_schema, required=required)
        return _indent("   ", sd, " - ").rstrip()

    body = None
    if "anyOf" in schema:
        item_docstrings = [item_docstring(None, s) for s in schema["anyOf"]]
        if name is not None and name.startswith("_`constraint-"):
            # Constraint anchors drop the leading "dict *of*" noise.
            rexp = re.compile(r"^( - )(dict \*of\* )(.+)")
            item_docstrings = [rexp.sub(r"\1\3", s) for s in item_docstrings]
        if len(item_docstrings) > 1:
            rexp = re.compile(r"^( - )(.+)")
            rest = [rexp.sub(r"\1*or* \2", s) for s in item_docstrings[1:]]
            item_docstrings = item_docstrings[:1] + rest
        body = "\n\n".join(item_docstrings)
    elif "allOf" in schema:
        item_docstrings = [item_docstring(None, s) for s in schema["allOf"]]
        if len(item_docstrings) > 1:
            rexp = re.compile(r"^( - )(.+)")
            rest = [rexp.sub(r"\1*and* \2", s) for s in item_docstrings[1:]]
            item_docstrings = item_docstrings[:1] + rest
        body = "\n\n".join(item_docstrings)
    elif "not" in schema:
        body = item_docstring(None, schema["not"])
    elif "laleNot" in schema:
        # BUG FIX: the f-string previously nested double quotes inside a
        # double-quoted literal (schema["laleNot"]) -- a SyntaxError before
        # Python 3.12 (PEP 701 only allows quote reuse from 3.12 on).
        body = f" - '{schema['laleNot']}'"
    elif schema.get("type", "") == "array":
        if "items" in schema:
            items_schemas = schema["items"]
            if isinstance(items_schemas, dict):
                body = item_docstring("items", items_schemas)
            else:
                items_docstrings = [
                    item_docstring(f"item {i}", s) for i, s in enumerate(items_schemas)
                ]
                body = "\n\n".join(items_docstrings)
    elif schema.get("type", "") == "object" and "properties" in schema:
        item_docstrings = [
            item_docstring(k, s) for k, s in schema["properties"].items()
        ]
        body = "\n\n".join(item_docstrings)
    result = name + " : " if name else ""
    try:
        result += ", ".join(tags)
    except BaseException as e:
        raise ValueError(f"Unexpected internal error for {schema}.") from e
    assert len(result) > 0 and result.rstrip() == result
    if result.startswith("-"):
        # Escape a leading hyphen so reST does not parse it as a bullet.
        result = "\\" + result
    if body is not None and body.find("\n") == -1:
        # A single-bullet body folds into the header line as "... *of* ...".
        assert body.startswith(" - ")
        # NOTE(review): body[4:] assumes a 4-char bullet prefix while the
        # " - " literals above are 3 chars -- verify bullet width upstream.
        result += " *of* " + body[4:]
    if "description" in schema:
        result += "\n\n" + _indent("  ", schema["description"]).rstrip()
    if body is not None and body.find("\n") != -1:
        result += "\n\n" + body
    return result.rstrip()
def _params_docstring(params_schema, hp2constraints=None):
if params_schema is None:
return ""
params = params_schema.get("properties", {})
if len(params) == 0:
result = ""
else:
result = "Parameters\n----------\n"
for param_name, param_schema in params.items():
required = param_name in params_schema.get("required", {})
relevant = (
"relevantToOptimizer" not in params_schema
or param_name in params_schema["relevantToOptimizer"]
)
item_docstring = _schema_docstring(param_name, param_schema, required, relevant)
result += _indent(" ", item_docstring, "").rstrip()
if hp2constraints is not None and param_name in hp2constraints:
constraints = [f"`constraint-{i}`_" for i in hp2constraints[param_name]]
result += f"\n\n See also {", ".join(constraints)}."
result += "\n\n"
return result
def _arg_docstring(val):
if val is None:
return str("None")
if isinstance(val, (int, float)):
return str(val)
elif isinstance(val, list):
return [_arg_docstring(x) for x in val]
elif isinstance(val, dict):
return {_arg_docstring(k): _arg_docstring(v) for k, v in val.items()}
else:
return f'"{str(val)}"'
def _paramlist_docstring(hyperparams_schema) -> str:
params = hyperparams_schema.get("allOf", None)
if params is None:
return ""
if isinstance(params, list):
if not params:
return ""
params = params[0]
if params is None:
return ""
params = params.get("properties", {})
if len(params) == 0:
return ""
result = ", *"
for param_name, param_schema in params.items():
result += f", {param_name}"
default = param_schema.get("default", None)
if "default" in param_schema:
default = param_schema["default"]
default_str = _arg_docstring(default)
if default_str is not None:
result += f"={default_str}"
return result
def _get_hp2constraints(hyperparams_schema):
result = {}
for i in range(1, len(hyperparams_schema["allOf"])):
schema = hyperparams_schema["allOf"][i]
for disjunct in schema.get("anyOf", []):
for hyperparam in disjunct.get("properties", {}).keys():
result[hyperparam] = result.get(hyperparam, []) + [i]
return result
def _hyperparams_docstring(hyperparams_schema):
    """Render the hyperparameter Parameters section, plus a Notes section
    listing each constraint schema from allOf[1:]."""
    all_of = hyperparams_schema["allOf"]
    result = _params_docstring(all_of[0], _get_hp2constraints(hyperparams_schema))
    constraint_entries = [
        _schema_docstring(f"_`constraint-{i}`", constraint_schema)
        for i, constraint_schema in enumerate(all_of[1:], start=1)
    ]
    if constraint_entries:
        result += "Notes\n-----\n" + "\n\n".join(constraint_entries)
    return result
def _method_docstring(description, ready_string, params_schema, result_schema=None):
    """Render the docstring for a generated method stub: description,
    optional availability note, Parameters, and optional Returns."""
    parts = [description, "\n\n"]
    if ready_string is not None:
        parts.append("*Note: " + ready_string + "*\n\n")
        parts.append(
            "Once this method is available, it will have the following signature: \n\n"
        )
    parts.append(_params_docstring(params_schema))
    if result_schema is not None:
        parts.append("Returns\n-------\n")
        parts.append(_indent(" ", _schema_docstring("result", result_schema), ""))
        parts.append("\n\n")
    return "".join(parts)
def _cls_docstring(cls, combined_schemas):
descr_lines = combined_schemas["description"].splitlines()
result = descr_lines[0]
result += "\n\nThis documentation is auto-generated from JSON schemas.\n\n"
more_description = "\n".join(descr_lines[1:]).strip()
if more_description != "":
result += more_description + "\n\n"
return result
def _set_docstrings_helper(cls, lale_op, combined_schemas):
    """Attach generated documentation to the artificial sphinx class *cls*.

    Sets the class docstring, synthesizes an ``__init__`` whose signature
    mirrors the hyperparameter schema, and installs documented method
    stubs (fit/transform/predict/...) for each method the impl class has.

    Bug fixed in this revision: ``fake_fun.__name__`` was assigned the
    literal string ``"fun_name"`` instead of the ``fun_name`` variable,
    so every stub was named "fun_name" in generated docs.
    """
    properties = combined_schemas.get("properties", None)
    assert cls.__doc__ is None
    impl_cls = lale_op.impl_class
    cls.__doc__ = _cls_docstring(impl_cls, combined_schemas)
    if properties is not None:
        hyperparams_schema = properties.get("hyperparams", None)
        if hyperparams_schema is not None:
            doc = _hyperparams_docstring(hyperparams_schema)
            try:
                args = _paramlist_docstring(hyperparams_schema)
                code = f"""
def __init__(self{args}):
    pass
"""
                import math

                d = {}
                # nan/inf may appear among generated defaults; make them
                # resolvable names when exec-ing the synthetic signature.
                exec(code, {"nan": math.nan, "inf": math.inf}, d)
                __init__ = d["__init__"]  # type: ignore
            except BaseException as e:
                import warnings

                warnings.warn(
                    f"""While trying to generate a docstring for {cls.__name__}, when trying
to create an init method with the appropriate parameter list, an exception was raised: {e}"""
                )

                # Fallback: parameterless constructor so docs still build.
                def __init__(self):
                    pass

            __init__.__doc__ = doc
            cls.__init__ = __init__

    def make_fun(
        fun_name,
        fake_fun,
        description,
        ready_string,
        params_schema_key,
        result_schema_key=None,
    ):
        # Install *fake_fun* as a documented stub for fun_name, but only
        # when the impl class actually provides that method.
        params_schema = None
        result_schema = None
        if properties is not None:
            if params_schema_key is not None:
                params_schema = properties.get(params_schema_key, None)
            if result_schema_key is not None:
                result_schema = properties.get(result_schema_key, None)
        if hasattr(impl_cls, fun_name):
            ready_string_to_use = None
            if not hasattr(cls, fun_name):
                ready_string_to_use = ready_string
            doc = _method_docstring(
                description, ready_string_to_use, params_schema, result_schema
            )
            setattr(cls, fun_name, fake_fun)
            # FIX: use the variable, not the literal string "fun_name".
            fake_fun.__name__ = fun_name
            fake_fun.__doc__ = doc

    def fit(self, X, y=None, **fit_params):
        pass

    make_fun(
        "fit",
        fit,
        "Train the operator.",
        "The fit method is not available until this operator is trainable.",
        "input_fit",
    )

    def transform(self, X, y=None):
        pass

    make_fun(
        "transform",
        transform,
        "Transform the data.",
        "The transform method is not available until this operator is trained.",
        "input_transform",
        "output_transform",
    )

    def predict(self, X):
        pass

    make_fun(
        "predict",
        predict,
        "Make predictions.",
        "The predict method is not available until this operator is trained.",
        "input_predict",
        "output_predict",
    )

    def predict_proba(self, X):
        pass

    make_fun(
        "predict_proba",
        predict_proba,
        "Probability estimates for all classes.",
        "The predict_proba method is not available until this operator is trained.",
        "input_predict_proba",
        "output_predict_proba",
    )

    def decision_function(self, X):
        pass

    make_fun(
        "decision_function",
        decision_function,
        "Confidence scores for all classes.",
        "The decision_function method is not available until this operator is trained.",
        "input_decision_function",
        "output_decision_function",
    )
def set_docstrings(lale_op: "IndividualOp"):
    """
    If we are running under sphinx, this will take
    a variable whose value is a lale operator
    and change it to a value of an artificial class
    with appropriately documented methods.
    """
    # __sphinx_build__ only exists when injected by a Sphinx conf.py; in a
    # normal import it is undefined and the outer NameError handler makes
    # this function a no-op.
    try:
        if __sphinx_build__: # type: ignore
            try:
                # impl = lale_op.impl_class
                # Find the module of the caller so the artificial class can
                # be installed there under the caller's variable name.
                frm = inspect.stack()[1]
                module = inspect.getmodule(frm[0])
                assert module is not None
                combined_schemas = lale_op._schemas
                name = lale.helpers.arg_name(pos=0, level=1)
                assert name is not None
                # we want to make sure that the Operator constructor args are not shown
                def __init__():
                    pass
                new_class = type(name, (lale_op.__class__,), {"__init__": __init__}) # type: ignore
                new_class.__module__ = module.__name__
                module.__dict__[name] = new_class
                _set_docstrings_helper(new_class, lale_op, combined_schemas)
            # NOTE(review): an inner NameError is escalated to ValueError so it
            # is not mistaken for the "not running under sphinx" case below.
            except NameError as e:
                raise ValueError(e)
    except NameError:
        pass
| import inspect
import pprint
import re
from typing import TYPE_CHECKING
import lale.helpers
if TYPE_CHECKING:
from lale.operators import IndividualOp
def _indent(prefix, string, first_prefix=None):
lines = string.splitlines()
if lines:
if first_prefix is None:
first_prefix = prefix
first_indented = (first_prefix + lines[0]).rstrip()
rest_indented = [(prefix + line).rstrip() for line in lines[1:]]
result = first_indented + "\n" + "\n".join(rest_indented)
return result
else:
return ""
def _value_docstring(value):
return pprint.pformat(value, width=10000, compact=True)
def _kind_tag(schema):
if "anyOf" in schema:
return "union type"
elif "allOf" in schema:
return "intersection type"
elif "not" in schema or "laleNot" in schema:
return "negated type"
elif "type" in schema:
if schema["type"] == "object":
return "dict"
elif schema["type"] == "number":
return "float"
elif isinstance(schema["type"], list):
return " *or* ".join(schema["type"])
else:
return schema["type"]
elif "enum" in schema:
values = schema["enum"]
assert len(values) >= 1
if len(values) == 1:
return _value_docstring(values[0])
elif len(values) == 2:
return " *or* ".join([_value_docstring(v) for v in values])
else:
prefix = ", ".join([_value_docstring(v) for v in values[:-1]])
suffix = ", *or* " + _value_docstring(values[-1])
return prefix + suffix
else:
return "any type"
def _schema_docstring(name, schema, required=True, relevant=True):
    """Render one reST docstring entry for *name* described by JSON *schema*.

    The entry is ``<name> : <comma-separated tags>`` optionally followed by
    the schema's description and an indented body for nested schemas
    (anyOf/allOf/not/array items/object properties). Recursive.
    """
    # Collect short tags: kind, numeric/item bounds, optimizer hints, default.
    tags = []
    if "laleType" in schema:
        tags.append(schema["laleType"])
    else:
        tags.append(_kind_tag(schema))
    if "minimum" in schema:
        op = ">" if schema.get("exclusiveMinimum", False) else ">="
        tags.append(op + _value_docstring(schema["minimum"]))
    if "minimumForOptimizer" in schema:
        tags.append(
            ">=" + _value_docstring(schema["minimumForOptimizer"]) + " for optimizer"
        )
    if "maximum" in schema:
        op = "<" if schema.get("exclusiveMaximum", False) else "<="
        tags.append(op + _value_docstring(schema["maximum"]))
    if "laleMaximum" in schema:
        tags.append("<=" + _value_docstring(schema["laleMaximum"]))
    if "maximumForOptimizer" in schema:
        tags.append(
            "<=" + _value_docstring(schema["maximumForOptimizer"]) + " for optimizer"
        )
    if "distribution" in schema:
        tags.append(schema["distribution"] + " distribution")
    if "minItems" in schema:
        tags.append(">=" + _value_docstring(schema["minItems"]) + " items")
    if "minItemsForOptimizer" in schema:
        tags.append(
            ">="
            + _value_docstring(schema["minItemsForOptimizer"])
            + " items for optimizer"
        )
    if "maxItems" in schema:
        tags.append("<=" + _value_docstring(schema["maxItems"]) + " items")
    if "maxItemsForOptimizer" in schema:
        tags.append(
            "<="
            + _value_docstring(schema["maxItemsForOptimizer"])
            + " items for optimizer"
        )
    if not required:
        tags.append("optional")
    if not relevant or schema.get("forOptimizer", True) is False:
        tags.append("not for optimizer")
    if schema.get("transient", False):
        tags.append("transient")
    if "default" in schema:
        tags.append("default " + _value_docstring(schema["default"]))
    # Recursive helper: renders a nested schema as an indented list item.
    def item_docstring(name, item_schema, required=True):
        sd = _schema_docstring(name, item_schema, required=required)
        return _indent(" ", sd, " - ").rstrip()
    body = None
    if "anyOf" in schema:
        item_docstrings = [item_docstring(None, s) for s in schema["anyOf"]]
        # Constraint entries drop the "dict *of*" prefix for readability.
        if name is not None and name.startswith("_`constraint-"):
            rexp = re.compile(r"^( - )(dict \*of\* )(.+)")
            item_docstrings = [rexp.sub(r"\1\3", s) for s in item_docstrings]
        if len(item_docstrings) > 1:
            rexp = re.compile(r"^( - )(.+)")
            rest = [rexp.sub(r"\1*or* \2", s) for s in item_docstrings[1:]]
            item_docstrings = item_docstrings[:1] + rest
        body = "\n\n".join(item_docstrings)
    elif "allOf" in schema:
        item_docstrings = [item_docstring(None, s) for s in schema["allOf"]]
        if len(item_docstrings) > 1:
            rexp = re.compile(r"^( - )(.+)")
            rest = [rexp.sub(r"\1*and* \2", s) for s in item_docstrings[1:]]
            item_docstrings = item_docstrings[:1] + rest
        body = "\n\n".join(item_docstrings)
    elif "not" in schema:
        body = item_docstring(None, schema["not"])
    elif "laleNot" in schema:
        body = f" - '{schema['laleNot']}'"
    elif schema.get("type", "") == "array":
        if "items" in schema:
            items_schemas = schema["items"]
            if isinstance(items_schemas, dict):
                body = item_docstring("items", items_schemas)
            else:
                items_docstrings = [
                    item_docstring(f"item {i}", s) for i, s in enumerate(items_schemas)
                ]
                body = "\n\n".join(items_docstrings)
    elif schema.get("type", "") == "object" and "properties" in schema:
        item_docstrings = [
            item_docstring(k, s) for k, s in schema["properties"].items()
        ]
        body = "\n\n".join(item_docstrings)
    result = name + " : " if name else ""
    try:
        result += ", ".join(tags)
    except BaseException as e:
        raise ValueError(f"Unexpected internal error for {schema}.") from e
    assert len(result) > 0 and result.rstrip() == result
    # A leading "-" would be parsed as a reST list item; escape it.
    if result.startswith("-"):
        result = "\\" + result
    # Single-line bodies are folded inline as "<tags> *of* <body>".
    if body is not None and body.find("\n") == -1:
        assert body.startswith(" - ")
        result += " *of* " + body[4:]
    if "description" in schema:
        result += "\n\n" + _indent(" ", schema["description"]).rstrip()
    if body is not None and body.find("\n") != -1:
        result += "\n\n" + body
    return result.rstrip()
def _params_docstring(params_schema, hp2constraints=None):
if params_schema is None:
return ""
params = params_schema.get("properties", {})
if len(params) == 0:
result = ""
else:
result = "Parameters\n----------\n"
for param_name, param_schema in params.items():
required = param_name in params_schema.get("required", {})
relevant = (
"relevantToOptimizer" not in params_schema
or param_name in params_schema["relevantToOptimizer"]
)
item_docstring = _schema_docstring(param_name, param_schema, required, relevant)
result += _indent(" ", item_docstring, "").rstrip()
if hp2constraints is not None and param_name in hp2constraints:
constraints = [f"`constraint-{i}`_" for i in hp2constraints[param_name]]
result += f"\n\n See also {', '.join(constraints)}."
result += "\n\n"
return result
def _arg_docstring(val):
if val is None:
return str("None")
if isinstance(val, (int, float)):
return str(val)
elif isinstance(val, list):
return [_arg_docstring(x) for x in val]
elif isinstance(val, dict):
return {_arg_docstring(k): _arg_docstring(v) for k, v in val.items()}
else:
return f'"{str(val)}"'
def _paramlist_docstring(hyperparams_schema) -> str:
params = hyperparams_schema.get("allOf", None)
if params is None:
return ""
if isinstance(params, list):
if not params:
return ""
params = params[0]
if params is None:
return ""
params = params.get("properties", {})
if len(params) == 0:
return ""
result = ", *"
for param_name, param_schema in params.items():
result += f", {param_name}"
default = param_schema.get("default", None)
if "default" in param_schema:
default = param_schema["default"]
default_str = _arg_docstring(default)
if default_str is not None:
result += f"={default_str}"
return result
def _get_hp2constraints(hyperparams_schema):
result = {}
for i in range(1, len(hyperparams_schema["allOf"])):
schema = hyperparams_schema["allOf"][i]
for disjunct in schema.get("anyOf", []):
for hyperparam in disjunct.get("properties", {}).keys():
result[hyperparam] = result.get(hyperparam, []) + [i]
return result
def _hyperparams_docstring(hyperparams_schema):
    """Render the Parameters section from allOf[0] plus a Notes section
    for the constraint schemas in allOf[1:]."""
    schemas = hyperparams_schema["allOf"]
    result = _params_docstring(schemas[0], _get_hp2constraints(hyperparams_schema))
    notes = [
        _schema_docstring(f"_`constraint-{i}`", schemas[i])
        for i in range(1, len(schemas))
    ]
    if notes:
        result += "Notes\n-----\n" + "\n\n".join(notes)
    return result
def _method_docstring(description, ready_string, params_schema, result_schema=None):
    """Render a stub-method docstring: description, optional availability
    note, Parameters section, and optional Returns section."""
    pieces = [description, "\n\n"]
    if ready_string is not None:
        pieces += [
            "*Note: " + ready_string + "*\n\n",
            "Once this method is available, it will have the following signature: \n\n",
        ]
    pieces.append(_params_docstring(params_schema))
    if result_schema is not None:
        returns_entry = _schema_docstring("result", result_schema)
        pieces += ["Returns\n-------\n", _indent(" ", returns_entry, ""), "\n\n"]
    return "".join(pieces)
def _cls_docstring(cls, combined_schemas):
descr_lines = combined_schemas["description"].splitlines()
result = descr_lines[0]
result += "\n\nThis documentation is auto-generated from JSON schemas.\n\n"
more_description = "\n".join(descr_lines[1:]).strip()
if more_description != "":
result += more_description + "\n\n"
return result
def _set_docstrings_helper(cls, lale_op, combined_schemas):
    """Attach generated documentation to the artificial sphinx class *cls*.

    Sets the class docstring, synthesizes an ``__init__`` mirroring the
    hyperparameter schema, and installs documented stubs for each method
    the impl class provides (fit/transform/predict/...).

    Bug fixed in this revision: ``fake_fun.__name__`` was assigned the
    literal string ``"fun_name"`` instead of the ``fun_name`` variable,
    so every stub was named "fun_name" in generated docs.
    """
    properties = combined_schemas.get("properties", None)
    assert cls.__doc__ is None
    impl_cls = lale_op.impl_class
    cls.__doc__ = _cls_docstring(impl_cls, combined_schemas)
    if properties is not None:
        hyperparams_schema = properties.get("hyperparams", None)
        if hyperparams_schema is not None:
            doc = _hyperparams_docstring(hyperparams_schema)
            try:
                args = _paramlist_docstring(hyperparams_schema)
                code = f"""
def __init__(self{args}):
    pass
"""
                import math

                d = {}
                # nan/inf may appear among generated defaults; make them
                # resolvable names when exec-ing the synthetic signature.
                exec(code, {"nan": math.nan, "inf": math.inf}, d)
                __init__ = d["__init__"]  # type: ignore
            except BaseException as e:
                import warnings

                warnings.warn(
                    f"""While trying to generate a docstring for {cls.__name__}, when trying
to create an init method with the appropriate parameter list, an exception was raised: {e}"""
                )

                # Fallback: parameterless constructor so docs still build.
                def __init__(self):
                    pass

            __init__.__doc__ = doc
            cls.__init__ = __init__

    def make_fun(
        fun_name,
        fake_fun,
        description,
        ready_string,
        params_schema_key,
        result_schema_key=None,
    ):
        # Install *fake_fun* as a documented stub for fun_name, but only
        # when the impl class actually provides that method.
        params_schema = None
        result_schema = None
        if properties is not None:
            if params_schema_key is not None:
                params_schema = properties.get(params_schema_key, None)
            if result_schema_key is not None:
                result_schema = properties.get(result_schema_key, None)
        if hasattr(impl_cls, fun_name):
            ready_string_to_use = None
            if not hasattr(cls, fun_name):
                ready_string_to_use = ready_string
            doc = _method_docstring(
                description, ready_string_to_use, params_schema, result_schema
            )
            setattr(cls, fun_name, fake_fun)
            # FIX: use the variable, not the literal string "fun_name".
            fake_fun.__name__ = fun_name
            fake_fun.__doc__ = doc

    def fit(self, X, y=None, **fit_params):
        pass

    make_fun(
        "fit",
        fit,
        "Train the operator.",
        "The fit method is not available until this operator is trainable.",
        "input_fit",
    )

    def transform(self, X, y=None):
        pass

    make_fun(
        "transform",
        transform,
        "Transform the data.",
        "The transform method is not available until this operator is trained.",
        "input_transform",
        "output_transform",
    )

    def predict(self, X):
        pass

    make_fun(
        "predict",
        predict,
        "Make predictions.",
        "The predict method is not available until this operator is trained.",
        "input_predict",
        "output_predict",
    )

    def predict_proba(self, X):
        pass

    make_fun(
        "predict_proba",
        predict_proba,
        "Probability estimates for all classes.",
        "The predict_proba method is not available until this operator is trained.",
        "input_predict_proba",
        "output_predict_proba",
    )

    def decision_function(self, X):
        pass

    make_fun(
        "decision_function",
        decision_function,
        "Confidence scores for all classes.",
        "The decision_function method is not available until this operator is trained.",
        "input_decision_function",
        "output_decision_function",
    )
def set_docstrings(lale_op: "IndividualOp"):
    """
    If we are running under sphinx, this will take
    a variable whose value is a lale operator
    and change it to a value of an artificial class
    with appropriately documented methods.
    """
    # __sphinx_build__ only exists when injected by a Sphinx conf.py; in a
    # normal import it is undefined and the outer NameError handler makes
    # this function a no-op.
    try:
        if __sphinx_build__: # type: ignore
            try:
                # impl = lale_op.impl_class
                # Find the module of the caller so the artificial class can
                # be installed there under the caller's variable name.
                frm = inspect.stack()[1]
                module = inspect.getmodule(frm[0])
                assert module is not None
                combined_schemas = lale_op._schemas
                name = lale.helpers.arg_name(pos=0, level=1)
                assert name is not None
                # we want to make sure that the Operator constructor args are not shown
                def __init__():
                    pass
                new_class = type(name, (lale_op.__class__,), {"__init__": __init__}) # type: ignore
                new_class.__module__ = module.__name__
                module.__dict__[name] = new_class
                _set_docstrings_helper(new_class, lale_op, combined_schemas)
            # NOTE(review): an inner NameError is escalated to ValueError so it
            # is not mistaken for the "not running under sphinx" case below.
            except NameError as e:
                raise ValueError(e)
    except NameError:
        pass
|
from telegram import ParseMode, ReplyKeyboardMarkup, ReplyKeyboardRemove
from telegram.ext import ConversationHandler
from pdf_bot.analytics import TaskType
from pdf_bot.consts import (
BACK,
BY_PERCENT,
TO_DIMENSIONS,
WAIT_SCALE_DIMENSION,
WAIT_SCALE_PERCENT,
WAIT_SCALE_TYPE,
)
from pdf_bot.files.utils import get_back_markup
from pdf_bot.language import set_lang
from pdf_bot.utils import process_pdf
def ask_scale_type(update, context):
    """Ask whether to scale by percent or to fixed dimensions; returns the
    WAIT_SCALE_TYPE conversation state."""
    _ = set_lang(update, context)
    markup = ReplyKeyboardMarkup(
        [[_(BY_PERCENT), _(TO_DIMENSIONS)], [_(BACK)]],
        one_time_keyboard=True,
        resize_keyboard=True,
    )
    update.effective_message.reply_text(
        _("Select the scale type that you'll like to perform"),
        reply_markup=markup,
    )
    return WAIT_SCALE_TYPE
def ask_scale_value(update, context, ask_percent=True):
    """Prompt for either target dimensions or scaling factors and return
    the matching conversation state."""
    _ = set_lang(update, context)
    message = update.effective_message
    reply_markup = get_back_markup(update, context)
    if message.text == _(TO_DIMENSIONS) or not ask_percent:
        example = _("Example: 150 200")
        message.reply_text(
            "{desc_1}\n{desc_2}\n\n{desc_3}".format(
                desc_1=_("Send me the width and height"),
                desc_2=f"<b>{example}</b>",
                desc_3=_("This will set the width to 150 and height to 200"),
            ),
            reply_markup=reply_markup,
            parse_mode=ParseMode.HTML,
        )
        return WAIT_SCALE_DIMENSION
    example = _("Example: 2 0.5")
    message.reply_text(
        "{desc_1}\n{desc_2}\n\n{desc_3}".format(
            desc_1=_("Send me the scaling factors for the horizontal and vertical axes"),
            desc_2=f"<b>{example}</b>",
            desc_3=_("This will double the horizontal axis and halve the vertical axis"),
        ),
        reply_markup=reply_markup,
        parse_mode=ParseMode.HTML,
    )
    return WAIT_SCALE_PERCENT
def check_scale_percent(update, context):
    """Validate the two scaling factors; go back, re-ask on bad input, or
    run the scaling task."""
    _ = set_lang(update, context)
    message = update.effective_message
    text = message.text
    if text == _(BACK):
        return ask_scale_type(update, context)
    try:
        # ValueError covers both a non-numeric token and a wrong count.
        x, y = (float(part) for part in text.split())
    except ValueError:
        message.reply_text(
            _("The scaling factors {values} are invalid, please try again").format(
                values=f"<b>{text}</b>"
            ),
            parse_mode=ParseMode.HTML,
        )
        return ask_scale_value(update, context)
    return scale_pdf(update, context, percent=(x, y))
def check_scale_dimension(update, context):
    """Validate the target width/height; go back, re-ask on bad input, or
    run the scaling task."""
    _ = set_lang(update, context)
    message = update.effective_message
    text = message.text
    if text == _(BACK):
        return ask_scale_type(update, context)
    try:
        # ValueError covers both a non-numeric token and a wrong count.
        x, y = (float(part) for part in text.split())
    except ValueError:
        message.reply_text(
            _("The dimensions {values} are invalid, please try again").format(
                values=f"<b>{text}</b>"
            ),
            parse_mode=ParseMode.HTML,
        )
        return ask_scale_value(update, context, ask_percent=False)
    return scale_pdf(update, context, dim=(x, y))
def scale_pdf(update, context, percent=None, dim=None):
    """Announce and run the scaling task (by factors when *percent* is
    given, otherwise to fixed *dim*); ends the conversation."""
    _ = set_lang(update, context)
    message = update.effective_message
    if percent is not None:
        message.reply_text(
            _(
                "Scaling your PDF file, horizontally by {horizontal} "
                "and vertically by {vertical}"
            ).format(
                horizontal=f"<b>{percent[0]}</b>",
                vertical=f"<b>{percent[1]}</b>",
            ),
            reply_markup=ReplyKeyboardRemove(),
            parse_mode=ParseMode.HTML,
        )
        process_pdf(update, context, TaskType.scale_pdf, scale_by=percent)
    else:
        message.reply_text(
            _(
                "Scaling your PDF file with width of {width} and height of {height}"
            ).format(width=f"<b>{dim[0]}</b>", height=f"<b>{dim[1]}</b>"),
            reply_markup=ReplyKeyboardRemove(),
            parse_mode=ParseMode.HTML,
        )
        process_pdf(update, context, TaskType.scale_pdf, scale_to=dim)
    return ConversationHandler.END
| from telegram import ParseMode, ReplyKeyboardMarkup, ReplyKeyboardRemove
from telegram.ext import ConversationHandler
from pdf_bot.analytics import TaskType
from pdf_bot.consts import (
BACK,
BY_PERCENT,
TO_DIMENSIONS,
WAIT_SCALE_DIMENSION,
WAIT_SCALE_PERCENT,
WAIT_SCALE_TYPE,
)
from pdf_bot.files.utils import get_back_markup
from pdf_bot.language import set_lang
from pdf_bot.utils import process_pdf
def ask_scale_type(update, context):
    """Show the scale-type keyboard (percent / dimensions / back) and
    return the WAIT_SCALE_TYPE state."""
    _ = set_lang(update, context)
    rows = [[_(BY_PERCENT), _(TO_DIMENSIONS)], [_(BACK)]]
    update.effective_message.reply_text(
        _("Select the scale type that you'll like to perform"),
        reply_markup=ReplyKeyboardMarkup(
            rows, one_time_keyboard=True, resize_keyboard=True
        ),
    )
    return WAIT_SCALE_TYPE
def ask_scale_value(update, context, ask_percent=True):
    """Prompt for either target dimensions or scaling factors and return
    the matching conversation state."""
    _ = set_lang(update, context)
    message = update.effective_message
    reply_markup = get_back_markup(update, context)
    wants_dimensions = message.text == _(TO_DIMENSIONS) or not ask_percent
    if wants_dimensions:
        message.reply_text(
            "{desc_1}\n{desc_2}\n\n{desc_3}".format(
                desc_1=_("Send me the width and height"),
                desc_2=f"<b>{_('Example: 150 200')}</b>",
                desc_3=_("This will set the width to 150 and height to 200"),
            ),
            reply_markup=reply_markup,
            parse_mode=ParseMode.HTML,
        )
        return WAIT_SCALE_DIMENSION
    message.reply_text(
        "{desc_1}\n{desc_2}\n\n{desc_3}".format(
            desc_1=_("Send me the scaling factors for the horizontal and vertical axes"),
            desc_2=f"<b>{_('Example: 2 0.5')}</b>",
            desc_3=_("This will double the horizontal axis and halve the vertical axis"),
        ),
        reply_markup=reply_markup,
        parse_mode=ParseMode.HTML,
    )
    return WAIT_SCALE_PERCENT
def check_scale_percent(update, context):
    """Parse the two scaling factors from the message; back out, re-ask on
    invalid input, or run the scale task."""
    _ = set_lang(update, context)
    message = update.effective_message
    text = message.text
    if text == _(BACK):
        return ask_scale_type(update, context)
    try:
        factors = tuple(float(tok) for tok in text.split())
        x, y = factors
    except ValueError:
        message.reply_text(
            _("The scaling factors {values} are invalid, please try again").format(
                values=f"<b>{text}</b>"
            ),
            parse_mode=ParseMode.HTML,
        )
        return ask_scale_value(update, context)
    return scale_pdf(update, context, percent=(x, y))
def check_scale_dimension(update, context):
    """Parse the target width/height from the message; back out, re-ask on
    invalid input, or run the scale task."""
    _ = set_lang(update, context)
    message = update.effective_message
    text = message.text
    if text == _(BACK):
        return ask_scale_type(update, context)
    try:
        dims = tuple(float(tok) for tok in text.split())
        x, y = dims
    except ValueError:
        message.reply_text(
            _("The dimensions {values} are invalid, please try again").format(
                values=f"<b>{text}</b>"
            ),
            parse_mode=ParseMode.HTML,
        )
        return ask_scale_value(update, context, ask_percent=False)
    return scale_pdf(update, context, dim=(x, y))
def scale_pdf(update, context, percent=None, dim=None):
    """Announce the scaling operation and hand off to process_pdf; scaling
    is by factors when *percent* is given, otherwise to *dim*."""
    _ = set_lang(update, context)
    if percent is not None:
        announcement = _(
            "Scaling your PDF file, horizontally by {horizontal} "
            "and vertically by {vertical}"
        ).format(
            horizontal=f"<b>{percent[0]}</b>",
            vertical=f"<b>{percent[1]}</b>",
        )
        task_kwargs = {"scale_by": percent}
    else:
        announcement = _(
            "Scaling your PDF file with width of {width} and height of {height}"
        ).format(width=f"<b>{dim[0]}</b>", height=f"<b>{dim[1]}</b>")
        task_kwargs = {"scale_to": dim}
    update.effective_message.reply_text(
        announcement,
        reply_markup=ReplyKeyboardRemove(),
        parse_mode=ParseMode.HTML,
    )
    process_pdf(update, context, TaskType.scale_pdf, **task_kwargs)
    return ConversationHandler.END
|
import logging
from functools import wraps
import airflow
from airflow.api.common.experimental import delete_dag
from airflow.exceptions import DagFileExists, DagNotFound
from airflow.utils.log.logging_mixin import LoggingMixin
from flask import current_app, flash, g, redirect, request, url_for
from flask_appbuilder import BaseView as AppBuilderBaseView
from flask_appbuilder import expose, has_access
from dagen.exceptions import TemplateNotFoundError
from dagen.internal import refresh_dagbag
from dagen.models import DagenDag
from dagen.query import DagenDagQueryset, DagenDagVersionQueryset
from dagen.utils import get_template_loader, refresh_dagen_templates
from dagen.www.forms import BulkSyncDagenForm
from dagen.www.utils import login_required
class DagenFABView(AppBuilderBaseView, LoggingMixin):
    """Flask-AppBuilder view for managing Dagen (template-generated) DAGs.

    Bug fixed in this revision: the flash message in ``edit`` used a
    malformed f-string (``f'Dagen "{dag_id}' version ...``) that closed the
    string literal early — a SyntaxError; the quoting is now correct.
    """

    route_base = '/dagen'
    log = logging.root.getChild(f'{__name__}.DagenFABView')

    @expose('/')
    @expose('/dags')
    @login_required
    @has_access
    def list(self):
        """Render the listing page for all Dagen-managed DAGs."""
        dbDags = DagenDagQueryset().get_all()
        return self.render_template(
            'dagen/dags.html',
            dbDags=dbDags
        )

    @expose('/dags/create', methods=('GET', 'POST'))
    @login_required
    @has_access
    def create(self):
        """Create a new DAG from one of the registered templates."""
        template = 'dagen/create-dag.html'
        tmpls = get_template_loader().template_classes
        forms = {key: tmpl.as_form() for key, tmpl in tmpls.items()}
        if request.method == 'POST' and request.form:
            tmplId = request.form.get('template_id')
            form = forms[tmplId]
            form.process(request.form)
            if form.validate():
                ret = form.create(template_id=tmplId, user=g.user)
                msg = f'"{ret.dag_id}" created successfully' if ret else "failed to create"
                flash(f'Dagen - {msg}!')
                if ret:
                    return self._handle_form_submission(request.form)
            # Validation failed (or creation failed): re-render with the
            # selected template so errors are shown inline.
            return self.render_template(
                template,
                template_classes=tmpls,
                template_id=tmplId,
                forms=forms
            )
        return self.render_template(
            template,
            template_classes=tmpls,
            forms=forms
        )

    @expose('/dags/save', methods=('GET', 'POST'))
    @login_required
    @has_access
    def bulk_save(self):
        """Bulk-import/sync DAG definitions, reporting per-DAG outcomes."""
        template = 'dagen/bulk-save.html'
        tmpls = get_template_loader().template_classes.keys()
        # make mark_approved no-op if no permission to approve
        has_approve_perm = self._has_permission('can_approve')
        form = BulkSyncDagenForm(
            templates=tmpls, has_approve_perm=has_approve_perm)
        context = {
            'form': form
        }
        if request.method == 'POST' and form.validate():
            dag_results = form.save(g.user)
            success_results, failed_results = dict(), dict()
            for dag_id, (is_success, message) in dag_results.items():
                if is_success:
                    success_results[dag_id] = message
                else:
                    failed_results[dag_id] = message
            context['res_success'] = success_results
            context['res_failure'] = failed_results
        return self.render_template(template, **context)

    @expose('/dags/edit', methods=('GET', 'POST'))
    @login_required
    @has_access
    def edit(self):
        """Edit an existing Dagen DAG and create a new version on change."""
        template = 'dagen/edit-dag.html'
        dag_id = request.args.get('dag_id')
        qs = DagenDagQueryset()
        dbDag = qs.get_dag(dag_id)
        versions = {
            version.version: version.dict_repr for version in dbDag.versions}
        try:
            tmpl = get_template_loader().get_template_class(dbDag.template_id)
        except TemplateNotFoundError as e:
            flash(e, category='error')
            flash(
                'Either delete this DAG or add back the template with given template ID')
            return self._redirect_home()
        try:
            init_data = {**dbDag.dict_repr, **dbDag.live_version.dict_repr}
        except Exception as e:
            # live_version may be missing; fall back to the DAG fields only.
            self.log.exception(e)
            init_data = dbDag.dict_repr
        form = tmpl.as_form(data=init_data)
        if request.method == 'POST' and request.form:
            form.process(request.form)
            if form.validate():
                ret = form.update(dbDag, user=g.user,
                                  form_version=request.form.get('live_version'))
                # FIX: previously a malformed f-string (SyntaxError).
                flash(f'Dagen "{dag_id}" version {"updated" if ret else "unchanged"}!')
                if ret:
                    refresh_dagbag(dag_id=dag_id)
                return self._handle_form_submission(request.form)
        return self.render_template(
            template,
            dbDag=dbDag,
            dagVersions=versions,
            dag_id=dag_id,
            form=form
        )

    @expose('/dags/delete')
    @login_required
    @has_access
    def delete(self, session=None):
        """Delete a Dagen DAG record and the corresponding Airflow DAG."""
        dag_id = request.args.get('dag_id')
        DagenDagQueryset().delete_dag(dag_id).done()
        refresh_dagen_templates()
        try:
            delete_dag.delete_dag(dag_id)
        except DagNotFound:
            flash("DAG with id {} not found. Cannot delete".format(dag_id), 'error')
            return self._redirect_home()
        except DagFileExists:
            flash("Dag id {} is still in DagBag. "
                  "Remove the DAG file first.".format(dag_id),
                  'error')
            return self._redirect_home()
        flash("Deleting DAG with id {}. May take a couple minutes to fully"
              " disappear.".format(dag_id))
        # Upon success return to home.
        return self._redirect_home()

    @expose('/dags/detail')
    @login_required
    @has_access
    def detail(self):
        """Show details for one Dagen DAG."""
        tmpls = get_template_loader().templates
        dag_id = request.args.get('dag_id')
        dbDag = DagenDagQueryset().get_dag(dag_id)
        # Lookup kept for its KeyError side effect when the template is
        # missing — presumably intentional; TODO confirm and handle nicely.
        tmpls[dbDag.template_id]
        return self.render_template(
            'dagen/detail.html',
            dbDag=dbDag,
            dag_id=dag_id
        )

    @expose('/dags/approve')
    @login_required
    @has_access
    def approve(self):
        """Approve the live version of a DAG and refresh the DagBag."""
        dag_id = request.args.get('dag_id')
        user_id = g.user.id
        try:
            DagenDagVersionQueryset().approve_live_version(dag_id, user_id).done()
            refresh_dagbag(dag_id=dag_id)
            flash(
                f'DAG "{dag_id}" approved successfully! Please wait for 5-10 minutes for workers to refresh and the DAG to go live.')
        except ValueError as e:
            flash(str(e))
        return self._redirect_home()

    def render_template(self, template, **kwargs):
        """Render with the caller's context plus this view's permission flags."""
        extra_ctx = {
            'perm_can_create': self._has_permission('can_create'),
            'perm_can_bulk_save': self._has_permission('can_bulk_save'),
            'perm_can_edit': self._has_permission('can_edit'),
            'perm_can_approve': self._has_permission('can_approve'),
            'perm_can_delete': self._has_permission('can_delete'),
            'perm_can_list': self._has_permission('can_list'),
            'perm_can_detail': self._has_permission('can_detail')
        }
        return super().render_template(template, **kwargs, **extra_ctx)

    def _handle_form_submission(self, data):
        """Redirect per the submit button used (add another / keep editing)."""
        if data.get('_add_another', None):
            return redirect(url_for('DagenFABView.create'))
        elif data.get('_continue_editing', None):
            return redirect(url_for('DagenFABView.edit', dag_id=data.get('dag_id')))
        return self._redirect_home()

    def _redirect_home(self):
        """Redirect to the DAG listing page."""
        return redirect(url_for('DagenFABView.list'))

    def _has_permission(self, permission_str, user=None) -> bool:
        """Check a permission against this view via the security manager."""
        return self.appbuilder.sm.has_access(permission_str, 'DagenFABView', user=user)
| import logging
from functools import wraps
import airflow
from airflow.api.common.experimental import delete_dag
from airflow.exceptions import DagFileExists, DagNotFound
from airflow.utils.log.logging_mixin import LoggingMixin
from flask import current_app, flash, g, redirect, request, url_for
from flask_appbuilder import BaseView as AppBuilderBaseView
from flask_appbuilder import expose, has_access
from dagen.exceptions import TemplateNotFoundError
from dagen.internal import refresh_dagbag
from dagen.models import DagenDag
from dagen.query import DagenDagQueryset, DagenDagVersionQueryset
from dagen.utils import get_template_loader, refresh_dagen_templates
from dagen.www.forms import BulkSyncDagenForm
from dagen.www.utils import login_required
class DagenFABView(AppBuilderBaseView, LoggingMixin):
    """Flask-AppBuilder CRUD view for dagen (template-driven) DAGs.

    Provides list/create/edit/detail/delete/approve endpoints under
    ``/dagen`` plus a bulk-save endpoint, and injects per-action
    permission flags into every rendered template.
    """

    route_base = '/dagen'
    # Child logger of the root logger, namespaced to this module + class.
    log = logging.root.getChild(f'{__name__}.{"DagenFABView"}')

    @expose('/')
    @expose('/dags')
    @login_required
    @has_access
    def list(self):
        """List every dagen DAG known to the metadata database."""
        dbDags = DagenDagQueryset().get_all()
        return self.render_template(
            'dagen/dags.html',
            dbDags=dbDags
        )

    @expose('/dags/create', methods=('GET', 'POST'))
    @login_required
    @has_access
    def create(self):
        """Create a new DAG from one of the loaded dagen templates.

        GET renders one form per available template; POST validates the
        form of the selected template and persists the new DAG on success.
        """
        template = 'dagen/create-dag.html'
        tmpls = get_template_loader().template_classes
        # One pre-built form per template so the page can switch between them.
        forms = {key: tmpl.as_form() for key, tmpl in tmpls.items()}
        if request.method == 'POST' and request.form:
            tmplId = request.form.get('template_id')
            form = forms[tmplId]
            form.process(request.form)
            if form.validate():
                ret = form.create(template_id=tmplId, user=g.user)
                msg = f'"{ret.dag_id}" created successfully' if ret else "failed to create"
                flash(f'Dagen - {msg}!')
                if ret:
                    return self._handle_form_submission(request.form)
            # Validation or creation failed: re-render with the chosen template.
            return self.render_template(
                template,
                template_classes=tmpls,
                template_id=tmplId,
                forms=forms
            )
        return self.render_template(
            template,
            template_classes=tmpls,
            forms=forms
        )

    @expose('/dags/save', methods=('GET', 'POST'))
    @login_required
    @has_access
    def bulk_save(self):
        """Bulk-create/update dagen DAGs from a single submitted form.

        On POST, per-DAG results are split into success/failure maps and
        passed to the template for display.
        """
        template = 'dagen/bulk-save.html'
        tmpls = get_template_loader().template_classes.keys()
        # make mark_approved no-op if no permission to approve
        has_approve_perm = self._has_permission('can_approve')
        form = BulkSyncDagenForm(
            templates=tmpls, has_approve_perm=has_approve_perm)
        context = {
            'form': form
        }
        if request.method == 'POST' and form.validate():
            dag_results = form.save(g.user)
            success_results, failed_results = dict(), dict()
            for dag_id, (is_success, message) in dag_results.items():
                if is_success:
                    success_results[dag_id] = message
                else:
                    failed_results[dag_id] = message
            context['res_success'] = success_results
            context['res_failure'] = failed_results
        return self.render_template(template, **context)

    @expose('/dags/edit', methods=('GET', 'POST'))
    @login_required
    @has_access
    def edit(self):
        """Edit an existing dagen DAG, creating a new (unapproved) version.

        Redirects home with a flashed error if the DAG's backing template
        no longer exists.
        """
        template = 'dagen/edit-dag.html'
        dag_id = request.args.get('dag_id')
        qs = DagenDagQueryset()
        dbDag = qs.get_dag(dag_id)
        versions = {
            version.version: version.dict_repr for version in dbDag.versions}
        try:
            tmpl = get_template_loader().get_template_class(dbDag.template_id)
        except TemplateNotFoundError as e:
            flash(e, category='error')
            flash(
                'Either delete this DAG or add back the template with given template ID')
            return self._redirect_home()
        try:
            # Seed the form with the live version's values when one exists.
            init_data = {**dbDag.dict_repr, **dbDag.live_version.dict_repr}
        except Exception as e:
            # No usable live version -- fall back to the DAG row alone.
            self.log.exception(e)
            init_data = dbDag.dict_repr
        form = tmpl.as_form(data=init_data)
        if request.method == 'POST' and request.form:
            form.process(request.form)
            if form.validate():
                ret = form.update(dbDag, user=g.user,
                                  form_version=request.form.get('live_version'))
                flash(
                    f'Dagen "{dag_id}" version {"updated" if ret else "unchanged"}!')
                if ret:
                    refresh_dagbag(dag_id=dag_id)
                    return self._handle_form_submission(request.form)
        return self.render_template(
            template,
            dbDag=dbDag,
            dagVersions=versions,
            dag_id=dag_id,
            form=form
        )

    @expose('/dags/delete')
    @login_required
    @has_access
    def delete(self, session=None):
        """Delete a dagen DAG from the metadata DB and from Airflow itself.

        ``session`` is accepted for signature compatibility but unused here.
        """
        dag_id = request.args.get('dag_id')
        DagenDagQueryset().delete_dag(dag_id).done()
        refresh_dagen_templates()
        try:
            delete_dag.delete_dag(dag_id)
        except DagNotFound:
            flash("DAG with id {} not found. Cannot delete".format(dag_id), 'error')
            return self._redirect_home()
        except DagFileExists:
            flash("Dag id {} is still in DagBag. "
                  "Remove the DAG file first.".format(dag_id),
                  'error')
            return self._redirect_home()
        flash("Deleting DAG with id {}. May take a couple minutes to fully"
              " disappear.".format(dag_id))
        # Upon success return to home.
        return self._redirect_home()

    @expose('/dags/detail')
    @login_required
    @has_access
    def detail(self):
        """Show the read-only detail page for a single dagen DAG.

        Fix: the original looked up ``tmpls[dbDag. template_id]`` into an
        unused local, raising an unhandled KeyError when the template was
        missing; mirror ``edit``'s graceful handling instead.
        """
        dag_id = request.args.get('dag_id')
        dbDag = DagenDagQueryset().get_dag(dag_id)
        if dbDag.template_id not in get_template_loader().templates:
            flash(
                'Either delete this DAG or add back the template with given template ID')
            return self._redirect_home()
        return self.render_template(
            'dagen/detail.html',
            dbDag=dbDag,
            dag_id=dag_id
        )

    @expose('/dags/approve')
    @login_required
    @has_access
    def approve(self):
        """Approve the live version of the requested DAG and refresh the DagBag."""
        dag_id = request.args.get('dag_id')
        user_id = g.user.id
        try:
            DagenDagVersionQueryset().approve_live_version(dag_id, user_id).done()
            refresh_dagbag(dag_id=dag_id)
            flash(
                f'DAG "{dag_id}" approved successfully! Please wait for 5-10 minutes for workers to refresh and the DAG to go live.')
        except ValueError as e:
            flash(str(e))
        return self._redirect_home()

    def render_template(self, template, **kwargs):
        """Render *template* with one ``perm_can_*`` flag per view permission."""
        extra_ctx = {
            'perm_can_create': self._has_permission('can_create'),
            'perm_can_bulk_save': self._has_permission('can_bulk_save'),
            'perm_can_edit': self._has_permission('can_edit'),
            'perm_can_approve': self._has_permission('can_approve'),
            'perm_can_delete': self._has_permission('can_delete'),
            'perm_can_list': self._has_permission('can_list'),
            'perm_can_detail': self._has_permission('can_detail')
        }
        return super().render_template(template, **kwargs, **extra_ctx)

    def _handle_form_submission(self, data):
        """Route a successful form POST based on which submit button was used."""
        if data.get('_add_another', None):
            return redirect(url_for('DagenFABView.create'))
        elif data.get('_continue_editing', None):
            return redirect(url_for('DagenFABView.edit', dag_id=data.get('dag_id')))
        return self._redirect_home()

    def _redirect_home(self):
        """Redirect the user back to the dagen DAG listing page."""
        return redirect(url_for('DagenFABView.list'))

    def _has_permission(self, permission_str, user=None) -> bool:
        """Ask FAB's security manager whether *user* (default: current)
        holds *permission_str* on this view."""
        return self.appbuilder.sm.has_access(permission_str, 'DagenFABView', user=user)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.