id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3339453 | # -*- coding: utf-8 -*-
# Library dependencies
import csv
import json
import math
import os
import sys

# Files
MOVIES_INPUT_FILE = 'data/top_10_movies_2006-2015.json'
CENSUS_INPUT_FILE = 'data/census_2014.json'
REPORT_OUTPUT_FILE = 'data/hollywood_census_report.csv'

# Init
races = []
people = []
census = []
hollywood = []

# Read movies from file and flatten every credited person into one
# (race, gender) record per race they identify with.
with open(MOVIES_INPUT_FILE) as data_file:
    movies = json.load(data_file)
for m in movies:
    for p in m["people"]:
        for race in p["races"]:
            people.append({
                "race": race,
                "gender": p["gender"]
            })

# Read census categories (label / gender / race / value) from file
with open(CENSUS_INPUT_FILE) as data_file:
    census = json.load(data_file)

# Generate hollywood numbers: count people matching each census category
for category in census:
    hollywood.append({
        "label": category["label"],
        "gender": category["gender"],
        "race": category["race"],
        "value": len([p for p in people
                      if p["race"] == category["race"]
                      and p["gender"] == category["gender"]])
    })

# Write the comparison report as percentages of each population.
# NOTE: Python 3 requires text mode with newline='' for csv.writer;
# the original opened the file with 'wb', which only worked on Python 2.
with open(REPORT_OUTPUT_FILE, 'w', newline='') as f:
    w = csv.writer(f)
    w.writerow(['', 'Census', 'Hollywood', 'Difference'])
    census_total = sum(c['value'] for c in census)
    hollywood_total = sum(c['value'] for c in hollywood)
    for c in census:
        c_value = 1.0 * c["value"] / census_total * 100
        # hollywood was built in the same order/keys as census, so a
        # matching entry always exists
        h = next((h for h in hollywood
                  if h["gender"] == c["gender"] and h["race"] == c["race"]), None)
        h_value = 1.0 * h["value"] / hollywood_total * 100
        w.writerow([c["label"], c_value, h_value, h_value - c_value])

print("Successfully wrote report to file: %s" % REPORT_OUTPUT_FILE)
| StarcoderdataPython |
11397233 | <reponame>RaspberryPi-Samples/py-my-key<filename>py_my_key/samples/sample_nxppy.py<gh_stars>1-10
import nxppy
import time

# Poll the NXP NFC reader forever, printing the UID of any card held
# against it (sample for the py-my-key project).
mifare = nxppy.Mifare()

# Print card UIDs as they are detected
while True:
    try:
        # select() returns the UID of the card currently in the field
        uid = mifare.select()
        print(uid)
    except nxppy.SelectError:
        # SelectError is raised if no card is in the field.
        pass
    # short sleep to avoid busy-polling the reader
    time.sleep(0.1)
| StarcoderdataPython |
8009607 | <reponame>LexcaliburR/notebook
import torch
import torch.nn as nn
from torch.onnx import register_custom_op_symbolic
from torch.onnx.symbolic_helper import parse_args

# Define custom symbolic function
# parse_args codes: "v" = graph value (tensor), "f" = float attribute,
# "i" = int attribute — they unwrap/validate the exporter's arguments.
@parse_args("v", "v", "f", "i")
def symbolic_foo_forward(g, input1, input2, attr1, attr2):
    # Emit a node "Foo" in a custom ONNX domain; the _f/_i suffixes
    # declare the ONNX attribute types.
    return g.op("custom_domain::Foo", input1, input2, attr1_f=attr1, attr2_i=attr2)

# Register custom symbolic function
# (maps the TorchScript op "custom_ops::foo_forward" to the symbolic
# above, starting at ONNX opset 9)
register_custom_op_symbolic("custom_ops::foo_forward", symbolic_foo_forward, 9)

class FooModel(torch.nn.Module):
    """Minimal module wrapping the custom op so it can be ONNX-exported."""

    def __init__(self, attr1, attr2):
        super(FooModel, self).__init__()
        self.attr1 = attr1
        self.attr2 = attr2

    def forward(self, input1, input2):
        # Calling custom op
        # NOTE(review): assumes the "custom_ops" native extension has
        # already been loaded (torch.ops.load_library) — confirm.
        return torch.ops.custom_ops.foo_forward(input1, input2, self.attr1, self.attr2)

model = FooModel(2, 3)
torch.onnx.export(
    model,
    (torch.Tensor(1), torch.Tensor(2)),
    "model.onnx",
    # only needed if you want to specify an opset version > 1.
    custom_opsets={"custom_domain": 2},
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH) | StarcoderdataPython |
8604 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Template Helpers used in workbox"""
import logging
import socket
from datetime import datetime
from markupsafe import Markup
import psutil
import tg
log = logging.getLogger(__name__)
def current_year():
    """Return the current year as a four-digit string."""
    return datetime.now().strftime('%Y')
def is_docker_enabled():
    """Detect whether a docker service process is running."""
    # True as soon as any running process name contains 'docker'
    return any('docker' in proc.name() for proc in psutil.process_iter())
def get_server_load_value():
    """ Get server load value. """
    # NOTE(review): this reports memory usage percent, not CPU load —
    # the name is slightly misleading; confirm callers expect memory.
    return psutil.virtual_memory().percent
def get_free_port():
    """Find and return a free TCP port number.

    Binds an INET stream socket to port 0 so the OS assigns an unused
    port, reads it back, and releases the socket.  The ``with`` block
    guarantees the descriptor is closed even if bind() raises (the
    original leaked the socket on error).  Note there is an inherent
    race: the port may be taken by another process before it is used.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as soc:
        soc.bind(("", 0))
        return soc.getsockname()[1]
def get_vagrantfiles_base_folder():
    """ Return base folder for vagrantfiles, read from TurboGears
    config key ``workbox.vagrantfiles.basefolder`` (None if unset). """
    return tg.config.get('workbox.vagrantfiles.basefolder')
def get_hostname():
    """ Return hostname from TurboGears config key
    ``workbox.hostname`` (None if unset). """
    return tg.config.get('workbox.hostname')
# Re-export WebHelpers2 helpers for templates; optional because the
# package may not parse on every Python version.
try:
    from webhelpers2 import date, html, number, misc, text
except SyntaxError:
    log.error("WebHelpers2 helpers not available with this Python Version")
| StarcoderdataPython |
9662543 | <reponame>KKoga/UniRapidJson
# coding: utf-8
# Copyright (c) 2017 <NAME>
#
# UniRapidJson is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
import uuid
import os
import time
import tempfile
import shutil
import tarfile
import gzip
PROJ_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
DIST_PATH = os.path.join(PROJ_ROOT, 'dist', 'unity')
CS_SRC_PATH = os.path.join(PROJ_ROOT, 'src', 'runtime-support', 'unity', 'cs')
OUTPUT_PATH = os.path.join(PROJ_ROOT, 'dist', 'UniRapidJson.unitypackage')
ASSET_PREFIX_PATH = 'Assets/Standard Assets/UniRapidJson'
META_HEADER = """fileFormatVersion: 2
guid: {guid}
timeCreated: {time}
licenseType: Free"""
DEFAULT_IMPORTER = """
DefaultImporter:
userData:
assetBundleName:
assetBundleVariant:
"""
PLUGIN_IMPORTER = """
PluginImporter:
serializedVersion: 1
iconMap: {{}}
executionOrder: {{}}
isPreloaded: 0
platformData: {platform_data}
userData:
assetBundleName:
assetBundleVariant:
"""
PLATFORM_DATA = {
'Internal.Any': '''
iOS:
enabled: 1
settings:
CompileFlags:
FrameworkDependencies:''',
'Android.Any': '''
Android:
enabled: 1
settings:
CPU: AnyCPU''',
'OSX.Any': '''
OSXIntel:
enabled: 1
settings:
CPU: AnyCPU
OSXIntel64:
enabled: 1
settings:
CPU: AnyCPU
OSXUniversal:
enabled: 1
settings: {}
Editor:
enabled: 1
settings:
CPU: AnyCPU
DefaultValueInitialized: true
OS: OSX''',
'Windows.x64': '''
Win64:
enabled: 1
settings:
CPU: AnyCPU
Editor:
enabled: 1
settings:
CPU: x86_64
DefaultValueInitialized: true
OS: Windows''',
'Windows.x86': '''
Win:
enabled: 1
settings:
CPU: AnyCPU
Editor:
enabled: 1
settings:
CPU: x86
DefaultValueInitialized: true
OS: Windows''',
'WinRT.ARM': '''
WindowsStoreApps:
enabled: 1
settings:
CPU: ARM
DontProcess: False
PlaceholderPath:
SDK: AnySDK
ScriptingBackend: AnyScriptingBackend''',
'WinRT.x64': '''
WindowsStoreApps:
enabled: 1
settings:
CPU: X64
DontProcess: False
PlaceholderPath:
SDK: AnySDK
ScriptingBackend: AnyScriptingBackend''',
'WinRT.x86': '''
WindowsStoreApps:
enabled: 1
settings:
CPU: X86
DontProcess: False
PlaceholderPath:
SDK: AnySDK
ScriptingBackend: AnyScriptingBackend''',
}
def get_guid(metapath):
    """Return the guid value from a Unity .meta file, or None if absent."""
    with open(metapath, 'r') as meta_file:
        for line in meta_file:
            if line.lstrip().startswith('guid:'):
                _, _, value = line.partition(':')
                return value.strip()
    return None
def default_importer(platform, arch):
    """Return the .meta importer body for plain (non-plugin) assets.

    platform/arch are accepted only for signature parity with
    plugin_importer; they are unused here.
    """
    return DEFAULT_IMPORTER
def plugin_importer(platform, arch):
    """Return the PluginImporter .meta body for the given platform/arch,
    with the matching PLATFORM_DATA settings spliced in.  Raises
    KeyError for a platform.arch pair not in PLATFORM_DATA."""
    return PLUGIN_IMPORTER.format(
        platform_data=PLATFORM_DATA[platform + '.' + arch])
def generate_meta_file(platform, arch, asset, importer, isdir):
    """Write a Unity .meta file next to *asset*.

    *importer* is a callable (default_importer / plugin_importer) that
    renders the importer section.  The guid is freshly generated, so
    meta files are not stable across runs.
    """
    meta_file = asset + '.meta'
    with open(meta_file, 'w') as f:
        header = META_HEADER.format(
            time=int(time.time()),
            guid=uuid.uuid4().hex)
        if isdir:
            # Unity marks folder assets with this extra key
            header += "\nfolderAsset: yes"
        f.write(header + importer(platform, arch))
def generate_package(tmpdir):
    """Assemble the .unitypackage from the dist tree.

    A unitypackage is a tar archive with one directory per asset guid,
    each containing 'pathname' (the install path inside Assets/),
    'asset.meta', and — for file assets — the 'asset' payload itself.
    """
    for root, dirs, files in os.walk(DIST_PATH):
        # every asset in the dist tree already has a .meta sibling
        assets = [x for x in files if x.endswith('.meta')]
        folderassets = dirs
        # path of this directory relative to DIST_PATH
        module = root[len(DIST_PATH):]
        for metafile in assets:
            asset = metafile[:-5]  # strip the '.meta' suffix
            guid = get_guid(os.path.join(root, metafile))
            try:
                module_dir = os.path.join(tmpdir, guid)
                os.makedirs(module_dir)
            except OSError:
                # directory already exists (duplicate guid) — reuse it
                pass
            pathname_path = os.path.join(module_dir, 'pathname')
            assetmeta_path = os.path.join(module_dir, 'asset.meta')
            asset_path = os.path.join(module_dir, 'asset')
            with open(pathname_path, 'w') as f:
                f.write(ASSET_PREFIX_PATH + module + '/' + asset)
            shutil.copyfile(os.path.join(root, metafile), assetmeta_path)
            # folder assets have a .meta but no payload file to copy
            if not asset in folderassets:
                shutil.copyfile(os.path.join(root, asset), asset_path)
    # with tarfile.open(OUTPUT_PATH, 'w:gz', format=tarfile.GNU_FORMAT, compresslevel=9) as archive:
    with tarfile.open(OUTPUT_PATH, 'w') as archive:
        archive.add(tmpdir, arcname='')
def main():
    """Copy the C# runtime sources into dist, stamp .meta files for
    every asset, then build the .unitypackage."""
    # 1) Copy C# scripts into the dist tree with default importers.
    for root, dirs, files in os.walk(CS_SRC_PATH):
        for script in [x for x in files if x.endswith('.cs') and not x.endswith('.meta')]:
            module = root[len(CS_SRC_PATH):]
            src = os.path.join(root, script)
            dst = os.path.join(DIST_PATH, module, script)
            shutil.copyfile(src, dst)
            generate_meta_file(None, None, dst, default_importer, isdir=False)
    # 2) Walk each per-platform plugin folder named '<Platform>.<Arch>'.
    for module in os.listdir(DIST_PATH):
        dir_path = os.path.join(DIST_PATH, module)
        if not os.path.isdir(dir_path):
            continue
        platform, arch = module.split('.', 1)
        importer = plugin_importer
        for root, dirs, files in os.walk(dir_path):
            for asset in [x for x in files if not x.endswith('.meta')]:
                generate_meta_file(platform, arch, os.path.join(root, asset), importer, isdir=False)
            for asset in dirs:
                generate_meta_file(platform, arch, os.path.join(root, asset), importer, isdir=True)
            # only the top level of each platform folder gets the plugin
            # importer; nested entries fall back to the default one
            importer = default_importer
    # 3) Stage everything into a temp dir and tar it up.
    with tempfile.TemporaryDirectory() as tmpdir:
        generate_package(tmpdir)

if __name__ == '__main__':
    main()
| StarcoderdataPython |
3524004 | from Crypto.PublicKey import RSA
from Crypto.Signature import pss
from Crypto.Hash import SHA256
# Sign secret2.txt with RSA-PSS (SHA-256) using the local private key
# and write the raw signature bytes to signature.dat.
message = None
with open('secret2.txt', 'rb') as f:
    message = f.read()

with open('private_key.pem', 'rb') as f:
    private_key = RSA.import_key(f.read())

hash = SHA256.new(message)  # NOTE: shadows the builtin 'hash'
print(f'hash: {hash.hexdigest()}')

# PSS is a randomized scheme, so the signature differs on every run
signature = pss.new(private_key).sign(hash)
print(f'signature: {signature}')

with open('signature.dat', 'wb') as f:
    f.write(signature)
| StarcoderdataPython |
47115 | import argparse
import os
import cv2
import numpy as np
import torch
from torch import nn
from deepface.backbones.iresnet import iresnet18, iresnet34, iresnet50, iresnet100, iresnet200
from deepface.backbones.mobilefacenet import get_mbf
from deepface.commons import functions
import gdown
url={
'ms1mv3_r50':'https://eb9uqq.dm.files.1drv.com/y4mo1LyxVkMS7RwyNFyD7Oj_LrukPmnMwHsL9rjh0By0Pbgglx-f55KwzpQ7rMhHYsgqz8WXcFOFpNKcgwBwPpmd2UjEOc2JwcdRAitVfngManBko6wU-y2HTwGi--_4R9_TmfTqO4yGQEIhR9-d4LOcisKC8YzL4bth1b4tSJ8nloIIq7xGizPX3jWiYfFHzirG5-VgJ3guFBVZKE7pupRsw',
'ms1mv3_r18':'https://eb9uqq.dm.files.1drv.com/y4mpJ0NiyBPDzo_aQlh9QHwL52UljHSI60KSPv0-p2oTb4qnoUA5Cu3Ul-Tfxc8l7uyg9BYE_hoItNc9JjqYRW-qmIIM0JeMqKGjyl5sZQvwPZUxazPW8THT9CrWpwzaKkrBXFDc_uEDGAvDpaB1lhrc83aG5lBOeuI6LbtMLBHyR7TA2YdPxcIvPGnsbqjvWl1rXQFG4zD2_TxL_m4avN43Q',
'ms1mv3_r34': 'https://eb9uqq.dm.files.1drv.com/y4mU3JhshWSlooEzKRYnCPrOb1-xpZqS_Z90rOXm8D6KOL-PpOhvlsDYAgiTWkGG8TYqC2kdgr4I66XBkhEtqhptKTRFY90gnLTesR9Sw0xNGb46_ULn6IcfRMTW18uKJS2pwGpwabu7SpL3Z1EsX-gcd74M26gMJ11svjthg15CzpGQhVASMZMMfSvlUGhyP5HPFxOQi3X0cpAUMm8P9Yn8Q',
'ms1mv3_r100':'https://eb9uqq.dm.files.1drv.com/y4mNdH0KjE7_R3tIT1h86Ov1XshRRgT1BUBeVIrUgRasS5x93UeCpP023bspth03rUtIg1raK3EtRqMtrGf_DvA0pIf2RgB7FsHsBaNoJYF1JqUl7Q8qsTpYGxOaq7-ow0Hiejjz5JRU9nWOJSniOlM2STvDKZH-Zs6pHiyLEfLhikQkm8xC2SYkcas-xedihqRJCVmzTI4LfBqtFbX1nxU-Q',
'glint360_r18':'https://eb9uqq.dm.files.1drv.com/y4mn1hArpddPJw-OM6IzTll6TpxZaSVjs6HyzeYC2m-tg-v9qqBjoI37Lr20K-RNFr-9_AlbnguKxxzrC4lqSykaUNWaJhya12ZdOIIwS1h2kPGSjGJkCEyEca9YkV5Mkesiee8nHibkeLvY5uSoe5PSLtm_umgqd6l3f4-RSnP4ecGrtYM3-Jt49YgKPwDcb5hNyXVBixUqVhTmyOiw9pM3g',
'glint360_r34': 'https://eb9uqq.dm.files.1drv.com/y4mDEvblVeT<KEY>',
'glint360_r50': 'https://eb9uqq.dm.files.1drv.com/y4m7HMGc6qBhL2PwUcsjx4z-Pm57HD2Uze1oa27yGL4BXt4Ech3sIbi59XUpBJMv6kxAAxJP00W_lWyN8T8Dm2rZ8eLQVxMiNoskpN0JZOfjTeiovnhNwBsOc3RN2Y91xNqzyMPs-5GQ4qKdZ_LNlulu8wckJcWvTIFSupsLkmtnym8PnL5u7XTERhXBTgL5nwoutQg6Yvb8Ixr_5VY1m2LaQ',
'glint360_r100': 'https://eb9uqq.dm.files.1drv.com/y4m6MECUN2ituEEi6oi8ksrTVHaNKfu21zaqpVA750ynYQqsP-RSDbGFX_MyK-OdWOnFp9NZuFTU711TVGAUMbttVWclSzruJRQUEp7-D8fZLMUBPc43lXSAkReo6WCfWaHIFZltEsfO3WomoCyePTRlEgShXYxVpSnu_VDuD8_MC7WcRmBJGznahexUgSQE0NcVJDvYkq2MW1eaeEQ0T4d6Q'
}
def getmodel(name, **kwargs):
    """Instantiate an (untrained) face-recognition backbone by short name.

    ``name`` is one of r18/r34/r50/r100/r200/r2060 (IResNet depths) or
    'mbf' (MobileFaceNet); kwargs are forwarded to the constructor
    (e.g. fp16, num_features).  Raises ValueError for unknown names.
    """
    # resnet
    if name == "r18":
        base_model= iresnet18(False, **kwargs)
    elif name == "r34":
        base_model= iresnet34(False, **kwargs)
    elif name == "r50":
        base_model= iresnet50(False, **kwargs)
    elif name == "r100":
        base_model= iresnet100(False, **kwargs)
    elif name == "r200":
        base_model= iresnet200(False, **kwargs)
    elif name == "r2060":
        # imported lazily — only needed for this rarely used depth
        from deepface.backbones.iresnet2060 import iresnet2060
        base_model= iresnet2060(False, **kwargs)
    elif name == "mbf":
        fp16 = kwargs.get("fp16", False)
        num_features = kwargs.get("num_features", 512)
        base_model= get_mbf(fp16=fp16, num_features=num_features)
    else:
        raise ValueError()
    return base_model
class Model_ArcFace(nn.Module):
    """Wraps an ArcFace backbone loaded from a checkpoint, in eval mode.

    ``name`` selects the backbone architecture (see getmodel) and
    ``weight`` is the path to a state-dict checkpoint file.
    """
    def __init__(self,name,weight):
        super().__init__()
        self.model= getmodel(name, fp16=False)
        # always load onto CPU so inference works without a GPU
        self.model.load_state_dict(torch.load(weight, map_location=torch.device("cpu") ))
        self.model.eval()

    @torch.no_grad()
    def predict(self,image):
        """Embed a batch of images given as a numpy array.

        NOTE(review): assumes ``image`` is (N, H, W, C) uint8-like with
        values in [0, 255] — confirm against callers.
        Returns an (N, num_features) numpy array of embeddings.
        """
        self.img=image
        # NHWC -> NCHW, then normalize pixel values to [-1, 1]
        self.img = np.transpose(self.img, (0,3, 1, 2))
        self.img = torch.from_numpy(self.img).float()
        self.img.div_(255).sub_(0.5).div_(0.5)
        print(self.img.shape)
        feat = self.model(self.img)
        feat=feat.numpy()
        return feat

    @torch.no_grad()
    def predict1(self,image):
        """Embed a single image given as a file path, or a random
        112x112 test image when ``image`` is None; returns a
        (1, num_features) numpy array."""
        self.img=image
        if self.img is None:
            self.img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.uint8)
        else:
            self.img = cv2.imread(self.img)
            self.img = cv2.resize(self.img, (112, 112))
            self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)
        # HWC -> CHW, add batch dim, normalize to [-1, 1]
        self.img = np.transpose(self.img, (2, 0, 1))
        self.img = torch.from_numpy(self.img).unsqueeze(0).float()
        self.img.div_(255).sub_(0.5).div_(0.5)
        feat = self.model(self.img)
        feat=feat.numpy()
        # print(feat.shape)
        return feat
def loadModel_ms1mv3_r50(url = 'https://eb9uqq.dm.files.1drv.com/y4mo1LyxVkMS7RwyNFyD7Oj_LrukPmnMwHsL9rjh0By0Pbgglx-f55KwzpQ7rMhHYsgqz8WXcFOFpNKcgwBwPpmd2UjEOc2JwcdRAitVfngManBko6wU-y2HTwGi--_4R9_TmfTqO4yGQEIhR9-d4LOcisKC8YzL4bth1b4tSJ8nloIIq7xGizPX3jWiYfFHzirG5-VgJ3guFBVZKE7pupRsw'):
    """Load the MS1MV3 ArcFace ResNet-50 model, downloading the
    checkpoint into ~/.deepface/weights on first use."""
    home = functions.get_deepface_home()
    file_name = "backbone.pth"
    weights_dir = home + '/.deepface/weights/ms1mv3_arcface_r50/'
    output = weights_dir + file_name
    # BUG FIX: the original only downloaded when the directory was
    # *also* missing, so an existing empty directory (e.g. after an
    # interrupted download) skipped the download and load_state_dict
    # then failed; os.makedirs(exist_ok=True) also replaces os.mkdir,
    # which raised when the parent directory was missing.
    if not os.path.exists(output):
        os.makedirs(weights_dir, exist_ok=True)
        print(file_name, " will be downloaded to ", output)
        gdown.download(url, output, quiet=False)
    model = Model_ArcFace('r50', output)
    return model
def loadModel(name):
    """Load a pretrained ArcFace model by key of the module-level
    ``url`` table (e.g. 'ms1mv3_r50'), downloading the checkpoint
    into ~/.deepface/weights/<name>/ on first use."""
    home = functions.get_deepface_home()
    file_name = "backbone.pth"
    weights_dir = home + '/.deepface/weights/' + name + "/"
    output = weights_dir + file_name
    if not os.path.exists(output):
        # BUG FIX: the original called os.mkdir unconditionally here,
        # which raised FileExistsError when the directory was left
        # behind by an interrupted download (weight file still absent).
        os.makedirs(weights_dir, exist_ok=True)
        print(file_name, " will be downloaded to ", output)
        gdown.download(url[name], output, quiet=False)
    # 'ms1mv3_r50' -> architecture short name 'r50'
    name_model = name.split("_")[-1]
    model = Model_ArcFace(name_model, output)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='PyTorch ArcFace Training')
    parser.add_argument('--model_name', type=str, default='glint360_r100', help='backbone network')
    parser.add_argument('--img', type=str, default='/home/quang/Documents/FACE/deepface/tests/dataset/img1.jpg')
    args = parser.parse_args()
    model_name=args.model_name
    path_img=args.img
    # NOTE(review): model_name is parsed but ignored — the r50 loader
    # is always called; confirm whether loadModel(model_name) was meant.
    model=loadModel_ms1mv3_r50()
    first_parameter = next(model.parameters())
    input_shape = first_parameter.size()
    # immediately overridden with the 112x112 input size
    input_shape=(112,112)
    # input_shape = model.layers[0].input_shape
    print(input_shape)
    img1 = functions.preprocess_face(path_img,input_shape)
    feat=model.predict(img1)
print(feat.shape) | StarcoderdataPython |
214980 | from typing import List, Tuple, Set, Iterable
import itertools
import numpy as np
class GridError(Exception):
    """
    Custom Exception for grid validation.
    Raised when the inputted grid or its size is invalid.
    """
    pass
def input_grid(grid: List[str] = None) -> List[str]:
    """
    Input the grid, splitting the typed text into a list of strings.
    :param grid: (optional) predefined grid, returned unchanged
    :return: grid as a list of row strings
    """
    if grid is not None:
        return grid
    print("Please enter grid as strings, divided by commas")
    raw = input("Grid: ")
    # drop spaces, then one list entry per comma-separated row
    return raw.replace(' ', '').split(',')
def input_grid_size(n: int = None) -> int:
    """
    Input the grid size and check that it is valid.

    The original duplicated the parse/validate logic across the
    prompted and predefined branches; this version shares one path.

    :param n: (optional) predefined grid size
    :return: grid size as a positive int, or None if it cannot be parsed
    :raises GridError: if the size is zero or negative
    """
    if n is None:
        print("Please enter grid size as integer")
        n = input("Grid size: ")
    try:
        size = int(n)
    except ValueError:
        # mirror the original behaviour: report and return None
        print("Not a correct integer number")
        return None
    if size <= 0:
        raise GridError("Incorrect grid size, should be more than 0")
    return size
def parse_grid(grid: List[str], grid_size: int) -> Tuple[tuple, tuple, np.ndarray]:
    """
    Validate the inputted grid and return the positions of Mario and the
    princess, along with the grid in array format.

    :param grid: inputted grid as list of strings
    :param grid_size: size of the grid (size*size)
    :return: Mario position, princess position, grid as a numpy char array
    :raises GridError: on any structural problem with the grid
    """
    # checking if the grid is the correct size (N)
    if len(grid) != grid_size:
        raise GridError("Incorrect grid size")
    # list of correct signs in the grid
    acceptable_signs = ['-', 'm', 'p', 'x']
    # initializing variables for Mario and princess
    mario_position = None
    princess_position = None
    for line_num, line in enumerate(grid):
        # type check
        if not isinstance(line, str):
            raise GridError("Line {} is not defined by string".format(line_num))
        # line (row) size check
        elif len(line) != grid_size:
            raise GridError("Incorrect line {} size".format(line_num))
        for elem_num, elem in enumerate(line):
            # checking if every sign in the strings is acceptable
            if elem not in acceptable_signs:
                raise GridError("Incorrect sign on line {}, position {}".format(line_num, elem_num))
            # getting princess position
            elif elem == 'p':
                if princess_position is None:
                    princess_position = (line_num, elem_num)
                else:
                    raise GridError("Multiple princess positions defined")
            # getting Mario position
            elif elem == 'm':
                if mario_position is None:
                    mario_position = (line_num, elem_num)
                else:
                    raise GridError("Multiple Mario positions defined")
    # check if Mario and princess were found on the grid
    if mario_position is None:
        raise GridError("Mario position is not defined")
    # BUG FIX: the original re-tested mario_position here, so a grid
    # with no princess passed validation with princess_position=None
    if princess_position is None:
        raise GridError("Princess position is not defined")
    # converting grid to usable array form
    grid_array = np.array([list(line) for line in grid])
    return mario_position, princess_position, grid_array
def get_easy_possible_paths(mario_position: tuple, princess_position: tuple) -> Set[tuple]:
    """
    Build every 'easy' path: all distinct orderings of the axis-aligned
    unit moves separating Mario from the princess.
    :param mario_position: Mario position
    :param princess_position: princess position
    :return: set of all possible paths (each a tuple of moves)
    """
    # signed coordinate distance from Mario to the princess
    row_delta, col_delta = np.subtract(princess_position, mario_position)
    # the vertical moves, then the horizontal ones
    vertical = [(1, 0) if row_delta >= 0 else (-1, 0)] * abs(row_delta)
    horizontal = [(0, 1) if col_delta >= 0 else (0, -1)] * abs(col_delta)
    # every distinct ordering of those moves is a candidate path;
    # the set drops duplicate permutations
    return set(itertools.permutations(vertical + horizontal))
def find_working_paths(paths: Iterable[tuple], starting_position: tuple, grid: np.ndarray) -> List[tuple]:
    """
    Filter candidate paths down to those that never step on an obstacle.
    :param paths: iterable of candidate paths (tuples of moves)
    :param starting_position: starting position
    :param grid: validated array of a grid
    :return: list of working paths, translated to move names
    """
    move_names = {(1, 0): "DOWN", (-1, 0): "UP", (0, 1): "RIGHT", (0, -1): "LEFT"}
    working = []
    for path in paths:
        # walk the path from the start, stopping at the first obstacle
        position = starting_position
        blocked = False
        for step in path:
            position = tuple(np.add(position, step))
            if grid[position] == 'x':
                blocked = True
                break
        if not blocked:
            working.append(tuple(move_names[step] for step in path))
    return working
def check_valid_move(grid: np.ndarray, current_position: tuple, move: tuple) -> bool:
    """
    Check whether applying *move* to *current_position* stays inside the
    grid and does not land on an obstacle.

    :param grid: validated array of a grid
    :param current_position: current position
    :param move: move in tuple form
    :return: True or False
    """
    # coordinates after the move
    moved_position = tuple(np.add(current_position, move))
    # BUG FIX: the original required 0 < coordinate (strict), which
    # wrongly rejected every move onto row 0 or column 0; the valid
    # range is 0 <= coordinate < grid size on each axis
    inside = all(0 <= coord < limit
                 for coord, limit in zip(moved_position, grid.shape))
    if not inside:
        return False
    # a move onto an obstacle is invalid
    return bool(grid[moved_position] != 'x')
def pathfinder(starting_position: tuple, target_position: tuple, grid: np.ndarray) -> List[tuple] or None:
    """
    Path finding function to seek the available path if the 'easy' way fails
    Short description:
    1. Check all possible moves (not in history and not dead ends)
    2. Select the most profitable move (least distance to target)
    3. Move there, add previous position to history and move to path
    4. Go back to 1, if no move is possible, declare position dead end and move to previous position in history
    5. Repeat until the target is reached
    :param starting_position: starting position
    :param target_position: target position
    :param grid: validated array of a grid
    :return: single-element list with the found path, or None if unreachable
    """
    moves_dict = {(1, 0): "DOWN", (-1, 0): "UP", (0, 1): "RIGHT", (0, -1): "LEFT"}
    moves = []      # moves taken along the current path
    path = []       # positions visited along the current path
    dead_ends = []  # positions proven to lead nowhere
    def rate_position(current, target):
        """
        Helper function to calculate distance to target
        (squared Euclidean distance — no sqrt needed for comparison)
        """
        return (target[0] - current[0]) ** 2 + (target[1] - current[1]) ** 2
    # Setting starting position
    current_position = starting_position
    while current_position != target_position:
        possible_moves = {}
        # Checking for each possible move and rating them
        for m in moves_dict.keys():
            if check_valid_move(grid, current_position, m):
                new_position = tuple(np.add(current_position, m))
                new_position_rating = rate_position(new_position, target_position)
                if new_position not in path and new_position not in dead_ends:
                    # keyed by rating so min() picks the greedy best;
                    # NOTE(review): moves with equal ratings overwrite
                    # each other, keeping only one of the tied options
                    possible_moves[new_position_rating] = m
        # if there are possible move, select the one, that would move us the closest to target
        if possible_moves:
            path.append(current_position)  # save position to path
            moves.append(possible_moves[min(possible_moves)])  # save move to move list
            current_position = tuple(np.add(current_position, possible_moves[min(possible_moves)]))
        # if not, go back one move and add current position to dead ends
        else:
            # if no moves available from the start, return None
            if current_position == starting_position:
                return None
            dead_ends.append(current_position)  # save position to dead ends
            current_position = path[-1]  # move back one step
            path.pop(-1)  # delete step from path
            moves.pop(-1)  # delete move from move list
    return [tuple(moves_dict[move] for move in moves)]
def main(predef_size: int = None, predef_grid: List[str] = None) -> Tuple[bool, List[str] or None]:
    """
    Running the game itself
    :param predef_size: (optional) predefined grid size, skips prompting
    :param predef_grid: (optional) predefined grid, skips prompting
    :return: (error_occurred, working_paths or None)
    """
    try:
        # getting the game settings
        size = input_grid_size(predef_size)
        grid = input_grid(predef_grid)
        # parsing the grid
        mario_pos, princess_pos, v_grid = parse_grid(grid, size)
        print(v_grid)
        # getting all possible (easy) paths
        all_paths = get_easy_possible_paths(mario_pos, princess_pos)
        # testing all possible (easy) paths
        working_paths = find_working_paths(all_paths, mario_pos, v_grid)
        # if none of easy paths works, try to find path
        if not working_paths:
            working_paths = pathfinder(mario_pos, princess_pos, v_grid)
        return False, working_paths
    except GridError as ex:
        print("Grid is not correctly defined")
        print(ex)
        return True, None

if __name__ == "__main__":
    main()
| StarcoderdataPython |
266497 | <reponame>mithi/algorithm-playground
import numpy as np
import plotly.graph_objects as go
# rotate about y, translate in x
# rotate about y, translate in x
def frame_yrotate_xtranslate(theta, x):
    """Homogeneous 4x4 transform: rotation of *theta* degrees about the
    y-axis combined with a translation of *x* along the x-axis."""
    angle = np.radians(theta)
    c, s = np.cos(angle), np.sin(angle)
    return np.array([
        [c, 0, s, x],
        [0, 1, 0, 0],
        [-s, 0, c, 0],
        [0, 0, 0, 1],
    ])
# rotate about z, translate in x and y
# rotate about z, translate in x and y
def frame_zrotate_xytranslate(theta, x, y):
    """Homogeneous 4x4 transform: rotation of *theta* degrees about the
    z-axis combined with a translation of (*x*, *y*) in the xy-plane."""
    angle = np.radians(theta)
    c, s = np.cos(angle), np.sin(angle)
    return np.array([
        [c, -s, 0, x],
        [s, c, 0, y],
        [0, 0, 1, 0],
        [0, 0, 0, 1],
    ])
class Point:
    """A 3D point that can be re-expressed in another coordinate frame."""

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def get_point_wrt(self, reference_frame):
        # given frame_ab, the pose of frame_b wrt frame_a, and this point
        # defined wrt frame_b, return the same point defined wrt frame_a
        homogeneous = np.matmul(reference_frame, np.array([self.x, self.y, self.z, 1]))
        return Point(homogeneous[0], homogeneous[1], homogeneous[2])
class Linkage:
    """One hexapod leg made of three segments.

    a, b, c are the segment lengths (hip, knee, ankle — see the
    HIP/KNEE/ANKLE_LENGTH constants at the bottom of the file);
    alpha, beta, gamma are joint angles in degrees; new_x_axis (deg)
    and new_origin place the leg's frame on the body.  After
    construction, p0..p3 are the joint positions in the body frame.
    """
    def __init__(self, a, b, c, alpha=0, beta=0, gamma=0, new_x_axis=0, new_origin=Point(0, 0, 0)):
        # NOTE(review): the default new_origin Point is evaluated once
        # and shared across all calls that omit it; safe only while
        # Points are never mutated in place — confirm.
        self.store_linkage_attributes(a, b, c, new_x_axis, new_origin)
        self.save_new_pose(alpha, beta, gamma)
    def store_linkage_attributes(self, a, b, c, new_x_axis, new_origin):
        # raw geometry, kept so the pose can be recomputed later
        self._a = a
        self._b = b
        self._c = c
        self._new_origin = new_origin
        self._new_x_axis = new_x_axis
    def save_new_pose(self, alpha, beta, gamma):
        """Recompute the joint positions p0..p3 for the given angles."""
        self._alpha = alpha
        self._beta = beta
        self._gamma = gamma
        # frame_ab is the pose of frame_b wrt frame_a
        frame_01 = frame_yrotate_xtranslate(theta=-self._beta, x=self._a)
        frame_12 = frame_yrotate_xtranslate(theta=90-self._gamma, x=self._b)
        frame_23 = frame_yrotate_xtranslate(theta=0, x=self._c)
        frame_02 = np.matmul(frame_01, frame_12)
        frame_03 = np.matmul(frame_02, frame_23)
        # alpha swings the whole leg about z, on top of the mounting angle
        new_frame = frame_zrotate_xytranslate(self._new_x_axis + self._alpha, self._new_origin.x, self._new_origin.y)
        # find points wrt to body contact point
        p0 = Point(0, 0, 0)
        p1 = p0.get_point_wrt(frame_01)
        p2 = p0.get_point_wrt(frame_02)
        p3 = p0.get_point_wrt(frame_03)
        # find points wrt to center of gravity
        self.p0 = self._new_origin
        self.p1 = p1.get_point_wrt(new_frame)
        self.p2 = p2.get_point_wrt(new_frame)
        self.p3 = p3.get_point_wrt(new_frame)
    def change_pose(self, alpha=None, beta=None, gamma=None):
        """Update any subset of the joint angles, keeping the others."""
        if alpha is None:
            alpha = self._alpha
        if beta is None:
            beta = self._beta
        if gamma is None:
            gamma = self._gamma
        self.save_new_pose(alpha, beta, gamma)
class Hexagon:
    """The hexapod body outline.

    f, m, s are the front, mid and side dimensions (see the
    FRONT/MID/SIDE_LENGTH constants at the bottom of the file);
    vertices are the six leg attachment points and new_x_axes gives
    each leg frame's x-axis heading in degrees.
    """
    def __init__(self, f, m, s):
        self.f = f
        self.m = m
        self.s = s
        self.cog = Point(0, 0, 0)   # center of gravity at the origin
        self.head = Point(0, s, 0)  # head marker on the +y side
        self.vertices = [
            Point(m, 0, 0),
            Point(f, s, 0),
            Point(-f, s, 0),
            Point(-m, 0, 0),
            Point(-f, -s, 0),
            Point(f, -s, 0),
        ]
        # one heading per vertex, in the same order as vertices
        self.new_x_axes = [
            0, 45, 135, 180, 225, 315
        ]
class VirtualHexapod:
    """Hexapod model: a Hexagon body plus six Linkage legs in the
    neutral pose (all joint angles zero)."""
    def __init__(self, a=0, b=0, c=0, f=0, m=0, s=0):
        # a/b/c: leg segment lengths; f/m/s: body dimensions
        self.linkage_measurements = [a, b, c]
        self.body_measurements = [f, m, s]
        self.body = Hexagon(f, m, s)
        self.store_neutral_legs(a, b, c)
    def store_neutral_legs(self, a, b, c):
        """Attach one neutral-pose leg at each body vertex, oriented
        along that vertex's x-axis heading."""
        self.legs = []
        for point, theta in zip(self.body.vertices, self.body.new_x_axes):
            linkage = Linkage(a, b, c, new_x_axis=theta, new_origin=point)
            self.legs.append(linkage)
class VirtualHexapodPlot:
    """Renders a VirtualHexapod as an interactive plotly 3D figure."""
    def __init__(self, _hexapod):
        self.hexapod = _hexapod
        self.fig = go.Figure()
    def _draw_lines(self, _name, _points, _size, _color, _is_name_visible=True):
        # one Scatter3d trace connecting the given points in order
        self.fig.add_trace(go.Scatter3d(
            name=_name,
            x=[point.x for point in _points],
            y=[point.y for point in _points],
            z=[point.z for point in _points],
            line={
                'color': _color,
                'width': _size
            },
            showlegend=_is_name_visible
        ))
    def _draw_point(self, _name, _point, _size, _color):
        # a single marker trace (used for the head and center of gravity)
        self.fig.add_trace(go.Scatter3d(
            name=_name,
            x=[_point.x],
            y=[_point.y],
            z=[_point.z],
            mode='markers',
            marker={
                'size': _size,
                'color': _color,
                'opacity': 1.0
            }
        ))
    def draw(self):
        """Draw the body outline, head, center of gravity and legs,
        then open the figure in the default renderer."""
        LINE_SIZE = 10
        HEAD_SIZE = 15
        COG_SIZE = 10
        BODY_COLOR = '#8e44ad'
        COG_COLOR = '#e74c3c'
        LEG_COLOR = '#2c3e50'
        # Add body outline (first vertex repeated to close the hexagon)
        points = self.hexapod.body.vertices
        self._draw_lines('body', points + [points[0]], LINE_SIZE, BODY_COLOR)
        # Add head and center of gravity
        self._draw_point('cog', self.hexapod.body.cog, COG_SIZE, COG_COLOR)
        self._draw_point('head', self.hexapod.body.head, HEAD_SIZE, BODY_COLOR)
        # Draw legs
        for leg in self.hexapod.legs:
            points = [leg.p0, leg.p1, leg.p2, leg.p3]
            self._draw_lines('leg', points, LINE_SIZE, LEG_COLOR, False)
        self.fig.show()
# Demo hexapod dimensions (units unspecified)
FRONT_LENGTH = 20
SIDE_LENGTH = 30
MID_LENGTH = 40
HIP_LENGTH = 25
KNEE_LENGTH = 40
ANKLE_LENGTH = 60

# Build a hexapod in its neutral pose and render it with plotly
virtual_hexapod = VirtualHexapod(HIP_LENGTH, KNEE_LENGTH, ANKLE_LENGTH, FRONT_LENGTH, MID_LENGTH, SIDE_LENGTH)
VirtualHexapodPlot(virtual_hexapod).draw()
| StarcoderdataPython |
4993917 | from ui import Widget, Text, MuiFont, Display, MotionEvent
| StarcoderdataPython |
215044 | <reponame>EkremBayar/bayar
# Static-typing test file: `reveal_type` and the `# E:` comments are
# consumed by mypy's test harness; this module is not meant to run.
from typing import TypeVar, Union
import numpy as np
import numpy.typing as npt

# T binds the precision (bit width) shared by both arguments
T = TypeVar("T", bound=npt.NBitBase)

def add(a: np.floating[T], b: np.integer[T]) -> np.floating[T]:
    """Add a float and an int whose precisions unify to T."""
    return a + b

i8: np.int64
i4: np.int32
f8: np.float64
f4: np.float32

# Expected: the result precision is the larger of the two arguments'
reveal_type(add(f8, i8))  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(add(f4, i8))  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(add(f8, i4))  # E: numpy.floating[numpy.typing._64Bit]
reveal_type(add(f4, i4))  # E: numpy.floating[numpy.typing._32Bit]
| StarcoderdataPython |
5153278 | <reponame>marvincosmo/Python-Curso-em-Video<filename>ex058 - Jogo da adivinhação v2.0.py
""" 58 - Melhore o jogo do desafio 028, onde o computador vai 'pensar' em um número entre 0 e 10. Só que agora o
jogador vai tentar adivinhar até acertar, mostrando, no final, quantos palpites foram necessários para vencer. """
'''
# Minha versão
from random import randint
print('\033[33m-=-\033[m' * 21)
print('\033[34mPensei em um número entre 0 e 10. Tente adivinhar...\033[m')
print('\033[33m-=-\033[m' * 21)
computador = randint(0, 10) # A jogada do computador
tentativas = 0
jogador = -1
while jogador != computador:
jogador = int(input('Em qual número eu pensei? ')) # A jogada do jogador
if jogador < computador:
print('\033[91mMais... Tente outra vez!\033[m')
elif jogador > computador:
print('\033[31mMenos... Tente outra vez!\033[m')
tentativas += 1
if jogador == computador:
print(f'\033[32mPARABÉNS! Você adivinhou em {tentativas} tentativas.\033[m')
'''
# Instructor's version: guess-the-number with higher/lower hints and a
# guess counter (user-facing strings intentionally kept in Portuguese)
from random import randint
print('\033[33m-=-\033[m' * 21)
print('\033[34mPensei em um número entre 0 e 10. Tente adivinhar...\033[m')
print('\033[33m-=-\033[m' * 21)
computador = randint(0, 10)  # the computer's secret pick
acertou = False   # set True once the player guesses right
palpites = 0      # number of guesses taken
while not acertou:
    jogador = int(input('Em qual número eu pensei? '))
    palpites += 1
    if jogador == computador:
        acertou = True
    else:
        # hint whether the secret number is higher or lower
        if jogador < computador:
            print('\033[91mMais... Tente outra vez!\033[m')
        elif jogador > computador:
            print('\033[31mMenos... Tente outra vez!\033[m')
print(f'\033[32mPARABÉNS! Você adivinhou em {palpites} tentativas.\033[m')
| StarcoderdataPython |
6670796 | import logging
import numpy as np
import tqdm
from lwrl.utils.visualizer import Visualizer
import lwrl.utils.logging as L
class Runner:
    """Drives the train/evaluate loop between an agent and environments."""

    def __init__(self, agent, env, test_env=None):
        self.agent = agent
        self.env = env
        # fall back to the training env when no dedicated test env is given
        self.test_env = test_env if test_env is not None else self.env

    def train(self,
              max_timestep=50000000,
              save_freq=100000,
              test_freq=1000,
              log_freq=1,
              verbose=True):
        """Train for up to max_timestep environment steps.

        save_freq is measured in timesteps; test_freq and log_freq in
        completed episodes.  Progress is streamed to a Visualizer and
        the module logger, and the model is checkpointed periodically.
        """
        vis = Visualizer()
        logger = logging.getLogger(__name__)
        logger.info(L.begin_section('Training'))
        pbar = range(max_timestep)
        if verbose:
            pbar = tqdm.tqdm(pbar)
        obs = self.env.reset()
        episode_reward = 0
        episode_rewards = []        # score of every finished episode
        episode_timesteps = []      # global step at which each episode ended
        acc_episode_rewards = []    # buffers flushed to the visualizer
        acc_avg_episode_rewards = []
        for t in pbar:
            action = self.agent.act(obs)
            # take action in the environment
            obs, reward, done = self.env.step(action)
            episode_reward += reward
            # observe the effect
            self.agent.observe(obs, action, reward, done, training=True)
            if done:
                # NOTE(review): obs is not reassigned from env.reset()
                # here, so the first act() of the next episode sees the
                # terminal observation — confirm whether the env/agent
                # handles the reset internally.
                self.env.reset()
                self.agent.reset()
                episode_rewards.append(episode_reward)
                episode_timesteps.append(t)
                acc_episode_rewards.append(episode_reward)
                # log training status: running average over the last
                # 100 completed episodes (excluding the current one)
                total_episodes = len(episode_rewards)
                if total_episodes < 100:
                    avg_r = np.mean(episode_rewards)
                else:
                    avg_r = np.mean(episode_rewards[-101:-1])
                acc_avg_episode_rewards.append(avg_r)
                if total_episodes % log_freq == 0:
                    logger.info(
                        'Reporting @ episode {}'.format(total_episodes))
                    logger.info('Episode {}: total timestep:\t{}'.format(
                        total_episodes, t))
                    logger.info('Episode {}: episode score:\t{}'.format(
                        total_episodes, episode_reward))
                    logger.info('Episode {}: avg. eps. score:\t{}'.format(
                        total_episodes, avg_r))
                    vis.line(
                        'episode reward',
                        np.array(episode_timesteps),
                        np.array(acc_episode_rewards),
                        xlabel='Timestep',
                        append=True)
                    vis.line(
                        'average episode reward',
                        np.array(episode_timesteps),
                        np.array(acc_avg_episode_rewards),
                        xlabel='Timestep',
                        append=True)
                    # buffers flushed — start fresh accumulation
                    episode_timesteps = []
                    acc_episode_rewards = []
                    acc_avg_episode_rewards = []
                if verbose:
                    pbar.set_description(
                        'Train: episode: {}, global steps: {}, episode score: {}, avg score: {}'.
                        format(total_episodes, t, episode_reward, avg_r))
                if total_episodes % test_freq == 0:
                    test_score = self.test()
                    vis.line(
                        'test score',
                        np.array([t]),
                        np.array([test_score]),
                        xlabel='Timestep',
                        append=True)
                    logger.info(
                        'Evaluating @ episode {}'.format(total_episodes))
                    logger.info('Episode {}: test score:\t{}'.format(
                        total_episodes, test_score))
                episode_reward = 0
            if t % save_freq == 0:
                self.agent.model.save(t)

    def test(self, num_episodes=10):
        """Run num_episodes greedy (no-exploration) episodes on the
        test environment and return the mean episode score."""
        scores = []
        for episode in range(num_episodes):
            obs = self.test_env.reset()
            done = False
            acc_reward = 0
            while not done:
                # random_action=False: act greedily during evaluation
                action = self.agent.act(obs, random_action=False)
                obs, reward, done = self.test_env.step(action)
                self.agent.observe(obs, action, reward, done)
                acc_reward += reward
                if done:
                    scores.append(acc_reward)
                    self.test_env.reset()
                    self.agent.reset()
        return sum(scores) / float(num_episodes)
| StarcoderdataPython |
"""Optimize services and applications deployed on Kubernetes with Opsani.
"""
from __future__ import annotations, print_function
import abc
import asyncio
import collections
import contextlib
import copy
import datetime
import decimal
import enum
import functools
import itertools
import json
import operator
import os
import pathlib
import re
from typing import (
Any,
Callable,
ClassVar,
Collection,
Dict,
Generator,
Iterable,
List,
Mapping,
Optional,
Protocol,
Sequence,
Tuple,
Type,
Union,
cast,
get_type_hints,
runtime_checkable,
)
import backoff
import kubernetes_asyncio
import kubernetes_asyncio.client.models
import kubernetes_asyncio.client
import kubernetes_asyncio.client.exceptions
import kubernetes_asyncio.watch
import pydantic
import servo
class Condition(servo.logging.Mixin):
    """A Condition wraps a callable and its arguments so that the call can be
    deferred and re-evaluated over time.

    The wrapped function is invoked by the ``check`` method, which coerces the
    result to a boolean value, so the condition function should return a
    boolean or something that ultimately resolves to a Truthy or Falsey value.

    Args:
        name: The name of the condition to make it easier to identify.
        fn: The condition function that will be checked.
        *args: Any arguments for the condition function.
        **kwargs: Any keyword arguments for the condition function.

    Attributes:
        name (str): The name of the Condition.
        fn (callable): The condition function that will be checked.
        args (tuple): Arguments for the checking function.
        kwargs (dict): Keyword arguments for the checking function.
        last_check (bool): Holds the state of the last condition check.

    Raises:
        ValueError: The given ``fn`` is not callable.
    """

    def __init__(self, name: str, fn: Callable, *args, **kwargs) -> None:  # noqa: D107
        if not callable(fn):
            raise ValueError("The Condition function must be callable")

        self.name = name
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

        # Tracks the boolean outcome of the most recent check() invocation.
        self.last_check = False

    def __str__(self) -> str:
        return f"<Condition (name: {self.name}, met: {self.last_check})>"

    def __repr__(self) -> str:
        return self.__str__()

    async def check(self) -> bool:
        """Evaluate the condition function and record whether it was met.

        Returns:
            True if the condition was met; False otherwise.
        """
        # Support both coroutine and plain condition functions.
        if asyncio.iscoroutinefunction(self.fn):
            outcome = await self.fn(*self.args, **self.kwargs)
        else:
            outcome = self.fn(*self.args, **self.kwargs)
        self.last_check = bool(outcome)
        return self.last_check
async def wait_for_condition(
    condition: Condition,
    interval: servo.DurationDescriptor = 0.05,
    fail_on_api_error: bool = True,
) -> None:
    """Wait indefinitely for a condition to be met, polling at an interval.

    This function blocks until the condition check succeeds or the enclosing
    task is cancelled. There is no timeout parameter; cancellation is the only
    way to stop waiting.

    Args:
        condition: The Condition to wait for.
        interval: The time (a Duration descriptor, defaulting to seconds) to
            wait before re-checking the condition.
        fail_on_api_error: Fail the condition checks if a Kubernetes API error is
            incurred. An API error can be raised for a number of reasons, including
            a Pod being restarted and temporarily unavailable. Disabling this will
            cause those errors to be ignored, allowing the check to continue until
            resolution. (default: True).

    Raises:
        kubernetes_asyncio.client.exceptions.ApiException: A Kubernetes API error
            occurred during a check and ``fail_on_api_error`` is True.
    """
    servo.logger.debug(f"waiting for condition: {condition}")
    started_at = datetime.datetime.now()
    duration = servo.Duration(interval)
    async def _wait_for_condition() -> None:
        servo.logger.debug(f"wait for condition: {condition}")
        while True:
            try:
                servo.logger.trace(f"checking condition {condition}")
                if await condition.check():
                    servo.logger.trace(f"condition passed: {condition}")
                    break
                # if the condition is not met, sleep for the interval
                # to re-check later
                servo.logger.trace(f"sleeping for {duration}")
                await asyncio.sleep(duration.total_seconds())
            except asyncio.CancelledError:
                servo.logger.trace(f"wait for condition cancelled: {condition}")
                raise
            except kubernetes_asyncio.client.exceptions.ApiException as e:
                servo.logger.warning(f"encountered API exception while waiting: {e}")
                if fail_on_api_error:
                    raise
    # Run the polling loop as a task so that cancellation of this coroutine
    # can be forwarded to (and awaited on) the inner loop before re-raising.
    task = asyncio.create_task(_wait_for_condition())
    try:
        await task
    except asyncio.CancelledError:
        task.cancel()
        with contextlib.suppress(asyncio.CancelledError):
            await task
        raise
    finally:
        servo.logger.debug(f"wait completed (total={servo.Duration.since(started_at)}) {condition}")
class Resource(str, enum.Enum):
    """Resource enumerates the Container compute resources subject to optimization."""

    memory = "memory"
    cpu = "cpu"

    @classmethod
    def values(cls) -> List[str]:
        """
        Return a list of strings that identifies all resource values.
        """
        # List comprehension is the idiomatic form of list(map(lambda ...)).
        return [resource.value for resource in cls.__members__.values()]
class ResourceRequirement(enum.Enum):
    """
    The ResourceRequirement enumeration determines how optimization values are submitted to the
    Kubernetes scheduler to allocate core compute resources. Requests establish the lower bounds
    of the CPU and memory necessary for an application to execute while Limits define the upper
    bounds for resources that can be consumed by a given Pod. The Opsani engine can determine
    optimal values for these settings by identifying performant, low cost configurations that meet
    target SLOs and/or maximizing performance while identifying the point of diminishing returns
    on further resourcing.
    """

    request = 'request'
    limit = 'limit'

    @property
    def resources_key(self) -> str:
        """
        Return a string value for accessing resource requirements within a Kubernetes Container representation.
        """
        # Guard-clause form: map each member to its key in the Kubernetes
        # container resources model ("requests"/"limits").
        if self is ResourceRequirement.request:
            return "requests"
        if self is ResourceRequirement.limit:
            return "limits"
        raise NotImplementedError(
            f'missing resources_key implementation for resource requirement "{self}"'
        )
@runtime_checkable
class KubernetesObj(Protocol):
    """
    KubernetesObj is a protocol that defines the common attributes
    of objects retrieved from the Kubernetes API.

    Any object exposing ``api_version``, ``kind``, and ``metadata`` properties
    satisfies this protocol; the ``runtime_checkable`` decorator allows
    ``isinstance`` checks against it.
    """

    @property
    def api_version(self) -> str:
        # The apiVersion of the object (e.g. "v1", "apps/v1").
        ...

    @property
    def kind(self) -> str:
        # The Kubernetes kind identifying the object type.
        ...

    @property
    def metadata(self) -> kubernetes_asyncio.client.V1ObjectMeta:
        # Standard object metadata (name, namespace, labels, ...).
        ...
class KubernetesModel(abc.ABC, servo.logging.Mixin):
    """
    KubernetesModel is an abstract base class for Servo connector
    models that wrap Kubernetes API objects.

    This base class provides common functionality and common object
    properties for all API wrappers. It also defines the following
    abstract methods which all subclasses must implement:

      - ``create``: create the resource on the cluster
      - ``patch``: partially update the resource on the cluster
      - ``delete``: remove the resource from the cluster
      - ``refresh``: refresh the underlying object model
      - ``is_ready``: check if the object is in the ready state

    Args:
        api_object: The underlying Kubernetes API object.

    Attributes:
        obj: The underlying Kubernetes API object.
    """

    obj: KubernetesObj
    """The underlying Kubernetes API object. Subclasses must update
    the type hint to reflect the type that they are wrapping.
    """

    api_clients: ClassVar[Dict[str, Type]]
    """A mapping of all the supported api clients for the API
    object type. Various resources can have multiple versions,
    e.g. "apps/v1", "apps/v1beta1", etc. The preferred version
    for each resource type should be defined under the "preferred"
    key. The preferred API client will be used when the apiVersion
    is not specified for the resource.
    """

    def __init__(self, obj, **kwargs) -> None:  # noqa: D107
        self.obj = obj
        self._logger = servo.logger

    def __str__(self) -> str:
        return str(self.obj)

    def __repr__(self) -> str:
        return self.__str__()

    @classmethod
    def obj_type(cls) -> Type:
        """The type of the underlying Kubernetes API object."""
        return get_type_hints(cls)["obj"]

    @property
    def api_version(self) -> str:
        """The API version of the Kubernetes object (``obj.apiVersion``)."""
        return self.obj.api_version

    @property
    def name(self) -> str:
        """The name of the Kubernetes object (``obj.metadata.name``)."""
        return cast(str, self.obj.metadata.name)

    @name.setter
    def name(self, name: str):
        """Set the name of the Kubernetes object (``obj.metadata.name``)."""
        self.obj.metadata.name = name

    @property
    def namespace(self) -> str:
        """The namespace of the Kubernetes object (``obj.metadata.namespace``)."""
        return cast(str, self.obj.metadata.namespace)

    @namespace.setter
    def namespace(self, namespace: str):
        """Set the namespace of the Kubernetes object (``obj.metadata.namespace``)."""
        self.obj.metadata.namespace = namespace

    @contextlib.asynccontextmanager
    async def api_client(self) -> Generator[Any, None, None]:
        """The API client for the Kubernetes object. This is determined
        by the ``apiVersion`` of the object configuration.

        Raises:
            ValueError: The API version is not supported.
        """
        c = self.api_clients.get(self.api_version)
        # If we didn't find the client in the api_clients dict, use the
        # preferred version.
        if c is None:
            self.logger.debug(
                f"unknown API version ({self.api_version}) for {self.__class__.__name__}, falling back to preferred version"
            )
            c = self.api_clients.get("preferred")
            if c is None:
                raise ValueError(
                    "unknown version specified and no preferred version "
                    f"defined for resource ({self.api_version})"
                )
        # If we did find it, initialize that client version.
        async with kubernetes_asyncio.client.api_client.ApiClient() as api:
            yield c(api)

    @classmethod
    @contextlib.asynccontextmanager
    async def preferred_client(cls) -> Generator[Any, None, None]:
        """The preferred API client type for the Kubernetes object. This is defined in the
        ``api_clients`` class member dict for each object.

        Raises:
            ValueError: No preferred client is defined for the object.
        """
        c = cls.api_clients.get("preferred")
        if c is None:
            raise ValueError(
                f"no preferred api client defined for object {cls.__name__}",
            )
        async with kubernetes_asyncio.client.api_client.ApiClient() as api:
            yield c(api)

    # NOTE: abc.abstractclassmethod is deprecated (since Python 3.3); the
    # supported spelling is @classmethod stacked over @abc.abstractmethod.
    @classmethod
    @abc.abstractmethod
    async def read(cls, name: str, namespace: str) -> "KubernetesModel":
        """Read the underlying Kubernetes resource from the cluster and
        return a model instance.

        Args:
            name: The name of the resource to read.
            namespace: The namespace to read the resource from.
        """

    @abc.abstractmethod
    async def create(self, namespace: str = None) -> None:
        """Create the underlying Kubernetes resource in the cluster
        under the given namespace.

        Args:
            namespace: The namespace to create the resource under.
                If no namespace is provided, it will use the instance's
                namespace member, which is set when the object is created
                via the kubernetes_asyncio.client
        """

    @abc.abstractmethod
    async def patch(self) -> None:
        """Partially update the underlying Kubernetes resource in the cluster."""

    @abc.abstractmethod
    async def delete(self, options: kubernetes_asyncio.client.V1DeleteOptions) -> kubernetes_asyncio.client.V1Status:
        """Delete the underlying Kubernetes resource from the cluster.

        This method expects the resource to have been loaded or otherwise
        assigned a namespace already. If it has not, the namespace will need
        to be set manually.

        Args:
            options: Options for resource deletion.
        """

    @abc.abstractmethod
    async def refresh(self) -> None:
        """Refresh the local state (``obj``) of the underlying Kubernetes resource."""

    @abc.abstractmethod
    async def is_ready(self) -> bool:
        """Check if the resource is in the ready state.

        It is up to the wrapper subclass to define what "ready" means for
        that particular resource.

        Returns:
            True if in the ready state; False otherwise.
        """

    async def wait_until_ready(
        self,
        interval: servo.DurationDescriptor = 1,
        fail_on_api_error: bool = False,
    ) -> None:
        """Wait indefinitely until the resource is in the ready state.

        There is no timeout parameter; cancel the enclosing task to stop
        waiting.

        Args:
            interval: The time, in seconds, to wait before re-checking if the
                object is ready.
            fail_on_api_error: Fail if an API error is raised. An API error can
                be raised for a number of reasons, such as 'resource not found',
                which could be the case when a resource is just being started or
                restarted. When waiting for readiness we generally do not want to
                fail on these conditions.
        """
        ready_condition = Condition(
            "api object ready",
            self.is_ready,
        )
        # Run the wait as a task so that cancellation of this coroutine is
        # forwarded to (and awaited on) the waiter before re-raising.
        task = asyncio.create_task(
            wait_for_condition(
                condition=ready_condition,
                interval=interval,
                fail_on_api_error=fail_on_api_error,
            )
        )
        try:
            await task
        except asyncio.CancelledError:
            task.cancel()
            with contextlib.suppress(asyncio.CancelledError):
                await task
            raise

    async def wait_until_deleted(
        self,
        interval: servo.DurationDescriptor = 1
    ) -> None:
        """Wait indefinitely until the resource is deleted from the cluster.

        There is no timeout parameter; cancel the enclosing task to stop
        waiting.

        Args:
            interval: The time, in seconds, to wait before re-checking if the
                object has been deleted.
        """
        async def deleted_fn():
            try:
                await self.refresh()
            except kubernetes_asyncio.client.exceptions.ApiException as e:
                # If we can no longer find the deployment, it is deleted.
                # If we get any other exception, raise it.
                if e.status == 404 and e.reason == "Not Found":
                    return True
                else:
                    self.logger.error("error refreshing object state")
                    raise e
            else:
                # The object was still found, so it has not been deleted
                return False

        delete_condition = Condition("api object deleted", deleted_fn)
        task = asyncio.create_task(
            wait_for_condition(
                condition=delete_condition,
                interval=interval,
            )
        )
        try:
            await task
        except asyncio.CancelledError:
            task.cancel()
            with contextlib.suppress(asyncio.CancelledError):
                await task
            raise

    async def raise_for_status(self) -> None:
        """Raise an exception if in an unhealthy state."""
        self.logger.warning(f"raise_for_status not implemented on {self.__class__.__name__}")
class Namespace(KubernetesModel):
    """Kubetest wrapper around a Kubernetes `Namespace`_ API Object.

    The actual ``kubernetes.client.V1Namespace`` instance that this
    wraps can be accessed via the ``obj`` instance member.

    This wrapper provides some convenient functionality around the
    API Object and provides some state management for the `Namespace`_.

    .. _Namespace:
        https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#namespace-v1-core
    """
    obj:kubernetes_asyncio.client.V1Namespace
    api_clients: ClassVar[Dict[str, Type]] = {
        "preferred":kubernetes_asyncio.client.CoreV1Api,
        "v1":kubernetes_asyncio.client.CoreV1Api,
    }
    @classmethod
    def new(cls, name: str) -> "Namespace":
        """Create a new Namespace with object backing.

        Args:
            name: The name of the new Namespace.

        Returns:
            A new Namespace instance.
        """
        return cls(
            obj=kubernetes_asyncio.client.V1Namespace(
                api_version="v1", metadata=kubernetes_asyncio.client.V1ObjectMeta(name=name)
            )
        )
    @classmethod
    async def read(cls, name: str) -> "Namespace":
        """Read a Namespace from the Kubernetes API.

        Args:
            name: The name of the Namespace to read.

        Returns:
            A hydrated Namespace instance.
        """
        namespace = cls.new(name)
        await namespace.refresh()
        return namespace
    async def create(self, name: str = None) -> None:
        """Create the Namespace under the given name.

        Args:
            name: The name to create the Namespace under. If the
                name is not provided, it will be assumed to already be
                in the underlying object spec. If it is not, namespace
                operations will fail.
        """
        if name is not None:
            self.name = name
        self.logger.info(f'creating namespace "{self.name}"')
        async with self.api_client() as api_client:
            self.obj = await api_client.create_namespace(
                body=self.obj,
            )
    async def patch(self) -> None:
        """Patch the Namespace, applying the local object state to the cluster."""
        async with self.api_client() as api_client:
            await api_client.patch_namespace(
                name=self.name,
                body=self.obj,
            )
    async def delete(self, options:kubernetes_asyncio.client.V1DeleteOptions = None) -> kubernetes_asyncio.client.V1Status:
        """Delete the Namespace.

        Args:
            options: Options for Namespace deletion. Defaults to empty
                delete options when not provided.

        Returns:
            The status of the delete operation.
        """
        if options is None:
            options =kubernetes_asyncio.client.V1DeleteOptions()
        self.logger.info(f'deleting namespace "{self.name}"')
        self.logger.debug(f"delete options: {options}")
        async with self.api_client() as api_client:
            return await api_client.delete_namespace(
                name=self.name,
                body=options,
            )
    async def refresh(self) -> None:
        """Refresh the underlying Kubernetes Namespace resource."""
        async with self.api_client() as api_client:
            self.obj = await api_client.read_namespace(
                name=self.name,
            )
    async def is_ready(self) -> bool:
        """Check if the Namespace is in the ready state.

        Returns:
            True if in the ready state; False otherwise.
        """
        await self.refresh()
        # A Namespace is considered ready when its phase is "Active".
        status = self.obj.status
        if status is None:
            return False
        return status.phase.lower() == "active"
# Sentinel used to distinguish "argument not supplied" from an explicit None.
# NOTE(review): no references within this chunk -- confirm usage at call sites
# elsewhere in the file.
_DEFAULT_SENTINEL = object()
class Container(servo.logging.Mixin):
    """Kubetest wrapper around a Kubernetes `Container`_ API Object.

    The actual ``kubernetes.client.V1Container`` instance that this
    wraps can be accessed via the ``obj`` instance member.

    This wrapper provides some convenient functionality around the
    API Object and provides some state management for the `Container`_.

    This wrapper does **NOT** subclass the ``objects.ApiObject`` like other
    object wrappers because it is not intended to be created or
    managed from manifest file. It is merely meant to wrap the
    Container spec for a Pod to make Container-targeted actions
    easier.

    .. _Container:
        https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core
    """

    def __init__(self, api_object, pod) -> None:  # noqa: D107
        self.obj = api_object
        self.pod = pod

    @property
    def name(self) -> str:
        """The name of the Container (``obj.name``)."""
        return self.obj.name

    @property
    def image(self) -> str:
        """
        Returns the container image name from the underlying container object.
        """
        return self.obj.image

    async def get_restart_count(self) -> int:
        """Get the number of times the Container has been restarted.

        Returns:
            The number of times the Container has been restarted.

        Raises:
            RuntimeError: No status entry was found for this container.
        """
        container_name = self.obj.name
        pod_status = await self.pod.get_status()

        # If there are no container status, the container hasn't started
        # yet, so there cannot be any restarts.
        if pod_status.container_statuses is None:
            return 0

        for status in pod_status.container_statuses:
            if status.name == container_name:
                return status.restart_count

        raise RuntimeError(f"Unable to determine container status for {container_name}")

    @property
    def resources(self) -> kubernetes_asyncio.client.V1ResourceRequirements:
        """
        Return the resource requirements for the Container.

        Returns:
            The Container resource requirements.
        """
        return self.obj.resources

    @resources.setter
    def resources(self, resources: kubernetes_asyncio.client.V1ResourceRequirements) -> None:
        """
        Set the resource requirements for the Container.

        Args:
            resources: The resource requirements to set.
        """
        self.obj.resources = resources

    def get_resource_requirements(self, name: str) -> Dict[ResourceRequirement, Optional[str]]:
        """Return a dictionary mapping resource requirements to values for a given resource (e.g., cpu or memory).

        This method is safe to call for containers that do not define any resource requirements (e.g., the `resources` property is None).

        Requirements that are not defined for the named resource are returned as None. For example, a container
        that defines CPU requests but does not define limits would return a dict with a `None` value for
        the `ResourceRequirement.limit` key.

        Args:
            name: The name of the resource to get the requirements of (e.g., "cpu" or "memory").

        Returns:
            A dictionary mapping ResourceRequirement enum members to optional string values.
        """
        resources: kubernetes_asyncio.client.V1ResourceRequirements = getattr(self, 'resources', kubernetes_asyncio.client.V1ResourceRequirements())
        requirements = {}
        for requirement in ResourceRequirement:
            # Get the 'requests' or 'limits' nested structure
            requirement_subdict = getattr(resources, requirement.resources_key, {})
            if requirement_subdict:
                requirements[requirement] = requirement_subdict.get(name)
            else:
                requirements[requirement] = None

        return requirements

    def set_resource_requirements(self, name: str, requirements: Dict[ResourceRequirement, Optional[str]]) -> None:
        """Sets resource requirements on the container for the values in the given dictionary.

        If no resources have been defined yet, a resources model is provisioned.
        If no requirements have been defined for the given resource name, a requirements dictionary is defined.
        Values of None are removed from the target requirements.
        ResourceRequirement keys that are not present in the dict are not modified.

        Args:
            name: The name of the resource to set the requirements of (e.g., "cpu" or "memory").
            requirements: A dict mapping requirements to target values (e.g., `{ResourceRequirement.request: '500m', ResourceRequirement.limit: '2000m'})
        """
        # NOTE: deepcopy is required here. A shallow copy (copy.copy) shares the
        # nested 'requests'/'limits' dicts with the original object, so the
        # in-place mutation below would leak through to the un-copied resources.
        resources: kubernetes_asyncio.client.V1ResourceRequirements = copy.deepcopy(
            getattr(self, 'resources', kubernetes_asyncio.client.V1ResourceRequirements())
        )

        for requirement, value in requirements.items():
            resource_to_values = getattr(resources, requirement.resources_key, {})
            if not resource_to_values:
                resource_to_values = {}

            if value is not None:
                # NOTE: Coerce to string as values are headed into Kubernetes resource model
                resource_to_values[name] = str(value)
            else:
                resource_to_values.pop(name, None)
            setattr(resources, requirement.resources_key, resource_to_values)

        self.resources = resources

    @property
    def ports(self) -> List[kubernetes_asyncio.client.V1ContainerPort]:
        """
        Return the ports for the Container.

        Returns:
            The Container ports.
        """
        return self.obj.ports or []

    def __str__(self) -> str:
        return str(self.obj)

    def __repr__(self) -> str:
        return self.__str__()
class Pod(KubernetesModel):
    """Wrapper around a Kubernetes `Pod`_ API Object.

    The actual ``kubernetes.client.V1Pod`` instance that this
    wraps can be accessed via the ``obj`` instance member.

    This wrapper provides some convenient functionality around the
    API Object and provides some state management for the `Pod`_.

    .. _Pod:
        https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#pod-v1-core
    """

    obj: kubernetes_asyncio.client.V1Pod

    api_clients: ClassVar[Dict[str, Type]] = {
        "preferred": kubernetes_asyncio.client.CoreV1Api,
        "v1": kubernetes_asyncio.client.CoreV1Api,
    }

    @classmethod
    async def read(cls, name: str, namespace: str) -> "Pod":
        """Read the Pod from the cluster under the given namespace.

        Args:
            name: The name of the Pod to read.
            namespace: The namespace to read the Pod from.
        """
        servo.logger.debug(f'reading pod "{name}" in namespace "{namespace}"')
        async with cls.preferred_client() as api_client:
            obj = await api_client.read_namespaced_pod_status(name, namespace)
            return Pod(obj)

    async def create(self, namespace: str = None) -> None:
        """Create the Pod under the given namespace.

        Args:
            namespace: The namespace to create the Pod under.
                If the Pod was loaded via the kubetest client, the
                namespace will already be set, so it is not needed
                here. Otherwise, the namespace will need to be provided.
        """
        if namespace is None:
            namespace = self.namespace

        self.logger.info(f'creating pod "{self.name}" in namespace "{namespace}"')

        async with self.preferred_client() as api_client:
            self.obj = await api_client.create_namespaced_pod(
                namespace=namespace,
                body=self.obj,
            )

    async def patch(self) -> None:
        """
        Patches a Pod, applying spec changes to the cluster.
        """
        self.logger.info(f'patching pod "{self.name}"')
        async with self.api_client() as api_client:
            # Strategic merge patch updates only the fields present in the body.
            api_client.api_client.set_default_header('content-type', 'application/strategic-merge-patch+json')
            await api_client.patch_namespaced_pod(
                name=self.name,
                namespace=self.namespace,
                body=self.obj,
            )

    async def delete(self, options: kubernetes_asyncio.client.V1DeleteOptions = None) -> kubernetes_asyncio.client.V1Status:
        """Delete the Pod.

        This method expects the Pod to have been loaded or otherwise
        assigned a namespace already. If it has not, the namespace will
        need to be set manually.

        Args:
            options: Options for Pod deletion.

        Return:
            The status of the delete operation.
        """
        if options is None:
            options = kubernetes_asyncio.client.V1DeleteOptions()

        self.logger.info(f'deleting pod "{self.name}"')
        self.logger.trace(f"delete options: {options}")

        async with self.api_client() as api_client:
            return await api_client.delete_namespaced_pod(
                name=self.name,
                namespace=self.namespace,
                body=options,
            )

    async def refresh(self) -> None:
        """Refresh the underlying Kubernetes Pod resource."""
        async with self.api_client() as api_client:
            self.obj = await api_client.read_namespaced_pod_status(
                name=self.name,
                namespace=self.namespace,
            )

    async def is_ready(self) -> bool:
        """Check if the Pod is in the ready state.

        Returns:
            True if in the ready state; False otherwise.
        """
        self.logger.trace("refreshing pod status to check is_ready")
        await self.refresh()

        # if there is no status, the pod is definitely not ready
        status = self.obj.status
        self.logger.trace(f"current pod status is {status}")
        if status is None:
            return False

        # check the pod phase to make sure it is running. a pod in
        # the 'failed' or 'success' state will no longer be running,
        # so we only care if the pod is in the 'running' state.
        # NOTE: a prior revision evaluated `status.phase` as a no-op
        # expression and logged the entire status object instead of the phase.
        phase = status.phase
        self.logger.trace(f"current pod phase is {phase}")
        if not status.conditions:
            return False

        self.logger.trace(f"checking status conditions {status.conditions}")
        for cond in status.conditions:
            if cond.reason == "Unschedulable":
                return False

            # we only care about the condition type 'ready'
            if cond.type.lower() != "ready":
                continue

            # check that the readiness condition is True
            return cond.status.lower() == "true"

        # Catchall
        self.logger.trace(f"unable to find ready=true, continuing to wait...")
        return False

    async def raise_for_status(self, adjustments: List[servo.Adjustment]) -> None:
        """Raise an exception if the Pod status is not not ready."""
        # NOTE: operate off of current state, assuming you have checked is_ready()
        status = self.obj.status
        self.logger.trace(f"current pod status is {status}")
        if status is None:
            raise RuntimeError(f'No such pod: {self.name}')

        # check the pod phase to make sure it is running. a pod in
        # the 'failed' or 'success' state will no longer be running,
        # so we only care if the pod is in the 'running' state.
        # phase = status.phase
        if not status.conditions:
            raise RuntimeError(f'Pod is not running: {self.name}')

        self.logger.trace(f"checking container statuses: {status.container_statuses}")
        if status.container_statuses:
            for cont_stat in status.container_statuses:
                if cont_stat.state and cont_stat.state.waiting and cont_stat.state.waiting.reason in ["ImagePullBackOff", "ErrImagePull"]:
                    raise servo.AdjustmentFailedError("Container image pull failure detected", reason="image-pull-failed")

        restarted_container_statuses = list(filter(lambda cont_stat: cont_stat.restart_count > 0, (status.container_statuses or [])))
        if restarted_container_statuses:
            container_messages = list(map(lambda cont_stat: f"{cont_stat.name} x{cont_stat.restart_count}", restarted_container_statuses))
            raise servo.AdjustmentRejectedError(
                f"Tuning optimization {self.name} crash restart detected on container(s): {', '.join(container_messages)}",
                reason="unstable"
            )

        self.logger.trace(f"checking status conditions {status.conditions}")
        for cond in status.conditions:
            if cond.reason == "Unschedulable":
                # FIXME: The servo rejected error should be raised further out. This should be a generic scheduling error
                unschedulable_adjustments = list(filter(lambda a: a.setting_name in cond.message, adjustments))
                raise servo.AdjustmentRejectedError(
                    f"Requested adjustment(s) ({', '.join(map(str, unschedulable_adjustments))}) cannot be scheduled due to \"{cond.message}\"",
                    reason="unschedulable"
                )

            if cond.type == "Ready" and cond.status == "False":
                raise servo.AdjustmentRejectedError(f"(reason {cond.reason}) {cond.message}", reason="start-failed")

            # we only care about the condition type 'ready'
            if cond.type.lower() != "ready":
                continue

            # check that the readiness condition is True
            if cond.status.lower() == "true":
                return

        # Catchall
        self.logger.trace(f"unable to find ready=true, continuing to wait...")
        raise RuntimeError(f"Unknown Pod status for '{self.name}': {status}")

    async def get_status(self) -> kubernetes_asyncio.client.V1PodStatus:
        """Get the status of the Pod.

        Returns:
            The status of the Pod.
        """
        # first, refresh the pod state to ensure latest status
        await self.refresh()

        # return the status of the pod
        return cast(kubernetes_asyncio.client.V1PodStatus, self.obj.status)

    @property
    def containers(self) -> List[Container]:
        """
        Return a list of Container objects from the underlying pod template spec.
        """
        return list(map(lambda c: Container(c, self), self.obj.spec.containers))

    async def get_containers(self) -> List[Container]:
        """Get the Pod's containers.

        Returns:
            A list of containers that belong to the Pod.
        """
        self.logger.debug(f'getting containers for pod "{self.name}"')
        await self.refresh()
        return self.containers

    def get_container(self, name: str) -> Union[Container, None]:
        """Get a container in the Pod by name.

        Args:
            name (str): The name of the Container.

        Returns:
            Container: The Pod's Container with the matching name. If
            no container with the given name is found, ``None`` is returned.
        """
        return next(filter(lambda c: c.name == name, self.containers), None)

    async def get_restart_count(self) -> int:
        """Get the total number of Container restarts for the Pod.

        Returns:
            The total number of Container restarts.
        """
        status = await self.get_status()
        if status.container_statuses is None:
            return 0

        total = 0
        for container_status in status.container_statuses:
            total += container_status.restart_count

        return total

    async def containers_started(self) -> bool:
        """Check if the Pod's Containers have all started.

        Returns:
            True if all Containers have started; False otherwise.
        """
        # start the flag as true - we will check the state and set
        # this to False if any container is not yet running.
        containers_started = True

        status = await self.get_status()
        if status.container_statuses is not None:
            for container_status in status.container_statuses:
                if container_status.state is not None:
                    if container_status.state.running is not None:
                        if container_status.state.running.started_at is not None:
                            # The container is started, so move on to check the
                            # next container
                            continue
                # If we get here, then the container has not started.
                containers_started = False
                break

        return containers_started

    def uid(self) -> str:
        """
        Gets the UID for the Pod.

        UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.  Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids  # noqa: E501
        """
        return self.obj.metadata.uid
class Service(KubernetesModel):
"""Kubetest wrapper around a Kubernetes `Service`_ API Object.
The actual ``kubernetes.client.V1Service`` instance that this
wraps can be accessed via the ``obj`` instance member.
This wrapper provides some convenient functionality around the
API Object and provides some state management for the `Service`_.
.. _Service:
https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#service-v1-core
"""
obj: kubernetes_asyncio.client.V1Service
api_clients: ClassVar[Dict[str, Type]] = {
'preferred': kubernetes_asyncio.client.CoreV1Api,
'v1': kubernetes_asyncio.client.CoreV1Api,
}
@classmethod
async def read(cls, name: str, namespace: str) -> "Service":
    """Fetch a Service from the cluster.

    Args:
        name: The name of the Service to read.
        namespace: The namespace to read the Service from.
    """
    servo.logger.trace(f'reading service "{name}" in namespace "{namespace}"')
    async with cls.preferred_client() as api_client:
        obj = await api_client.read_namespaced_service(name, namespace)
        servo.logger.trace("service: ", obj)
        return Service(obj)
async def create(self, namespace: str = None) -> None:
    """Create the Service in the cluster.

    Args:
        namespace: The namespace to create the Service under. Defaults to
            the namespace already recorded on the wrapped object (set when
            the Service was loaded via the kubetest client).
    """
    target_namespace = namespace if namespace is not None else self.namespace
    self.logger.info(f'creating service "{self.name}" in namespace "{self.namespace}"')
    async with self.api_client() as api:
        self.obj = await api.create_namespaced_service(
            namespace=target_namespace,
            body=self.obj,
        )
async def patch(self) -> None:
    """Apply the local state of the Service to the cluster.

    Submits the wrapped object as a strategic-merge patch, so only fields
    that differ from the cluster state are modified.
    """
    async with self.api_client() as api_client:
        # Strategic merge patch requires an explicit content-type header;
        # the client defaults to plain JSON otherwise.
        api_client.api_client.set_default_header('content-type', 'application/strategic-merge-patch+json')
        await api_client.patch_namespaced_service(
            name=self.name,
            namespace=self.namespace,
            body=self.obj,
        )
async def delete(self, options: kubernetes_asyncio.client.V1DeleteOptions = None) -> kubernetes_asyncio.client.V1Status:
    """Delete the Service from the cluster.

    The wrapped object must already carry its namespace (e.g. because it
    was loaded or read from the cluster); otherwise set it manually first.

    Args:
        options: Deletion options forwarded to the API server.

    Returns:
        The status of the delete operation.
    """
    delete_options = options if options is not None else kubernetes_asyncio.client.V1DeleteOptions()
    self.logger.info(f'deleting service "{self.name}"')
    self.logger.debug(f'delete options: {delete_options}')
    async with self.api_client() as api:
        return await api.delete_namespaced_service(
            name=self.name,
            namespace=self.namespace,
            body=delete_options,
        )
async def refresh(self) -> None:
    """Re-read the Service from the cluster and update the wrapped object."""
    async with self.api_client() as api:
        self.obj = await api.read_namespaced_service(
            name=self.name,
            namespace=self.namespace,
        )
async def is_ready(self) -> bool:
    """Check if the Service is in the ready state.

    A Service exposes no direct readiness state, so readiness is derived
    from its Endpoints: every endpoint must have subsets, every subset
    must have at least one address, and no subset may report not-ready
    addresses. Note that a Service with no backend hooked up never has
    endpoints and therefore never resolves to ready.

    Returns:
        True if in the ready state; False otherwise.
    """
    await self.refresh()
    # No status at all means definitely not ready.
    if self.obj.status is None:
        return False
    endpoints = await self.get_endpoints()
    # A Service without endpoints is not ready.
    if not endpoints:
        return False
    for endpoint in endpoints:
        # An endpoint without subsets is considered not ready.
        if endpoint.subsets is None:
            return False
        for subset in endpoint.subsets:
            # No addresses set up yet -> not ready.
            if not subset.addresses:
                return False
            # Any lingering not-ready addresses -> not ready.
            if subset.not_ready_addresses:
                return False
    # All endpoints, subsets, and addresses check out.
    return True
@property
def status(self) -> kubernetes_asyncio.client.V1ServiceStatus:
    """The status field of the wrapped Service object (no refresh)."""
    return self.obj.status
async def get_status(self) -> kubernetes_asyncio.client.V1ServiceStatus:
    """Refresh the Service from the cluster and return its status.

    Returns:
        The status of the Service.
    """
    self.logger.info(f'checking status of service "{self.name}"')
    # Refresh first so the returned status reflects the latest cluster state.
    await self.refresh()
    return self.obj.status
@property
def ports(self) -> List[kubernetes_asyncio.client.V1ServicePort]:
    """The ports exposed by the Service."""
    return self.obj.spec.ports
def find_port(self, selector: Union[str, int]) -> Optional[kubernetes_asyncio.client.V1ServicePort]:
    """Return the first port matching *selector*, or None.

    A string selector matches against the port name; an integer selector
    matches against the port number.

    Raises:
        TypeError: If the selector is neither a str nor an int (raised on
            the first port examined).
    """
    for candidate in self.ports:
        if isinstance(selector, str) and candidate.name == selector:
            return candidate
        if isinstance(selector, int) and candidate.port == selector:
            return candidate
        if not isinstance(selector, (str, int)):
            raise TypeError(f"Unknown port selector type '{selector.__class__.__name__}': {selector}")
    return None
async def get_endpoints(self) -> List[kubernetes_asyncio.client.V1Endpoints]:
    """Return the Endpoints objects whose name matches this Service.

    Useful for checking internal IP addresses used in containers,
    e.g. for container auto-discovery.

    Returns:
        A list of endpoints associated with the Service.
    """
    self.logger.info(f'getting endpoints for service "{self.name}"')
    async with self.api_client() as api:
        all_endpoints = await api.list_namespaced_endpoints(
            namespace=self.namespace,
        )
    # Endpoints objects share the name of the Service they back.
    svc_endpoints = [ep for ep in all_endpoints.items if ep.metadata.name == self.name]
    self.logger.debug(f'endpoints: {svc_endpoints}')
    return svc_endpoints
async def _proxy_http_request(self, method, path, **kwargs) -> tuple:
    """Template request to the API-server proxy of this Service.

    Args:
        method: The HTTP request method, e.g. 'GET', 'POST' etc.
        path: The URI path for the request.
        kwargs: Keyword arguments forwarded to the underlying API call.

    Returns:
        The response data.
    """
    # NOTE: always proxies through the first declared service port.
    path_params = {
        "name": f'{self.name}:{self.obj.spec.ports[0].port}',
        "namespace": self.namespace,
        "path": path
    }
    # Use a managed ApiClient so the underlying aiohttp session is closed.
    # (The previous `CoreV1Api()` instantiation created an ApiClient that
    # was never closed, leaking the client session on every request.)
    async with kubernetes_asyncio.client.api_client.ApiClient() as api:
        return await api.call_api(
            '/api/v1/namespaces/{namespace}/services/{name}/proxy/{path}',
            method,
            path_params=path_params,
            **kwargs
        )
async def proxy_http_get(self, path: str, **kwargs) -> tuple:
    """Issue a GET request through the API-server proxy of this Service.

    Args:
        path: The URI path for the request.
        kwargs: Keyword arguments forwarded to the underlying API call.

    Returns:
        The response data.
    """
    return await self._proxy_http_request('GET', path, **kwargs)
async def proxy_http_post(self, path: str, **kwargs) -> tuple:
    """Issue a POST request through the API-server proxy of this Service.

    Args:
        path: The URI path for the request.
        kwargs: Keyword arguments forwarded to the underlying API call.

    Returns:
        The response data.
    """
    return await self._proxy_http_request('POST', path, **kwargs)
@property
def selector(self) -> Dict[str, str]:
    """The label selector the Service uses to pick its backing pods."""
    return self.obj.spec.selector
async def get_pods(self) -> List[Pod]:
    """Get the pods that the Service is routing traffic to.

    Returns:
        A list of pods that the service is routing traffic to.
    """
    self.logger.debug(f'getting pods for service "{self.name}"')
    async with Pod.preferred_client() as api_client:
        # NOTE: a stray `self.obj.spec.selector.match_labels` expression was
        # removed here -- V1Service selectors are plain dicts, so the
        # attribute access raised AttributeError at runtime.
        pod_list: kubernetes_asyncio.client.V1PodList = await api_client.list_namespaced_pod(
            namespace=self.namespace, label_selector=selector_string(self.selector)
        )
    return [Pod(p) for p in pod_list.items]
class WatchTimeoutError(Exception):
    """Raised when a Kubernetes watch exceeds its timeout.

    The API client ends the stream silently when the timeout elapses
    instead of raising, so fall-through logic raises this explicitly.
    """
class Deployment(KubernetesModel):
    """Kubetest wrapper around a Kubernetes `Deployment`_ API Object.

    The actual ``kubernetes.client.V1Deployment`` instance that this
    wraps can be accessed via the ``obj`` instance member.

    This wrapper provides some convenient functionality around the
    API Object and provides some state management for the `Deployment`_.

    .. _Deployment:
        https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#deployment-v1-apps
    """

    # The wrapped Kubernetes API object.
    obj: kubernetes_asyncio.client.V1Deployment

    # Maps API version strings to the client class used to talk to that API.
    api_clients: ClassVar[Dict[str, Type]] = {
        "preferred": kubernetes_asyncio.client.AppsV1Api,
        "apps/v1": kubernetes_asyncio.client.AppsV1Api,
        "apps/v1beta1": kubernetes_asyncio.client.AppsV1beta1Api,
        "apps/v1beta2": kubernetes_asyncio.client.AppsV1beta2Api,
    }
async def create(self, namespace: str = None) -> None:
    """Create the Deployment in the cluster.

    Args:
        namespace: The namespace to create the Deployment under. Defaults
            to the namespace already recorded on the wrapped object (set
            when the Deployment was loaded via the kubetest client).
    """
    target_namespace = namespace if namespace is not None else self.namespace
    self.logger.info(
        f'creating deployment "{self.name}" in namespace "{self.namespace}"'
    )
    async with self.api_client() as api:
        self.obj = await api.create_namespaced_deployment(
            namespace=target_namespace,
            body=self.obj,
        )
@classmethod
async def read(cls, name: str, namespace: str) -> "Deployment":
    """Read a Deployment by name under the given namespace.

    Args:
        name: The name of the Deployment to read.
        namespace: The namespace to read the Deployment from.
    """
    async with cls.preferred_client() as api:
        return Deployment(await api.read_namespaced_deployment(name, namespace))
async def patch(self) -> None:
    """Apply the changed attributes of the Deployment as a strategic merge patch."""
    async with self.api_client() as api:
        # Strategic merge patch requires an explicit content-type header.
        api.api_client.set_default_header('content-type', 'application/strategic-merge-patch+json')
        self.obj = await api.patch_namespaced_deployment(
            name=self.name,
            namespace=self.namespace,
            body=self.obj
        )
async def replace(self) -> None:
    """Replace the cluster Deployment with the local object state."""
    async with self.api_client() as api:
        self.obj = await api.replace_namespaced_deployment(
            name=self.name,
            namespace=self.namespace,
            body=self.obj
        )
async def delete(self, options: kubernetes_asyncio.client.V1DeleteOptions = None) -> kubernetes_asyncio.client.V1Status:
    """Delete the Deployment from the cluster.

    The wrapped object must already carry its namespace (e.g. because it
    was loaded or read from the cluster); otherwise set it manually first.

    Args:
        options: Deletion options forwarded to the API server.

    Returns:
        The status of the delete operation.
    """
    delete_options = options if options is not None else kubernetes_asyncio.client.V1DeleteOptions()
    self.logger.info(f'deleting deployment "{self.name}"')
    self.logger.debug(f"delete options: {delete_options}")
    async with self.api_client() as api:
        return await api.delete_namespaced_deployment(
            name=self.name,
            namespace=self.namespace,
            body=delete_options,
        )
async def scale_to_zero(self) -> None:
    """Soft-delete the Deployment by scaling it to zero replicas.

    The Deployment wraps an existing cluster object that this code did not
    create, so rather than destroying it, its pods are removed by setting
    the replica count to 0.
    """
    await self.refresh()
    self.replicas = 0
    await self.patch()
async def refresh(self) -> None:
    """Re-read the Deployment (with status) from the cluster and update the wrapped object."""
    async with self.api_client() as api:
        self.obj = await api.read_namespaced_deployment_status(
            name=self.name,
            namespace=self.namespace,
        )
async def rollback(self) -> None:
    """Roll back an unstable Deployment revision to a previous version."""
    # NOTE(review): create_namespaced_deployment_rollback lives on the
    # extensions/v1beta1 API group, which modern clusters (>= 1.16) no
    # longer serve -- confirm this code path is still exercised/working.
    async with kubernetes_asyncio.client.api_client.ApiClient() as api:
        api_client = kubernetes_asyncio.client.ExtensionsV1beta1Api(api)
        self.obj = await api_client.create_namespaced_deployment_rollback(
            name=self.name,
            namespace=self.namespace,
            body=self.obj,
        )
async def get_status(self) -> kubernetes_asyncio.client.V1DeploymentStatus:
    """Refresh the Deployment from the cluster and return its status.

    Returns:
        The status of the Deployment.
    """
    self.logger.info(f'checking status of deployment "{self.name}"')
    # Refresh first so the returned status reflects the latest cluster state.
    await self.refresh()
    return cast(kubernetes_asyncio.client.V1DeploymentStatus, self.obj.status)
async def get_pods(self) -> List[Pod]:
    """Return the pods matching the Deployment's selector labels.

    Returns:
        A list of pods that belong to the deployment.
    """
    self.logger.debug(f'getting pods for deployment "{self.name}"')
    async with Pod.preferred_client() as api:
        pod_list: kubernetes_asyncio.client.V1PodList = await api.list_namespaced_pod(
            namespace=self.namespace,
            label_selector=selector_string(self.match_labels),
        )
    return [Pod(p) for p in pod_list.items]
async def get_latest_pods(self) -> List[Pod]:
    """Get only the Deployment pods that belong to the latest ResourceVersion.

    Returns:
        A list of pods that belong to the latest deployment replicaset.
    """
    self.logger.trace(f'getting replicaset for deployment "{self.name}"')
    async with self.api_client() as api_client:
        label_selector = self.obj.spec.selector.match_labels
        rs_list: kubernetes_asyncio.client.V1ReplicasetList = await api_client.list_namespaced_replica_set(
            namespace=self.namespace, label_selector=selector_string(label_selector)
        )
        # Verify all returned RS have this deployment as an owner
        rs_list = [
            rs for rs in rs_list.items if rs.metadata.owner_references and any(
                ownRef.kind == "Deployment" and ownRef.uid == self.obj.metadata.uid
                for ownRef in rs.metadata.owner_references
            )
        ]
        if not rs_list:
            raise servo.ConnectorError(f'Unable to locate replicaset(s) for deployment "{self.name}"')
        # The deployment controller stamps each replicaset with a revision
        # annotation; without it the newest replicaset cannot be identified.
        if missing_revision_rsets := list(filter(lambda rs: 'deployment.kubernetes.io/revision' not in rs.metadata.annotations, rs_list)):
            raise servo.ConnectorError(
                f'Unable to determine latest replicaset for deployment "{self.name}" due to missing revision annotation in replicaset(s)'
                f' "{", ".join(list(map(lambda rs: rs.metadata.name, missing_revision_rsets)))}"'
            )
        # Highest revision number == most recent rollout.
        latest_rs = sorted(rs_list, key=lambda rs: int(rs.metadata.annotations['deployment.kubernetes.io/revision']), reverse=True)[0]

    # Keep only pods owned by the newest replicaset.
    return [
        pod for pod in await self.get_pods()
        if any(
            ownRef.kind == "ReplicaSet" and ownRef.uid == latest_rs.metadata.uid
            for ownRef in pod.obj.metadata.owner_references
        )]
@property
def status(self) -> kubernetes_asyncio.client.V1DeploymentStatus:
    """The status field of the wrapped Deployment object (no refresh)."""
    return cast(kubernetes_asyncio.client.V1DeploymentStatus, self.obj.status)
@property
def resource_version(self) -> str:
    """The resource version of the Deployment; changes on every write to the object."""
    return self.obj.metadata.resource_version
@property
def observed_generation(self) -> str:
    """The generation most recently observed by the deployment controller."""
    return self.obj.status.observed_generation
async def is_ready(self) -> bool:
    """Check if the Deployment is in the ready state.

    Ready means the total replica count reported in the status equals
    the ready replica count.

    Returns:
        True if in the ready state; False otherwise.
    """
    await self.refresh()
    status = self.obj.status
    # No status, or no replica count yet -> definitely not ready.
    if status is None or status.replicas is None:
        return False
    return status.replicas == status.ready_replicas
@property
def containers(self) -> List[Container]:
    """Container wrappers for each container in the pod template spec."""
    return [Container(c, None) for c in self.obj.spec.template.spec.containers]
def find_container(self, name: str) -> Optional[Container]:
    """Return the container with the given name, or None if absent."""
    for container in self.containers:
        if container.name == name:
            return container
    return None
async def get_target_container(self, config: ContainerConfiguration) -> Optional[Container]:
    """Return the container targeted by the supplied configuration."""
    return self.find_container(config.name)
def set_container(self, name: str, container: Container) -> None:
    """Replace the named container in the pod template spec.

    Args:
        name: The name of the container to replace.
        container: The new container value.

    Raises:
        ValueError: If no container with the given name exists.
    """
    index = next(
        (i for i, c in enumerate(self.containers) if c.name == name), None
    )
    if index is None:
        # Previously a bare StopIteration escaped here, which is converted
        # to RuntimeError inside generators/coroutines (PEP 479) -- raise
        # a meaningful error instead.
        raise ValueError(f"unknown container: {name}")
    # NOTE: assigning into `self.containers` was dropped -- the property
    # builds a fresh list on every access, so that write had no effect.
    self.obj.spec.template.spec.containers[index] = container.obj
def remove_container(self, name: str) -> Optional[Container]:
    """Remove the named container from the pod template spec.

    Returns:
        The removed container wrapper, or None if no container matched.
    """
    index = next(filter(lambda i: self.containers[i].name == name, range(len(self.containers))), None)
    if index is not None:
        # Pop mutates the underlying template spec in place.
        return Container(
            self.obj.spec.template.spec.containers.pop(index),
            None
        )

    return None
@property
def replicas(self) -> int:
    """The desired number of pods."""
    return self.obj.spec.replicas
@replicas.setter
def replicas(self, replicas: int) -> None:
    """Set the desired number of pods."""
    self.obj.spec.replicas = replicas
@property
def match_labels(self) -> Dict[str, str]:
    """The matchLabels dict of the Deployment's selector."""
    return self.obj.spec.selector.match_labels
@property
def label_selector(self) -> str:
    """
    Return a string for matching the Deployment in Kubernetes API calls.
    """
    # NOTE(review): this serializes the Deployment's *metadata labels*,
    # not spec.selector.match_labels -- the two usually coincide but are
    # not required to. Confirm which set callers expect to match on.
    return selector_string(self.obj.metadata.labels)
# TODO: model the template/spec types and add label/annotation helpers
@property
def pod_template_spec(self) -> kubernetes_asyncio.client.models.V1PodTemplateSpec:
    """The pod template spec used for instances of this Deployment."""
    return self.obj.spec.template
async def get_pod_template_spec_copy(self) -> kubernetes_asyncio.client.models.V1PodTemplateSpec:
    """Return a deep copy of the pod template spec (e.g. for building a tuning pod)."""
    return copy.deepcopy(self.pod_template_spec)
def update_pod(self, pod: kubernetes_asyncio.client.models.V1Pod) -> kubernetes_asyncio.client.models.V1Pod:
    """Update the pod with the latest state of the controller if needed.

    Deployments require no controller-state propagation, so the pod is
    returned unchanged.
    """
    return pod
@property
def pod_spec(self) -> kubernetes_asyncio.client.models.V1PodSpec:
    """The pod spec from the Deployment's pod template."""
    return self.pod_template_spec.spec
@backoff.on_exception(backoff.expo, kubernetes_asyncio.client.exceptions.ApiException, max_tries=3)
async def inject_sidecar(
    self,
    name: str,
    image: str,
    *,
    service: Optional[str] = None,
    port: Optional[int] = None,
    index: Optional[int] = None,
    service_port: int = 9980
) -> None:
    """
    Injects an Envoy sidecar into a target Deployment that proxies a service
    or literal TCP port, generating scrapeable metrics usable for optimization.

    The service or port argument must be provided to define how traffic is proxied
    between the Envoy sidecar and the container responsible for fulfilling the request.

    Args:
        name: The name of the sidecar to inject.
        image: The container image for the sidecar container.
        service: Name of the service to proxy. Envoy will accept ingress traffic
            on the service port and reverse proxy requests back to the original
            target container.
        port: The name or number of a port within the Deployment to wrap the proxy around.
        index: The index at which to insert the sidecar container. When `None`, the sidecar is appended.
        service_port: The port to receive ingress traffic from an upstream service.
    """
    await self.refresh()

    if not (service or port):
        raise ValueError(f"a service or port must be given")

    # Numeric strings are treated as port numbers, not port names.
    if isinstance(port, str) and port.isdigit():
        port = int(port)

    # check for a port conflict across all containers in the pod template
    container_ports = list(itertools.chain(*map(operator.attrgetter("ports"), self.containers)))
    if service_port in list(map(operator.attrgetter("container_port"), container_ports)):
        raise ValueError(f"Port conflict: Deployment '{self.name}' already exposes port {service_port} through an existing container")

    # lookup the port on the target service
    if service:
        try:
            service_obj = await Service.read(service, self.namespace)
        except kubernetes_asyncio.client.exceptions.ApiException as error:
            if error.status == 404:
                raise ValueError(f"Unknown Service '{service}'") from error
            else:
                raise error
        if not port:
            # No port given: only valid when the service exposes exactly one.
            port_count = len(service_obj.obj.spec.ports)
            if port_count == 0:
                raise ValueError(f"Target Service '{service}' does not expose any ports")
            elif port_count > 1:
                raise ValueError(f"Target Service '{service}' exposes multiple ports -- target port must be specified")
            port_obj = service_obj.obj.spec.ports[0]
        else:
            # Resolve the requested port by number or by name.
            if isinstance(port, int):
                port_obj = next(filter(lambda p: p.port == port, service_obj.obj.spec.ports), None)
            elif isinstance(port, str):
                port_obj = next(filter(lambda p: p.name == port, service_obj.obj.spec.ports), None)
            else:
                raise TypeError(f"Unable to resolve port value of type {port.__class__} (port={port})")

            if not port_obj:
                raise ValueError(f"Port '{port}' does not exist in the Service '{service}'")

        # resolve symbolic name in the service target port to a concrete container port
        if isinstance(port_obj.target_port, str):
            container_port_obj = next(filter(lambda p: p.name == port_obj.target_port, container_ports), None)
            if not container_port_obj:
                raise ValueError(f"Port '{port_obj.target_port}' could not be resolved to a destination container port")

            container_port = container_port_obj.container_port
        else:
            container_port = port_obj.target_port

    else:
        # find the container port directly on the Deployment
        container_port_obj = next(filter(lambda p: p.container_port == port, container_ports), None)
        if not container_port_obj:
            raise ValueError(f"Port '{port}' could not be resolved to a destination container port")

        container_port = container_port_obj.container_port

    # build the sidecar container
    container = kubernetes_asyncio.client.V1Container(
        name=name,
        image=image,
        image_pull_policy="IfNotPresent",
        resources=kubernetes_asyncio.client.V1ResourceRequirements(
            requests={
                "cpu": "125m",
                "memory": "128Mi"
            },
            limits={
                "cpu": "250m",
                "memory": "256Mi"
            }
        ),
        # Envoy reads its proxy configuration from these environment variables.
        env=[
            kubernetes_asyncio.client.V1EnvVar(name="OPSANI_ENVOY_PROXY_SERVICE_PORT", value=str(service_port)),
            kubernetes_asyncio.client.V1EnvVar(name="OPSANI_ENVOY_PROXIED_CONTAINER_PORT", value=str(container_port)),
            kubernetes_asyncio.client.V1EnvVar(name="OPSANI_ENVOY_PROXY_METRICS_PORT", value="9901")
        ],
        ports=[
            kubernetes_asyncio.client.V1ContainerPort(name="opsani-proxy", container_port=service_port),
            kubernetes_asyncio.client.V1ContainerPort(name="opsani-metrics", container_port=9901),
        ]
    )

    # add the sidecar to the Deployment
    if index is None:
        self.obj.spec.template.spec.containers.append(container)
    else:
        self.obj.spec.template.spec.containers.insert(index, container)

    # patch the deployment
    await self.patch()
async def eject_sidecar(self, name: str) -> bool:
    """Eject an Envoy sidecar container from the Deployment.

    Returns:
        True if a sidecar was found and ejected; False otherwise.
    """
    await self.refresh()
    ejected = self.remove_container(name)
    if not ejected:
        return False
    await self.replace()
    return True
@contextlib.asynccontextmanager
async def rollout(self, *, timeout: Optional[servo.DurationDescriptor] = None) -> None:
    """Asynchronously wait for changes to a deployment to roll out to the cluster.

    Yields the Deployment so the caller can mutate it; after the managed
    block exits, watches the cluster until the change has fully rolled
    out or the timeout elapses.

    Raises:
        servo.AdjustmentRejectedError: If the watch reports an error event
            or a failing Deployment condition.
        WatchTimeoutError: If the watch ends before rollout completes.
    """
    # NOTE: The timeout_seconds argument must be an int or the request will fail
    timeout_seconds = int(servo.Duration(timeout).total_seconds()) if timeout else None

    # Resource version lets us track any change. Observed generation only increments
    # when the deployment controller sees a significant change that requires rollout
    resource_version = self.resource_version
    observed_generation = self.status.observed_generation
    desired_replicas = self.replicas

    self.logger.info(f"applying adjustments to Deployment '{self.name}' and rolling out to cluster")

    # Yield to let the changes be made
    yield self

    # Return fast if nothing was changed
    if self.resource_version == resource_version:
        self.logger.info(
            f"adjustments applied to Deployment '{self.name}' made no changes, continuing"
        )
        return

    # Create a Kubernetes watch against the deployment under optimization to track changes
    self.logger.debug(
        f"watching deployment Using label_selector={self.label_selector}, resource_version={resource_version}"
    )

    async with kubernetes_asyncio.client.api_client.ApiClient() as api:
        v1 = kubernetes_asyncio.client.AppsV1Api(api)
        async with kubernetes_asyncio.watch.Watch().stream(
            v1.list_namespaced_deployment,
            self.namespace,
            label_selector=self.label_selector,
            timeout_seconds=timeout_seconds,
        ) as stream:
            async for event in stream:
                # NOTE: Event types are ADDED, DELETED, MODIFIED, ERROR
                # TODO: Create an enum...
                event_type, deployment = event["type"], event["object"]
                status: kubernetes_asyncio.client.V1DeploymentStatus = deployment.status

                self.logger.debug(
                    f"deployment watch yielded event: {event_type} {deployment.kind} {deployment.metadata.name} in {deployment.metadata.namespace}: {status}"
                )

                if event_type == "ERROR":
                    stream.stop()
                    # FIXME: Not sure what types we expect here
                    raise servo.AdjustmentRejectedError(str(deployment), reason="start-failed")

                # Check that the conditions aren't reporting a failure
                if status.conditions:
                    self._check_conditions(status.conditions)

                # Early events in the watch may be against previous generation
                if status.observed_generation == observed_generation:
                    self.logger.debug(
                        "observed generation has not changed, continuing watch"
                    )
                    continue

                # Check the replica counts. Once available, updated, and ready match
                # our expected count and the unavailable count is zero we are rolled out
                if status.unavailable_replicas:
                    self.logger.debug(
                        "found unavailable replicas, continuing watch",
                        status.unavailable_replicas,
                    )
                    continue

                replica_counts = [
                    status.replicas,
                    status.available_replicas,
                    status.ready_replicas,
                    status.updated_replicas,
                ]
                if replica_counts.count(desired_replicas) == len(replica_counts):
                    # We are done: all the counts match. Stop the watch and return
                    self.logger.success(f"adjustments to Deployment '{self.name}' rolled out successfully", status)
                    stream.stop()
                    return

    # The watch doesn't raise on timeout expiration, so reaching this point
    # without returning means the rollout did not complete in time.
    raise WatchTimeoutError()
def _check_conditions(self, conditions: List[kubernetes_asyncio.client.V1DeploymentCondition]) -> None:
    """Raise if any Deployment condition reports a failure state.

    Args:
        conditions: The conditions from a V1DeploymentStatus.

    Raises:
        servo.AdjustmentRejectedError: On a ReplicaFailure condition or a
            failed Progressing condition.
        servo.AdjustmentFailedError: On unexpected condition states.
    """
    for condition in conditions:
        if condition.type == "Available":
            if condition.status == "True":
                # If we hit on this and have not raised yet we are good to go
                break
            elif condition.status in ("False", "Unknown"):
                # Condition has not yet been met, log status and continue monitoring
                self.logger.debug(
                    f"Condition({condition.type}).status == '{condition.status}' ({condition.reason}): {condition.message}"
                )
            else:
                raise servo.AdjustmentFailedError(
                    f"encountered unexpected Condition status '{condition.status}'"
                )

        elif condition.type == "ReplicaFailure":
            # BUG FIX: `condition.status` is a plain string ("True"/"False"),
            # so the previous `condition.status.message` / `.reason` access
            # raised AttributeError -- the fields live on the condition itself.
            raise servo.AdjustmentRejectedError(
                f"ReplicaFailure: message='{condition.message}', reason='{condition.reason}'",
                reason="start-failed"
            )

        elif condition.type == "Progressing":
            if condition.status in ("True", "Unknown"):
                # Still working
                self.logger.debug("Deployment update is progressing", condition)
                break
            elif condition.status == "False":
                # Same AttributeError fix as in the ReplicaFailure branch above.
                raise servo.AdjustmentRejectedError(
                    f"ProgressionFailure: message='{condition.message}', reason='{condition.reason}'",
                    reason="start-failed"
                )
            else:
                raise servo.AdjustmentFailedError(
                    f"unknown deployment status condition: {condition.status}"
                )
async def raise_for_status(self, adjustments: List[servo.Adjustment]) -> None:
    """Inspect the current Deployment state and raise a descriptive error.

    Operates on the cached object state without refreshing; callers are
    expected to have already checked readiness (e.g. via is_ready()).
    """
    status = self.obj.status
    self.logger.trace(f"current deployment status is {status}")
    if status is None:
        raise RuntimeError(f'No such deployment: {self.name}')
    if not status.conditions:
        raise RuntimeError(f'Deployment is not running: {self.name}')

    # Surface condition-level failures first, then pod-level failures.
    self._check_conditions(status.conditions)
    await self.raise_for_failed_pod_adjustments(adjustments=adjustments)

    # Catchall: nothing above matched a known failure mode.
    self.logger.trace(f"unable to map deployment status to exception. Deployment: {self.obj}")
    raise RuntimeError(f"Unknown Deployment status for '{self.name}': {status}")
async def raise_for_failed_pod_adjustments(self, adjustments: List[servo.Adjustment]):
    """Inspect the latest pods and raise a descriptive error for any failure.

    Checks, in order: unschedulable pods, image pull failures, container
    crash restarts, and finally any pod that is simply not Ready.

    Args:
        adjustments: The adjustments that were applied, used to attribute
            scheduling failures to specific settings.

    Raises:
        servo.AdjustmentRejectedError: For unschedulable, crashing, or
            unready pods.
        servo.AdjustmentFailedError: For container image pull failures.
    """
    pods = await self.get_latest_pods()
    self.logger.trace(f"latest pod(s) status {list(map(lambda p: p.obj.status, pods))}")
    unschedulable_pods = [
        pod for pod in pods
        if pod.obj.status.conditions and any(
            cond.reason == "Unschedulable" for cond in pod.obj.status.conditions
        )
    ]
    if unschedulable_pods:
        pod_messages = []
        for pod in unschedulable_pods:
            cond_msgs = []
            for unschedulable_condition in filter(lambda cond: cond.reason == "Unschedulable", pod.obj.status.conditions):
                # Attribute the failure to the adjustment(s) named in the condition message.
                unschedulable_adjustments = list(filter(lambda a: a.setting_name in unschedulable_condition.message, adjustments))
                cond_msgs.append(
                    f"Requested adjustment(s) ({', '.join(map(str, unschedulable_adjustments))}) cannot be scheduled due to \"{unschedulable_condition.message}\""
                )
            pod_messages.append(f"{pod.obj.metadata.name} - {'; '.join(cond_msgs)}")

        raise servo.AdjustmentRejectedError(
            f"{len(unschedulable_pods)} pod(s) could not be scheduled for deployment {self.name}: {', '.join(pod_messages)}",
            reason="unschedulable"
        )

    image_pull_failed_pods = [
        pod for pod in pods
        if pod.obj.status.container_statuses and any(
            cont_stat.state and cont_stat.state.waiting and cont_stat.state.waiting.reason in ["ImagePullBackOff", "ErrImagePull"]
            for cont_stat in pod.obj.status.container_statuses
        )
    ]
    if image_pull_failed_pods:
        # BUG FIX: the message previously listed the names of *all* pods
        # instead of only the pods that actually failed to pull an image.
        raise servo.AdjustmentFailedError(
            f"Container image pull failure detected on {len(image_pull_failed_pods)} pods: {', '.join(map(lambda pod: pod.obj.metadata.name, image_pull_failed_pods))}",
            reason="image-pull-failed"
        )

    restarted_pods_container_statuses = [
        (pod, cont_stat) for pod in pods for cont_stat in (pod.obj.status.container_statuses or [])
        if cont_stat.restart_count > 0
    ]
    if restarted_pods_container_statuses:
        # Group restart counts per pod for a readable message.
        pod_to_counts = collections.defaultdict(list)
        for pod_cont_stat in restarted_pods_container_statuses:
            pod_to_counts[pod_cont_stat[0].obj.metadata.name].append(f"{pod_cont_stat[1].name} x{pod_cont_stat[1].restart_count}")

        pod_message = ", ".join(map(
            lambda kv_tup: f"{kv_tup[0]} - {'; '.join(kv_tup[1])}",
            list(pod_to_counts.items())
        ))
        raise servo.AdjustmentRejectedError(
            f"Deployment {self.name} pod(s) crash restart detected: {pod_message}",
            reason="unstable"
        )

    # Unready pod catchall
    unready_pod_conds = [
        (pod, cond) for pod in pods for cond in (pod.obj.status.conditions or [])
        if cond.type == "Ready" and cond.status == "False"
    ]
    if unready_pod_conds:
        pod_message = ", ".join(map(
            lambda pod_cond: f"{pod_cond[0].obj.metadata.name} - (reason {pod_cond[1].reason}) {pod_cond[1].message}",
            unready_pod_conds
        ))
        raise servo.AdjustmentRejectedError(
            f"Found {len(unready_pod_conds)} unready pod(s) for deployment {self.name}: {pod_message}",
            reason="start-failed"
        )
async def get_restart_count(self) -> int:
    """Sum the container restart counts across the latest pods.

    Pods that disappear (404) between listing and inspection are skipped.
    """
    total = 0
    for pod in await self.get_latest_pods():
        try:
            total += await pod.get_restart_count()
        except kubernetes_asyncio.client.exceptions.ApiException as error:
            if error.status != 404:
                raise error
            # Pod no longer exists; ignore it and move on.
    return total
# Workarounds to allow use of api_client.deserialize() public method instead of private api_client._ApiClient__deserialize
# TODO: is this workaround worth it just to avoid using the private method?
# fix for https://github.com/kubernetes-client/python/issues/977#issuecomment-594045477
def default_kubernetes_json_serializer(o: Any) -> Any:
    """``json.dumps`` default hook that renders date/datetime values as ISO-8601 strings.

    Raises:
        TypeError: For any value that is not a datetime.datetime or datetime.date.
    """
    if isinstance(o, (datetime.datetime, datetime.date)):
        return o.isoformat()
    raise TypeError(f'Object of type {o.__class__.__name__} '
                    f'is not JSON serializable')
# https://github.com/kubernetes-client/python/issues/977#issuecomment-592030030
class FakeKubeResponse:
    """Mocks the RESTResponse object as a workaround for kubernetes python api_client deserialization."""

    def __init__(self, obj):
        # api_client.deserialize() only reads `.data`, so serializing the
        # object to a JSON string is sufficient.
        self.data = json.dumps(obj, default=default_kubernetes_json_serializer)
# Use alias generator so that dromedary case can be parsed to snake case properties to match k8s python client behaviour
def to_dromedary_case(string: str) -> str:
split = string.split('_')
return split[0] + ''.join(word.capitalize() for word in split[1:])
class RolloutBaseModel(pydantic.BaseModel):
    """Base model for Argo Rollout spec types.

    Parses the camelCase field names used by the Kubernetes API into
    snake_case attributes, while still allowing population by field name.
    """
    class Config:
        # arbitrary_types_allowed = True
        # Generate camelCase aliases so K8s-style payloads map onto snake_case fields.
        alias_generator = to_dromedary_case
        allow_population_by_field_name = True
# Pydantic type models for argo rollout spec: https://argoproj.github.io/argo-rollouts/features/specification/
# https://github.com/argoproj/argo-rollouts/blob/master/manifests/crds/rollout-crd.yaml
# NOTE/TODO: fields typed with Any should maintain the same form when dumped as when they are parsed. Should the need
# arise to interact with such fields, they will need to have an explicit type defined so the alias_generator is applied

class RolloutV1LabelSelector(RolloutBaseModel): # must type out k8s models as well to allow parse_obj to work
    """Mirror of the Kubernetes V1LabelSelector object."""
    match_expressions: Any
    match_labels: Optional[Dict[str, str]]


class RolloutV1ObjectMeta(RolloutBaseModel):
    """Mirror of the Kubernetes V1ObjectMeta object."""
    annotations: Optional[Dict[str, str]]
    cluster_name: Optional[str]
    creation_timestamp: Optional[datetime.datetime]
    deletion_grace_period_seconds: Optional[int]
    deletion_timestamp: Optional[datetime.datetime]
    finalizers: Optional[List[str]]
    generate_name: Optional[str]
    generation: Optional[int]
    labels: Optional[Dict[str, str]]
    managed_fields: Any
    name: Optional[str]
    namespace: Optional[str]
    owner_references: Any
    resource_version: Optional[str]
    self_link: Optional[str]
    uid: Optional[str]


class RolloutV1EnvVar(RolloutBaseModel):
    """Mirror of the Kubernetes V1EnvVar object."""
    name: str
    value: Optional[str]
    value_from: Any


class RolloutV1ContainerPort(RolloutBaseModel):
    """Mirror of the Kubernetes V1ContainerPort object."""
    container_port: int
    host_ip: Optional[str]
    host_port: Optional[int]
    name: Optional[str]
    protocol: Optional[str]


class RolloutV1ResourceRequirements(RolloutBaseModel):
    """Mirror of the Kubernetes V1ResourceRequirements object."""
    limits: Optional[Dict[str, str]]
    requests: Optional[Dict[str, str]]
class RolloutV1Container(RolloutBaseModel):
    """Mirror of the Kubernetes V1Container object."""
    args: Optional[List[str]]
    command: Optional[List[str]]
    env: Optional[List[RolloutV1EnvVar]]
    env_from: Any
    image: str
    image_pull_policy: Optional[str]
    lifecycle: Any
    liveness_probe: Any
    name: str
    ports: Optional[List[RolloutV1ContainerPort]]
    readiness_probe: Any
    resources: Optional[RolloutV1ResourceRequirements]
    security_context: Any
    startup_probe: Any
    stdin: Optional[bool]
    stdin_once: Optional[bool]
    termination_message_path: Optional[str]
    termination_message_policy: Optional[str]
    tty: Optional[bool]
    volume_devices: Any
    volume_mounts: Any
    working_dir: Optional[str]


class RolloutV1PodSpec(RolloutBaseModel):
    """Mirror of the Kubernetes V1PodSpec object."""
    active_deadline_seconds: Optional[int]
    affinity: Any
    automount_service_account_token: Optional[bool]
    containers: List[RolloutV1Container]
    dns_config: Any
    dns_policy: Optional[str]
    enable_service_links: Optional[bool]
    ephemeral_containers: Any
    host_aliases: Any
    host_ipc: Optional[bool]
    host_network: Optional[bool]
    host_pid: Optional[bool]
    hostname: Optional[str]
    image_pull_secrets: Any
    init_containers: Optional[List[RolloutV1Container]]
    node_name: Optional[str]
    node_selector: Optional[Dict[str, str]]
    overhead: Optional[Dict[str, str]]
    preemption_policy: Optional[str]
    priority: Optional[int]
    priority_class_name: Optional[str]
    readiness_gates: Any
    restart_policy: Optional[str]
    runtime_class_name: Optional[str]
    scheduler_name: Optional[str]
    security_context: Any
    service_account: Optional[str]
    service_account_name: Optional[str]
    share_process_namespace: Optional[bool]
    subdomain: Optional[str]
    termination_grace_period_seconds: Optional[int]
    tolerations: Any
    topology_spread_constraints: Any
    volumes: Any
class RolloutV1PodTemplateSpec(RolloutBaseModel):
    """Mirror of the Kubernetes V1PodTemplateSpec object."""
    metadata: RolloutV1ObjectMeta
    spec: RolloutV1PodSpec


class RolloutV1WorkloadRef(RolloutBaseModel):
    """Reference to the workload (e.g. a Deployment) the Rollout manages."""
    api_version: str
    kind: str
    name: str


class RolloutSpec(RolloutBaseModel):
    """Spec section of the Argo Rollout custom resource."""
    replicas: int
    # Exactly one of template or workload_ref is expected per the Argo spec;
    # NOTE(review): not enforced by this model — confirm upstream validation.
    selector: Optional[RolloutV1LabelSelector]
    template: Optional[RolloutV1PodTemplateSpec]
    workload_ref: Optional[RolloutV1WorkloadRef]
    min_ready_seconds: Optional[int]
    revision_history_limit: Optional[int]
    paused: Optional[bool]
    progress_deadline_seconds: Optional[int]
    restart_at: Optional[datetime.datetime]
    strategy: Any
class RolloutBlueGreenStatus(RolloutBaseModel):
    """Blue/green strategy sub-status of the Rollout status."""
    active_selector: Optional[str]
    post_promotion_analysis_run: Optional[str]
    post_promotion_analysis_run_status: Any
    pre_promotion_analysis_run: Optional[str]
    pre_promotion_analysis_run_status: Any
    preview_selector: Optional[str]
    previous_active_selector: Optional[str]
    scale_down_delay_start_time: Optional[datetime.datetime]
    scale_up_preview_check_point: Optional[bool]


class RolloutStatusCondition(RolloutBaseModel):
    """A single condition entry from the Rollout status."""
    last_transition_time: datetime.datetime
    last_update_time: datetime.datetime
    message: str
    reason: str
    status: str
    type: str


class RolloutStatus(RolloutBaseModel):
    """Status section of the Argo Rollout custom resource."""
    # `...` makes this field required despite the Optional annotation; the API
    # serializes it as "HPAReplicas" which the alias generator cannot produce
    hpa_replicas: Optional[int] = pydantic.Field(..., alias="HPAReplicas")
    abort: Optional[bool]
    aborted_at: Optional[datetime.datetime]
    available_replicas: Optional[int]
    blue_green: RolloutBlueGreenStatus
    canary: Any # TODO type this out if connector needs to interact with it
    collision_count: Optional[int]
    conditions: List[RolloutStatusCondition]
    controller_pause: Optional[bool]
    current_pod_hash: str
    current_step_hash: Optional[str]
    current_step_index: Optional[int]
    observed_generation: str
    pause_conditions: Any
    ready_replicas: Optional[int]
    replicas: Optional[int]
    restarted_at: Optional[datetime.datetime]
    selector: str
    stable_RS: Optional[str]
    updated_replicas: Optional[int]


class RolloutObj(RolloutBaseModel): # TODO is this the right base to inherit from?
    """Top-level Argo Rollout custom resource object."""
    api_version: str
    kind: str
    metadata: RolloutV1ObjectMeta
    spec: RolloutSpec
    status: Optional[RolloutStatus]
# TODO expose to config if needed
# Custom resource coordinates for the Argo Rollouts CRD (group/version/plural)
ROLLOUT_GROUP = "argoproj.io"
ROLLOUT_VERSION = "v1alpha1"
ROLLOUT_PURAL = "rollouts"  # NOTE(review): typo for "PLURAL"; referenced throughout, left unchanged
class Rollout(KubernetesModel):
    """Wrapper around an ArgoCD Kubernetes `Rollout` Object.

    The actual instance that this
    wraps can be accessed via the ``obj`` instance member.

    This wrapper provides some convenient functionality around the
    API Object and provides some state management for the `Rollout`.

    .. Rollout:
        https://argoproj.github.io/argo-rollouts/features/specification/
    """

    # Parsed custom-resource payload backing this wrapper
    obj: RolloutObj
    # Deployment referenced by spec.workloadRef, resolved lazily when configured
    workload_ref_controller: Optional[Deployment] = None

    # group/version/plural keyword arguments required by every CustomObjectsApi call
    _rollout_const_args: Dict[str, str] = dict(
        group=ROLLOUT_GROUP,
        version=ROLLOUT_VERSION,
        plural=ROLLOUT_PURAL,
    )

    # Rollouts are custom resources, so all API interaction goes through CustomObjectsApi
    api_clients: ClassVar[Dict[str, Type]] = {
        "preferred": kubernetes_asyncio.client.CustomObjectsApi,
        f"{ROLLOUT_GROUP}/{ROLLOUT_VERSION}": kubernetes_asyncio.client.CustomObjectsApi,
    }
    async def create(self, namespace: str = None) -> None:
        """Create the Rollout under the given namespace.

        Args:
            namespace: The namespace to create the Rollout under.
                Defaults to the namespace already set on this instance.
        """
        if namespace is None:
            namespace = self.namespace

        self.logger.info(
            f'creating rollout "{self.name}" in namespace "{namespace}"'
        )
        self.logger.debug(f"rollout: {self.obj}")

        async with self.api_client() as api_client:
            # Round-trip through the pydantic model so API aliases (dromedary case) apply
            self.obj = RolloutObj.parse_obj(await api_client.create_namespaced_custom_object(
                namespace=namespace,
                body=self.obj.dict(by_alias=True, exclude_none=True),
                **self._rollout_const_args,
            ))
    @classmethod
    async def read(cls, name: str, namespace: str) -> "Rollout":
        """Read a Rollout by name under the given namespace.

        Args:
            name: The name of the Rollout to read.
            namespace: The namespace to read the Rollout from.
        """
        async with cls.preferred_client() as api_client:
            obj = await api_client.get_namespaced_custom_object(
                namespace=namespace,
                name=name,
                **cls._rollout_const_args,
            )
            rollout = Rollout(RolloutObj.parse_obj(obj))
            # Resolve the workloadRef Deployment (if any) so helpers can delegate to it
            if rollout.obj.spec.workload_ref:
                await rollout.read_workfload_ref(namespace=namespace)
            return rollout

    async def read_workfload_ref(self, namespace: str) -> None:
        """Resolve ``spec.workloadRef`` into ``self.workload_ref_controller``.

        NOTE(review): the method name contains a typo ("workfload") but is part
        of the public interface and left unchanged.

        Raises:
            RuntimeError: If the workloadRef kind is not ``Deployment``.
            ValueError: If the referenced Deployment does not exist.
        """
        if self.obj.spec.workload_ref.kind != "Deployment":
            raise RuntimeError(f"Rollout integration does not currently support workloadRef kind of {self.obj.spec.workload_ref.kind}")

        self.workload_ref_controller = await Deployment.read(
            name=self.obj.spec.workload_ref.name,
            namespace=namespace
        )
        if not self.workload_ref_controller:
            raise ValueError(
                f'cannot read Rollout: workloadRef Deployment "{self.obj.spec.workload_ref.name}"'
                f' does not exist in Namespace "{namespace}"'
            )
    async def patch(self) -> None:
        """Update the changed attributes of the Rollout."""
        async with self.api_client() as api_client:
            self.obj = RolloutObj.parse_obj(await api_client.patch_namespaced_custom_object(
                namespace=self.namespace,
                name=self.name,
                body=self.obj.dict(by_alias=True, exclude_none=True),
                **self._rollout_const_args,
            ))

    async def delete(self, options: kubernetes_asyncio.client.V1DeleteOptions = None) -> kubernetes_asyncio.client.V1Status:
        """Delete the Rollout.

        This method expects the Rollout to have been loaded or otherwise
        assigned a namespace already. If it has not, the namespace will need
        to be set manually.

        Args:
            options: Unsupported, options for Rollout deletion.

        Raises:
            RuntimeError: If deletion options are supplied.

        Returns:
            The status of the delete operation.
        """
        if options is not None:
            raise RuntimeError("Rollout deletion does not support V1DeleteOptions")

        self.logger.info(f'deleting rollout "{self.name}"')
        self.logger.trace(f"rollout: {self.obj}")

        async with self.api_client() as api_client:
            return await api_client.delete_namespaced_custom_object(
                namespace=self.namespace,
                name=self.name,
                **self._rollout_const_args,
            )

    async def refresh(self) -> None:
        """Refresh the underlying Kubernetes Rollout resource."""
        async with self.api_client() as api_client:
            self.obj = RolloutObj.parse_obj(await api_client.get_namespaced_custom_object_status(
                namespace=self.namespace,
                name=self.name,
                **self._rollout_const_args
            ))

        # Keep the resolved workloadRef Deployment in sync as well
        if self.workload_ref_controller:
            await self.workload_ref_controller.refresh()
    async def rollback(self) -> None:
        """Unsupported: Argo Rollouts automates rollbacks itself.

        Raises:
            TypeError: Always raised; manual rollback is not supported.
        """
        # TODO rollbacks are automated in Argo Rollouts, not sure if making this No Op will cause issues
        # but I was unable to locate a means of triggering a rollout rollback manually
        raise TypeError(
            (
                "rollback is not supported under the optimization of rollouts because rollbacks are applied to "
                "Kubernetes Deployment objects whereas this is automated by argocd"
            )
        )

    async def get_status(self) -> RolloutStatus:
        """Get the status of the Rollout.

        Returns:
            The status of the Rollout.
        """
        self.logger.info(f'checking status of rollout "{self.name}"')
        # first, refresh the rollout state to ensure the latest status
        await self.refresh()

        # return the status from the rollout
        return self.obj.status
    async def get_pods(self) -> List[Pod]:
        """Get the pods for the Rollout.

        Returns:
            A list of pods that belong to the rollout.
        """
        self.logger.debug(f'getting pods for rollout "{self.name}"')

        async with Pod.preferred_client() as api_client:
            # Select pods via the rollout's matchLabels (delegated to workloadRef when set)
            label_selector = self.match_labels
            pod_list: kubernetes_asyncio.client.V1PodList = await api_client.list_namespaced_pod(
                namespace=self.namespace, label_selector=selector_string(label_selector)
            )

        pods = [Pod(p) for p in pod_list.items]
        return pods

    @property
    def status(self) -> RolloutStatus:
        """Return the status of the Rollout.

        Returns:
            The status of the Rollout.
        """
        return self.obj.status

    @property
    def observed_generation(self) -> str:
        """
        Returns the observed generation of the Deployment status.

        The generation is observed by the deployment controller.
        """
        # Delegate to the workloadRef Deployment when one is configured
        if self.workload_ref_controller:
            return self.workload_ref_controller.observed_generation
        return self.obj.status.observed_generation
async def is_ready(self) -> bool:
"""Check if the Rollout is in the ready state.
Returns:
True if in the ready state; False otherwise.
"""
await self.refresh()
# if there is no status, the deployment is definitely not ready
status = self.obj.status
if status is None:
return False
# check for the rollout completed status condition
completed_condition = next(filter(lambda con: con.type == "Completed", status.conditions), None)
if completed_condition.status != "True":
return False
# check the status for the number of total replicas and compare
# it to the number of ready replicas. if the numbers are
# equal, the deployment is ready; otherwise it is not ready.
total = status.replicas
ready = status.ready_replicas
if total is None:
return False
return total == ready
    @property
    def containers(self) -> List[Container]:
        """
        Return a list of Container objects from the underlying pod template spec.
        """
        if self.workload_ref_controller:
            return self.workload_ref_controller.containers
        return list(
            map(lambda c: Container(c, None), self.obj.spec.template.spec.containers)
        )

    def find_container(self, name: str) -> Optional[Container]:
        """
        Return the container with the given name, or None if absent.
        """
        return next(filter(lambda c: c.name == name, self.containers), None)

    async def get_target_container(self, config: ContainerConfiguration) -> Optional[Container]:
        """Return the container targeted by the supplied configuration"""
        target_container = self.find_container(config.name)
        # Convert the pydantic RolloutV1Container to the canonical kubernetes client
        # V1Container so callers always receive a uniform type
        if target_container is not None and isinstance(target_container.obj, RolloutV1Container):
            async with kubernetes_asyncio.client.ApiClient() as api_client:
                target_container.obj = api_client.deserialize(
                    response=FakeKubeResponse(target_container.obj.dict(by_alias=True, exclude_none=True)),
                    response_type=kubernetes_asyncio.client.models.V1Container
                )
        return target_container
    @property
    def replicas(self) -> int:
        """
        Return the number of desired pods.
        """
        return self.obj.spec.replicas

    @replicas.setter
    def replicas(self, replicas: int) -> None:
        """
        Set the number of desired pods.
        """
        self.obj.spec.replicas = replicas

    @property
    def match_labels(self) -> Dict[str, str]:
        """Return the matchLabels dict of the selector field (from the workloadRef if applicable)."""
        if self.workload_ref_controller:
            return self.workload_ref_controller.match_labels
        return self.obj.spec.selector.match_labels

    @property
    def pod_template_spec(self) -> RolloutV1PodTemplateSpec:
        """Return the pod template spec for instances of the Rollout."""
        if self.workload_ref_controller:
            return self.workload_ref_controller.pod_template_spec
        return self.obj.spec.template

    async def get_pod_template_spec_copy(self) -> kubernetes_asyncio.client.models.V1PodTemplateSpec:
        """Return a deep copy of the pod template spec. Eg. for creation of a tuning pod"""
        if self.workload_ref_controller:
            return await self.workload_ref_controller.get_pod_template_spec_copy()

        # Serialize through FakeKubeResponse to obtain a kubernetes client model copy
        async with kubernetes_asyncio.client.ApiClient() as api_client:
            return api_client.deserialize(
                response=FakeKubeResponse(self.pod_template_spec.dict(by_alias=True, exclude_none=True)),
                response_type=kubernetes_asyncio.client.models.V1PodTemplateSpec
            )

    def update_pod(self, pod: kubernetes_asyncio.client.models.V1Pod) -> kubernetes_asyncio.client.models.V1Pod:
        """Update the pod with the latest state of the controller if needed. In the case of argo rollouts, the
        pod labels are updated with the latest template hash so that it will be routed to by the appropriate service"""
        # Apply the latest template hash so the active service register the tuning pod as an endpoint
        pod.metadata.labels["rollouts-pod-template-hash"] = self.obj.status.current_pod_hash
        return pod
    @backoff.on_exception(backoff.expo, kubernetes_asyncio.client.exceptions.ApiException, max_tries=3)
    async def inject_sidecar(
        self,
        name: str,
        image: str,
        *args,
        service: Optional[str] = None,
        port: Optional[int] = None,
        index: Optional[int] = None,
        service_port: int = 9980
    ) -> None:
        """
        Injects an Envoy sidecar into a target Deployment that proxies a service
        or literal TCP port, generating scrapeable metrics usable for optimization.

        The service or port argument must be provided to define how traffic is proxied
        between the Envoy sidecar and the container responsible for fulfilling the request.

        Args:
            name: The name of the sidecar to inject.
            image: The container image for the sidecar container.
            service: Name of the service to proxy. Envoy will accept ingress traffic
                on the service port and reverse proxy requests back to the original
                target container.
            port: The name or number of a port within the Deployment to wrap the proxy around.
            index: The index at which to insert the sidecar container. When `None`, the sidecar is appended.
            service_port: The port to receive ingress traffic from an upstream service.

        Raises:
            ValueError: If neither service nor port is given, on port conflicts,
                or when the service/port cannot be resolved.
            TypeError: If the port value has an unsupported type.
        """
        # Delegate to the workloadRef Deployment when one is configured
        if self.workload_ref_controller:
            await self.workload_ref_controller.inject_sidecar(
                name=name, image=image, *args, service=service, port=port, index=index, service_port=service_port
            )
            return

        await self.refresh()

        if not (service or port):
            raise ValueError(f"a service or port must be given")

        # Normalize numeric strings ("8080") to ints up front
        if isinstance(port, str) and port.isdigit():
            port = int(port)

        # check for a port conflict
        container_ports = list(itertools.chain(*map(operator.attrgetter("ports"), self.containers)))
        if service_port in list(map(operator.attrgetter("container_port"), container_ports)):
            raise ValueError(f"Port conflict: Rollout '{self.name}' already exposes port {service_port} through an existing container")

        # lookup the port on the target service
        if service:
            try:
                service_obj = await Service.read(service, self.namespace)
            except kubernetes_asyncio.client.exceptions.ApiException as error:
                if error.status == 404:
                    raise ValueError(f"Unknown Service '{service}'") from error
                else:
                    raise error
            if not port:
                # No explicit port: only unambiguous when the service has exactly one
                port_count = len(service_obj.obj.spec.ports)
                if port_count == 0:
                    raise ValueError(f"Target Service '{service}' does not expose any ports")
                elif port_count > 1:
                    raise ValueError(f"Target Service '{service}' exposes multiple ports -- target port must be specified")
                port_obj = service_obj.obj.spec.ports[0]
            else:
                if isinstance(port, int):
                    port_obj = next(filter(lambda p: p.port == port, service_obj.obj.spec.ports), None)
                elif isinstance(port, str):
                    port_obj = next(filter(lambda p: p.name == port, service_obj.obj.spec.ports), None)
                else:
                    raise TypeError(f"Unable to resolve port value of type {port.__class__} (port={port})")
                if not port_obj:
                    raise ValueError(f"Port '{port}' does not exist in the Service '{service}'")

            # resolve symbolic name in the service target port to a concrete container port
            if isinstance(port_obj.target_port, str):
                container_port_obj = next(filter(lambda p: p.name == port_obj.target_port, container_ports), None)
                if not container_port_obj:
                    raise ValueError(f"Port '{port_obj.target_port}' could not be resolved to a destination container port")
                container_port = container_port_obj.container_port
            else:
                container_port = port_obj.target_port
        else:
            # find the container port
            container_port_obj = next(filter(lambda p: p.container_port == port, container_ports), None)
            if not container_port_obj:
                raise ValueError(f"Port '{port}' could not be resolved to a destination container port")
            container_port = container_port_obj.container_port

        # build the sidecar container
        container = RolloutV1Container(
            name=name,
            image=image,
            image_pull_policy="IfNotPresent",
            resources=RolloutV1ResourceRequirements(
                requests={
                    "cpu": "125m",
                    "memory": "128Mi"
                },
                limits={
                    "cpu": "250m",
                    "memory": "256Mi"
                }
            ),
            env=[
                RolloutV1EnvVar(name="OPSANI_ENVOY_PROXY_SERVICE_PORT", value=str(service_port)),
                RolloutV1EnvVar(name="OPSANI_ENVOY_PROXIED_CONTAINER_PORT", value=str(container_port)),
                RolloutV1EnvVar(name="OPSANI_ENVOY_PROXY_METRICS_PORT", value="9901")
            ],
            ports=[
                RolloutV1ContainerPort(name="opsani-proxy", container_port=service_port, protocol="TCP"),
                RolloutV1ContainerPort(name="opsani-metrics", container_port=9901, protocol="TCP"),
            ]
        )

        # add the sidecar to the Rollout
        if index is None:
            self.obj.spec.template.spec.containers.append(container)
        else:
            self.obj.spec.template.spec.containers.insert(index, container)

        # patch the Rollout
        await self.patch()
    # TODO: convert to rollout logic
    async def eject_sidecar(self, name: str) -> bool:
        """Eject an Envoy sidecar from the Deployment.

        Returns True if the sidecar was ejected.

        NOTE(review): `remove_container` and `replace` are not defined on Rollout
        within this file — presumably provided elsewhere or pending the TODO above;
        confirm before relying on this method.
        """
        await self.refresh()
        container = self.remove_container(name)
        if container:
            await self.replace()
            return True

        return False

    # TODO: rebase this and _check_conditions for saturation mode
    @contextlib.asynccontextmanager
    async def rollout(self, *, timeout: Optional[servo.Duration] = None) -> None:
        # Not yet supported for Argo Rollouts; present to satisfy the interface
        raise NotImplementedError('To be implemented in future update')
class Millicore(int):
    """
    The Millicore class represents one one-thousandth of a vCPU or hyperthread in Kubernetes.
    """

    @classmethod
    def __get_validators__(cls) -> pydantic.CallableGenerator:
        # pydantic custom-type hook: validate via `parse`
        yield cls.parse

    @classmethod
    def parse(cls, v: pydantic.StrIntFloat) -> "Millicore":
        """
        Parses a string, integer, or float input value into Millicore units.

        Returns:
            The input value in Millicore units.

        Raises:
            ValueError: Raised if the input cannot be parsed.
        """
        if isinstance(v, str):
            # "250m" style strings are already millicores; otherwise treat as cores
            return cls(int(v[:-1])) if v[-1] == "m" else cls(int(float(v) * 1000))
        if isinstance(v, (int, float, decimal.Decimal)):
            return cls(int(float(v) * 1000))
        raise ValueError("could not parse millicore value")

    def __str__(self) -> str:
        # Whole-core values render as plain core counts, fractional as "<n>m"
        cores, remainder = divmod(int(self), 1000)
        return str(cores) if remainder == 0 else f"{int(self)}m"

    def __float__(self) -> float:
        return self / 1000.0

    def __eq__(self, other) -> bool:
        # Support comparison against string ("250m") and float (0.25) forms
        if isinstance(other, str):
            return str(self) == other
        if isinstance(other, float):
            return float(self) == other
        return super().__eq__(other)

    def human_readable(self) -> str:
        return str(self)
class CPU(servo.CPU):
    """
    The CPU class models a Kubernetes CPU resource in Millicore units.
    """

    min: Millicore
    max: Millicore
    step: Millicore
    value: Optional[Millicore]

    # Kubernetes resource requirements
    request: Optional[Millicore]
    limit: Optional[Millicore]
    # Which resource requirements are read (get) and written (set) when adjusting
    get: pydantic.conlist(ResourceRequirement, min_items=1) = [ResourceRequirement.request, ResourceRequirement.limit]
    set: pydantic.conlist(ResourceRequirement, min_items=1) = [ResourceRequirement.request, ResourceRequirement.limit]

    def __opsani_repr__(self) -> dict:
        o_dict = super().__opsani_repr__()

        # normalize values into floats (see Millicore __float__)
        for field in ("min", "max", "step", "value"):
            value = getattr(self, field)
            o_dict["cpu"][field] = float(value) if value is not None else None
        return o_dict
# Gibibyte is the base unit of Kubernetes memory
MiB = 2 ** 20  # mebibyte: 2**20 bytes
GiB = 2 ** 30  # gibibyte: 2**30 bytes
class ShortByteSize(pydantic.ByteSize):
    """Kubernetes omits the 'B' suffix for some reason"""

    @classmethod
    def validate(cls, v: pydantic.StrIntFloat) -> "ShortByteSize":
        """Parse a value into a byte size, accepting Kubernetes-style suffixes ("1Gi")."""
        if isinstance(v, str):
            try:
                return super().validate(v)
            except ValueError:
                # Was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt;
                # pydantic parse failures are ValueError subclasses.
                # Append the byte suffix and retry parsing
                return super().validate(v + "b")
        elif isinstance(v, float):
            # Bare floats are interpreted as Gibibytes
            v = v * GiB
        return super().validate(v)

    def human_readable(self) -> str:
        """Render the size with Kubernetes-style units (e.g. "2.5Gi" rather than "2.5GiB")."""
        sup = super().human_readable()
        # Remove the 'B' suffix to align with Kubernetes units (`GiB` -> `Gi`)
        if sup[-1] == 'B' and sup[-2].isalpha():
            sup = sup[0:-1]
        return sup
class Memory(servo.Memory):
    """
    The Memory class models a Kubernetes Memory resource.
    """

    min: ShortByteSize
    max: ShortByteSize
    step: ShortByteSize
    value: Optional[ShortByteSize]

    # Kubernetes resource requirements
    request: Optional[ShortByteSize]
    limit: Optional[ShortByteSize]
    # Which resource requirements are read (get) and written (set) when adjusting
    get: pydantic.conlist(ResourceRequirement, min_items=1) = [ResourceRequirement.request, ResourceRequirement.limit]
    set: pydantic.conlist(ResourceRequirement, min_items=1) = [ResourceRequirement.request, ResourceRequirement.limit]

    def __opsani_repr__(self) -> dict:
        o_dict = super().__opsani_repr__()

        # normalize values into floating point Gibibyte units
        for field in ("min", "max", "step", "value"):
            value = getattr(self, field)
            o_dict["mem"][field] = float(value) / GiB if value is not None else None
        return o_dict
def _normalize_adjustment(adjustment: servo.Adjustment) -> Tuple[str, Union[str, servo.Numeric]]:
    """Normalize an adjustment object into a Kubernetes native setting key/value pair."""
    # The backend's "mem" setting maps to the Kubernetes "memory" resource name
    setting = "memory" if adjustment.setting_name == "mem" else adjustment.setting_name
    value = adjustment.value

    if setting == "memory":
        # Add GiB suffix to Numerics and Numeric strings
        looks_numeric = isinstance(value, (int, float)) or (
            isinstance(value, str) and value.replace('.', '', 1).isdigit()
        )
        if looks_numeric:
            value = f"{value}Gi"
    elif setting == "cpu":
        value = str(Millicore.parse(value))
    elif setting == "replicas":
        value = int(float(value))

    return setting, value
class BaseOptimization(abc.ABC, pydantic.BaseModel, servo.logging.Mixin):
    """
    BaseOptimization is the base class for concrete implementations of optimization strategies.

    Attributes:
        name (str): The name of the Optimization. Used to set the name for the corresponding component.
        timeout (Duration): Time interval to wait before considering Kubernetes operations to have failed.
        adjustments (List[Adjustment]): List of adjustments applied to this optimization (NOTE optimizations are re-created for each
            event dispatched to the connector. Thus, this value will only be populated during adjust event handling with only the adjustments
            pertaining to that adjust event dispatch)
    """

    name: str
    timeout: servo.Duration
    # pydantic copies mutable defaults per instance, so the shared [] is safe here
    adjustments: List[servo.Adjustment] = []
@abc.abstractclassmethod
async def create(
cls, config: "BaseKubernetesConfiguration", *args, **kwargs
) -> "BaseOptimization":
""""""
...
    @abc.abstractmethod
    def adjust(
        self, adjustment: servo.Adjustment, control: servo.Control = servo.Control()
    ) -> servo.Description:
        """
        Adjust a setting on the underlying Deployment/Pod or Container.
        """
        ...

    @abc.abstractmethod
    async def apply(self) -> None:
        """
        Apply the adjusted settings to the Kubernetes cluster.
        """
        ...

    @abc.abstractmethod
    async def raise_for_status(self) -> None:
        """Raise an exception if in an unhealthy state."""
        ...

    @property
    @abc.abstractmethod
    def on_failure(self) -> FailureMode:
        """
        Return the configured failure behavior.
        """
        ...
async def handle_error(self, error: Exception) -> bool:
"""
Handle an operational failure in accordance with the failure mode configured by the operator.
Well executed error handling requires context and strategic thinking. The servo base library
provides a rich set of primitives and patterns for approaching error handling but ultimately
the experience is reliant on the connector developer who has knowledge of the essential context
and understands the user needs and expectations.
The error handling implementation provided in this method handles the general cases out of the
box and relies on abstract methods (see below) to implement more advanced behaviors such as
rollback and tear-down.
Returns:
A boolean value that indicates if the error was handled.
Raises:
NotImplementedError: Raised if there is no handler for a given failure mode. Subclasses
must filter failure modes before calling the superclass implementation.
"""
# Ensure that we chain any underlying exceptions that may occur
try:
self.logger.error(f"handling error with with failure mode {self.on_failure}: {error.__class__.__name__} - {str(error)}")
self.logger.opt(exception=error).debug(f"kubernetes error details")
if self.on_failure == FailureMode.exception:
raise error
elif self.on_failure == FailureMode.ignore:
self.logger.opt(exception=error).warning(f"ignoring exception")
return True
elif self.on_failure == FailureMode.rollback:
await self.rollback(error)
elif self.on_failure == FailureMode.shutdown:
await self.shutdown(error)
else:
# Trap any new modes that need to be handled
raise NotImplementedError(
f"missing error handler for failure mode '{self.on_failure}'"
)
raise error # Always communicate errors to backend unless ignored
except Exception as handler_error:
raise handler_error from error # reraising an error from itself is safe
    @abc.abstractmethod
    async def rollback(self, error: Optional[Exception] = None) -> None:
        """
        Asynchronously roll back the Optimization to a previous known
        good state.

        Args:
            error: An optional exception that contextualizes the cause of the rollback.
        """
        ...

    @abc.abstractmethod
    async def shutdown(self, error: Optional[Exception] = None) -> None:
        """
        Asynchronously shut down the Optimization.

        Args:
            error: An optional exception that contextualizes the cause of the destruction.
        """
        ...

    @abc.abstractmethod
    def to_components(self) -> List[servo.Component]:
        """
        Return a list of Component representations of the Optimization.

        Components are the canonical representation of optimizations in the Opsani API.
        """
        ...

    @abc.abstractmethod
    async def is_ready(self) -> bool:
        """
        Verify Optimization target Resource/Controller is ready.
        """
        ...

    def __hash__(self):
        # Hash on name + object identity so distinct instances with the same
        # name remain distinguishable when used in sets/dicts.
        return hash(
            (
                self.name,
                id(self),
            )
        )

    class Config:
        # Allow non-pydantic wrapper types (Deployment, Container, etc.) as field values
        arbitrary_types_allowed = True
class DeploymentOptimization(BaseOptimization):
    """
    The DeploymentOptimization class implements an optimization strategy based on directly reconfiguring a Kubernetes
    Deployment and its associated containers.
    """

    # Configuration and live wrapper for the target Deployment
    deployment_config: "DeploymentConfiguration"
    deployment: Deployment
    # Configuration and live wrapper for the single optimized container
    container_config: "ContainerConfiguration"
    container: Container
    @classmethod
    async def create(
        cls, config: "DeploymentConfiguration", **kwargs
    ) -> "DeploymentOptimization":
        """Read the target Deployment and wrap it and its first configured container
        into a DeploymentOptimization.

        Raises:
            ValueError: If the configured container does not exist in the Deployment.
            NotImplementedError: If static environment variables are configured.
        """
        deployment = await Deployment.read(config.name, config.namespace)

        replicas = config.replicas.copy()
        replicas.value = deployment.replicas

        # FIXME: Currently only supporting one container
        for container_config in config.containers:
            container = deployment.find_container(container_config.name)
            if not container:
                names = servo.utilities.strings.join_to_series(
                    list(map(lambda c: c.name, deployment.containers))
                )
                raise ValueError(
                    f'no container named "{container_config.name}" exists in the Pod (found {names})'
                )

            if container_config.static_environment_variables:
                raise NotImplementedError("Configurable environment variables are not currently supported under Deployment optimization (saturation mode)")

            name = container_config.alias or (
                f"{deployment.name}/{container.name}" if container else deployment.name
            )

            # Returns on the first container (see FIXME above)
            return cls(
                name=name,
                deployment_config=config,
                deployment=deployment,
                container_config=container_config,
                container=container,
                **kwargs,
            )
    @property
    def cpu(self) -> CPU:
        """
        Return the current CPU setting for the optimization.
        """
        cpu = self.container_config.cpu.copy()

        # Determine the value in priority order from the config
        resource_requirements = self.container.get_resource_requirements('cpu')
        cpu.request = resource_requirements.get(ResourceRequirement.request)
        cpu.limit = resource_requirements.get(ResourceRequirement.limit)
        # First non-None requirement in the configured `get` order wins
        value = resource_requirements.get(
            next(filter(lambda r: resource_requirements[r] is not None, self.container_config.cpu.get), None)
        )

        # NOTE: use copy + update to apply values that may be outside of the range
        value = Millicore.parse(value)
        cpu = cpu.copy(update={"value": value})
        return cpu

    @property
    def memory(self) -> Memory:
        """
        Return the current Memory setting for the optimization.
        """
        memory = self.container_config.memory.copy()

        # Determine the value in priority order from the config
        resource_requirements = self.container.get_resource_requirements('memory')
        memory.request = resource_requirements.get(ResourceRequirement.request)
        memory.limit = resource_requirements.get(ResourceRequirement.limit)
        # First non-None requirement in the configured `get` order wins
        value = resource_requirements.get(
            next(filter(lambda r: resource_requirements[r] is not None, self.container_config.memory.get), None)
        )

        # NOTE: use copy + update to apply values that may be outside of the range
        value = ShortByteSize.validate(value)
        memory = memory.copy(update={"value": value})
        return memory
    @property
    def replicas(self) -> servo.Replicas:
        """
        Return the current Replicas setting for the optimization.
        """
        replicas = self.deployment_config.replicas.copy()
        replicas.value = self.deployment.replicas
        return replicas

    @property
    def on_failure(self) -> FailureMode:
        """
        Return the configured failure behavior. If not set explicitly, this will be cascaded
        from the base kubernetes configuration (or its default)
        """
        return self.deployment_config.on_failure
    async def rollback(self, error: Optional[Exception] = None) -> None:
        """
        Initiates an asynchronous rollback to a previous version of the Deployment.

        Args:
            error: An optional error that triggered the rollback.
        """
        self.logger.info(f"adjustment failed: rolling back deployment... ({error})")
        await asyncio.wait_for(
            self.deployment.rollback(),
            timeout=self.timeout.total_seconds(),
        )

    async def shutdown(self, error: Optional[Exception] = None) -> None:
        """
        Initiates the asynchronous deletion of all pods in the Deployment under optimization.

        Args:
            error: An optional error that triggered the destruction.
        """
        self.logger.info(f"adjustment failed: shutting down deployment's pods...")
        await asyncio.wait_for(
            self.deployment.scale_to_zero(),
            timeout=self.timeout.total_seconds(),
        )

    def to_components(self) -> List[servo.Component]:
        """Return the single-component API representation of this optimization."""
        return [
            servo.Component(name=self.name, settings=[self.cpu, self.memory, self.replicas])
        ]
def adjust(self, adjustment: servo.Adjustment, control: servo.Control = servo.Control()) -> None:
"""
Adjust the settings on the Deployment or a component Container.
Adjustments do not take effect on the cluster until the `apply` method is invoked
to enable aggregation of related adjustments and asynchronous application.
"""
self.adjustments.append(adjustment)
setting_name, value = _normalize_adjustment(adjustment)
self.logger.info(f"adjusting {setting_name} to {value}")
if setting_name in ("cpu", "memory"):
# NOTE: use copy + update to apply values that may be outside of the range
servo.logger.debug(f"Adjusting {setting_name}={value}")
setting = getattr(self.container_config, setting_name).copy(update={"value": value})
# Set only the requirements defined in the config
requirements: Dict[ResourceRequirement, Optional[str]] = {}
for requirement in setting.set:
requirements[requirement] = value
self.container.set_resource_requirements(setting_name, requirements)
elif setting_name == "replicas":
# NOTE: Assign to the config to trigger validations
self.deployment_config.replicas.value = value
self.deployment.replicas = value
else:
raise RuntimeError(
f"failed adjustment of unsupported Kubernetes setting '{adjustment.setting_name}'"
)
async def apply(self) -> None:
"""
Apply changes asynchronously and wait for them to roll out to the cluster.
Kubernetes deployments orchestrate a number of underlying resources. Awaiting the
outcome of a deployment change requires observation of the `resource_version` which
indicates if a given patch actually changed the resource, the `observed_generation`
which is a value managed by the deployments controller and indicates the effective
version of the deployment exclusive of insignificant changes that do not affect runtime
(such as label updates), and the `conditions` of the deployment status which reflect
state at a particular point in time. How these elements change during a rollout is
dependent on the deployment strategy in effect and its requirements (max unavailable,
surge, etc).
The logic implemented by this method is as follows:
- Capture the `resource_version` and `observed_generation`.
- Patch the underlying Deployment object via the Kubernetes API.
- Check that `resource_version` has been incremented or return early if nothing has changed.
- Create a Kubernetes Watch on the Deployment targeted by label selector and resource version.
- Observe events streamed via the watch.
- Look for the Deployment to report a Status Condition of `"Progressing"`.
- Wait for the `observed_generation` to increment indicating that the Deployment is applying our changes.
- Track the value of the `available_replicas`, `ready_replicas`, `unavailable_replicas`,
and `updated_replicas` attributes of the Deployment Status until `available_replicas`,
`ready_replicas`, and `updated_replicas` are all equal to the value of the `replicas` attribute of
the Deployment and `unavailable_replicas` is `None`. Return success.
- Raise an error upon expiration of an adjustment timeout or encountering a Deployment Status Condition
where `type=Progressing` and `status=False`.
This method abstracts the details of adjusting a Deployment and returns once the desired
changes have been fully rolled out to the cluster or an error has been encountered.
See https://kubernetes.io/docs/concepts/workloads/controllers/deployment/
# The resource_version attribute lets us efficiently watch for changes
# reference: https://kubernetes.io/docs/reference/using-api/api-concepts/#efficient-detection-of-changes
"""
try:
async with self.deployment.rollout(timeout=self.timeout) as deployment:
# Patch the Deployment via the Kubernetes API
await deployment.patch()
except WatchTimeoutError:
servo.logger.error(f"Timed out waiting for Deployment to become ready...")
await self.raise_for_status()
async def is_ready(self) -> bool:
is_ready, restart_count = await asyncio.gather(
self.deployment.is_ready(),
self.deployment.get_restart_count()
)
return is_ready and restart_count == 0
async def raise_for_status(self) -> None:
"""Raise an exception if in an unhealthy state."""
await self.deployment.raise_for_status(adjustments=self.adjustments)
# TODO: Break down into CanaryDeploymentOptimization and CanaryContainerOptimization
class CanaryOptimization(BaseOptimization):
    """CanaryOptimization objects manage the optimization of Containers within a Deployment using
    a tuning Pod that is adjusted independently and compared against the performance and cost profile
    of its siblings.
    """
    # The deployment and container stanzas from the configuration
    # NOTE: exactly one of deployment_config/rollout_config must be set (enforced by the root validator below)
    deployment_config: Optional["DeploymentConfiguration"]
    rollout_config: Optional["RolloutConfiguration"]
    container_config: "ContainerConfiguration"
    # State for mainline resources. Read from the cluster
    deployment: Optional[Deployment]
    rollout: Optional[Rollout]
    main_container: Container
    # State for tuning resources
    tuning_pod: Optional[Pod]
    tuning_container: Optional[Container]
    _tuning_pod_template_spec: Optional[kubernetes_asyncio.client.models.V1PodTemplateSpec] = pydantic.PrivateAttr()
    @pydantic.root_validator
    def check_deployment_and_rollout(cls, values):
        """Validate that exactly one of the Deployment/Rollout config and state pairs is provided."""
        if values.get('deployment_config') is not None and values.get('rollout_config') is not None:
            raise ValueError("Cannot create a CanaryOptimization with both rollout and deployment configurations")
        if values.get('deployment') is not None and values.get('rollout') is not None:
            raise ValueError("Cannot create a CanaryOptimization with both rollout and deployment")
        if values.get('deployment_config') is None and values.get('rollout_config') is None:
            raise ValueError("CanaryOptimization must be initialized with either a rollout or deployment configuration")
        if values.get('deployment') is None and values.get('rollout') is None:
            raise ValueError("CanaryOptimization must be initialized with either a rollout or deployment")
        return values
    @property
    def target_controller_config(self) -> Union["DeploymentConfiguration", "RolloutConfiguration"]:
        """Return whichever controller configuration (Deployment or Rollout) is set."""
        return self.deployment_config or self.rollout_config
    @property
    def target_controller(self) -> Union[Deployment, Rollout]:
        """Return whichever controller state object (Deployment or Rollout) is set."""
        return self.deployment or self.rollout
    @property
    def target_controller_type(self) -> str:
        """Return the class name of the target controller (e.g. "Deployment" or "Rollout")."""
        return type(self.target_controller).__name__
    @classmethod
    async def create(
        cls, deployment_or_rollout_config: Union["DeploymentConfiguration", "RolloutConfiguration"], **kwargs
    ) -> "CanaryOptimization":
        """Read controller state from the cluster and build a fully hydrated CanaryOptimization.

        Raises:
            ValueError: If the target controller does not exist in the namespace.
            NotImplementedError: If an unknown configuration type is given.
        """
        read_args = (deployment_or_rollout_config.name, cast(str, deployment_or_rollout_config.namespace))
        if isinstance(deployment_or_rollout_config, DeploymentConfiguration):
            controller_type = "Deployment"
            deployment_or_rollout = await Deployment.read(*read_args)
            init_args = dict(deployment_config = deployment_or_rollout_config, deployment = deployment_or_rollout)
        elif isinstance(deployment_or_rollout_config, RolloutConfiguration):
            controller_type = "Rollout"
            deployment_or_rollout = await Rollout.read(*read_args)
            init_args = dict(rollout_config = deployment_or_rollout_config, rollout = deployment_or_rollout)
        else:
            raise NotImplementedError(f"Unknown configuration type '{type(deployment_or_rollout_config).__name__}'")
        if not deployment_or_rollout:
            raise ValueError(
                f'cannot create CanaryOptimization: target {controller_type} "{deployment_or_rollout_config.name}"'
                f' does not exist in Namespace "{deployment_or_rollout_config.namespace}"'
            )
        # NOTE: Currently only supporting one container
        assert len(deployment_or_rollout_config.containers) == 1, "CanaryOptimization currently only supports a single container"
        container_config = deployment_or_rollout_config.containers[0]
        main_container = await deployment_or_rollout.get_target_container(container_config)
        # Prefer the configured alias; otherwise synthesize "<controller>/<container>-tuning"
        name = (
            deployment_or_rollout_config.strategy.alias
            if isinstance(deployment_or_rollout_config.strategy, CanaryOptimizationStrategyConfiguration)
            and deployment_or_rollout_config.strategy.alias
            else f"{deployment_or_rollout.name}/{main_container.name}-tuning"
        )
        optimization = cls(
            name=name,
            **init_args,
            container_config=container_config,
            main_container=main_container,
            **kwargs,
        )
        await optimization._load_tuning_state()
        return optimization
    async def _load_tuning_state(self) -> None:
        """Hydrate tuning Pod/Container state from the cluster and prepare the tuning pod template spec."""
        # Find an existing tuning Pod/Container if available
        try:
            tuning_pod = await Pod.read(self.tuning_pod_name, cast(str, self.namespace))
            tuning_container = tuning_pod.get_container(self.container_config.name)
        except kubernetes_asyncio.client.exceptions.ApiException as e:
            # A 404/"Not Found" simply means no tuning pod exists yet; anything else is fatal
            if e.status != 404 or e.reason != "Not Found":
                servo.logger.trace(f"Failed reading tuning pod: {e}")
                raise
            else:
                tuning_pod = None
                tuning_container = None
        # TODO: Factor into a new class?
        self.tuning_pod = tuning_pod
        self.tuning_container = tuning_container
        await self._configure_tuning_pod_template_spec()
    @property
    def pod_template_spec_container(self) -> Container:
        """Return a Container wrapper for the target container within the tuning pod template spec."""
        container_obj = next(filter(lambda c: c.name == self.container_config.name, self._tuning_pod_template_spec.spec.containers))
        return Container(container_obj, None)
    def adjust(self, adjustment: servo.Adjustment, control: servo.Control = servo.Control()) -> None:
        """Stage an adjustment against the tuning pod template spec.

        Changes take effect on the cluster only when `apply` recreates the tuning Pod.

        Raises:
            servo.AdjustmentFailedError: If the adjustment names an unsupported setting.
        """
        assert self.tuning_pod, "Tuning Pod not loaded"
        assert self.tuning_container, "Tuning Container not loaded"
        self.adjustments.append(adjustment)
        setting_name, value = _normalize_adjustment(adjustment)
        self.logger.info(f"adjusting {setting_name} to {value}")
        if setting_name in ("cpu", "memory"):
            # NOTE: use copy + update to apply values that may be outside of the range
            servo.logger.debug(f"Adjusting {setting_name}={value}")
            setting = getattr(self.container_config, setting_name).copy(update={"value": value})
            # Set only the requirements defined in the config
            requirements: Dict[ResourceRequirement, Optional[str]] = {}
            for requirement in setting.set:
                requirements[requirement] = value
                servo.logger.debug(f"Assigning {setting_name}.{requirement}={value}")
            servo.logger.debug(f"Setting resource requirements for {setting_name} to {requirements} on PodTemplateSpec")
            self.pod_template_spec_container.set_resource_requirements(setting_name, requirements)
        elif setting_name == "replicas":
            # The tuning Pod is a singleton; any other replica count is ignored
            if value != 1:
                servo.logger.warning(
                    f'ignored attempt to set replicas to "{value}"'
                )
        else:
            raise servo.AdjustmentFailedError(
                f"failed adjustment of unsupported Kubernetes setting '{setting_name}'"
            )
    async def apply(self) -> None:
        """Apply the adjustments to the target."""
        assert self.tuning_pod, "Tuning Pod not loaded"
        assert self.tuning_container, "Tuning Container not loaded"
        servo.logger.info("Applying adjustments to Tuning Pod")
        task = asyncio.create_task(self.create_or_recreate_tuning_pod())
        try:
            await task
        except asyncio.CancelledError:
            # Propagate cancellation after ensuring the inner task is cancelled and drained
            task.cancel()
            with contextlib.suppress(asyncio.CancelledError):
                await task
            raise
        # TODO: logging the wrong values -- should be coming from the podtemplatespec?
        servo.logger.success(f"Built new tuning pod with container resources: {self.tuning_container.resources}")
    @property
    def namespace(self) -> str:
        """Return the namespace of the target controller."""
        return self.target_controller_config.namespace
    @property
    def tuning_pod_name(self) -> str:
        """
        Return the name of tuning Pod for this optimization.
        """
        return f"{self.target_controller_config.name}-tuning"
    async def delete_tuning_pod(self, *, raise_if_not_found: bool = True) -> Optional[Pod]:
        """
        Delete the tuning Pod.

        Returns the deleted Pod, or None when no tuning Pod existed.
        """
        try:
            # TODO: Provide context manager or standard read option that handle not found? Lots of duplication on not found/conflict handling...
            tuning_pod = await Pod.read(self.tuning_pod_name, self.namespace)
            self.logger.info(
                f"Deleting tuning Pod '{tuning_pod.name}' from namespace '{tuning_pod.namespace}'..."
            )
            await tuning_pod.delete()
            await tuning_pod.wait_until_deleted()
            self.logger.info(
                f"Deleted tuning Pod '{tuning_pod.name}' from namespace '{tuning_pod.namespace}'."
            )
            self.tuning_pod = None
            self.tuning_container = None
            return tuning_pod
        except kubernetes_asyncio.client.exceptions.ApiException as e:
            # NOTE(review): `and` binds tighter than `or`, so this reads as
            # `e.status != 404 or (e.reason != "Not Found" and raise_if_not_found)`.
            # A plain 404 therefore never raises, even when raise_if_not_found=True —
            # confirm whether `(not-found) and raise_if_not_found` was intended.
            if e.status != 404 or e.reason != "Not Found" and raise_if_not_found:
                raise
            self.tuning_pod = None
            self.tuning_container = None
            return None
    @property
    def target_controller_name(self) -> str:
        """Return the name of the target controller."""
        return self.target_controller_config.name
    @property
    def container_name(self) -> str:
        """Return the name of the container under optimization."""
        return self.container_config.name
    # TODO: Factor into another class?
    async def _configure_tuning_pod_template_spec(self) -> None:
        """Build the V1PodTemplateSpec used to create the tuning Pod.

        Copies the controller's pod template, tags it with tuning metadata, injects
        static environment variables, carries over (or defaults) resource
        requirements, and — when running inside Kubernetes — registers the servo's
        Deployment as the owning controller of the tuning Pod.
        """
        # Configure a PodSpecTemplate for the tuning Pod state
        pod_template_spec: kubernetes_asyncio.client.models.V1PodTemplateSpec = await self.target_controller.get_pod_template_spec_copy()
        pod_template_spec.metadata.name = self.tuning_pod_name
        if pod_template_spec.metadata.annotations is None:
            pod_template_spec.metadata.annotations = {}
        pod_template_spec.metadata.annotations["opsani.com/opsani_tuning_for"] = self.name
        if pod_template_spec.metadata.labels is None:
            pod_template_spec.metadata.labels = {}
        pod_template_spec.metadata.labels["opsani_role"] = "tuning"
        # Build a container from the raw podspec
        container_obj = next(filter(lambda c: c.name == self.container_config.name, pod_template_spec.spec.containers))
        container = Container(container_obj, None)
        servo.logger.debug(f"Initialized new tuning container from Pod spec template: {container.name}")
        if self.container_config.static_environment_variables:
            if container.obj.env is None:
                container.obj.env = []
            # Filter out vars with the same name as the ones we are setting
            container.obj.env = list(filter(
                lambda e: e.name not in self.container_config.static_environment_variables,
                container.obj.env
            ))
            env_list = [
                kubernetes_asyncio.client.V1EnvVar(name=k, value=v)
                for k, v in self.container_config.static_environment_variables.items()
            ]
            container.obj.env.extend(env_list)
        if self.tuning_container:
            servo.logger.debug(f"Copying resource requirements from existing tuning pod container '{self.tuning_pod.name}/{self.tuning_container.name}'")
            resource_requirements = self.tuning_container.resources
            container.resources = resource_requirements
        else:
            servo.logger.debug(f"No existing tuning pod container found, initializing resource requirement defaults")
            set_container_resource_defaults_from_config(container, self.container_config)
        # If the servo is running inside Kubernetes, register self as the controller for the Pod and ReplicaSet
        servo_pod_name = os.environ.get("POD_NAME")
        servo_pod_namespace = os.environ.get("POD_NAMESPACE")
        if servo_pod_name is not None and servo_pod_namespace is not None:
            self.logger.debug(
                f"running within Kubernetes, registering as Pod controller... (pod={servo_pod_name}, namespace={servo_pod_namespace})"
            )
            servo_pod = await Pod.read(servo_pod_name, servo_pod_namespace)
            pod_controller = next(
                iter(
                    ow
                    for ow in servo_pod.obj.metadata.owner_references
                    if ow.controller
                )
            )
            # TODO: Create a ReplicaSet class...
            async with kubernetes_asyncio.client.api_client.ApiClient() as api:
                api_client = kubernetes_asyncio.client.AppsV1Api(api)
                # Walk the ownership chain: servo Pod -> ReplicaSet -> Deployment
                servo_rs: kubernetes_asyncio.client.V1ReplicaSet = (
                    await api_client.read_namespaced_replica_set(
                        name=pod_controller.name, namespace=servo_pod_namespace
                    )
                )  # still ephemeral
                rs_controller = next(
                    iter(
                        ow for ow in servo_rs.metadata.owner_references if ow.controller
                    )
                )
                servo_dep: kubernetes_asyncio.client.V1Deployment = (
                    await api_client.read_namespaced_deployment(
                        name=rs_controller.name, namespace=servo_pod_namespace
                    )
                )
                pod_template_spec.metadata.owner_references = [
                    kubernetes_asyncio.client.V1OwnerReference(
                        api_version=servo_dep.api_version,
                        block_owner_deletion=True,
                        controller=True,  # Ensures the pod will not be adopted by another controller
                        kind="Deployment",
                        name=servo_dep.metadata.name,
                        uid=servo_dep.metadata.uid,
                    )
                ]
        self._tuning_pod_template_spec = pod_template_spec
    async def create_or_recreate_tuning_pod(self) -> Pod:
        """
        Creates a new Tuning Pod or deletes and recreates one from the current optimization state.
        """
        servo.logger.info("Deleting existing tuning pod (if any)")
        await self.delete_tuning_pod(raise_if_not_found=False)
        return await self.create_tuning_pod()
    async def create_tuning_pod(self) -> Pod:
        """
        Creates a new Tuning Pod from the current optimization state.
        """
        assert self._tuning_pod_template_spec, "Must have tuning pod template spec"
        assert self.tuning_pod is None, "Tuning Pod already exists"
        assert self.tuning_container is None, "Tuning Pod Container already exists"
        self.logger.debug(
            f"creating tuning pod '{self.tuning_pod_name}' based on {self.target_controller_type} '{self.target_controller_name}' in namespace '{self.namespace}'"
        )
        # Setup the tuning Pod -- our settings are updated on the underlying PodSpec template
        self.logger.trace(f"building new tuning pod")
        pod_obj = kubernetes_asyncio.client.V1Pod(
            metadata=self._tuning_pod_template_spec.metadata, spec=self._tuning_pod_template_spec.spec
        )
        # Update pod with latest controller state
        pod_obj = self.target_controller.update_pod(pod_obj)
        tuning_pod = Pod(obj=pod_obj)
        # Create the Pod and wait for it to get ready
        self.logger.info(
            f"Creating tuning Pod '{self.tuning_pod_name}' in namespace '{self.namespace}'"
        )
        await tuning_pod.create(self.namespace)
        servo.logger.success(f"Created Tuning Pod '{self.tuning_pod_name}' in namespace '{self.namespace}'")
        servo.logger.info(f"waiting up to {self.timeout} for Tuning Pod to become ready...")
        progress = servo.EventProgress(self.timeout)
        progress_logger = lambda p: self.logger.info(
            p.annotate(f"waiting for '{self.tuning_pod_name}' to become ready...", prefix=False)
        )
        progress.start()
        # Race pod readiness against the progress reporter; the done callback stops the reporter
        task = asyncio.create_task(tuning_pod.wait_until_ready())
        task.add_done_callback(lambda _: progress.complete())
        gather_task = asyncio.gather(
            task,
            progress.watch(progress_logger),
        )
        try:
            await asyncio.wait_for(
                gather_task,
                timeout=self.timeout.total_seconds()
            )
        except asyncio.TimeoutError:
            servo.logger.error(f"Timed out waiting for Tuning Pod to become ready...")
            servo.logger.debug(f"Cancelling Task: {task}, progress: {progress}")
            for t in {task, gather_task}:
                t.cancel()
                with contextlib.suppress(asyncio.CancelledError):
                    await t
                    servo.logger.debug(f"Cancelled Task: {t}, progress: {progress}")
            # Surface the underlying pod failure (with adjustment context) rather than a bare timeout
            await tuning_pod.raise_for_status(adjustments=self.adjustments)
        # Load the in memory model for various convenience accessors
        await tuning_pod.refresh()
        await tuning_pod.get_containers()
        # Hydrate local state
        self.tuning_pod = tuning_pod
        self.tuning_container = tuning_pod.get_container(self.container_config.name)
        servo.logger.info(f"Tuning Pod successfully created")
        return tuning_pod
    @property
    def tuning_cpu(self) -> Optional[CPU]:
        """
        Return the current CPU setting for the target container of the tuning Pod (if any).
        """
        if not self.tuning_pod:
            return None
        cpu = self.container_config.cpu.copy()
        # Determine the value in priority order from the config
        resource_requirements = self.tuning_container.get_resource_requirements('cpu')
        cpu.request = resource_requirements.get(ResourceRequirement.request)
        cpu.limit = resource_requirements.get(ResourceRequirement.limit)
        value = resource_requirements.get(
            next(filter(lambda r: resource_requirements[r] is not None, self.container_config.cpu.get), None)
        )
        value = Millicore.parse(value)
        # NOTE: use copy + update to apply values that may be outside of the range
        cpu = cpu.copy(update={"value": value})
        return cpu
    @property
    def tuning_memory(self) -> Optional[Memory]:
        """
        Return the current Memory setting for the target container of the tuning Pod (if any).
        """
        if not self.tuning_pod:
            return None
        memory = self.container_config.memory.copy()
        # Determine the value in priority order from the config
        resource_requirements = self.tuning_container.get_resource_requirements('memory')
        memory.request = resource_requirements.get(ResourceRequirement.request)
        memory.limit = resource_requirements.get(ResourceRequirement.limit)
        value = resource_requirements.get(
            next(filter(lambda r: resource_requirements[r] is not None, self.container_config.memory.get), None)
        )
        value = ShortByteSize.validate(value)
        # NOTE: use copy + update to apply values that may be outside of the range
        memory = memory.copy(update={"value": value})
        return memory
    @property
    def tuning_replicas(self) -> servo.Replicas:
        """
        Return the current Replicas setting for the optimization.
        """
        # The tuning Pod is a singleton: 1 when it exists, 0 otherwise; pinned so it cannot be adjusted
        value = 1 if self.tuning_pod else 0
        return servo.Replicas(
            min=0,
            max=1,
            value=value,
            pinned=True,
        )
    @property
    def on_failure(self) -> FailureMode:
        """
        Return the configured failure behavior. If not set explicitly, this will be cascaded
        from the base kubernetes configuration (or its default)
        """
        return self.target_controller_config.on_failure
    @property
    def main_cpu(self) -> CPU:
        """
        Return the current CPU setting for the main containers.
        """
        # Determine the value in priority order from the config
        resource_requirements = self.main_container.get_resource_requirements('cpu')
        value = resource_requirements.get(
            next(filter(lambda r: resource_requirements[r] is not None, self.container_config.cpu.get), None)
        )
        millicores = Millicore.parse(value)
        # NOTE: use copy + update to accept values from mainline outside of our range
        cpu = self.container_config.cpu.copy(update={"pinned": True, "value": millicores})
        cpu.request = resource_requirements.get(ResourceRequirement.request)
        cpu.limit = resource_requirements.get(ResourceRequirement.limit)
        return cpu
    @property
    def main_memory(self) -> Memory:
        """
        Return the current Memory setting for the main containers.
        """
        # Determine the value in priority order from the config
        resource_requirements = self.main_container.get_resource_requirements('memory')
        value = resource_requirements.get(
            next(filter(lambda r: resource_requirements[r] is not None, self.container_config.memory.get), None)
        )
        short_byte_size = ShortByteSize.validate(value)
        # NOTE: use copy + update to accept values from mainline outside of our range
        memory = self.container_config.memory.copy(update={"pinned": True, "value": short_byte_size})
        memory.request = resource_requirements.get(ResourceRequirement.request)
        memory.limit = resource_requirements.get(ResourceRequirement.limit)
        return memory
    @property
    def main_replicas(self) -> servo.Replicas:
        """
        Return the current Replicas setting for the main Pods Deployment.
        NOTE: This is a synthetic setting because the replica count of the main Deployment is not
        under out control. The min, max, and value are aligned on each synthetic read.
        """
        return servo.Replicas(
            min=0,
            max=99999,
            value=self.target_controller.replicas,
            pinned=True,
        )
    @property
    def main_name(self) -> str:
        """Return the name for identifying the main instance settings & metrics.
        The name respects the alias defined in the config or else synthesizes a name from the Deployment
        and Container names.
        """
        return (
            self.container_config.alias
            or f"{self.target_controller_config.name}/{self.container_config.name}"
        )
    def to_components(self) -> List[servo.Component]:
        """
        Return a Component representation of the canary and its reference target.
        Note that all settings on the target are implicitly pinned because only the canary
        is to be modified during optimization.
        """
        return [
            servo.Component(
                name=self.main_name,
                settings=[
                    self.main_cpu,
                    self.main_memory,
                    self.main_replicas,
                ],
            ),
            servo.Component(
                name=self.name,
                settings=[
                    self.tuning_cpu,
                    self.tuning_memory,
                    self.tuning_replicas,
                ],
            ),
        ]
    async def rollback(self, error: Optional[Exception] = None) -> None:
        """
        Not supported. Raises a TypeError when called.
        Rollbacks are not supported by the canary optimization strategy
        because they are dependent on Kubernetes Deployments.
        """
        raise TypeError(
            (
                "rollback is not supported under the canary optimization strategy because rollbacks are applied to "
                "Kubernetes Deployment objects and canary optimization is performed against a standalone Pod."
            )
        )
    async def destroy(self, error: Optional[Exception] = None) -> None:
        """Delete the tuning Pod if one exists; a no-op otherwise."""
        if await self.delete_tuning_pod(raise_if_not_found=False) is None:
            self.logger.debug(f'no tuning pod exists, ignoring destroy')
            return
        self.logger.success(f'destroyed tuning Pod "{self.tuning_pod_name}"')
    async def shutdown(self, error: Optional[Exception] = None) -> None:
        """Shut down the optimization by destroying the tuning Pod."""
        await self.destroy(error)
    async def handle_error(self, error: Exception) -> bool:
        """Handle a failure per the configured FailureMode.

        For rollback/shutdown modes the tuning Pod is destroyed and recreated
        against the controller's baseline, then the original error is re-raised
        (chained) so it still reaches the backend. Other modes defer to the base
        class behavior.
        """
        if self.on_failure == FailureMode.rollback or self.on_failure == FailureMode.shutdown:
            # Ensure that we chain any underlying exceptions that may occur
            try:
                if self.on_failure == FailureMode.rollback:
                    self.logger.warning(
                        f"cannot rollback a tuning Pod: falling back to shutdown: {error}"
                    )
                await asyncio.wait_for(self.shutdown(), timeout=self.timeout.total_seconds())
                # create a new canary against baseline
                self.logger.info(
                    "creating new tuning pod against baseline following failed adjust"
                )
                await self._configure_tuning_pod_template_spec()  # reset to baseline from the target controller
                self.tuning_pod = await self.create_or_recreate_tuning_pod()
                raise error  # Always communicate errors to backend unless ignored
            except Exception as handler_error:
                raise handler_error from error
        else:
            return await super().handle_error(error)
    async def is_ready(self) -> bool:
        """Return True when the tuning Pod is ready and has not restarted."""
        is_ready, restart_count = await asyncio.gather(
            self.tuning_pod.is_ready(),
            self.tuning_pod.get_restart_count()
        )
        return is_ready and restart_count == 0
    async def raise_for_status(self) -> None:
        """Raise an exception if in an unhealthy state."""
        await self.tuning_pod.raise_for_status(adjustments=self.adjustments)
    class Config:
        # Allow non-pydantic types (Deployment, Pod, Container, ...) as field values
        arbitrary_types_allowed = True
        extra = pydantic.Extra.forbid
class KubernetesOptimizations(pydantic.BaseModel, servo.logging.Mixin):
"""
Models the state of resources under optimization in a Kubernetes cluster.
"""
config: "KubernetesConfiguration"
namespace: Namespace
optimizations: List[BaseOptimization]
runtime_id: str
spec_id: str
version_id: str
@classmethod
async def create(
cls, config: "KubernetesConfiguration"
) -> "KubernetesOptimizations":
"""
Read the state of all components under optimization from the cluster and return an object representation.
"""
namespace = await Namespace.read(config.namespace)
optimizations: List[BaseOptimization] = []
images = {}
runtime_ids = {}
pod_tmpl_specs = {}
for deployment_or_rollout_config in (config.deployments or []) + (config.rollouts or []):
if deployment_or_rollout_config.strategy == OptimizationStrategy.default:
if isinstance(deployment_or_rollout_config, RolloutConfiguration):
raise NotImplementedError("Saturation mode not currently supported on Argo Rollouts")
optimization = await DeploymentOptimization.create(
deployment_or_rollout_config, timeout=deployment_or_rollout_config.timeout
)
deployment_or_rollout = optimization.deployment
container = optimization.container
elif deployment_or_rollout_config.strategy == OptimizationStrategy.canary:
optimization = await CanaryOptimization.create(
deployment_or_rollout_config, timeout=deployment_or_rollout_config.timeout
)
deployment_or_rollout = optimization.target_controller
container = optimization.main_container
# Ensure the canary is available
# TODO: We don't want to do this implicitly but this is a first step
if not optimization.tuning_pod:
servo.logger.info("Creating new tuning pod...")
await optimization.create_tuning_pod()
else:
raise ValueError(
f"unknown optimization strategy: {deployment_or_rollout_config.strategy}"
)
optimizations.append(optimization)
# compile artifacts for checksum calculation
pods = await deployment_or_rollout.get_pods()
runtime_ids[optimization.name] = [pod.uid for pod in pods]
pod_tmpl_specs[deployment_or_rollout.name] = deployment_or_rollout.pod_template_spec.spec
images[container.name] = container.image
# Compute checksums for change detection
spec_id = servo.utilities.hashing.get_hash([pod_tmpl_specs[k] for k in sorted(pod_tmpl_specs.keys())])
runtime_id = servo.utilities.hashing.get_hash(runtime_ids)
version_id = servo.utilities.hashing.get_hash([images[k] for k in sorted(images.keys())])
return KubernetesOptimizations(
config=config,
namespace=namespace,
optimizations=optimizations,
spec_id=spec_id,
runtime_id=runtime_id,
version_id=version_id,
)
def to_components(self) -> List[servo.Component]:
"""
Return a list of Component objects modeling the state of local optimization activities.
Components are the canonical representation of systems under optimization. They
are used for data exchange with the Opsani API
"""
components = list(map(lambda opt: opt.to_components(), self.optimizations))
return list(itertools.chain(*components))
def to_description(self) -> servo.Description:
"""
Return a representation of the current state as a Description object.
Description objects are used to report state to the Opsani API in order
to synchronize with the Optimizer service.
Returns:
A Description of the current state.
"""
return servo.Description(components=self.to_components())
def find_optimization(self, name: str) -> Optional[BaseOptimization]:
"""
Find and return an optimization by name.
"""
return next(filter(lambda a: a.name == name, self.optimizations), None)
async def apply(self, adjustments: List[servo.Adjustment]) -> None:
"""
Apply a sequence of adjustments and wait for them to take effect on the cluster.
"""
# Exit early if there is nothing to do
if not adjustments:
self.logger.debug("early exiting from adjust: no adjustments")
return
summary = f"[{', '.join(list(map(str, adjustments)))}]"
self.logger.info(
f"Applying {len(adjustments)} Kubernetes adjustments: {summary}"
)
# Adjust settings on the local data model
for adjustment in adjustments:
if adjustable := self.find_optimization(adjustment.component_name):
self.logger.info(f"adjusting {adjustment.component_name}: {adjustment}")
adjustable.adjust(adjustment)
else:
self.logger.debug(f'ignoring unrecognized adjustment "{adjustment}"')
# Apply the changes to Kubernetes and wait for the results
timeout = self.config.timeout
if self.optimizations:
self.logger.debug(
f"waiting for adjustments to take effect on {len(self.optimizations)} optimizations"
)
try:
gather_apply = asyncio.gather(
*list(map(lambda a: a.apply(), self.optimizations)),
return_exceptions=True,
)
results = await asyncio.wait_for(gather_apply, timeout=timeout.total_seconds() + 60) # allow sub-optimization timeouts to expire first
except asyncio.exceptions.TimeoutError as error:
self.logger.error(
f"timed out after {timeout} + 60s waiting for adjustments to apply"
)
# Prevent "_GatheringFuture exception was never retrieved" warning if the above wait_for raises a timeout error
# https://bugs.python.org/issue29432
try:
await gather_apply
except asyncio.CancelledError:
pass
for optimization in self.optimizations:
if await optimization.handle_error(error):
# Stop error propagation once it has been handled
break
raise # No results to process in this case, reraise timeout if handlers didn't
for result in results:
if isinstance(result, Exception):
for optimization in self.optimizations:
if await optimization.handle_error(result):
# Stop error propagation once it has been handled
break
else:
self.logger.warning(f"failed to apply adjustments: no adjustables")
# TODO: Run sanity checks to look for out of band changes
async def raise_for_status(self) -> None:
handle_error_tasks = []
def _raise_for_task(task: asyncio.Task, optimization: BaseOptimization) -> None:
if task.done() and not task.cancelled():
if exception := task.exception():
handle_error_tasks.append(asyncio.create_task(optimization.handle_error(exception)))
tasks = []
for optimization in self.optimizations:
task = asyncio.create_task(optimization.raise_for_status())
task.add_done_callback(functools.partial(_raise_for_task, optimization=optimization))
tasks.append(task)
for future in asyncio.as_completed(tasks, timeout=self.config.timeout.total_seconds()):
try:
await future
except Exception as error:
servo.logger.exception(f"Optimization failed with error: {error}")
# TODO: first handler to raise will likely interrupt other tasks.
# Gather with return_exceptions=True and aggregate resulting exceptions before raising
await asyncio.gather(*handle_error_tasks)
async def is_ready(self):
if self.optimizations:
self.logger.debug(
f"Checking for readiness of {len(self.optimizations)} optimizations"
)
try:
results = await asyncio.wait_for(
asyncio.gather(
*list(map(lambda a: a.is_ready(), self.optimizations)),
),
timeout=self.config.timeout.total_seconds()
)
return all(results)
except asyncio.TimeoutError:
return False
else:
return True
    class Config:
        # Permit non-pydantic field types (e.g. Kubernetes client objects) on this model.
        arbitrary_types_allowed = True
DNSSubdomainName = pydantic.constr(
strip_whitespace=True,
min_length=1,
max_length=253,
regex="^[0-9a-zA-Z]([0-9a-zA-Z\\.-])*[0-9A-Za-z]$",
)
DNSSubdomainName.__doc__ = (
"""DNSSubdomainName models a Kubernetes DNS Subdomain Name used as the name for most resource types.
Valid DNS Subdomain Names conform to [RFC 1123](https://tools.ietf.org/html/rfc1123) and must:
* contain no more than 253 characters
* contain only lowercase alphanumeric characters, '-' or '.'
* start with an alphanumeric character
* end with an alphanumeric character
See https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names
"""
)
DNSLabelName = pydantic.constr(
strip_whitespace=True,
min_length=1,
max_length=63,
regex="^[0-9a-zA-Z]([0-9a-zA-Z-])*[0-9A-Za-z]$",
)
DNSLabelName.__doc__ = (
"""DNSLabelName models a Kubernetes DNS Label Name identified used to name some resource types.
Valid DNS Label Names conform to [RFC 1123](https://tools.ietf.org/html/rfc1123) and must:
* contain at most 63 characters
* contain only lowercase alphanumeric characters or '-'
* start with an alphanumeric character
* end with an alphanumeric character
See https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names
"""
)
ContainerTagName = pydantic.constr(
strip_whitespace=True,
min_length=1,
max_length=128,
regex="^[0-9a-zA-Z]([0-9a-zA-Z_\\.\\-/:@])*$",
) # NOTE: This regex is not a full validation
ContainerTagName.__doc__ = (
"""ContainerTagName models the name of a container referenced in a Kubernetes manifest.
Valid container tags must:
* be valid ASCII and may contain lowercase and uppercase letters, digits, underscores, periods and dashes.
* not start with a period or a dash
* may contain a maximum of 128 characters
"""
)
class EnvironmentConfiguration(servo.BaseConfiguration):
    """Placeholder configuration class for environment settings (no fields defined yet)."""
    ...
class CommandConfiguration(servo.BaseConfiguration):
    """Placeholder configuration class for command settings (no fields defined yet)."""
    ...
class ContainerConfiguration(servo.BaseConfiguration):
    """
    The ContainerConfiguration class models the configuration of an optimizeable container within a Kubernetes Deployment.
    """

    name: ContainerTagName
    alias: Optional[ContainerTagName]  # optional alternate name for the container
    command: Optional[str]  # TODO: create model...
    cpu: CPU  # adjustable CPU resource settings (min/max/step)
    memory: Memory  # adjustable memory resource settings (min/max/step)
    env: Optional[List[str]]  # (adjustable environment variables) TODO: create model...
    # NOTE(review): presumably env vars applied to the container as-is, not adjusted — confirm against usage
    static_environment_variables: Optional[Dict[str, str]]
class OptimizationStrategy(str, enum.Enum):
    """
    OptimizationStrategy is an enumeration of the possible ways to perform optimization on a Kubernetes Deployment.
    """

    default = "default"
    """The default strategy directly applies adjustments to the target Deployment and its containers.
    """

    canary = "canary"
    """The canary strategy creates a servo managed standalone tuning Pod based on the target Deployment and makes
    adjustments to it instead of the Deployment itself.
    """
class BaseOptimizationStrategyConfiguration(pydantic.BaseModel):
    """Base model for strategy configuration objects; subclasses pin `type` to a single strategy."""

    # `const=True` requires subclasses to declare a fixed discriminator value.
    type: OptimizationStrategy = pydantic.Field(..., const=True)

    def __eq__(self, other) -> bool:
        # Allow direct equality comparison against bare OptimizationStrategy enum values.
        if isinstance(other, OptimizationStrategy):
            return self.type == other
        return super().__eq__(other)

    class Config:
        # Reject unknown keys so config typos fail fast.
        extra = pydantic.Extra.forbid
class DefaultOptimizationStrategyConfiguration(BaseOptimizationStrategyConfiguration):
    """Strategy configuration for direct (in-place) optimization of the target workload."""
    type = pydantic.Field(OptimizationStrategy.default, const=True)
class CanaryOptimizationStrategyConfiguration(BaseOptimizationStrategyConfiguration):
    """Strategy configuration for canary (tuning Pod) optimization of the target workload."""
    type = pydantic.Field(OptimizationStrategy.canary, const=True)
    alias: Optional[ContainerTagName]  # optional alternate name for the tuning target
class FailureMode(str, enum.Enum):
    """
    Enumerates the strategies for responding to a failed adjustment of a Kubernetes resource.
    """

    rollback = "rollback"
    shutdown = "shutdown"
    ignore = "ignore"
    exception = "exception"

    destroy = "destroy"  # deprecated, but accepted as "shutdown"

    @classmethod
    def options(cls) -> List[str]:
        """
        Return a list of strings that identifies all failure mode configuration options.
        """
        return [mode.value for mode in cls.__members__.values()]
class PermissionSet(pydantic.BaseModel):
    """Permissions objects model Kubernetes permissions granted through RBAC."""

    group: str  # API group (empty string for the core API group)
    resources: List[str]  # resource names within the group
    verbs: List[str]  # RBAC verbs required on each resource
# Baseline RBAC permissions required by the connector for optimizing Deployments.
STANDARD_PERMISSIONS = [
    PermissionSet(
        group="apps",
        resources=["deployments", "replicasets"],
        verbs=["get", "list", "watch", "update", "patch"],
    ),
    PermissionSet(
        group="",
        resources=["namespaces"],
        verbs=["get"],
    ),
    PermissionSet(
        group="",
        resources=["pods", "pods/logs", "pods/status"],
        verbs=["create", "delete", "get", "list", "watch"],
    ),
]

# Additional RBAC permissions required only when Argo Rollouts are targeted.
ROLLOUT_PERMISSIONS = [
    PermissionSet(
        group="argoproj.io",
        resources=["rollouts", "rollouts/status"],
        verbs=["get", "list", "watch", "update", "patch"],
    ),
]
class BaseKubernetesConfiguration(servo.BaseConfiguration):
    """
    BaseKubernetesConfiguration provides a set of configuration primitives for optimizable Kubernetes resources.

    Child classes of `BaseKubernetesConfiguration` such as the `DeploymentConfiguration` can benefit from
    the cascading configuration behavior implemented on the `KubernetesConfiguration` class.

    Common settings will be cascaded from the containing class for attributes if they have not been explicitly set
    and are equal to the default value. Settings that are mandatory in the superclass (such as timeout and namespace)
    but are available for override should be declared as optional on `BaseKubernetesConfiguration` and overridden and
    declared as mandatory in `KubernetesConfiguration`.
    """

    kubeconfig: Optional[pydantic.FilePath] = pydantic.Field(
        description="Path to the kubeconfig file. If `None`, use the default from the environment.",
    )
    context: Optional[str] = pydantic.Field(description="Name of the kubeconfig context to use.")
    namespace: Optional[DNSSubdomainName] = pydantic.Field(
        description="Kubernetes namespace where the target deployments are running.",
    )
    settlement: Optional[servo.Duration] = pydantic.Field(
        description="Duration to observe the application after an adjust to ensure the deployment is stable. May be overridden by optimizer supplied `control.adjust.settlement` value."
    )
    on_failure: FailureMode = pydantic.Field(
        FailureMode.exception,
        description=f"How to handle a failed adjustment. Options are: {servo.utilities.strings.join_to_series(list(FailureMode.__members__.values()))}",
    )
    timeout: Optional[servo.Duration] = pydantic.Field(
        description="Time interval to wait before considering Kubernetes operations to have failed."
    )

    @pydantic.validator("on_failure")
    def validate_failure_mode(cls, v):
        # Accept the deprecated 'destroy' mode for backward compatibility, mapping it to 'shutdown'.
        if v == FailureMode.destroy:
            servo.logger.warning(f"Deprecated value 'destroy' used for 'on_failure', replacing with 'shutdown'")
            return FailureMode.shutdown
        return v
# Union of the accepted forms for specifying an optimization strategy in configuration:
# either a bare OptimizationStrategy enum value or a full strategy configuration object.
StrategyTypes = Union[
    OptimizationStrategy,
    DefaultOptimizationStrategyConfiguration,
    CanaryOptimizationStrategyConfiguration,
]
class DeploymentConfiguration(BaseKubernetesConfiguration):
    """
    The DeploymentConfiguration class models the configuration of an optimizable Kubernetes Deployment.
    """

    name: DNSSubdomainName
    containers: List[ContainerConfiguration]
    strategy: StrategyTypes = OptimizationStrategy.default  # Deployments default to in-place optimization
    replicas: servo.Replicas
class RolloutConfiguration(BaseKubernetesConfiguration):
    """
    The RolloutConfiguration class models the configuration of an optimizable Argo Rollout.
    """

    name: DNSSubdomainName
    containers: List[ContainerConfiguration]
    strategy: StrategyTypes = OptimizationStrategy.canary  # Rollouts default to canary (tuning Pod) optimization
    replicas: servo.Replicas
class KubernetesConfiguration(BaseKubernetesConfiguration):
    """Top-level configuration for the Kubernetes connector.

    Provides concrete defaults for settings that are optional on
    `BaseKubernetesConfiguration` and cascades common settings onto child
    deployment/rollout configurations (see `cascade_common_settings`).
    """

    namespace: DNSSubdomainName = DNSSubdomainName("default")
    timeout: servo.Duration = "5m"
    permissions: List[PermissionSet] = pydantic.Field(
        STANDARD_PERMISSIONS,
        description="Permissions required by the connector to operate in Kubernetes.",
    )

    deployments: Optional[List[DeploymentConfiguration]] = pydantic.Field(
        description="Deployments to be optimized.",
    )

    rollouts: Optional[List[RolloutConfiguration]] = pydantic.Field(
        description="Argo rollouts to be optimized.",
    )

    @pydantic.root_validator
    def check_deployment_and_rollout(cls, values):
        # At least one optimization target (deployment or rollout) must be configured.
        if (not values.get('deployments')) and (not values.get('rollouts')):
            raise ValueError("No optimization target(s) were specified")

        return values

    @classmethod
    def generate(cls, **kwargs) -> "KubernetesConfiguration":
        """Return an example configuration for scaffolding a new servo setup."""
        return cls(
            namespace="default",
            description="Update the namespace, deployment, etc. to match your Kubernetes cluster",
            deployments=[
                DeploymentConfiguration(
                    name="app",
                    replicas=servo.Replicas(
                        min=1,
                        max=2,
                    ),
                    containers=[
                        ContainerConfiguration(
                            name="opsani/fiber-http:latest",
                            cpu=CPU(min="250m", max=4, step="125m"),
                            memory=Memory(min="256MiB", max="4GiB", step="128MiB"),
                        )
                    ],
                )
            ],
            **kwargs,
        )

    def __init__(self, *args, **kwargs) -> None: # noqa: D107
        super().__init__(*args, **kwargs)
        # Push common settings down into child configurations immediately after init.
        self.cascade_common_settings()

    def cascade_common_settings(self, *, overwrite: bool = False) -> None:
        """
        Apply common settings to child models that inherit from BaseKubernetesConfiguration.

        This method enables hierarchical overrides of common configuration values
        based on shared inheritance. Each attribute is introspected and if it inherits from
        `BaseKubernetesConfiguration`, any common attribute values are copied onto the child
        model, cascading them downward. Only attributes whose value is equal to the default
        and have not been explicitly set are updated.

        # FIXME: Cascaded settings should only be optional if they can be optional at the top level. Right now we are implying that namespace can be None as well.
        """
        for name, field in self.__fields__.items():
            if issubclass(field.type_, BaseKubernetesConfiguration):
                attribute = getattr(self, name)
                # Normalize scalar children into a single-element collection for iteration.
                for obj in (
                    attribute if isinstance(attribute, Collection) else [attribute]
                ):
                    # don't cascade if optional and not set
                    if obj is None:
                        continue
                    for (
                        field_name,
                        field,
                    ) in BaseKubernetesConfiguration.__fields__.items():
                        if field_name in servo.BaseConfiguration.__fields__:
                            # don't cascade from the base class
                            continue

                        # explicitly-set child values win unless overwrite is requested
                        if field_name in obj.__fields_set__ and not overwrite:
                            self.logger.trace(
                                f"skipping config cascade for field '{field_name}' set with value '{getattr(obj, field_name)}'"
                            )
                            continue

                        current_value = getattr(obj, field_name)
                        if overwrite or current_value == field.default:
                            parent_value = getattr(self, field_name)
                            setattr(obj, field_name, parent_value)
                            self.logger.trace(
                                f"cascaded setting '{field_name}' from KubernetesConfiguration to child '{attribute}': value={parent_value}"
                            )

                        else:
                            self.logger.trace(
                                f"declining to cascade value to field '{field_name}': the default value is set and overwrite is false"
                            )

    async def load_kubeconfig(self) -> None:
        """
        Asynchronously load the Kubernetes configuration
        """
        config_file = pathlib.Path(self.kubeconfig or kubernetes_asyncio.config.kube_config.KUBE_CONFIG_DEFAULT_LOCATION).expanduser()
        if config_file.exists():
            await kubernetes_asyncio.config.load_kube_config(
                config_file=str(config_file),
                context=self.context,
            )
        elif os.getenv("KUBERNETES_SERVICE_HOST"):
            # Running inside a cluster: use the mounted service account credentials.
            kubernetes_asyncio.config.load_incluster_config()
        else:
            raise RuntimeError(
                f"unable to configure Kubernetes client: no kubeconfig file nor in-cluser environment variables found"
            )
# Resolve forward references now that all dependent models are defined.
KubernetesOptimizations.update_forward_refs()
DeploymentOptimization.update_forward_refs()
CanaryOptimization.update_forward_refs()
class KubernetesChecks(servo.BaseChecks):
    """Checks for ensuring that the Kubernetes connector is ready to run."""

    config: KubernetesConfiguration

    @servo.require("Connectivity to Kubernetes")
    async def check_kubernetes_connectivity(self) -> None:
        """Verify that the Kubernetes API server is reachable."""
        async with kubernetes_asyncio.client.api_client.ApiClient() as api:
            v1 = kubernetes_asyncio.client.VersionApi(api)
            await v1.get_code()

    @servo.warn("Kubernetes version")
    async def check_kubernetes_version(self) -> None:
        """Verify that the cluster runs a supported Kubernetes version (>= 1.16)."""
        async with kubernetes_asyncio.client.api_client.ApiClient() as api:
            v1 = kubernetes_asyncio.client.VersionApi(api)
            version = await v1.get_code()
            assert int(version.major) >= 1
            # EKS reports minor versions such as "17+" -- strip non-digits before parsing.
            assert int("".join(c for c in version.minor if c.isdigit())) >= 16

    @servo.require("Required permissions")
    async def check_kubernetes_permissions(self) -> None:
        """Verify via SelfSubjectAccessReview that every required RBAC permission is granted."""
        async with kubernetes_asyncio.client.api_client.ApiClient() as api:
            v1 = kubernetes_asyncio.client.AuthorizationV1Api(api)
            # Copy the configured permissions so the config object is never mutated,
            # and use extend() because ROLLOUT_PERMISSIONS is a *list* of PermissionSet
            # objects (append() would nest the list and break iteration below).
            required_permissions = list(self.config.permissions)
            if self.config.rollouts:
                required_permissions.extend(ROLLOUT_PERMISSIONS)
            for permission in required_permissions:
                for resource in permission.resources:
                    for verb in permission.verbs:
                        attributes = kubernetes_asyncio.client.models.V1ResourceAttributes(
                            namespace=self.config.namespace,
                            group=permission.group,
                            resource=resource,
                            verb=verb,
                        )

                        spec = kubernetes_asyncio.client.models.V1SelfSubjectAccessReviewSpec(
                            resource_attributes=attributes
                        )
                        review = kubernetes_asyncio.client.models.V1SelfSubjectAccessReview(spec=spec)
                        access_review = await v1.create_self_subject_access_review(
                            body=review
                        )
                        assert (
                            access_review.status.allowed
                        ), f'Not allowed to "{verb}" resource "{resource}"'

    @servo.require('Namespace "{self.config.namespace}" is readable')
    async def check_kubernetes_namespace(self) -> None:
        """Verify that the configured namespace can be read."""
        await Namespace.read(self.config.namespace)

    @servo.multicheck('Deployment "{item.name}" is readable')
    async def check_kubernetes_deployments(self) -> Tuple[Iterable, servo.CheckHandler]:
        """Verify that each configured Deployment can be read."""
        async def check_dep(dep_config: DeploymentConfiguration) -> None:
            await Deployment.read(dep_config.name, dep_config.namespace)

        return (self.config.deployments or []), check_dep

    @servo.multicheck('Rollout "{item.name}" is readable')
    async def check_kubernetes_rollouts(self) -> Tuple[Iterable, servo.CheckHandler]:
        """Verify that each configured Rollout can be read."""
        async def check_rol(rol_config: RolloutConfiguration) -> None:
            await Rollout.read(rol_config.name, rol_config.namespace)

        return (self.config.rollouts or []), check_rol

    async def _check_container_resource_requirements(
        self,
        target_controller: Union[Deployment, Rollout],
        target_config: Union[DeploymentConfiguration, RolloutConfiguration]
    ) -> None:
        """Assert that every configured container declares at least one of the expected
        resource requirements (request/limit) for each optimizable resource."""
        for cont_config in target_config.containers:
            container = target_controller.find_container(cont_config.name)
            assert container, f"{type(target_controller).__name__} {target_config.name} has no container {cont_config.name}"

            for resource in Resource.values():
                current_state = None
                container_requirements = container.get_resource_requirements(resource)
                get_requirements = getattr(cont_config, resource).get
                # Accept the first requirement (request or limit) that the spec defines.
                for requirement in get_requirements:
                    current_state = container_requirements.get(requirement)
                    if current_state:
                        break

                assert current_state, (
                    f"{type(target_controller).__name__} {target_config.name} target container {cont_config.name} spec does not define the resource {resource}. "
                    f"At least one of the following must be specified: {', '.join(map(lambda req: req.resources_key, get_requirements))}"
                )

    @servo.multicheck('Containers in the "{item.name}" Deployment have resource requirements')
    async def check_kubernetes_resource_requirements(self) -> Tuple[Iterable, servo.CheckHandler]:
        """Verify that each Deployment's containers declare resource requirements."""
        async def check_dep_resource_requirements(
            dep_config: DeploymentConfiguration,
        ) -> None:
            deployment = await Deployment.read(dep_config.name, dep_config.namespace)
            await self._check_container_resource_requirements(deployment, dep_config)

        return (self.config.deployments or []), check_dep_resource_requirements

    @servo.multicheck('Containers in the "{item.name}" Rollout have resource requirements')
    async def check_kubernetes_rollout_resource_requirements(self) -> Tuple[Iterable, servo.CheckHandler]:
        """Verify that each Rollout's containers declare resource requirements."""
        async def check_rol_resource_requirements(
            rol_config: RolloutConfiguration,
        ) -> None:
            rollout = await Rollout.read(rol_config.name, rol_config.namespace)
            await self._check_container_resource_requirements(rollout, rol_config)

        return (self.config.rollouts or []), check_rol_resource_requirements

    @servo.multicheck('Deployment "{item.name}" is ready')
    async def check_kubernetes_deployments_are_ready(self) -> Tuple[Iterable, servo.CheckHandler]:
        """Verify that each configured Deployment reports ready."""
        async def check_deployment(dep_config: DeploymentConfiguration) -> None:
            deployment = await Deployment.read(dep_config.name, dep_config.namespace)
            if not await deployment.is_ready():
                raise RuntimeError(f'Deployment "{deployment.name}" is not ready')

        return (self.config.deployments or []), check_deployment

    @servo.multicheck('Rollout "{item.name}" is ready')
    async def check_kubernetes_rollouts_are_ready(self) -> Tuple[Iterable, servo.CheckHandler]:
        """Verify that each configured Rollout reports ready."""
        async def check_rollout(rol_config: RolloutConfiguration) -> None:
            rollout = await Rollout.read(rol_config.name, rol_config.namespace)
            if not await rollout.is_ready():
                raise RuntimeError(f'Rollout "{rollout.name}" is not ready')

        return (self.config.rollouts or []), check_rollout
@servo.metadata(
    description="Kubernetes adjust connector",
    version="1.5.0",
    homepage="https://github.com/opsani/kubernetes-connector",
    license=servo.License.apache2,
    maturity=servo.Maturity.stable,
)
class KubernetesConnector(servo.BaseConnector):
    """Servo connector that describes and adjusts optimizable Kubernetes workloads."""

    config: KubernetesConfiguration

    @servo.on_event()
    async def attach(self, servo_: servo.Servo) -> None:
        """Load kubeconfig and record cluster telemetry when the connector attaches."""
        # Ensure we are ready to talk to Kubernetes API
        await self.config.load_kubeconfig()

        self.telemetry[f"{self.name}.namespace"] = self.config.namespace

        # Version/platform telemetry is best-effort; failures are logged at DEBUG and suppressed.
        with self.logger.catch(level="DEBUG", message=f"Unable to set version telemetry for connector {self.name}"):
            async with kubernetes_asyncio.client.api_client.ApiClient() as api:
                v1 =kubernetes_asyncio.client.VersionApi(api)
                version_obj = await v1.get_code()
                self.telemetry[f"{self.name}.version"] = f"{version_obj.major}.{version_obj.minor}"
                self.telemetry[f"{self.name}.platform"] = version_obj.platform

    @servo.on_event()
    async def detach(self, servo_: servo.Servo) -> None:
        """Remove telemetry recorded by attach()."""
        self.telemetry.remove(f"{self.name}.namespace")
        self.telemetry.remove(f"{self.name}.version")
        self.telemetry.remove(f"{self.name}.platform")

    @servo.on_event()
    async def describe(self, control: servo.Control = servo.Control()) -> servo.Description:
        """Describe the current state of the optimizable components."""
        state = await self._create_optimizations()
        return state.to_description()

    @servo.on_event()
    async def components(self) -> List[servo.Component]:
        """Return the optimizable components discovered from configuration."""
        state = await self._create_optimizations()
        return state.to_components()

    @servo.before_event(servo.Events.measure)
    async def before_measure(self, *, metrics: List[str] = None, control: servo.Control = servo.Control()) -> None:
        # Build state before a measurement to ensure all necessary setup is done
        # (e.g., Tuning Pod is up and running)
        await self._create_optimizations()

    @servo.on_event()
    async def adjust(
        self, adjustments: List[servo.Adjustment], control: servo.Control = servo.Control()
    ) -> servo.Description:
        """Apply the given adjustments to the cluster, optionally waiting for a settlement period."""
        state = await self._create_optimizations()

        # Apply the adjustments and emit progress status
        progress_logger = lambda p: self.logger.info(
            p.annotate(f"waiting up to {p.timeout} for adjustments to be applied...", prefix=False),
            progress=p.progress,
        )
        progress = servo.EventProgress(timeout=self.config.timeout)
        future = asyncio.create_task(state.apply(adjustments))
        future.add_done_callback(lambda _: progress.trigger())

        await asyncio.gather(
            future,
            progress.watch(progress_logger),
        )

        # Handle settlement
        settlement = control.settlement or self.config.settlement
        if settlement:
            self.logger.info(
                f"Settlement duration of {settlement} requested, waiting for pods to settle..."
            )
            progress = servo.DurationProgress(settlement)
            progress_logger = lambda p: self.logger.info(
                p.annotate(f"waiting {settlement} for pods to settle...", False),
                progress=p.progress,
            )
            async def readiness_monitor() -> None:
                # Poll readiness during the settlement window; an unready target triggers
                # the optimization's own error handling via raise_for_status().
                while not progress.finished:
                    if not await state.is_ready():
                        # Raise a specific exception if the optimization defines one
                        try:
                            await state.raise_for_status()
                        except servo.AdjustmentRejectedError as e:
                            # Update rejections with start-failed to indicate the initial rollout was successful
                            if e.reason == "start-failed":
                                e.reason = "unstable"
                            raise

                    await asyncio.sleep(servo.Duration('50ms').total_seconds())

            await asyncio.gather(
                progress.watch(progress_logger),
                readiness_monitor()
            )
            if not await state.is_ready():
                self.logger.warning("Rejection triggered without running error handler")
                raise servo.AdjustmentRejectedError(
                    "Optimization target became unready after adjustment settlement period (WARNING: error handler was not run)",
                    reason="unstable"
                )
            self.logger.info(
                f"Settlement duration of {settlement} has elapsed, resuming optimization."
            )

        description = state.to_description()
        return description

    @servo.on_event()
    async def check(
        self,
        matching: Optional[servo.CheckFilter],
        halt_on: Optional[servo.ErrorSeverity] = servo.ErrorSeverity.critical,
    ) -> List[servo.Check]:
        """Run the Kubernetes readiness checks."""
        return await KubernetesChecks.run(
            self.config, matching=matching, halt_on=halt_on
        )

    async def _create_optimizations(self) -> KubernetesOptimizations:
        # Build a KubernetesOptimizations object with progress reporting
        # This ensures that the Servo isn't reported as offline
        progress_logger = lambda p: self.logger.info(
            p.annotate(f"waiting up to {p.timeout} for Kubernetes optimization setup to complete", prefix=False),
            progress=p.progress,
        )
        progress = servo.EventProgress(timeout=self.config.timeout)
        future = asyncio.create_task(KubernetesOptimizations.create(self.config))
        future.add_done_callback(lambda _: progress.trigger())

        await asyncio.gather(
            future,
            progress.watch(progress_logger),
        )

        return future.result()
def selector_string(selectors: Mapping[str, str]) -> str:
    """Render a dictionary of selectors as a Kubernetes selector string.

    Args:
        selectors: The selectors to stringify.

    Returns:
        A comma-separated ``key=value`` string for the given dictionary.
    """
    pairs = (f"{key}={value}" for key, value in selectors.items())
    return ",".join(pairs)
def selector_kwargs(
    fields: Mapping[str, str] = None,
    labels: Mapping[str, str] = None,
) -> Dict[str, str]:
    """Build keyword arguments for Kubernetes API object selectors.

    Args:
        fields: A mapping of fields used to restrict the returned collection of
            Objects to only those which match these field selectors. By default,
            no restricting is done.
        labels: A mapping of labels used to restrict the returned collection of
            Objects to only those which match these label selectors. By default,
            no restricting is done.

    Returns:
        A dictionary that can be used as kwargs for many Kubernetes API calls for
        label and field selectors.
    """
    candidates = (
        ("field_selector", fields),
        ("label_selector", labels),
    )
    return {
        kwarg: selector_string(mapping)
        for kwarg, mapping in candidates
        if mapping is not None
    }
class ConfigMap(KubernetesModel):
    """Kubetest wrapper around a Kubernetes `ConfigMap`_ API Object.

    The actual ``kubernetes.client.V1ConfigMap`` instance that this
    wraps can be accessed via the ``obj`` instance member.

    This wrapper provides some convenient functionality around the
    API Object and provides some state management for the `ConfigMap`_.

    .. _ConfigMap:
        https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#configmap-v1-core
    """

    obj_type =kubernetes_asyncio.client.V1ConfigMap

    api_clients = {
        "preferred":kubernetes_asyncio.client.CoreV1Api,
        "v1":kubernetes_asyncio.client.CoreV1Api,
    }

    @classmethod
    async def read(cls, name: str, namespace: str) -> "ConfigMap":
        """Read a ConfigMap by name under the given namespace.

        Args:
            name: The name of the Deployment to read.
            namespace: The namespace to read the Deployment from.
        """
        async with cls.preferred_client() as api_client:
            obj = await api_client.read_namespaced_config_map(name, namespace)
            return ConfigMap(obj)

    async def create(self, namespace: str = None) -> None:
        """Create the ConfigMap under the given namespace.

        Args:
            namespace: The namespace to create the ConfigMap under.
                If the ConfigMap was loaded via the kubetest client, the
                namespace will already be set, so it is not needed here.
                Otherwise, the namespace will need to be provided.
        """
        if namespace is None:
            namespace = self.namespace

        servo.logger.info(f'creating configmap "{self.name}" in namespace "{self.namespace}"')
        servo.logger.debug(f"configmap: {self.obj}")

        # NOTE(review): create/delete/refresh call self.api_client directly while patch()
        # uses it as an async context manager — confirm KubernetesModel.api_client semantics.
        self.obj = await self.api_client.create_namespaced_config_map(
            namespace=namespace,
            body=self.obj,
        )

    async def patch(self) -> None:
        """
        Patches a ConfigMap.
        """
        self.logger.info(f'patching ConfigMap "{self.name}"')
        self.logger.trace(f"ConfigMap: {self.obj}")
        async with self.api_client() as api_client:
            await api_client.patch_namespaced_config_map(
                name=self.name,
                namespace=self.namespace,
                body=self.obj,
            )

    async def delete(self, options:kubernetes_asyncio.client.V1DeleteOptions = None) ->kubernetes_asyncio.client.V1Status:
        """Delete the ConfigMap.

        This method expects the ConfigMap to have been loaded or otherwise
        assigned a namespace already. If it has not, the namespace will need
        to be set manually.

        Args:
            options: Options for ConfigMap deletion.

        Returns:
            The status of the delete operation.
        """
        if options is None:
            options = kubernetes_asyncio.client.V1DeleteOptions()

        servo.logger.info(f'deleting configmap "{self.name}"')
        servo.logger.debug(f"delete options: {options}")
        servo.logger.debug(f"configmap: {self.obj}")

        return await self.api_client.delete_namespaced_config_map(
            name=self.name,
            namespace=self.namespace,
            body=options,
        )

    async def refresh(self) -> None:
        """Refresh the underlying Kubernetes ConfigMap resource."""
        self.obj = await self.api_client.read_namespaced_config_map(
            name=self.name,
            namespace=self.namespace,
        )

    async def is_ready(self) -> bool:
        """Check if the ConfigMap is in the ready state.

        ConfigMaps do not have a "status" field to check, so we will
        measure their readiness status by whether or not they exist
        on the cluster.

        Returns:
            True if in the ready state; False otherwise.
        """
        try:
            await self.refresh()
        except:  # noqa
            return False

        return True
def dns_subdomainify(name: str) -> str:
    """Transform a string into a valid Kubernetes DNS Subdomain Name.

    Valid DNS Subdomain Names conform to [RFC 1123](https://tools.ietf.org/html/rfc1123) and must:

        * contain no more than 253 characters
        * contain only lowercase alphanumeric characters, '-' or '.'
        * start with an alphanumeric character
        * end with an alphanumeric character

    See https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names
    """
    # lowercase alphanumerics
    name = name.lower()

    # replace slashes with dots
    name = re.sub(r'\/', '.', name)

    # replace whitespace with hyphens
    name = re.sub(r'\s', '-', name)

    # strip any remaining disallowed characters
    # BUGFIX: the previous pattern (r'/[^a-z0-9\.\-]+/g') was a JavaScript-style
    # regex literal and never matched in Python, so disallowed characters leaked through.
    name = re.sub(r'[^a-z0-9\.\-]+', '', name)

    # truncate to our maximum length
    name = name[:253]

    # ensure starts with an alphanumeric by prefixing with `0-`
    boundaryRegex = re.compile('^[a-z0-9]')
    if not boundaryRegex.match(name):
        name = ('0-' + name)[:253]

    # ensure ends with an alphanumeric by suffixing with `-1`
    if not boundaryRegex.match(name[-1]):
        name = name[:251] + '-1'

    return name
def dns_labelize(name: str) -> str:
    """
    Transform a string into a valid Kubernetes label value.

    Valid Kubernetes label values:

        * must be 63 characters or less (cannot be empty)
        * must begin and end with an alphanumeric character ([a-z0-9A-Z])
        * may contain dashes (-), underscores (_), dots (.), and alphanumerics between
    """

    # replace slashes with underscores
    name = re.sub(r'\/', '_', name)

    # replace whitespace with hyphens
    name = re.sub(r'\s', '-', name)

    # strip any remaining disallowed characters
    name = re.sub(r'[^a-z0-9A-Z\.\-_]+', '', name)

    # truncate to our maximum length
    name = name[:63]

    # ensure starts with an alphanumeric by prefixing with `0-`
    boundaryRegex = re.compile('[a-z0-9A-Z]')
    # BUGFIX: match() is anchored at the start of the string, so this tests the first
    # character and safely handles the empty string (name[0] raised IndexError on "").
    if not boundaryRegex.match(name):
        name = ('0-' + name)[:63]

    # ensure ends with an alphanumeric by suffixing with `-1`
    if not boundaryRegex.match(name[-1]):
        name = name[:61] + '-1'

    return name
def set_container_resource_defaults_from_config(container: Container, config: ContainerConfiguration) -> None:
    """Seed the container's resource requirements (requests/limits) from the connector config.

    For every optimizable resource, any request/limit value present in the configuration
    overrides whatever the container currently declares; values absent from the config
    leave the container's existing requirements untouched.
    """
    for resource in Resource.values():
        # NOTE: cpu/memory stanza in container config
        resource_config = getattr(config, resource)
        requirements = container.get_resource_requirements(resource)
        servo.logger.debug(f"Loaded resource requirements for '{resource}': {requirements}")
        for requirement in ResourceRequirement:
            # Use the request/limit from the container.[cpu|memory].[request|limit] as default/override
            if resource_value := getattr(resource_config, requirement.name):
                if (existing_resource_value := requirements.get(requirement)) is None:
                    servo.logger.debug(f"Setting default value for {resource}.{requirement} to: {resource_value}")
                else:
                    servo.logger.debug(f"Overriding existing value for {resource}.{requirement} ({existing_resource_value}) to: {resource_value}")

                requirements[requirement] = resource_value

        servo.logger.debug(f"Setting resource requirements for '{resource}' to: {requirements}")
        container.set_resource_requirements(resource, requirements)
| StarcoderdataPython |
31025 | <gh_stars>0
# @lc app=leetcode id=230 lang=python3
#
# [230] Kth Smallest Element in a BST
#
# https://leetcode.com/problems/kth-smallest-element-in-a-bst/description/
#
# algorithms
# Medium (63.45%)
# Likes: 4133
# Dislikes: 90
# Total Accepted: 558.7K
# Total Submissions: 876.8K
# Testcase Example: '[3,1,4,null,2]\n1'
#
# Given the root of a binary search tree, and an integer k, return the k^th
# (1-indexed) smallest element in the tree.
#
#
# Example 1:
#
#
# Input: root = [3,1,4,null,2], k = 1
# Output: 1
#
#
# Example 2:
#
#
# Input: root = [5,3,6,2,4,null,null,1], k = 3
# Output: 3
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is n.
# 1 <= k <= n <= 10^4
# 0 <= Node.val <= 10^4
#
#
#
# Follow up: If the BST is modified often (i.e., we can do insert and delete
# operations) and you need to find the kth smallest frequently, how would you
# optimize?
#
# @lc tags=binary-search;tree
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# 找二叉搜索树的第k小的元素。
# 直接深度优先,递归。
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
    def kthSmallest(self, root: TreeNode, k: int) -> int:
        """Return the k-th (1-indexed) smallest value in the BST, or None if k exceeds its size."""
        # Iterative in-order traversal: node values are visited in ascending order,
        # so the k-th value popped is the answer.
        stack = []
        node = root
        remaining = k
        while stack or node:
            # Descend to the leftmost unvisited node.
            while node:
                stack.append(node)
                node = node.left
            node = stack.pop()
            remaining -= 1
            if remaining == 0:
                return node.val
            node = node.right
        # Fewer than k nodes in the tree (matches the recursive original's fallback).
        return None
# @lc code=end
# @lc main=start
# Ad-hoc smoke test: exercises kthSmallest against a large tree and the two
# LeetCode examples, printing results for manual comparison.
if __name__ == '__main__':
    print(
        str(Solution().kthSmallest(
            listToTreeNode([
                31, 30, 48, 3, None, 38, 49, 0, 16, 35, 47, None, None, None,
                2, 15, 27, 33, 37, 39, None, 1, None, 5, None, 22, 28, 32, 34,
                36, None, None, 43, None, None, 4, 11, 19, 23, None, 29, None,
                None, None, None, None, None, 40, 46, None, None, 7, 14, 17,
                21, None, 26, None, None, None, 41, 44, None, 6, 10, 13, None,
                None, 18, 20, None, 25, None, None, 42, None, 45, None, None,
                8, None, 12, None, None, None, None, None, 24, None, None,
                None, None, None, None, 9
            ]), 1)))

    print('Example 1:')
    print('Input : ')
    print('root = [3,1,4,null,2], k = 1')
    print('Exception :')
    print('1')
    print('Output :')
    print(str(Solution().kthSmallest(listToTreeNode([3, 1, 4, None, 2]), 1)))
    print()

    print('Example 2:')
    print('Input : ')
    print('root = [5,3,6,2,4,null,null,1], k = 3')
    print('Exception :')
    print('3')
    print('Output :')
    print(
        str(Solution().kthSmallest(
            listToTreeNode([5, 3, 6, 2, 4, None, None, 1]), 3)))
    print()

    pass
# @lc main=end | StarcoderdataPython |
170360 | import tensorflow as tf
slim = tf.contrib.slim
from helper_net.inception_v4 import *
import pickle
import numpy as np
def get_weights():
    """Restore an Inception V4 checkpoint and pickle its variables to 'weights.p'.

    Variables are walked in groups of four (assumes conv weight + 3 batch-norm
    tensors per layer — TODO confirm this layout holds for the full variable list;
    vars_model[i+y] will raise IndexError if the count is not a multiple of 4).
    Aux-tower variables are skipped; Logits weights/biases are grouped together.
    """
    checkpoint_file = '../checkpoints/inception_v4.ckpt'
    sess = tf.Session()
    arg_scope = inception_v4_arg_scope()
    input_tensor = tf.placeholder(tf.float32, (None, 299, 299, 3))
    with slim.arg_scope(arg_scope):
        logits, end_points = inception_v4(input_tensor, is_training=False)
    saver = tf.train.Saver()
    saver.restore(sess, checkpoint_file)
    final_weights = []
    current_bn = []
    final_lr = []
    vars_model = tf.global_variables()
    for i in range(0, len(vars_model), 4):
        for y in range(4):
            key = vars_model[i+y]
            if not "Aux" in key.name:
                if y in [1, 2, 3] and not "Logits" in key.name:
                    # Batch-norm tensors: accumulate three values per layer.
                    value = sess.run(key)
                    if y == 1:
                        current_bn = []
                        current_bn.append(value)
                    elif y == 2:
                        current_bn.append(value)
                    elif y == 3:
                        current_bn.append(value)
                        final_weights.append(current_bn)
                elif "Logits" in key.name:
                    # Final layer: collect weights then biases as one group.
                    value = sess.run(key)
                    if not "biases" in key.name:
                        final_lr.append(value)
                    else:
                        final_lr.append(value)
                        final_weights.append(final_lr)
                else:
                    # Convolution kernel (y == 0): stored as a singleton group.
                    value = sess.run(key)
                    final_weights.append([value])
    with open('weights.p', 'wb') as fp:
        pickle.dump(final_weights, fp)


if __name__ == "__main__":
    get_weights()
11201220 | # Computational Linear Algebra Ep#2 ex1
# By: <NAME>
import numpy as np
import time
if __name__ == "__main__":
    print("A:")
    # Random 10x10 matrix with integer entries in [0, 100)
    dense = np.array(np.random.randint (0,100,(10,10)))
    # Upper-triangular copy of the same matrix
    upper = np.triu(dense)
    print(dense)
    print("A1:")
    print(upper)
    print("")
    rhs = np.array(np.random.randint(0, 100, (10,1)))
    print("b: ")
    print(rhs)
    print("")
    # NOTE(review): randint includes 0, so the triangular matrix can have a
    # zero on its diagonal and the solve may raise LinAlgError -- confirm.
    t_dense_start = time.time()
    # Time the solve against the full (dense) system
    sol_dense = np.linalg.solve(dense, rhs)
    t_dense_end = time.time()
    print("x:")
    print(sol_dense)
    print("Full A matrix time: " + str(t_dense_end - t_dense_start))
    t_tri_start = time.time()
    # Time the solve against the upper-triangular system
    sol_tri = np.linalg.solve(upper, rhs)
    t_tri_end = time.time()
    print("x1:")
    print(sol_tri)
    print("Triangle matrix time: " + str(t_tri_end - t_tri_start))
| StarcoderdataPython |
1717756 | <reponame>liyuanyuan11/Python
# Demonstrates basic list operations: indexing, item assignment, and append.
bestFriends = [
    "Jerry", "Mark", "Justin", "Jonny", "Tom",
    "Marry", "Jenny", "Daniel", "Tony",
]
print(bestFriends)
print(bestFriends[0])           # first element
bestFriends[0] = "Christina"    # replace the first element in place
print(bestFriends)
bestFriends.append("Frozy")     # grow the list by one
print(bestFriends)
| StarcoderdataPython |
from scipy import misc, signal
import mrcfile
import numpy as np
from aspire.utils.numeric import xp
from aspire.utils import ensure
class Micrograph:
    """A micrograph loaded from an MRC/MRCS file, with optional margin
    cropping, squaring, shrinking and Gaussian low-pass filtering applied
    at load time.
    """

    def __init__(self, filepath, margin=None, shrink_factor=None, square=False, gauss_filter_size=None, gauss_filter_sigma=None):
        """
        :param filepath: Path of the .mrc/.mrcs file to load.
        :param margin: Pixels to discard at the edges; a scalar applied to
            all sides, or a (top, right, bottom, left) tuple/list.
        :param shrink_factor: Optional downsampling factor applied after
            cropping.
        :param square: If True, crop to a square of the smaller side length.
        :param gauss_filter_size: Optional size of the low-pass kernel.
        :param gauss_filter_sigma: Sigma of the optional low-pass kernel.
        """
        self.filepath = filepath
        self.shrink_factor = shrink_factor
        self.square = square
        self.gauss_filter_size = gauss_filter_size
        self.gauss_filter_sigma = gauss_filter_sigma

        # Attributes populated by the time this constructor returns
        # A 2-D ndarray if loading a MRC file, a 3-D ndarray if loading a MRCS file,
        # with the last dimension indicating the no. of images
        self.im = None

        self._init_margins(margin)
        self._read()

    def _init_margins(self, margin):
        """Normalize ``margin`` into the four per-side attributes."""
        if margin is None:
            t = r = b = l = None
        elif isinstance(margin, (tuple, list)):
            ensure(len(margin) == 4, 'If specifying margins as a tuple/list, specify the top/right/bottom/left margins.')
            t, r, b, l = margin
        else:  # assume scalar
            t = r = b = l = int(margin)
        self.margin_top, self.margin_right, self.margin_bottom, self.margin_left = t, r, b, l

    def _read(self):
        """Load the MRC data and apply crop/square/shrink/filter in order."""
        with mrcfile.open(self.filepath) as mrc:
            im = mrc.data.astype('double')

        # For multiple mrc files, mrcfile returns an ndarray with (shape n_images, height, width)
        # swap axes 0 and 2 so we get the more natural (height, width, n_images)
        if im.ndim == 3:
            im = np.swapaxes(im, 0, 2)
        self.original_im = im

        # Discard outer pixels (slice end is None when no margin was given)
        im = im[
            self.margin_top: -self.margin_bottom if self.margin_bottom is not None else None,
            self.margin_left: -self.margin_right if self.margin_right is not None else None
        ]

        if self.square:
            side_length = min(im.shape)
            im = im[:side_length, :side_length]

        if self.shrink_factor is not None:
            # NOTE(review): scipy.misc.imresize is deprecated/removed in
            # recent SciPy releases -- confirm the pinned SciPy version.
            im = misc.imresize(im, 1/self.shrink_factor, mode='F', interp='cubic')

        if self.gauss_filter_size is not None:
            im = signal.correlate(
                im,
                Micrograph.gaussian_filter(self.gauss_filter_size, self.gauss_filter_sigma),
                'same'
            )

        self.im = im.astype('double')
        self.shape = im.shape

    @classmethod
    def gaussian_filter(cls, size_filter, std):
        """Computes low-pass filter.

        Args:
            size_filter: Size of filter (size_filter x size_filter).
            std: sigma value in filter.

        Returns:
            numpy array kernel normalized so its entries sum to 1.
        """
        y, x = xp.mgrid[-(size_filter - 1) // 2: (size_filter - 1) // 2 + 1,
                        -(size_filter - 1) // 2: (size_filter - 1) // 2 + 1]

        # BUGFIX: parenthesize (x^2 + y^2) so BOTH axes are scaled by
        # 2*std^2. The previous expression `-x^2 - y^2 / (2*std^2)` divided
        # only the y term, producing an asymmetric, effectively much
        # narrower kernel along x.
        response = xp.exp(-(xp.square(x) + xp.square(y)) / (2 * (std ** 2))) / (xp.sqrt(2 * xp.pi) * std)
        response[response < xp.finfo('float').eps] = 0

        return xp.asnumpy(response / response.sum())  # Normalize so sum is 1
| StarcoderdataPython |
1838368 | <filename>Python/841.py
from collections import deque
class Solution(object):
    def canVisitAllRooms(self, rooms):
        """Return True iff every room is reachable starting from room 0.

        Iterative depth-first search over the key graph: keys found in a
        room are the edges to other rooms.

        :type rooms: List[List[int]]
        :rtype: bool
        """
        if not rooms:
            return True
        visited = {0}
        stack = list(rooms[0])
        while stack:
            key = stack.pop()
            # Ignore keys that point outside the building or to rooms
            # we have already opened.
            if key < len(rooms) and key not in visited:
                visited.add(key)
                stack.extend(k for k in rooms[key] if k not in visited)
        return len(rooms) == len(visited)
from DPjudge import Power
class XtalballPower(Power):
	"""Power subclass for the Crystal Ball (Xtalball) DPjudge variant.

	Keeps two order lists per power -- 'SOONER' and 'LATER' -- instead of a
	single order set. NOTE: this is Python 2 code (str.decode/unicode.encode
	and the string-handling in __repr__).
	"""
	# ----------------------------------------------------------------------
	def __init__(self, game, name, type = None):
		# All state setup is delegated to the base Power class.
		Power.__init__(self, game, name, type)
	# ----------------------------------------------------------------------
	def __repr__(self):
		"""Render the base Power text plus any non-empty order lists."""
		text = Power.__repr__(self).decode('latin-1')
		for listName, orders in self.list.items():
			if orders: text += '%s\n%s\n' % (listName, '\n'.join(orders))
		return text.encode('latin-1')
	# ----------------------------------------------------------------------
	def reinit(self, includeFlags = 6):
		"""Reset this power's state; flag bits select which parts to reset."""
		Power.reinit(self, includeFlags)
		# -----------------------------------
		# Initialize the transient parameters
		# -----------------------------------
		if includeFlags & 5:
			self.list, self.notes = {'SOONER': [], 'LATER': []}, {}
	# ----------------------------------------------------------------------
	def isEliminated(self, public = False, personal = False):
		"""Like Power.isEliminated, but under the GARRISON rule a power with
		home centers in a Movement phase is only eliminated if no Adjustment
		phase comes before the next M/A phase (presumably so it can still
		build -- confirm against the variant rules)."""
		if not Power.isEliminated(self, public, personal): return False
		if not (self.homes and self.game.phase == 'M' and
			'GARRISON' in self.game.rules): return True
		# Walk forward through phases, restoring game.phase afterwards.
		save = next = self.game.phase
		while next not in 'AM':
			self.game.phase = self.game.findNextPhase()
			next = self.game.phase.split()[-1][0]
		self.game.phase = save
		return next != 'A'
	# ----------------------------------------------------------------------
	def movesSubmitted(self):
		"""Report whether this power has submitted the orders currently
		required: SOONER orders while any power with units lacks them,
		otherwise LATER orders (powers without units always count as done)."""
		if self.name not in self.game.map.powers: return 1
		if (not self.game.skip
		and [x for x in self.game.powers if x.units and not x.list['SOONER']]):
			return self.list['SOONER'] or not self.units
		if self.game.skip: return self.list['LATER']
		return self.list['LATER'] or not self.units
	# ----------------------------------------------------------------------
| StarcoderdataPython |
1886999 | <gh_stars>1-10
import socket
from sys import argv
import cv2
import mediapipe as mp
import itertools
import numpy as np
import time
import sys
from multiprocessing import Queue, Process
from queue import Empty
import atexit
from math import ceil
from collections import deque
sys.path.insert(1, './tools')
import holistic, common, encrypt
# Refresh the on-screen prediction banner at most once every PRINT_FREQ frames.
PRINT_FREQ = 30
SERVER_ADDR = "172.16.31.10"
# SERVER_ADDR = "127.0.0.1"
# Server IP address and Port number
serverAddressPort = (SERVER_ADDR, 9999)
# Window title; also used to poll whether the display window is still open.
APP_NAME = "SignSense"
# send landmarks and receive predictions from server continuously
def server(landmark_queue, prediction_queue):
    """Relay loop between the local queues and the remote UDP prediction
    server.

    Blocks on ``landmark_queue.get()``; each landmark string is encrypted,
    sent to ``serverAddressPort``, and the decrypted reply is pushed onto
    ``prediction_queue``. Runs forever; intended to run in a daemon process.

    :param landmark_queue: multiprocessing.Queue of outgoing landmark strings.
    :param prediction_queue: multiprocessing.Queue receiving decoded replies.
    """
    common.print_debug_banner("STARTED SERVER")
    UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
    # Non-blocking socket: recvfrom raises socket.error when no reply has
    # arrived yet, in which case the landmark just sent is silently dropped
    # (handled below) -- presumably acceptable for a live video feed.
    UDPClientSocket.setblocking(0)
    while True:
        try:
            landmark = landmark_queue.get()
            encrypted_landmark = encrypt.encrypt_chacha(landmark)
            # Send message to server using created UDP socket
            UDPClientSocket.sendto(encrypted_landmark, serverAddressPort)
            # Receive message from the server
            msgFromServer = UDPClientSocket.recvfrom(2048)[0]
            raw_data = encrypt.decrypt_chacha(msgFromServer)
            prediction_queue.put(raw_data)
        except encrypt.DecryptionError:
            # Reply arrived but could not be decrypted; log the raw payload.
            print(f"tried to decrypt {msgFromServer}")
        except socket.error as e:
            # No datagram ready on the non-blocking socket; loop and retry.
            # print(f"SOCKET EXCEPTION: {e}")
            pass
        except Exception as e:
            print(f"SERVER EXCEPTION: {e}")
            pass
def video_loop(landmark_queue, prediction_queue, use_holistic=False):
    """Capture webcam frames, push landmark rows to the server process, and
    overlay the latest received predictions onto the displayed video.

    Runs until the display window is closed, then releases the camera and
    sends the "END" sentinel on ``landmark_queue``.

    :param landmark_queue: multiprocessing.Queue for outgoing landmark rows.
    :param prediction_queue: multiprocessing.Queue of incoming predictions.
    :param use_holistic: whether to use the holistic (full-body) model.
    """
    cap = cv2.VideoCapture(0)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    cap.set(cv2.CAP_PROP_FOURCC, fourcc)
    if not cap.isOpened():
        print("Error opening Camera")
    fps = cap.get(cv2.CAP_PROP_FPS)
    print("Webcam FPS = {}".format(fps))
    timestamp = None
    started = False
    initialized = False  # set once the first server reply arrives
    delay = 0  # frames since the prediction banner was last refreshed
    # Rolling window of the last 5 distinct predictions shown on screen.
    pred_history = deque([" "]*5, 5)
    pdecay = time.time()
    print("starting image cap")
    for image, results in holistic.process_capture(cap, use_holistic):
        # getWindowProperty returns -1 once the user has closed the window.
        window_state = cv2.getWindowProperty(APP_NAME, 0)
        if started and window_state == -1:
            print("QUITTING")
            break
        started = True
        newtime = time.time()
        if timestamp is not None:
            diff = newtime - timestamp
            # Uncomment to print time between each frame
            # print(diff)
        timestamp = newtime
        row = holistic.to_landmark_row(results, use_holistic)
        landmark_str = ','.join(np.array(row).astype(np.str))
        # send comma delimited str of flattened landmarks in bytes to server
        try:
            landmark_queue.put_nowait(landmark_str)
        except Exception as e:
            print(e)
        try:
            out = prediction_queue.get_nowait()
            # toggle the server status flag on first message received
            if out and not initialized:
                initialized = True
                common.print_debug_banner("SENDING ACK TO SERVER FOR CONNECTION")
                # send a one-time ACK to toggle server connected status
                landmark_queue.put_nowait("ACK")
            if delay >= PRINT_FREQ:
                if out and out != pred_history[-1] and out != "None":
                    pred_history.append(out)
                    pdecay = time.time()
                delay = 0
        except Empty:
            # BUGFIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit and hid real errors. Only an
            # empty prediction queue is expected here.
            pass
        delay += 1
        # Clear the banner if no new prediction arrived for 7 seconds.
        if time.time() - pdecay > 7:
            pred_history = deque([" "]*5, 5)
        holistic.draw_landmarks(image, results, use_holistic, ' '.join(pred_history))
        if initialized:
            # Green dot: server connection established.
            cv2.circle(image,(20,450), 10, (0,255,0), -1)
            cv2.putText(image,'online',(40,458), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2,cv2.LINE_AA)
            cv2.imshow(APP_NAME, image)
        else:
            # Red dot: still waiting for the first server reply.
            cv2.circle(image,(20,450), 10, (0,0,255), -1)
            cv2.putText(image,'connecting',(40,458), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2,cv2.LINE_AA)
            cv2.imshow(APP_NAME, image)
    cap.release()
    cv2.destroyAllWindows()
    # send termination message to server
    landmark_queue.put("END")
if __name__ == "__main__":
    # Two daemon processes share a pair of queues: landmarks flow from the
    # video process to the network process, predictions flow back.
    # queue containing the returned predictions from the server
    landmark_queue, prediction_queue = Queue(), Queue()

    # start separate process for the webcam video GUI
    server_p = Process(target=server, args=(landmark_queue, prediction_queue, ))
    server_p.daemon = True
    # Ensure the child is cleaned up even on abnormal interpreter exit.
    atexit.register(common.exit_handler, server_p)
    server_p.start()

    video_p = Process(target=video_loop, args=(landmark_queue, prediction_queue, ))
    video_p.daemon = True
    atexit.register(common.exit_handler, video_p)
    video_p.start()

    # Block until the GUI process exits (window closed); the daemonized
    # server process dies with the main process.
    video_p.join()
| StarcoderdataPython |
1773433 | <reponame>takatsugukosugi/illustration2vec
from abc import ABCMeta, abstractmethod
import numpy as np
class Illustration2VecBase(object):
    """Framework-agnostic base class for illustration2vec models.

    Holds the tag vocabulary and per-tag thresholds, and implements tag
    estimation and feature extraction on top of the backend-specific
    ``_extract``. The probability vector produced by the network is laid
    out as: general tags [0:512], character tags [512:1024], copyright
    tags [1024:1536], rating tags [1536:].
    """
    # Python 2 style ABC declaration (inert on Python 3, kept for
    # backward compatibility with the py2 codebase).
    __metaclass__ = ABCMeta

    def __init__(self, net, tags=None, threshold=None):
        """
        :param net: backend network object used by ``_extract``.
        :param tags: optional sequence of tag names ordered to match the
            network's output units.
        :param threshold: optional per-tag threshold array whose columns
            correspond to the f0.5/f1/f2 operating points (rows indexed
            like ``tags``).
        """
        self.net = net
        if tags is not None:
            self.tags = np.array(tags)
            # Reverse lookup: tag name -> output index.
            self.index = {t: i for i, t in enumerate(tags)}
        else:
            self.tags = None
        # Simplified from an if/else that assigned threshold or None.
        self.threshold = threshold

    @abstractmethod
    def _extract(self, inputs, layername):
        """Run the backend network and return the named layer's output."""
        pass

    def _convert_image(self, image):
        """Coerce an image into a float32 HxWx3 array.

        Monochrome images are replicated across 3 channels; an alpha
        channel, if present, is dropped.

        :raises TypeError: if the input is not 2-D or 3-D.
        """
        arr = np.asarray(image, dtype=np.float32)
        if arr.ndim == 2:
            # convert a monochrome image to a color one
            ret = np.empty((arr.shape[0], arr.shape[1], 3), dtype=np.float32)
            ret[:] = arr.reshape(arr.shape[0], arr.shape[1], 1)
            return ret
        elif arr.ndim == 3:
            # if arr contains alpha channel, remove it
            return arr[:, :, :3]
        else:
            raise TypeError('unsupported image specified')

    def _estimate(self, images):
        """Return the (n_images, n_tags) probability matrix."""
        assert(self.tags is not None)
        imgs = [self._convert_image(img) for img in images]
        prob = self._extract(imgs, layername='prob')
        prob = prob.reshape(prob.shape[0], -1)
        return prob

    def estimate_specific_tags(self, images, tags):
        """Return, per image, a dict mapping each requested tag name to its
        estimated probability."""
        prob = self._estimate(images)
        return [{t: float(prob[i, self.index[t]]) for t in tags}
                for i in range(prob.shape[0])]

    def estimate_top_tags(self, images, n_tag=10):
        """Return, per image, the ``n_tag`` highest-probability tags in each
        of the general/character/copyright categories (all rating tags are
        always included), as (tag, probability) pairs."""
        prob = self._estimate(images)
        general_prob = prob[:, :512]
        character_prob = prob[:, 512:1024]
        copyright_prob = prob[:, 1024:1536]
        rating_prob = prob[:, 1536:]
        general_arg = np.argsort(-general_prob, axis=1)[:, :n_tag]
        character_arg = np.argsort(-character_prob, axis=1)[:, :n_tag]
        copyright_arg = np.argsort(-copyright_prob, axis=1)[:, :n_tag]
        rating_arg = np.argsort(-rating_prob, axis=1)
        result = []
        for i in range(prob.shape[0]):
            result.append({
                'general': zip(
                    self.tags[general_arg[i]],
                    general_prob[i, general_arg[i]].tolist()),
                'character': zip(
                    self.tags[512 + character_arg[i]],
                    character_prob[i, character_arg[i]].tolist()),
                'copyright': zip(
                    self.tags[1024 + copyright_arg[i]],
                    copyright_prob[i, copyright_arg[i]].tolist()),
                'rating': zip(
                    self.tags[1536 + rating_arg[i]],
                    rating_prob[i, rating_arg[i]].tolist()),
            })
        return result

    def __extract_plausible_tags(self, preds, f):
        """Filter each prediction's (tag, prob) pairs with predicate
        ``f(tag, prob)``; rating entries are passed through unfiltered."""
        result = []
        for pred in preds:
            general = [(t, p) for t, p in pred['general'] if f(t, p)]
            character = [(t, p) for t, p in pred['character'] if f(t, p)]
            copyright = [(t, p) for t, p in pred['copyright'] if f(t, p)]
            result.append({
                'general': general,
                'character': character,
                'copyright': copyright,
                'rating': pred['rating'],
            })
        return result

    def estimate_plausible_tags(
            self, images, threshold=0.25, threshold_rule='constant'):
        """Return, per image, the tags whose probability clears a threshold.

        :param threshold: cut-off used when ``threshold_rule`` is 'constant'.
        :param threshold_rule: 'constant', or one of 'f0.5'/'f1'/'f2' to use
            the per-tag thresholds supplied at construction time.
        :raises TypeError: for an unknown rule, or when a per-tag rule is
            requested but no threshold array was given at init.
        """
        preds = self.estimate_top_tags(images, n_tag=512)
        if threshold_rule == 'constant':
            return self.__extract_plausible_tags(
                preds, lambda t, p: p > threshold)
        # Consolidates three duplicated branches: the threshold array's
        # columns correspond to the f0.5/f1/f2 operating points in order.
        rule_columns = {'f0.5': 0, 'f1': 1, 'f2': 2}
        if threshold_rule not in rule_columns:
            raise TypeError('unknown rule specified')
        if self.threshold is None:
            raise TypeError(
                'please specify threshold option during init.')
        column = rule_columns[threshold_rule]
        return self.__extract_plausible_tags(
            preds, lambda t, p: p > self.threshold[self.index[t], column])

    def extract_feature(self, images):
        """Return the real-valued feature from layer 'encode1', flattened
        to one row per image."""
        imgs = [self._convert_image(img) for img in images]
        feature = self._extract(imgs, layername='encode1')
        feature = feature.reshape(feature.shape[0], -1)
        return feature

    def extract_binary_feature(self, images):
        """Return the binarized 'encode1neuron' feature (> 0.5 -> 1),
        bit-packed into uint8 per image."""
        imgs = [self._convert_image(img) for img in images]
        feature = self._extract(imgs, layername='encode1neuron')
        feature = feature.reshape(feature.shape[0], -1)
        binary_feature = np.zeros_like(feature, dtype=np.uint8)
        binary_feature[feature > 0.5] = 1
        return np.packbits(binary_feature, axis=1)
| StarcoderdataPython |
import os
import cv2
import keras.backend as K
import keras.layers as layers
import numpy as np
from keras import regularizers
from keras.applications import resnet50
from keras.applications.resnet50 import WEIGHTS_PATH_NO_TOP
from keras.initializers import TruncatedNormal
from keras.layers import Input, BatchNormalization, Activation, AveragePooling2D, MaxPooling2D
from keras.layers.convolutional import Conv2D
from keras.layers.core import Flatten, Dense
from keras.layers.wrappers import TimeDistributed
from keras.models import Model, load_model
from keras.utils.data_utils import get_file
from custom_layers import RoiResizeConv, Scale
from loss_functions import cls_loss_det, bbreg_loss_det, cls_loss_rpn, bbreg_loss_rpn
from shared_constants import DEFAULT_ANCHORS_PER_LOC
# Output resolution of ROI pooling -- presumably POOLING_REGIONS x
# POOLING_REGIONS per region; confirm against RoiResizeConv usage.
POOLING_REGIONS = 7
FINAL_CONV_FILTERS = 1024
# Total downsampling of the shared conv layers (see get_conv_rows_cols:
# four stride-2 reductions => 16 input pixels per feature cell).
STRIDE = 16
WEIGHT_REGULARIZER = regularizers.l2(1e-4)
BIAS_REGULARIZER = regularizers.l2(1e-4)
# not sure if activity regularizer is needed anywhere
ACTIVITY_REGULARIZER = regularizers.l2(1e-4)
def rpn_from_h5(h5_path, anchors_per_loc=DEFAULT_ANCHORS_PER_LOC):
    """
    Loads a saved rpn model from an h5 file.
    :param h5_path: string, filesystem path of the saved Keras model for the rpn.
    :param anchors_per_loc: positive integer, the number of anchors used in the rpn saved in the file.
    :return: Keras model.
    """
    # load_model must be told about every custom loss/layer by name.
    custom_objects = {
        'cls_loss_rpn': cls_loss_rpn(anchors_per_loc=anchors_per_loc),
        'bbreg_loss_rpn': bbreg_loss_rpn(anchors_per_loc=anchors_per_loc),
        'Scale': Scale,
    }
    return load_model(h5_path, custom_objects=custom_objects)
def det_from_h5(h5_path, num_classes):
    """
    Loads a saved detector model from an h5 file.
    :param h5_path: string, filesystem path of the saved Keras model for the detector module.
    :param num_classes: positive integer, the number of object classes (including background) used in the file's model.
    :return: Keras model.
    """
    # load_model must be told about every custom loss/layer by name.
    custom_objects = {
        'RoiResizeConv': RoiResizeConv,
        'Scale': Scale,
        'cls_loss_det': cls_loss_det,
        'bbreg_loss_det': bbreg_loss_det,
        'class_loss_internal': bbreg_loss_det(num_classes),
    }
    return load_model(h5_path, custom_objects=custom_objects)
def preprocess(data):
    """
    Convert raw bgr image to the format needed for pre-trained Imagenet weights to apply.
    :param data: numpy array containing bgr values of an image.
    :return: numpy array with preprocessed values.
    """
    # Input arrives as BGR (OpenCV convention); the pretrained weights
    # expect RGB ordering.
    rgb = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
    # preprocess_input operates on a batch; add and strip the batch axis.
    batch = np.expand_dims(rgb, axis=0).astype('float64')
    return resnet50.preprocess_input(batch)[0]
def get_conv_rows_cols(height, width):
    """
    Calculates the dimensions of the last conv4 layer for a given image size.
    :param height: positive integer, the image height in pixels.
    :param width: positive integer, the image width in pixels.
    :return: height and width of the last conv4 layer as a list of integers.
    """
    out_dims = []
    for size in (height, width):
        # (3, 3) zero padding adds 3 pixels on each side.
        extent = size + 6
        # Four stride-2 stages with valid padding (conv1 7x7, maxpool 3x3,
        # then two 1x1 stride-2 convs): (W - F + 2P) / S + 1 with P = 0.
        for filter_size in (7, 3, 1, 1):
            extent = (extent - filter_size) // 2 + 1
        out_dims.append(extent)
    return out_dims
def pop_layer(model):
    """Remove the last layer from a Keras model in place.

    Mutates the model's internal layer/output bookkeeping directly and marks
    the model as not built so Keras rebuilds the graph on next use.

    NOTE(review): relies on private Keras attributes (``inbound_nodes``,
    ``outbound_nodes``, ``output_tensors``); tied to the Keras version in
    use -- confirm before upgrading Keras.

    :raises Exception: if the model has no layers left to pop.
    """
    if not model.outputs:
        raise Exception('Sequential model cannot be popped: model is empty.')
    model.layers.pop()
    if not model.layers:
        # Nothing left: clear all graph bookkeeping.
        model.outputs = []
        model.inbound_nodes = []
        model.outbound_nodes = []
    else:
        # The new last layer no longer feeds anything downstream.
        model.layers[-1].outbound_nodes = []
        # hack to set model.outputs properly
        model.outputs = [model.layers[-1].output]
        # another hack to set model.output properly
        model.inbound_nodes[0].output_tensors[-1] = model.outputs[-1]
    model.built = False
def identity_block(input_tensor, kernel_size, filters, stage, block, trainable=True, use_conv_bias=True,
                   weight_regularizer=None, bias_regularizer=None, bn_training=False, separate_scale=False):
    """The identity block is the block that has no conv layer at shortcut.

    Builds the standard ResNet bottleneck (1x1 -> kxk -> 1x1 convs, each
    followed by batch norm) and adds the unmodified input back in, so the
    input and output channel counts must match (filters[2]).

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    # Additional arguments
        trainable: boolean for whether to make this block's layers trainable.
        use_conv_bias: boolean for whether or not convolutional layers should have a bias.
        weight_regularizer: keras.regularizers.Regularizer object for weight regularization on all layers, None if no
            regularization.
        bias_regularizer: keras.regularizers.Regularizer object for bias regularization on all layers, None if no
            regularization.
        bn_training: boolean for whether or not BatchNormalization layers should be trained. Should always be false as
            the model doesn't train correctly with batch normalization.
        separate_scale: boolean for whether or not the BatchNormalization layers should be followed by a separate Scale
            layer.
    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    # Layer names must match the pretrained weight file exactly.
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    scale_name_base = 'scale' + str(stage) + block + '_branch'
    # eps=1e-5 -- presumably chosen to match the converted pretrained
    # weights' batch-norm epsilon; confirm before changing.
    eps = 1e-5
    x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a', trainable=trainable, use_bias=use_conv_bias,
               kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(input_tensor)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2a',
                           trainable=bn_training)(x, training=bn_training)
    if separate_scale:
        x = Scale(axis=bn_axis, name=scale_name_base + '2a', trainable=bn_training)(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, (kernel_size, kernel_size), padding='same', name=conv_name_base + '2b', trainable=trainable,
               use_bias=use_conv_bias,
               kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2b',
                           trainable=bn_training)(x, training=bn_training)
    if separate_scale:
        x = Scale(axis=bn_axis, name=scale_name_base + '2b', trainable=bn_training)(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c', trainable=trainable, use_bias=use_conv_bias,
               kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2c',
                           trainable=bn_training)(x, training=bn_training)
    if separate_scale:
        x = Scale(axis=bn_axis, name=scale_name_base + '2c', trainable=bn_training)(x)

    # Identity shortcut: add the block input back unchanged.
    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2), trainable=True, use_conv_bias=True,
               weight_regularizer=None, bias_regularizer=None, bn_training=False, separate_scale=False):
    """A block that has a conv layer at shortcut.

    Like identity_block, but the shortcut is a strided 1x1 conv + batch
    norm so the residual addition is valid when the block changes spatial
    size and/or channel count.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    # Additional arguments
        trainable: boolean for whether to make this block's layers trainable.
        use_conv_bias: boolean for whether or not convolutional layers should have a bias.
        weight_regularizer: keras.regularizers.Regularizer object for weight regularization on all layers, None if no
            regularization.
        bias_regularizer: keras.regularizers.Regularizer object for bias regularization on all layers, None if no
            regularization.
        bn_training: boolean for whether or not BatchNormalization layers should be trained. Should always be false as
            the model doesn't train correctly with batch normalization.
        separate_scale: boolean for whether or not the BatchNormalization layers should be followed by a separate Scale
            layer.
    # Returns
        Output tensor for the block.
    Note that from stage 3, the first conv layer at main path is with strides=(2,2)
    And the shortcut should have strides=(2,2) as well
    """
    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    # Layer names must match the pretrained weight file exactly.
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    scale_name_base = 'scale' + str(stage) + block + '_branch'
    eps = 1e-5
    x = Conv2D(filters1, (1, 1), strides=strides, name=conv_name_base + '2a', trainable=trainable, use_bias=use_conv_bias,
               kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(input_tensor)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2a', trainable=bn_training)(x, training=bn_training)
    if separate_scale:
        x = Scale(axis=bn_axis, name=scale_name_base + '2a', trainable=bn_training)(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, (kernel_size, kernel_size), padding='same', name=conv_name_base + '2b', trainable=trainable,
               use_bias=use_conv_bias, kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2b', trainable=bn_training)(x, training=bn_training)
    if separate_scale:
        x = Scale(axis=bn_axis, name=scale_name_base + '2b', trainable=bn_training)(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c', trainable=trainable, use_bias=use_conv_bias,
               kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2c', trainable=bn_training)(x, training=bn_training)
    if separate_scale:
        x = Scale(axis=bn_axis, name=scale_name_base + '2c', trainable=bn_training)(x)

    # Projection shortcut: strided 1x1 conv matches the main path's output
    # shape so the residual add below is valid.
    shortcut = Conv2D(filters3, (1, 1), strides=strides, name=conv_name_base + '1', trainable=trainable, use_bias=use_conv_bias,
                      kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(input_tensor)
    shortcut = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '1',
                                  trainable=bn_training)(shortcut, training=bn_training)
    if separate_scale:
        shortcut = Scale(axis=bn_axis, name=scale_name_base + '1', trainable=bn_training)(shortcut)

    x = layers.add([x, shortcut])
    x = Activation('relu')(x)
    return x
def td_identity_block(input_tensor, kernel_size, filters, stage, block, use_conv_bias=True,
                      weight_regularizer=None, bias_regularizer=None, bn_training=False, separate_scale=False):
    """Time distributed version of resnet identity block

    Each layer is wrapped in TimeDistributed so it is applied independently
    to every ROI along the extra leading (time/ROI) axis; layer names match
    identity_block so the same pretrained weights load.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filterss of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    # Additional arguments
        use_conv_bias: boolean for whether or not convolutional layers should have a bias.
        weight_regularizer: keras.regularizers.Regularizer object for weight regularization on all layers, None if no
            regularization.
        bias_regularizer: keras.regularizers.Regularizer object for bias regularization on all layers, None if no
            regularization.
        bn_training: boolean for whether or not BatchNormalization layers should be trained. Should always be false as
            the model doesn't train correctly with batch normalization.
        separate_scale: boolean for whether or not the BatchNormalization layers should be followed by a separate Scale
            layer.
    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    bn_axis = 3
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    scale_name_base = 'scale' + str(stage) + block + '_branch'
    eps = 1e-5
    x = TimeDistributed(Conv2D(filters1, (1, 1), use_bias=use_conv_bias,
                               kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                               ),
                        name=conv_name_base + '2a')(input_tensor)
    x = TimeDistributed(BatchNormalization(epsilon=eps, axis=bn_axis, trainable=bn_training),
                        name=bn_name_base + '2a',)(x, training=bn_training)
    if separate_scale:
        x = TimeDistributed(Scale(axis=bn_axis, trainable=bn_training), name=scale_name_base + '2a')(x, training=bn_training)
    x = Activation('relu')(x)

    x = TimeDistributed(Conv2D(filters2, kernel_size, padding='same', use_bias=use_conv_bias,
                               kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer
                               ),
                        name=conv_name_base + '2b')(x)
    x = TimeDistributed(BatchNormalization(epsilon=eps, axis=bn_axis, trainable=bn_training),
                        name=bn_name_base + '2b')(x, training=bn_training)
    if separate_scale:
        x = TimeDistributed(Scale(axis=bn_axis, trainable=bn_training), name=scale_name_base + '2b')(x, training=bn_training)
    x = Activation('relu')(x)

    x = TimeDistributed(Conv2D(filters3, (1, 1), use_bias=use_conv_bias,
                               kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer
                               ),
                        name=conv_name_base + '2c')(x)
    x = TimeDistributed(BatchNormalization(epsilon=eps, axis=bn_axis, trainable=bn_training),
                        name=bn_name_base + '2c')(x, training=bn_training)
    if separate_scale:
        x = TimeDistributed(Scale(axis=bn_axis, trainable=bn_training), name=scale_name_base + '2c')(x, training=bn_training)

    # Identity shortcut: the block input is added back unchanged.
    x = layers.add([x, input_tensor])
    x = TimeDistributed(Activation('relu'))(x)
    return x
def td_conv_block(input_tensor, kernel_size, filters, stage, block, td_input_shape, strides=(2, 2), use_conv_bias=True,
                  weight_regularizer=None, bias_regularizer=None, bn_training=False, separate_scale=False):
    """A time distributed block that has a conv layer at shortcut.

    Each layer is wrapped in TimeDistributed so it is applied independently
    to every ROI along the extra leading axis; ``td_input_shape`` gives the
    per-timestep input shape required by the first TimeDistributed layer.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filterss of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    # Additional arguments
        use_conv_bias: boolean for whether or not convolutional layers should have a bias.
        weight_regularizer: keras.regularizers.Regularizer object for weight regularization on all layers, None if no
            regularization.
        bias_regularizer: keras.regularizers.Regularizer object for bias regularization on all layers, None if no
            regularization.
        bn_training: boolean for whether or not BatchNormalization layers should be trained. Should always be false as
            the model doesn't train correctly with batch normalization.
        separate_scale: boolean for whether or not the BatchNormalization layers should be followed by a separate Scale
            layer.
    # Returns
        Output tensor for the block.
    Note that from stage 3, the first conv layer at main path is with strides=(2,2)
    And the shortcut should have strides=(2,2) as well
    """
    filters1, filters2, filters3 = filters
    bn_axis = 3
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    scale_name_base = 'scale' + str(stage) + block + '_branch'
    eps = 1e-5
    x = TimeDistributed(Conv2D(filters1, (1, 1), strides=strides, use_bias=use_conv_bias,
                               kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer
                               ),
                        name=conv_name_base + '2a', input_shape=td_input_shape)(input_tensor)
    x = TimeDistributed(BatchNormalization(epsilon=eps, axis=bn_axis, trainable=bn_training),
                        name=bn_name_base + '2a')(x, training=bn_training)
    if separate_scale:
        x = TimeDistributed(Scale(axis=bn_axis, trainable=bn_training), name=scale_name_base + '2a')(x, training=bn_training)
    x = Activation('relu')(x)

    x = TimeDistributed(Conv2D(filters2, kernel_size, padding='same', use_bias=use_conv_bias,
                               kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer
                               ),
                        name=conv_name_base + '2b')(x)
    x = TimeDistributed(BatchNormalization(epsilon=eps, axis=bn_axis, trainable=bn_training),
                        name=bn_name_base + '2b')(x, training=bn_training)
    if separate_scale:
        x = TimeDistributed(Scale(axis=bn_axis, trainable=bn_training), name=scale_name_base + '2b')(x, training=bn_training)
    x = Activation('relu')(x)

    x = TimeDistributed(Conv2D(filters3, (1, 1), use_bias=use_conv_bias,
                               kernel_regularizer=weight_regularizer,bias_regularizer=bias_regularizer
                               ),
                        name=conv_name_base + '2c')(x)
    x = TimeDistributed(BatchNormalization(epsilon=eps, axis=bn_axis, trainable=bn_training),
                        name=bn_name_base + '2c')(x, training=bn_training)
    if separate_scale:
        x = TimeDistributed(Scale(axis=bn_axis, trainable=bn_training), name=scale_name_base + '2c')(x, training=bn_training)

    # Projection shortcut: strided 1x1 conv matches the main path's output
    # shape so the residual add below is valid.
    shortcut = TimeDistributed(Conv2D(filters3, (1, 1), strides=strides, use_bias=use_conv_bias,
                                      kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer
                                      ),
                               name=conv_name_base + '1')(input_tensor)
    shortcut = TimeDistributed(BatchNormalization(epsilon=eps, axis=bn_axis, trainable=bn_training),
                               name=bn_name_base + '1')(shortcut, training=bn_training)
    if separate_scale:
        shortcut = TimeDistributed(Scale(axis=bn_axis, trainable=bn_training),
                                   name=scale_name_base + '1')(shortcut, training=bn_training)

    x = layers.add([x, shortcut])
    x = TimeDistributed(Activation('relu'))(x)
    return x
def resnet50_base(freeze_blocks=None, weight_regularizer=None, bias_regularizer=None):
    """
    Creates a model of the ResNet-50 base layers used for both the RPN and detector.

    :param freeze_blocks: list of block numbers to make untrainable, e.g. [1,2,3] to not train the first 3 blocks.
        Defaults to [1, 2, 3]. (A None sentinel is used instead of a literal list default to avoid the
        shared-mutable-default-argument pitfall.)
    :param weight_regularizer: keras.regularizers.Regularizer object for weight regularization on all layers, None if
        no regularization.
    :param bias_regularizer: keras.regularizers.Regularizer object for bias regularization on all layers, None if no
        regularization.
    :return: Keras model for the base network (stages 1-4 only; stage 5 lives in the classifier head).
    """
    if freeze_blocks is None:
        freeze_blocks = [1, 2, 3]
    img_input = Input(shape=(None, None, 3))
    bn_axis = 3

    # Stage 1: the 7x7 conv stem. Its batch norm is always frozen and run in inference mode.
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1',
               trainable=1 not in freeze_blocks,
               kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1', trainable=False)(x, training=False)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    # Stages 2-4: one conv block ('a') followed by that stage's identity blocks.
    # (stage number, bottleneck filter sizes, identity-block letters)
    stage_specs = [
        (2, [64, 64, 256], 'bc'),
        (3, [128, 128, 512], 'bcd'),
        (4, [256, 256, 1024], 'bcdef'),
    ]
    for stage, filters, id_blocks in stage_specs:
        trainable = stage not in freeze_blocks
        # Stage 2 keeps stride 1 because the max-pool above already downsampled;
        # later stages rely on conv_block's default strides, exactly as before.
        extra = {'strides': (1, 1)} if stage == 2 else {}
        x = conv_block(x, 3, filters, stage=stage, block='a', trainable=trainable,
                       weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                       **extra)
        for block in id_blocks:
            x = identity_block(x, 3, filters, stage=stage, block=block, trainable=trainable,
                               weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)

    base_model = Model(img_input, x, name='resnet50')
    return base_model
def resnet50_rpn(base_model, weight_regularizer=None, bias_regularizer=None, include_conv=False,
                 anchors_per_loc=DEFAULT_ANCHORS_PER_LOC):
    """
    Builds the region proposal network head on top of a ResNet-50 base.
    :param base_model: Keras model returned by resnet50_base, containing only the first 4 blocks.
    :param weight_regularizer: keras.regularizers.Regularizer object for weight regularization on all layers, None if no
    regularization.
    :param bias_regularizer: keras.regularizers.Regularizer object for bias regularization on all layers, None if no
    regularization.
    :param include_conv: boolean for whether the conv4 output should be included in the model output.
    :param anchors_per_loc: number of anchors at each convolution position.
    :return: Keras model with the rpn layers on top of the base layers. Weights are initialized to Imagenet weights.
    """
    conv_features = base_model.output
    # 3x3 sliding window shared by both RPN heads.
    shared = Conv2D(512, (3, 3), padding='same', activation='relu', kernel_initializer='normal',
                    kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                    name='rpn_conv1')(conv_features)
    head_initializer = TruncatedNormal(stddev=0.01)
    # Per-anchor objectness score.
    cls_head = Conv2D(anchors_per_loc, (1, 1), activation='sigmoid', kernel_initializer=head_initializer,
                      kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                      name='rpn_out_cls')(shared)
    # Per-anchor box regression (4 values per anchor).
    bbreg_head = Conv2D(anchors_per_loc * 4, (1, 1), activation='linear', kernel_initializer=head_initializer,
                        kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                        name='rpn_out_bbreg')(shared)
    model_outputs = [cls_head, bbreg_head]
    if include_conv:
        model_outputs.append(conv_features)
    rpn_model = Model(inputs=base_model.inputs, outputs=model_outputs)
    # Initialize the shared base from ImageNet-pretrained ResNet-50 weights, matched by layer name.
    weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                            WEIGHTS_PATH_NO_TOP,
                            cache_subdir='models',
                            md5_hash='a268eb855778b3df3c7506639542a6af')
    rpn_model.load_weights(weights_path, by_name=True)
    return rpn_model
def resnet50_classifier(num_rois, num_classes, base_model = None, weight_regularizer=None, bias_regularizer=None):
    """
    Creates a Keras model of the ResNet-50 classification layers on top of a passed in base model.
    :param num_rois: positive integer, number of regions of interest to train or inference on in a batch.
    :param num_classes: positive integer, number of object classes including background.
    :param base_model: Keras model returned by resnet50_base, containing only the first 4 blocks.
    :param weight_regularizer: keras.regularizers.Regularizer object for weight regularization on all layers, None if no
    regularization.
    :param bias_regularizer: keras.regularizers.Regularizer object for bias regularization on all layers, None if no
    regularization.
    :return: Keras model with the classification layers on top of the base layers. Weights are initialized to Imagenet
    weights.
    """
    # Batch of ROIs, supplied alongside the image (or conv-feature) input.
    roi_input = Input(shape=(None, 4), name='roi_input')
    # Without a base model, accept pre-computed conv features directly instead of an image.
    pooling_input = base_model.output if base_model else Input(shape=(None, None, FINAL_CONV_FILTERS))
    model_input = base_model.input if base_model else pooling_input
    # Crop/resize each ROI out of the shared feature map (ROI pooling).
    resize_out = RoiResizeConv(POOLING_REGIONS, num_rois)([pooling_input, roi_input])
    # Stage 5 of ResNet-50, applied per-ROI via TimeDistributed wrappers.
    out = td_conv_block(resize_out, 3, [512, 512, 2048], stage=5, block='a', strides=(1,1),
                        weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                        td_input_shape=(num_rois, POOLING_REGIONS, POOLING_REGIONS, 1024))
    out = td_identity_block(out, 3, [512, 512, 2048], stage=5, block='b',
                            weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)
    out = td_identity_block(out, 3, [512, 512, 2048], stage=5, block='c',
                            weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)
    out = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(out)
    out = TimeDistributed(Flatten(name='flatten'))(out)
    # Smaller init std-dev for the bbox-regression head than for the class head.
    gaussian_initializer_cls = TruncatedNormal(stddev=0.01)
    gaussian_initializer_bbreg = TruncatedNormal(stddev=0.001)
    out_class = TimeDistributed(Dense(num_classes, activation='softmax',
                                      kernel_initializer=gaussian_initializer_cls,
                                      kernel_regularizer=weight_regularizer,
                                      bias_regularizer=bias_regularizer
                                      ),
                                name='dense_class_{}'.format(num_classes))(out)
    # 4 box-regression targets per foreground class (background excluded).
    out_reg = TimeDistributed(Dense(4 * (num_classes - 1), activation='linear',
                                    kernel_initializer=gaussian_initializer_bbreg,
                                    kernel_regularizer=weight_regularizer,
                                    bias_regularizer=bias_regularizer
                                    ),
                              name='dense_reg_{}'.format(num_classes))(out)
    cls_model = Model(inputs=[model_input, roi_input], outputs=[out_class, out_reg])
    # not sure if needed - bn layers should already be frozen
    for layer in cls_model.layers:
        if isinstance(layer, TimeDistributed) and isinstance(layer.layer, BatchNormalization):
            layer.layer.trainable = False
    # Pretrained ImageNet weights, matched to the stage-5 layers by name.
    weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                            WEIGHTS_PATH_NO_TOP,
                            cache_subdir='models',
                            md5_hash='a268eb855778b3df3c7506639542a6af')
    cls_model.load_weights(weights_path, by_name=True)
    return cls_model
def resnet101_base(freeze_blocks=None, weight_regularizer=None, bias_regularizer=None):
    """
    Creates a model of the ResNet-101 base layers used for both the RPN and detector.

    :param freeze_blocks: list of block numbers to make untrainable, e.g. [1,2,3] to not train the first 3 blocks.
        Defaults to [1, 2, 3]. (A None sentinel is used instead of a literal list default to avoid the
        shared-mutable-default-argument pitfall.)
    :param weight_regularizer: keras.regularizers.Regularizer object for weight regularization on all layers, None if
        no regularization.
    :param bias_regularizer: keras.regularizers.Regularizer object for bias regularization on all layers, None if no
        regularization.
    :return: Keras model for the base network (stages 1-4 only; stage 5 lives in the classifier head).
    """
    if freeze_blocks is None:
        freeze_blocks = [1, 2, 3]
    img_input = Input(shape=(None, None, 3))
    bn_axis = 3

    # Stage 1 stem. This variant uses bias-free convs plus a separate Scale layer
    # after batch norm (matching the bundled resnet101 weight layout); BN and
    # Scale are always frozen and run in inference mode.
    train1 = 1 not in freeze_blocks
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', trainable=train1, use_bias=False,
               kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1', trainable=False)(x, training=False)
    x = Scale(axis=bn_axis, name='scale_conv1', trainable=False)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    # Stage 2: stride 1 (the max-pool above already downsampled) + 2 identity blocks.
    train2 = 2 not in freeze_blocks
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), trainable=train2,
                   weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                   use_conv_bias=False, separate_scale=True)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', trainable=train2,
                       weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                       use_conv_bias=False, separate_scale=True)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', trainable=train2,
                       weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                       use_conv_bias=False, separate_scale=True)

    # Stage 3: 1 conv block + 3 identity blocks named b1..b3.
    train3 = 3 not in freeze_blocks
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', trainable=train3,
                   weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                   use_conv_bias=False, separate_scale=True)
    for i in range(1, 4):
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='b' + str(i), trainable=train3,
                           weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                           use_conv_bias=False, separate_scale=True)

    # Stage 4: 1 conv block + 22 identity blocks named b1..b22 (what makes it "101").
    train4 = 4 not in freeze_blocks
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', trainable=train4,
                   weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                   use_conv_bias=False, separate_scale=True)
    for i in range(1, 23):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b' + str(i), trainable=train4,
                           weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                           use_conv_bias=False, separate_scale=True)

    base_model = Model(img_input, x, name='resnet101')
    return base_model
def resnet101_rpn(base_model, weight_regularizer=None, bias_regularizer=None, include_conv=False,
                  anchors_per_loc=DEFAULT_ANCHORS_PER_LOC):
    """
    Creates an rpn model on top of a passed in ResNet-101 base model.
    Same head structure as resnet50_rpn, but initializes from the bundled
    ResNet-101 weight file instead of downloading ResNet-50 ImageNet weights.
    :param base_model: Keras model returned by resnet101_base, containing only the first 4 blocks.
    :param weight_regularizer: keras.regularizers.Regularizer object for weight regularization on all layers, None if no
    regularization.
    :param bias_regularizer: keras.regularizers.Regularizer object for bias regularization on all layers, None if no
    regularization.
    :param include_conv: boolean for whether the conv4 output should be included in the model output.
    :param anchors_per_loc: number of anchors at each convolution position.
    :return: Keras model with the rpn layers on top of the base layers.
    """
    # like resnet50_rpn but loads a different set of weights
    # 3x3 sliding window shared by both RPN heads.
    net = Conv2D(512, (3, 3), padding='same', activation='relu',kernel_initializer='normal',
                 kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                 name='rpn_conv1')(base_model.output)
    gaussian_initializer = TruncatedNormal(stddev=0.01)
    # Per-anchor objectness score.
    x_class = Conv2D(anchors_per_loc, (1, 1), activation='sigmoid', kernel_initializer=gaussian_initializer,
                     kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                     name='rpn_out_cls')(net)
    # Per-anchor box regression (4 values per anchor).
    x_regr = Conv2D(anchors_per_loc * 4, (1, 1), activation='linear', kernel_initializer=gaussian_initializer,
                    kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                    name='rpn_out_bbreg')(net)
    outputs = [x_class, x_regr]
    if include_conv:
        outputs.append(base_model.output)
    rpn_model = Model(inputs = base_model.inputs, outputs = outputs)
    # Weight file shipped alongside the package, matched to layers by name.
    this_dir = os.path.dirname(__file__)
    weights_path = os.path.join(this_dir, '../models/resnet101_weights_tf.h5')
    rpn_model.load_weights(weights_path, by_name=True)
    return rpn_model
def resnet101_classifier(num_rois, num_classes, base_model = None, weight_regularizer=None, bias_regularizer=None):
    """
    Creates a Keras model of the ResNet-101 classification layers on top of a passed in base model.
    :param num_rois: positive integer, number of regions of interest to train or inference on in a batch.
    :param num_classes: positive integer, number of object classes including background.
    :param base_model: Keras model returned by resnet101_base, containing only the first 4 blocks.
    :param weight_regularizer: keras.regularizers.Regularizer object for weight regularization on all layers, None if no
    regularization.
    :param bias_regularizer: keras.regularizers.Regularizer object for bias regularization on all layers, None if no
    regularization.
    :return: Keras model with the classification layers on top of the base layers. Weights are initialized to Imagenet
    weights.
    """
    # Batch of ROIs, supplied alongside the image (or conv-feature) input.
    roi_input = Input(shape=(None, 4), name='roi_input')
    # Without a base model, accept pre-computed conv features directly instead of an image.
    pooling_input = base_model.output if base_model else Input(shape=(None, None, FINAL_CONV_FILTERS))
    model_input = base_model.input if base_model else pooling_input
    # Crop/resize each ROI out of the shared feature map (ROI pooling).
    resize_out = RoiResizeConv(POOLING_REGIONS, num_rois)([pooling_input, roi_input])
    # Stage 5 applied per-ROI; bias-free convs with separate Scale layers to
    # match the bundled ResNet-101 weight layout (see weight loading below).
    out = td_conv_block(resize_out, 3, [512, 512, 2048], stage=5, block='a', strides=(1,1),
                        weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                        td_input_shape=(num_rois, POOLING_REGIONS, POOLING_REGIONS, 1024),
                        use_conv_bias=False, separate_scale=True)
    out = td_identity_block(out, 3, [512, 512, 2048], stage=5, block='b',
                            weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                            use_conv_bias=False, separate_scale=True)
    out = td_identity_block(out, 3, [512, 512, 2048], stage=5, block='c',
                            weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,
                            use_conv_bias=False, separate_scale=True)
    out = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(out)
    out = TimeDistributed(Flatten(name='flatten'))(out)
    # Smaller init std-dev for the bbox-regression head than for the class head.
    gaussian_initializer_cls = TruncatedNormal(stddev=0.01)
    gaussian_initializer_bbreg = TruncatedNormal(stddev=0.001)
    out_class = TimeDistributed(Dense(num_classes, activation='softmax',
                                      kernel_initializer=gaussian_initializer_cls,
                                      kernel_regularizer=weight_regularizer,
                                      bias_regularizer=bias_regularizer
                                      ),
                                name='dense_class_{}'.format(num_classes))(out)
    # 4 box-regression targets per foreground class (background excluded).
    out_reg = TimeDistributed(Dense(4 * (num_classes - 1), activation='linear',
                                    kernel_initializer=gaussian_initializer_bbreg,
                                    kernel_regularizer=weight_regularizer,
                                    bias_regularizer=bias_regularizer
                                    ),
                              name='dense_reg_{}'.format(num_classes))(out)
    cls_model = Model(inputs=[model_input, roi_input], outputs=[out_class, out_reg])
    # NOTE(review): unlike resnet50_classifier, TimeDistributed BatchNormalization
    # layers are not explicitly re-frozen here -- presumably the td_* blocks
    # already create them untrainable; confirm before fine-tuning stage 5.
    this_dir = os.path.dirname(__file__)
    weights_path = os.path.join(this_dir, '../models/resnet101_weights_tf.h5')
    cls_model.load_weights(weights_path, by_name=True)
    return cls_model
| StarcoderdataPython |
9627145 | <filename>tests/unit/test_client.py
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from typing import Any
import mock
import pytest
# Project id used by every test in this module.
PROJECT = "dummy-project-123"
def test__get_gcd_project_wo_value_set():
    """_get_gcd_project returns None when the env variable is not set."""
    from google.cloud.datastore.client import _get_gcd_project

    fake_environ = {}
    with mock.patch("os.getenv", new=fake_environ.get):
        assert _get_gcd_project() is None
def test__get_gcd_project_w_value_set():
    """_get_gcd_project returns the DATASTORE_DATASET env value when present."""
    from google.cloud.datastore.client import _get_gcd_project
    from google.cloud.datastore.client import DATASTORE_DATASET

    fake_environ = {DATASTORE_DATASET: PROJECT}
    with mock.patch("os.getenv", new=fake_environ.get):
        assert _get_gcd_project() == PROJECT
def _determine_default_helper(gcd=None, fallback=None, project_called=None):
    """Run ``_determine_default_project`` with stubbed-out lookups.

    Returns ``(project, call_log)`` where ``call_log`` records which stubs
    ran, in order, so tests can assert on the fallback sequence.
    """
    from google.cloud.datastore.client import _determine_default_project

    call_log = []

    def fake_gcd():
        call_log.append("gcd_mock")
        return gcd

    def fake_fallback(project=None):
        call_log.append(("fallback_mock", project))
        return fallback

    with mock.patch.multiple(
        "google.cloud.datastore.client",
        _get_gcd_project=fake_gcd,
        _base_default_project=fake_fallback,
    ):
        result = _determine_default_project(project_called)

    return result, call_log
def test__determine_default_project_wo_value():
    """With no source of a project id, both fallbacks run and None results."""
    project, call_log = _determine_default_helper()
    assert project is None
    assert call_log == ["gcd_mock", ("fallback_mock", None)]
def test__determine_default_project_w_explicit():
    """An explicitly-passed project short-circuits every fallback lookup."""
    project, call_log = _determine_default_helper(project_called=PROJECT)
    assert project == PROJECT
    assert call_log == []
def test__determine_default_project_w_gcd():
    """A GCD-env project is used without consulting the base fallback."""
    project, call_log = _determine_default_helper(gcd=PROJECT)
    assert project == PROJECT
    assert call_log == ["gcd_mock"]
def test__determine_default_project_w_fallback():
    """The base fallback supplies the project when the GCD env does not."""
    project, call_log = _determine_default_helper(fallback=PROJECT)
    assert project == PROJECT
    assert call_log == ["gcd_mock", ("fallback_mock", None)]
def _make_client(
    project=PROJECT,
    namespace=None,
    credentials=None,
    client_info=None,
    client_options=None,
    _http=None,
    _use_grpc=None,
):
    """Construct a ``datastore.Client`` with the given options (all keyword)."""
    from google.cloud.datastore.client import Client

    kwargs = {
        "project": project,
        "namespace": namespace,
        "credentials": credentials,
        "client_info": client_info,
        "client_options": client_options,
        "_http": _http,
        "_use_grpc": _use_grpc,
    }
    return Client(**kwargs)
def test_client_ctor_w_project_no_environ():
    """Constructing without a project fails when no default can be inferred."""
    # Some environments (e.g. AppVeyor CI) run in GCE, so without stubbing
    # out the fallback lookup this test would fail artificially.
    no_default = mock.patch(
        "google.cloud.datastore.client._base_default_project", return_value=None
    )
    with no_default:
        with pytest.raises(EnvironmentError):
            _make_client(project=None)
def test_client_ctor_w_implicit_inputs():
    """With no explicit arguments, the project and credentials are discovered
    via the default lookup helpers and module-level defaults are applied."""
    from google.cloud.datastore.client import Client
    from google.cloud.datastore.client import _CLIENT_INFO
    from google.cloud.datastore.client import _DATASTORE_BASE_URL

    other = "other"
    patch1 = mock.patch(
        "google.cloud.datastore.client._determine_default_project", return_value=other,
    )

    creds = _make_credentials()
    patch2 = mock.patch("google.auth.default", return_value=(creds, None))

    with patch1 as _determine_default_project:
        with patch2 as default:
            client = Client()

    assert client.project == other
    assert client.namespace is None
    assert client._credentials is creds
    assert client._client_info is _CLIENT_INFO
    assert client._http_internal is None
    assert client._client_options is None
    assert client.base_url == _DATASTORE_BASE_URL

    assert client.current_batch is None
    assert client.current_transaction is None

    default.assert_called_once_with(scopes=Client.SCOPE,)
    _determine_default_project.assert_called_once_with(None)
def test_client_ctor_w_explicit_inputs():
    """Every explicitly-passed constructor argument is stored on the client."""
    from google.api_core.client_options import ClientOptions

    project = "other"
    namespace = "namespace"
    credentials = _make_credentials()
    client_info = mock.Mock()
    options = ClientOptions("endpoint")
    http = object()

    client = _make_client(
        project=project,
        namespace=namespace,
        credentials=credentials,
        client_info=client_info,
        client_options=options,
        _http=http,
    )

    assert client.project == project
    assert client.namespace == namespace
    assert client._credentials is credentials
    assert client._client_info is client_info
    assert client._http_internal is http
    assert client.current_batch is None
    assert client._base_url == "endpoint"
    assert list(client._batch_stack) == []
def test_client_ctor_use_grpc_default():
    """``_use_grpc`` defaults from the module-level ``_USE_GRPC`` switch, and
    an explicit constructor argument overrides it in either direction."""
    import google.cloud.datastore.client as MUT

    project = "PROJECT"
    creds = _make_credentials()
    http = object()

    with mock.patch.object(MUT, "_USE_GRPC", new=True):
        client1 = _make_client(project=PROJECT, credentials=creds, _http=http)
        assert client1._use_grpc
        # Explicitly over-ride the environment.
        client2 = _make_client(
            project=project, credentials=creds, _http=http, _use_grpc=False
        )
        assert not client2._use_grpc

    with mock.patch.object(MUT, "_USE_GRPC", new=False):
        client3 = _make_client(project=PROJECT, credentials=creds, _http=http)
        assert not client3._use_grpc
        # Explicitly over-ride the environment.
        client4 = _make_client(
            project=project, credentials=creds, _http=http, _use_grpc=True
        )
        assert client4._use_grpc
def test_client_ctor_w_emulator_w_creds():
    """Explicit credentials are rejected while the emulator env var is set."""
    from google.cloud.datastore.client import DATASTORE_EMULATOR_HOST

    emulated_environ = {DATASTORE_EMULATOR_HOST: "localhost:1234"}
    with mock.patch("os.environ", new=emulated_environ):
        with pytest.raises(ValueError):
            _make_client(
                project="PROJECT", credentials=_make_credentials(), _http=object()
            )
def test_client_ctor_w_emulator_wo_creds():
    """With the emulator active, its URL and anonymous credentials are used."""
    from google.auth.credentials import AnonymousCredentials
    from google.cloud.datastore.client import DATASTORE_EMULATOR_HOST

    host = "localhost:1234"
    emulated_environ = {DATASTORE_EMULATOR_HOST: host}
    with mock.patch("os.environ", new=emulated_environ):
        client = _make_client(project="PROJECT", _http=object())
        assert client.base_url == "http://" + host
        assert isinstance(client._credentials, AnonymousCredentials)
def test_client_base_url_property():
    """``base_url`` defaults to the public endpoint and is writable."""
    from google.api_core.client_options import ClientOptions
    from google.cloud.datastore.client import _DATASTORE_BASE_URL

    client = _make_client(
        credentials=_make_credentials(), client_options=ClientOptions()
    )
    assert client.base_url == _DATASTORE_BASE_URL

    override = "https://alias.example.com/"
    client.base_url = override
    assert client.base_url == override
def test_client_base_url_property_w_client_options():
    """An ``api_endpoint`` in client_options seeds ``base_url``, still writable."""
    client = _make_client(
        credentials=_make_credentials(), client_options={"api_endpoint": "endpoint"},
    )
    assert client.base_url == "endpoint"

    override = "https://alias.example.com/"
    client.base_url = override
    assert client.base_url == override
def test_client__datastore_api_property_already_set():
    """A cached datastore API object is returned without being rebuilt."""
    client = _make_client(credentials=_make_credentials(), _use_grpc=True)
    cached_api = object()
    client._datastore_api_internal = cached_api
    assert client._datastore_api is cached_api
def test_client__datastore_api_property_gapic():
    """With gRPC enabled, the API is built lazily via the GAPIC factory and
    the result is cached on the client."""
    client_info = mock.Mock()
    client = _make_client(
        project="prahj-ekt",
        credentials=_make_credentials(),
        client_info=client_info,
        _http=object(),
        _use_grpc=True,
    )
    assert client._datastore_api_internal is None
    patch = mock.patch(
        "google.cloud.datastore.client.make_datastore_api",
        return_value=mock.sentinel.ds_api,
    )
    with patch as make_api:
        ds_api = client._datastore_api

    assert ds_api is mock.sentinel.ds_api
    assert client._datastore_api_internal is mock.sentinel.ds_api
    make_api.assert_called_once_with(client)
def test__datastore_api_property_http():
    """With gRPC disabled, the API is built lazily via the HTTP implementation
    and the result is cached on the client."""
    client_info = mock.Mock()
    client = _make_client(
        project="prahj-ekt",
        credentials=_make_credentials(),
        client_info=client_info,
        _http=object(),
        _use_grpc=False,
    )
    assert client._datastore_api_internal is None
    patch = mock.patch(
        "google.cloud.datastore.client.HTTPDatastoreAPI",
        return_value=mock.sentinel.ds_api,
    )
    with patch as make_api:
        ds_api = client._datastore_api

    assert ds_api is mock.sentinel.ds_api
    assert client._datastore_api_internal is mock.sentinel.ds_api
    make_api.assert_called_once_with(client)
def test_client__push_batch_and__pop_batch():
    """Batches and transactions nest LIFO on the client's batch stack;
    ``current_batch`` tracks the top element, and ``current_transaction`` is
    set only when that top element is a transaction."""
    creds = _make_credentials()
    client = _make_client(credentials=creds)
    batch = client.batch()
    xact = client.transaction()

    client._push_batch(batch)
    assert list(client._batch_stack) == [batch]
    assert client.current_batch is batch
    assert client.current_transaction is None

    client._push_batch(xact)
    assert client.current_batch is xact
    assert client.current_transaction is xact
    # list(_LocalStack) returns in reverse order.
    assert list(client._batch_stack) == [xact, batch]

    assert client._pop_batch() is xact
    assert list(client._batch_stack) == [batch]
    assert client.current_batch is batch
    assert client.current_transaction is None

    assert client._pop_batch() is batch
    assert list(client._batch_stack) == []
def test_client_get_miss():
    """``get`` returns None (and forwards defaults) when nothing is found."""
    client = _make_client(credentials=_make_credentials())
    get_multi = client.get_multi = mock.Mock(return_value=[])

    key = object()
    assert client.get(key) is None

    get_multi.assert_called_once_with(
        keys=[key],
        missing=None,
        deferred=None,
        transaction=None,
        eventual=False,
        retry=None,
        timeout=None,
    )
def test_client_get_hit():
    """``get`` unwraps the single entity returned by ``get_multi``."""
    txn_id = "123"
    found_entity = object()
    client = _make_client(credentials=_make_credentials())
    get_multi = client.get_multi = mock.Mock(return_value=[found_entity])

    key, missing, deferred = object(), [], []
    assert client.get(key, missing, deferred, txn_id) is found_entity

    get_multi.assert_called_once_with(
        keys=[key],
        missing=missing,
        deferred=deferred,
        transaction=txn_id,
        eventual=False,
        retry=None,
        timeout=None,
    )
def test_client_get_multi_no_keys():
    """An empty key list short-circuits without any backend lookup."""
    client = _make_client(credentials=_make_credentials())
    datastore_api = _make_datastore_api()
    client._datastore_api_internal = datastore_api

    assert client.get_multi([]) == []
    datastore_api.lookup.assert_not_called()
def test_client_get_multi_miss():
    """A lookup with no results yields an empty list but still hits the API."""
    from google.cloud.datastore_v1.types import datastore as datastore_pb2
    from google.cloud.datastore.key import Key

    client = _make_client(credentials=_make_credentials())
    datastore_api = _make_datastore_api()
    client._datastore_api_internal = datastore_api

    key = Key("Kind", 1234, project=PROJECT)
    assert client.get_multi([key]) == []

    datastore_api.lookup.assert_called_once_with(
        request={
            "project_id": PROJECT,
            "keys": [key.to_protobuf()],
            "read_options": datastore_pb2.ReadOptions(),
        }
    )
def test_client_get_multi_miss_w_missing():
    """Entities the backend reports as missing are appended (as key-only
    entities) to the caller-supplied ``missing`` list."""
    from google.cloud.datastore_v1.types import datastore as datastore_pb2
    from google.cloud.datastore_v1.types import entity as entity_pb2
    from google.cloud.datastore.key import Key

    KIND = "Kind"
    ID = 1234

    # Make a missing entity pb to be returned from mock backend.
    missed = entity_pb2.Entity()
    missed.key.partition_id.project_id = PROJECT
    path_element = missed._pb.key.path.add()
    path_element.kind = KIND
    path_element.id = ID

    creds = _make_credentials()
    client = _make_client(credentials=creds)
    # Set missing entity on mock connection.
    lookup_response = _make_lookup_response(missing=[missed._pb])
    ds_api = _make_datastore_api(lookup_response=lookup_response)
    client._datastore_api_internal = ds_api

    key = Key(KIND, ID, project=PROJECT)
    missing = []
    entities = client.get_multi([key], missing=missing)
    assert entities == []
    key_pb = key.to_protobuf()
    assert [missed.key.to_protobuf() for missed in missing] == [key_pb._pb]

    read_options = datastore_pb2.ReadOptions()
    ds_api.lookup.assert_called_once_with(
        request={"project_id": PROJECT, "keys": [key_pb], "read_options": read_options}
    )
def test_client_get_multi_w_missing_non_empty():
    """A pre-populated ``missing`` list is rejected up front."""
    from google.cloud.datastore.key import Key

    client = _make_client(credentials=_make_credentials())
    key = Key("Kind", 1234, project=PROJECT)

    with pytest.raises(ValueError):
        client.get_multi([key], missing=["this", "list", "is", "not", "empty"])
def test_client_get_multi_w_deferred_non_empty():
    """A pre-populated ``deferred`` list is rejected up front."""
    from google.cloud.datastore.key import Key

    client = _make_client(credentials=_make_credentials())
    key = Key("Kind", 1234, project=PROJECT)

    with pytest.raises(ValueError):
        client.get_multi([key], deferred=["this", "list", "is", "not", "empty"])
def test_client_get_multi_miss_w_deferred():
    """Keys the backend defers are appended to the caller-supplied
    ``deferred`` list instead of being silently retried."""
    from google.cloud.datastore_v1.types import datastore as datastore_pb2
    from google.cloud.datastore.key import Key

    key = Key("Kind", 1234, project=PROJECT)
    key_pb = key.to_protobuf()

    # Set deferred entity on mock connection.
    creds = _make_credentials()
    client = _make_client(credentials=creds)
    lookup_response = _make_lookup_response(deferred=[key_pb])
    ds_api = _make_datastore_api(lookup_response=lookup_response)
    client._datastore_api_internal = ds_api

    deferred = []
    entities = client.get_multi([key], deferred=deferred)
    assert entities == []
    assert [def_key.to_protobuf() for def_key in deferred] == [key_pb]

    read_options = datastore_pb2.ReadOptions()
    ds_api.lookup.assert_called_once_with(
        request={"project_id": PROJECT, "keys": [key_pb], "read_options": read_options}
    )
def test_client_get_multi_w_deferred_from_backend_but_not_passed():
    """When the caller does not collect deferred keys, ``get_multi`` retries
    them transparently, issuing follow-up lookups until all keys resolve."""
    from google.cloud.datastore_v1.types import datastore as datastore_pb2
    from google.cloud.datastore_v1.types import entity as entity_pb2
    from google.cloud.datastore.entity import Entity
    from google.cloud.datastore.key import Key

    key1 = Key("Kind", project=PROJECT)
    key1_pb = key1.to_protobuf()
    key2 = Key("Kind", 2345, project=PROJECT)
    key2_pb = key2.to_protobuf()

    entity1_pb = entity_pb2.Entity()
    entity1_pb._pb.key.CopyFrom(key1_pb._pb)
    entity2_pb = entity_pb2.Entity()
    entity2_pb._pb.key.CopyFrom(key2_pb._pb)

    creds = _make_credentials()
    client = _make_client(credentials=creds)
    # Mock up two separate requests. Using an iterable as side_effect
    # allows multiple return values.
    lookup_response1 = _make_lookup_response(results=[entity1_pb], deferred=[key2_pb])
    lookup_response2 = _make_lookup_response(results=[entity2_pb])
    ds_api = _make_datastore_api()
    ds_api.lookup = mock.Mock(side_effect=[lookup_response1, lookup_response2], spec=[])
    client._datastore_api_internal = ds_api

    missing = []
    found = client.get_multi([key1, key2], missing=missing)
    assert len(found) == 2
    assert len(missing) == 0

    # Check the actual contents on the response.
    assert isinstance(found[0], Entity)
    assert found[0].key.path == key1.path
    assert found[0].key.project == key1.project

    assert isinstance(found[1], Entity)
    assert found[1].key.path == key2.path
    assert found[1].key.project == key2.project

    assert ds_api.lookup.call_count == 2
    read_options = datastore_pb2.ReadOptions()

    ds_api.lookup.assert_any_call(
        request={
            "project_id": PROJECT,
            "keys": [key2_pb],
            "read_options": read_options,
        },
    )

    ds_api.lookup.assert_any_call(
        request={
            "project_id": PROJECT,
            "keys": [key1_pb, key2_pb],
            "read_options": read_options,
        },
    )
def test_client_get_multi_hit_w_retry_w_timeout():
    """``retry`` and ``timeout`` arguments are forwarded to the lookup call,
    and a found entity is converted back into a native ``Entity``."""
    from google.cloud.datastore_v1.types import datastore as datastore_pb2
    from google.cloud.datastore.key import Key

    kind = "Kind"
    id_ = 1234
    path = [{"kind": kind, "id": id_}]
    retry = mock.Mock()
    timeout = 100000

    # Make a found entity pb to be returned from mock backend.
    entity_pb = _make_entity_pb(PROJECT, kind, id_, "foo", "Foo")

    # Make a connection to return the entity pb.
    creds = _make_credentials()
    client = _make_client(credentials=creds)
    lookup_response = _make_lookup_response(results=[entity_pb])
    ds_api = _make_datastore_api(lookup_response=lookup_response)
    client._datastore_api_internal = ds_api

    key = Key(kind, id_, project=PROJECT)
    (result,) = client.get_multi([key], retry=retry, timeout=timeout)
    new_key = result.key

    # Check the returned value is as expected.
    assert new_key is not key
    assert new_key.project == PROJECT
    assert new_key.path == path
    assert list(result) == ["foo"]
    assert result["foo"] == "Foo"

    read_options = datastore_pb2.ReadOptions()

    ds_api.lookup.assert_called_once_with(
        request={
            "project_id": PROJECT,
            "keys": [key.to_protobuf()],
            "read_options": read_options,
        },
        retry=retry,
        timeout=timeout,
    )
def test_client_get_multi_hit_w_transaction():
    """Passing a transaction threads its id into the lookup's read options."""
    from google.cloud.datastore_v1.types import datastore as datastore_pb2
    from google.cloud.datastore.key import Key

    txn_id = b"123"
    kind = "Kind"
    id_ = 1234
    path = [{"kind": kind, "id": id_}]

    # Make a found entity pb to be returned from mock backend.
    entity_pb = _make_entity_pb(PROJECT, kind, id_, "foo", "Foo")

    # Make a connection to return the entity pb.
    creds = _make_credentials()
    client = _make_client(credentials=creds)
    lookup_response = _make_lookup_response(results=[entity_pb])
    ds_api = _make_datastore_api(lookup_response=lookup_response)
    client._datastore_api_internal = ds_api

    key = Key(kind, id_, project=PROJECT)
    txn = client.transaction()
    txn._id = txn_id
    (result,) = client.get_multi([key], transaction=txn)
    new_key = result.key

    # Check the returned value is as expected.
    assert new_key is not key
    assert new_key.project == PROJECT
    assert new_key.path == path
    assert list(result) == ["foo"]
    assert result["foo"] == "Foo"

    read_options = datastore_pb2.ReadOptions(transaction=txn_id)
    ds_api.lookup.assert_called_once_with(
        request={
            "project_id": PROJECT,
            "keys": [key.to_protobuf()],
            "read_options": read_options,
        }
    )
def test_client_get_multi_hit_multiple_keys_same_project():
from google.cloud.datastore_v1.types import datastore as datastore_pb2
from google.cloud.datastore.key import Key
kind = "Kind"
id1 = 1234
id2 = 2345
# Make a found entity pb to be returned from mock backend.
entity_pb1 = _make_entity_pb(PROJECT, kind, id1)
entity_pb2 = _make_entity_pb(PROJECT, kind, id2)
# Make a connection to return the entity pbs.
creds = _make_credentials()
client = _make_client(credentials=creds)
lookup_response = _make_lookup_response(results=[entity_pb1, entity_pb2])
ds_api = _make_datastore_api(lookup_response=lookup_response)
client._datastore_api_internal = ds_api
key1 = Key(kind, id1, project=PROJECT)
key2 = Key(kind, id2, project=PROJECT)
retrieved1, retrieved2 = client.get_multi([key1, key2])
# Check values match.
assert retrieved1.key.path == key1.path
assert dict(retrieved1) == {}
assert retrieved2.key.path == key2.path
assert dict(retrieved2) == {}
read_options = datastore_pb2.ReadOptions()
ds_api.lookup.assert_called_once_with(
request={
"project_id": PROJECT,
"keys": [key1.to_protobuf(), key2.to_protobuf()],
"read_options": read_options,
}
)
def test_client_get_multi_hit_multiple_keys_different_project():
from google.cloud.datastore.key import Key
PROJECT1 = "PROJECT"
PROJECT2 = "PROJECT-ALT"
key1 = Key("KIND", 1234, project=PROJECT1)
key2 = Key("KIND", 1234, project=PROJECT2)
creds = _make_credentials()
client = _make_client(credentials=creds)
with pytest.raises(ValueError):
client.get_multi([key1, key2])
def test_client_get_multi_max_loops():
from google.cloud.datastore.key import Key
kind = "Kind"
id_ = 1234
# Make a found entity pb to be returned from mock backend.
entity_pb = _make_entity_pb(PROJECT, kind, id_, "foo", "Foo")
# Make a connection to return the entity pb.
creds = _make_credentials()
client = _make_client(credentials=creds)
lookup_response = _make_lookup_response(results=[entity_pb])
ds_api = _make_datastore_api(lookup_response=lookup_response)
client._datastore_api_internal = ds_api
key = Key(kind, id_, project=PROJECT)
deferred = []
missing = []
patch = mock.patch("google.cloud.datastore.client._MAX_LOOPS", new=-1)
with patch:
result = client.get_multi([key], missing=missing, deferred=deferred)
# Make sure we have no results, even though the connection has been
# set up as in `test_hit` to return a single result.
assert result == []
assert missing == []
assert deferred == []
ds_api.lookup.assert_not_called()
def test_client_put():
    """``Client.put`` must delegate to ``put_multi`` with default retry/timeout."""
    client = _make_client(credentials=_make_credentials())
    put_multi = client.put_multi = mock.Mock()
    entity = mock.Mock()

    client.put(entity)

    # The single entity is forwarded as a one-element list; retry/timeout
    # default to None when the caller does not supply them.
    put_multi.assert_called_once_with(entities=[entity], retry=None, timeout=None)
def test_client_put_w_retry_w_timeout():
creds = _make_credentials()
client = _make_client(credentials=creds)
put_multi = client.put_multi = mock.Mock()
entity = mock.Mock()
retry = mock.Mock()
timeout = 100000
client.put(entity, retry=retry, timeout=timeout)
put_multi.assert_called_once_with(entities=[entity], retry=retry, timeout=timeout)
def test_client_put_multi_no_entities():
creds = _make_credentials()
client = _make_client(credentials=creds)
assert client.put_multi([]) is None
def test_client_put_multi_w_single_empty_entity():
# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/649
from google.cloud.datastore.entity import Entity
creds = _make_credentials()
client = _make_client(credentials=creds)
with pytest.raises(ValueError):
client.put_multi(Entity())
def test_client_put_multi_no_batch_w_partial_key_w_retry_w_timeout():
from google.cloud.datastore_v1.types import datastore as datastore_pb2
entity = _Entity(foo=u"bar")
key = entity.key = _Key(_Key.kind, None)
retry = mock.Mock()
timeout = 100000
creds = _make_credentials()
client = _make_client(credentials=creds)
key_pb = _make_key(234)
ds_api = _make_datastore_api(key_pb)
client._datastore_api_internal = ds_api
result = client.put_multi([entity], retry=retry, timeout=timeout)
assert result is None
ds_api.commit.assert_called_once_with(
request={
"project_id": PROJECT,
"mode": datastore_pb2.CommitRequest.Mode.NON_TRANSACTIONAL,
"mutations": mock.ANY,
"transaction": None,
},
retry=retry,
timeout=timeout,
)
mutations = ds_api.commit.call_args[1]["request"]["mutations"]
mutated_entity = _mutated_pb(mutations, "insert")
assert mutated_entity.key == key.to_protobuf()
prop_list = list(mutated_entity.properties.items())
assert len(prop_list) == 1
name, value_pb = prop_list[0]
assert name == "foo"
assert value_pb.string_value == u"bar"
def test_client_put_multi_existing_batch_w_completed_key():
creds = _make_credentials()
client = _make_client(credentials=creds)
entity = _Entity(foo=u"bar")
key = entity.key = _Key()
with _NoCommitBatch(client) as CURR_BATCH:
result = client.put_multi([entity])
assert result is None
mutated_entity = _mutated_pb(CURR_BATCH.mutations, "upsert")
assert mutated_entity.key == key.to_protobuf()
prop_list = list(mutated_entity.properties.items())
assert len(prop_list) == 1
name, value_pb = prop_list[0]
assert name == "foo"
assert value_pb.string_value == u"bar"
def test_client_delete():
    """``Client.delete`` must delegate to ``delete_multi`` with default retry/timeout."""
    client = _make_client(credentials=_make_credentials())
    delete_multi = client.delete_multi = mock.Mock()
    key = mock.Mock()

    client.delete(key)

    # The single key is forwarded as a one-element list; retry/timeout
    # default to None when the caller does not supply them.
    delete_multi.assert_called_once_with(keys=[key], retry=None, timeout=None)
def test_client_delete_w_retry_w_timeout():
creds = _make_credentials()
client = _make_client(credentials=creds)
delete_multi = client.delete_multi = mock.Mock()
key = mock.Mock()
retry = mock.Mock()
timeout = 100000
client.delete(key, retry=retry, timeout=timeout)
delete_multi.assert_called_once_with(keys=[key], retry=retry, timeout=timeout)
def test_client_delete_multi_no_keys():
creds = _make_credentials()
client = _make_client(credentials=creds)
client._datastore_api_internal = _make_datastore_api()
result = client.delete_multi([])
assert result is None
client._datastore_api_internal.commit.assert_not_called()
def test_client_delete_multi_no_batch_w_retry_w_timeout():
from google.cloud.datastore_v1.types import datastore as datastore_pb2
key = _Key()
retry = mock.Mock()
timeout = 100000
creds = _make_credentials()
client = _make_client(credentials=creds)
ds_api = _make_datastore_api()
client._datastore_api_internal = ds_api
result = client.delete_multi([key], retry=retry, timeout=timeout)
assert result is None
ds_api.commit.assert_called_once_with(
request={
"project_id": PROJECT,
"mode": datastore_pb2.CommitRequest.Mode.NON_TRANSACTIONAL,
"mutations": mock.ANY,
"transaction": None,
},
retry=retry,
timeout=timeout,
)
mutations = ds_api.commit.call_args[1]["request"]["mutations"]
mutated_key = _mutated_pb(mutations, "delete")
assert mutated_key == key.to_protobuf()
def test_client_delete_multi_w_existing_batch():
creds = _make_credentials()
client = _make_client(credentials=creds)
client._datastore_api_internal = _make_datastore_api()
key = _Key()
with _NoCommitBatch(client) as CURR_BATCH:
result = client.delete_multi([key])
assert result is None
mutated_key = _mutated_pb(CURR_BATCH.mutations, "delete")
assert mutated_key == key._key
client._datastore_api_internal.commit.assert_not_called()
def test_client_delete_multi_w_existing_transaction():
creds = _make_credentials()
client = _make_client(credentials=creds)
client._datastore_api_internal = _make_datastore_api()
key = _Key()
with _NoCommitTransaction(client) as CURR_XACT:
result = client.delete_multi([key])
assert result is None
mutated_key = _mutated_pb(CURR_XACT.mutations, "delete")
assert mutated_key == key._key
client._datastore_api_internal.commit.assert_not_called()
def test_client_delete_multi_w_existing_transaction_entity():
from google.cloud.datastore.entity import Entity
creds = _make_credentials()
client = _make_client(credentials=creds)
client._datastore_api_internal = _make_datastore_api()
key = _Key()
entity = Entity(key=key)
with _NoCommitTransaction(client) as CURR_XACT:
result = client.delete_multi([entity])
assert result is None
mutated_key = _mutated_pb(CURR_XACT.mutations, "delete")
assert mutated_key == key._key
client._datastore_api_internal.commit.assert_not_called()
def test_client_allocate_ids_w_completed_key():
creds = _make_credentials()
client = _make_client(credentials=creds)
complete_key = _Key()
with pytest.raises(ValueError):
client.allocate_ids(complete_key, 2)
def test_client_allocate_ids_w_partial_key():
num_ids = 2
incomplete_key = _Key(_Key.kind, None)
creds = _make_credentials()
client = _make_client(credentials=creds, _use_grpc=False)
allocated = mock.Mock(keys=[_KeyPB(i) for i in range(num_ids)], spec=["keys"])
alloc_ids = mock.Mock(return_value=allocated, spec=[])
ds_api = mock.Mock(allocate_ids=alloc_ids, spec=["allocate_ids"])
client._datastore_api_internal = ds_api
result = client.allocate_ids(incomplete_key, num_ids)
# Check the IDs returned.
assert [key.id for key in result] == list(range(num_ids))
expected_keys = [incomplete_key.to_protobuf()] * num_ids
alloc_ids.assert_called_once_with(
request={"project_id": PROJECT, "keys": expected_keys}
)
def test_client_allocate_ids_w_partial_key_w_retry_w_timeout():
num_ids = 2
incomplete_key = _Key(_Key.kind, None)
retry = mock.Mock()
timeout = 100000
creds = _make_credentials()
client = _make_client(credentials=creds, _use_grpc=False)
allocated = mock.Mock(keys=[_KeyPB(i) for i in range(num_ids)], spec=["keys"])
alloc_ids = mock.Mock(return_value=allocated, spec=[])
ds_api = mock.Mock(allocate_ids=alloc_ids, spec=["allocate_ids"])
client._datastore_api_internal = ds_api
result = client.allocate_ids(incomplete_key, num_ids, retry=retry, timeout=timeout)
# Check the IDs returned.
assert [key.id for key in result] == list(range(num_ids))
expected_keys = [incomplete_key.to_protobuf()] * num_ids
alloc_ids.assert_called_once_with(
request={"project_id": PROJECT, "keys": expected_keys},
retry=retry,
timeout=timeout,
)
def test_client_reserve_ids_sequential_w_completed_key():
num_ids = 2
creds = _make_credentials()
client = _make_client(credentials=creds, _use_grpc=False)
complete_key = _Key()
reserve_ids = mock.Mock()
ds_api = mock.Mock(reserve_ids=reserve_ids, spec=["reserve_ids"])
client._datastore_api_internal = ds_api
assert not complete_key.is_partial
client.reserve_ids_sequential(complete_key, num_ids)
reserved_keys = (
_Key(_Key.kind, id) for id in range(complete_key.id, complete_key.id + num_ids)
)
expected_keys = [key.to_protobuf() for key in reserved_keys]
reserve_ids.assert_called_once_with(
request={"project_id": PROJECT, "keys": expected_keys}
)
def test_client_reserve_ids_sequential_w_completed_key_w_retry_w_timeout():
num_ids = 2
retry = mock.Mock()
timeout = 100000
creds = _make_credentials()
client = _make_client(credentials=creds, _use_grpc=False)
complete_key = _Key()
assert not complete_key.is_partial
reserve_ids = mock.Mock()
ds_api = mock.Mock(reserve_ids=reserve_ids, spec=["reserve_ids"])
client._datastore_api_internal = ds_api
client.reserve_ids_sequential(complete_key, num_ids, retry=retry, timeout=timeout)
reserved_keys = (
_Key(_Key.kind, id) for id in range(complete_key.id, complete_key.id + num_ids)
)
expected_keys = [key.to_protobuf() for key in reserved_keys]
reserve_ids.assert_called_once_with(
request={"project_id": PROJECT, "keys": expected_keys},
retry=retry,
timeout=timeout,
)
def test_client_reserve_ids_sequential_w_completed_key_w_ancestor():
num_ids = 2
creds = _make_credentials()
client = _make_client(credentials=creds, _use_grpc=False)
complete_key = _Key("PARENT", "SINGLETON", _Key.kind, 1234)
reserve_ids = mock.Mock()
ds_api = mock.Mock(reserve_ids=reserve_ids, spec=["reserve_ids"])
client._datastore_api_internal = ds_api
assert not complete_key.is_partial
client.reserve_ids_sequential(complete_key, num_ids)
reserved_keys = (
_Key("PARENT", "SINGLETON", _Key.kind, id)
for id in range(complete_key.id, complete_key.id + num_ids)
)
expected_keys = [key.to_protobuf() for key in reserved_keys]
reserve_ids.assert_called_once_with(
request={"project_id": PROJECT, "keys": expected_keys}
)
def test_client_reserve_ids_sequential_w_partial_key():
num_ids = 2
incomplete_key = _Key(_Key.kind, None)
creds = _make_credentials()
client = _make_client(credentials=creds)
with pytest.raises(ValueError):
client.reserve_ids_sequential(incomplete_key, num_ids)
def test_client_reserve_ids_sequential_w_wrong_num_ids():
num_ids = "2"
complete_key = _Key()
creds = _make_credentials()
client = _make_client(credentials=creds)
with pytest.raises(ValueError):
client.reserve_ids_sequential(complete_key, num_ids)
def test_client_reserve_ids_sequential_w_non_numeric_key_name():
num_ids = 2
complete_key = _Key(_Key.kind, "batman")
creds = _make_credentials()
client = _make_client(credentials=creds)
with pytest.raises(ValueError):
client.reserve_ids_sequential(complete_key, num_ids)
def _assert_reserve_ids_warning(warned):
assert len(warned) == 1
assert "Client.reserve_ids is deprecated." in str(warned[0].message)
def test_client_reserve_ids_w_partial_key():
import warnings
num_ids = 2
incomplete_key = _Key(_Key.kind, None)
creds = _make_credentials()
client = _make_client(credentials=creds)
with pytest.raises(ValueError):
with warnings.catch_warnings(record=True) as warned:
client.reserve_ids(incomplete_key, num_ids)
_assert_reserve_ids_warning(warned)
def test_client_reserve_ids_w_wrong_num_ids():
import warnings
num_ids = "2"
complete_key = _Key()
creds = _make_credentials()
client = _make_client(credentials=creds)
with pytest.raises(ValueError):
with warnings.catch_warnings(record=True) as warned:
client.reserve_ids(complete_key, num_ids)
_assert_reserve_ids_warning(warned)
def test_client_reserve_ids_w_non_numeric_key_name():
import warnings
num_ids = 2
complete_key = _Key(_Key.kind, "batman")
creds = _make_credentials()
client = _make_client(credentials=creds)
with pytest.raises(ValueError):
with warnings.catch_warnings(record=True) as warned:
client.reserve_ids(complete_key, num_ids)
_assert_reserve_ids_warning(warned)
def test_client_reserve_ids_w_completed_key():
import warnings
num_ids = 2
creds = _make_credentials()
client = _make_client(credentials=creds, _use_grpc=False)
complete_key = _Key()
reserve_ids = mock.Mock()
ds_api = mock.Mock(reserve_ids=reserve_ids, spec=["reserve_ids"])
client._datastore_api_internal = ds_api
assert not complete_key.is_partial
with warnings.catch_warnings(record=True) as warned:
client.reserve_ids(complete_key, num_ids)
reserved_keys = (
_Key(_Key.kind, id) for id in range(complete_key.id, complete_key.id + num_ids)
)
expected_keys = [key.to_protobuf() for key in reserved_keys]
reserve_ids.assert_called_once_with(
request={"project_id": PROJECT, "keys": expected_keys}
)
_assert_reserve_ids_warning(warned)
def test_client_reserve_ids_w_completed_key_w_retry_w_timeout():
import warnings
num_ids = 2
retry = mock.Mock()
timeout = 100000
creds = _make_credentials()
client = _make_client(credentials=creds, _use_grpc=False)
complete_key = _Key()
assert not complete_key.is_partial
reserve_ids = mock.Mock()
ds_api = mock.Mock(reserve_ids=reserve_ids, spec=["reserve_ids"])
client._datastore_api_internal = ds_api
with warnings.catch_warnings(record=True) as warned:
client.reserve_ids(complete_key, num_ids, retry=retry, timeout=timeout)
reserved_keys = (
_Key(_Key.kind, id) for id in range(complete_key.id, complete_key.id + num_ids)
)
expected_keys = [key.to_protobuf() for key in reserved_keys]
reserve_ids.assert_called_once_with(
request={"project_id": PROJECT, "keys": expected_keys},
retry=retry,
timeout=timeout,
)
_assert_reserve_ids_warning(warned)
def test_client_reserve_ids_w_completed_key_w_ancestor():
import warnings
num_ids = 2
creds = _make_credentials()
client = _make_client(credentials=creds, _use_grpc=False)
complete_key = _Key("PARENT", "SINGLETON", _Key.kind, 1234)
reserve_ids = mock.Mock()
ds_api = mock.Mock(reserve_ids=reserve_ids, spec=["reserve_ids"])
client._datastore_api_internal = ds_api
assert not complete_key.is_partial
with warnings.catch_warnings(record=True) as warned:
client.reserve_ids(complete_key, num_ids)
reserved_keys = (
_Key("PARENT", "SINGLETON", _Key.kind, id)
for id in range(complete_key.id, complete_key.id + num_ids)
)
expected_keys = [key.to_protobuf() for key in reserved_keys]
reserve_ids.assert_called_once_with(
request={"project_id": PROJECT, "keys": expected_keys}
)
_assert_reserve_ids_warning(warned)
def test_client_key_w_project():
KIND = "KIND"
ID = 1234
creds = _make_credentials()
client = _make_client(credentials=creds)
with pytest.raises(TypeError):
client.key(KIND, ID, project=PROJECT)
def test_client_key_wo_project():
kind = "KIND"
id_ = 1234
creds = _make_credentials()
client = _make_client(credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Key", spec=["__call__"])
with patch as mock_klass:
key = client.key(kind, id_)
assert key is mock_klass.return_value
mock_klass.assert_called_once_with(kind, id_, project=PROJECT, namespace=None)
def test_client_key_w_namespace():
kind = "KIND"
id_ = 1234
namespace = object()
creds = _make_credentials()
client = _make_client(namespace=namespace, credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Key", spec=["__call__"])
with patch as mock_klass:
key = client.key(kind, id_)
assert key is mock_klass.return_value
mock_klass.assert_called_once_with(
kind, id_, project=PROJECT, namespace=namespace
)
def test_client_key_w_namespace_collision():
kind = "KIND"
id_ = 1234
namespace1 = object()
namespace2 = object()
creds = _make_credentials()
client = _make_client(namespace=namespace1, credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Key", spec=["__call__"])
with patch as mock_klass:
key = client.key(kind, id_, namespace=namespace2)
assert key is mock_klass.return_value
mock_klass.assert_called_once_with(
kind, id_, project=PROJECT, namespace=namespace2
)
def test_client_entity_w_defaults():
creds = _make_credentials()
client = _make_client(credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Entity", spec=["__call__"])
with patch as mock_klass:
entity = client.entity()
assert entity is mock_klass.return_value
mock_klass.assert_called_once_with(key=None, exclude_from_indexes=())
def test_client_entity_w_explicit():
key = mock.Mock(spec=[])
exclude_from_indexes = ["foo", "bar"]
creds = _make_credentials()
client = _make_client(credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Entity", spec=["__call__"])
with patch as mock_klass:
entity = client.entity(key, exclude_from_indexes)
assert entity is mock_klass.return_value
mock_klass.assert_called_once_with(
key=key, exclude_from_indexes=exclude_from_indexes
)
def test_client_batch():
creds = _make_credentials()
client = _make_client(credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Batch", spec=["__call__"])
with patch as mock_klass:
batch = client.batch()
assert batch is mock_klass.return_value
mock_klass.assert_called_once_with(client)
def test_client_transaction_w_defaults():
creds = _make_credentials()
client = _make_client(credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Transaction", spec=["__call__"])
with patch as mock_klass:
xact = client.transaction()
assert xact is mock_klass.return_value
mock_klass.assert_called_once_with(client)
def test_client_transaction_w_read_only():
from google.cloud.datastore_v1.types import TransactionOptions
creds = _make_credentials()
client = _make_client(credentials=creds)
xact = client.transaction(read_only=True)
options = TransactionOptions(read_only=TransactionOptions.ReadOnly())
assert xact._options == options
assert not xact._options._pb.HasField("read_write")
assert xact._options._pb.HasField("read_only")
assert xact._options._pb.read_only == TransactionOptions.ReadOnly()._pb
def test_client_query_w_other_client():
KIND = "KIND"
creds = _make_credentials()
client = _make_client(credentials=creds)
other = _make_client(credentials=_make_credentials())
with pytest.raises(TypeError):
client.query(kind=KIND, client=other)
def test_client_query_w_project():
KIND = "KIND"
creds = _make_credentials()
client = _make_client(credentials=creds)
with pytest.raises(TypeError):
client.query(kind=KIND, project=PROJECT)
def test_client_query_w_defaults():
creds = _make_credentials()
client = _make_client(credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Query", spec=["__call__"])
with patch as mock_klass:
query = client.query()
assert query is mock_klass.return_value
mock_klass.assert_called_once_with(client, project=PROJECT, namespace=None)
def test_client_query_w_explicit():
kind = "KIND"
namespace = "NAMESPACE"
ancestor = object()
filters = [("PROPERTY", "==", "VALUE")]
projection = ["__key__"]
order = ["PROPERTY"]
distinct_on = ["DISTINCT_ON"]
creds = _make_credentials()
client = _make_client(credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Query", spec=["__call__"])
with patch as mock_klass:
query = client.query(
kind=kind,
namespace=namespace,
ancestor=ancestor,
filters=filters,
projection=projection,
order=order,
distinct_on=distinct_on,
)
assert query is mock_klass.return_value
mock_klass.assert_called_once_with(
client,
project=PROJECT,
kind=kind,
namespace=namespace,
ancestor=ancestor,
filters=filters,
projection=projection,
order=order,
distinct_on=distinct_on,
)
def test_client_query_w_namespace():
kind = "KIND"
namespace = object()
creds = _make_credentials()
client = _make_client(namespace=namespace, credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Query", spec=["__call__"])
with patch as mock_klass:
query = client.query(kind=kind)
assert query is mock_klass.return_value
mock_klass.assert_called_once_with(
client, project=PROJECT, namespace=namespace, kind=kind
)
def test_client_query_w_namespace_collision():
kind = "KIND"
namespace1 = object()
namespace2 = object()
creds = _make_credentials()
client = _make_client(namespace=namespace1, credentials=creds)
patch = mock.patch("google.cloud.datastore.client.Query", spec=["__call__"])
with patch as mock_klass:
query = client.query(kind=kind, namespace=namespace2)
assert query is mock_klass.return_value
mock_klass.assert_called_once_with(
client, project=PROJECT, namespace=namespace2, kind=kind
)
def test_client_reserve_ids_multi_w_partial_key():
incomplete_key = _Key(_Key.kind, None)
creds = _make_credentials()
client = _make_client(credentials=creds)
with pytest.raises(ValueError):
client.reserve_ids_multi([incomplete_key])
def test_client_reserve_ids_multi():
creds = _make_credentials()
client = _make_client(credentials=creds, _use_grpc=False)
key1 = _Key(_Key.kind, "one")
key2 = _Key(_Key.kind, "two")
reserve_ids = mock.Mock()
ds_api = mock.Mock(reserve_ids=reserve_ids, spec=["reserve_ids"])
client._datastore_api_internal = ds_api
client.reserve_ids_multi([key1, key2])
expected_keys = [key1.to_protobuf(), key2.to_protobuf()]
reserve_ids.assert_called_once_with(
request={"project_id": PROJECT, "keys": expected_keys}
)
class _NoCommitBatch(object):
    """Context manager that installs a started ``Batch`` on the client.

    The batch is begun (so it accepts mutations) but is popped rather than
    committed on exit, letting tests inspect the mutations a client call
    queued without any RPC being issued.
    """

    def __init__(self, client):
        from google.cloud.datastore.batch import Batch

        self._client = client
        self._batch = Batch(client)
        # Begin so the batch accepts mutations; it is never committed.
        self._batch.begin()

    def __enter__(self):
        # Make this batch the client's "current" batch.
        self._client._push_batch(self._batch)
        return self._batch

    def __exit__(self, *args):
        # Pop without committing; queued mutations remain inspectable.
        self._client._pop_batch()
class _NoCommitTransaction(object):
    """Context manager that installs a fake in-progress ``Transaction``.

    The transaction gets a canned id and is begun via the *base class*
    ``Batch.begin`` (skipping Transaction's own begin, which would issue an
    RPC); it is popped, never committed, on exit.
    """

    def __init__(self, client, transaction_id="TRANSACTION"):
        from google.cloud.datastore.batch import Batch
        from google.cloud.datastore.transaction import Transaction

        self._client = client
        xact = self._transaction = Transaction(client)
        # Pretend the transaction was already started server-side.
        xact._id = transaction_id
        # Deliberately call Batch.begin (not xact.begin) to avoid an RPC.
        Batch.begin(xact)

    def __enter__(self):
        self._client._push_batch(self._transaction)
        return self._transaction

    def __exit__(self, *args):
        # Pop without committing.
        self._client._pop_batch()
class _Entity(dict):
    """Minimal stand-in for ``datastore.Entity``: a dict plus the few
    attributes the client code reads."""

    # Key (completed or partial) assigned by individual tests.
    key = None
    # No properties excluded from indexes by default.
    exclude_from_indexes = ()
    # Placeholder for protobuf "meaning" metadata; never populated here.
    _meanings: Dict[str, Any] = {}
class _Key(object):
    """Lightweight stand-in for ``datastore.Key`` used by these tests.

    Class attributes give the defaults for a completed key (kind ``KIND``,
    integer id 1234).  Positional constructor arguments form an alternating
    ``(kind, id_or_name, ...)`` "flat path" like the real Key class.
    """

    kind = "KIND"
    id = 1234
    name = None
    _project = project = PROJECT
    _namespace = None
    _key = "KEY"
    _path = None
    _stored = None

    def __init__(self, *flat_path, **kwargs):
        if flat_path:
            self._flat_path = flat_path
            # The last (kind, id_or_name) pair describes this key itself.
            self.kind = flat_path[-2]
            id_or_name = flat_path[-1]
            if isinstance(id_or_name, int):
                self.id = id_or_name
            else:
                # A non-int (string name, or None for a partial key).
                self.id = None
                self.name = id_or_name
        else:
            # No path supplied: fall back to the class-level defaults.
            self._flat_path = [self.kind, self.id]
        # Let tests override arbitrary attributes (e.g. project, namespace).
        self.__dict__.update(kwargs)
        self._kw_args = kwargs

    @property
    def is_partial(self):
        # A key is partial until it has either an id or a name.
        return self.id is None and self.name is None

    def to_protobuf(self):
        """Convert the flat path into a real protobuf ``entity_pb2.Key``."""
        from google.cloud.datastore_v1.types import entity as entity_pb2

        key = self._key = entity_pb2.Key()
        path = self._flat_path
        # Consume the flat path two items (kind, id_or_name) at a time,
        # appending one path element per pair to the raw protobuf.
        while path:
            element = key._pb.path.add()
            kind, id_or_name = path[:2]
            element.kind = kind
            if isinstance(id_or_name, int):
                element.id = id_or_name
            elif id_or_name is not None:
                element.name = id_or_name
            path = path[2:]
        return key

    def completed_key(self, new_id):
        """Return a new key with the trailing (partial) element set to *new_id*."""
        assert self.is_partial
        path = list(self._flat_path)
        path[-1] = new_id
        key_class = type(self)
        new_key = key_class(*path, **self._kw_args)
        return new_key
class _PathElementPB(object):
def __init__(self, id_):
self.id = id_
class _KeyPB:
    """Stand-in for a protobuf Key whose path holds one element with ``id``."""

    def __init__(self, id_):
        # Single-element path, as returned for a freshly allocated id.
        self.path = [_PathElementPB(id_)]
def _mutated_pb(mutation_pb_list, mutation_type):
assert len(mutation_pb_list) == 1
# We grab the only mutation.
mutated_pb = mutation_pb_list[0]
# Then check if it is the correct type.
assert mutated_pb._pb.WhichOneof("operation") == mutation_type
return getattr(mutated_pb, mutation_type)
def _make_key(id_):
    """Build a real protobuf ``Key`` whose path has one element with *id_*."""
    from google.cloud.datastore_v1.types import entity as entity_pb2

    key = entity_pb2.Key()
    # Append a single path element by mutating the underlying raw protobuf.
    elem = key._pb.path.add()
    elem.id = id_
    return key
def _make_commit_response(*keys):
    """Build a ``CommitResponse`` with one ``MutationResult`` per key."""
    from google.cloud.datastore_v1.types import datastore as datastore_pb2

    mutation_results = [datastore_pb2.MutationResult(key=key) for key in keys]
    return datastore_pb2.CommitResponse(mutation_results=mutation_results)
def _make_lookup_response(results=(), missing=(), deferred=()):
entity_results_found = [
mock.Mock(entity=result, spec=["entity"]) for result in results
]
entity_results_missing = [
mock.Mock(entity=missing_entity, spec=["entity"]) for missing_entity in missing
]
return mock.Mock(
found=entity_results_found,
missing=entity_results_missing,
deferred=deferred,
spec=["found", "missing", "deferred"],
)
def _make_datastore_api(*keys, **kwargs):
    """Build a fake datastore API stubbing only ``commit`` and ``lookup``.

    ``commit`` returns a canned ``CommitResponse`` holding *keys*; ``lookup``
    returns ``kwargs['lookup_response']`` (default: an empty lookup response).
    """
    lookup_response = kwargs.pop("lookup_response", _make_lookup_response())
    return mock.Mock(
        commit=mock.Mock(return_value=_make_commit_response(*keys), spec=[]),
        lookup=mock.Mock(return_value=lookup_response, spec=[]),
        spec=["commit", "lookup"],
    )
def _make_credentials():
    """Return mock credentials satisfying google-auth's ``Credentials`` spec."""
    import google.auth.credentials

    credentials_class = google.auth.credentials.Credentials
    return mock.Mock(spec=credentials_class)
def _make_entity_pb(project, kind, integer_id, name=None, str_val=None):
    """Build an ``Entity`` protobuf with a (kind, integer_id) key.

    When both *name* and *str_val* are given, a single string property
    ``name`` = *str_val* is attached; otherwise the entity has no properties.
    """
    from google.cloud.datastore_v1.types import entity as entity_pb2
    from google.cloud.datastore.helpers import _new_value_pb

    entity_pb = entity_pb2.Entity()
    entity_pb.key.partition_id.project_id = project
    # Append the single (kind, id) element to the raw protobuf key path.
    path_element = entity_pb._pb.key.path.add()
    path_element.kind = kind
    path_element.id = integer_id
    # Only attach a property when both a name and a value were supplied.
    if name is not None and str_val is not None:
        value_pb = _new_value_pb(entity_pb, name)
        value_pb.string_value = str_val
    return entity_pb
| StarcoderdataPython |
171962 | #crop and resize the image
# Center-crop a retina image with two approaches (Pillow vs. scikit-image),
# timing each; the scikit-image path additionally downsamples to 256x256.
from PIL import Image
import os
# for image read and save
from skimage import io
from skimage.transform import resize
import time

IMAGE_PATH = "/home/tirth/Diabetic Retinopathy/Data Cleaning/sample/16_left.jpeg"
CROP_SIZE = 1800  # side length (pixels) of the square center crop
RESIZED_SIZE = 256  # side length (pixels) of the final thumbnail


def center_crop_box(width, height, crop):
    """Return the (left, upper, right, lower) box of a centered square crop.

    Matches the argument order PIL's ``Image.crop`` expects.  The original
    code computed the same values but under misleading names (``bot`` held
    the right edge and ``right`` the lower edge).
    """
    left = width // 2 - crop // 2
    upper = height // 2 - crop // 2
    right = width // 2 + crop // 2
    lower = height // 2 + crop // 2
    return left, upper, right, lower


# --- Approach 1: Pillow crop only ------------------------------------------
start_time_first = time.time()
image = Image.open(IMAGE_PATH)
img = io.imread(IMAGE_PATH)
height, width, channel = img.shape
print('Before: ', height, width, channel)

image_cropped = image.crop(center_crop_box(width, height, CROP_SIZE))
image_cropped.save(str(CROP_SIZE) + '.jpeg')
print("--- %s seconds ---" % (time.time() - start_time_first))

# --- Approach 2: scikit-image crop + resize ---------------------------------
start_time_second = time.time()
img = io.imread(IMAGE_PATH)
y, x, channel = img.shape
startx = x // 2 - (CROP_SIZE // 2)
starty = y // 2 - (CROP_SIZE // 2)
img = img[starty:starty + CROP_SIZE, startx:startx + CROP_SIZE]
img = resize(img, (RESIZED_SIZE, RESIZED_SIZE), mode='constant')
io.imsave(str(RESIZED_SIZE) + '.jpeg', img)
print("--- %s seconds ---" % (time.time() - start_time_second))
| StarcoderdataPython |
6462534 | #!/usr/bin/env python3
# Complete the isBalanced function below.

# Map each closing bracket to the opening bracket it must pair with.
_PAIRS = {")": "(", "]": "[", "}": "{"}


def is_balanced(s):
    """Return True if every bracket in *s* is properly matched and nested.

    Uses a stack: a closer that matches the top of the stack pops it;
    anything else is pushed.  The string is balanced iff the stack ends empty.
    """
    stack = []
    for ch in s:
        if stack and _PAIRS.get(ch) == stack[-1]:
            stack.pop()
        else:
            stack.append(ch)
    return not stack


if __name__ == "__main__":
    # HackerRank-style driver: first line gives the number of test cases,
    # each following line is one bracket sequence.
    for _ in range(int(input())):
        print("YES" if is_balanced(input()) else "NO")
| StarcoderdataPython |
1952269 | <reponame>yumauri/kings_and_pigs
import pygame
from ..functions import loader
from .animation import Animation
from .animated_entity import AnimatedEntity
# dialogue sprites loader
load_image = loader("kings_and_pigs/data/sprites/13-Dialogue Boxes")
class Dialogue(AnimatedEntity):
    """Speech-bubble sprite: plays its show animation, lingers, then plays
    the hide animation and removes itself (``kill``) when that finishes."""

    def __init__(self, x, y, show, hide):
        # x, y: sprite position; show/hide: frame sheets for the two phases.
        # NOTE(review): the 3 and 2 look like per-frame speed/duration values
        # for Animation — confirm against the Animation class.
        self.animation_show = Animation(show, 3)
        self.animation_hide = Animation(hide, 2)
        # make dialog to be visible a bit longer
        # by duplicating latest frame 5 more times
        self.animation_show.frames.extend([self.animation_show.frames[-1]] * 5)
        super().__init__(x, y, self.animation_show)

        def disappeared():
            # Hide animation finished: remove the sprite entirely.
            self.kill()

        def appeared():
            # Show animation finished: switch to hiding, then self-destruct.
            self.change_animation(self.animation_hide)
            self.animation_hide.on_done(disappeared, True)

        self.animation_show.on_done(appeared, True)
class AwareDialogue(Dialogue):
    """Exclamation-mark ("!!!") bubble, built from the matching sprite sheets."""

    def __init__(self, x, y):
        show = load_image("!!! In (24x8).png")
        hide = load_image("!!! Out (24x8).png")
        super().__init__(x, y, show, hide)
class AttackDialogue(Dialogue):
    """"Attack" bubble, built from the matching sprite sheets."""

    def __init__(self, x, y):
        show = load_image("Attack In (24x8).png")
        hide = load_image("Attack Out (24x8).png")
        super().__init__(x, y, show, hide)
class ConfuseDialogue(Dialogue):
    """Question-mark bubble ("Interrogation" sprite sheets)."""

    def __init__(self, x, y):
        show = load_image("Interrogation In (24x8).png")
        hide = load_image("Interrogation Out (24x8).png")
        super().__init__(x, y, show, hide)
| StarcoderdataPython |
4802744 | <reponame>jklymak/dolfyn
from setuptools import setup, find_packages
import os
import shutil
# Change this to True if you want to include the tests and test data
# in the distribution.
include_tests = False

try:
    # This deals with a bug where the tests aren't excluded due to not
    # rebuilding the files in this folder.
    shutil.rmtree('dolfyn.egg-info')
except OSError:
    # The egg-info folder may simply not exist yet; safe to ignore.
    pass

# Get the version info. We do this to avoid importing __init__, which
# depends on other packages that may not yet be installed.
base_dir = os.path.abspath(os.path.dirname(__file__))
version = {}
with open(base_dir + "/dolfyn/_version.py") as fp:
    # Executes _version.py in an empty namespace; it defines __version__.
    exec(fp.read(), version)

# Keyword arguments forwarded to setuptools.setup() below.
config = dict(
    name='dolfyn',
    version=version['__version__'],
    description='Doppler Ocean Library for pYthoN.',
    author='DOLfYN Developers',
    author_email='<EMAIL>',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Topic :: Scientific/Engineering',
    ],
    url='http://github.com/lkilcher/dolfyn',
    packages=find_packages(exclude=['dolfyn.tests']),
    # ['dolfyn', 'dolfyn.adv', 'dolfyn.io', 'dolfyn.data',
    #  'dolfyn.rotate', 'dolfyn.tools', 'dolfyn.adp', ],
    package_data={},
    install_requires=['numpy', 'scipy', 'six', 'xarray', 'netcdf4'],
    provides=['dolfyn', ],
    scripts=['scripts/motcorrect_vector.py', 'scripts/binary2mat.py'],
)

if include_tests:
    # Re-add the test package and its data files to the distribution.
    config['packages'].append('dolfyn.tests')
    config['package_data'].update({'dolfyn.tests': ['data/*']},)

setup(**config)
| StarcoderdataPython |
11256496 | begin_unit
comment|'# Copyright 2010-2011 OpenStack Foundation'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'datetime'
newline|'\n'
name|'import'
name|'six'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'utils'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|ViewBuilder
name|'class'
name|'ViewBuilder'
op|'('
name|'object'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""OpenStack API base limits view builder."""'
newline|'\n'
nl|'\n'
DECL|variable|limit_names
name|'limit_names'
op|'='
op|'{'
op|'}'
newline|'\n'
nl|'\n'
DECL|member|__init__
name|'def'
name|'__init__'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'limit_names'
op|'='
op|'{'
nl|'\n'
string|'"ram"'
op|':'
op|'['
string|'"maxTotalRAMSize"'
op|']'
op|','
nl|'\n'
string|'"instances"'
op|':'
op|'['
string|'"maxTotalInstances"'
op|']'
op|','
nl|'\n'
string|'"cores"'
op|':'
op|'['
string|'"maxTotalCores"'
op|']'
op|','
nl|'\n'
string|'"key_pairs"'
op|':'
op|'['
string|'"maxTotalKeypairs"'
op|']'
op|','
nl|'\n'
string|'"floating_ips"'
op|':'
op|'['
string|'"maxTotalFloatingIps"'
op|']'
op|','
nl|'\n'
string|'"metadata_items"'
op|':'
op|'['
string|'"maxServerMeta"'
op|','
string|'"maxImageMeta"'
op|']'
op|','
nl|'\n'
string|'"injected_files"'
op|':'
op|'['
string|'"maxPersonality"'
op|']'
op|','
nl|'\n'
string|'"injected_file_content_bytes"'
op|':'
op|'['
string|'"maxPersonalitySize"'
op|']'
op|','
nl|'\n'
string|'"security_groups"'
op|':'
op|'['
string|'"maxSecurityGroups"'
op|']'
op|','
nl|'\n'
string|'"security_group_rules"'
op|':'
op|'['
string|'"maxSecurityGroupRules"'
op|']'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
DECL|member|build
dedent|''
name|'def'
name|'build'
op|'('
name|'self'
op|','
name|'rate_limits'
op|','
name|'absolute_limits'
op|')'
op|':'
newline|'\n'
indent|' '
name|'rate_limits'
op|'='
name|'self'
op|'.'
name|'_build_rate_limits'
op|'('
name|'rate_limits'
op|')'
newline|'\n'
name|'absolute_limits'
op|'='
name|'self'
op|'.'
name|'_build_absolute_limits'
op|'('
name|'absolute_limits'
op|')'
newline|'\n'
nl|'\n'
name|'output'
op|'='
op|'{'
nl|'\n'
string|'"limits"'
op|':'
op|'{'
nl|'\n'
string|'"rate"'
op|':'
name|'rate_limits'
op|','
nl|'\n'
string|'"absolute"'
op|':'
name|'absolute_limits'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'return'
name|'output'
newline|'\n'
nl|'\n'
DECL|member|_build_absolute_limits
dedent|''
name|'def'
name|'_build_absolute_limits'
op|'('
name|'self'
op|','
name|'absolute_limits'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Builder for absolute limits\n\n absolute_limits should be given as a dict of limits.\n For example: {"ram": 512, "gigabytes": 1024}.\n\n """'
newline|'\n'
name|'limits'
op|'='
op|'{'
op|'}'
newline|'\n'
name|'for'
name|'name'
op|','
name|'value'
name|'in'
name|'six'
op|'.'
name|'iteritems'
op|'('
name|'absolute_limits'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'name'
name|'in'
name|'self'
op|'.'
name|'limit_names'
name|'and'
name|'value'
name|'is'
name|'not'
name|'None'
op|':'
newline|'\n'
indent|' '
name|'for'
name|'limit_name'
name|'in'
name|'self'
op|'.'
name|'limit_names'
op|'['
name|'name'
op|']'
op|':'
newline|'\n'
indent|' '
name|'limits'
op|'['
name|'limit_name'
op|']'
op|'='
name|'value'
newline|'\n'
dedent|''
dedent|''
dedent|''
name|'return'
name|'limits'
newline|'\n'
nl|'\n'
DECL|member|_build_rate_limits
dedent|''
name|'def'
name|'_build_rate_limits'
op|'('
name|'self'
op|','
name|'rate_limits'
op|')'
op|':'
newline|'\n'
indent|' '
name|'limits'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'rate_limit'
name|'in'
name|'rate_limits'
op|':'
newline|'\n'
indent|' '
name|'_rate_limit_key'
op|'='
name|'None'
newline|'\n'
name|'_rate_limit'
op|'='
name|'self'
op|'.'
name|'_build_rate_limit'
op|'('
name|'rate_limit'
op|')'
newline|'\n'
nl|'\n'
comment|'# check for existing key'
nl|'\n'
name|'for'
name|'limit'
name|'in'
name|'limits'
op|':'
newline|'\n'
indent|' '
name|'if'
op|'('
name|'limit'
op|'['
string|'"uri"'
op|']'
op|'=='
name|'rate_limit'
op|'['
string|'"URI"'
op|']'
name|'and'
nl|'\n'
name|'limit'
op|'['
string|'"regex"'
op|']'
op|'=='
name|'rate_limit'
op|'['
string|'"regex"'
op|']'
op|')'
op|':'
newline|'\n'
indent|' '
name|'_rate_limit_key'
op|'='
name|'limit'
newline|'\n'
name|'break'
newline|'\n'
nl|'\n'
comment|"# ensure we have a key if we didn't find one"
nl|'\n'
dedent|''
dedent|''
name|'if'
name|'not'
name|'_rate_limit_key'
op|':'
newline|'\n'
indent|' '
name|'_rate_limit_key'
op|'='
op|'{'
nl|'\n'
string|'"uri"'
op|':'
name|'rate_limit'
op|'['
string|'"URI"'
op|']'
op|','
nl|'\n'
string|'"regex"'
op|':'
name|'rate_limit'
op|'['
string|'"regex"'
op|']'
op|','
nl|'\n'
string|'"limit"'
op|':'
op|'['
op|']'
op|','
nl|'\n'
op|'}'
newline|'\n'
name|'limits'
op|'.'
name|'append'
op|'('
name|'_rate_limit_key'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'_rate_limit_key'
op|'['
string|'"limit"'
op|']'
op|'.'
name|'append'
op|'('
name|'_rate_limit'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
name|'limits'
newline|'\n'
nl|'\n'
DECL|member|_build_rate_limit
dedent|''
name|'def'
name|'_build_rate_limit'
op|'('
name|'self'
op|','
name|'rate_limit'
op|')'
op|':'
newline|'\n'
indent|' '
name|'_get_utc'
op|'='
name|'datetime'
op|'.'
name|'datetime'
op|'.'
name|'utcfromtimestamp'
newline|'\n'
name|'next_avail'
op|'='
name|'_get_utc'
op|'('
name|'rate_limit'
op|'['
string|'"resetTime"'
op|']'
op|')'
newline|'\n'
name|'return'
op|'{'
nl|'\n'
string|'"verb"'
op|':'
name|'rate_limit'
op|'['
string|'"verb"'
op|']'
op|','
nl|'\n'
string|'"value"'
op|':'
name|'rate_limit'
op|'['
string|'"value"'
op|']'
op|','
nl|'\n'
string|'"remaining"'
op|':'
name|'int'
op|'('
name|'rate_limit'
op|'['
string|'"remaining"'
op|']'
op|')'
op|','
nl|'\n'
string|'"unit"'
op|':'
name|'rate_limit'
op|'['
string|'"unit"'
op|']'
op|','
nl|'\n'
string|'"next-available"'
op|':'
name|'utils'
op|'.'
name|'isotime'
op|'('
name|'next_avail'
op|')'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|ViewBuilderV21
dedent|''
dedent|''
name|'class'
name|'ViewBuilderV21'
op|'('
name|'ViewBuilder'
op|')'
op|':'
newline|'\n'
nl|'\n'
DECL|member|__init__
indent|' '
name|'def'
name|'__init__'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'ViewBuilderV21'
op|','
name|'self'
op|')'
op|'.'
name|'__init__'
op|'('
op|')'
newline|'\n'
comment|'# NOTE In v2.0 these are added by a specific extension'
nl|'\n'
name|'self'
op|'.'
name|'limit_names'
op|'['
string|'"server_groups"'
op|']'
op|'='
op|'['
string|'"maxServerGroups"'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'limit_names'
op|'['
string|'"server_group_members"'
op|']'
op|'='
op|'['
string|'"maxServerGroupMembers"'
op|']'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| StarcoderdataPython |
1638100 | <gh_stars>1-10
# TODO : Clean var ; rem cache
import query_retreive as qr
import retreiveData as rd
import time,pickle
from flask import Flask, jsonify,request
# Flask application serving the single '/' polling route below.
app = Flask(__name__)
# NOTE(review): the bare string below is a leftover TODO placeholder.
'''
ADD TEMPLATE :
'''
@app.route('/',methods=["GET"])
def query():
    """Poll for incoming SMS messages forever and reply via qr.sendSMS.

    NOTE(review): this Flask view never returns a response — it loops
    forever polling qr.response() every 5 seconds, so the HTTP request
    that triggers it will hang. Consider moving the loop to a worker.
    """
    previous = ""
    # Pickled mapping of pincode -> place name — TODO confirm schema.
    file = open("./aipc.pickle","rb")
    placename = pickle.load(file)
    file.close()
    while(1):
        ans,number = qr.response()
        # Strips the first two characters of the sender number —
        # presumably a country-code prefix; confirm the expected format.
        number = str(number)[2:]
        if(ans != previous):
            temp = qr.get_context(ans)
            flag = -1
            if temp[-1]==0:
                # Direct reply: get_context already produced the message.
                qr.sendSMS(number,temp[0])
                print("SENT :",temp[0])
                print("*"*80)
                response = [number,temp[0]]
                flag = 0
            else:
                # Deferred reply: flag selects which lookup runs below.
                flag = temp[-1]
                response = temp[:-1]
            if flag == 2:
                # Directions request between two places.
                data = rd.google_directions(response[0],response[1],response[2])
                qr.sendSMS(number,data)
                print("SENT :",data)
                print("*"*80)
            elif flag == 3:
                # Address-detail lookup.
                data = rd.process_detail(response[0])
                qr.sendSMS(number,"ADDRESS: \n"+data)
                print("SENT :",data)
                print("*"*80)
            elif flag:
                # Area query keyed by pincode.
                # NOTE(review): local 'query' shadows this function's name.
                pincode = response[0]
                query = response[1]
                contact = number
                # pin code conversion: pincode -> place name
                data = rd.retreive_area(placename[str(pincode)],query)
                if len(data) == 1:
                    qr.sendSMS(number,data)
                    print("SENT :",data)
                    print("*"*80)
                elif data != None:
                    # Join the 2- or 3-part result into one message body.
                    if data[1]!=None:
                        data = data[0]+"\n"+data[1]+"\n"+data[2]
                    else:
                        data = data[0]+"\n"+data[2]
                    print("SENT :",data)
                    print("*"*80)
                    qr.sendSMS(number,data)
            print("Message Sent : app.py")
            previous = ans
        time.sleep(5)
if __name__ == "__main__":
    # Development server; query() never returns once triggered.
    app.run(debug=True,port=8080)
| StarcoderdataPython |
6628434 | <gh_stars>1-10
def celsius_to_fahrenheit(celsius):
    """Convert a temperature from degrees Celsius to Fahrenheit."""
    return celsius * 9 / 5 + 32


if __name__ == "__main__":
    # float() accepts fractional temperatures; the original int() crashed
    # on inputs like "25.5".
    temperatura = float(input('informe a temperatura em graus celcius:'))
    converssão = celsius_to_fahrenheit(temperatura)
    print('A temperatura em graus celcius é de {}C \nApós de ser convertida para fharenheit fica {}F'.format(temperatura, converssão))
| StarcoderdataPython |
11258316 | #!BPY
"""
Name: 'Newton Alchemedia Format (.xml)'
Blender: 243
Group: 'Import'
Tooltip: 'Import from a Newton Alchemedia file format (.xml).'
"""
# --------------------------------------------------------------------------
# Licence
# Created: 20/08/2010
# Copyright: Copyright (c) <2010> <Newton Game Dynamics>
# License:
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely
# --------------------------------------------------------------------------
import Blender, bpy, math, pyScene
from Blender import Mesh, Object, Material, Texture, Image, sys
from Blender.BGL import *
from Blender.Draw import *
from Blender.Window import *
from Blender.Mathutils import Vector
import os
import struct
from types import *
#global variables
# Default file path shown in the GUI file selector.
g_filename = Create ("C:/Newton_200/NewtonSDK/samples/bin/test.xml")
# Maps alchemedia texture id -> Blender Texture; filled during loading.
g_textureMap = {}
# Events (button/widget ids dispatched to bevent()).
EVENT_NOEVENT = 1
EVENT_SCENE = 2
EVENT_CHOOSE_FILENAME = 3
EVENT_EXIT = 100
#
# Find the mesh node (if any) attached to a scene node
#
def GetMeshNode (scene, sceneNode):
    """Return the first child of *sceneNode* that is a mesh node, or None."""
    childLink = scene.GetFirstChildLink (sceneNode)
    while childLink != None:
        childNode = scene.GetNodeFromLink(childLink)
        if scene.IsMeshNode(childNode) == True:
            return childNode
        childLink = scene.GetNextChildLink (sceneNode, childLink)
    return None
#
# Create blender Texture object from node
#
def CreateBlenderTextureFromNode (scene, textureNode, blenderScene, path):
    """Create a Blender Texture from an alchemedia texture node.

    The image file is resolved relative to *path* (the scene file's folder).
    """
    tex = Texture.New(scene.GetNodeName(textureNode))
    texture = pyScene.pyTexture (scene, textureNode)
    imageName = path + '/' + texture.GetImageName()
    image = Blender.Image.Load(imageName)
    tex.setImage(image)
    return tex
#
# Create blender Material object from node
#
def CreateBlenderMaterialFromNode (scene, materialNode, blenderScene):
    """Create a Blender Material from an alchemedia material node,
    attaching any child textures (looked up in g_textureMap by id)."""
    mat = Material.New(scene.GetNodeName(materialNode))
    # load the textures for this material
    childLink = scene.GetFirstChildLink (materialNode)
    while childLink != None:
        textureNode = scene.GetNodeFromLink(childLink)
        if scene.IsTextureNode(textureNode) == True:
            sourceTexture = pyScene.pyTexture(scene, textureNode)
            texture = g_textureMap[sourceTexture.GetId()]
            #mat.setTexture(0, texture)
            # Map the texture to UV coordinates and to diffuse color.
            mat.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL)
        childLink = scene.GetNextChildLink (materialNode, childLink)
    return mat
#
# Create blender empty object from node
#
def CreateBlenderEmptyOject (scene, node, blenderScene):
    """Create an Empty Blender object named after the scene node.

    NOTE(review): the function name misspells 'Object'; kept as-is
    because callers in this file use this spelling.
    """
    object = blenderScene.objects.new('Empty', scene.GetNodeName(node))
    return object
#
# Create blender mesh object from node
#
def CreateBlenderMeshObjectFromNode (scene, meshNode, blenderScene):
    """Convert an alchemedia mesh node into a Blender mesh object.

    Copies vertices, triangles, materials and UV coordinates, then links
    the new mesh into *blenderScene* and returns the created object.
    """
    # Get an alchemedia mesh for the node and build a blender mesh
    mesh = pyScene.pyMesh (scene, meshNode)
    # create a blender mesh
    newMesh = bpy.data.meshes.new(scene.GetNodeName(meshNode))
    # Create all vertex and face data
    vertexList = []
    vertexCount = mesh.GetVertexCount()
    for i in range(0, vertexCount):
        vertex = mesh.GetVertex (i)
        vertexList.append([vertex.x, vertex.y, vertex.z])
    faceList = []
    faceNode = mesh.GetFirstTriangle()
    while faceNode != None:
        face = mesh.GetTriangle (faceNode);
        faceList.append([face.p0.vertex, face.p1.vertex, face.p2.vertex])
        faceNode = mesh.GetNextTriangle(faceNode)
    newMesh.verts.extend(vertexList) # add vertices to mesh
    newMesh.faces.extend(faceList) # add faces to the mesh (also adds edges)
    # create all materials from this mesh
    materialIndex = 0
    materialMap = {}
    meshMaterials = newMesh.materials
    childLink = scene.GetFirstChildLink (meshNode)
    while childLink != None:
        childNode = scene.GetNodeFromLink(childLink)
        if scene.IsMaterialNode(childNode) == True:
            # make a blender material from the alchemedia material
            material = CreateBlenderMaterialFromNode (scene, childNode, blenderScene)
            meshMaterials.append(material)
            # add a map key for assigning faces
            sourceMaterial = pyScene.pyMaterial(scene, childNode)
            materialMap[sourceMaterial.GetId()] = materialIndex
            materialIndex = materialIndex + 1
        childLink = scene.GetNextChildLink (meshNode, childLink)
    newMesh.materials = meshMaterials
    # In a second pass assign material and uv mapping to faces
    faceIndex = 0
    newMesh.faceUV= True
    faceNode = mesh.GetFirstTriangle()
    while faceNode != None:
        face = mesh.GetTriangle (faceNode);
        newMesh.faces[faceIndex].mat = materialMap[face.materialIndex]
        uv0 = mesh.GetUV0(face.p0.uv0)
        uv1 = mesh.GetUV0(face.p1.uv0)
        uv2 = mesh.GetUV0(face.p2.uv0)
        newMesh.faces[faceIndex].uv = [Vector(uv0.x, uv0.y), Vector(uv1.x, uv1.y), Vector(uv2.x, uv2.y)]
        faceIndex = faceIndex + 1
        faceNode = mesh.GetNextTriangle(faceNode)
    # link mesh to blend objects
    object = blenderScene.objects.new(newMesh, scene.GetNodeName(meshNode))
    # calculate normal after mesh is parented
    #newMesh.mode |= Blender.Mesh.Modes.AUTOSMOOTH
    #newMesh.degr = 30
    #for face in newMesh.faces:
    #    face.smooth = 1
    #newMesh.calcNormals()
    return object
#
# Load all scene objects
#
def LoadNodesScene(scene, rootNode, blenderScene, chidrenList):
    '''Recursively convert an alchemedia scene node (and its children)
    into parented Blender objects; appends the new object to chidrenList.'''
    blenderObject = None
    meshNode = GetMeshNode(scene, rootNode)
    if meshNode != None:
        blenderObject = CreateBlenderMeshObjectFromNode (scene, meshNode, blenderScene)
    else:
        blenderObject = CreateBlenderEmptyOject (scene, rootNode, blenderScene)
    # add the object to the children list
    chidrenList.append(blenderObject)
    # see if this node has more children and add them to the scene
    myChidren = []
    childLink = scene.GetFirstChildLink (rootNode)
    while childLink != None:
        childNode = scene.GetNodeFromLink(childLink)
        if scene.IsSceneNode(childNode) == True:
            LoadNodesScene(scene, childNode, blenderScene, myChidren)
        childLink = scene.GetNextChildLink (rootNode, childLink)
    blenderObject.makeParent(myChidren)
    # set the object name
    blenderObject.setName (scene.GetNodeName(rootNode))
    # set position and orientation
    object = pyScene.pyObject (scene, rootNode)
    #posit = object.GetLocalPosition()
    #eulers = object.GetLocalEulers()
    #scale = object.GetLocalScale()
    #euler = [eulers.x, eulers.y, eulers.z]
    #blenderObject.setSize(scale.x, scale.y, scale.z)
    #blenderObject.setLocation(posit.x, posit.y, posit.z)
    #blenderObject.setEuler(euler)
    # Apply the full local transform as a 4x4 matrix instead of the
    # commented-out position/euler/scale path above.
    objectMatrix = object.GetMatrix4x4()
    blenderMatrix = Blender.Mathutils.Matrix([1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1])
    blenderMatrix[0][0] = objectMatrix.e00
    blenderMatrix[0][1] = objectMatrix.e01
    blenderMatrix[0][2] = objectMatrix.e02
    blenderMatrix[1][0] = objectMatrix.e10
    blenderMatrix[1][1] = objectMatrix.e11
    blenderMatrix[1][2] = objectMatrix.e12
    blenderMatrix[2][0] = objectMatrix.e20
    blenderMatrix[2][1] = objectMatrix.e21
    blenderMatrix[2][2] = objectMatrix.e22
    blenderMatrix[3][0] = objectMatrix.e30
    blenderMatrix[3][1] = objectMatrix.e31
    blenderMatrix[3][2] = objectMatrix.e32
    blenderObject.setMatrix(blenderMatrix)
#
# implement main scene loader function
#
def LoadAlchemediaScene(filename):
    ''' Load a newton Alchemedia file into the active Blender scene. '''
    scene = pyScene.pyScene()
    scene.Load (filename)
    root = scene.GetRoot()
    # get the active blender scene
    blenderScene = bpy.data.scenes.active
    # load all unique textures (keyed by texture id in g_textureMap)
    (path, name) = os.path.split (filename)
    childLink = scene.GetFirstChildLink (root)
    while childLink != None:
        textureNode = scene.GetNodeFromLink(childLink)
        if scene.IsTextureNode(textureNode) == True:
            texture = CreateBlenderTextureFromNode (scene, textureNode, blenderScene, path)
            # add a map key for assigning faces
            sourceTexture = pyScene.pyTexture(scene, textureNode)
            g_textureMap[sourceTexture.GetId()] = texture
        childLink = scene.GetNextChildLink (root, childLink)
    # import all objects into a blender scene
    myChidren = []
    childLink = scene.GetFirstChildLink (root)
    while childLink != None:
        node = scene.GetNodeFromLink(childLink)
        if scene.IsSceneNode(node) == True:
            LoadNodesScene(scene, node, blenderScene, myChidren)
        childLink = scene.GetNextChildLink (root, childLink)
    # make sure everything is updated before exiting
    # I see this in some demos, but nothing seems to make the scene render completely
    blenderScene.update(1)
    #Blender.Redraw()
######################################################
# Callbacks for Window functions
######################################################
def filename_callback(input_filename):
    """File-selector callback: store the chosen path in the GUI field."""
    global g_filename
    g_filename.val = input_filename
######################################################
# GUI Loader
######################################################
def draw_gui():
    """Draw the loader window: title, filename field, Browse/Load/Cancel."""
    global g_filename
    global EVENT_NOEVENT, EVENT_SCENE, EVENT_CHOOSE_FILENAME, EVENT_EXIT
    ########## Titles
    glClear(GL_COLOR_BUFFER_BIT)
    glRasterPos2d(8, 100)
    Text("Alchemedia Loader")
    # File name chooser Button
    BeginAlign()
    filename = String("file to load: ", EVENT_NOEVENT, 10, 55, 410, 18, g_filename.val, 255, "Alchemdia xml file to load")
    Button("Browse", EVENT_CHOOSE_FILENAME, 420, 55, 80, 18)
    EndAlign()
    # Load and Exit Buttons
    Button("Load", EVENT_SCENE, 10, 10, 80, 18)
    Button("Cancel", EVENT_EXIT, 170, 10, 80, 18)
def event(evt, val):
    """Keyboard handler: Q (on key release) quits the script."""
    if (evt == QKEY and not val):
        Blender.Draw.Exit()
def bevent(evt):
    """Button handler: dispatch on the EVENT_* id raised by the GUI."""
    global g_filename
    global EVENT_NOEVENT, EVENT_SCENE, EVENT_CHOOSE_FILENAME, EVENT_EXIT
    # Manages GUI events
    if (evt == EVENT_EXIT):
        Blender.Draw.Exit()
    elif (evt == EVENT_CHOOSE_FILENAME):
        FileSelector(filename_callback, "Alchemedia XML File Selection")
    elif (evt == EVENT_SCENE):
        # Validate the path before attempting to load the scene.
        if not Blender.sys.exists(g_filename.val):
            PupMenu('file does not exist')
            return
        else:
            LoadAlchemediaScene(g_filename.val)
            Blender.Redraw()
            Blender.Draw.Exit()
            return
if __name__ == '__main__':
    # Register the GUI draw/keyboard/button handlers with Blender.
    Register(draw_gui, event, bevent)
| StarcoderdataPython |
3324819 | <filename>plasma-2040/lightAll96LEDsOnStrip.py
import plasma
from plasma import plasma2040

# Number of LEDs on the attached WS2812 strip.
NUM_LEDS = 96
# WS2812 driver on the Plasma 2040 data pin (middle args per plasma API).
led_strip = plasma.WS2812(NUM_LEDS, 0, 0, plasma2040.DAT)
led_strip.start()
# Light every LED half-brightness red.
for i in range(NUM_LEDS):
    led_strip.set_rgb(i, 127, 0, 0)
def fac(n):
    """Return n! for a non-negative integer n.

    Iterative instead of recursive: identical results, but no
    recursion-depth limit for large n.
    """
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
def sum_of_the_factorial_of_their_digits(n):
    """Return the sum of the factorials of the decimal digits of n.

    Fix: the original indexed a module-global ``fac_dic`` created only
    under ``if __name__ == "__main__"``, so the function raised NameError
    when imported; the digit factorials (0!..9!) are inlined here.
    """
    digit_factorial = (1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880)
    return sum(digit_factorial[int(d)] for d in str(n))
def main():
    """Yield every multi-digit number equal to the sum of the factorials
    of its digits (a "factorion").

    The upper bound 2540161 is hard-coded; presumably chosen as a known
    ceiling for factorions (cf. Project Euler 34) — confirm if changed.
    """
    for n in range(10, 2540161):
        if n == sum_of_the_factorial_of_their_digits(n):
            yield n
if __name__ == "__main__":
    # NOTE(review): 'global' at module level is a no-op; kept as-is.
    global fac_dic
    # Lookup table of single-digit factorials (module-global).
    fac_dic = {n : fac(n) for n in range(10)}
    answer = list(main())
    print(answer)
| StarcoderdataPython |
# JSON Schema validating the Azure service-principal credentials blob.
azure_credentials_schema = {
    "$id": "http://azure-ml.com/schemas/azure_credentials.json",
    "$schema": "http://json-schema.org/schema",
    "title": "azure_credentials",
    "description": "JSON specification for your azure credentials",
    "type": "object",
    "required": ["clientId", "clientSecret", "subscriptionId", "tenantId"],
    "properties": {
        "clientId": {
            "type": "string",
            "description": "The client ID of the service principal."
        },
        "clientSecret": {
            "type": "string",
            "description": "The client secret of the service principal."
        },
        "subscriptionId": {
            "type": "string",
            "description": "The subscription ID that should be used."
        },
        "tenantId": {
            "type": "string",
            "description": "The tenant ID of the service principal."
        }
    }
}
# JSON Schema validating the compute/run parameters accepted by the action.
# Note: no "required" list — every property here is optional.
parameters_schema = {
    "$id": "http://azure-ml.com/schemas/compute.json",
    "$schema": "http://json-schema.org/schema",
    "title": "aml-compute",
    "description": "JSON specification for your compute details",
    "type": "object",
    "properties": {
        "experiment_name": {
            "type": "string",
            "description": "The name of your experiment in AML.",
            "minLength": 3,
            "maxLength": 36
        },
        "tags": {
            "type": "object",
            "description": "Tags to be added to the submitted run."
        },
        "wait_for_completion": {
            "type": "boolean",
            "description": "Indicates whether the action will wait for completion of the run."
        },
        "runconfig_python_file": {
            "type": "string",
            "description": "Path to the python script in your repository in which you define your run and return an Estimator, Pipeline, AutoMLConfig or ScriptRunConfig object."
        },
        "runconfig_python_function_name": {
            "type": "string",
            "description": "The name of the function in your python script in your repository in which you define your run and return an Estimator, Pipeline, AutoMLConfig or ScriptRunConfig object. The function gets the workspace object passed as an argument."
        },
        "runconfig_yaml_file": {
            "type": "string",
            "description": "The name of your runconfig YAML file."
        },
        "pipeline_yaml_file": {
            "type": "string",
            "description": "The name of your pipeline YAML file."
        },
        "pipeline_publish": {
            "type": "boolean",
            "description": "Indicates whether the action will publish the pipeline after submitting it to Azure Machine Learning. This only works if you submitted a pipeline."
        },
        "pipeline_name": {
            "type": "string",
            "description": "The name of the published pipeline."
        },
        "pipeline_version": {
            "type": "string",
            "description": "The version of the published pipeline."
        },
        "pipeline_continue_on_step_failure": {
            "type": "boolean",
            "description": "Indicates whether the published pipeline will continue execution of other steps in the PipelineRun if a step fails."
        },
        "download_artifacts": {
            "type": "boolean",
            "description": "Indicates whether the created artifacts and logs from runs, pipelines and steps will be downloaded to your GitHub workspace."
        }
    }
}
| StarcoderdataPython |
1777314 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import json
import random
import main
from tests.sample_requests import sample_facebook_text_request, sample_facebook_flac_request_en, \
sample_facebook_opus_request_en, sample_facebook_opus_request_it
def _post_sample(payload):
    """POST *payload* (with a randomized responseId) to the app and return
    the fulfillment text after asserting a 200 response.

    Extracted from four near-identical test bodies to remove duplication.
    """
    main.app.testing = True
    client = main.app.test_client()
    payload['responseId'] += str(random.getrandbits(128))
    res = client.post('/', content_type='application/json', data=json.dumps(payload))
    assert res.status_code == 200
    data = json.loads(res.get_data().decode("utf-8"))
    return data['fulfillmentText']


def test_post_facebook_text():
    # Plain text messages are not recognized commands.
    text = _post_sample(sample_facebook_text_request)
    assert text == 'Request not recognised. Please say "Hi" to begin again.'


def test_post_facebook_flac_en():
    # English FLAC audio is transcribed.
    assert _post_sample(sample_facebook_flac_request_en).lower() == 'how old is the brooklyn bridge'


def test_post_facebook_opus_en():
    # English Opus audio is transcribed.
    assert _post_sample(sample_facebook_opus_request_en).lower() == 'how old is the brooklyn bridge'


def test_post_facebook_opus_it():
    # Italian Opus audio is transcribed in Italian.
    assert _post_sample(sample_facebook_opus_request_it).lower() == 'quanti anni ha il ponte della vittoria'
| StarcoderdataPython |
3593461 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 10 16:20:44 2019
@author: joscelynec
"""
import math #Needed for distance formula
import heapq #Needed for Priority Queue
"""
Class used to encapsulate a graph node for use in
the Priority Queue
"""
class graph_node:
    """Graph-node wrapper carrying an A* priority, for use in a heapq."""

    def __init__(self, value, priority):
        self.value = value        # intersection number (0, 1, 2, ...)
        self.priority = priority  # f = g + h

    # heapq orders nodes by their A* priority.
    def __lt__(self, other):
        return self.priority < other.priority

    # Two wrappers are equal when they wrap the same intersection.
    def __eq__(self, other):
        return self.value == other.value

    def __str__(self):
        # BUG FIX: the original also printed self.neigh and self.inter,
        # attributes that are never assigned, so str() raised
        # AttributeError. Only the real attributes are shown now.
        return str(self.value) + " " + str(self.priority)


def dist(x1, y1, x2, y2):
    """Euclidean distance between (x1, y1) and (x2, y2); the A* heuristic
    (admissible for straight-line road maps)."""
    return math.hypot(x1 - x2, y1 - y2)


def get_path(predecessor, start, goal):
    """Reconstruct the start -> goal path from the predecessor map."""
    path = [goal]
    current = goal
    while current != start:
        current = predecessor[current]
        path.append(current)
    path.reverse()  # stored goal -> start; present start -> goal
    return path


def shortest_path(M, start, goal):
    """A* search over map *M*; returns the node list from start to goal.

    *M* must provide ``M.intersections`` ({id: (x, y)}) and ``M.roads``
    (adjacency lists indexed by node id).
    Adapted from http://theory.stanford.edu/~amitp/GameProgramming/
    """
    inter = M.intersections
    roads = M.roads

    # Frontier of nodes to explore, ordered by f = g + h.
    frontier = []
    heapq.heapify(frontier)
    heapq.heappush(frontier, graph_node(start, 0))

    origin = {start: None}  # predecessor map
    curr_cost = {start: 0}  # g(): best known cost from start

    goal_node = graph_node(goal, 0)
    # Goal coordinates are loop-invariant; hoist them out of the search
    # (the original re-read them for every neighbor).
    x_goal, y_goal = inter[goal]

    while len(frontier) > 0:
        current_node = heapq.heappop(frontier)
        if current_node == goal_node:
            break
        x_curr, y_curr = inter[current_node.value]
        for neighbor in roads[current_node.value]:
            x_next, y_next = inter[neighbor]
            # g cost through the current node to this neighbor.
            new_cost = curr_cost[current_node.value] + dist(x_curr, y_curr, x_next, y_next)
            if neighbor not in curr_cost or new_cost < curr_cost[neighbor]:
                curr_cost[neighbor] = new_cost
                # f = g + straight-line estimate to the goal.
                priority = new_cost + dist(x_goal, y_goal, x_next, y_next)
                heapq.heappush(frontier, graph_node(neighbor, priority))
                origin[neighbor] = current_node.value

    return get_path(origin, start, goal)
| StarcoderdataPython |
1848611 | <gh_stars>1-10
#!/usr/bin/env python3
# chip_list.py
#
import logging
import os
from pathlib import Path
import yaml
from . import chip
log = logging.getLogger(__name__)
class ChipList:
    """Registry of chip definitions loaded from YAML files.

    Chips are stored under scoped ids ("library/id"); unscoped names and
    family aliases are tracked separately for global lookups.
    """

    # Class-level defaults; each instance gets its own dicts via clear().
    _chip_list = {}
    _global_name_dict = {}
    def __init__(self):
        """Create an empty chip list."""
        log.debug('ChipList.__init__()')
        self.clear()
def find_chip(self, chip_id):
log.debug('ChipList.find_chip(%s)', chip_id)
if not isinstance(chip_id, str):
raise ValueError('Expected string')
if '/' in chip_id: # scoped chip id
return self._chip_list.get(chip_id)
return self._global_name_dict.get(chip_id)
def clear(self):
self._chip_list = {}
self._global_name_dict = {}
def load(self, path):
log.debug('load_chip_list(%s)', path)
if os.path.isfile(path):
self._load_single_file(path)
elif os.path.isdir(path):
for file in os.listdir(path):
if file.endswith(".yaml"):
fullpath = os.path.normpath(os.path.join(path, file))
self._load_single_file(fullpath)
else:
raise IOError('Input must be a file or directory')
    def _add_aliases(self, chip, family):
        """Register family-specific alias names for *chip* in the global
        name table; currently only the '7400' logic family is handled.

        NOTE(review): locals 'id' and 'chip' shadow builtin/module names.
        """
        id = chip.unscoped_id
        if family == '7400':
            log.debug('Adding 7400-family aliases')
            if id[:2] != '74':
                log.error('Chip is missing 74 prefix: [%s], skipping aliases', id)
            else:
                # Insert the technology-family letters between the '74'
                # prefix and the part number, e.g. 7400 -> 74LS00.
                FAMILIES = ['L', 'H', 'S', 'LS', 'AS', 'ALS', 'F',  # Bipolar
                            'C', 'HC', 'HCT', 'AC', 'ACT',  # CMOS
                            'ACQ', 'AHC', 'ALVC', 'ALVT', 'AUC',  # CMOS overkill
                            'AUP', 'AVC', 'AXC', 'FC', 'FCT', 'LCX',
                            'LV', 'LVC', 'LVT', 'LVQ', 'LVX', 'VHC']
                names = [f'{id[:2]}{fam}{id[2:]}' for fam in FAMILIES]
                for name in names:
                    self._global_name_dict[name] = chip.create_alias(name)
                log.debug('Added %d aliases: %s', len(names), names)
        else:
            log.warning('Unknown family: [%s] for chip [%s]', family, chip.scoped_id)
@staticmethod
def _get_row_spacing(yaml_chip):
spacing = 6
if 'type' in yaml_chip:
if yaml_chip['type'] == 'wide':
spacing = 12
else:
log.warning('Unknown type attribute: [%s]', yaml_chip['type'])
return spacing
def _load_single_file(self, filename):
log.debug('load_chip_list_file(%s)', filename)
library_name = Path(filename).stem
log.debug('library_name: %s', library_name)
chip_list = {}
skipped = 0
with open(filename, 'r', encoding='utf8') as ymlfile:
yaml_chips = None
try:
yaml_chips = yaml.safe_load(ymlfile)
except yaml.YAMLError as err:
log.error('Error parsing chip file [%s]: %s', filename, err)
return
if yaml_chips == None:
log.warning('No chip data in file [%s]', filename)
return
for id, yaml_chip in yaml_chips.items():
log.debug('processing: %s, data: %s', id, yaml_chip)
string_id = str(id)
scoped_id = f'{library_name}/{string_id}'
log.debug('Processing id=%s', scoped_id)
if string_id[0] == '_':
log.debug('Skipping id=%s', scoped_id)
skipped += 1
continue
try:
if not 'pins' in yaml_chip:
raise chip.Error('No pins attribute for chip [%s]', scoped_id)
spacing = self._get_row_spacing(yaml_chip)
new_chip = chip.Chip(string_id, len(yaml_chip['pins']),
library=library_name,
rowSpacing=spacing)
if 'name' in yaml_chip:
new_chip.name = str(yaml_chip['name'])
if 'description' in yaml_chip:
new_chip.description = str(yaml_chip['description'])
new_chip.set_pins(yaml_chip['pins'])
chip_list[scoped_id] = new_chip
# Add to raw chip list for global searches
if string_id in self._global_name_dict:
log.warning('Duplicate global chip id [%s], use scoped name [%s] for lookup', string_id, scoped_id)
self._global_name_dict[string_id] = new_chip
if 'family' in yaml_chip:
self._add_aliases(new_chip, str(yaml_chip['family']))
except chip.Error as err:
log.error('Error adding chip [%s]: %s, skipping',
scoped_id, err)
skipped += 1
log.info(f'Loaded %d chips from %s %s', len(chip_list), filename,
f'({skipped} skipped)' if skipped else '')
self._chip_list.update(chip_list)
def __len__(self):
return len(self._chip_list)
def __iter__(self):
return self._chip_list.values().__iter__()
def __getitem__(self, item):
return self.find_chip(item)
@property
def names(self):
return [name for name in self._chip_list.keys()]
@property
def global_names(self):
return [name for name in self._global_name_dict]
| StarcoderdataPython |
139116 | <filename>openmdao/devtools/docs_experiment/experimental_source/core/experimental_driver.py
"""Define a base class for all Drivers in OpenMDAO."""
from collections import OrderedDict
import warnings
import numpy as np
from openmdao.recorders.recording_manager import RecordingManager
from openmdao.recorders.recording_iteration_stack import Recording
from openmdao.utils.record_util import create_local_meta, check_path
from openmdao.utils.mpi import MPI
from openmdao.utils.options_dictionary import OptionsDictionary
class ExperimentalDriver(object):
    """
    A fake driver class used for doc generation testing.

    Attributes
    ----------
    fail : bool
        Reports whether the driver ran successfully.
    iter_count : int
        Keep track of iterations for case recording.
    options : <OptionsDictionary>
        Dictionary with general pyoptsparse options.
    recording_options : <OptionsDictionary>
        Dictionary with driver recording options.
    cite : str
        Listing of relevant citations that should be referenced when
        publishing work that uses this class.
    _problem : <Problem>
        Pointer to the containing problem.
    supports : <OptionsDictionary>
        Provides a consistant way for drivers to declare what features they support.
    _designvars : dict
        Contains all design variable info.
    _cons : dict
        Contains all constraint info.
    _objs : dict
        Contains all objective info.
    _responses : dict
        Contains all response info.
    _rec_mgr : <RecordingManager>
        Object that manages all recorders added to this driver.
    _vars_to_record : dict
        Dict of lists of var names indicating what to record
    _model_viewer_data : dict
        Structure of model, used to make n2 diagram.
    _remote_dvs : dict
        Dict of design variables that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_cons : dict
        Dict of constraints that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_objs : dict
        Dict of objectives that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_responses : dict
        A combined dict containing entries from _remote_cons and _remote_objs.
    _total_coloring : tuple of dicts
        A data structure describing coloring for simultaneous derivs.
    _res_jacs : dict
        Dict of sparse subjacobians for use with certain optimizers, e.g. pyOptSparseDriver.
    """

    def __init__(self):
        """
        Initialize the driver.
        """
        self._rec_mgr = RecordingManager()
        self._vars_to_record = {
            'desvarnames': set(),
            'responsenames': set(),
            'objectivenames': set(),
            'constraintnames': set(),
            'sysinclnames': set(),
        }

        self._problem = None
        self._designvars = None
        self._cons = None
        self._objs = None
        self._responses = None
        self.options = OptionsDictionary()
        self.recording_options = OptionsDictionary()

        ###########################
        self.recording_options.declare('record_desvars', types=bool, default=True,
                                       desc='Set to True to record design variables at the \
                                       driver level')
        self.recording_options.declare('record_responses', types=bool, default=False,
                                       desc='Set to True to record responses at the driver level')
        self.recording_options.declare('record_objectives', types=bool, default=True,
                                       desc='Set to True to record objectives at the \
                                       driver level')
        self.recording_options.declare('record_constraints', types=bool, default=True,
                                       desc='Set to True to record constraints at the \
                                       driver level')
        self.recording_options.declare('includes', types=list, default=[],
                                       desc='Patterns for variables to include in recording. \
                                       Uses fnmatch wildcards')
        self.recording_options.declare('excludes', types=list, default=[],
                                       desc='Patterns for vars to exclude in recording '
                                            '(processed post-includes). Uses fnmatch wildcards')
        self.recording_options.declare('record_derivatives', types=bool, default=False,
                                       desc='Set to True to record derivatives at the driver \
                                       level')
        ###########################

        # What the driver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('inequality_constraints', types=bool, default=False)
        self.supports.declare('equality_constraints', types=bool, default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints', types=bool, default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        # TODO, support integer design vars in OpenMDAO
        self.supports.declare('integer_design_vars', types=bool, default=False)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives', types=bool, default=False)
        self.supports.declare('distributed_design_vars', types=bool, default=False)

        self.iter_count = 0
        # NOTE(review): the original code re-assigned ``self.options = None``
        # here, clobbering the OptionsDictionary created above; that line has
        # been removed so declared options survive construction.
        self._model_viewer_data = None
        self.cite = ""

        self._res_jacs = {}

        self.fail = False

    def add_recorder(self, recorder):
        """
        Add a recorder to the driver.

        Parameters
        ----------
        recorder : CaseRecorder
           A recorder instance.
        """
        self._rec_mgr.append(recorder)

    def cleanup(self):
        """
        Clean up resources prior to exit.
        """
        self._rec_mgr.close()

    def _setup_driver(self, problem):
        """
        Prepare the driver for execution.

        This is the final thing to run during setup.

        Parameters
        ----------
        problem : <Problem>
            Pointer to the containing problem.
        """
        pass

    def _get_voi_val(self, name, meta, remote_vois):
        """
        Get the value of a variable of interest (objective, constraint, or design var).

        This will retrieve the value if the VOI is remote.

        Parameters
        ----------
        name : str
            Name of the variable of interest.
        meta : dict
            Metadata for the variable of interest.
        remote_vois : dict
            Dict containing (owning_rank, size) for all remote vois of a particular
            type (design var, constraint, or objective).

        Returns
        -------
        float or ndarray
            The value of the named variable of interest.
        """
        model = self._problem.model
        comm = model.comm
        vec = model._outputs._views_flat
        indices = meta['indices']

        if name in remote_vois:
            owner, size = remote_vois[name]
            if owner == comm.rank:
                if indices is None:
                    val = vec[name].copy()
                else:
                    val = vec[name][indices]
            else:
                if indices is not None:
                    size = len(indices)
                val = np.empty(size)
            # Broadcast from the owning rank so every proc gets the value.
            comm.Bcast(val, root=owner)
        else:
            if indices is None:
                val = vec[name].copy()
            else:
                val = vec[name][indices]

        # NOTE(review): ``self._has_scaling`` is never initialized in this
        # fake driver's __init__ — presumably set during _setup_driver in the
        # real implementation; confirm before calling this standalone.
        if self._has_scaling:
            # Scale design variable values
            adder = meta['adder']
            if adder is not None:
                val += adder

            scaler = meta['scaler']
            if scaler is not None:
                val *= scaler

        return val

    def get_design_var_values(self, filter=None):
        """
        Return the design variable values.

        This is called to gather the initial design variable state.

        Parameters
        ----------
        filter : list
            List of desvar names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each design variable.
        """
        if filter:
            dvs = filter
        else:
            # use all the designvars
            dvs = self._designvars

        return {n: self._get_voi_val(n, self._designvars[n], self._remote_dvs) for n in dvs}

    def set_design_var(self, name, value):
        """
        Set the value of a design variable.

        Parameters
        ----------
        name : str
            Global pathname of the design variable.
        value : float or ndarray
            Value for the design variable.
        """
        # Silently ignore remote design vars not owned by this rank.
        if (name in self._remote_dvs and
                self._problem.model._owning_rank['output'][name] != self._problem.comm.rank):
            return

        meta = self._designvars[name]
        indices = meta['indices']
        if indices is None:
            indices = slice(None)

        desvar = self._problem.model._outputs._views_flat[name]
        desvar[indices] = value

        if self._has_scaling:
            # Scale design variable values
            scaler = meta['scaler']
            if scaler is not None:
                desvar[indices] *= 1.0 / scaler

            adder = meta['adder']
            if adder is not None:
                desvar[indices] -= adder

    def get_response_values(self, filter=None):
        """
        Return response values.

        Parameters
        ----------
        filter : list
            List of response names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each response.
        """
        if filter:
            resps = filter
        else:
            resps = self._responses

        return {n: self._get_voi_val(n, self._responses[n], self._remote_objs) for n in resps}

    def get_objective_values(self, filter=None):
        """
        Return objective values.

        Parameters
        ----------
        filter : list
            List of objective names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each objective.
        """
        if filter:
            objs = filter
        else:
            objs = self._objs

        return {n: self._get_voi_val(n, self._objs[n], self._remote_objs) for n in objs}

    def get_constraint_values(self, ctype='all', lintype='all', filter=None):
        """
        Return constraint values.

        Parameters
        ----------
        ctype : str
            Default is 'all'. Optionally return just the inequality constraints
            with 'ineq' or the equality constraints with 'eq'.
        lintype : str
            Default is 'all'. Optionally return just the linear constraints
            with 'linear' or the nonlinear constraints with 'nonlinear'.
        filter : list
            List of constraint names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each constraint.
        """
        if filter is not None:
            cons = filter
        else:
            cons = self._cons

        con_dict = {}
        for name in cons:
            meta = self._cons[name]

            if lintype == 'linear' and not meta['linear']:
                continue

            if lintype == 'nonlinear' and meta['linear']:
                continue

            if ctype == 'eq' and meta['equals'] is None:
                continue

            if ctype == 'ineq' and meta['equals'] is not None:
                continue

            con_dict[name] = self._get_voi_val(name, meta, self._remote_cons)

        return con_dict

    def run(self):
        """
        Execute this driver.

        The base `Driver` just runs the model. All other drivers overload
        this method.

        Returns
        -------
        bool
            Failure flag; True if failed to converge, False is successful.
        """
        with Recording(self._get_name(), self.iter_count, self) as rec:
            self._problem.model.run_solve_nonlinear()

        self.iter_count += 1
        return False

    def _dict2array_jac(self, derivs):
        """
        Flatten a nested {of: {wrt: array}} jacobian dict into one 2D array.

        Blocks for (of, wrt) pairs that are not relevant to each other are
        left as zeros.
        """
        osize = 0
        isize = 0
        do_wrt = True
        islices = {}
        oslices = {}
        for okey, oval in derivs.items():
            if do_wrt:
                # Column layout is determined once, from the first 'of' row.
                for ikey, val in oval.items():
                    istart = isize
                    isize += val.shape[1]
                    islices[ikey] = slice(istart, isize)
                do_wrt = False
            ostart = osize
            osize += oval[ikey].shape[0]
            oslices[okey] = slice(ostart, osize)

        new_derivs = np.zeros((osize, isize))

        relevant = self._problem.model._relevant

        for okey, odict in derivs.items():
            for ikey, val in odict.items():
                if okey in relevant[ikey] or ikey in relevant[okey]:
                    new_derivs[oslices[okey], islices[ikey]] = val

        return new_derivs

    def _compute_totals(self, of=None, wrt=None, return_format='flat_dict', use_abs_names=True):
        """
        Compute derivatives of desired quantities with respect to desired inputs.

        All derivatives are returned using driver scaling.

        Parameters
        ----------
        of : list of variable name str or None
            Variables whose derivatives will be computed. Default is None, which
            uses the driver's objectives and constraints.
        wrt : list of variable name str or None
            Variables with respect to which the derivatives will be computed.
            Default is None, which uses the driver's desvars.
        return_format : str
            Format to return the derivatives. Default is a 'flat_dict', which
            returns them in a dictionary whose keys are tuples of form (of, wrt). For
            the scipy optimizer, 'array' is also supported.
        use_abs_names : bool
            Set to True when passing in global names to skip some translation steps.

        Returns
        -------
        derivs : object
            Derivatives in form requested by 'return_format'.
        """
        prob = self._problem

        # Compute the derivatives in dict format...
        if prob.model._owns_approx_jac:
            derivs = prob._compute_totals_approx(of=of, wrt=wrt, return_format='dict',
                                                 use_abs_names=use_abs_names)
        else:
            derivs = prob._compute_totals(of=of, wrt=wrt, return_format='dict',
                                          use_abs_names=use_abs_names)

        # ... then convert to whatever the driver needs.
        if return_format in ('dict', 'array'):
            if self._has_scaling:
                for okey, odict in derivs.items():
                    for ikey, val in odict.items():

                        iscaler = self._designvars[ikey]['scaler']
                        oscaler = self._responses[okey]['scaler']

                        # Scale response side
                        if oscaler is not None:
                            val[:] = (oscaler * val.T).T

                        # Scale design var side
                        if iscaler is not None:
                            val *= 1.0 / iscaler
        else:
            raise RuntimeError("Derivative scaling by the driver only supports the 'dict' and "
                               "'array' formats at present.")

        if return_format == 'array':
            derivs = self._dict2array_jac(derivs)

        return derivs

    def record_iteration(self):
        """
        Record an iteration of the current Driver.
        """
        if not self._rec_mgr._recorders:
            return

        metadata = create_local_meta(self._get_name())

        # Get the data to record
        data = {}
        if self.recording_options['record_desvars']:
            # collective call that gets across all ranks
            desvars = self.get_design_var_values()
        else:
            desvars = {}

        if self.recording_options['record_responses']:
            # responses = self.get_response_values() # not really working yet
            responses = {}
        else:
            responses = {}

        if self.recording_options['record_objectives']:
            objectives = self.get_objective_values()
        else:
            objectives = {}

        if self.recording_options['record_constraints']:
            constraints = self.get_constraint_values()
        else:
            constraints = {}

        # NOTE(review): ``self._filtered_vars_to_record`` is never set in this
        # fake driver — presumably populated during setup in the real class.
        desvars = {name: desvars[name]
                   for name in self._filtered_vars_to_record['des']}
        # responses not working yet
        # responses = {name: responses[name] for name in self._filtered_vars_to_record['res']}
        objectives = {name: objectives[name]
                      for name in self._filtered_vars_to_record['obj']}
        constraints = {name: constraints[name]
                       for name in self._filtered_vars_to_record['con']}

        if self.recording_options['includes']:
            root = self._problem.model
            outputs = root._outputs
            # outputsinputs, outputs, residuals = root.get_nonlinear_vectors()
            sysvars = {}
            for name, value in outputs._names.items():
                if name in self._filtered_vars_to_record['sys']:
                    sysvars[name] = value
        else:
            sysvars = {}

        if MPI:
            root = self._problem.model
            desvars = self._gather_vars(root, desvars)
            responses = self._gather_vars(root, responses)
            objectives = self._gather_vars(root, objectives)
            constraints = self._gather_vars(root, constraints)
            sysvars = self._gather_vars(root, sysvars)

        data['des'] = desvars
        data['res'] = responses
        data['obj'] = objectives
        data['con'] = constraints
        data['sys'] = sysvars

        self._rec_mgr.record_iteration(self, data, metadata)

    def _gather_vars(self, root, local_vars):
        """
        Gather and return only variables listed in `local_vars` from the `root` System.

        Parameters
        ----------
        root : <System>
            the root System for the Problem
        local_vars : dict
            local variable names and values

        Returns
        -------
        dct : dict
            variable names and values.
        """
        # if trace:
        #     debug("gathering vars for recording in %s" % root.pathname)
        all_vars = root.comm.gather(local_vars, root=0)
        # if trace:
        #     debug("DONE gathering rec vars for %s" % root.pathname)

        if root.comm.rank == 0:
            dct = all_vars[-1]
            for d in all_vars[:-1]:
                dct.update(d)
            return dct

    def _get_name(self):
        """
        Get name of current Driver.

        Returns
        -------
        str
            Name of current Driver.
        """
        return "Driver"
| StarcoderdataPython |
3351788 | <gh_stars>0
#!/usr/bin/env python3
# Problem 14: Longest Collatz sequence
# https://projecteuler.net/problem=14
import sys
def euler014(bound):
    """Return the starting number below *bound* with the longest Collatz chain.

    Chain length counts the number of steps needed to reach 1.  Lengths are
    memoized across starting numbers, turning the naive quadratic walk into
    a near-linear scan.  Ties keep the earliest start (same as the original
    brute-force version, which only replaced on a strictly longer chain).
    """
    lengths = {1: 0}  # memo: number -> steps to reach 1
    best_start = 0
    best_length = 0
    for start in range(1, bound):
        # Walk until we hit a number whose length is already known,
        # remembering the path so we can back-fill its lengths.
        path = []
        number = start
        while number not in lengths:
            path.append(number)
            number = number // 2 if number % 2 == 0 else 3 * number + 1
        length = lengths[number]
        for n in reversed(path):
            length += 1
            lengths[n] = length
        # After back-filling, `length` is the chain length for `start`.
        if length > best_length:
            best_length = length
            best_start = start
    return best_start
def parse_input(lines):
    """Parse problem input: the first line holds the integer bound."""
    first_line = lines[0]
    return int(first_line.strip())
if __name__ == "__main__":
    # Read the bound from stdin and print the best starting number.
    print(euler014(parse_input(sys.stdin.readlines())))
| StarcoderdataPython |
1715407 | <gh_stars>10-100
"""CB58 encode or decode FILE, or standard input, to standard output.
"""
import argparse
import sys
from . import __version__, cb58decode, cb58encode
EPILOG = """
CB58 is a base-58 encoding with a 32-bit checksum, used on the AVA network.
It's similar to base58check.
"""
def main(argv=None):
    """CLI entry point: CB58-encode or -decode FILE (or stdin) to stdout.

    Parameters
    ----------
    argv : list of str or None
        Argument vector; None means use sys.argv (argparse default).

    Returns
    -------
    int
        Process exit status (0 on success).
    """
    parser = argparse.ArgumentParser(
        prog='cb58ref',
        description=__doc__,
        epilog=EPILOG,
    )
    parser.add_argument(
        '-d', '--decode', action='store_true',
        help='decode data',
    )
    parser.add_argument(
        '-n', action='store_false',
        dest='newline',
        help='do not output the trailing newline',
    )
    parser.add_argument(
        'file', metavar='FILE',
        type=argparse.FileType('rb'),
        default='-',
        nargs='?',
        help='file to read from (default: stdin)'
    )
    parser.add_argument(
        '--version', action='store_true',
        help='print program version and exit',
    )
    args = parser.parse_args(argv)

    if args.version:
        print(parser.prog, __version__)
        return 0

    # Workaround for https://bugs.python.org/issue14156
    # We want to read binary data, but (as of Jun 2020) argparse doesn't
    # provide that when reading from stdin.
    # Identity check on purpose: FileType('-') hands back sys.stdin itself.
    if args.file is sys.stdin:
        args.file = args.file.buffer

    # Read CB58, output bytes
    if args.decode:
        s = args.file.read().decode('ascii')
        b = cb58decode(s)
        sys.stdout.buffer.write(b)
        if args.newline:
            sys.stdout.buffer.write(b'\n')
        return 0

    # Read bytes, output CB58
    b = args.file.read()
    s = cb58encode(b)
    sys.stdout.write(s)
    if args.newline:
        sys.stdout.write('\n')
    return 0
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())  # pragma: no cover
| StarcoderdataPython |
8109782 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import logging
import torch
from torch import nn
from ...layers import TextEncoder
from ...layers import PWEmbeddingOutputDecoder
from ...utils.misc import get_n_params
from ...vocabulary import Vocabulary
from ...utils.topology import Topology
from ...utils.ml_metrics import Loss
from ...utils.device import DEVICE
from ...utils.misc import pbar
from ...utils.embedding import load_param_from_fasttext
from ...datasets import MultimodalDataset
from ...metrics import Metric
logger = logging.getLogger('nmtpytorch')
class EmbeddingOutput(nn.Module):
    """Seq2seq NMT model whose decoder predicts target word embeddings."""

    supports_beam_search = True

    def set_defaults(self):
        """Populate ``self.defaults`` with every supported model option."""
        self.defaults = {
            'emb_dim': 128,             # Source and target embedding sizes
            'emb_maxnorm': None,        # Normalize embeddings l2 norm to 1
            'emb_gradscale': False,     # Scale embedding gradients w.r.t. batch frequency
            'enc_dim': 256,             # Encoder hidden size
            'enc_type': 'gru',          # Encoder type (gru|lstm)
            'enc_lnorm': False,         # Add layer-normalization to encoder output
            'n_encoders': 1,            # Number of stacked encoders
            'enc_emb_init_type': 'random',
            'enc_emb_init_args': {},
            'dec_dim': 256,             # Decoder hidden size
            'dec_type': 'gru',          # Decoder type (gru|lstm)
            'dec_variant': 'cond',      # (cond|simplegru|vector)
            'dec_init': 'mean_ctx',     # How to initialize decoder (zero/mean_ctx/feats)
            'dec_init_size': None,      # feature vector dimensionality for
            'dec_init_activ': 'tanh',   # Decoder initialization activation func
                                        # dec_init == 'feats'
            'dec_emb_init_type': 'random',
            'dec_emb_init_args': {},
            'dec_out_init_type': 'fasttext',
            'dec_out_init_args': {},
            'n_decoders': 1,
            'loss_type': 'maxmargin',   # Loss type for decoder
            'loss_args': {},
            'att_type': 'mlp',          # Attention type (mlp|dot)
            'att_temp': 1.,             # Attention temperature
            'att_activ': 'tanh',        # Attention non-linearity (all torch nonlins)
            'att_mlp_bias': False,      # Enables bias in attention mechanism
            'att_bottleneck': 'ctx',    # Bottleneck dimensionality (ctx|hid)
            'att_transform_ctx': True,  # Transform annotations before attention
            'dropout_emb': 0,           # Simple dropout to source embeddings
            'dropout_ctx': 0,           # Simple dropout to source encodings
            'dropout_out': 0,           # Simple dropout to decoder output
            'dropout_enc': 0,           # Intra-encoder dropout if n_encoders > 1
            'tied_emb': False,          # Share embeddings: (False|2way|3way)
            'direction': None,          # Network directionality, i.e. en->de
            'max_len': 80,              # Reject sentences where 'bucket_by' length > 80
            'bucket_by': None,          # A key like 'en' to define w.r.t which dataset
                                        # the batches will be sorted
            'bucket_order': None,       # Curriculum: ascending/descending/None
            'sampler_type': 'bucket',   # bucket or approximate
            'sched_sampling': 0,        # Scheduled sampling ratio
            'bos_type': 'emb',          # 'emb': default learned emb
            'bos_activ': None,          #
            'bos_dim': None,            #
        }

    def __init__(self, opts):
        """Store options, load vocabularies and parse the topology.

        Parameters
        ----------
        opts : namespace with .model, .data, .vocabulary, .train sections
        """
        super().__init__()

        # opts -> config file sections {.model, .data, .vocabulary, .train}
        self.opts = opts

        # Vocabulary objects
        self.vocabs = {}

        # Each auxiliary loss should be stored inside this dictionary
        # in order to be taken into account by the mainloop for multi-tasking
        self.aux_loss = {}

        # Setup options
        self.opts.model = self.set_model_options(opts.model)

        # Parse topology & languages
        self.topology = Topology(self.opts.model['direction'])

        # Load vocabularies here
        for name, fname in self.opts.vocabulary.items():
            self.vocabs[name] = Vocabulary(fname, name=name)

        # Inherently non multi-lingual aware
        slangs = self.topology.get_src_langs()
        tlangs = self.topology.get_trg_langs()
        if slangs:
            self.sl = slangs[0]
            self.src_vocab = self.vocabs[self.sl]
            self.n_src_vocab = len(self.src_vocab)
        if tlangs:
            self.tl = tlangs[0]
            self.trg_vocab = self.vocabs[self.tl]
            self.n_trg_vocab = len(self.trg_vocab)

        # Need to be set for early-stop evaluation
        # NOTE: This should come from config or elsewhere
        self.val_refs = self.opts.data['val_set'][self.tl]

        # Textual context size is always equal to enc_dim * 2 since
        # it is the concatenation of forward and backward hidden states
        if 'enc_dim' in self.opts.model:
            self.ctx_sizes = {str(self.sl): self.opts.model['enc_dim'] * 2}

        # Check vocabulary sizes for 3way tying
        if self.opts.model['tied_emb'] not in [False, '2way', '3way']:
            raise RuntimeError(
                "'{}' not recognized for tied_emb.".format(self.opts.model['tied_emb']))

        if self.opts.model['tied_emb'] == '3way':
            assert self.n_src_vocab == self.n_trg_vocab, \
                "The vocabulary sizes do not match for 3way tied embeddings."

    def __repr__(self):
        """Append vocabularies and parameter count to the default repr."""
        s = super().__repr__() + '\n'
        for vocab in self.vocabs.values():
            s += "{}\n".format(vocab)
        s += "{}\n".format(get_n_params(self))
        return s

    def set_model_options(self, model_opts):
        """Merge user-provided *model_opts* over the declared defaults.

        Unknown options are ignored with a warning; dict-valued defaults are
        updated in place rather than replaced.
        """
        self.set_defaults()
        for opt, value in model_opts.items():
            if opt in self.defaults:
                # Override defaults from config
                if isinstance(self.defaults[opt], dict):
                    # translation or test
                    if isinstance(value, dict):
                        self.defaults[opt].update(value)
                    # train
                    else:
                        self.defaults[opt].update(getattr(self.opts, str(value), {}))
                else:
                    self.defaults[opt] = value
            else:
                # Fixed: emit through the warning level with lazy %-args
                # instead of logger.info('Warning: ...'.format(...)).
                logger.warning('Unused model option: %s', opt)
        return self.defaults

    def reset_parameters(self):
        """(Re-)initialize all learnable parameters.

        Weights are Kaiming-initialized, then embedding / output matrices
        are optionally overwritten from pretrained fastText vectors or tied.
        """
        for name, param in self.named_parameters():
            # Skip 1-d biases and scalars
            if param.requires_grad and param.dim() > 1:
                nn.init.kaiming_normal_(param.data)

        if self.opts.model['enc_emb_init_type'] == 'fasttext':
            args = self.opts.model['enc_emb_init_args']
            self.enc.emb.weight = load_param_from_fasttext(self.trg_vocab, args)
        if self.opts.model['dec_emb_init_type'] == 'fasttext':
            args = self.opts.model['dec_emb_init_args']
            self.dec.emb.weight = load_param_from_fasttext(self.trg_vocab, args)

        self.dec_out_init_type = self.opts.model['dec_out_init_type'].lower()
        if self.dec_out_init_type == 'fasttext':
            args = self.opts.model['dec_out_init_args']
            weight = load_param_from_fasttext(self.trg_vocab, args)
            # Pretrained output embeddings stay frozen.
            weight.requires_grad = False
            self.dec.out2prob.weight = weight
        elif self.dec_out_init_type == 'tied':
            self.dec.out2prob.weight = self.dec.emb.weight

    def setup(self, is_train=True):
        """Sets up NN topology by creating the layers."""
        ########################
        # Create Textual Encoder
        ########################
        self.enc = TextEncoder(
            input_size=self.opts.model['emb_dim'],
            hidden_size=self.opts.model['enc_dim'],
            n_vocab=self.n_src_vocab,
            rnn_type=self.opts.model['enc_type'],
            dropout_emb=self.opts.model['dropout_emb'],
            dropout_ctx=self.opts.model['dropout_ctx'],
            dropout_rnn=self.opts.model['dropout_enc'],
            num_layers=self.opts.model['n_encoders'],
            emb_maxnorm=self.opts.model['emb_maxnorm'],
            emb_gradscale=self.opts.model['emb_gradscale'],
            layer_norm=self.opts.model['enc_lnorm'])

        ################
        # Create Decoder
        ################
        self.opts.model['dec_emb_init_args']['vocab'] = self.trg_vocab
        self.opts.model['dec_out_init_args']['vocab'] = self.trg_vocab
        self.dec = PWEmbeddingOutputDecoder(
            input_size=self.opts.model['emb_dim'],
            hidden_size=self.opts.model['dec_dim'],
            n_vocab=self.n_trg_vocab,
            rnn_type=self.opts.model['dec_type'],
            num_layer=self.opts.model['n_decoders'],
            ctx_size_dict=self.ctx_sizes,
            ctx_name=str(self.sl),
            tied_emb=self.opts.model['tied_emb'],
            dec_init=self.opts.model['dec_init'],
            dec_init_size=self.opts.model['dec_init_size'],
            dec_init_activ=self.opts.model['dec_init_activ'],
            loss_type=self.opts.model['loss_type'],
            loss_args=self.opts.model['loss_args'],
            att_type=self.opts.model['att_type'],
            att_temp=self.opts.model['att_temp'],
            att_activ=self.opts.model['att_activ'],
            transform_ctx=self.opts.model['att_transform_ctx'],
            mlp_bias=self.opts.model['att_mlp_bias'],
            att_bottleneck=self.opts.model['att_bottleneck'],
            dropout_out=self.opts.model['dropout_out'],
            emb_maxnorm=self.opts.model['emb_maxnorm'],
            emb_gradscale=self.opts.model['emb_gradscale'],
            sched_sample=self.opts.model['sched_sampling'],
            bos_type=self.opts.model['bos_type'],
            bos_dim=self.opts.model['bos_dim'],
            bos_activ=self.opts.model['bos_activ'])

        # Share encoder and decoder weights
        if self.opts.model['tied_emb'] == '3way':
            self.enc.emb.weight = self.dec.emb.weight

    def load_data(self, split, batch_size, mode='train'):
        """Loads the requested dataset split."""
        dataset = MultimodalDataset(
            data=self.opts.data['{}_set'.format(split)],
            mode=mode, batch_size=batch_size,
            vocabs=self.vocabs, topology=self.topology,
            bucket_by=self.opts.model['bucket_by'],
            max_len=self.opts.model['max_len'],
            bucket_order=self.opts.model['bucket_order'],
            sampler_type=self.opts.model['sampler_type'])
        logger.info(dataset)
        return dataset

    def get_bos(self, batch_size):
        """Returns a representation for <bos> embeddings for decoding."""
        return torch.LongTensor(batch_size).fill_(self.trg_vocab['<bos>'])

    def encode(self, batch, **kwargs):
        """Encodes all inputs and returns a dictionary.

        Arguments:
            batch (dict): A batch of samples with keys designating the
                information sources.

        Returns:
            dict:
                A dictionary where keys are source modalities compatible
                with the data loader and the values are tuples where the
                elements are encodings and masks. The mask can be ``None``
                if the relevant modality does not require a mask.
        """
        d = {str(self.sl): self.enc(batch[self.sl])}
        if 'feats' in batch:
            d['feats'] = (batch['feats'], None)
        return d

    def forward(self, batch, **kwargs):
        """Computes the forward-pass of the network and returns batch loss.

        Arguments:
            batch (dict): A batch of samples with keys designating the source
                and target modalities.

        Returns:
            Tensor:
                A scalar loss normalized w.r.t batch size and token counts.
        """
        # Get loss dict
        result = self.dec(self.encode(batch), batch[self.tl])
        # Count non-padding target tokens (positions after the first step).
        result['n_items'] = torch.nonzero(batch[self.tl][1:]).shape[0]
        return result

    def test_performance(self, data_loader, dump_file=None):
        """Computes test set loss over the given DataLoader instance."""
        loss = Loss()

        for batch in pbar(data_loader, unit='batch'):
            batch.device(DEVICE)
            out = self.forward(batch)
            loss.update(out['loss'], out['n_items'])

        return [
            Metric('LOSS', loss.get(), higher_better=False),
        ]

    def get_decoder(self, task_id=None):
        """Compatibility function for multi-tasking architectures."""
        return self.dec
| StarcoderdataPython |
4875588 | from twisted.internet import reactor, protocol
from twisted.protocols import basic
from World.Lib.telcodes import RED, NEWLINE, BLUE, RESET
import dircache, md5
import World
from World import WORLD
NOSTATE = 0
LOGIN = 1
PASSWORD = 2
NEWLOGIN = 3
NEWPASSWORD = 4
GAME = 5
INTRO = open("./Text/Intro.txt", "r").read()
class MudServer(basic.LineReceiver):
    """One telnet connection: drives the login state machine, then the game.

    Python 2 / Twisted code.  Player records live under ./Players/<name> as
    three-line text files; line 2 is "<field>:<md5-hex-password>".
    """

    def __init__(self):
        self.state = LOGIN
        self.username = ""
        self.loggedin = False
        self.level = 0

    def lineReceived(self, line):
        # Until the player reaches the GAME state every line feeds login().
        if self.state != GAME:
            self.login(line)

    def connectionMade(self):
        # Greet the new connection and prompt for a username.
        self.transport.write(RED + INTRO + NEWLINE)
        self.transport.write(BLUE + "Username: " + RESET)

    def _hash_password(self, line):
        # Passwords are stored as hex MD5 digests in the player file.
        # NOTE(review): MD5 is obsolete for password storage; kept only for
        # compatibility with the existing player-file format.
        dig = md5.new()
        dig.update(line)
        return dig.hexdigest()

    def _stored_password(self):
        # Hash field of line 2 of the player file, newline stripped.
        return open("./Players/" + self.username, "r").readlines()[1].split(":")[1].strip()

    def login(self, line):
        if self.state == LOGIN:
            if line in dircache.listdir('./Players'):
                self.username = line
                # A blank password field forces the player to pick one now.
                if open("./Players/" + self.username, "r").readlines()[1].split(":")[1].isspace():
                    self.transport.write(BLUE + "Please enter a new password" + NEWLINE)
                    self.state = NEWPASSWORD
                else:
                    self.state = PASSWORD
                    self.transport.write(BLUE + "password: " + RESET)
            else:
                self.transport.write(NEWLINE + "Invalid username" + NEWLINE)
                self.transport.write(BLUE + "\r\nUsername: " + RESET)
        elif self.state == PASSWORD:
            # Fixed: compare digests for equality.  The original used a
            # substring test against the stored field, which would accept any
            # fragment of the stored hash.
            if self._hash_password(line) == self._stored_password():
                self.state = GAME
                WORLD.login(self)
            else:
                self.transport.write(RED + "Invalid password" + NEWLINE)
                self.transport.write(BLUE + "password: " + RESET)
        elif self.state == NEWPASSWORD:
            password = self._hash_password(line)
            # Rewrite line 2 of the player file with the new hash.
            # NOTE(review): assumes the file has exactly three lines.
            page = open("./Players/" + self.username, "r").readlines()
            tmp = page[1].split(":")
            tmp[1] = password + "\n"
            page[1] = tmp[0] + ":" + tmp[1]
            page = page[0] + page[1] + page[2]
            open("./Players/" + self.username, "w+t").write(page)
            self.state = GAME
            WORLD.login(self)
        elif self.state == GAME:
            self.transport.write("Entered the game loop")
class MudServerFactory(protocol.ServerFactory):
    # Factory spawning one MudServer protocol instance per connection.
    protocol = MudServer
if __name__ =="__main__":
    # Listen for telnet connections and run the Twisted reactor forever.
    port = 5777
    reactor.listenTCP(port, MudServerFactory())
    reactor.run()
| StarcoderdataPython |
5188759 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `openclsim` package."""
import pytest
import simpy
import shapely.geometry
import logging
import datetime
import time
import numpy as np
from click.testing import CliRunner
from openclsim import core
from openclsim import model
from openclsim import cli
logger = logging.getLogger(__name__)
# make the environment
@pytest.fixture
def env():
    """Simpy environment anchored at 2019-01-01, with the epoch stored on it."""
    simulation_start = datetime.datetime(2019, 1, 1)
    # Compute the epoch timestamp once instead of twice (same value both times).
    epoch = time.mktime(simulation_start.timetuple())
    my_env = simpy.Environment(initial_time=epoch)
    my_env.epoch = epoch
    return my_env
# make the locations
@pytest.fixture
def Location():
    """Composite site class built from the openclsim core mixins."""
    mixins = (
        core.Identifiable,  # Give it a name
        core.Log,           # Allow logging of all discrete events
        core.Locatable,     # Add coordinates to extract distance information and visualize
        core.HasContainer,  # Add information on the material available at the site
        core.HasResource,   # Add information on serving equipment
    )
    return type("Location", mixins, {})
# make the movers
@pytest.fixture
def TransportProcessingResource():
    """Build the vessel class used by the tests.

    The class combines openclsim core mixins into a movable container
    that can also load and unload material.
    """
    mixins = (
        core.Identifiable,
        core.Log,
        core.ContainerDependentMovable,
        core.Processor,
        core.LoadingFunction,
        core.UnloadingFunction,
        core.HasResource,
    )
    return type("TransportProcessingResource", mixins, {})
# Run the test
# Run the test
def test_temp_site_no_stop(env, Location, TransportProcessingResource):
    """Two chained activities without a stop event.

    Activity 01 moves all material from site A to site B; activity 02
    starts after a 2-day delay and moves it back.  Asserts the run takes
    longer than the bare 2-day delay, i.e. actual work was simulated.
    """
    # make the locations that have soil
    # Both sites share the same coordinate so sailing time is zero.
    location = shapely.geometry.Point(0, 0)
    data_from_site = {
        "env": env,  # The simpy environment
        "name": "Location A",  # The name of the "from location"
        "geometry": location,  # The coordinates of the "from location"
        "capacity": 1_000,  # The capacity of the "from location"
        "level": 1_000,  # The actual volume of the "from location"
    }
    data_to_site = {
        "env": env,  # The simpy environment
        "name": "Location B",  # The name of the "to location"
        "geometry": location,  # The coordinates of the "to location"
        "capacity": 1_000,  # The capacity of the "to location"
        "level": 0,  # The actual volume of the "to location"
    }
    from_site = Location(**data_from_site)
    to_site = Location(**data_to_site)
    def compute_v_provider(v_empty, v_full):
        # Linear interpolation of sailing speed with fill fraction x.
        return lambda x: x * (v_full - v_empty) + v_empty
    data_tpr = {
        "env": env,  # The simpy environment
        "name": "Transport Processing Resource",  # Name
        "geometry": from_site.geometry,  # It starts at the "from site"
        "loading_rate": 1.5,  # Loading rate
        "unloading_rate": 1.5,  # Unloading rate
        "capacity": 5_000,  # Capacity
        "compute_v": compute_v_provider(5, 4.5),
    }  # Variable speed
    processor = TransportProcessingResource(**data_tpr)
    # The single processor acts as loader, mover and unloader.
    model.Activity(
        env=env,
        name="Movement of material 01",
        origin=from_site,
        destination=to_site,
        loader=processor,
        mover=processor,
        unloader=processor,
    )
    model.Activity(
        env=env,
        name="Movement of material 02",
        origin=to_site,
        destination=from_site,
        loader=processor,
        mover=processor,
        unloader=processor,
        # Delay the return trip by two days.
        start_event=env.timeout(2 * 24 * 3600),
    )
    start = env.now
    env.run()
    # Elapsed time must differ from the bare 2-day delay: loading and
    # unloading time was actually simulated on top of it.
    assert env.now - start != 2 * 24 * 3600
def test_temp_site_with_stop(env, Location, TransportProcessingResource):
    """Same scenario as ``test_temp_site_no_stop`` but with a stop event.

    Activity 02 additionally stops once site B's container is empty,
    exercising the ``stop_event`` branch of ``model.Activity``.
    """
    # make the locations that have soil
    # Both sites share the same coordinate so sailing time is zero.
    location = shapely.geometry.Point(0, 0)
    data_from_site = {
        "env": env,  # The simpy environment
        "name": "Location A",  # The name of the "from location"
        "geometry": location,  # The coordinates of the "from location"
        "capacity": 1_000,  # The capacity of the "from location"
        "level": 1_000,  # The actual volume of the "from location"
    }
    data_to_site = {
        "env": env,  # The simpy environment
        "name": "Location B",  # The name of the "to location"
        "geometry": location,  # The coordinates of the "to location"
        "capacity": 1_000,  # The capacity of the "to location"
        "level": 0,  # The actual volume of the "to location"
    }
    from_site = Location(**data_from_site)
    to_site = Location(**data_to_site)
    def compute_v_provider(v_empty, v_full):
        # Linear interpolation of sailing speed with fill fraction x.
        return lambda x: x * (v_full - v_empty) + v_empty
    data_tpr = {
        "env": env,  # The simpy environment
        "name": "Transport Processing Resource",  # Name
        "geometry": from_site.geometry,  # It starts at the "from site"
        "loading_rate": 1.5,  # Loading rate
        "unloading_rate": 1.5,  # Unloading rate
        "capacity": 5_000,  # Capacity
        "compute_v": compute_v_provider(5, 4.5),
    }  # Variable speed
    processor = TransportProcessingResource(**data_tpr)
    model.Activity(
        env=env,
        name="Movement of material 01",
        origin=from_site,
        destination=to_site,
        loader=processor,
        mover=processor,
        unloader=processor,
    )
    model.Activity(
        env=env,
        name="Movement of material 02",
        origin=to_site,
        destination=from_site,
        loader=processor,
        mover=processor,
        unloader=processor,
        # Delay the return trip by two days, then stop when B runs empty.
        start_event=env.timeout(2 * 24 * 3600),
        stop_event=to_site.container.get_empty_event,
    )
    start = env.now
    env.run()
    # Elapsed time must differ from the bare 2-day delay: work happened.
    assert env.now - start != 2 * 24 * 3600
| StarcoderdataPython |
5065062 | #!/usr/bin/env python3
import os
import logging
import time
import requests
from .nitro import NitroClient
from .frrouting import FrroutingClient
logger = logging.getLogger(__name__)
class SyncService():
    """Mirror Citrix ADC VIP host routes into FRRouting.

    Periodically reads the VIP addresses from the ADC NITRO API and keeps
    FRRouting's static /32 host routes (tagged 99) in sync: active,
    advertisable VIPs get a route; disabled, withdrawn or deleted VIPs
    have their route removed.
    """

    def __init__(self, url, username, password, nexthop):
        """Wire up the FRRouting and NITRO clients.

        url/username/password: NITRO API endpoint and credentials.
        nexthop: next-hop IP used for every injected host route.
        """
        self.frr_client = FrroutingClient()
        self.nitro_client = NitroClient(url, username, password)
        # disable nitro client ssl verification
        self.nitro_client.set_verify("false")
        self.nexthop = nexthop
        self.nitro_url = url

    def start_sync_daemon(self):
        """Run start_sync forever with a 5-second pause between rounds."""
        logger.info("Initializing periodic sync every 5 seconds as daemon")
        while True:
            logger.info("Beginning sync")
            self.start_sync()
            logger.info("End of sync")
            time.sleep(5)

    def start_sync(self):
        """Perform one full reconciliation round.

        If the NITRO API is unreachable or returns no 'nsip' data, all
        currently configured VIP routes are treated as orphaned and removed.
        """
        # Extract FRRouting configured hostroutes for ADC VIP
        # NOTE(review): route tag "99" is hard-coded here and in
        # __sync_routes_with_active_virtual_ipaddresses -- keep in sync.
        frrouting_vip_routes_to_process = self.__get_frrouting_hostroutes("99")
        logger.info("Host routes already configured in FRRouting : " + str(frrouting_vip_routes_to_process))
        logger.info("Beginning sync with Citrix ADC.")
        # Do the request to the NITRO API
        # Filter for VIP IP addresses to ignore SNIP and NSIP
        result = self.__get_adc_virtual_ipaddresses()
        #Check if result is a JSON containin a least one nsip object.
        if 'nsip' in result:
            orphaned_frrouting_routes = self.__sync_routes_with_active_virtual_ipaddresses(result, frrouting_vip_routes_to_process, self.nexthop)
            # Make sure that no deleted VIP remains in frrouting by removing all the static routes for VIP remaining in frrouting_vip_routes_to_process
            self.__remove_orphaned_hostroutes(orphaned_frrouting_routes, self.nexthop)
        else:
            logger.error('Unable to connect parse Virtual IP addresses in Nitro response')
            self.__remove_orphaned_hostroutes(frrouting_vip_routes_to_process, self.nexthop)

    def __get_frrouting_hostroutes(self, tag):
        """Return the set of IPs (prefix part only) of FRR routes with *tag*."""
        # Create an empty set on which all the frrouting configured VIP will be injected.
        ipaddresses_set = set()
        # Create a list of prefixes matching for which configured nexthop matches nexthop parameter
        hostroutes = self.frr_client.get_routes_with_tag(tag)
        # NOTE(review): this only iterates when get_routes_with_tag returns a
        # dict (iterating a dict yields its keys, e.g. "10.0.0.1/32"); if the
        # client ever returns a list of prefixes, this silently yields an
        # empty set -- confirm the client's return type.
        if isinstance(hostroutes,dict):
            for route in hostroutes:
                ipaddresses_set.add(route.split('/')[0])
        # Return ipaddresses_set unordered list of unique VIP.
        return ipaddresses_set

    def __get_adc_virtual_ipaddresses(self):
        """Fetch VIP-type nsip objects from NITRO; {} on connection failure."""
        try:
            return self.nitro_client.request(
                method='get',
                endpoint="config",
                objecttype="nsip",
                params="args=type:VIP"
            ).json()
        except requests.exceptions.ConnectionError:
            logger.error("Unable to connect to Citrix ADC NITRO API at " + self.nitro_url)
            return {}

    def __sync_routes_with_active_virtual_ipaddresses(self, virtual_ipaddresses, frrouting_vip_routes_to_process, nexthop):
        """Reconcile each ADC VIP against the existing FRR routes.

        Returns the subset of frrouting_vip_routes_to_process that matched no
        VIP at all (orphans to be removed by the caller).  The input set is
        mutated in place: every VIP seen in the ADC response is discarded.
        """
        for vip in virtual_ipaddresses['nsip']:
            # IF state = ENABLED & hostroute=ENABLED & vipvipvsrvrrhiactiveupcount > 0 & route not already in routing table=> publish route
            # vip['viprtadv2bsd'] is true when Citrix ADC wants to advertise hostroute to the network
            if vip['state'] == 'ENABLED' and vip['hostroute'] == 'ENABLED' and vip['viprtadv2bsd'] and vip['ipaddress'] not in frrouting_vip_routes_to_process:
                #Inject route using vtysh cli command
                logger.info("Injecting new VIP "+ vip['ipaddress'] + " into routing table")
                self.frr_client.add_static_route_with_tag(vip['ipaddress'], "255.255.255.255", nexthop, 99)
            # IF state = ENABLED & hostroute=ENABLED & vipvipvsrvrrhiactiveupcount > 0 & route already in routing table=> do nothing
            elif vip['state'] == 'ENABLED' and vip['hostroute'] == 'ENABLED' and vip['viprtadv2bsd'] and vip['ipaddress'] in frrouting_vip_routes_to_process:
                #Do nothing as configuration is ok and remove VIP from frrouting_vip_routes_to_process as it has been processed
                frrouting_vip_routes_to_process.remove(vip['ipaddress'])
            # Else remove route. This method ensures that disabled VIP or unconfigured hostroutes are also removed from routing table
            # vip['viprtadv2bsd'] is false when Citrix ADC wants to remote hostroute from the network
            elif (vip['state'] == 'DISABLED' or vip['hostroute'] == 'DISABLED' or not vip['viprtadv2bsd'] ) and (vip['ipaddress'] in frrouting_vip_routes_to_process):
                #Remove route using vtysh cli command
                logger.info("Removing down/disabled VIP "+ vip['ipaddress'] + " from routing table")
                self.frr_client.remove_static_route(vip['ipaddress'], "255.255.255.255", nexthop)
                #Remove VIP from frrouting_vip_routes_to_process as it has been processed
                frrouting_vip_routes_to_process.remove(vip['ipaddress'])
        #Return the list of VIP for which a route still exists in frrouting routing table
        #All the existing VIP in Citrix ADC (active or inactive) have been removed from this list
        return frrouting_vip_routes_to_process

    def __remove_orphaned_hostroutes(self, ip_addresses_dict, nexthop):
        """Remove the static host route for every IP in *ip_addresses_dict*."""
        for ip_address in ip_addresses_dict:
            logger.info("Removing deleted VIP "+ ip_address + " from routing table")
            self.frr_client.remove_static_route(ip_address, "255.255.255.255", nexthop)
| StarcoderdataPython |
1733556 | from django.db import models
class Tweet(models.Model):
    """A tweet: optional free text plus an optional uploaded image."""

    # Both fields may be left blank in forms and stored as NULL.
    content = models.TextField(null=True, blank=True)
    image = models.FileField(upload_to='images/', null=True, blank=True)
| StarcoderdataPython |
6580108 | <gh_stars>100-1000
# Python program to demonstrate Basic Euclidean Algorithm
# Function to return gcd of a and b
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclid's algorithm).

    Iterative form of the recurrence gcd(a, b) = gcd(b % a, a) with the
    base case gcd(0, b) = b.
    """
    while a != 0:
        a, b = b % a, a
    return b
# example with 2 numbers which could be taken as input also
a = 10
b = 15
print(gcd(a, b))  # -> 5, the greatest common divisor of 10 and 15
| StarcoderdataPython |
3374151 | import math
import random
import geomstats.backend as gs
from geomstats.geometry.spd_matrices import SPDMatrices
from tests.data_generation import _OpenSetTestData, _RiemannianMetricTestData
# Closed-form values precomputed once; used as expected results in the
# hand-checked smoke data below.
SQRT_2 = math.sqrt(2.0)
LN_2 = math.log(2.0)
EXP_1 = math.exp(1.0)
EXP_2 = math.exp(2.0)
SINH_1 = math.sinh(1.0)
class SPDMatricesTestData(_OpenSetTestData):
    """Test data for the SPDMatrices open set.

    Combines hand-checked smoke cases (small matrices with closed-form
    expected values) with randomized sizes consumed by the generic
    open-set test suite inherited from _OpenSetTestData.
    """

    # Hand-picked matrix sizes / point counts for the smoke tests.
    smoke_space_args_list = [(2,), (3,), (4,), (5,)]
    smoke_n_points_list = [1, 2, 1, 2]
    # Randomized sizes for the property-based tests.
    n_list = random.sample(range(2, 5), 2)
    space_args_list = [(n,) for n in n_list]
    n_points_list = random.sample(range(1, 5), 2)
    shape_list = [(n, n) for n in n_list]
    n_vecs_list = random.sample(range(1, 10), 2)
    def belongs_test_data(self):
        """Membership smoke cases: symmetric positive-definite vs not."""
        smoke_data = [
            dict(n=2, mat=[[3.0, -1.0], [-1.0, 3.0]], expected=True),
            dict(n=2, mat=[[1.0, 1.0], [2.0, 1.0]], expected=False),
            dict(
                n=3,
                mat=[[1.0, 2.0, 3.0], [2.0, 4.0, 5.0], [3.0, 5.0, 6.0]],
                expected=False,
            ),
            dict(
                n=2,
                mat=[[[1.0, 0.0], [0.0, 1.0]], [[1.0, -1.0], [0.0, 1.0]]],
                expected=[True, False],
            ),
        ]
        return self.generate_tests(smoke_data)
    def projection_test_data(self):
        # Negative eigenvalues are clipped to gs.atol by the projection.
        smoke_data = [
            dict(n=2, mat=[[1.0, 0.0], [0.0, 1.0]], expected=[[1.0, 0.0], [0.0, 1.0]]),
            dict(
                n=2,
                mat=[[-1.0, 0.0], [0.0, -2.0]],
                expected=[[gs.atol, 0.0], [0.0, gs.atol]],
            ),
        ]
        return self.generate_tests(smoke_data)
    def logm_test_data(self):
        # Matrix log of the identity is the zero matrix.
        smoke_data = [
            dict(spd_mat=[[1.0, 0.0], [0.0, 1.0]], expected=[[0.0, 0.0], [0.0, 0.0]])
        ]
        return self.generate_tests(smoke_data)
    def cholesky_factor_test_data(self):
        smoke_data = [
            dict(
                n=2,
                spd_mat=[[[1.0, 2.0], [2.0, 5.0]], [[1.0, 0.0], [0.0, 1.0]]],
                expected=[[[1.0, 0.0], [2.0, 1.0]], [[1.0, 0.0], [0.0, 1.0]]],
            ),
            dict(
                n=3,
                spd_mat=[[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]],
                expected=[
                    [SQRT_2, 0.0, 0.0],
                    [0.0, SQRT_2, 0.0],
                    [0.0, 0.0, SQRT_2],
                ],
            ),
        ]
        return self.generate_tests(smoke_data)
    def cholesky_factor_belongs_test_data(self):
        # Randomized: factors of random SPD points must be valid Cholesky factors.
        list_n = random.sample(range(1, 100), 10)
        n_samples = 10
        random_data = [
            dict(n=n, mat=SPDMatrices(n).random_point(n_samples)) for n in list_n
        ]
        return self.generate_tests([], random_data)
    def differential_cholesky_factor_test_data(self):
        smoke_data = [
            dict(
                n=2,
                tangent_vec=[[1.0, 1.0], [1.0, 1.0]],
                base_point=[[4.0, 2.0], [2.0, 5.0]],
                expected=[[1 / 4, 0.0], [3 / 8, 1 / 16]],
            )
        ]
        return self.generate_tests(smoke_data)
    def differential_power_test_data(self):
        smoke_data = [
            dict(
                power=0.5,
                tangent_vec=[[2.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.5, 0.5]],
                base_point=[[1.0, 0.0, 0.0], [0.0, 2.5, 1.5], [0.0, 1.5, 2.5]],
                expected=[
                    [1.0, 1 / 3, 1 / 3],
                    [1 / 3, 0.125, 0.125],
                    [1 / 3, 0.125, 0.125],
                ],
            )
        ]
        return self.generate_tests(smoke_data)
    def inverse_differential_power_test_data(self):
        # Inverse of the case above: tangent_vec and expected are swapped.
        smoke_data = [
            dict(
                power=0.5,
                tangent_vec=[
                    [1.0, 1 / 3, 1 / 3],
                    [1 / 3, 0.125, 0.125],
                    [1 / 3, 0.125, 0.125],
                ],
                base_point=[[1.0, 0.0, 0.0], [0.0, 2.5, 1.5], [0.0, 1.5, 2.5]],
                expected=[[2.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.5, 0.5]],
            )
        ]
        return self.generate_tests(smoke_data)
    def differential_log_test_data(self):
        smoke_data = [
            dict(
                tangent_vec=[[1.0, 1.0, 3.0], [1.0, 1.0, 3.0], [3.0, 3.0, 4.0]],
                base_point=[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 4.0]],
                expected=[
                    [1.0, 1.0, 2 * LN_2],
                    [1.0, 1.0, 2 * LN_2],
                    [2 * LN_2, 2 * LN_2, 1],
                ],
            )
        ]
        return self.generate_tests(smoke_data)
    def inverse_differential_log_test_data(self):
        # Inverse of the case above: tangent_vec and expected are swapped.
        smoke_data = [
            dict(
                tangent_vec=[
                    [1.0, 1.0, 2 * LN_2],
                    [1.0, 1.0, 2 * LN_2],
                    [2 * LN_2, 2 * LN_2, 1],
                ],
                base_point=[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 4.0]],
                expected=[[1.0, 1.0, 3.0], [1.0, 1.0, 3.0], [3.0, 3.0, 4.0]],
            )
        ]
        return self.generate_tests(smoke_data)
    def differential_exp_test_data(self):
        smoke_data = [
            dict(
                tangent_vec=[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
                base_point=[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]],
                expected=[
                    [EXP_1, EXP_1, SINH_1],
                    [EXP_1, EXP_1, SINH_1],
                    [SINH_1, SINH_1, 1 / EXP_1],
                ],
            )
        ]
        return self.generate_tests(smoke_data)
    def inverse_differential_exp_test_data(self):
        # Inverse of the case above: tangent_vec and expected are swapped.
        smoke_data = [
            dict(
                tangent_vec=[
                    [EXP_1, EXP_1, SINH_1],
                    [EXP_1, EXP_1, SINH_1],
                    [SINH_1, SINH_1, 1 / EXP_1],
                ],
                base_point=[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]],
                expected=[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
            )
        ]
        return self.generate_tests(smoke_data)
    def random_point_belongs_test_data(self):
        belongs_atol = gs.atol * 100000
        return self._random_point_belongs_test_data(
            self.smoke_space_args_list,
            self.smoke_n_points_list,
            self.space_args_list,
            self.n_points_list,
            belongs_atol,
        )
    def to_tangent_is_tangent_test_data(self):
        is_tangent_atol = gs.atol * 1000
        return self._to_tangent_is_tangent_test_data(
            SPDMatrices,
            self.space_args_list,
            self.shape_list,
            self.n_vecs_list,
            is_tangent_atol,
        )
    def random_tangent_vec_is_tangent_test_data(self):
        return self._random_tangent_vec_is_tangent_test_data(
            SPDMatrices, self.space_args_list, self.n_vecs_list
        )
    def projection_belongs_test_data(self):
        return self._projection_belongs_test_data(
            self.space_args_list, self.shape_list, self.n_points_list
        )
    def to_tangent_is_tangent_in_ambient_space_test_data(self):
        return self._to_tangent_is_tangent_in_ambient_space_test_data(
            SPDMatrices, self.space_args_list, self.shape_list
        )
class SPDMetricAffineTestData(_RiemannianMetricTestData):
    """Test data for the affine-invariant metric on SPD matrices.

    Metric args are (n, power_affine) pairs; the rest of the attributes
    parameterize the generic Riemannian-metric test suite.
    """

    n_list = random.sample(range(2, 5), 2)
    power_affine_list = [1.0, -0.5]
    metric_args_list = list(zip(n_list, power_affine_list))
    shape_list = [(n, n) for n in n_list]
    space_list = [SPDMatrices(n) for n in n_list]
    n_points_list = random.sample(range(1, 5), 2)
    n_points_a_list = random.sample(range(1, 5), 2)
    n_tangent_vecs_list = random.sample(range(1, 5), 2)
    n_points_b_list = [1]
    alpha_list = [1] * 2
    n_rungs_list = [1] * 2
    scheme_list = ["pole"] * 2
    def inner_product_test_data(self):
        smoke_data = [
            dict(
                n=3,
                power_affine=0.5,
                tangent_vec_a=[[2.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.5, 0.5]],
                tangent_vec_b=[[2.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.5, 0.5]],
                base_point=[[1.0, 0.0, 0.0], [0.0, 2.5, 1.5], [0.0, 1.5, 2.5]],
                expected=713 / 144,
            )
        ]
        return self.generate_tests(smoke_data)
    def exp_test_data(self):
        # At the identity, affine exp of a diagonal tangent is matrix exp.
        smoke_data = [
            dict(
                n=2,
                power_affine=1.0,
                tangent_vec=[[2.0, 0.0], [0.0, 2.0]],
                base_point=[[1.0, 0.0], [0.0, 1.0]],
                expected=[[EXP_2, 0.0], [0.0, EXP_2]],
            )
        ]
        return self.generate_tests(smoke_data)
    def log_test_data(self):
        smoke_data = [
            dict(
                n=2,
                power_affine=1.0,
                point=[[1.0, 0.0], [0.0, 1.0]],
                base_point=[[2.0, 0.0], [0.0, 2.0]],
                expected=[[-2 * LN_2, 0.0], [0.0, -2 * LN_2]],
            )
        ]
        return self.generate_tests(smoke_data)
    def exp_shape_test_data(self):
        return self._exp_shape_test_data(
            self.metric_args_list, self.space_list, self.shape_list
        )
    def log_shape_test_data(self):
        return self._log_shape_test_data(self.metric_args_list, self.space_list)
    def squared_dist_is_symmetric_test_data(self):
        return self._squared_dist_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
            atol=gs.atol * 1000,
        )
    def exp_belongs_test_data(self):
        return self._exp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            belongs_atol=gs.atol * 1000,
        )
    def log_is_tangent_test_data(self):
        return self._log_is_tangent_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            is_tangent_atol=gs.atol * 1000,
        )
    def geodesic_ivp_belongs_test_data(self):
        return self._geodesic_ivp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_points_list,
            belongs_atol=gs.atol * 1000,
        )
    def geodesic_bvp_belongs_test_data(self):
        return self._geodesic_bvp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            belongs_atol=gs.atol * 1000,
        )
    def exp_after_log_test_data(self):
        return self._exp_after_log_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            rtol=gs.rtol * 100,
            atol=gs.atol * 10000,
        )
    def log_after_exp_test_data(self):
        return self._log_after_exp_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            amplitude=10,
            rtol=gs.rtol * 100,
            atol=gs.atol * 10000,
        )
    def exp_ladder_parallel_transport_test_data(self):
        return self._exp_ladder_parallel_transport_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            self.n_rungs_list,
            self.alpha_list,
            self.scheme_list,
        )
    def exp_geodesic_ivp_test_data(self):
        return self._exp_geodesic_ivp_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            self.n_points_list,
            rtol=gs.rtol * 1000,
            atol=gs.atol * 1000,
        )
    def parallel_transport_ivp_is_isometry_test_data(self):
        return self._parallel_transport_ivp_is_isometry_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            is_tangent_atol=gs.atol * 1000,
            atol=gs.atol * 1000,
        )
    def parallel_transport_bvp_is_isometry_test_data(self):
        return self._parallel_transport_bvp_is_isometry_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            is_tangent_atol=gs.atol * 1000,
            atol=gs.atol * 1000,
        )
    def dist_is_symmetric_test_data(self):
        return self._dist_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def dist_is_positive_test_data(self):
        return self._dist_is_positive_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def squared_dist_is_positive_test_data(self):
        return self._squared_dist_is_positive_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def dist_is_norm_of_log_test_data(self):
        return self._dist_is_norm_of_log_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def dist_point_to_itself_is_zero_test_data(self):
        return self._dist_point_to_itself_is_zero_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            atol=gs.atol * 1000,
        )
    def inner_product_is_symmetric_test_data(self):
        return self._inner_product_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
        )
    def triangle_inequality_of_dist_test_data(self):
        return self._triangle_inequality_of_dist_test_data(
            self.metric_args_list, self.space_list, self.n_points_list
        )
class SPDMetricBuresWassersteinTestData(_RiemannianMetricTestData):
    """Test data for the Bures-Wasserstein metric on SPD matrices."""

    n_list = random.sample(range(2, 5), 2)
    metric_args_list = [(n,) for n in n_list]
    shape_list = [(n, n) for n in n_list]
    space_list = [SPDMatrices(n) for n in n_list]
    n_points_list = random.sample(range(1, 5), 2)
    n_tangent_vecs_list = random.sample(range(1, 5), 2)
    n_points_a_list = random.sample(range(1, 5), 2)
    n_points_b_list = [1]
    alpha_list = [1] * 2
    n_rungs_list = [1] * 2
    scheme_list = ["pole"] * 2
    def inner_product_test_data(self):
        smoke_data = [
            dict(
                n=3,
                tangent_vec_a=[[2.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.5, 0.5]],
                tangent_vec_b=[[1.0, 2.0, 4.0], [2.0, 3.0, 8.0], [4.0, 8.0, 5.0]],
                base_point=[[1.0, 0.0, 0.0], [0.0, 1.5, 0.5], [0.0, 0.5, 1.5]],
                expected=4.0,
            )
        ]
        return self.generate_tests(smoke_data)
    def exp_test_data(self):
        smoke_data = [
            dict(
                n=2,
                tangent_vec=[[2.0, 0.0], [0.0, 2.0]],
                base_point=[[1.0, 0.0], [0.0, 1.0]],
                expected=[[4.0, 0.0], [0.0, 4.0]],
            )
        ]
        return self.generate_tests(smoke_data)
    def log_test_data(self):
        smoke_data = [
            dict(
                n=2,
                point=[[4.0, 0.0], [0.0, 4.0]],
                base_point=[[1.0, 0.0], [0.0, 1.0]],
                expected=[[2.0, 0.0], [0.0, 2.0]],
            )
        ]
        return self.generate_tests(smoke_data)
    def squared_dist_test_data(self):
        # BW^2(A, B) = tr(A) + tr(B) - 2 tr((A^1/2 B A^1/2)^1/2).
        smoke_data = [
            dict(
                n=2,
                point_a=[[1.0, 0.0], [0.0, 1.0]],
                point_b=[[2.0, 0.0], [0.0, 2.0]],
                expected=2 + 4 - (2 * 2 * SQRT_2),
            )
        ]
        return self.generate_tests(smoke_data)
    def exp_shape_test_data(self):
        return self._exp_shape_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
        )
    def log_shape_test_data(self):
        return self._log_shape_test_data(
            self.metric_args_list,
            self.space_list,
        )
    def squared_dist_is_symmetric_test_data(self):
        return self._squared_dist_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
            atol=gs.atol * 1000,
        )
    def exp_belongs_test_data(self):
        return self._exp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            belongs_atol=gs.atol * 1000,
        )
    def log_is_tangent_test_data(self):
        return self._log_is_tangent_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            is_tangent_atol=gs.atol * 1000,
        )
    def geodesic_ivp_belongs_test_data(self):
        return self._geodesic_ivp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_points_list,
            belongs_atol=gs.atol * 1000,
        )
    def geodesic_bvp_belongs_test_data(self):
        return self._geodesic_bvp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            belongs_atol=gs.atol * 1000,
        )
    def exp_after_log_test_data(self):
        return self._exp_after_log_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            rtol=gs.rtol * 10,
            atol=gs.atol * 10,
        )
    def log_after_exp_test_data(self):
        return self._log_after_exp_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            amplitude=7.0,
            rtol=gs.rtol * 10,
            atol=gs.atol * 10,
        )
    def exp_ladder_parallel_transport_test_data(self):
        return self._exp_ladder_parallel_transport_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            self.n_rungs_list,
            self.alpha_list,
            self.scheme_list,
        )
    def exp_geodesic_ivp_test_data(self):
        return self._exp_geodesic_ivp_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            self.n_points_list,
            rtol=gs.rtol * 100000,
            atol=gs.atol * 100000,
        )
    def parallel_transport_ivp_is_isometry_test_data(self):
        return self._parallel_transport_ivp_is_isometry_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            is_tangent_atol=gs.atol * 1000,
            atol=gs.atol * 1000,
        )
    def parallel_transport_bvp_is_isometry_test_data(self):
        return self._parallel_transport_bvp_is_isometry_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            is_tangent_atol=gs.atol * 1000,
            atol=gs.atol * 1000,
        )
    def dist_is_symmetric_test_data(self):
        return self._dist_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def dist_is_positive_test_data(self):
        return self._dist_is_positive_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def squared_dist_is_positive_test_data(self):
        return self._squared_dist_is_positive_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def dist_is_norm_of_log_test_data(self):
        return self._dist_is_norm_of_log_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def dist_point_to_itself_is_zero_test_data(self):
        return self._dist_point_to_itself_is_zero_test_data(
            self.metric_args_list, self.space_list, self.n_points_list
        )
    def inner_product_is_symmetric_test_data(self):
        return self._inner_product_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
        )
    def parallel_transport_test_data(self):
        # NOTE(review): each k is an args tuple like (n,), so dict(n=k) binds
        # n to a tuple rather than an int -- confirm this is intended.
        smoke_data = [dict(n=k) for k in self.metric_args_list]
        return self.generate_tests(smoke_data)
    def triangle_inequality_of_dist_test_data(self):
        return self._triangle_inequality_of_dist_test_data(
            self.metric_args_list, self.space_list, self.n_points_list
        )
class SPDMetricEuclideanTestData(_RiemannianMetricTestData):
    """Test data for the (power-)Euclidean metric on SPD matrices."""

    n_list = random.sample(range(2, 5), 2)
    # NOTE(review): n_list has 2 elements, so zip() below keeps only the
    # first 2 of these 5 powers -- the trailing values are never used.
    power_euclidean_list = [1.0, -0.5, 0.5, 1.0, 1.0]
    metric_args_list = list(zip(n_list, power_euclidean_list))
    # Same sizes with power fixed to 1.0, for tests that require power 1.
    one_metric_args_list = list(zip(n_list, [1.0] * 5))
    shape_list = [(n, n) for n in n_list]
    space_list = [SPDMatrices(n) for n in n_list]
    n_points_list = random.sample(range(1, 5), 2)
    n_tangent_vecs_list = random.sample(range(1, 5), 2)
    n_points_a_list = random.sample(range(1, 5), 2)
    n_points_b_list = [1]
    alpha_list = [1] * 2
    n_rungs_list = [1] * 2
    scheme_list = ["pole"] * 2
    def inner_product_test_data(self):
        smoke_data = [
            dict(
                n=3,
                power_euclidean=0.5,
                tangent_vec_a=[[2.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.5, 0.5]],
                tangent_vec_b=[[2.0, 1.0, 1.0], [1.0, 0.5, 0.5], [1.0, 0.5, 0.5]],
                base_point=[[1.0, 0.0, 0.0], [0.0, 2.5, 1.5], [0.0, 1.5, 2.5]],
                expected=3472 / 576,
            )
        ]
        return self.generate_tests(smoke_data)
    def exp_domain_test_data(self):
        smoke_data = [
            dict(
                n=3,
                power_euclidean=1.0,
                tangent_vec=[[-1.0, 0.0, 0.0], [0.0, -0.5, 0.0], [0.0, 0.0, 1.0]],
                base_point=[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]],
                expected=[-3, 1],
            )
        ]
        return self.generate_tests(smoke_data)
    def exp_test_data(self):
        # For power 1 the Euclidean exp is base_point + tangent_vec.
        smoke_data = [
            dict(
                n=2,
                power_euclidean=1.0,
                tangent_vec=[[2.0, 0.0], [0.0, 2.0]],
                base_point=[[1.0, 0.0], [0.0, 1.0]],
                expected=[[3.0, 0.0], [0.0, 3.0]],
            )
        ]
        return self.generate_tests(smoke_data)
    def log_test_data(self):
        # For power 1 the Euclidean log is point - base_point.
        smoke_data = [
            dict(
                n=2,
                power_euclidean=1.0,
                point=[[2.0, 0.0], [0.0, 2.0]],
                base_point=[[1.0, 0.0], [0.0, 1.0]],
                expected=[[1.0, 0.0], [0.0, 1.0]],
            )
        ]
        return self.generate_tests(smoke_data)
    def parallel_transport_test_data(self):
        smoke_data = [
            dict(
                n=2,
                power_euclidean=1.0,
                tangent_vec_a=[[2.0, 0.0], [0.0, 2.0]],
                base_point=[[1.0, 0.0], [0.0, 1.0]],
                tangent_vec_b=[[1.0, 0.0], [0.0, 0.5]],
            )
        ]
        return self.generate_tests(smoke_data)
    def exp_shape_test_data(self):
        return self._exp_shape_test_data(
            self.metric_args_list, self.space_list, self.shape_list
        )
    def log_shape_test_data(self):
        return self._log_shape_test_data(self.metric_args_list, self.space_list)
    def squared_dist_is_symmetric_test_data(self):
        return self._squared_dist_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
            atol=gs.atol * 1000,
        )
    def exp_belongs_test_data(self):
        return self._exp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            belongs_atol=gs.atol * 10000,
        )
    def log_is_tangent_test_data(self):
        return self._log_is_tangent_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            is_tangent_atol=gs.atol * 10000,
        )
    def geodesic_ivp_belongs_test_data(self):
        # Uses the power-1 metric args only.
        return self._geodesic_ivp_belongs_test_data(
            self.one_metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_points_list,
            belongs_atol=gs.atol * 10000,
        )
    def geodesic_bvp_belongs_test_data(self):
        return self._geodesic_bvp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            belongs_atol=gs.atol * 1000,
        )
    def exp_after_log_test_data(self):
        return self._exp_after_log_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            rtol=gs.rtol * 100,
            atol=gs.atol * 10000,
        )
    def log_after_exp_test_data(self):
        return self._log_after_exp_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            amplitude=10,
            rtol=gs.rtol * 100,
            atol=gs.atol * 10000,
        )
    def exp_ladder_parallel_transport_test_data(self):
        return self._exp_ladder_parallel_transport_test_data(
            self.one_metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            self.n_rungs_list,
            self.alpha_list,
            self.scheme_list,
        )
    def exp_geodesic_ivp_test_data(self):
        return self._exp_geodesic_ivp_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            self.n_points_list,
            rtol=gs.rtol * 100000,
            atol=gs.atol * 100000,
        )
    def parallel_transport_ivp_is_isometry_test_data(self):
        return self._parallel_transport_ivp_is_isometry_test_data(
            self.one_metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            is_tangent_atol=gs.atol * 1000,
            atol=gs.atol * 1000,
        )
    def parallel_transport_bvp_is_isometry_test_data(self):
        return self._parallel_transport_bvp_is_isometry_test_data(
            self.one_metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            is_tangent_atol=gs.atol * 1000,
            atol=gs.atol * 1000,
        )
    def dist_is_symmetric_test_data(self):
        return self._dist_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def dist_is_positive_test_data(self):
        return self._dist_is_positive_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def squared_dist_is_positive_test_data(self):
        return self._squared_dist_is_positive_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def dist_is_norm_of_log_test_data(self):
        return self._dist_is_norm_of_log_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def dist_point_to_itself_is_zero_test_data(self):
        return self._dist_point_to_itself_is_zero_test_data(
            self.metric_args_list, self.space_list, self.n_points_list
        )
    def inner_product_is_symmetric_test_data(self):
        return self._inner_product_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
        )
    def triangle_inequality_of_dist_test_data(self):
        return self._triangle_inequality_of_dist_test_data(
            self.metric_args_list, self.space_list, self.n_points_list
        )
class SPDMetricLogEuclideanTestData(_RiemannianMetricTestData):
    """Test data for the log-Euclidean metric on SPD matrices.

    Class attributes are randomized parameter grids consumed by the generic
    ``_RiemannianMetricTestData`` machinery; the ``*_test_data`` methods
    either return hand-computed smoke-test cases or delegate to the
    inherited ``_..._test_data`` generators.
    """
    # Randomized matrix sizes and the derived per-size argument grids.
    n_list = random.sample(range(2, 4), 2)
    metric_args_list = [(n,) for n in n_list]
    shape_list = [(n, n) for n in n_list]
    space_list = [SPDMatrices(n) for n in n_list]
    # Randomized sample counts used by the generic test generators.
    n_points_list = random.sample(range(1, 4), 2)
    n_samples_list = random.sample(range(1, 4), 2)
    n_tangent_vecs_list = random.sample(range(1, 5), 2)
    n_points_a_list = random.sample(range(1, 4), 2)
    n_points_b_list = [1]
    # Ladder parallel-transport parameters (one entry per matrix size).
    alpha_list = [1] * 2
    n_rungs_list = [1] * 2
    scheme_list = ["pole"] * 2
    # Hand-computed smoke cases; LN_2, EXP_2, EXP_1, SQRT_2 are
    # module-level constants defined elsewhere in this file.
    def inner_product_test_data(self):
        smoke_data = [
            dict(
                n=3,
                tangent_vec_a=[[1.0, 1.0, 3.0], [1.0, 1.0, 3.0], [3.0, 3.0, 4.0]],
                tangent_vec_b=[[1.0, 1.0, 3.0], [1.0, 1.0, 3.0], [3.0, 3.0, 4.0]],
                base_point=[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 4.0]],
                expected=5.0 + (4.0 * ((2 * LN_2) ** 2)),
            )
        ]
        return self.generate_tests(smoke_data)
    def exp_test_data(self):
        smoke_data = [
            dict(
                n=2,
                tangent_vec=[[2.0, 0.0], [0.0, 2.0]],
                base_point=[[1.0, 0.0], [0.0, 1.0]],
                expected=[[EXP_2, 0.0], [0.0, EXP_2]],
            )
        ]
        return self.generate_tests(smoke_data)
    def log_test_data(self):
        smoke_data = [
            dict(
                n=2,
                point=[[2.0, 0.0], [0.0, 2.0]],
                base_point=[[1.0, 0.0], [0.0, 1.0]],
                expected=[[LN_2, 0.0], [0.0, LN_2]],
            )
        ]
        return self.generate_tests(smoke_data)
    def dist_test_data(self):
        smoke_data = [
            dict(
                n=2,
                point_a=[[1.0, 0.0], [0.0, 1.0]],
                point_b=[[EXP_1, 0.0], [0.0, EXP_1]],
                expected=SQRT_2,
            )
        ]
        return self.generate_tests(smoke_data)
    # The remaining methods delegate to the generic generators inherited
    # from _RiemannianMetricTestData, feeding them the grids above.
    def exp_shape_test_data(self):
        return self._exp_shape_test_data(
            self.metric_args_list, self.space_list, self.shape_list
        )
    def log_shape_test_data(self):
        return self._log_shape_test_data(self.metric_args_list, self.space_list)
    def squared_dist_is_symmetric_test_data(self):
        return self._squared_dist_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
            atol=gs.atol * 1000,
        )
    def exp_belongs_test_data(self):
        return self._exp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            belongs_atol=gs.atol * 10000,
        )
    def log_is_tangent_test_data(self):
        return self._log_is_tangent_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            is_tangent_atol=gs.atol * 10000,
        )
    def geodesic_ivp_belongs_test_data(self):
        return self._geodesic_ivp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_points_list,
            belongs_atol=gs.atol * 10000,
        )
    def geodesic_bvp_belongs_test_data(self):
        return self._geodesic_bvp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            belongs_atol=gs.atol * 1000,
        )
    def exp_after_log_test_data(self):
        return self._exp_after_log_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            rtol=gs.rtol * 100,
            atol=gs.atol * 10000,
        )
    def log_after_exp_test_data(self):
        return self._log_after_exp_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            amplitude=10,
            rtol=gs.rtol * 100,
            atol=gs.atol * 10000,
        )
    def exp_ladder_parallel_transport_test_data(self):
        return self._exp_ladder_parallel_transport_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            self.n_rungs_list,
            self.alpha_list,
            self.scheme_list,
        )
    def exp_geodesic_ivp_test_data(self):
        return self._exp_geodesic_ivp_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            self.n_points_list,
            rtol=gs.rtol * 100000,
            atol=gs.atol * 100000,
        )
    def parallel_transport_ivp_is_isometry_test_data(self):
        return self._parallel_transport_ivp_is_isometry_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            is_tangent_atol=gs.atol * 1000,
            atol=gs.atol * 1000,
        )
    def parallel_transport_bvp_is_isometry_test_data(self):
        return self._parallel_transport_bvp_is_isometry_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            is_tangent_atol=gs.atol * 1000,
            atol=gs.atol * 1000,
        )
    def dist_is_symmetric_test_data(self):
        return self._dist_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def dist_is_positive_test_data(self):
        return self._dist_is_positive_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
            is_positive_atol=gs.atol * 1000,
        )
    def squared_dist_is_positive_test_data(self):
        return self._squared_dist_is_positive_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def dist_is_norm_of_log_test_data(self):
        return self._dist_is_norm_of_log_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )
    def dist_point_to_itself_is_zero_test_data(self):
        return self._dist_point_to_itself_is_zero_test_data(
            self.metric_args_list, self.space_list, self.n_points_list
        )
    def inner_product_is_symmetric_test_data(self):
        return self._inner_product_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
        )
    def triangle_inequality_of_dist_test_data(self):
        return self._triangle_inequality_of_dist_test_data(
            self.metric_args_list, self.space_list, self.n_points_list
        )
| StarcoderdataPython |
5000611 | <reponame>lodino/Camera-Feature-Extraction
import glob
def load_img_from_dir(dir_path: str, f: str) -> list:
    """Return the paths of all files with extension *f* inside *dir_path*.

    An empty ``dir_path`` means the current working directory.  The
    extension filter is applied in every case; the previous implementation
    returned *every* entry of the cwd (ignoring ``f``) when ``dir_path``
    was empty, which was inconsistent with the non-empty branch.

    :param dir_path: directory to search; '' for the current directory
    :param f: file extension without the leading dot (e.g. 'png')
    :return: list of matching file paths
    """
    # Normalise the prefix so the glob pattern is well-formed either way.
    if dir_path and not dir_path.endswith('/'):
        dir_path += '/'
    return list(glob.glob(dir_path + '*.' + f))
| StarcoderdataPython |
def main():
    """Interactive demo of Python exception handling.

    Presents a menu of actions that deliberately raise exceptions and
    reports each one with a themed message.  The ``try`` block now lives
    *inside* the loop so that a handled exception returns the user to the
    menu instead of terminating the program (previously one exception
    ended the session because the whole ``while`` sat inside the ``try``).
    """
    option = 0
    while option != 4:
        print()
        print("1 - Divide by zero")
        print("2 - Open a nonexistent file")
        print("3 - Bad list index")
        print("4 - Quit")
        print()
        try:
            option = int(input("Choose an option (1-4)"))
            if option == 1:
                print(1 / 0)  # raises ZeroDivisionError
            if option == 2:
                open("IDONTEXIST")  # raises FileNotFoundError
            if option == 3:
                items = []
                print(items[100])  # raises IndexError
            if option == 4:
                print("Thanks for playing")
        except IndexError:
            print("Can't assign a list to a integer Doc")
        except ZeroDivisionError:
            print("You can't divide by zero Doc")
        except FileNotFoundError:
            print("That files not found Doc")
        except ValueError:
            # int(input(...)) failed to parse the menu choice.
            print("You gotta input one of the numbers one through four, Doc")
# Script entry point: run the interactive exception demo.
main()
| StarcoderdataPython |
6440184 | import requests
import pandas as pd
#data = requests.get('https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=IBM&outputsize=full&apikey=demo').json()
# Fetch the full daily adjusted time series for AAPL from Alpha Vantage.
# NOTE(review): the API key placeholder <KEY> must be replaced with a real key.
data = requests.get('https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=AAPL&outputsize=full&apikey=<KEY>').json()
# Rows are indexed by date string; values arrive as strings from the JSON API.
df = pd.DataFrame.from_dict(data['Time Series (Daily)'], orient='index')
df['5. adjusted close'] = df['5. adjusted close'].astype(float)
df['6. volume'] = df['6. volume'].astype(float)
# The API returns newest-first; reverse to chronological order.
df = df.iloc[::-1]
df.to_pickle("./daily/AAPL_full.pkl") | StarcoderdataPython |
5180201 |
"""
Methods and heuristics used to build the bitcoin user network graph from transactions list
"""
from .graph_database_driver import GraphDatabaseDriver
class UserNetwork:
    """ Process transactions and populate the GraphDatabaseDriver with addresses and their relations
    """
    def __init__(self):
        self.driver = GraphDatabaseDriver()
        # Only H1 is active by default; append h2/h3 here to enable them.
        self.heuristics_enabled = [self.h1_inputs]
        # Keep track of each heuristic usage
        # NOTE(review): index 3 (h4) is never incremented anywhere - confirm intended.
        self.heuristics_used = [0, 0, 0, 0]
        # Keep set of known addresses (input or output)
        self.known_addresses = set()
        # Dictionary of all addresses after H1 with user id
        self.known_users = dict()
    def close(self):
        # Release the underlying graph-database connection.
        self.driver.close()
    def populate_known_addresses(self):
        """ Fetch addresses from graph database and add addresses to known addresses set
        """
        address_count = self.driver.get_address_count()
        if address_count > 0:
            print("Fetching", address_count, "addresses from database")
            self.driver.fetch_all_known_addresses(self.add_known_address)
            print(len(self.known_addresses), "uniques addresses added\n")
        else:
            print("No already known addresses in database")
    def populate_known_addresses_with_users(self):
        # Fill known_users with every (address, user-id) pair stored in the database.
        print("Fetching all addresses with users from database")
        self.driver.fetch_all_known_addresses_with_users(self.add_known_address_with_user)
        print(len(self.known_users), "uniques addresses with users added\n")
    def commit_new_entries(self):
        # Flush pending address/relation additions to the database.
        self.driver.commit_additions()
    def commit_new_user_relations(self):
        # Flush pending user-to-user transaction edges to the database.
        self.driver.commit_user_relations()
    def add_transaction(self, transaction):
        """ Process a bitcoin transaction and addresses and relations to the graph database
        """
        # Applies enabled heuristics
        for heuristic in self.heuristics_enabled:
            heuristic(transaction)
        # Register every address seen in the transaction (inputs and outputs).
        for a in transaction.inputs + transaction.outputs:
            if not self.is_known_address(a.address):
                self.add_known_address(a.address)
                self.driver.add_address(a.address)
    def add_known_address(self, address):
        """ Add an address to known addresses list after converting it to byte array
        :param address: String of bitcoin address
        """
        b58_address = self.encode_address(address)
        self.known_addresses.add(b58_address)
    def is_known_address(self, address):
        """ Check if address is in known addresses set
        :param address: String of bitcoin address
        """
        return self.encode_address(address) in self.known_addresses
    def add_known_address_with_user(self, address, user):
        """ Add a key for the given address to known_users, storing the owner's id
        :param address: String of bitcoin address
        :param user: Id of owner
        """
        b58_address = self.encode_address(address)
        self.known_users[b58_address] = int(user)
    def get_user_id_from_address(self, address):
        """ Return the id of the user associated with an address
        :param address: String of bitcoin address
        :return: Id of user, or None if the address has no known owner
        """
        b58_address = self.encode_address(address)
        if b58_address in self.known_users:
            return self.known_users[b58_address]
        else:
            return None
    def generate_users_nodes(self):
        # Cluster addresses into users (connected components) and persist them.
        print("Finding connected components from addresses...")
        user_count = self.driver.find_connected_components()
        print(user_count, "unique users found, creating User nodes...")
        self.driver.create_user_nodes()
        print("User nodes created\n")
    @staticmethod
    def encode_address(address):
        """ Convert address string to byte array
        :param address: String of bitcoin address
        """
        return str.encode(address)
    def h1_inputs(self, transaction):
        """ All addresses used as input of the same transaction belong to the
        same controlling entity, called a User.
        """
        # If more than 1 input address
        if len(transaction.inputs) > 1:
            # An edge is added between the first input address and all the others
            for input_transaction in transaction.inputs[1:]:
                self.driver.add_relation([transaction.inputs[0].address, input_transaction.address])
                self.heuristics_used[0] += 1
    def h2_change_address(self, transaction):
        """ If there are exactly two output-addresses a1 and a2, that one of them
        (a1) appears for the first time and that the other (a2) has appeared before, then a1
        is considered to be the change address.
        """
        # 2 output addresses exactly
        if len(transaction.outputs) == 2:
            a1_known_address = self.is_known_address(transaction.outputs[0].address)
            a2_known_address = self.is_known_address(transaction.outputs[1].address)
            change_address = None
            # a1 is the change address
            if a2_known_address and not a1_known_address:
                change_address = transaction.outputs[0].address
            # a2 is the change address
            elif a1_known_address and not a2_known_address:
                change_address = transaction.outputs[1].address
            if change_address is not None:
                # Link every input to the inferred change address.
                for input_transaction in transaction.inputs:
                    self.driver.add_relation([input_transaction.address, change_address])
                    self.heuristics_used[1] += 1
    def h3_one_time_change_address(self, transaction):
        """ An address is considered a one-time change address if it satisfies the following properties:
                - The transaction is not a coin generation
                - The address is not among the input addresses (address reuse)
                - It is the only output address appearing for the first time
        """
        # Coinbase transaction (coin generation) have address hash "0" as input
        if transaction.inputs[0].address != "0":
            first_time_address = False
            one_time_change_address = None
            for output_address in transaction.outputs:
                # Check if it is the only one to appear for the first time
                if not self.is_known_address(output_address.address):
                    if first_time_address:
                        # At least two new addresses as outputs
                        return
                    else:
                        first_time_address = True
                        # Check if not among inputs
                        if output_address.address not in map(lambda a: a.address, transaction.inputs):
                            one_time_change_address = output_address.address
            if one_time_change_address is not None:
                for input_transaction in transaction.inputs:
                    self.driver.add_relation([input_transaction.address, one_time_change_address])
                    self.heuristics_used[2] += 1
    def h4_community_detection(self, transaction):
        """ 1. A first level of aggregation is created by applying H1. Sets of addresses belonging
        to a same user are used as nodes of the hint network
            2. For each transaction in the dataset, considering users found by H1 instead of
        individual addresses, an edge is added (if not already present) between the (necessarily
        unique) sender and each recipient if:
                • there are less than 10 users in the ouput of the transaction (recipients)
                • all recipients are different from the sender, i.e there is no already known
                change address
        On this network, a community detection algorithm is applied. Communities correspond
        to unique users
        """
        if len(transaction.outputs) < 10:
            # Get the user id from known_users
            sender_id = self.get_user_id_from_address(transaction.inputs[0].address)
            if sender_id is None:
                return
            # First pass: bail out if any recipient is unknown or equals the sender.
            for output in transaction.outputs:
                # A recipient is the same as the sender
                recipient = self.get_user_id_from_address(output.address)
                if recipient is None or recipient == sender_id:
                    return
            # Second pass: record a weighted edge sender -> each recipient.
            for output in transaction.outputs:
                recipient_id = self.get_user_id_from_address(output.address)
                value = int(output.value)
                self.driver.add_user_relation(sender_id, recipient_id, value)
    def community_detection(self):
        """ Apply a community detection algorithm to uniques users nodes with transactions as edges
        """
        self.driver.run_louvain_algorithm()
| StarcoderdataPython |
6404291 | <reponame>diofeher/ctf-writeups
def solution(A, B, K):
    """Count the integers in the interval [A, B] divisible by K."""
    # Multiples of K up to B, minus multiples of K strictly below A.
    # Python's floor division makes this correct for A == 0 as well.
    return B // K - (A - 1) // K
return (B - (A - A % K )) // K | StarcoderdataPython |
4806894 | <reponame>wittawatj/kernel-mod
"""
Utility functions specific for experiments. These functions are less general
than the ones in kmod.util.
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import kmod.util as util
import numpy as np
from scipy import linalg
from sklearn.metrics.pairwise import polynomial_kernel
import sys
import kmod.glo as glo
import os
import torch
from kmod.mnist.dcgan import Generator
from kmod.mnist.dcgan import DCGAN
import kmod.mnist.dcgan as mnist_dcgan
import kmod.net as net
import kmod.gen as gen
# Generator architectures recognised by load_mnist_gen below.
mnist_model_names= ['dcgan', 'began', 'wgan', 'lsgan', 'gan', 'wgan_gp',
        'vae']
# Root folder holding the pretrained model checkpoints.
shared_resource_path = glo.shared_resource_folder()
def plot_images_grid(images, func_img=None, grid_rows=4, grid_cols=4):
    """
    Plot images in a grid, starting from index 0 to the maximum size of the
    grid.

    images: stack of images images[i] is one image; must contain at least
        grid_rows * grid_cols entries.
    func_img: function to run on each image before plotting
    grid_rows, grid_cols: grid dimensions
    """
    gs1 = gridspec.GridSpec(grid_rows, grid_cols)
    gs1.update(wspace=0.05, hspace=0.05) # set the spacing between axes.
    for i in range(grid_rows*grid_cols):
        # Optionally preprocess (e.g. reshape/denormalize) before display.
        if func_img is not None:
            img = func_img(images[i])
        else:
            img = images[i]
        # plt.subplot(grid_rows, grid_cols, i+1)
        plt.subplot(gs1[i])
        plt.imshow(img)
        plt.axis('off')
########################
#based on https://github.com/mbinkowski/MMD-GAN
###############################
def get_splits(n, splits=10, split_method='openai'):
    """Return index groups used to split n samples into subsets.

    :param n: total number of samples
    :param splits: number of groups to produce
    :param split_method: 'openai' (contiguous slices), 'bootstrap'
        (resampled index arrays), or 'copy' (full index range repeated)
    :raises ValueError: for any other split_method.

    Bug fix: the original ``elif 'copy':`` was a truthy string constant,
    so every unknown split_method silently fell into the 'copy' branch
    and the ValueError below was unreachable.
    """
    if split_method == 'openai':
        return [slice(i * n // splits, (i + 1) * n // splits)
                for i in range(splits)]
    elif split_method == 'bootstrap':
        return [np.random.choice(n, n) for _ in range(splits)]
    elif split_method == 'copy':
        return [np.arange(n) for _ in range(splits)]
    else:
        raise ValueError("bad split_method {}".format(split_method))
def inception_score(preds, **split_args):
    """Compute the Inception Score of class-probability predictions.

    preds: (n, n_classes) array of softmax outputs.
    split_args: forwarded to get_splits to partition the rows.
    Returns one score (exp of the mean KL divergence to the marginal)
    per split, as a numpy array.
    """
    def _split_score(idx):
        chunk = preds[idx]
        # KL(p(y|x) || p(y)) for each row, summed over classes.
        kl_div = chunk * (np.log(chunk) - np.log(chunk.mean(axis=0, keepdims=True)))
        return np.exp(np.sum(kl_div, 1).mean())
    groups = get_splits(preds.shape[0], **split_args)
    return np.array([_split_score(idx) for idx in groups])
def fid_permutation_test(X, Y, Z, alpha=0.01, n_permute=400, seed=893):
    """Permutation test comparing FID(X, Z) against FID(Y, Z).

    X, Y: feature codes from two candidate models (same shape).
    Z: feature codes from the reference distribution (same shape).
    alpha: significance level.
    n_permute: number of label permutations used to build the null.
    seed: RNG seed (also passed to the timing context).
    Returns a dict with the p-value, test statistic, rejection flag,
    and wall-clock time.
    """
    assert X.shape == Y.shape
    assert X.shape == Z.shape
    XYZ = np.vstack([X, Y, Z])
    nxyz = XYZ.shape[0]
    nx = ny = X.shape[0]
    # Single 'copy' split: fid_score sees each full sample once.
    splits = 1
    split_size = X.shape[0]
    split_method = 'copy'
    split_args = {'splits': splits, 'n': split_size, 'split_method': split_method}
    with util.ContextTimer(seed) as t:
        # Observed statistic: difference of the two FIDs to the reference.
        stat = np.mean(fid_score(X, Z, **split_args)) - np.mean(fid_score(Y, Z, **split_args))
        list_fid = np.zeros((n_permute))
        with util.NumpySeedContext(seed):
            for r in range(n_permute):
                # Randomly reassign the pooled rows to the three groups.
                ind = np.random.choice(nxyz, nxyz, replace=False)
                indx = ind[:nx]
                indy = ind[nx:nx+ny]
                indz = ind[nx+ny:]
                codes_p = XYZ[indx]
                codes_q = XYZ[indy]
                codes_r = XYZ[indz]
                fid_xz = np.mean(fid_score(codes_p, codes_r, **split_args))
                fid_yz = np.mean(fid_score(codes_q, codes_r, **split_args))
                list_fid[r] = fid_xz - fid_yz
        # One-sided p-value: fraction of permuted stats exceeding the observed one.
        pvalue = np.mean(list_fid > stat)
    results = {'alpha': alpha, 'pvalue': pvalue, 'test_stat': stat,
               'h0_rejected': pvalue < alpha, 'n_permute': n_permute,
               'time_secs': t.secs,
               }
    return results
def fid_score(codes_g, codes_r, eps=1e-6, output=sys.stdout, **split_args):
    """Frechet Inception Distance between two sets of feature codes.

    codes_g, codes_r: (n, d) arrays of generated / reference features.
    eps: diagonal regularizer used when the matrix square root is
        numerically unstable.
    split_args: forwarded to get_splits; one FID value is returned per split.
    """
    splits_g = get_splits(**split_args)
    splits_r = get_splits(**split_args)
    assert len(splits_g) == len(splits_r)
    d = codes_g.shape[1]
    assert codes_r.shape[1] == d
    scores = np.zeros(len(splits_g))
    for i, (w_g, w_r) in enumerate(zip(splits_g, splits_r)):
        part_g = codes_g[w_g]
        part_r = codes_r[w_r]
        mn_g = part_g.mean(axis=0)
        mn_r = part_r.mean(axis=0)
        cov_g = np.cov(part_g, rowvar=False)
        cov_r = np.cov(part_r, rowvar=False)
        covmean, _ = linalg.sqrtm(cov_g.dot(cov_r), disp=False)
        # If sqrtm produced non-finite entries, regularize the diagonals
        # and retry (standard FID numerical fallback).
        if not np.isfinite(covmean).all():
            cov_g[range(d), range(d)] += eps
            cov_r[range(d), range(d)] += eps
            covmean = linalg.sqrtm(cov_g.dot(cov_r))
        scores[i] = np.sum((mn_g - mn_r) ** 2) + (
            np.trace(cov_g) + np.trace(cov_r) - 2 * np.trace(covmean))
    return scores
def polynomial_mmd(codes_g, codes_r, degree=3, gamma=None, coef0=1,
                   var_at_m=None, ret_var=True):
    """MMD^2 (and optionally its variance) under a polynomial kernel.

    Computes the three kernel matrices with sklearn's polynomial_kernel
    and delegates the unbiased MMD estimate to _mmd2_and_variance.
    """
    # use k(x, y) = (gamma <x, y> + coef0)^degree
    # default gamma is 1 / dim
    X = codes_g
    Y = codes_r
    K_XX = polynomial_kernel(X, degree=degree, gamma=gamma, coef0=coef0)
    K_YY = polynomial_kernel(Y, degree=degree, gamma=gamma, coef0=coef0)
    K_XY = polynomial_kernel(X, Y, degree=degree, gamma=gamma, coef0=coef0)
    return _mmd2_and_variance(K_XX, K_XY, K_YY,
                              var_at_m=var_at_m, ret_var=ret_var)
def _sqn(arr):
flat = np.ravel(arr)
return flat.dot(flat)
def _mmd2_and_variance(K_XX, K_XY, K_YY, unit_diagonal=False,
                       mmd_est='unbiased', block_size=1024,
                       var_at_m=None, ret_var=True):
    """Estimate MMD^2 (and optionally its asymptotic variance) from kernel matrices.

    K_XX, K_XY, K_YY: (m, m) kernel matrices.
    unit_diagonal: set True when k(x, x) == 1 to skip diagonal extraction.
    mmd_est: 'biased', 'unbiased', or 'u-statistic'.
    var_at_m: sample size at which to evaluate the variance (defaults to m).
    ret_var: when False only the MMD^2 estimate is returned.
    """
    # based on
    # https://github.com/dougalsutherland/opt-mmd/blob/master/two_sample/mmd.py
    # but changed to not compute the full kernel matrix at once
    m = K_XX.shape[0]
    assert K_XX.shape == (m, m)
    assert K_XY.shape == (m, m)
    assert K_YY.shape == (m, m)
    if var_at_m is None:
        var_at_m = m
    # Get the various sums of kernels that we'll use
    # Kts drop the diagonal, but we don't need to compute them explicitly
    if unit_diagonal:
        diag_X = diag_Y = 1
        sum_diag_X = sum_diag_Y = m
        sum_diag2_X = sum_diag2_Y = m
    else:
        diag_X = np.diagonal(K_XX)
        diag_Y = np.diagonal(K_YY)
        sum_diag_X = diag_X.sum()
        sum_diag_Y = diag_Y.sum()
        sum_diag2_X = _sqn(diag_X)
        sum_diag2_Y = _sqn(diag_Y)
    Kt_XX_sums = K_XX.sum(axis=1) - diag_X
    Kt_YY_sums = K_YY.sum(axis=1) - diag_Y
    K_XY_sums_0 = K_XY.sum(axis=0)
    K_XY_sums_1 = K_XY.sum(axis=1)
    Kt_XX_sum = Kt_XX_sums.sum()
    Kt_YY_sum = Kt_YY_sums.sum()
    K_XY_sum = K_XY_sums_0.sum()
    if mmd_est == 'biased':
        mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * m)
                + (Kt_YY_sum + sum_diag_Y) / (m * m)
                - 2 * K_XY_sum / (m * m))
    else:
        assert mmd_est in {'unbiased', 'u-statistic'}
        mmd2 = (Kt_XX_sum + Kt_YY_sum) / (m * (m-1))
        if mmd_est == 'unbiased':
            mmd2 -= 2 * K_XY_sum / (m * m)
        else:
            # u-statistic: also drop the K_XY diagonal.
            mmd2 -= 2 * (K_XY_sum - np.trace(K_XY)) / (m * (m-1))
    if not ret_var:
        return mmd2
    # Variance estimate via the zeta_1 / zeta_2 terms of the U-statistic.
    Kt_XX_2_sum = _sqn(K_XX) - sum_diag2_X
    Kt_YY_2_sum = _sqn(K_YY) - sum_diag2_Y
    K_XY_2_sum = _sqn(K_XY)
    dot_XX_XY = Kt_XX_sums.dot(K_XY_sums_1)
    dot_YY_YX = Kt_YY_sums.dot(K_XY_sums_0)
    m1 = m - 1
    m2 = m - 2
    zeta1_est = (
        1 / (m * m1 * m2) * (
            _sqn(Kt_XX_sums) - Kt_XX_2_sum + _sqn(Kt_YY_sums) - Kt_YY_2_sum)
        - 1 / (m * m1)**2 * (Kt_XX_sum**2 + Kt_YY_sum**2)
        + 1 / (m * m * m1) * (
            _sqn(K_XY_sums_1) + _sqn(K_XY_sums_0) - 2 * K_XY_2_sum)
        - 2 / m**4 * K_XY_sum**2
        - 2 / (m * m * m1) * (dot_XX_XY + dot_YY_YX)
        + 2 / (m**3 * m1) * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
    )
    zeta2_est = (
        1 / (m * m1) * (Kt_XX_2_sum + Kt_YY_2_sum)
        - 1 / (m * m1)**2 * (Kt_XX_sum**2 + Kt_YY_sum**2)
        + 2 / (m * m) * K_XY_2_sum
        - 2 / m**4 * K_XY_sum**2
        - 4 / (m * m * m1) * (dot_XX_XY + dot_YY_YX)
        + 4 / (m**3 * m1) * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
    )
    var_est = (4 * (var_at_m - 2) / (var_at_m * (var_at_m - 1)) * zeta1_est
               + 2 / (var_at_m * (var_at_m - 1)) * zeta2_est)
    return mmd2, var_est
def polynomial_mmd_averages(codes_g, codes_r, n_subsets=50, subset_size=1000, ret_var=True, output=sys.stdout, **kernel_args):
    """Average polynomial-kernel MMD^2 over random subsets.

    Draws n_subsets random subsets (without replacement) of size
    subset_size from each code set and computes the MMD^2 estimate for
    every pair; returns the array of estimates, plus the variance
    estimates when ret_var is True.
    """
    var_at_m = min(codes_g.shape[0], codes_r.shape[0])
    mmd_vals = np.zeros(n_subsets)
    var_vals = np.zeros(n_subsets) if ret_var else None
    for idx in range(n_subsets):
        sub_g = codes_g[np.random.choice(len(codes_g), subset_size, replace=False)]
        sub_r = codes_r[np.random.choice(len(codes_r), subset_size, replace=False)]
        estimate = polynomial_mmd(sub_g, sub_r, **kernel_args,
                                  var_at_m=var_at_m, ret_var=ret_var)
        if ret_var:
            mmd_vals[idx], var_vals[idx] = estimate
        else:
            mmd_vals[idx] = estimate
    return (mmd_vals, var_vals) if ret_var else mmd_vals
def load_mnist_gen(model_name, epoch, tensor_type, batch_size=64, **load_options):
    """Load a pretrained MNIST generator from the shared resource folder.

    model_name: one of mnist_model_names ('dcgan', 'began', 'wgan',
        'lsgan', 'gan', 'wgan_gp', 'vae'); case-insensitive.
    epoch: training epoch of the checkpoint to load.
    tensor_type: torch tensor type the returned generator should use.
    batch_size: only used to locate the DCGAN checkpoint file name.
    load_options: forwarded to the underlying load() call
        (e.g. map_location).
    Returns a kmod.gen.PTNoiseTransformer-style generator.
    """
    name = model_name.lower()
    if name not in mnist_model_names:
        raise ValueError('Model name has be one of '
                         '{} and was'.format(mnist_model_names, name))
    print('Loading ', name)
    if name == 'dcgan':
        # load a model from the shared folder
        model_folder = glo.shared_resource_folder('prob_models', 'mnist_dcgan')
        model_fname = 'mnist_dcgan_ep{}_bs{}.pt'.format(epoch, batch_size)
        model_fpath = os.path.join(model_folder, model_fname)
        print('Shared resource path at: {}'.format(shared_resource_path))
        print('Model folder: {}'.format(model_folder))
        print('Model file: ', model_fname)
        # load the generator of type kmod.gen.PTNoiseTransformer
        dcgan = net.SerializableModule.load(model_fpath, **load_options)
        # make sure to respect the specified tensor_type
        dcgan.tensor_type = tensor_type
        return dcgan
    elif ('gan' in name):
        # NOTE: 'dcgan' also contains 'gan' but is handled above, so this
        # branch covers began/wgan/lsgan/gan/wgan_gp.
        # load a model from the shared folder
        model_folder = glo.shared_resource_folder('prob_models', 'mnist_{}'.format(name), str(epoch))
        model_fname = '{}_G.pkl'.format(name.upper())
        model_fpath = os.path.join(model_folder, model_fname)
        print('Shared resource path at: {}'.format(shared_resource_path))
        print('Model folder: {}'.format(model_folder))
        print('Model file: ', model_fname)
        from kmod.mnist.began import Generator as Generator_
        # load the generator of type kmod.gen.PTNoiseTransformer
        image_size = 28
        z_dim = 62 #dimention of noise, this is fixed. so don't change
        g = Generator_(input_dim=z_dim,input_size=image_size)
        in_out_shapes = (z_dim, image_size)
        def f_sample_noise(n):
            # Uniform noise on [0, 1), as used by these GAN trainers.
            return torch.rand((n, z_dim))
        g.load(model_fpath, **load_options)
        #print(g.fc[0].weight.is_cuda)
        gan_model = gen.PTNoiseTransformerAdapter(module=g,
                f_sample_noise=f_sample_noise, in_out_shapes=in_out_shapes,
                tensor_type=tensor_type)
        return gan_model
    elif name == 'vae':
        # load a model from the shared folder
        model_folder = glo.shared_resource_folder('prob_models', 'mnist_{}'.format(name), str(epoch))
        model_fname = '{}.pkl'.format(name.upper())
        model_fpath = os.path.join(model_folder, model_fname)
        print('Shared resource path at: {}'.format(shared_resource_path))
        print('Model folder: {}'.format(model_folder))
        print('Model file: ', model_fname)
        from kmod.mnist.vae import VAE
        # load the generator of type kmod.gen.PTNoiseTransformer
        image_size = 28
        z_dim = 20 #dimention of noise, this is fixed. so don't change
        g = VAE()
        in_out_shapes = (z_dim, image_size)
        def f_sample_noise(n):
            # Standard normal noise: the VAE prior.
            return torch.randn((n, z_dim))
        g.load(model_fpath, **load_options)
        #print(g.fc[0].weight.is_cuda)
        vae = gen.PTNoiseTransformerAdapter(module=g,
                f_sample_noise=f_sample_noise, in_out_shapes=in_out_shapes,
                tensor_type=tensor_type)
        return vae
| StarcoderdataPython |
3529524 | #!/usr/bin/env python
'''
Copyright (c) 2019, Robot Control and Pattern Recognition Group, Warsaw University of Technology
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Warsaw University of Technology nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYright HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: <NAME>
'''
######################## -- BEGIN -- Auxiliary subsystem script ########################
from auxiliary_agent_ballcollector import *
def sendMessageToMotor(ser, moveCommand):
    """Translate a high-level move command into the 4-wheel serial protocol.

    ser: serial port object exposing write(bytes).
    moveCommand: message with .cmd.data (command string), .desiredSpeed.data
        (speed magnitude) and .direction.data (heading in degrees)
        -- presumably a ROS message; TODO confirm.

    Protocol: "S" stops all motors; otherwise "D:<w1>:<w2>:<w3>:<w4>;"
    sets the four signed wheel speeds.  The six duplicated string-building
    branches of the original are collapsed into one sign table, and the
    bare ``except:`` (which also swallowed KeyboardInterrupt/SystemExit)
    is narrowed to ``except Exception``.
    """
    print("moveCommand=", moveCommand)
    strSpeed = str(moveCommand.desiredSpeed.data)
    cmd = moveCommand.cmd.data
    cmdToSend = ""
    # Per-wheel sign prefixes ('' = forward, '-' = reverse) for each command.
    directionSigns = {
        0: ('', '-', '-', ''),     # move front
        180: ('-', '', '', '-'),   # move backwards
        270: ('', '', '-', '-'),   # move left
        90: ('-', '-', '', ''),    # move right
    }
    if cmd == "stop":
        ser.write(b"S")
        signs = None
    elif cmd == "rotate left":
        signs = ('-', '', '-', '')
    elif cmd == "rotate right":
        signs = ('', '-', '', '-')
    else:
        # Unknown headings yield None -> nothing is sent, as before.
        signs = directionSigns.get(moveCommand.direction.data)
    if signs is not None:
        cmdToSend = "D:" + ":".join(s + strSpeed for s in signs) + ";"
        print(cmdToSend)
    print("EEEEEEEEEEEEEEEE move command=", moveCommand)
    print("cmdToSend=", cmdToSend)
    try:
        if cmdToSend:
            ser.write(cmdToSend.encode('UTF-8'))
    except Exception:
        print("Error connection with Arduino")
######################## -- END -- Auxiliary subsystem script ########################
| StarcoderdataPython |
5141971 | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class SubscriptionProduct(object):
    """
    Product description
    """

    def __init__(self, **kwargs):
        """
        Initializes a new SubscriptionProduct object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param part_number:
            The value to assign to the part_number property of this SubscriptionProduct.
        :type part_number: str

        :param name:
            The value to assign to the name property of this SubscriptionProduct.
        :type name: str

        :param unit_of_measure:
            The value to assign to the unit_of_measure property of this SubscriptionProduct.
        :type unit_of_measure: str

        :param provisioning_group:
            The value to assign to the provisioning_group property of this SubscriptionProduct.
        :type provisioning_group: str

        """
        self.swagger_types = {
            'part_number': 'str',
            'name': 'str',
            'unit_of_measure': 'str',
            'provisioning_group': 'str'
        }
        self.attribute_map = {
            'part_number': 'partNumber',
            'name': 'name',
            'unit_of_measure': 'unitOfMeasure',
            'provisioning_group': 'provisioningGroup'
        }
        self._part_number = None
        self._name = None
        self._unit_of_measure = None
        self._provisioning_group = None
    @property
    def part_number(self):
        """
        **[Required]** Gets the part_number of this SubscriptionProduct.
        Product part number

        :return: The part_number of this SubscriptionProduct.
        :rtype: str
        """
        return self._part_number
    @part_number.setter
    def part_number(self, part_number):
        """
        Sets the part_number of this SubscriptionProduct.
        Product part number

        :param part_number: The part_number of this SubscriptionProduct.
        :type: str
        """
        self._part_number = part_number
    @property
    def name(self):
        """
        **[Required]** Gets the name of this SubscriptionProduct.
        Product name

        :return: The name of this SubscriptionProduct.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this SubscriptionProduct.
        Product name

        :param name: The name of this SubscriptionProduct.
        :type: str
        """
        self._name = name
    @property
    def unit_of_measure(self):
        """
        **[Required]** Gets the unit_of_measure of this SubscriptionProduct.
        Unit of measure

        :return: The unit_of_measure of this SubscriptionProduct.
        :rtype: str
        """
        return self._unit_of_measure
    @unit_of_measure.setter
    def unit_of_measure(self, unit_of_measure):
        """
        Sets the unit_of_measure of this SubscriptionProduct.
        Unit of measure

        :param unit_of_measure: The unit_of_measure of this SubscriptionProduct.
        :type: str
        """
        self._unit_of_measure = unit_of_measure
    @property
    def provisioning_group(self):
        """
        Gets the provisioning_group of this SubscriptionProduct.
        Product provisioning group

        :return: The provisioning_group of this SubscriptionProduct.
        :rtype: str
        """
        return self._provisioning_group
    @provisioning_group.setter
    def provisioning_group(self, provisioning_group):
        """
        Sets the provisioning_group of this SubscriptionProduct.
        Product provisioning group

        :param provisioning_group: The provisioning_group of this SubscriptionProduct.
        :type: str
        """
        self._provisioning_group = provisioning_group
    def __repr__(self):
        return formatted_flat_dict(self)
    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
| StarcoderdataPython |
4862119 | <filename>infoqscraper/__init__.py<gh_stars>10-100
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class ConversionError(Exception):
    """infoqscraper error raised for conversion failures."""
    pass
class DownloadError(Exception):
    """infoqscraper error raised for download failures."""
    pass
class AuthenticationError(Exception):
    """infoqscraper error raised for authentication failures."""
    pass
import sys
import subprocess
# If Python 2.6, monkey patch subprocess module to add check_output
# Not sure if we really should do this...
# On Python < 2.7, backport subprocess.check_output (added in 2.7).
if sys.hexversion < 0x02070000:
    def _check_output_backport(*popenargs, **kwargs):
        r"""Run command with arguments and return its output as a byte string.
        Backported from Python 2.7 as it's implemented as pure python on stdlib.
        >>> check_output(['/usr/bin/python', '--version'])
        Python 2.6.2
        """
        process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            # Mirror CPython: raise CalledProcessError carrying the output.
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            error = subprocess.CalledProcessError(retcode, cmd)
            error.output = output
            raise error
        return output
subprocess.check_output = _check_output_backport | StarcoderdataPython |
3321184 | <reponame>rnui2k/vivisect
from ctypes import *
from ctypes import util
# Load the macOS frameworks needed to query the IORegistry via ctypes.
iokit = cdll.LoadLibrary(util.find_library('IOKit'))
cf = cdll.LoadLibrary(util.find_library('CoreFoundation'))
# Declare argument/return types for the CoreFoundation string helpers so
# ctypes marshals pointers correctly (defaults would truncate on 64-bit).
cf.CFStringCreateWithCString.argtypes = [c_void_p, c_char_p, c_int32]
cf.CFStringCreateWithCString.restype = c_void_p
cf.CFStringGetCStringPtr.argtypes = [c_void_p, c_uint32]
cf.CFStringGetCStringPtr.restype = c_char_p
kCFAllocatorDefault = c_void_p.in_dll(cf, "kCFAllocatorDefault")
kCFStringEncodingMacRoman = 0
kIOMasterPortDefault = c_void_p.in_dll(iokit, "kIOMasterPortDefault")
# Registry property keys, encoded as Mac Roman byte strings.
kIOPlatformSerialNumberKey = "IOPlatformSerialNumber".encode("mac_roman")
kIOPlatformUUIDKey = "IOPlatformUUID".encode("mac_roman")
# IOKit prototypes used by getHostId below.
iokit.IOServiceMatching.restype = c_void_p
iokit.IOServiceGetMatchingService.argtypes = [c_void_p, c_void_p]
iokit.IOServiceGetMatchingService.restype = c_void_p
iokit.IORegistryEntryCreateCFProperty.argtypes = [c_void_p, c_void_p, c_void_p, c_uint32]
iokit.IORegistryEntryCreateCFProperty.restype = c_void_p
iokit.IOObjectRelease.argtypes = [c_void_p]
def getHostId():
    """Return the platform UUID of this Mac as a byte string.

    Looks up the IOPlatformExpertDevice service and reads its
    ``IOPlatformUUID`` property. Returns ``None`` implicitly when the
    service cannot be found or the property is missing.
    """
    platformExpert = iokit.IOServiceGetMatchingService(kIOMasterPortDefault,
        iokit.IOServiceMatching("IOPlatformExpertDevice"))
    if platformExpert:
        #key = cf.CFStringCreateWithCString(kCFAllocatorDefault, kIOPlatformSerialNumberKey, kCFStringEncodingMacRoman)
        key = cf.CFStringCreateWithCString(kCFAllocatorDefault, kIOPlatformUUIDKey, kCFStringEncodingMacRoman)
        serialNumberAsCFString = \
            iokit.IORegistryEntryCreateCFProperty(platformExpert,
                                                  key,
                                                  kCFAllocatorDefault, 0);
        if serialNumberAsCFString:
            # NOTE(review): `key` and `serialNumberAsCFString` are created by
            # CF "Create" calls and are never CFRelease'd — presumably a small
            # one-shot leak; confirm whether this matters for callers.
            SERIAL = cf.CFStringGetCStringPtr(serialNumberAsCFString, 0)
        iokit.IOObjectRelease(platformExpert)
        return SERIAL
| StarcoderdataPython |
1890760 | # -*- coding: utf-8 -*-
"""
Numpy and Scipy script files that are common to both Keras+TF and PyTorch
"""
import numpy as np
import re
from scipy.spatial.distance import cdist
import torch
from torch.optim import Optimizer
# NOTE(review): mySGD is defined below but not listed in __all__ — confirm
# whether it is intentionally private to `from module import *` users.
__all__ = ['classes', 'eps', 'parse_name', 'rotation_matrix', 'get_gamma', 'get_accuracy']
# object categories of interest (PASCAL3D-style class names)
classes = ['aeroplane', 'bicycle', 'boat', 'bottle', 'bus', 'car', 'chair', 'diningtable', 'motorbike', 'sofa', 'train', 'tvmonitor']
# numeric precision for my experiments
eps = 1e-6
# parse the name of the image to get model and pose parameters
def parse_name(image_name):
    """Split a rendered-image name into model id and pose parameters.

    Expected layout: ``<synset>_<model>_a<az>_e<el>_t<ct>_d<d>`` — the
    underscore positions are located and the pose fields are read two
    characters past each marker (skipping the ``_a``/``_e``/... prefixes).

    Returns a tuple ``(synset_str, model_str, az, el, ct, d)`` with the four
    pose values converted to float.
    """
    marks = [pos for pos, ch in enumerate(image_name) if ch == '_']
    synset_str = image_name[:marks[0]]
    model_str = image_name[marks[0] + 1:marks[1]]
    az = float(image_name[marks[1] + 2:marks[2]])
    el = float(image_name[marks[2] + 2:marks[3]])
    ct = float(image_name[marks[3] + 2:marks[4]])
    d = float(image_name[marks[4] + 2:])
    return synset_str, model_str, az, el, ct, d
# get rotation matrix R(az, el, ct) given the three euler angles :
# azimuth az, elevation el, camera-tilt ct
def rotation_matrix(az, el, ct):
    """Compose the 3x3 rotation ``R = Rc(ct) @ Rb(el) @ Ra(az)``.

    All three Euler angles (azimuth, elevation, camera tilt) are given in
    degrees; azimuth and tilt rotate about the z-axis, elevation about x.
    """
    az_r, el_r, ct_r = (np.radians(a) for a in (az, el, ct))
    ca, sa = np.cos(az_r), np.sin(az_r)
    cb, sb = np.cos(el_r), np.sin(el_r)
    cc, sc = np.cos(ct_r), np.sin(ct_r)
    rot_az = np.array([[ca, -sa, 0.0], [sa, ca, 0.0], [0.0, 0.0, 1.0]])
    rot_el = np.array([[1.0, 0.0, 0.0], [0.0, cb, -sb], [0.0, sb, cb]])
    rot_ct = np.array([[cc, -sc, 0.0], [sc, cc, 0.0], [0.0, 0.0, 1.0]])
    return rot_ct @ rot_el @ rot_az
def get_gamma(kmeans_dict):
    """Derive an RBF-style width from a codebook of cluster centers.

    gamma = 1 / (2 * m) where m is the smallest squared Euclidean distance
    between any two distinct rows of ``kmeans_dict``.
    """
    n_centers = kmeans_dict.shape[0]
    sq_dists = cdist(kmeans_dict, kmeans_dict, 'sqeuclidean')
    # for each center, distance to its nearest *other* center
    nearest = [np.amin(np.delete(sq_dists[i], i)) for i in range(n_centers)]
    return 1 / (2 * np.amin(nearest))
# Implements variation of SGD (optionally with momentum)
class mySGD(Optimizer):
    """SGD variant with a triangular (cyclical) learning rate.

    The step size interpolates linearly from ``alpha1`` to ``alpha2`` over
    the first half of a cycle of length ``c`` steps, then back to ``alpha1``
    over the second half. Momentum/Nesterov/weight-decay handling mirrors
    ``torch.optim.SGD``.
    """
    def __init__(self, params, c, alpha1=1e-6, alpha2=1e-8, momentum=0, dampening=0, weight_decay=0, nesterov=False):
        # per-group hyperparameters; ``c`` (cycle length) is shared globally
        defaults = dict(alpha1=alpha1, alpha2=alpha2, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov)
        super(mySGD, self).__init__(params, defaults)
        self.c = c
    def __setstate__(self, state):
        super(mySGD, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)
    def step(self, closure=None):
        """Perform one optimization step.

        ``closure`` optionally re-evaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                state = self.state[p]
                # State initialization: only a per-parameter step counter,
                # used to locate the position inside the current cycle.
                if len(state) == 0:
                    state['step'] = 0
                state['step'] += 1
                if weight_decay != 0:
                    d_p.add_(weight_decay, p.data)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
                        buf.mul_(momentum).add_(d_p)
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(1 - dampening, d_p)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf
                # cyclical learning rate: t in (0, 1] is the fraction of the
                # current cycle; step size ramps alpha1 -> alpha2 -> alpha1.
                t = (np.fmod(state['step']-1, self.c)+1)/self.c
                if t <= 0.5:
                    step_size = (1-2*t)*group['alpha1'] + 2*t*group['alpha2']
                else:
                    step_size = 2*(1-t)*group['alpha2'] + (2*t-1)*group['alpha1']
                p.data.add_(-step_size, d_p)
        return loss
def get_accuracy(ytrue, ypred, num_classes):
    """Mean per-class (balanced) accuracy over ``num_classes`` labels.

    For each label the recall (fraction of that label's samples predicted
    correctly) is computed; the mean of the per-label values is returned.
    A label absent from ``ytrue`` divides by zero, as in plain numpy.
    """
    per_class = np.empty(num_classes)
    for label in range(num_classes):
        truth_mask = ytrue == label
        hits = np.sum(truth_mask & (ypred == label))
        per_class[label] = hits / np.sum(truth_mask)
    return np.mean(per_class)
| StarcoderdataPython |
1952783 | import traceback
from typing import Any
from dotenv import find_dotenv, load_dotenv
from fastapi import FastAPI
from pydantic.fields import ModelField
from starlette.responses import PlainTextResponse
import main_dramatiq # type: ignore
load_dotenv(find_dotenv(".env"), verbose=True)
def create_app():
    """Build the FastAPI application with every controller router mounted.

    Controller modules are imported inside the function rather than at module
    level — presumably to avoid circular imports at load time (TODO confirm).
    The ``on_start_up`` hook is registered to initialize services.
    """
    from trainings.runtimes.training_runtime_controller import router as training_runtime_router
    from trainings import training_controller
    from projects import project_controller
    from deployments import deployment_controller
    from models import model_controller
    from deployments.runtimes import deployment_runtime_controller
    app = FastAPI()
    # Mount all routers on the app; each controller owns its own prefix.
    app.include_router(project_controller.router)
    app.include_router(training_controller.router)
    app.include_router(model_controller.router)
    app.include_router(deployment_controller.router)
    app.include_router(training_runtime_router)
    app.include_router(deployment_runtime_controller.router)
    app.add_event_handler('startup', on_start_up)
    return app
def on_start_up():
    """FastAPI startup hook: instantiate the singleton services.

    The constructors are called for their side effects only; the instances
    are not retained here — presumably each class registers itself
    internally (TODO confirm).
    """
    from trainings.runtimes.training_runtime_service import TrainingRuntimeService
    from docker_repositories import DockerRepositoryService
    from trainings import TrainingService
    DockerRepositoryService()
    TrainingRuntimeService()
    TrainingService()
def init_dramatiq():
    """Configure dramatiq with a RabbitMQ broker and Redis result backend.

    NOTE(review): ``flush_all()`` discards every pending message on the
    broker each time this runs — confirm this is intended outside of
    development environments.
    """
    import dramatiq
    from dramatiq.brokers.rabbitmq import RabbitmqBroker
    from dramatiq.results.backends import RedisBackend
    from dramatiq.results import Results
    result_backend = RedisBackend()
    broker = RabbitmqBroker()
    broker.add_middleware(Results(backend=result_backend))
    dramatiq.set_broker(broker)
    dramatiq.get_broker().flush_all()
app = create_app()
# Meeting the monkey here ...
# Monkey patch to hide fields from schema as suggested in https://github.com/tiangolo/fastapi/issues/1378 ...
# TODO: Might be obsolete in future
from pydantic import schema
def field_schema(field: ModelField, **kwargs: Any) -> Any:
    """Replacement for ``pydantic.schema.field_schema`` that skips fields
    marked with ``hidden_from_schema=True`` in their Field() extras."""
    if field.field_info.extra.get("hidden_from_schema", False):
        raise schema.SkipField(f"{field.name} field is being hidden")
    else:
        return original_field_schema(field, **kwargs)
# Bind the original before installing the patch; ``field_schema`` only looks
# this name up at call time, so defining it after the function is safe.
original_field_schema = schema.field_schema
schema.field_schema = field_schema
# TODO Remove on production
@app.exception_handler(Exception)
async def http_exception_handler(request, exc: Exception):
    """Debug-only catch-all: return the full traceback as plain text (500).

    NOTE(review): the ``etype=`` keyword of ``traceback.format_exception``
    was removed in Python 3.10 — confirm the target interpreter version.
    """
    tb = ''.join(traceback.format_exception(etype=type(exc), value=exc, tb=exc.__traceback__))
    return PlainTextResponse("Exception [%s]: %s\n%s" % (type(exc).__name__, str(exc), tb), status_code=500)
| StarcoderdataPython |
70049 | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import functools
import importlib
import sys
import time
from collections import defaultdict, deque
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Set, Tuple, Type, Optional
from .... import oscar as mo
from ....config import Config
from ....core import TileableGraph, ChunkGraph, ChunkGraphBuilder, \
Tileable, TileableType
from ....core.operand import Fetch, Fuse
from ....dataframe.core import DATAFRAME_CHUNK_TYPE
from ....optimization.logical.chunk import optimize as optimize_chunk_graph
from ....optimization.logical.tileable import optimize as optimize_tileable_graph
from ....utils import build_fetch
from ...cluster.api import ClusterAPI
from ...core import BandType
from ...meta.api import MetaAPI
from ..analyzer import GraphAnalyzer
from ..config import task_options
from ..core import Task, TaskResult, TaskStatus, Subtask, SubtaskResult, \
SubtaskStatus, SubtaskGraph, new_task_id
from ..errors import TaskNotExist
class TaskConfigurationActor(mo.Actor):
    """Actor that applies task configuration to the global ``task_options``
    and remembers which ``TaskProcessor`` subclass should be used."""

    def __init__(self,
                 task_conf: Dict[str, Any],
                 task_processor_cls: Type["TaskProcessor"] = None):
        # push every provided option onto the shared task_options object
        for option_name, option_value in task_conf.items():
            setattr(task_options, option_name, option_value)
        self._task_processor_cls = task_processor_cls

    def get_config(self):
        """Return the global options together with the processor class."""
        return {
            'task_options': task_options,
            'task_processor_cls': self._task_processor_cls,
        }
class TaskProcessor:
    """Drives one task through optimization and iterative tiling.

    Holds the tile context shared across tiling rounds, the optimization
    records for both graph levels, and a ``done`` event awaited by callers.
    """
    __slots__ = '_task', 'tileable_graph', 'tile_context', \
        '_config', 'tileable_optimization_records', \
        'chunk_optimization_records_list', '_done'
    tile_context: Dict[Tileable, Tileable]
    def __init__(self,
                 task: Task,
                 tiled_context: Dict[Tileable, Tileable] = None,
                 config: Config = None):
        self._task = task
        self.tileable_graph = task.tileable_graph
        self._config = config
        self.tile_context = tiled_context
        self.tileable_optimization_records = None
        self.chunk_optimization_records_list = []
        self._done = asyncio.Event()
    def optimize(self) -> TileableGraph:
        """
        Optimize tileable graph.
        Returns
        -------
        optimized_graph: TileableGraph
        """
        if self._config.optimize_tileable_graph:
            # enable optimization; records are kept for later replay/lookup
            self.tileable_optimization_records = \
                optimize_tileable_graph(self.tileable_graph)
        return self.tileable_graph
    def tile(self, tileable_graph: TileableGraph) -> Iterable[ChunkGraph]:
        """
        Generate chunk graphs
        Returns
        -------
        chunk_graph_generator: Generator
            Chunk graphs.
        """
        # iterative chunk graph builder
        chunk_graph_builder = ChunkGraphBuilder(
            tileable_graph, fuse_enabled=self._task.fuse_enabled,
            tile_context=self.tile_context)
        optimize = self._config.optimize_chunk_graph
        meta_updated = set()
        for chunk_graph in chunk_graph_builder.build():
            # optimize chunk graph
            if optimize:
                self.chunk_optimization_records_list.append(
                    optimize_chunk_graph(chunk_graph))
            yield chunk_graph
        # update tileables' meta; note this runs only once the generator is
        # fully consumed, after the last chunk graph has been yielded
        self._update_tileables_params(tileable_graph, meta_updated)
    @property
    def done(self) -> bool:
        # completion is modelled as an asyncio.Event so awaiters can block
        return self._done.is_set()
    @done.setter
    def done(self, is_done: bool):
        if is_done:
            self._done.set()
        else:  # pragma: no cover
            self._done.clear()
    def get_tiled(self, tileable):
        """Return the tiled counterpart of ``tileable`` from the context."""
        tileable = tileable.data if hasattr(tileable, 'data') else tileable
        return self.tile_context[tileable]
    @classmethod
    def _update_tileable_params(cls,
                                tileable: TileableType,
                                tiled: TileableType):
        # refresh the tiled object's params, then copy them back onto the
        # original (untiled) tileable so callers observe final metadata
        tiled.refresh_params()
        tileable.params = tiled.params
    def _update_tileables_params(self,
                                 tileable_graph: TileableGraph,
                                 updated: Set[TileableType]):
        """Propagate params from tiled objects to every not-yet-updated
        tileable in the graph, recording which ones were handled."""
        for tileable in tileable_graph:
            if tileable in updated:
                continue
            tiled_tileable = self.tile_context.get(tileable)
            if tiled_tileable is not None:
                self._update_tileable_params(tileable, tiled_tileable)
                updated.add(tileable)
    def __await__(self):
        # allow ``await processor`` to block until the task is marked done
        return self._done.wait().__await__()
class BandQueue:
    """Per-band subtask queue with an awaitable "has data" signal.

    Items are pushed with ``appendleft`` and popped with ``popleft``, so the
    most recently put subtask is returned first (LIFO order). ``None`` is
    used by producers as a termination marker.
    """

    def __init__(self):
        self._queue = deque()
        self._has_data = asyncio.Event()

    def put(self, subtask: Optional[Subtask]):
        """Push a subtask (or ``None`` sentinel) and wake any waiter."""
        self._queue.appendleft(subtask)
        self._has_data.set()

    def get(self) -> Subtask:
        """Pop the most recently added entry; reset the event when empty."""
        item = self._queue.popleft()
        if not self._queue:
            self._has_data.clear()
        return item

    def __await__(self):
        # ``await queue`` blocks until at least one item is available
        return self._has_data.wait().__await__()
class SubtaskGraphScheduler:
    """Schedules one stage's subtask graph over the available bands.

    When no scheduling API is supplied, a direct-submit fallback is used:
    each band gets a ``BandQueue`` plus a dedicated coroutine that acquires
    worker slots and runs subtasks; successors are enqueued as their
    predecessors finish.
    """
    def __init__(self,
                 subtask_graph: SubtaskGraph,
                 bands: List[BandType],
                 task_stage_info: "TaskStageInfo",
                 meta_api: MetaAPI,
                 scheduling_api=None):
        self._subtask_graph = subtask_graph
        self._bands = bands
        self._task_stage_info = task_stage_info
        self._meta_api = meta_api
        self._scheduling_api = scheduling_api
        # gen subtask_id to subtask
        self._subtask_id_to_subtask = {subtask.subtask_id: subtask
                                       for subtask in subtask_graph}
        self._subtask_to_bands: Dict[Subtask, BandType] = dict()
        self._subtask_to_results: Dict[Subtask, SubtaskResult] = dict()
        self._band_manager: Dict[BandType, mo.ActorRef] = dict()
        self._band_queue: Dict[BandType, BandQueue] = defaultdict(BandQueue)
        self._band_schedules = []
        self._done = asyncio.Event()
        self._cancelled = asyncio.Event()
    async def _get_band_subtask_manager(self, band: BandType):
        """Resolve (and cache) the subtask manager actor ref for ``band``."""
        from ..worker.subtask import BandSubtaskManagerActor
        if band in self._band_manager:
            return self._band_manager[band]
        manger_ref = await mo.actor_ref(
            band[0], BandSubtaskManagerActor.gen_uid(band[1]))
        self._band_manager[band] = manger_ref
        return manger_ref
    # NOTE(review): lru_cache on an instance method keys on ``self`` and keeps
    # the scheduler alive for the cache's lifetime; it also requires the
    # Subtask tuple argument to be hashable — confirm both are intended.
    @functools.lru_cache(30)
    def _calc_expect_band(self, inp_subtasks: Tuple[Subtask]):
        """Order candidate bands by predecessor output size (largest first)."""
        if len(inp_subtasks) == 1 and inp_subtasks[0].virtual:
            # virtual node, get predecessors of virtual node
            calc_subtasks = self._subtask_graph.predecessors(inp_subtasks[0])
        else:
            calc_subtasks = inp_subtasks
        # calculate a expect band
        sorted_size_inp_subtask = sorted(
            calc_subtasks, key=lambda st: self._subtask_to_results[st].data_size,
            reverse=True)
        expect_bands = [self._subtask_to_bands[subtask]
                        for subtask in sorted_size_inp_subtask]
        return expect_bands
    def _get_subtask_band(self, subtask: Subtask):
        """Decide which band a subtask should run on and record the choice."""
        if subtask.expect_band is not None:
            # start, already specified band
            self._subtask_to_bands[subtask] = band = subtask.expect_band
            return band
        else:
            inp_subtasks = self._subtask_graph.predecessors(subtask)
            # calculate a expect band
            expect_bands = self._calc_expect_band(tuple(inp_subtasks))
            subtask.expect_bands = expect_bands
            self._subtask_to_bands[subtask] = band = subtask.expect_band
            return band
    async def _direct_submit_subtasks(self, subtasks: List[Subtask]):
        """Fallback submission: push each subtask onto its band's queue."""
        for subtask in subtasks[::-1]:
            band = self._get_subtask_band(subtask)
            # push subtask to queues
            self._band_queue[band].put(subtask)
    async def _schedule_subtasks(self, subtasks: List[Subtask]):
        """Submit subtasks via the scheduling API if present, else directly."""
        if self._scheduling_api is not None:
            return await self._scheduling_api.submit_subtasks(
                subtasks, [subtask.priority for subtask in subtasks])
        else:
            return await self._direct_submit_subtasks(subtasks)
    async def _update_chunks_meta(self, chunk_graph: ChunkGraph):
        """Batch-fetch final metadata for the result chunks of the stage."""
        get_meta = []
        chunks = chunk_graph.result_chunks
        for chunk in chunks:
            if isinstance(chunk.op, Fuse):
                # for fused chunks, metadata lives on the inner chunk
                chunk = chunk.chunk
            fields = list(chunk.params)
            if isinstance(chunk, DATAFRAME_CHUNK_TYPE):
                # dtypes/columns_value are not stored in chunk meta
                fields.remove('dtypes')
                fields.remove('columns_value')
            get_meta.append(self._meta_api.get_chunk_meta.delay(
                chunk.key, fields=fields))
        metas = await self._meta_api.get_chunk_meta.batch(*get_meta)
        for chunk, meta in zip(chunks, metas):
            chunk.params = meta
    async def set_subtask_result(self, result: SubtaskResult):
        """Record a subtask result; finish the stage or schedule successors."""
        subtask_id = result.subtask_id
        subtask = self._subtask_id_to_subtask[subtask_id]
        self._subtask_to_results[subtask] = result
        all_done = len(self._subtask_to_results) == len(self._subtask_graph)
        error_or_cancelled = result.status in (SubtaskStatus.errored, SubtaskStatus.cancelled)
        if all_done or error_or_cancelled:
            if all_done and not error_or_cancelled:
                # subtask graph finished, update result chunks' meta
                await self._update_chunks_meta(
                    self._task_stage_info.chunk_graph)
            self._schedule_done()
            self.set_task_info(result.error, result.traceback)
            return
        # push success subtasks to queue if they are ready
        to_schedule_subtasks = []
        for succ_subtask in self._subtask_graph.successors(subtask):
            if succ_subtask in self._subtask_to_results:
                continue
            pred_subtasks = self._subtask_graph.predecessors(succ_subtask)
            if all(pred_subtask in self._subtask_to_results
                   for pred_subtask in pred_subtasks):
                # all predecessors finished
                to_schedule_subtasks.append(succ_subtask)
        await self._schedule_subtasks(to_schedule_subtasks)
    def _schedule_done(self):
        """Mark the stage done and unblock/cancel the per-band coroutines."""
        self._done.set()
        for q in self._band_queue.values():
            # put None into queue to indicate done
            q.put(None)
        # cancel band schedules
        if not self._cancelled.is_set():
            _ = [schedule.cancel() for schedule in self._band_schedules]
    async def _run_subtask(self,
                           subtask_runner,
                           subtask: Subtask,
                           tasks: Dict):
        """Run one subtask on a worker slot, converting failures to results."""
        try:
            await subtask_runner.run_subtask(subtask)
        except:  # noqa: E722  # pragma: no cover  # pylint: disable=bare-except
            _, err, traceback = sys.exc_info()
            subtask_result = SubtaskResult(
                subtask_id=subtask.subtask_id,
                session_id=subtask.session_id,
                task_id=subtask.task_id,
                status=SubtaskStatus.errored,
                error=err,
                traceback=traceback)
            await self.set_subtask_result(subtask_result)
        del tasks[subtask]
    @contextmanager
    def _ensure_done_set(self):
        # guarantee the done event is set even if the band loop raises
        try:
            yield
        finally:
            self._done.set()
    async def _schedule_band(self, band: BandType):
        """Per-band loop: wait for queued work, grab a slot, run subtasks."""
        with self._ensure_done_set():
            manager_ref = await self._get_band_subtask_manager(band)
            tasks = dict()
            q = self._band_queue[band]
            while not self._done.is_set():
                # wait for data
                try:
                    # await finish when sth enqueued.
                    # note that now we don't get subtask from queue,
                    # just ensure the process can be continued.
                    # since the slot is released after subtask runner
                    # notifies task manager, we will get subtask when slot released,
                    # so that subtask with higher priority is fetched
                    await q
                except asyncio.CancelledError:
                    pass
                if not self._cancelled.is_set():
                    try:
                        subtask_runner = await manager_ref.get_free_slot()
                    except asyncio.CancelledError:
                        subtask_runner = None
                else:
                    subtask_runner = None
                # now get subtask, the subtask that can run with higher priority
                # has been pushed before slot released
                subtask = q.get()
                done = subtask is None or self._done.is_set() or self._cancelled.is_set()
                if done and subtask_runner:
                    # finished or cancelled, given back slot
                    await manager_ref.mark_slot_free(subtask_runner)
                if self._cancelled.is_set():
                    # force to free running slots
                    free_slots = []
                    for subtask_runner, _ in tasks.values():
                        free_slots.append(
                            manager_ref.free_slot(subtask_runner))
                    await asyncio.gather(*free_slots)
                elif not done:
                    coro = self._run_subtask(subtask_runner, subtask, tasks)
                    tasks[subtask] = (subtask_runner, asyncio.create_task(coro))
            # done, block until all tasks finish
            if not self._cancelled.is_set():
                await asyncio.gather(*[v[1] for v in tasks.values()])
    async def schedule(self):
        """Run the whole stage: start band loops, seed roots, await completion."""
        if self._scheduling_api is None:
            # use direct submit
            for band in self._bands:
                self._band_schedules.append(
                    asyncio.create_task(self._schedule_band(band)))
        # schedule independent subtasks
        indep_subtasks = list(self._subtask_graph.iter_indep())
        await self._schedule_subtasks(indep_subtasks)
        # wait for completion
        await self._done.wait()
        # wait for schedules to complete
        await asyncio.gather(*self._band_schedules)
    async def cancel(self):
        """Cancel the stage; no-op if it already finished."""
        if self._done.is_set():
            # already finished, ignore cancel
            return
        self._cancelled.set()
        _ = [s.cancel() for s in self._band_schedules]
        await asyncio.gather(*self._band_schedules)
        self._done.set()
    def set_task_info(self, error=None, traceback=None):
        """Publish the terminal TaskResult for the owning stage."""
        self._task_stage_info.task_result = TaskResult(
            self._task_stage_info.task_id, self._task_stage_info.task.session_id,
            TaskStatus.terminated, error=error, traceback=traceback)
@dataclass
class TaskInfo:
    """Bookkeeping for one named task submission and all its sub-tasks.

    NOTE(review): a hand-written ``__init__`` is supplied, so ``@dataclass``
    does not generate one; the decorator effectively only contributes the
    field declarations — confirm this is intentional.
    """
    task_id: str
    task_name: str
    session_id: str
    tasks: List[Task]
    task_processors: List[TaskProcessor]
    aio_tasks: List[asyncio.Task]
    task_stage_infos: List["TaskStageInfo"]
    def __init__(self,
                 task_id: str,
                 task_name: str,
                 session_id: str):
        self.task_id = task_id
        self.task_name = task_name
        self.session_id = session_id
        self.tasks = []
        self.task_processors = []
        self.aio_tasks = []
        self.task_stage_infos = []
    @property
    def task_result(self):
        # surface the first stage error if any stage failed
        for task_stage in self.task_stage_infos:
            if task_stage.task_result.error is not None:
                return task_stage.task_result
        # all succeeded, return the last task result
        return self.task_stage_infos[-1].task_result
@dataclass
class TaskStageInfo:
    """State for one stage (one chunk graph) of a task.

    NOTE(review): like ``TaskInfo``, this defines its own ``__init__`` so the
    dataclass-generated one is suppressed; the remaining fields are filled in
    later by the task processing pipeline.
    """
    task_id: str
    task_info: TaskInfo
    task: Task
    chunk_graph: ChunkGraph = None
    task_result: TaskResult = None
    subtask_graph: SubtaskGraph = None
    subtask_graph_scheduler: SubtaskGraphScheduler = None
    subtask_results: Dict[str, SubtaskResult] = None
    def __init__(self,
                 task_id: str,
                 task_info: TaskInfo,
                 task: Task):
        self.task_id = task_id
        self.task_info = task_info
        self.task = task
        # stage starts in pending state until the scheduler terminates it
        self.task_result = TaskResult(
            task_id, task_info.session_id, TaskStatus.pending)
        self.subtask_results = dict()
@dataclass
class ResultTileableInfo:
    """Pairs a result tileable with the processor that produced it."""
    tileable: Tileable
    processor: TaskProcessor
class TaskManagerActor(mo.Actor):
    """Session-scoped actor that accepts tileable graphs and drives tasks
    through optimize -> tile -> analyze -> schedule, tracking results.

    BUG FIX: ``submit_tileable_graph`` previously referenced the non-existent
    attribute ``_task_name_to_main_task_info`` (the attribute created in
    ``__init__`` and declared on the class is ``_task_name_to_task_info``),
    so any repeated submission under an explicit task name raised
    ``AttributeError``. All three references now use the correct name.
    """
    _task_name_to_task_info: Dict[str, TaskInfo]
    _task_id_to_task_info: Dict[str, TaskInfo]
    _task_id_to_task_stage_info: Dict[str, TaskStageInfo]
    _tileable_key_to_info: Dict[str, List[ResultTileableInfo]]
    def __init__(self,
                 session_id,
                 use_scheduling=True):
        self._session_id = session_id
        self._config = None
        self._task_processor_cls = None
        self._task_name_to_task_info = dict()
        self._task_id_to_task_info = dict()
        self._task_id_to_task_stage_info = dict()
        self._tileable_key_to_info = defaultdict(list)
        self._meta_api = None
        self._cluster_api = None
        self._use_scheduling = use_scheduling
        self._last_idle_time = None
    async def __post_create__(self):
        # resolve service APIs and pull task configuration from the
        # TaskConfigurationActor once the actor is alive
        self._meta_api = await MetaAPI.create(self._session_id, self.address)
        self._cluster_api = await ClusterAPI.create(self.address)
        # get config
        configuration_ref = await mo.actor_ref(
            TaskConfigurationActor.default_uid(),
            address=self.address)
        task_conf = await configuration_ref.get_config()
        self._config, self._task_processor_cls = \
            task_conf['task_options'], task_conf['task_processor_cls']
        self._task_processor_cls = self._get_task_processor_cls()
    async def _get_available_band_slots(self) -> Dict[BandType, int]:
        return await self._cluster_api.get_all_bands()
    @staticmethod
    def gen_uid(session_id):
        """Uid for this actor within a session."""
        return f'{session_id}_task_manager'
    async def submit_tileable_graph(self,
                                    graph: TileableGraph,
                                    task_name: str = None,
                                    fuse_enabled: bool = None,
                                    extra_config: dict = None) -> str:
        """Submit a tileable graph; returns the id of the (possibly reused)
        named task. Processing runs asynchronously in a background task."""
        self._last_idle_time = None
        if task_name is None:
            # unnamed submission: generate a fresh id used as both id and name
            task_id = task_name = new_task_id()
        elif task_name in self._task_name_to_task_info:
            # task with the same name submitted before — reuse its id
            task_id = self._task_name_to_task_info[task_name].task_id
        else:
            task_id = new_task_id()
        if task_name not in self._task_name_to_task_info:
            # gen main task which mean each submission from user
            task_info = TaskInfo(task_id, task_name, self._session_id)
            self._task_name_to_task_info[task_name] = task_info
            self._task_id_to_task_info[task_id] = task_info
        else:
            task_info = self._task_name_to_task_info[task_name]
        if fuse_enabled is None:
            fuse_enabled = self._config.fuse_enabled
        # gen task
        task = Task(task_id, self._session_id,
                    graph, task_name,
                    fuse_enabled=fuse_enabled,
                    extra_config=extra_config)
        task_info.tasks.append(task)
        # gen task processor
        tiled_context = self._gen_tiled_context(graph)
        task_processor = self._task_processor_cls(
            task, tiled_context=tiled_context,
            config=self._config)
        task_info.task_processors.append(task_processor)
        # start to run main task
        aio_task = asyncio.create_task(
            self._process_task(task_processor, task_info, task))
        await asyncio.sleep(0)
        task_info.aio_tasks.append(aio_task)
        return task_id
    def _gen_tiled_context(self, graph: TileableGraph):
        # process graph, add fetch node to tiled context so already-executed
        # results referenced by Fetch ops are reused instead of recomputed
        tiled_context = dict()
        for tileable in graph:
            if isinstance(tileable.op, Fetch):
                info = self._tileable_key_to_info[tileable.key][0]
                tiled = info.processor.tile_context[info.tileable]
                tiled_context[tileable] = build_fetch(tiled).data
        return tiled_context
    def _get_task_processor_cls(self):
        # the configured processor class may be given as a dotted path string
        if self._task_processor_cls is not None:
            assert isinstance(self._task_processor_cls, str)
            module, name = self._task_processor_cls.rsplit('.', 1)
            return getattr(importlib.import_module(module), name)
        else:
            return TaskProcessor
    async def _process_task(self,
                            task_processor: TaskProcessor,
                            task_info: TaskInfo,
                            task: Task):
        """Background driver: optimize, iteratively tile into chunk graphs,
        analyze each into a subtask graph, and schedule it to completion."""
        loop = asyncio.get_running_loop()
        # optimization, run it in executor,
        # since optimization may be a CPU intensive operation
        tileable_graph = await loop.run_in_executor(None, task_processor.optimize)
        chunk_graph_iter = task_processor.tile(tileable_graph)
        while True:
            task_stage_info = TaskStageInfo(
                new_task_id(), task_info, task)
            def next_chunk_graph():
                try:
                    return next(chunk_graph_iter)
                except StopIteration:
                    return
            # tiling may also be CPU intensive, keep it off the event loop
            future = loop.run_in_executor(None, next_chunk_graph)
            try:
                chunk_graph = await future
                if chunk_graph is None:
                    break
                task_info.task_stage_infos.append(task_stage_info)
                task_id = task_stage_info.task_id
                self._task_id_to_task_stage_info[task_id] = task_stage_info
            except:  # noqa: E722  # nosec  # pylint: disable=bare-except  # pragma: no cover
                # something wrong while tiling
                _, err, tb = sys.exc_info()
                task_stage_info.task_result.status = TaskStatus.terminated
                task_stage_info.task_result.error = err
                task_stage_info.task_result.traceback = tb
                task_info.task_stage_infos.append(task_stage_info)
                task_id = task_stage_info.task_id
                self._task_id_to_task_stage_info[task_id] = task_stage_info
                break
            task_stage_info.chunk_graph = chunk_graph
            # get subtask graph
            available_bands = await self._get_available_band_slots()
            analyzer = GraphAnalyzer(chunk_graph, available_bands,
                                     task.fuse_enabled, task.extra_config,
                                     task_stage_info)
            subtask_graph = analyzer.gen_subtask_graph()
            task_stage_info.subtask_graph = subtask_graph
            # schedule subtask graph
            # TODO(qinxuye): pass scheduling API to scheduler when it's ready
            subtask_scheduler = SubtaskGraphScheduler(
                subtask_graph, list(available_bands), task_stage_info,
                self._meta_api)
            task_stage_info.subtask_graph_scheduler = subtask_scheduler
            await subtask_scheduler.schedule()
        # iterative tiling and execution finished,
        # record result tileables so later Fetch ops can resolve them,
        # then set task processor done
        for tileable in tileable_graph.result_tileables:
            info = ResultTileableInfo(tileable=tileable,
                                      processor=task_processor)
            self._tileable_key_to_info[tileable.key].append(info)
        task_processor.done = True
    @classmethod
    async def _wait_for(cls, task_info: TaskInfo):
        # wait for every processor and background driver of the task
        processors = task_info.task_processors
        aio_tasks = task_info.aio_tasks
        await asyncio.gather(*processors, *aio_tasks)
        return task_info.task_result
    async def _wait_task(self, task_id: str, timeout=None):
        try:
            task_info = self._task_id_to_task_info[task_id]
        except KeyError:  # pragma: no cover
            raise TaskNotExist(f'Task {task_id} does not exist')
        if timeout is None:
            return await self._wait_for(task_info)
        # timed wait: resolve a future from a done callback so that an
        # expired timeout returns None without cancelling the task itself
        loop = asyncio.get_running_loop()
        future = loop.create_future()
        task = asyncio.create_task(self._wait_for(task_info))
        def cb(_):
            try:
                future.set_result(None)
            except asyncio.InvalidStateError:  # pragma: no cover
                pass
        task.add_done_callback(cb)
        try:
            await asyncio.wait_for(future, timeout)
            return await task
        except asyncio.TimeoutError:
            return
    async def wait_task(self,
                        task_id: str,
                        timeout: int = None):
        # return coroutine to not block task manager
        return self._wait_task(task_id, timeout=timeout)
    async def _cancel_task(self, task_info: TaskInfo):
        # cancel all stages
        coros = [task_stage_info.subtask_graph_scheduler.cancel()
                 for task_stage_info in task_info.task_stage_infos]
        await asyncio.gather(*coros)
    async def cancel_task(self, task_id: str):
        """Cancel every stage of the task; raises TaskNotExist if unknown."""
        try:
            task_info = self._task_id_to_task_info[task_id]
        except KeyError:  # pragma: no cover
            raise TaskNotExist(f'Task {task_id} does not exist')
        # return coroutine to not block current actor
        return self._cancel_task(task_info)
    def get_task_result(self, task_id: str):
        try:
            return self._task_id_to_task_info[task_id].task_result
        except KeyError:  # pragma: no cover
            raise TaskNotExist(f'Task {task_id} does not exist')
    def get_task_result_tileables(self, task_id: str):
        """Return fetch tileables for the task's results (latest processor)."""
        try:
            task_info = self._task_id_to_task_info[task_id]
        except KeyError:  # pragma: no cover
            raise TaskNotExist(f'Task {task_id} does not exist')
        processor = task_info.task_processors[-1]
        tileable_graph = processor.tileable_graph
        result = []
        for result_tilable in tileable_graph.result_tileables:
            tiled = processor.get_tiled(result_tilable)
            result.append(build_fetch(tiled))
        return result
    async def set_subtask_result(self, subtask_result: SubtaskResult):
        """Record a subtask result and forward terminal ones to the stage
        scheduler so successors can be dispatched."""
        try:
            task_stage_info = self._task_id_to_task_stage_info[subtask_result.task_id]
        except KeyError:  # pragma: no cover
            raise TaskNotExist(f'Task {subtask_result.task_id} does not exist')
        task_stage_info.subtask_results[subtask_result.subtask_id] = subtask_result
        if subtask_result.status.is_done:
            await task_stage_info.subtask_graph_scheduler.set_subtask_result(subtask_result)
    def get_task_progress(self, task_id: str) -> float:
        """Overall progress in [0, 1]: tiled fraction times subtask progress.

        NOTE(review): divides by ``len(task_info.task_stage_infos)``, which
        raises ZeroDivisionError before any stage exists — confirm callers
        only query progress after the first stage is created.
        """
        # first get all processors
        try:
            task_info = self._task_id_to_task_info[task_id]
        except KeyError:  # pragma: no cover
            raise TaskNotExist(f'Task {task_id} does not exist')
        tiled_percentage = 0.0
        for task_processor in task_info.task_processors:
            # get tileable proportion that is tiled
            tileable_graph = task_processor.tileable_graph
            tileable_context = task_processor.tile_context
            tiled_percentage += len(tileable_context) / len(tileable_graph)
        tiled_percentage /= len(task_info.task_processors)
        # get progress of stages
        subtask_progress = 0.0
        for stage in task_info.task_stage_infos:
            n_subtask = len(stage.subtask_graph)
            progress = sum(result.progress for result
                           in stage.subtask_results.values())
            subtask_progress += progress / n_subtask
        subtask_progress /= len(task_info.task_stage_infos)
        return subtask_progress * tiled_percentage
    def last_idle_time(self):
        """Return the timestamp since which the manager has been idle, or
        ``None`` while any task is still running or has unfinished stages."""
        if self._last_idle_time is None:
            for task_info in self._task_id_to_task_info.values():
                for task_processor in task_info.task_processors:
                    if not task_processor.done:
                        break
                else:
                    for stage in task_info.task_stage_infos:
                        if stage.task_result.status != TaskStatus.terminated:
                            break
                    else:
                        continue
                break
            else:
                self._last_idle_time = time.time()
        return self._last_idle_time
| StarcoderdataPython |
20244 | <reponame>cloudify-cosmo/cloudify-manager-blueprints
#!/usr/bin/env python
from os.path import join, dirname
from cloudify import ctx
# Fetch the shared utils module next to this script so it can be imported.
ctx.download_resource(
    join('components', 'utils.py'),
    join(dirname(__file__), 'utils.py'))
import utils  # NOQA
# Most images already ship with the following packages:
#
# python-setuptools
# python-backports
# python-backports-ssl_match_hostname
#
# - as they are dependencies of cloud-init, which is extremely popular.
#
# However, cloud-init is irrelevant for certain IaaS (such as vSphere) so
# images used there may not have these packages preinstalled.
#
# We're currently considering whether to include these libraries in the
# manager resources package. Until then, we only validate that they're
# preinstalled, and if not - instruct the user to install them.
missing_packages = set()
for pkg in ['python-setuptools',
            'python-backports',
            'python-backports-ssl_match_hostname']:
    ctx.logger.info('Ensuring {0} is installed'.format(pkg))
    is_installed = utils.RpmPackageHandler.is_package_installed(pkg)
    if not is_installed:
        missing_packages.add(pkg)
# Abort the operation with a single aggregated message if anything is missing.
if missing_packages:
    ctx.abort_operation('Prerequisite packages missing: {0}. '
                        'Please ensure these packages are installed and '
                        'try again'.format(', '.join(missing_packages)))
| StarcoderdataPython |
5157005 | <gh_stars>10-100
from flask import Flask
from .root import root
from .maskmap import maskmap
from .indonesia import indonesia
def register_route(app: Flask):
    """Mount every application blueprint on ``app`` under its URL prefix."""
    blueprint_prefixes = (
        (root, '/'),
        (maskmap, '/maskmap'),
        (indonesia, '/id'),
    )
    for blueprint, prefix in blueprint_prefixes:
        app.register_blueprint(blueprint, url_prefix=prefix)
| StarcoderdataPython |
5063083 | """Implementations for metadata analysis are kept here."""
| StarcoderdataPython |
4822491 | <reponame>yup8j/ScholarPaperManagement
from backend.resources import *
from backend.handlers.InfoHandler import *
from flask_jwt_extended import jwt_required, get_jwt_identity
class GetInfo(API):
    """JWT-protected endpoint returning metadata and topics for one document."""
    @jwt_required
    def post(self):
        """Look up a document by ``document_id`` for the authenticated user
        and return its metadata plus topic assignments as JSON."""
        # Authenticate the caller: recover the user id from the JWT.
        ''' 用户鉴权:获得userid '''
        request.get_json(force=True)
        userid = get_jwt_identity()
        # Extract document_id from the request parameters.
        ''' 获取参数中的document_id '''
        parse = reqparse.RequestParser()
        parse.add_argument('document_id', type=str)
        args = parse.parse_args()
        document_id = args['document_id']
        # Delegate to the handler to fetch metadata and topic info.
        ''' 调用handler获得metadata和topic '''
        docInfo, topic_list, topic_name = getInfo(userid, document_id)
        metadata = docInfo.metadata
        # Assemble the JSON response payload.
        ''' 封装返回响应报文 '''
        author_list = ",".join(metadata.author)
        resp = jsonify({
            'title': metadata.title,
            'author': author_list,
            'year': metadata.publish_date,
            'source': metadata.publish_source,
            'score': metadata.user_score,
            'paper_id': metadata.paper_id,
            'link': metadata.link_url,
            'topic': topic_list,
            'topic_name': topic_name
        })
        self.response = make_response(resp)
        self.response.status_code = 200
        return self.response
class EditInfo(API):
    """JWT-protected endpoint applying user edits to a document's metadata."""
    @jwt_required
    def post(self):
        """Forward the request JSON to the edit handler for the current user
        and acknowledge with a plain 200/ok payload."""
        # Authenticate the caller: recover the user id from the JWT.
        ''' 用户鉴权:获得userid '''
        userid = get_jwt_identity()
        req_json = request.get_json(force=True)
        editInfo(userid, req_json)
        self.response = {
            'status_code': 200,
            'msg': 'ok'
        }
        return self.response
| StarcoderdataPython |
3589186 | <filename>CameraLib/cameraPi.py
import io
import time
import picamera
from picamera.array import PiRGBArray
from .baseCamera import BaseCamera
from IotLib.log import Log
class Camera(BaseCamera):
    """Raspberry Pi camera source built on top of ``picamera``."""

    # Class-level resolution: the static frames() generator has no instance,
    # so the last-constructed resolution is shared through the class.
    width = 1280
    height = 720

    def __init__(self, width=1280, height=720, crosshair=False):
        """ initialize a PiCam with specified width and height """
        Camera.width = width
        Camera.height = height
        self.cameraReady = False
        try:
            with picamera.PiCamera() as camera:
                self.cameraReady = True
                super(Camera, self).__init__(width, height, crosshair=crosshair)
        except Exception:
            # Fixed: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt. `Exception` still covers all camera errors.
            Log.error("Failed to initialize picamera")

    @staticmethod
    def frames():
        """Yield raw BGR frames from the Pi camera indefinitely."""
        with picamera.PiCamera() as camera:
            # let camera warm up
            time.sleep(1)
            camera.resolution = (Camera.width, Camera.height)
            rawCapture = PiRGBArray(camera, size=(Camera.width, Camera.height))
            for _ in camera.capture_continuous(rawCapture, 'bgr', use_video_port=True):
                img = rawCapture.array
                yield img
                # reset rawCapture for next frame
                rawCapture.truncate(0)

    def isOpened(self):
        """ whether the camera is ready and availabe """
        return self.cameraReady

    @staticmethod
    def createCamera(config):
        """ create a Camera using settings defined in config """
        width = config.getOrAddInt('camera.width', 1280)
        height = config.getOrAddInt('camera.height', 720)
        crosshair = config.getOrAddBool('camera.drawCrosshair', 'true')
        camera = Camera(width=width, height=height, crosshair=crosshair)
        return camera
| StarcoderdataPython |
12837444 | import numpy as np
import reciprocalspaceship as rs
import tensorflow as tf
from tensorflow import keras as tfk
from IPython import embed
class SelfAttentionBlock(tfk.layers.Layer):
    """Transformer-style block: masked multi-head self-attention plus a
    position-wise feed-forward network, each with a residual connection
    followed by layer normalization."""

    def __init__(self, attention_dims, num_heads, ff_dims=None):
        """
        Parameters
        ----------
        attention_dims : int
            Width of the attention keys/values and of the block output.
        num_heads : int
            Number of attention heads.
        ff_dims : int or None
            Hidden width of the feed-forward sub-layer; defaults to
            ``attention_dims``.
        """
        super().__init__()
        # Fixed: removed the dead no-op statement `attention_dims = attention_dims`.
        if ff_dims is None:
            ff_dims = attention_dims
        self.ff_dims = ff_dims
        self.att = tfk.layers.MultiHeadAttention(
            num_heads = num_heads,
            key_dim = attention_dims,
            value_dim = attention_dims,
            output_shape = attention_dims,
        )
        self.ff = tfk.Sequential([
            tfk.layers.Dense(ff_dims, activation='ReLU', kernel_initializer='identity'),
            tfk.layers.Dense(attention_dims, kernel_initializer='identity'),
        ])
        # NOTE(review): one LayerNormalization instance is shared by both
        # residual adds in call(); standard transformer blocks use two
        # independent norms — confirm the sharing is intentional.
        self.layer_norm = tfk.layers.LayerNormalization()

    def build(self, shapes):
        # shapes is (qkv_shape, mask_shape); the mask's last static dimension
        # is the padded number of reflections per image.
        mask = shapes[-1]
        self.reflections_per_image = mask[-1]

    def call(self, inputs):
        """inputs : (qkv, mask); returns (output, mask) with padded rows zeroed."""
        qkv, mask = inputs
        # Pairwise visibility: position i attends to j only if both are unmasked.
        attention_mask = mask@tf.transpose(mask, [0, 2, 1])
        out = self.att(qkv, qkv, qkv, attention_mask)
        out = self.layer_norm(out + qkv)
        out = self.layer_norm(self.ff(out) + out)
        out = out*mask
        return out, mask
class Assigner(tfk.models.Model):
    """Stack of self-attention blocks that predicts, per reflection, a
    categorical distribution over Miller indices h, k, l in [-hmax, hmax]."""
    def __init__(self, attention_blocks, attention_dims, num_heads, ff_dims=None, hmax=50):
        super().__init__()
        if ff_dims is None:
            ff_dims = attention_dims
        # Linear embedding of the raw per-reflection features into model width.
        self.embed = tfk.layers.Dense(attention_dims, kernel_initializer='identity')
        self.encoder_layers = []
        for i in range(attention_blocks):
            self.encoder_layers.append(SelfAttentionBlock(attention_dims, num_heads, ff_dims=ff_dims))
        # Decoder: logits for 3 index components x (2*hmax + 1) possible values,
        # reshaped and soft-maxed over the value axis.
        self.decoder_layers = []
        self.decoder_layers.append(tfk.layers.Dense(3 * (2*hmax + 1), kernel_initializer='identity'))
        self.decoder_layers.append(tfk.layers.Reshape((-1, 3, 2*hmax+1)))
        self.decoder_layers.append(tfk.layers.Softmax(axis=-1))
    def call(self, inputs):
        """
        inputs : (xypos, mask)
        """
        qkv, mask = inputs
        #Preprocess qkv a bit
        qkv = self.embed(qkv)
        out = (qkv, mask)
        for layer in self.encoder_layers:
            out = layer(out)
        # Each encoder layer returns (tensor, mask); drop the mask for decoding.
        out = out[0]
        for layer in self.decoder_layers:
            out = layer(out)
        return out
| StarcoderdataPython |
13422 | <filename>core/gf/test.py
import pytest
import server
@pytest.fixture(scope="session")
def authorship_grammar():
with open("test_grammars/Authorship.gf", "r") as f:
abstract = {"content": f.read()}
with open("test_grammars/AuthorshipEng.gf", "r") as f:
inst = {"content": f.read(), "key": "Eng"}
return server.compile_grammar("Authorship", abstract, [inst])
def test_compile_grammar(authorship_grammar):
    """The fixture grammar compiles and exposes exactly one concrete language."""
    result = authorship_grammar
    print(result)
    assert result
    langs = result.languages
    assert len(langs) == 1
    assert "AuthorshipEng" in langs
def test_generation_results(authorship_grammar):
    """Generating all variants for the English concrete grammar yields the
    full expected set of templated authorship sentences."""
    expressions = server.generate_expressions(authorship_grammar)
    results = list([(k, server.generate_variants(expressions, concrete))
                    for k, concrete in authorship_grammar.languages.items()])
    print(results)
    # Only one language is registered, so the first entry is AuthorshipEng.
    (_, r0) = results[0]
    assert set(r0) == set([
        "good {{TITLE}} is authored by {{AUTHOR}}",
        "good {{TITLE}} is written by {{AUTHOR}}",
        "excellent {{TITLE}} is authored by {{AUTHOR}}",
        "excellent {{TITLE}} is written by {{AUTHOR}}",
        "{{AUTHOR}} is the author of excellent {{TITLE}}",
        "{{AUTHOR}} is the author of good {{TITLE}}",
        "{{AUTHOR}} was authored by good {{TITLE}}",
        "{{AUTHOR}} was authored by excellent {{TITLE}}",
    ])
| StarcoderdataPython |
4908689 | <gh_stars>0
from model.utils import *
from model.DL_ClassifierModel import *
# Load the lncRNA dataset as 3-mers, holding out 20% for validation.
dataClass = DataClass('data.txt', validSize=0.2, testSize=0.0, kmers=3)
# Train 64-dimensional char2vec k-mer embeddings for the classifier input.
dataClass.vectorize("char2vec", feaSize=64)
# Hyperparameters: SPP size, filter count, k-mer length, embedding dim.
s, f, k, d = 64, 128, 3, 64
model = TextClassifier_SPPCNN(classNum=5, embedding=dataClass.vector['embedding'], SPPSize=s, feaSize=d, filterNum=f, contextSizeList=[1, 3, 5], embDropout=0.3, fcDropout=0.5, useFocalLoss=True, device="cuda")
# 5-fold cross-validated training with early stopping; reports ACC/MaF/AUC.
model.cv_train(dataClass, trainSize=1, batchSize=16, stopRounds=200, earlyStop=10, epoch=100, kFold=5, savePath=f"out/DeepLncLoc_s{s}_f{f}_k{k}_d{d}", report=['ACC', 'MaF', 'MiAUC', 'MaAUC'])
9606980 | '''
Author : <NAME>
Codeforces ID : Kazi_Amit_Hasan
Problem: Codeforces 71A (Way Too Long Words)
'''
class WayTooLongWords:
    """Codeforces 71A: abbreviate every word longer than ten characters as
    ``<first letter><inner length><last letter>`` (e.g. localization -> l10n)."""

    def solve(self, strInputs):
        """Return a new list with each over-long word abbreviated; short
        words are passed through unchanged."""
        abbreviated = []
        for word in strInputs:
            if len(word) > 10:
                word = f"{word[0]}{len(word) - 2}{word[-1]}"
            abbreviated.append(word)
        return abbreviated
if __name__ == "__main__":
i = 0
strInputs = list()
n = int(input())
while (i < n):
strInputs.append(input())
i += 1
wtlw = WayTooLongWords()
for line_output in wtlw.solve(strInputs):
print (line_output)
| StarcoderdataPython |
3555985 | from PreprocessData.all_class_files.Intangible import Intangible
import global_data
class Brand(Intangible):
    """Schema.org ``Brand``: an Intangible with aggregateRating, logo and
    review properties. Attribute writes are type-checked via __setattr__."""
    def __init__(self, additionalType=None, alternateName=None, description=None, disambiguatingDescription=None, identifier=None, image=None, mainEntityOfPage=None, name=None, potentialAction=None, sameAs=None, url=None, aggregateRating=None, logo=None, review=None):
        Intangible.__init__(self, additionalType, alternateName, description, disambiguatingDescription, identifier, image, mainEntityOfPage, name, potentialAction, sameAs, url)
        self.aggregateRating = aggregateRating
        self.logo = logo
        self.review = review
    def set_aggregateRating(self, aggregateRating):
        self.aggregateRating = aggregateRating
    def get_aggregateRating(self):
        return self.aggregateRating
    def set_logo(self, logo):
        self.logo = logo
    def get_logo(self):
        return self.logo
    def set_review(self, review):
        self.review = review
    def get_review(self):
        return self.review
    def __setattr__(self, key, value_list):
        """Validate each element of *value_list* against the allowed type
        names in global_data's table before storing the attribute."""
        # None values and the internal node_id bypass validation entirely.
        if type(value_list).__name__ == "NoneType" or key == "node_id":
            self.__dict__[key] = value_list
            return
        for value in value_list:
            str_value = type(value).__name__
            # Reject any element whose type is not whitelisted for this key.
            if str_value not in global_data.get_table()[key]:
                raise ValueError("非法类型!")
        self.__dict__[key] = value_list
190037 | <reponame>joshlyman/Josh-LeetCode
# refer from:
# https://leetcode.com/problems/flatten-binary-tree-to-linked-list/solution/
# 2. Iterative Morris traversal
class Solution:
    def flatten(self, root: TreeNode) -> None:
        """
        Do not return anything, modify root in-place instead.
        """
        # Morris-style flattening: for every node with a left subtree, splice
        # that subtree between the node and its right subtree, using the left
        # subtree's rightmost node as the reattachment point. O(N) time,
        # O(1) extra space.
        current = root
        while current:
            left_subtree = current.left
            if left_subtree:
                # The predecessor is the rightmost node of the left subtree.
                predecessor = left_subtree
                while predecessor.right:
                    predecessor = predecessor.right
                # Splice: left subtree moves to the right, old right subtree
                # hangs off the predecessor, and the left pointer is cleared.
                predecessor.right = current.right
                current.right = left_subtree
                current.left = None
            current = current.right
# Time: O(N)
# Space:O(1)
# 3.Use reversed preorder traversal
# find rightmost node first, then back to left from bottom to top
# preorder is root -> left -> right, here is right -> left -> root
    def __init__(self):
        # NOTE(review): this method (and `flatten` below) is indented like a
        # class member but no enclosing `class` header is visible — it looks
        # like a second solution pasted without its `class Solution:` line.
        self.prev = None  # last node visited by the reversed preorder walk
    def flatten(self, root):
        # Reversed preorder (right -> left -> root): each visited node is
        # linked in front of the chain built so far, held in self.prev.
        if not root:
            return None
        self.flatten(root.right)
        self.flatten(root.left)
        root.right = self.prev
        root.left = None
        self.prev = root
# Time: O(N)
# Space:O(1) | StarcoderdataPython |
1873327 | """
Stage class designed to be inherited by PISA Pi services, such that all basic
functionality is built-in.
"""
from __future__ import absolute_import, division
from collections import OrderedDict
from numba import SmartArray
from pisa.core.base_stage import BaseStage
from pisa.core.binning import MultiDimBinning
from pisa.core.container import ContainerSet
from pisa.utils.log import logging
from pisa.utils.format import arg_to_tuple
from pisa.utils.profiler import profile
__all__ = ["PiStage"]
__version__ = "Pi"
__author__ = "<NAME> (<EMAIL>)"
class PiStage(BaseStage):
    """
    PISA Pi stage base class. Should be used to implement PISA Pi stages

    Specialization should be done via subclasses.

    Parameters
    ----------
    data : ContainerSet or None
        object to be passed along
    params
    expected_params
    input_names : str, iterable thereof, or None
    output_names : str, iterable thereof, or None
    debug_mode : None, bool, or str
        If ``bool(debug_mode)`` is False, run normally. Otherwise, run in debug
        mode. See `pisa.core.base_stage.BaseStage` for more information
    error_method : None, bool, or str
        If ``bool(error_method)`` is False, run without computing errors.
        Otherwise, specifies a particular method for applying errors.
    input_specs : pisa.core.binning.MultiDimBinning, str=='events', or None
        Specify the inputs (i.e. what did the last stage output, or None)
    calc_specs : pisa.core.binning.MultiDimBinning, str=='events', or None
        Specify in what to do the calculation
    output_specs : pisa.core.binning.MultiDimBinning, str=='events', or None
        Specify how to generate the outputs
    input_apply_keys : str, iterable thereof, or None
        keys needed by the apply function data (usually 'weights')
    output_apply_keys : str, iterable thereof, or None
        keys of the output data (usually 'weights')
    input_calc_keys : str, iterable thereof, or None
        external keys of data the compute function needs
    output_calc_keys : str, iterable thereof, or None
        output keys of the calculation (not intermediate results)
    map_output_key : str or None
        When producing outputs as a :obj:`Map`, this key is used to set the nominal
        values. If `None` (default), no :obj:`Map` output can be produced.
    map_output_error_key : str or None
        When producing outputs as a :obj:`Map`, this key is used to set the errors (i.e.
        standard deviations) in the :obj:`Map`. If `None` (default), maps will have no
        errors.
    """
    def __init__(
        self,
        data=None,
        params=None,
        expected_params=None,
        input_names=None,
        output_names=None,
        debug_mode=None,
        error_method=None,
        input_specs=None,
        calc_specs=None,
        output_specs=None,
        input_apply_keys=None,
        output_apply_keys=None,
        input_calc_keys=None,
        output_calc_keys=None,
        map_output_key=None,
        map_output_error_key=None,
    ):
        super().__init__(
            params=params,
            expected_params=expected_params,
            input_names=input_names,
            output_names=output_names,
            debug_mode=debug_mode,
            error_method=error_method,
        )
        self.input_specs = input_specs
        self.calc_specs = calc_specs
        self.output_specs = output_specs
        self.map_output_key = map_output_key
        self.map_output_error_key = map_output_error_key
        self.data = data
        # Derive a mode string per specs: "binned" for a MultiDimBinning,
        # "events" for the literal string, None when unspecified.
        if isinstance(self.input_specs, MultiDimBinning):
            self.input_mode = "binned"
        elif self.input_specs == "events":
            self.input_mode = "events"
        elif self.input_specs is None:
            self.input_mode = None
        else:
            raise ValueError("Cannot understand `input_specs` %s" % input_specs)
        if isinstance(self.calc_specs, MultiDimBinning):
            self.calc_mode = "binned"
        elif self.calc_specs == "events":
            self.calc_mode = "events"
        elif self.calc_specs is None:
            self.calc_mode = None
        else:
            raise ValueError("Cannot understand `calc_specs` %s" % calc_specs)
        if isinstance(self.output_specs, MultiDimBinning):
            self.output_mode = "binned"
        elif self.output_specs == "events":
            self.output_mode = "events"
        elif self.output_specs is None:
            self.output_mode = None
        else:
            raise ValueError("Cannot understand `output_specs` %s" % output_specs)
        self.input_calc_keys = arg_to_tuple(input_calc_keys)
        self.output_calc_keys = arg_to_tuple(output_calc_keys)
        self.input_apply_keys = arg_to_tuple(input_apply_keys)
        self.output_apply_keys = arg_to_tuple(output_apply_keys)
        # make a string of the modes for convenience
        # (3 chars: input/calc/output; B=binned, E=events, N=none)
        mode = ["N", "N", "N"]
        if self.input_mode == "binned":
            mode[0] = "B"
        elif self.input_mode == "events":
            mode[0] = "E"
        if self.calc_mode == "binned":
            mode[1] = "B"
        elif self.calc_mode == "events":
            mode[1] = "E"
        if self.output_mode == "binned":
            mode[2] = "B"
        elif self.output_mode == "events":
            mode[2] = "E"
        self.mode = "".join(mode)
        self.param_hash = None
        # cake compatibility
        self.outputs = None
    def setup(self):
        """Validate `self.data` and run the service's setup_function."""
        # check that data is a ContainerSet (downstream modules assume this)
        if self.data is not None:
            if not isinstance(self.data, ContainerSet):
                raise TypeError("`data` must be a `pisa.core.container.ContainerSet`")
        # check that the arrays in `data` are stored as numba `SmartArrays`
        # the downstream stages generally assume this
        # a common problem is if the user copies data before passing it to the stage then
        # a bug in SmartArray means the result is a numpy array, rather than a
        # SmartArray
        if self.data is not None:
            for container in self.data:
                for key, array in container.array_data.items():
                    if not isinstance(array, SmartArray):
                        raise TypeError(
                            "Array `%s` in `data` should be a `numba.SmartArray`, but"
                            " is a %s" % (key, type(array))
                        )
        # call the user-defined setup function
        self.setup_function()
        # invalidate param hash:
        self.param_hash = -1
    def setup_function(self):
        """Implement in services (subclasses of PiStage)"""
        pass
    @profile
    def compute(self):
        """Run the service's compute_function, converting inputs/outputs
        between events and binned representations per `self.mode`. Skipped
        entirely when the parameter hash has not changed since the last call."""
        if len(self.params) == 0 and len(self.output_calc_keys) == 0:
            return
        # simplest caching algorithm: don't compute if params didn't change
        new_param_hash = self.params.values_hash
        if new_param_hash == self.param_hash:
            logging.trace("cached output")
            return
        self.data.data_specs = self.input_specs
        # convert any inputs if necessary:
        if self.mode[:2] == "EB":
            for container in self.data:
                for key in self.input_calc_keys:
                    container.array_to_binned(key, self.calc_specs)
        elif self.mode == "EBE":
            for container in self.data:
                for key in self.input_calc_keys:
                    container.binned_to_array(key)
        #elif self.mode == "BBE":
        #    for container in self.data:
        #        for key in self.input_calc_keys:
        #            container.binned_to_array(key)
        self.data.data_specs = self.calc_specs
        self.compute_function()
        self.param_hash = new_param_hash
        # convert any outputs if necessary:
        if self.mode[1:] == "EB":
            for container in self.data:
                for key in self.output_calc_keys:
                    container.array_to_binned(key, self.output_specs)
        elif self.mode[1:] == "BE":
            for container in self.data:
                for key in self.output_calc_keys:
                    container.binned_to_array(key)
    def compute_function(self):
        """Implement in services (subclasses of PiStage)"""
        pass
    @profile
    def apply(self):
        """Run the service's apply_function, converting the apply keys between
        events and binned representations per `self.mode`."""
        self.data.data_specs = self.input_specs
        # convert any inputs if necessary:
        if self.mode[0] + self.mode[2] == "EB":
            for container in self.data:
                for key in self.input_apply_keys:
                    container.array_to_binned(key, self.output_specs)
        # elif self.mode == 'BBE':
        #     pass
        elif self.mode[0] + self.mode[2] == "BE":
            for container in self.data:
                for key in self.input_apply_keys:
                    container.binned_to_array(key)
        # if self.input_specs is not None:
        #     self.data.data_specs = self.input_specs
        # else:
        self.data.data_specs = self.output_specs
        self.apply_function()
        if self.mode == "BBE":
            for container in self.data:
                for key in self.output_apply_keys:
                    container.binned_to_array(key)
    def apply_function(self):
        """Implement in services (subclasses of PiStage)"""
        pass
    def run(self, inputs=None):
        """Execute compute() then apply(); Pi stages take no inputs."""
        if not inputs is None:
            raise ValueError("PISA pi requires there not be any inputs.")
        self.compute()
        self.apply()
        return None
    def get_outputs(self, output_mode=None, force_standard_output=True):
        """Get the outputs of the PISA stage
        Depending on `self.output_mode`, this may be a binned object, or the event container itself
        add option to force an output mode
        force_standard_output: in binned mode, force the return of a single mapset
        """
        # Figure out if the user has specified an output mode
        if output_mode is None:
            output_mode = self.output_mode
        else:
            assert output_mode == 'binned' or output_mode == 'events', 'ERROR: user-specified output mode is unrecognized'
        # Handle the binned case
        if output_mode == 'binned':
            if force_standard_output:
                # If we want the error on the map counts to be specified by something
                # other than something called "error" use the key specified in map_output_key
                # (see pi_resample for an example)
                if self.map_output_key:
                    self.outputs = self.data.get_mapset(
                        self.map_output_key,
                        error=self.map_output_error_key,
                    )
                # Very specific case where the output has two keys and one of them is error (compatibility)
                elif len(self.output_apply_keys) == 2 and 'errors' in self.output_apply_keys:
                    other_key = [key for key in self.output_apply_keys if not key == 'errors'][0]
                    self.outputs = self.data.get_mapset(other_key, error='errors')
                # return the first key in output_apply_key as the map output. add errors to the
                # map only if "errors" is part of the list of output keys
                else:
                    if 'errors' in self.output_apply_keys:
                        self.outputs = self.data.get_mapset(self.output_apply_keys[0], error='errors')
                    else:
                        self.outputs = self.data.get_mapset(self.output_apply_keys[0])
            # More generally: produce one map per output key desired, in a dict
            else:
                self.outputs = OrderedDict()
                for key in self.output_apply_keys:
                    self.outputs[key] = self.data.get_mapset(key)
        # Handle Events mode
        elif output_mode == "events":
            self.outputs = self.data
        # Throw warning that output mode failed
        else:
            self.outputs = None
            logging.warning('pi_stage.py: Cannot create CAKE style output mapset')
        return self.outputs
| StarcoderdataPython |
109209 | from app import jwt, app
from werkzeug.exceptions import HTTPException
# Global handler for unauthorized (missing/invalid JWT) requests.
@jwt.unauthorized_loader
def handle_unauthorized_error(e):
    """Return a uniform 401 payload for unauthenticated requests."""
    res = { "code": 401, "msg": str(e) }
    return res
@app.errorhandler(Exception)
def handle_error(e):
    """Catch-all error handler: map any exception to a {code, msg} payload.

    HTTPExceptions keep their own status code and data; everything else
    becomes a generic 500.
    """
    code = 500
    msg = str(e)
    if isinstance(e, HTTPException):
        code = e.code
        msg = e.data
    res = { "code": code, "msg": msg }
    return res
| StarcoderdataPython |
9767792 | <reponame>wjsi/mars<filename>mars/core/entity/__init__.py
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .chunks import Chunk, ChunkData, CHUNK_TYPE
from .core import Entity, EntityData, ENTITY_TYPE
from .executable import ExecutableTuple, _ExecuteAndFetchMixin
from .fuse import FuseChunk, FuseChunkData, FUSE_CHUNK_TYPE
from .objects import (
ObjectChunk,
ObjectChunkData,
Object,
ObjectData,
OBJECT_CHUNK_TYPE,
OBJECT_TYPE,
)
from .output_types import (
OutputType,
register_output_types,
get_output_types,
register_fetch_class,
get_fetch_class,
get_tileable_types,
get_chunk_types,
)
from .tileables import (
Tileable,
TileableData,
TILEABLE_TYPE,
HasShapeTileable,
HasShapeTileableData,
NotSupportTile,
register,
unregister,
)
from .utils import tile, recursive_tile
| StarcoderdataPython |
11252654 | """
Abstract base class and subclasses for producing different embeddings from
standard computer vision models
"""
import tensorflow as tf
import numpy as np
from abc import ABC, abstractmethod
class Predictor(ABC):
    """Abstract base for embedding predictors.

    Subclasses must implement ``model()``; the constructor instantiates the
    model once and ``predict`` runs it on a batch of inputs.

    Fixed: the class previously inherited from ``object``, so the
    ``@abstractmethod`` decorator was silently unenforced (enforcement
    requires the ``ABCMeta`` metaclass, supplied here by ``ABC``).
    """

    def __init__(self, args):
        self.batch_size = args.batch_size
        # Replace the bound method with the constructed model instance so
        # predict() can call self.model.predict_on_batch directly.
        self.model = self.model()

    @abstractmethod
    def model(self):
        """Return a model object exposing ``predict_on_batch``."""

    def predict(self, batch):
        """Run the wrapped model on one batch and return its predictions."""
        return self.model.predict_on_batch(batch)
class VGG16Predictor(Predictor):
    """Predictor producing 512-dim embeddings from headless, max-pooled VGG16."""
    def __init__(self, args):
        super().__init__(args)
    def model(self):
        # build the VGG16 network with false colour start
        # (ImageNet weights, classifier head removed, global max pooling so
        # each image maps to a single 512-dim vector)
        base_model = tf.keras.applications.VGG16(
            weights='imagenet',
            include_top=False,
            pooling='max'
        )
        return base_model
# Registry of available predictor classes and their output embedding widths,
# keyed by the model name used on the command line / in configs.
predictors_options = {'vgg16': VGG16Predictor}
embedding_sizes = {'vgg16': 512}
| StarcoderdataPython |
4945279 | from typing import Dict
from typing import List
from botocore.paginate import Paginator
# NOTE: auto-generated paginator stubs (AWS Device Farm, botocore-style).
# Bodies are intentionally empty placeholders used only for typing/signatures.
class GetOfferingStatus(Paginator):
    def paginate(self, PaginationConfig: Dict = None) -> Dict:
        pass
class ListArtifacts(Paginator):
    def paginate(self, arn: str, type: str, PaginationConfig: Dict = None) -> Dict:
        pass
class ListDeviceInstances(Paginator):
    def paginate(self, PaginationConfig: Dict = None) -> Dict:
        pass
class ListDevicePools(Paginator):
    def paginate(self, arn: str, type: str = None, PaginationConfig: Dict = None) -> Dict:
        pass
class ListDevices(Paginator):
    def paginate(self, arn: str = None, filters: List = None, PaginationConfig: Dict = None) -> Dict:
        pass
class ListInstanceProfiles(Paginator):
    def paginate(self, PaginationConfig: Dict = None) -> Dict:
        pass
class ListJobs(Paginator):
    def paginate(self, arn: str, PaginationConfig: Dict = None) -> Dict:
        pass
class ListNetworkProfiles(Paginator):
    def paginate(self, arn: str, type: str = None, PaginationConfig: Dict = None) -> Dict:
        pass
class ListOfferingPromotions(Paginator):
    def paginate(self, PaginationConfig: Dict = None) -> Dict:
        pass
class ListOfferingTransactions(Paginator):
    def paginate(self, PaginationConfig: Dict = None) -> Dict:
        pass
class ListOfferings(Paginator):
    def paginate(self, PaginationConfig: Dict = None) -> Dict:
        pass
class ListProjects(Paginator):
    def paginate(self, arn: str = None, PaginationConfig: Dict = None) -> Dict:
        pass
class ListRemoteAccessSessions(Paginator):
    def paginate(self, arn: str, PaginationConfig: Dict = None) -> Dict:
        pass
class ListRuns(Paginator):
    def paginate(self, arn: str, PaginationConfig: Dict = None) -> Dict:
        pass
class ListSamples(Paginator):
    def paginate(self, arn: str, PaginationConfig: Dict = None) -> Dict:
        pass
class ListSuites(Paginator):
    def paginate(self, arn: str, PaginationConfig: Dict = None) -> Dict:
        pass
class ListTests(Paginator):
    def paginate(self, arn: str, PaginationConfig: Dict = None) -> Dict:
        pass
class ListUniqueProblems(Paginator):
    def paginate(self, arn: str, PaginationConfig: Dict = None) -> Dict:
        pass
class ListUploads(Paginator):
    def paginate(self, arn: str, type: str = None, PaginationConfig: Dict = None) -> Dict:
        pass
class ListVPCEConfigurations(Paginator):
    def paginate(self, PaginationConfig: Dict = None) -> Dict:
        pass
| StarcoderdataPython |
1852880 | <gh_stars>1000+
"""!
@brief Unit-tests for Oscillatory Neural Network based on Kuramoto model.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest;
# Generate images without having a window appear.
import matplotlib;
matplotlib.use('Agg');
from pyclustering.nnet.tests.sync_templates import SyncTestTemplates;
from pyclustering.nnet import solve_type, conn_type;
from pyclustering.nnet.sync import sync_network, sync_dynamic, sync_visualizer;
from pyclustering.utils import pi;
class SyncUnitTest(unittest.TestCase):
    """Unit tests for the pure-Python (non-ccore) Kuramoto Sync network."""
    def testCreateNetwork(self):
        SyncTestTemplates.templateCreateNetwork(1, False);
        SyncTestTemplates.templateCreateNetwork(10, False);
        SyncTestTemplates.templateCreateNetwork(55, False);
    def testConnectionsApi(self):
        SyncTestTemplates.templateConnectionsApi(1, False);
        SyncTestTemplates.templateConnectionsApi(5, False);
        SyncTestTemplates.templateConnectionsApi(10, False);
    def testSyncOrderSingleOscillator(self):
        # Check for order parameter of network with one oscillator
        network = sync_network(1, 1, ccore=False);
        assert network.sync_order() == 1;
    def testSyncOrderNetwork(self):
        # Check for order parameter of network with several oscillators
        network = sync_network(2, 1, ccore=False);
        sync_state = 1;
        tolerance = 0.1;
        network.simulate(50, 20, solve_type.RK4);
        assert (abs(network.sync_order() - sync_state) < tolerance) == True;
    def testSyncLocalOrderSingleOscillator(self):
        network = sync_network(1, 1);
        assert network.sync_local_order() == 0;
    def testOutputNormalization(self):
        # All phases must stay normalized to the [0, 2*pi] interval.
        network = sync_network(20, 1, ccore=False);
        output_dynamic = network.simulate(50, 20, solve_type.RK4);
        t = output_dynamic.time;
        dyn = output_dynamic.output;
        for iteration in range(len(dyn)):
            for index_oscillator in range(len(dyn[iteration])):
                assert (dyn[iteration][index_oscillator] >= 0);
                assert (dyn[iteration][index_oscillator] <= 2.0 * pi);
    def testFastSolution(self):
        # Check for convergence when solution using fast way of calculation of derivative
        SyncTestTemplates.templateSimulateTest(10, 1, solve_type.FAST, False);
    def testRK4Solution(self):
        # Check for convergence when solution using RK4 function of calculation of derivative
        SyncTestTemplates.templateSimulateTest(10, 1, solve_type.RK4, False);
    def testLargeNetwork(self):
        # Check for convergence of phases in large network - network that contains large number of oscillators
        SyncTestTemplates.templateSimulateTest(128, 1, solve_type.FAST, False);
    def testOutputDynamicAroundZero(self):
        # Phases clustered around 0/2*pi wrap-around must form one ensemble.
        phases = [ [ 0.01, 0.02, 0.04, 6.27, 6.28, 6.25, 0.03] ];
        time = [ 10.0 ];
        output_sync_dynamic = sync_dynamic(phases, time, None);
        assert len(output_sync_dynamic.allocate_sync_ensembles(0.2)) == 1;
        assert len(output_sync_dynamic.allocate_sync_ensembles(0.1)) == 1;
        phases = [ [ 1.02, 1.05, 1.52, 5.87, 5.98, 5.14] ];
        output_sync_dynamic = sync_dynamic(phases, time, None);
        assert len(output_sync_dynamic.allocate_sync_ensembles(3.0)) == 1;
        assert len(output_sync_dynamic.allocate_sync_ensembles(2.0)) == 1;
    def testDynamicSimulationAllToAll(self):
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(10, 1, conn_type.ALL_TO_ALL, False);
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(50, 1, conn_type.ALL_TO_ALL, False);
    def testDynamicSimulationGridFour(self):
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(9, 1, conn_type.GRID_FOUR, False);
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(25, 1, conn_type.GRID_FOUR, False);
    def testDynamicSimulationGridEight(self):
        # NOTE(review): this test passes GRID_FOUR although the name says
        # GridEight — likely a copy-paste slip; confirm against conn_type.
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(9, 1, conn_type.GRID_FOUR, False);
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(25, 1, conn_type.GRID_FOUR, False);
    def testDynamicSimulationBidir(self):
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(5, 1, conn_type.LIST_BIDIR, False);
        SyncTestTemplates.templateDynamicSimulationConnectionTypeTest(10, 1, conn_type.LIST_BIDIR, False);
    def testTwoOscillatorDynamic(self):
        SyncTestTemplates.templateDynamicSimulationConvergence(2, 1, conn_type.ALL_TO_ALL, False);
    def testThreeOscillatorDynamic(self):
        SyncTestTemplates.templateDynamicSimulationConvergence(3, 1, conn_type.ALL_TO_ALL, False);
    def testFourOscillatorDynamic(self):
        SyncTestTemplates.templateDynamicSimulationConvergence(4, 1, conn_type.ALL_TO_ALL, False);
    def testFiveOscillatorDynamic(self):
        SyncTestTemplates.templateDynamicSimulationConvergence(5, 1, conn_type.ALL_TO_ALL, False);
    def testSixOscillatorDynamic(self):
        SyncTestTemplates.templateDynamicSimulationConvergence(6, 1, conn_type.ALL_TO_ALL, False);
    def testSevenOscillatorDynamic(self):
        SyncTestTemplates.templateDynamicSimulationConvergence(7, 1, conn_type.ALL_TO_ALL, False);
    def testOutputDynamicLengthSimulation(self):
        net = sync_network(5, ccore=False);
        output_dynamic = net.simulate(10, 10, solution = solve_type.FAST, collect_dynamic = True);
        assert len(output_dynamic) == 11; # 10 steps without initial values.
    def testOutputDynamicLengthStaticSimulation(self):
        net = sync_network(5, ccore=False);
        output_dynamic = net.simulate_static(10, 10, solution = solve_type.FAST, collect_dynamic = True);
        assert len(output_dynamic) == 11; # 10 steps without initial values.
    def testOutputDynamicLengthStaticSimulationWithouCollecting(self):
        net = sync_network(5, ccore=False);
        output_dynamic = net.simulate_static(10, 10, solution = solve_type.FAST, collect_dynamic = False);
        assert len(output_dynamic) == 1; # 10 steps without initial values.
    def testOutputDynamicLengthDynamicSimulation(self):
        net = sync_network(5, ccore=False);
        output_dynamic = net.simulate_dynamic(solution = solve_type.FAST, collect_dynamic = True);
        assert len(output_dynamic) > 1;
    def testOutputDynamicLengthDynamicSimulationWithoutCollecting(self):
        net = sync_network(5, ccore=False);
        output_dynamic = net.simulate_dynamic(solution = solve_type.FAST, collect_dynamic = False);
        assert len(output_dynamic) == 1;
    def testInfoAllicationWithNoSimulation(self):
        # Allocation APIs must degrade gracefully when no simulation was run.
        output_dynamic = sync_dynamic(None, None, None);
        ensembles = output_dynamic.allocate_sync_ensembles();
        assert ensembles == [];
        matrix = output_dynamic.allocate_correlation_matrix();
        assert matrix == [];
    def testOutputDynamicCalculateOrderParameter(self):
        SyncTestTemplates.templateOutputDynamicCalculateOrderParameter(False);
    def testOutputDynamicCalculateLocalOrderParameter(self):
        SyncTestTemplates.templateOutputDynamicCalculateLocalOrderParameter(False);
    def testVisualizerOrderParameterNoFailures(self):
        # Smoke test: visualizer calls must not raise (Agg backend, no window).
        net = sync_network(10, ccore = False);
        output_dynamic = net.simulate_static(20, 10, solution = solve_type.FAST, collect_dynamic = True);
        sync_visualizer.show_order_parameter(output_dynamic);
        sync_visualizer.show_order_parameter(output_dynamic, 0);
        sync_visualizer.show_order_parameter(output_dynamic, 5);
        sync_visualizer.show_order_parameter(output_dynamic, 5, 20);
    def testVisualizeLocalOrderParameterNoFailures(self):
        net = sync_network(10, ccore = False);
        output_dynamic = net.simulate_static(20, 10, solution = solve_type.FAST, collect_dynamic = True);
        sync_visualizer.show_local_order_parameter(output_dynamic, net);
        sync_visualizer.show_local_order_parameter(output_dynamic, net, 0);
        sync_visualizer.show_local_order_parameter(output_dynamic, net, 5);
        sync_visualizer.show_local_order_parameter(output_dynamic, net, 5, 20);
    def testVisualizerNoFailures(self):
        SyncTestTemplates.templateVisualizerNoFailures(5, 10, False);
| StarcoderdataPython |
# Python exercise 15:
# Ask how many km a rented car was driven and for how many days it was
# rented; compute the total price at R$60 per day plus R$0.15 per km.
print('------------------------------')
print('      LOCADORA DE VEÍCULOS    ')
print('------------------------------')
print('| Dia de aluguel R$ 60.00    |')
print('| R$ 0.15 por Km rodado      |')
print('------------------------------')
print()
dias = int(input('Quantos dias de aluguel?'))
km = float(input('Quantos kilomêtros foram percorridos? '))
# Cost calculation: daily rate plus per-km rate.
t_dias= dias * 60
t_km = km * 0.15
print()
print('Valor total a pagar R$ {:5.2f}'.format(t_dias + t_km))
print()
print('------ Relatório de despesas ------')
print(f'RS {t_dias:5.2f} referente aos dias de aluguel do veículo')
print(f'R$ {t_km} referente a {km} km rodados')
6428625 | <reponame>mltony/nestts
import copy
import numbers
import sys
import topology
from topology import ValidationError
import util
from variable import Variable
class TopologyError(Exception):
    """Topology error; may indicate that the network graph contains a loop."""
class Connection:
    """A weighted link between two neurons, owned by the ``to_id`` neuron.

    Data normally flows from ``from_id`` to ``to_id``; when ``reverse`` is
    True the information travels the opposite way.
    """

    def __init__(self):
        self.to_id = None
        self.from_id = None
        self.reverse = False
        self.weight = Variable()

    def get_variables(self):
        """Return the trainable variables of this connection (its weight)."""
        return [self.weight]

    def __str__(self):
        arrow = "<-" if self.reverse else "->"
        return f"{self.from_id}{arrow}{self.to_id}:{self.weight.value:.2f}"
class Neuron:
    """A single unit of the network.

    ``connections`` holds the outgoing links (this neuron is ``from_id``);
    ``incoming_connections`` holds the links where this neuron is ``to_id``.
    Whether a link actually feeds data in or out depends on its ``reverse``
    flag, hence the pairing logic in get_inputs()/get_outputs().
    """
    def __init__(self):
        self.id = None
        self.network = None
        self.x = 0.0  # current activation value
        self.connections = [] # outgoing connections
        self.incoming_connections = []
    def get_inputs(self):
        """Map source-neuron id -> connection, for every link feeding data in."""
        outgoing = [c for c in self.connections if c.reverse]
        incoming = [c for c in self.incoming_connections if not c.reverse]
        return dict(
            [(c.from_id, c) for c in incoming] +
            [(c.to_id, c) for c in outgoing]
        )
    def get_outputs(self):
        """Map destination-neuron id -> connection, for every link carrying data out."""
        outgoing = [c for c in self.connections if not c.reverse]
        incoming = [c for c in self.incoming_connections if c.reverse]
        return dict(
            [(c.from_id, c) for c in incoming] +
            [(c.to_id, c) for c in outgoing]
        )
    def get_variables(self):
        """Return the trainable variables of all outgoing connections.

        BUG FIX: the for-clauses were swapped
        (``for v in c.get_variables() for c in self.connections``), which
        raises NameError because the first clause's iterable is evaluated
        before ``c`` is bound.
        """
        return [v for c in self.connections for v in c.get_variables()]
    def precompute(self):
        """Cache the input/output maps; must run before compute()."""
        self.inputs = self.get_inputs()
        self.outputs = self.get_outputs()
    def compute(self):
        """Recompute this neuron's activation from its cached inputs."""
        m = self.inputs
        self.x = 0
        for id,c in m.items():
            self.x += self.network.neurons[id].x * c.weight.value
        self.x = util.sigmoid(self.x)
class ArtificialNeuralNetwork:
    """Mutable network of Neuron objects keyed by integer id.

    ``inputs``/``outputs`` list the designated boundary neurons; precompute()
    caches a topological evaluation order for every other neuron.
    """
    def __init__(self):
        # ids start at 1 and are never reused (see get_next_id)
        self.id_counter = 1
        self.neurons = {}
        self.inputs = []
        self.outputs = []
    def get_next_id(self):
        """Return a fresh neuron id (monotonically increasing, never reused)."""
        result = self.id_counter
        self.id_counter += 1
        return result
    def create_neuron(self):
        """Allocate a new Neuron, register it in the network and return it."""
        id = self.get_next_id()
        neuron = Neuron()
        neuron.id = id
        neuron.network = self
        self.neurons[id] = neuron
        return neuron
    def precompute(self):
        """Refresh per-neuron caches and the topological evaluation order.

        Must be called after any structural change and before compute().
        Input neurons are excluded from ``self.order`` because compute()
        sets their values directly.
        """
        [n.precompute() for n in self.neurons.values()]
        order = self.topological_sort()
        input_ids = set([input.id for input in self.inputs])
        self.order = [i for i in order if i not in input_ids]
    def topological_sort(self):
        """Kahn's algorithm over the data-flow graph.

        Returns neuron ids ordered so every neuron appears after all the
        neurons feeding into it; raises TopologyError on a cycle.
        """
        # map id -> set of ids this neuron sends data to (consumed below)
        outputs = dict([
            (neuron.id, set(neuron.outputs.keys()))
            for neuron in self.neurons.values()
        ])
        empty = [id for id,d in outputs.items() if len(d) == 0]
        order = []
        while len(empty) > 0:
            i = empty.pop()
            order.append(i)
            for j in self.neurons[i].inputs.keys():
                outputs[j].discard(i)
                if len(outputs[j]) == 0:
                    empty.append(j)
        # NOTE(review): iterating self.neurons yields its keys, none of which
        # are None, so this is effectively len(self.neurons).
        if len(order) != len([neuron for neuron in self.neurons if neuron is not None]):
            raise TopologyError("Topological sort failed")
        return order[::-1]
    def compute(self, input):
        """Feed ``input`` to the input neurons, evaluate the network in
        topological order and return the output neurons' activations."""
        assert(len(input) == len(self.inputs))
        assert(all([isinstance(x, numbers.Number) for x in input]))
        for i in range(len(input)):
            self.inputs[i].x = input[i]
        # Assume precompute() has been invoked
        for id in self.order:
            self.neurons[id].compute()
        return [output.x for output in self.outputs]
    def validate(self):
        """Check structural invariants; raises ValidationError/RuntimeError on violation."""
        try:
            self.precompute()
        except TopologyError as e:
            raise ValidationError(e)
        for id,neuron in self.neurons.items():
            assert(id == neuron.id)
            # The two dangling-neuron checks below are intentionally disabled.
            if len(neuron.inputs) == 0:
                if neuron not in self.inputs:
                    pass
                    #raise ValidationError("Neuron %d has no inputs." % neuron.id)
            if len(neuron.outputs) == 0:
                if neuron not in self.inputs and neuron not in self.outputs:
                    pass
                    #raise ValidationError("Neuron %d has no outputs." % neuron.id)
            # a neuron may be linked to any other neuron at most once
            connected_ids = [c.to_id for c in neuron.connections]
            connected_ids += [c.from_id for c in neuron.incoming_connections]
            if len(connected_ids) != len(set(connected_ids)):
                # There are duplicates
                raise ValidationError("Duplicated connections for neuron %d" % neuron.id)
            # every connection must be registered symmetrically on both ends
            for c in neuron.connections:
                if neuron.id != c.from_id:
                    raise RuntimeError("Invalid graph state: neuron %d connection %d >> %d" % (neuron.id, c.from_id, c.to_id))
                if c not in self.neurons[c.to_id].incoming_connections:
                    raise RuntimeError("Invalid graph state: connection %d >> %d" % (neuron.id, c.to_id))
            for c in neuron.incoming_connections:
                if neuron.id != c.to_id:
                    raise RuntimeError("Invalid graph state: neuron %d connection %d << %d" % (neuron.id, c.from_id, c.to_id))
                if c not in self.neurons[c.from_id].connections:
                    raise RuntimeError("Invalid graph state: connection %d << %d" % (neuron.id, c.to_id))
        for neuron in self.inputs:
            if len(neuron.inputs) > 0:
                raise ValidationError("Input neuron %d has an input connection" % neuron.id)
        for neuron in self.outputs:
            if len(neuron.outputs) > 0:
                # Not sure if this should be a valid configuration
                #raise ValidationError("Output neuron %d has output connections" % neuron.id))
                pass
    def connect(self, from_id, to_id, reverse, weight=None):
        """Create a connection between two existing, distinct neurons.

        When ``weight`` is None the weight defaults to 1.0; otherwise the
        given Variable is deep-copied so the connections stay independent.
        """
        assert(from_id in self.neurons.keys())
        assert(to_id in self.neurons.keys())
        assert(from_id != to_id)
        c = Connection()
        c.from_id = from_id
        c.to_id = to_id
        c.reverse = reverse
        if weight is None:
            c.weight.value = 1.0
        else:
            c.weight = copy.deepcopy(weight)
        self.neurons[from_id].connections.append(c)
        self.neurons[to_id].incoming_connections.append(c)
        return c
    def disconnect(self, connection):
        """Remove a connection from both of its endpoint neurons."""
        assert(connection in self.neurons[connection.from_id].connections)
        self.neurons[connection.from_id].connections.remove(connection)
        assert(connection not in self.neurons[connection.from_id].connections)
        assert(connection in self.neurons[connection.to_id].incoming_connections)
        self.neurons[connection.to_id].incoming_connections.remove(connection)
        assert(connection not in self.neurons[connection.to_id].incoming_connections)
    def split(self, connection):
        """Replace a connection by a new neuron and two connections through it.

        The first new connection inherits the old weight; the second gets the
        default weight.  Returns (new_neuron, first_connection, second_connection).
        """
        self.disconnect(connection)
        neuron = self.create_neuron()
        c1 = self.connect(connection.from_id, neuron.id, connection.reverse, weight=connection.weight)
        c2 = self.connect(neuron.id, connection.to_id, connection.reverse)
        return (neuron, c1, c2)
    def delete(self, neuron):
        """Remove a neuron and every connection attached to it."""
        assert(neuron in self.neurons.values())
        for c in neuron.connections + neuron.incoming_connections:
            self.disconnect(c)
        del self.neurons[neuron.id]
    def get_size(self):
        """Number of hidden neurons (total minus inputs and outputs)."""
        return len(self.neurons) - len(self.inputs) - len(self.outputs)
    def print(self, f, prefix=""):
        # NOTE(review): this method shadows the builtin ``print`` inside the class.
        print(self.__str__(prefix), file=f)
    def __str__(self, prefix=""):
        """One line per non-input neuron: "<id>: <src>=<weight>,..." in evaluation order.

        Requires precompute() to have populated ``self.order``.
        """
        result = ""
        #input_ids = [neuron.id for neuron in self.inputs]
        for id in self.order:
            m = self.neurons[id].inputs
            # NOTE(review): the comprehension variable ``id`` shadows the loop
            # variable; harmless in Python 3 where comprehensions have their own scope.
            ss = ["%d=%.2f" % (id, c.weight.value) for id,c in m.items()]
            result += prefix + "%d: %s" % (id, ",".join(ss))
            result += "\n"
        return result
class ANNTopology(topology.Topology):
    """Topology (organism) whose genome is an ArtificialNeuralNetwork."""
    def __init__(self):
        super().__init__()
        self.network = None
    def numerical_optimize(self):
        # Hook for numerical weight optimization; intentionally a no-op here.
        pass
    def validate(self):
        """Delegate structural validation to the network (may raise ValidationError)."""
        self.network.validate()
    def get_size(self):
        """Organism size = hidden-neuron count of the underlying network."""
        return self.network.get_size()
    def print(self, f=sys.stdout, prefix=""):
        """Write a human-readable summary of this organism to ``f``.

        NOTE(review): attributes such as parent/is_winner/children/mutations
        are presumably provided by the topology.Topology base class -- verify.
        """
        try:
            parent_id = self.parent.id
            parent_id = str(parent_id)
        except AttributeError:
            # organisms without a parent (or parent without an id)
            parent_id = "None"
        attrs = ""
        if self.is_winner:
            attrs += "WINNER "
        if self.is_pivot:
            attrs += "pivot "
        if self.is_progenitor:
            attrs += "progenitor "
        print(prefix + "Org %d, parent %s %s EC=%d" % (self.id, parent_id, attrs.strip(), self.explored_count), file=f)
        prefix += "    "
        print(prefix + "Size=%d, Children=%d, PivotHits=%d, ProgenitorHits=%d" % (self.get_size(), len(self.children), self.pivot_hits, self.progenitor_hits), file=f)
        print(prefix + "Network:", file=f)
        self.network.print(f, prefix=prefix+"    ")
        print(prefix + "Mutations:", file=f)
        mprefix = prefix + "    "
        for m in self.mutations:
            print(mprefix + m, file=f)
class BooleanFunctionTopology(ANNTopology):
    """Represents the problem of learning a boolean function, such as XOR.

    ``self.tests`` is a list of (input, output) tuples: the vector fed to
    the network and the expected scalar result.
    """

    def __init__(self):
        super().__init__()

    def compute(self, input):
        """Run the network on one input vector and return its single [0, 1] output."""
        outputs = self.network.compute(input)
        assert(len(outputs) == 1)
        result = outputs[0]
        assert(0 <= result <= 1)
        return result

    def get_fitness(self, random=None):
        """Score the organism over all tests: squared closeness minus a size penalty.

        Marks the organism as winner when the score reaches fitness_threshold.
        """
        self.outputs = []
        total_error = 0.0
        for test_input, expected in self.tests:
            actual = self.compute(test_input)
            self.outputs.append(actual)
            total_error += abs(actual - expected)
        fitness = 4.0 - total_error
        assert(0 <= fitness <= 4)
        fitness = fitness ** 2 - self.NEURON_COST * len(self.network.neurons)
        if fitness >= self.fitness_threshold:
            self.is_winner = True
        return fitness

    def print(self, f=sys.stdout, prefix=""):
        """Print the base organism summary plus fitness details and per-test outputs."""
        super().print(f, prefix=prefix)
        prefix += "    "
        if self.parent is not None:
            increment_parent = "increment_p: %f" % (self.fitness - self.parent.fitness)
        else:
            increment_parent = ""
        increment_original = "increment_o: %f" % (self.fitness - self.original_fitness)
        print(prefix + "Fitness: %.2f %s %s" % (self.fitness, increment_parent, increment_original), file=f)
        print(prefix + "Outputs: %s" % ", ".join("%.2f" % x for x in self.outputs), file=f)
3483149 | import sys
import numpy as np
from .skeleton import Skeleton, Bone
from . import posquat as pq
# Make the locally installed FBX Python SDK importable before importing it.
# NOTE(review): hard-coded Windows path -- consider reading it from an
# environment variable so the module works on other machines.
fbxsdkpath = r'D:\Software\fbx_python37_x64'
if fbxsdkpath not in sys.path:
    sys.path.append(fbxsdkpath)
import FbxCommon as fb
import fbx
def find_mesh_node(pScene):
    """Depth-first search of the scene graph for the first node carrying an FbxMesh.

    Returns that node, or None when the scene contains no mesh.
    """
    def _search(node):
        if isinstance(node.GetNodeAttribute(), fbx.FbxMesh):
            return node
        for child_index in range(node.GetChildCount()):
            found = _search(node.GetChild(child_index))
            if found:
                return found
        return None

    result = _search(pScene.GetRootNode())
    return result if result else None
def read_vertices_buffer(lMeshNode):
    """Build a (control_point_count, 6) float32 vertex buffer: xyz position + xyz normal.

    Positions are transformed by the node's evaluated global transform.
    Only per-control-point, directly referenced normals are read.
    """
    lMesh = lMeshNode.GetNodeAttribute()
    lControlPointsCount = lMesh.GetControlPointsCount()
    lControlPoints = lMesh.GetControlPoints()
    m = lMeshNode.EvaluateGlobalTransform()
    # 3pos, 3normal
    vertexstride = 6
    vertices = np.zeros((lControlPointsCount, vertexstride), dtype=np.float32)
    for i in range(lControlPointsCount):
        # get positions
        vertices[i, :3] = list(m.MultT(lControlPoints[i]))[:3]
        # get normals
        for j in range(lMesh.GetLayerCount()):
            leNormals = lMesh.GetLayer(j).GetNormals()
            if leNormals:
                if leNormals.GetMappingMode() == fbx.FbxLayerElement.eByControlPoint:
                    if leNormals.GetReferenceMode() == fbx.FbxLayerElement.eDirect:
                        # NOTE(review): MultT applies the full affine transform
                        # (including translation) to the normal; verify this is
                        # intended rather than rotating the direction only.
                        vertices[i, 3:6] = list(m.MultT(leNormals.GetDirectArray().GetAt(i)))[:3]
    return vertices
def read_index_buffer(lMeshNode):
    """Return the mesh's triangle index buffer as a flat numpy int array.

    Polygons with more than three corners are fan-triangulated around their
    first corner, so an n-gon contributes n-2 triangles.
    """
    lMesh = lMeshNode.GetNodeAttribute()
    indices = []
    for i in range(lMesh.GetPolygonCount()):
        lPolygonSize = lMesh.GetPolygonSize(i)
        # BUG FIX: the previous code emitted sliding-window triples
        # (j-2, j-1, j), which mis-covers quads and leaves holes in larger
        # polygons; a fan triangulation must stay anchored at corner 0.
        anchor = lMesh.GetPolygonVertex(i, 0)
        for j in range(2, lPolygonSize):
            indices.append(anchor)
            indices.append(lMesh.GetPolygonVertex(i, j - 1))
            indices.append(lMesh.GetPolygonVertex(i, j))
    # np.int was deprecated and removed from numpy; plain int yields the
    # same default integer dtype.
    return np.array(indices, dtype=int)
def read_skeleton(pScene):
    """Walk the scene's node hierarchy and build a Skeleton.

    The root node's first child is taken as the skeleton root and mesh nodes
    are skipped.  A synthetic 'COP' bone parented to the root is appended
    (meaning unconfirmed -- presumably centre of pressure/projection), and
    convenience ids and leg-segment lengths are precomputed.
    """
    skeleton = Skeleton()
    def _skel(pNode, pParent):
        # Append a bone for this node, copy its global transform into both
        # the bind pose and the initial pose, then recurse into non-mesh children.
        bone = Bone(pNode.GetName(), pParent)
        if pParent > -1:
            skeleton.bones[pParent].children.append(bone)
        skeleton.bones.append(bone)
        boneid = len(skeleton.bones) - 1
        m = pNode.EvaluateGlobalTransform()
        for i in range(4):
            for j in range(4):
                skeleton.bindpose[boneid, i, j] = m.Get(i, j)
                skeleton.initialpose[boneid, i, j] = m.Get(i, j)
        for i in range(pNode.GetChildCount()):
            childnode = pNode.GetChild(i)
            if isinstance(childnode.GetNodeAttribute(), fbx.FbxMesh) == False:
                _skel(childnode, boneid)
    lRootNode = pScene.GetRootNode()
    _skel(lRootNode.GetChild(0), -1)
    #add cop bone
    cop = Bone('COP', 0)
    skeleton.bones[0].children.append(cop)
    skeleton.bones.append(cop)
    # trim the preallocated pose arrays down to the actual bone count
    skeleton.bindpose = skeleton.bindpose[:len(skeleton.bones), :, :]
    skeleton.initialpose = skeleton.initialpose[:len(skeleton.bones), :, :]
    skeleton.parentlist = [bone.parent for bone in skeleton.bones]
    # leg segment lengths from translation rows (row 3 holds the position)
    skeleton.upleglength = np.linalg.norm(skeleton.initialpose[skeleton.boneid('Model:LeftUpLeg'), 3, :3] -
                                          skeleton.initialpose[skeleton.boneid('Model:LeftLeg'), 3, :3])
    skeleton.leglength = np.linalg.norm(skeleton.initialpose[skeleton.boneid('Model:LeftLeg'), 3, :3] -
                                        skeleton.initialpose[skeleton.boneid('Model:LeftFoot'), 3, :3])
    skeleton.hipsid = skeleton.boneid('Model:Hips')
    skeleton.leftlegids = [skeleton.boneid('Model:LeftUpLeg'),
                           skeleton.boneid('Model:LeftLeg'),
                           skeleton.boneid('Model:LeftFoot')]
    skeleton.rightlegids = [skeleton.boneid('Model:RightUpLeg'),
                            skeleton.boneid('Model:RightLeg'),
                            skeleton.boneid('Model:RightFoot')]
    skeleton.leftfootid = skeleton.leftlegids[-1]
    skeleton.rightfootid = skeleton.rightlegids[-1]
    skeleton.copid = skeleton.boneid('COP')
    # COP is synthetic: identity pose rather than anything read from the file
    skeleton.bindpose[skeleton.copid, ...] = np.eye(4)
    skeleton.initialpose[skeleton.copid, ...] = np.eye(4)
    skeleton.localinitialpq = skeleton.global_to_local(pq.pose_to_pq(skeleton.initialpose))
    return skeleton
def read_bindpose(lMeshNode, skeleton):
    """Copy each skin cluster's inverse bind matrix into skeleton.bindpose.

    Raises Exception when a cluster links to a bone the skeleton does not know.
    """
    mesh = lMeshNode.GetNodeAttribute()
    skin = mesh.GetDeformer(0, fbx.FbxDeformer.eSkin)
    for cluster_index in range(skin.GetClusterCount()):
        cluster = skin.GetCluster(cluster_index)
        linked_node = cluster.GetLink()
        bone_index = skeleton.boneid(linked_node.GetName())
        if bone_index < 0:
            raise Exception('bone {} not found in skeleton'.format(linked_node.GetName()))
        matrix = fbx.FbxAMatrix()
        matrix = cluster.GetTransformLinkMatrix(matrix)
        matrix = matrix.Inverse()
        for row in range(4):
            for col in range(4):
                skeleton.bindpose[bone_index, row, col] = matrix.Get(row, col)
def read_skinning(lMeshNode, skeleton):
    """Extract per-vertex skinning data from the mesh's first skin deformer.

    Up to 8 (bone, weight) influences are gathered per control point; the
    4 strongest are kept and renormalized to sum to 1.  Returns
    ``(indices, weights)`` arrays, each of shape (control_point_count, 4).
    """
    lMesh = lMeshNode.GetNodeAttribute()
    lControlPointsCount = lMesh.GetControlPointsCount()
    # scratch buffers: up to 8 influences per vertex before pruning
    weights = np.zeros([lControlPointsCount, 8])
    indices = np.zeros([lControlPointsCount, 8], dtype=np.int32)
    counts = np.zeros([lControlPointsCount], dtype=np.int32)
    skin = lMesh.GetDeformer(0, fbx.FbxDeformer.eSkin)
    clustercount = skin.GetClusterCount()
    for clusterid in range(clustercount):
        cluster = skin.GetCluster(clusterid)
        linkedNode = cluster.GetLink()
        boneid = skeleton.boneid(linkedNode.GetName())
        if boneid < 0:
            raise Exception('bone {} not found in skeleton'.format(linkedNode.GetName()))
        vertcount = cluster.GetControlPointIndicesCount()
        for k in range(vertcount):
            vertindex = cluster.GetControlPointIndices()[k]
            index = counts[vertindex]
            indices[vertindex, index] = boneid
            weights[vertindex, index] = cluster.GetControlPointWeights()[k]
            counts[vertindex] += 1
    # argsort is ascending, so the last four columns are the strongest influences
    ind = np.argsort(weights)[:,-4:]
    normalizeweights = np.zeros([lControlPointsCount, 4])
    normalizeindices = np.zeros([lControlPointsCount, 4], dtype=np.int32)
    for i in range(lControlPointsCount):
        normalizeweights[i,:] = weights[i,ind[i]]
        # NOTE(review): divides by zero for a vertex with no influences -- verify
        # every control point is skinned, or guard this normalization.
        normalizeweights[i, :] /= np.sum(normalizeweights[i, :])
        normalizeindices[i, :] = indices[i, ind[i]]
    return normalizeindices, normalizeweights
def read_animations(pScene, skeleton):
    """Bake every animation stack into per-frame global-space bone matrices.

    Returns {stack name: array of shape (frames, bones, 4, 4)}.  Bones with
    no matching scene node (e.g. the synthetic COP bone) keep the skeleton's
    initial pose for every frame.
    """
    animations = {}
    time = fbx.FbxTime()
    lRootNode = pScene.GetRootNode()
    # map bone name -> scene node (None when the scene has no node by that name)
    mapping = {bone.name:lRootNode.FindChild(bone.name,True,True) for bone in skeleton.bones }
    for i in range(pScene.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId))):
        lAnimStack = pScene.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId), i)
        # the stack must be current for EvaluateGlobalTransform to use it
        pScene.SetCurrentAnimationStack(lAnimStack)
        start = lAnimStack.LocalStart.Get()
        stop = lAnimStack.LocalStop.Get()
        name = lAnimStack.GetName()
        animlen = stop.GetFrameCount() - start.GetFrameCount() + 1
        bonelen = len(skeleton.bones)
        # initialise every frame with the skeleton's initial pose
        animation = np.repeat(skeleton.initialpose[np.newaxis,...], animlen, axis=0)
        for frame in range(start.GetFrameCount(), stop.GetFrameCount() + 1):
            animframe = frame - start.GetFrameCount()
            time.SetFrame(frame)
            for boneid in range(bonelen):
                bone = skeleton.bones[boneid]
                if bone.name in mapping and mapping[bone.name] is not None:
                    # NOTE(review): despite the local name, this is the *global*
                    # transform evaluated at ``time``.
                    localMatrix = mapping[bone.name].EvaluateGlobalTransform(time)
                    for i in range(4):
                        for j in range(4):
                            animation[animframe, boneid, i, j] = localMatrix.Get(i, j)
        animations[name] = animation
    return animations
class FbxReader(object):
    """Lazily extracts mesh, skeleton, skinning and animation data from an FBX file.

    Every accessor computes its result on first use and caches it for
    subsequent calls.
    """

    def __init__(self, path):
        manager, scene = fb.InitializeSdkObjects()
        if not fb.LoadScene(manager, scene, path):
            raise Exception('error in fbx file')
        self._scene = scene
        self._mesh = find_mesh_node(self._scene)
        # lazily populated caches
        self._vertices = None
        self._indices = None
        self._skinning = None
        self._skeleton = None
        self._animations = None

    def vertices_and_indices(self):
        """Return (vertex buffer, index buffer); requires a mesh in the scene."""
        if not self._mesh:
            raise Exception('no mesh')
        if self._vertices is None:
            self._vertices = read_vertices_buffer(self._mesh)
        if self._indices is None:
            self._indices = read_index_buffer(self._mesh)
        return self._vertices, self._indices

    def skeleton(self):
        """Return the cached Skeleton, reading bind poses when a mesh is present."""
        if self._skeleton is None:
            self._skeleton = read_skeleton(self._scene)
            if self._mesh:
                read_bindpose(self._mesh, self._skeleton)
        return self._skeleton

    def skinning_indices_weights(self):
        """Return per-vertex (bone indices, weights); requires a mesh in the scene."""
        if not self._mesh:
            raise Exception('no mesh')
        if self._skinning is None:
            self._skinning = read_skinning(self._mesh, self.skeleton())
        return self._skinning

    def animation_dictionary(self, skeleton=None):
        """Return {animation name: baked pose array}, using the given or cached skeleton."""
        if self._animations is None:
            if skeleton is None:
                skeleton = self.skeleton()
            self._animations = read_animations(self._scene, skeleton)
        return self._animations
| StarcoderdataPython |
88818 | <reponame>mlewis1973/pyosirix
# test_dictionary.py
"""Test suite for dicom_dictionary.py"""
# Copyright (c) 2008 <NAME>
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import unittest
from dicom.tag import Tag
from dicom.datadict import DicomDictionary, CleanName, all_names_for_tag, dictionary_description
class DictTests(unittest.TestCase):
    """Tests for the public DICOM data-dictionary helpers."""

    def testCleanName(self):
        """dicom_dictionary: CleanName returns correct strings............."""
        # assertEqual reports the differing values on failure, unlike
        # assertTrue(a == b) which only says "False is not true".
        self.assertEqual(CleanName(0x00100010), "PatientsName")
        self.assertEqual(CleanName(Tag((0x0010, 0x0010))), "PatientsName")

    def testTagNotFound(self):
        """dicom_dictionary: CleanName returns blank string for unknown tag"""
        self.assertEqual(CleanName(0x99991111), "")

    def testNameFinding(self):
        """dicom_dictionary: get long and short names for a data_element name"""
        names = all_names_for_tag(Tag(0x300a00b2))  # Treatment Machine Name
        expected = ['TreatmentMachineName']
        self.assertEqual(names, expected, "Expected %s, got %s" % (expected, names))
        names = all_names_for_tag(Tag(0x300A0120))
        expected = ['BeamLimitingDeviceAngle', 'BLDAngle']
        self.assertEqual(names, expected, "Expected %s, got %s" % (expected, names))

    def testRepeaters(self):
        """dicom_dictionary: Tags with "x" return correct dict info........"""
        self.assertEqual(dictionary_description(0x280400), 'Transform Label')
        self.assertEqual(dictionary_description(0x280410), 'Rows For Nth Order Coefficients')
class PrivateDictTests(unittest.TestCase):
    """Tests intended for the private data dictionary."""

    def testPrivate1(self):
        """private dict: """
        # NOTE(review): despite the class name, this exercises the *public*
        # PatientsName tag, duplicating DictTests.testCleanName -- confirm intent.
        # assertEqual reports the differing values on failure, unlike
        # assertTrue(a == b).
        self.assertEqual(CleanName(0x00100010), "PatientsName")
        self.assertEqual(CleanName(Tag((0x0010, 0x0010))), "PatientsName")
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
# md5 : 7cf8d5549f3c4b0a7870d0e515a9c033
# sha1 : 2be54bd63ab58118a845ed5fd57ad35ee54301b4
# sha256 : b3094ce056324c5d330849d020e0c3a2e4a7359b17434be3ae13e0f8354ce14d
# Export-ordinal -> exported-function-name table for the Windows DLL whose
# file hashes are listed above.  The OleUI* names suggest this is oledlg.dll
# -- TODO confirm against the actual binary's export table.
ord_names = {
    1: b'OleUIAddVerbMenuA',
    2: b'OleUICanConvertOrActivateAs',
    3: b'OleUIInsertObjectA',
    4: b'OleUIPasteSpecialA',
    5: b'OleUIEditLinksA',
    6: b'OleUIChangeIconA',
    7: b'OleUIConvertA',
    8: b'OleUIBusyA',
    9: b'OleUIUpdateLinksA',
    10: b'OleUIPromptUserA',
    11: b'OleUIObjectPropertiesA',
    12: b'OleUIChangeSourceA',
    13: b'OleUIAddVerbMenuW',
    14: b'OleUIBusyW',
    15: b'OleUIChangeIconW',
    16: b'OleUIChangeSourceW',
    17: b'OleUIConvertW',
    18: b'OleUIEditLinksW',
    19: b'OleUIInsertObjectW',
    20: b'OleUIObjectPropertiesW',
    21: b'OleUIPasteSpecialW',
    22: b'OleUIPromptUserW',
    23: b'OleUIUpdateLinksW',
}
4887067 | <filename>trustMonitor/trust_monitor/verifier/ra_verifier.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ra_verifier.py: execute the integrity analyses
#
# Copyright (C) 2014 Politecnico di Torino, Italy
# TORSEC group -- http://security.polito.it
#
# Author: <NAME> <<EMAIL>>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see
# <http://www.gnu.org/licenses/>.
import gc
import os
import sys
import getopt
import string
import traceback
from connection import *
import pycassa
from graph import *
from structs import *
from statistics import *
from aggregation import *
from action import *
from analysis import *
import networkx as nx
from django.conf import settings
from informationDigest import InformationDigest
import logging
# use logging system of django.
logger = logging.getLogger('verifier')
# if graph type is 'auto', RA Verifier determines the best choice depending
# on available information from IMA measurements list
# NOTE(review): graph_types is not referenced elsewhere in this module chunk;
# presumably callers use it to validate user-supplied graph types -- verify.
graph_types = ['auto', 'digests', 'lsm', 'lsm+inode', 'lsm+selinux']
class RaVerifier():
    """Coordinates the remote-attestation integrity analyses.

    NOTE(review): Digest/Package/Subject/Object/Analysis hold class-level
    caches shared across instances; __init__ and __del__ reset them, so
    concurrent RaVerifier instances are not safe -- verify callers.
    """
    def __del__(self):
        # Drop the shared class-level caches so a later verification starts clean.
        logger.debug('delete RaVerifier and clean structures')
        Digest.digests_dict = {}
        Digest.digests_query_done = False
        Digest.packages_query_done = False
        Digest.packages_query = set()
        Package.pkg_dict = {}
        Subject.subj_label_dict = {}
        Object.obj_label_dict = {}
    def __init__(self):
        logger.info('Set structures')
        Analysis.analysis_list = []
    def verifier(self, distro, analysis, infoDigest,
                 checked_containers, report_id, known_digests, port, ip):
        """Build the measurement graph from Cassandra and run the requested analyses.

        ``analysis`` is a comma-separated string: the first token names the
        analysis ('load-time', 'run-time', 'check-cert' or combinations),
        the remaining tokens are key=value parameters.  Returns the list
        ``[global_result (bool), infoDigest]`` on completion, or the int 2
        on any connection/configuration error.
        """
        logger.info('In verifier method of RaVerifier.')
        cassandraHost = (ip + ':' + port)
        logger.info('Define the Cassandra host: %s', cassandraHost)
        graph_type = 'auto'
        keyspace = 'PackagesDB'
        # NOTE(review): ``selinux`` is assigned but never used below.
        selinux = False
        selinux_policy_path = None
        results_dir = '.'
        graph = nx.DiGraph()
        try:
            logger.debug('verify conncection cassandra')
            conn = DBConnection(keyspace, [cassandraHost])
        except pycassa.pool.AllServersUnavailable as e:
            logger.error('error connection cassandra %s', e)
            return 2
        lsm_fields = ['subj', 'obj', 'bprm-subj']
        lsm_inode_fields = lsm_fields + ['lw']
        # register the certificate digest early so the graph build knows it
        if 'check-cert' in analysis:
            logger.info('Analysis is check-cert')
            for item in analysis.split(','):
                if item.startswith('cert_digest'):
                    add_known_digest(item.split('=')[1])
                    break
        # Pick the concrete graph type from the IMA template when 'auto'.
        logger.info('Define the type of graph')
        if graph_type == 'auto':
            if IMARecord.default_template() in ['ima', 'ima-ng',
                                                'ima-cont-id']:
                graph_type = 'digests'
            elif IMARecord.default_template_contains_fields(lsm_inode_fields):
                graph_type = 'lsm+inode'
            elif IMARecord.default_template_contains_fields(lsm_fields):
                graph_type = 'lsm'
        logger.info('The type of graph is %s', str(graph_type))
        if graph_type == 'auto':
            logger.error('Graph type cannot be determined, exiting.')
            return 2
        # Build the measurement graph with the aggregations/actions matching
        # the selected graph type.
        if graph_type == 'digests':
            logger.info('Define query to cassandra for graph_type %s',
                        str(graph_type))
            FileTypeAggregation(conn, distro, graph, known_digests)
            DBLibrariesAction(conn, distro, graph, known_digests)
            logger.info('Aggregation and DBLibrariesAction are done')
        # no distinction is possible between code and data
        elif graph_type == 'lsm':
            LSMLabelAggregation(conn, distro, graph)
            LSMLabelLoadAction(conn, distro, graph)
            LSMLabelAggregationRunTime(conn, distro, graph)
            LSMLabelFlowAction(conn, distro, graph)
        elif graph_type == 'lsm+inode':
            LSMLabelInodeAggregation(conn, distro, graph)
            LSMLabelLoadAction(conn, distro, graph)
            LSMLabelInodeFlowAction(conn, distro, graph)
        elif graph_type == 'lsm+selinux':
            LSMLabelAggregation(conn, distro, graph)
            LSMLabelSELinuxAction(conn, distro, graph, selinux_policy_path)
        Statistics.set_elapsed_time('time_build_graph')
        # defaults for the analysis parameters parsed below
        global_result = True
        analysis_name = analysis.split(',')[0]
        analysis_params = analysis[len(analysis_name) + 1:]
        load_time_requirement = []
        load_time_topic = 'code'
        load_time_prop_only = True
        draw_graph = False
        priv_processes_check = True
        target = ''
        tcb = []
        priv_processes = []
        cert_digest = None
        if analysis_name not in ['load-time', 'run-time', 'load-time+run-time',
                                 'check-cert', 'load-time+check-cert',
                                 'load-time+cont-check']:
            logger.error('Unknown analysis %s' % analysis_name)
            return 2
        # Parse the key=value parameters that follow the analysis name.
        # SECURITY NOTE(review): eval() on attacker-influenced parameter
        # strings is dangerous; consider ast.literal_eval for the booleans.
        for item in analysis_params.split(','):
            offset = len(item.split('=')[0]) + 1
            if item.startswith('tcb='):
                tcb = item[offset:].split('|')
            elif item.startswith('target='):
                target = item[offset:]
            elif item.startswith('draw_graph='):
                draw_graph = eval(item[offset:])
            elif item.startswith('priv_check='):
                priv_processes_check = eval(item[offset:])
            elif item.startswith('l_req='):
                load_time_requirement = item[offset:].split('|')
            elif item.startswith('l_topic='):
                load_time_topic = item[offset:]
            elif item.startswith('l_prop_only='):
                load_time_prop_only = eval(item[offset:])
            elif item.startswith('cert_digest='):
                cert_digest = item[offset:]
            elif item.startswith('cont-list='):
                checked_containers = item[offset:]
            else:
                logger.error('Unknown parameter %s' % item)
                return 2
        # TCB for graph built with LSM labels and last write information
        tcb_init_t_inode = ['sendmail_t', 'initrc_t', 'chronyd_t', 'udev_t',
                            'systemd_tmpfiles_t', 'getty_t',
                            'NetworkManager_t']
        # TCB for graph build with LSM labels only and open events
        tcb_init_t_lsm = tcb_init_t_inode + ['crond_t', 'system_dbusd_t']
        # TCB for graph build with LSM labels from execution events and
        # interactions inferred from the SELinux policy
        tcb_init_t_selinux = tcb_init_t_lsm + ['insmod_t', 'fsadm_t',
                                               'kernel_t', 'mount_t',
                                               'setfiles_t',
                                               'iptables_t', 'netutils_t',
                                               'chkpwd_t', 'ifconfig_t',
                                               'auditctl_t', 'audisp_t',
                                               'policykit_t']
        # Expand the symbolic TCB aliases ('demo_*', 'predecessors') in place.
        # NOTE(review): ``tcb`` is mutated while being iterated and ``item``
        # is unused; each alias is expanded at most once per original entry --
        # fragile but apparently intentional, verify before restructuring.
        for item in tcb:
            if 'demo_inode' in tcb:
                tcb.remove('demo_inode')
                tcb += tcb_init_t_inode
            elif 'demo_lsm'in tcb:
                tcb.remove('demo_lsm')
                tcb += tcb_init_t_lsm
            elif 'demo_selinux' in tcb:
                tcb.remove('demo_selinux')
                tcb += tcb_init_t_selinux
            elif 'predecessors' in tcb:
                tcb.remove('predecessors')
                if len(target) == 0:
                    logger.error('Missing target parameter')
                    return 2
                try:
                    a = ProcTransAnalysis(conn, distro, graph, target=target)
                    tcb += list(a.get_predecessors(target))
                    if draw_graph:
                        a.view_graph()
                except Exception as e:
                    logger.error(e)
                    return 2
        # Perform the ProcWrite analysis to see if some processed changed their
        # context or that of the next execve(). If one or more processes are
        # found different actions are done depending on the analyses to be
        # executed.
        # For the load-time analysis, perform the propagation with topic
        # code+data
        # (the configuration files affect the context written to /proc).
        # For the run-time analysis, processes are added to the chosen tcb to
        # detect whether an untrusted process tried to compromise their
        # integrity.
        # Further, if a requirement has been provided for the load-time
        # analysis, this is concatenated with a new requirement on privileged
        # processes:their severity level must be 'ok' because otherwise it
        # would be not possible to correctly associate the code executed and
        # configuration files read to subject labels (privileged processes can
        # take an arbitrary context).
        if priv_processes_check and graph_type != 'digests':
            a = ProcWriteAnalysis()
            priv_processes = a.get_subj_list()
            if ((len(target) > 0 or len(tcb) > 0) and
                    target not in priv_processes
                    and len(set(tcb) & set(priv_processes)) == 0):
                tcb.extend(priv_processes)
        # collects per-analysis failure descriptions, keyed by category
        error_message = {}
        if 'load-time' in analysis_name:
            logger.info('Analysis is load-time')
            try:
                a = LoadTimeAnalysis(conn, distro, graph,
                                     target=target, tcb=tcb,
                                     results_dir=results_dir,
                                     report_id=report_id,
                                     informationDigest=infoDigest,
                                     known_digests=known_digests)
                a.propagate_errors(load_time_topic)
                if len(priv_processes) > 0 and 'data' not in load_time_topic:
                    a.propagate_errors('data', priv_processes)
            except Exception as e:
                logger.error(e)
                return 2
            if len(load_time_requirement) > 0:
                logger.debug('analysis: %s ', load_time_requirement)
                global_result &= a.satisfies_requirement(load_time_requirement,
                                                         error_message)
                logger.info(
                    'The value of global_result after satisfies_requirement of'
                    ' analysis process is %s', global_result)
            if len(priv_processes) > 0:
                global_result &= a.satisfies_requirement_priv(
                    priv_processes, error_message)
                logger.info(
                    'The value of global result after'
                    ' satisfies_requirement_priv is %s', global_result)
            if draw_graph:
                a.view_graph(only_prop_true=load_time_prop_only)
            Statistics.set_elapsed_time('time_load_time_analysis')
        if 'run-time' in analysis_name:
            logger.info('Analysis type is run-time')
            # run-time analysis needs LSM information, which the plain
            # digest templates do not carry
            if IMARecord.default_template() in ['ima', 'ima-ng']:
                logger.error('Run-time analysis is not supported for'
                             ' template%s', IMARecord.default_template())
                return 2
            if len(tcb) == 0 and len(target) == 0:
                logger.error(
                    'Missing parameters (tcb, target) for run-time analysis')
                return 2
            try:
                a = RunTimeAnalysis(conn, distro, graph, target=target,
                                    tcb=tcb, results_dir=results_dir,
                                    report_id=report_id)
            except Exception as e:
                logger.error(e)
                return 2
            global_result &= a.satisfies_requirement(error_message)
            if draw_graph:
                a.view_graph()
            Statistics.set_elapsed_time('time_run_time_analysis')
        if 'check-cert' in analysis_name:
            logger.info('Analysis type is check-cert')
            # the certificate digest must appear among the known digests
            result = cert_digest in Digest.digests_dict.keys()
            if not result:
                error_message['cert'] = ['not found']
            global_result &= result
        Statistics.set_current_time('time_total')
        logger.info(
            'The global result of attestation is: %s' %
            ('trusted' if global_result else 'untrusted'))
        global_result_list = [global_result, infoDigest]
        return global_result_list
| StarcoderdataPython |
1792881 | <reponame>Decathlon/decavision<filename>decavision/utils/data_utils.py
import os
from random import shuffle
import sys
import tarfile
import urllib.request
import zipfile
import numpy as np
import PIL
from PIL import Image
import tensorflow as tf
def prepare_image(image_path, target_size, rescaling=255):
"""
Load and convert image to numpy array to feed it to a neural network. Image is resized, converted to RGB
and its pixels are normalized if required. An extra dimension is added to the array.
Arguments:
image_path (str): path to image to be converted
target_size (tuple(int,int)): desired size for the image
rescaling (int): divide all the pixels of the image by this number
Returns:
numpy array: processed image, with shape (1,target_size,3)
"""
image = Image.open(image_path)
# reshape the image
image = image.resize(target_size, PIL.Image.BILINEAR).convert("RGB")
# convert the image into a numpy array, and expend to a size 4 tensor
image = tf.keras.preprocessing.image.img_to_array(image)
image = np.expand_dims(image, axis=0)
# rescale the pixels to a 0-1 range
image = image.astype(np.float32) / rescaling
return image
def check_RGB(path, target_size=None):
"""
Convert all images in a folder into RGB format and resize them if desired. Images that
can't be opened are deleted. Folder must contain a subfolder for each category.
Arguments:
path (str): path to the image directory
target_size (tuple(int,int)): if specified, images are resized to this size
"""
classes = os.listdir(path)
classes_paths = [os.path.abspath(os.path.join(path, i)) for i in classes]
counter = 0
for i in classes_paths:
imgs = os.listdir(i)
imgs_paths = [os.path.abspath(os.path.join(i, j)) for j in imgs]
# Loop through all the images in the path
for img in imgs_paths:
# try to open it
try:
if target_size is not None:
jpg = Image.open(img).resize(target_size, PIL.Image.BILINEAR).convert('RGB')
else:
jpg = Image.open(img).convert('RGB')
jpg.save(str(img))
except:
# delete the file
print('Deleting', img)
os.remove(img)
counter += 1
if counter % 1000 == 1:
print('Verified', counter, 'images')
def create_dir(path):
"""
Check if directory exists and create it if it does not.
Arguments:
path (str): path to directory to create
"""
if not os.path.exists(path):
os.mkdir(path)
def split_train(path='data/image_dataset', split=0.1, with_test=False):
    """
    Separate images randomly into a training, a validation and potentially a test dataset.
    Images must be located in a folder called train, which contains a subfolder per category.
    Val and potentially test folders will be created amd images moved into it from train.
    Arguments:
        path (str): path to the image_dataset directory
        split (float): fraction of each category that we move to the validation (val) subdirectory
        with_test (bool): determine if one image of each category is moved to test dataset
    """
    # NOTE(review): despite the docstring, the test branch below moves another
    # `split` fraction (the next slice of the shuffled list), not a single
    # image per category -- confirm which behavior is intended.
    # `shuffle` is presumably `random.shuffle` imported at the top of the
    # file (not visible here) -- TODO confirm.
    # Create a val subdirectory
    create_dir(path + '/val')
    # Create a test subdirectory
    if with_test:
        create_dir(path + '/test')
    # Loop through all the categories in the train directory
    for i in os.listdir(path + '/train'):
        # Create the folder in the val subdirectory
        create_dir(path + '/val/' + i)
        # extract and shuffle all the images
        images = os.listdir(path + '/train/' + i)
        shuffle(images)
        # Move a fraction of the images to the val directory
        for j in range(int(split * len(images))):
            os.rename(path + '/train/' + i + '/' + images[j], path + '/val/' + i + '/' + images[j])
        # Move one of the images to the test directory
        if with_test:
            # Create the folder in the val subdirectory
            create_dir(path + '/test/' + i)
            # Moves the next `split`-sized slice of the shuffled list.
            for j in range(int(split * len(images)), 2 * int(split * len(images))):
                os.rename(path + '/train/' + i + '/' + images[j], path + '/test/' + i + '/' + images[j])
    print('Training dataset has been split.')
def print_download_progress(count, block_size, total_size):
    """
    Report-hook for urllib.request.urlretrieve: prints download progress,
    overwriting the current terminal line via a leading carriage return.
    Inspired by:
    https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/download.py
    """
    # Fraction done, clamped to 1.0 because rounding errors may push the
    # computed value past 100%.
    fraction = min(1.0, float(count * block_size) / total_size)
    # The \r makes each message overwrite the previous one on the same line.
    sys.stdout.write("\r- Download progress: {0:.1%}".format(fraction))
    sys.stdout.flush()
def download_dataset(download_dir='data/',
                     url='http://data.csail.mit.edu/places/places365/places365standard_easyformat.tar'):
    """
    Download a dataset in format .zip, .tar, .tar.gz or .tgz and extract the data.
    Inspired by: https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/download.py
    Arguments:
        download_dir (str): folder to store the data
        url (str): location of the dataset on the internet
    """
    # Filename for saving the file downloaded from the internet.
    # Use the filename from the URL and add it to the download_dir.
    filename = url.split('/')[-1]
    file_path = os.path.join(download_dir, filename)
    # Check if the file already exists.
    # If it exists then we assume it has also been extracted,
    # otherwise we need to download and extract it now.
    if not os.path.exists(file_path):
        # Check if the download directory exists, otherwise create it.
        if not os.path.exists(download_dir):
            os.makedirs(download_dir)
        # Download the file from the internet; print_download_progress is
        # invoked by urlretrieve after each received block.
        file_path, _ = urllib.request.urlretrieve(url=url,
                                                  filename=file_path,
                                                  reporthook=print_download_progress)
        print("\n Download finished. Extracting files. \n")
        # SECURITY NOTE(review): extractall() trusts archive member paths; a
        # malicious archive could write outside download_dir (path
        # traversal). Consider validating member names before extracting.
        if file_path.endswith(".zip"):
            # Unpack the zip-file.
            with zipfile.ZipFile(file=file_path, mode="r") as f:
                f.extractall(download_dir)
        elif file_path.endswith((".tar.gz", ".tgz")):
            # Unpack the tar-ball.
            with tarfile.open(name=file_path, mode="r:gz") as f:
                f.extractall(download_dir)
        elif file_path.endswith(".tar"):
            # Unpack tar file.
            with tarfile.open(name=file_path, mode="r") as f:
                f.extractall(download_dir)
        print("Done.")
    else:
        print("Data has apparently already been downloaded and unpacked.")
| StarcoderdataPython |
120729 | """
OpenVINO DL Workbench
Interfaces class for error processing classes
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from typing import Optional
class ErrorMessageProcessor:
    """Translate raw Inference Engine error output into user-friendly messages."""
    # Matches the '[ ERROR ] <message>' lines emitted by Inference Engine tools.
    general_ie_error_pattern = re.compile(r'(?:\[ ERROR \] (?P<error_message>.*))')
    @staticmethod
    def _broken_device_message(device_name: str) -> str:
        """Canned advice shown when a device plugin fails to initialise."""
        return f'Cannot infer this model on {device_name}. ' \
               'Possible causes: Drivers setup failed. Update the drivers or run inference on a CPU.'
    @staticmethod
    def _unsupported_model(device_name: str = 'this device'):
        """Canned advice shown when the model contains unsupported layers."""
        return f'Cannot infer this model on {device_name}. ' \
               'Possible causes: The device does not support some layers of this model. Try to run inference on a CPU.'
    # Known error-substring patterns mapped to the canned message to show.
    # The `.__func__` access is required because staticmethod objects are not
    # callable inside the class body while the class is being created.
    match_error = {
        'Failed to create plugin .* for device GPU':
            _broken_device_message.__func__('Intel(R) Processor Graphics (GPU)'),
        'Can not init Myriad device: NC_ERROR':
            _broken_device_message.__func__('Intel(R) Movidius(TM) Neural Compute Stick 2 (NCS 2)'),
        'Event sending failed':
            _broken_device_message.__func__('Intel(R) Movidius(TM) Neural Compute Stick 2 (NCS 2)'),
        'Unknown Layer Type:':
            _unsupported_model.__func__(),
    }
    @staticmethod
    def _general_error(stage: str = None) -> str:
        """Fallback message when no specific pattern matched."""
        if stage:
            return 'Failed in the stage {}'.format(stage)
        return 'Inference Engine failed with unrecognized error'
    @staticmethod
    def _recognize_general_ie_message(error_message: str) -> Optional[str]:
        """Extract the text following '[ ERROR ]' if present, else None."""
        error_match = ErrorMessageProcessor.general_ie_error_pattern.search(error_message)
        return error_match.group('error_message') if error_match else None
    @classmethod
    def _find_message(cls, error_message: str) -> Optional[str]:
        """Return the canned message for the first known pattern that matches."""
        for pattern, message in cls.match_error.items():
            # Join the pattern's words with '.*' so any text may separate
            # them; the comparison is case-insensitive.
            pattern = r'.*'.join(pattern.lower().split(' '))
            if re.search(r'.*{s}.*'.format(s=pattern), error_message.lower()):
                return message
        return None
    @classmethod
    def recognize_error(cls, error_message: str, stage: str = None) -> str:
        """Best-effort translation of a raw error string.

        Tries the known-pattern table first, then the generic '[ ERROR ]'
        extraction, and finally falls back to a stage-based generic message.
        """
        message = cls._find_message(error_message)
        if message:
            return message
        message = ErrorMessageProcessor._recognize_general_ie_message(error_message)
        if message:
            return message
        message = cls._general_error(stage)
        return message
| StarcoderdataPython |
1901771 | name = "langsci"
#__all__ = ['asciify', 'assignproofreaders', 'autoindex', 'bibnouns',
#'delatex', 'doc2tex', 'extractaw','fixindex','langscibibtex','normalizebib','sanitycheck','sanitygit','zenodo']
| StarcoderdataPython |
3402861 | """Entire flask app."""
from datetime import datetime
from flask import Flask, jsonify, request, Response
from flask_sqlalchemy import SQLAlchemy
from flask_httpauth import HTTPTokenAuth
from functools import wraps
import json
import os
from passlib.hash import pbkdf2_sha256 as hasher
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', '')
auth = HTTPTokenAuth(scheme='Token')
db = SQLAlchemy(app)
from .models import Task, Profile
INCOMING_DATE_FMT = '%d/%m/%Y %H:%M:%S'
def forbidden_response():
    """Build the 403 JSON response returned for unauthorized profile access."""
    body = json.dumps({'error': 'You do not have permission to access this profile.'})
    return Response(response=body, status=403, mimetype="application/json")
def notfound_response():
    """Build the 404 JSON response for a profile that does not exist."""
    body = json.dumps({'error': 'The profile does not exist'})
    return Response(response=body, status=404, mimetype="application/json")
def get_profile(username):
    """Return the Profile with this username, or None when no match exists."""
    return Profile.query.filter_by(username=username).first()
@auth.verify_token
def verify_token(token):
    """Verify that the incoming request carries a valid auth token.

    Tokens have the form '<username>:<secret>' (exactly what authenticate()
    stores in the auth_token cookie). Returns True only when the named
    profile exists and the reconstructed token matches.
    """
    if token:
        username = token.split(':')[0]
        profile = get_profile(username)
        # Bug fix: an unknown username used to crash with AttributeError
        # because profile was None when profile.token was read.
        if profile is None:
            return False
        # Bug fix: authenticate() issues '<username>:<token>', so the bare
        # stored token could never equal the full incoming value; compare
        # against the same reconstruction instead.
        return token == f'{profile.username}:{profile.token}'
    return False
def authenticate(response, profile):
    """Attach the user's auth token to an outgoing response as a cookie.

    The cookie value has the form '<username>:<token>'; verify_token reads
    it back on subsequent requests.
    """
    token = f'{profile.username}:{profile.token}'
    response.set_cookie('auth_token', value=token)
    return response
@app.route('/api/v1')
def index():
    """List of routes for this API."""
    # Static route map: serves as self-documentation at the API root.
    output = {
        'info': 'GET /api/v1',
        'register': 'POST /api/v1/accounts',
        'single profile detail': 'GET /api/v1/accounts/<username>',
        'edit profile': 'PUT /api/v1/accounts/<username>',
        'delete profile': 'DELETE /api/v1/accounts/<username>',
        'login': 'POST /api/v1/accounts/login',
        'logout': 'GET /api/v1/accounts/logout',
        "user's tasks": 'GET /api/v1/accounts/<username>/tasks',
        "create task": 'POST /api/v1/accounts/<username>/tasks',
        "task detail": 'GET /api/v1/accounts/<username>/tasks/<id>',
        "task update": 'PUT /api/v1/accounts/<username>/tasks/<id>',
        "delete task": 'DELETE /api/v1/accounts/<username>/tasks/<id>'
    }
    response = jsonify(output)
    return response
@app.route('/api/v1/accounts', methods=['POST'])
def register():
    """Add a new user profile if it doesn't already exist."""
    # NOTE(review): the '<PASSWORD>' tokens below look like placeholders left
    # by data redaction (probably a confirm-password field name and a
    # hasher.hash(...) call). As written, `password=<PASSWORD>(...)` is not
    # valid Python -- restore the original identifiers before running.
    needed = ['username', 'email', 'password', '<PASSWORD>']
    if all([key in request.form for key in needed]):
        username = request.form['username']
        profile = get_profile(username)
        if not profile:
            # Both password fields must match before the account is created.
            if request.form['password'] == request.form['<PASSWORD>']:
                new_profile = Profile(
                    username=username,
                    email=request.form['email'],
                    password=<PASSWORD>(request.form['password']),
                )
                db.session.add(new_profile)
                db.session.commit()
                # 201 Created, with the auth cookie attached so the new user
                # is immediately logged in.
                response = Response(
                    response=json.dumps({"msg": 'Profile created'}),
                    status=201,
                    mimetype="application/json"
                )
                return authenticate(response, new_profile)
            response = jsonify({"error": "Passwords don't match"})
            response.status_code = 400
            return response
        response = jsonify({'error': f'Username "{username}" is already taken'})
        response.status_code = 400
        return response
    response = jsonify({'error': 'Some fields are missing'})
    response.status_code = 400
    return response
@app.route('/api/v1/accounts/login', methods=['POST'])
def login():
    """Authenticate a user and attach the auth cookie on success.

    Returns 200 with the cookie on valid credentials, otherwise a 400 JSON
    error body.
    """
    needed = ['username', 'password']
    # Bug fixes: Flask exposes POST data as request.form (request.forms does
    # not exist, so every request raised AttributeError), and both error
    # paths referenced `response` before it was ever assigned (NameError)
    # while returning a bare dict instead of a Response.
    if all([key in request.form for key in needed]):
        profile = get_profile(request.form['username'])
        if profile and hasher.verify(request.form['password'], profile.password):
            response = Response(
                response=json.dumps({'msg': 'Authenticated'}),
                mimetype="application/json",
                status=200
            )
            return authenticate(response, profile)
        response = jsonify({'error': 'Incorrect username/password combination.'})
        response.status_code = 400
        return response
    response = jsonify({'error': 'Some fields are missing'})
    response.status_code = 400
    return response
@app.route('/api/v1/accounts/logout', methods=["GET"])
def logout():
    """Log a user out."""
    # Stateless logout: there is no server-side session to clear; the client
    # is expected to drop its auth_token cookie.
    return jsonify({'msg': 'Logged out.'})
@app.route('/api/v1/accounts/<username>', methods=["GET"])
@auth.login_required
def profile_detail(username):
    """Return the serialized detail for a single profile."""
    profile = get_profile(username)
    if not profile:
        return notfound_response()
    detail = Response(
        response=json.dumps(profile.to_dict()),
        mimetype="application/json",
    )
    # Refresh the auth cookie on the way out.
    return authenticate(detail, profile)
@app.route('/api/v1/accounts/<username>/tasks', methods=['GET'])
@auth.login_required
def task_list(username):
    """List every task belonging to one user."""
    profile = get_profile(username)
    if not profile:
        return notfound_response()
    payload = {
        'username': username,
        'tasks': [task.to_dict() for task in profile.tasks.all()],
    }
    listing = Response(
        response=json.dumps(payload),
        mimetype="application/json",
    )
    return authenticate(listing, profile)
@app.route('/api/v1/accounts/<username>/tasks', methods=['POST'])
@auth.login_required
def create_task(username):
    """Create a new task for the given user.

    Expects 'name', 'note' and 'completed' form fields; 'due_date' is
    optional and parsed with INCOMING_DATE_FMT when present.
    """
    profile = get_profile(username)
    if profile:
        # Bug fix: the old code referenced an undefined local `due_date`,
        # raising NameError on every request. Read it from the form instead
        # (optional field, hence .get()).
        due_date = request.form.get('due_date')
        task = Task(
            name=request.form['name'],
            note=request.form['note'],
            creation_date=datetime.now(),
            due_date=datetime.strptime(due_date, INCOMING_DATE_FMT) if due_date else None,
            completed=request.form['completed'],
            profile_id=profile.id,
        )
        db.session.add(task)
        db.session.commit()
        output = {'msg': 'posted'}
        response = Response(
            mimetype="application/json",
            response=json.dumps(output),
            status=201
        )
        return authenticate(response, profile)
    return notfound_response()
@app.route('/api/v1/accounts/<username>/tasks/<int:task_id>', methods=['GET'])
@auth.login_required
def task_detail(username, task_id):
    """Show one task, provided it belongs to the given user."""
    profile = get_profile(username)
    if not profile:
        return notfound_response()
    task = Task.query.get(task_id)
    if task not in profile.tasks:
        return notfound_response()
    body = json.dumps({'username': username, 'task': task.to_dict()})
    detail = Response(response=body, mimetype="application/json")
    return authenticate(detail, profile)
@app.route('/api/v1/accounts/<username>/tasks/<int:task_id>', methods=['PUT'])
@auth.login_required
def task_update(username, task_id):
    """Update one task if that task belongs to the provided user."""
    profile = get_profile(username)
    if profile:
        task = Task.query.get(task_id)
        if task in profile.tasks:
            # Partial update: only fields present in the form are touched;
            # everything else keeps its current value.
            if 'name' in request.form:
                task.name = request.form['name']
            if 'note' in request.form:
                task.note = request.form['note']
            if 'completed' in request.form:
                task.completed = request.form['completed']
            if 'due_date' in request.form:
                due_date = request.form['due_date']
                # An empty due_date clears the field instead of parsing it.
                task.due_date = datetime.strptime(due_date, INCOMING_DATE_FMT) if due_date else None
            db.session.add(task)
            db.session.commit()
            output = {'username': username, 'task': task.to_dict()}
            response = Response(
                mimetype="application/json",
                response=json.dumps(output)
            )
            return authenticate(response, profile)
    return notfound_response()
@app.route('/api/v1/accounts/<username>/tasks/<int:task_id>', methods=['DELETE'])
@auth.login_required
def task_delete(username, task_id):
    """Delete one task if that task belongs to the provided user."""
    # Bug fix: this endpoint was registered with methods=['GET'], which both
    # collided with task_detail's GET route and contradicted the API index
    # ('delete task': 'DELETE ...'); it is now a DELETE route.
    profile = get_profile(username)
    if profile:
        task = Task.query.get(task_id)
        if task in profile.tasks:
            db.session.delete(task)
            db.session.commit()
            output = {'username': username, 'msg': 'Deleted.'}
            response = Response(
                mimetype="application/json",
                response=json.dumps(output)
            )
            return authenticate(response, profile)
    return notfound_response()
| StarcoderdataPython |
# Multiple Comparisons
# the way vs. the better way
# simplify chained comparison
# <NAME>
# <NAME> day of 2020

time_of_the_day = 6
day_of_the_week = 'mon'

# this way
if time_of_the_day < 12 and time_of_the_day > 6:
    print('Good morning')

# a better way
if 6 < time_of_the_day < 12:
    print('Good morning')

# this way
if day_of_the_week == "Mon" or day_of_the_week == "Wed" or day_of_the_week == "Fri" or day_of_the_week == "Sun":
    print('its just a week day')

# a better way
if day_of_the_week in "Mon Wed Fri Sun".split():  # you can also specify a tuple ("Mon", "Wed", "Fri", "Sun")
    print('its just a week day')

# this way
if time_of_the_day < 17 and time_of_the_day > 10 and day_of_the_week == 'mon':
    print('its a working day')

# a better way
# Bug fix: all() takes a single iterable, not separate positional arguments;
# the previous call all(a, b, c) raised TypeError as soon as the script ran.
if all((time_of_the_day < 17, time_of_the_day > 10, day_of_the_week == 'mon')):
    print('its a working day')

# similar way use 'any' for logical operator 'or'
# The way is on the way
6500984 | import statistics
from colors import Colors
from shared.utils import Utils
class Informer():
    """Present timing statistics gathered by the scanner (timing-attack report)."""
    def __init__(self, averageTimes):
        # Mapping of user -> average response time (assumed; populated by the
        # caller -- TODO confirm against the scanner module).
        self.times = averageTimes
    """
    Preliminar report with overall data
    """
    def print_response_times_data(self, times):
        # TODO: implement full report with enhanced UI
        median = statistics.median(times)
        maxDelta = Utils.maxDelta(times)
        # Ratio of the largest time difference to the aggregate median; a low
        # ratio suggests response times do not discriminate between users.
        maxRatio = maxDelta / median
        print("\n>> Max Time Diff: {:.6f}".format(maxDelta))
        print(">> Aggregate Median: {:.6f}".format(median))
        # print(">> MaxDiff ratio: {:.2f}".format(maxRatio * 100) + "%")
        if maxRatio < 10.0:
            print(Colors.WARNING)
            print("\n[!] Warning: Max Time Difference to median is too low ( < 10% ).")
            print("[!] The site may not be vulnerable or no user existed from given list.\033[00m")
        # Interactive prompt: offer the detailed per-user breakdown.
        report = input("\n[-] Scan done. Output full report? (y/N) \n")
        if report == 'y' or report == 'Y':
            self.print_per_user_data(median)
    """
    Exhaustive per-user data
    """
    def print_per_user_data(self, median):
        # Color-code each user by how far their mean response time deviates
        # from the aggregate median (larger deviation = more interesting).
        for user in self.times:
            av_time = self.times[user]
            deviation_ratio = av_time / median
            user_stats = "{:15}".format(user)
            deviation_stats = "{:9.2f}".format(deviation_ratio)
            if deviation_ratio >= 100:
                self.color_print(Colors.FAIL, user_stats, deviation_stats)
            elif deviation_ratio < 100 and deviation_ratio >= 20:
                self.color_print(Colors.WARNING, user_stats, deviation_stats)
            elif deviation_ratio < 20 and deviation_ratio >= 5:
                self.color_print(Colors.OKGREEN, user_stats, deviation_stats)
            elif deviation_ratio < 5 and deviation_ratio >= 1:
                self.color_print(Colors.BGREEN, user_stats, deviation_stats)
            else:
                self.color_print(Colors.OKCYAN, user_stats, deviation_stats)
    """
    Verbose output based on ASCII color codes
    """
    def color_print(self, COLOR, user_stats, deviation_stats):
        # \033[00m resets the terminal color after each colored field.
        print("| User: " + COLOR + user_stats + "\033[00m | Deviation: " + COLOR + deviation_stats + "\033[00m |")
5058506 | # Copyright 2018 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from distutils.version import LooseVersion
from requests.exceptions import HTTPError
@pytest.fixture(scope='function')
def nameserver(mgmt_root):
    """Create an LTM DNS nameserver on the device; delete it after the test."""
    resource = mgmt_root.tm.ltm.dns.nameservers.nameserver.create(
        name='ns1',
        address='1.1.1.1',
        port=53
    )
    yield resource
    # Teardown: remove the resource so each test starts from a clean device.
    resource.delete()
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'),
    reason='This collection is fully implemented on 12.0.0 or greater.'
)
class TestAuditLogs(object):
    # NOTE(review): despite the class name, these tests exercise the LTM DNS
    # nameserver endpoint, not audit logs -- consider renaming.
    def test_update_refresh(self, nameserver):
        """update() pushes a change; refresh() re-reads it from the device."""
        assert nameserver.kind == 'tm:ltm:dns:nameserver:nameserverstate'
        assert nameserver.port == 53
        nameserver.update(port=1234)
        nameserver.refresh()
        assert nameserver.port == 1234
    def test_load_no_object(self, mgmt_root):
        """Loading a nonexistent nameserver must raise a 404 HTTPError."""
        with pytest.raises(HTTPError) as err:
            mgmt_root.tm.ltm.dns.nameservers.nameserver.load(name='not-found')
        assert err.value.response.status_code == 404
    def test_load(self, nameserver, mgmt_root):
        """Two loads of the same resource return equivalent objects."""
        r1 = mgmt_root.tm.ltm.dns.nameservers.nameserver.load(name='ns1')
        assert r1.kind == 'tm:ltm:dns:nameserver:nameserverstate'
        r2 = mgmt_root.tm.ltm.dns.nameservers.nameserver.load(name='ns1')
        assert r1.kind == r2.kind
        assert r1.selfLink == r2.selfLink
    def test_collection(self, nameserver, mgmt_root):
        """The collection contains exactly the one fixture-created resource."""
        collection = mgmt_root.tm.ltm.dns.nameservers.get_collection()
        assert len(collection) == 1
179932 | # -*- coding: utf-8 -*-
# Authors: <NAME> <<EMAIL>>
import unittest
from .. import SingleElementinaSortedArray
class test_SingleElementinaSortedArray(unittest.TestCase):
    # Shared Solution instance; singleNonDuplicate returns the one element
    # that appears exactly once in a sorted array where all others appear twice.
    solution = SingleElementinaSortedArray.Solution()
    def test_singleNonDuplicate(self):
        """The odd one out is found regardless of its position in the array."""
        self.assertEqual(self.solution.singleNonDuplicate([1, 1, 2, 2, 3, 4, 4]), 3)
        self.assertEqual(self.solution.singleNonDuplicate([1, 1, 2, 3, 3, 4, 4, 8, 8]), 2)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
11359410 | <gh_stars>10-100
""" Command line interface for working with cached data for YATSM algorithms
"""
import fnmatch
import logging
import os
import time
import click
from . import options
from .. import io
from ..cache import (get_line_cache_name, get_line_cache_pattern,
update_cache_file, write_cache_file)
from ..config_parser import parse_config_file
from ..utils import csvfile_to_dataframe, distribute_jobs, get_image_IDs
logger = logging.getLogger('yatsm')
@click.command(short_help='Create or update cached timeseries data for YATSM')
@options.arg_config_file
@options.arg_job_number
@options.arg_total_jobs
@click.option('--update', 'update_pattern', metavar='<pattern>',
              help='Create new cache files by updating old cache files '
                   'matching provided pattern')
@click.option('--interlace', is_flag=True,
              help='Assign rows interlaced by job instead of sequentially')
@click.pass_context
def cache(ctx, config, job_number, total_jobs, update_pattern, interlace):
    """Create or update cached timeseries data for YATSM.

    Distributes image rows across `total_jobs` workers, then for each
    assigned row either updates an existing cache file (when --update
    matches one) or reads the row from every image and writes a new cache
    file.
    """
    cfg = parse_config_file(config)
    # Make sure the cache directory exists before writing into it.
    if not os.path.isdir(cfg['dataset']['cache_line_dir']):
        os.makedirs(cfg['dataset']['cache_line_dir'])
    df = csvfile_to_dataframe(cfg['dataset']['input_file'],
                              cfg['dataset']['date_format'])
    df['image_IDs'] = get_image_IDs(df['filename'])
    # Image geometry comes from the first file; ncol/dtype are currently
    # unused here -- presumably kept for parity with other commands (TODO confirm).
    nrow, ncol, nband, dtype = io.get_image_attribute(df['filename'][0])
    # Determine lines to work on
    job_lines = distribute_jobs(job_number, total_jobs, nrow,
                                interlaced=interlace)
    logger.debug('Responsible for lines: {l}'.format(l=job_lines))
    # Determine file reader
    if cfg['dataset']['use_bip_reader']:
        logger.debug('Reading in data from disk using BIP reader')
        image_reader = io.bip_reader
    else:
        logger.debug('Reading in data from disk using GDAL')
        image_reader = io.gdal_reader
    # Attempt to update cache files
    previous_cache = None
    if update_pattern:
        previous_cache = fnmatch.filter(
            os.listdir(cfg['dataset']['cache_line_dir']), update_pattern)
        if not previous_cache:
            logger.warning('Could not find cache files to update with pattern '
                           '%s' % update_pattern)
        else:
            logger.debug('Found %s previously cached files to update' %
                         len(previous_cache))
    for job_line in job_lines:
        cache_filename = get_line_cache_name(cfg['dataset'], len(df),
                                             job_line, nband)
        logger.debug('Caching line {l} to {f}'.format(
            l=job_line, f=cache_filename))
        start_time = time.time()
        # Find matching cache file
        update = False
        if previous_cache:
            pattern = get_line_cache_pattern(job_line, nband, regex=False)
            potential = fnmatch.filter(previous_cache, pattern)
            if not potential:
                logger.info('Found zero previous cache files for '
                            'line {l}'.format(l=job_line))
            elif len(potential) > 1:
                # Ambiguous match: arbitrarily keep the first candidate.
                logger.info('Found more than one previous cache file for '
                            'line {l}. Keeping first'.format(l=job_line))
                update = os.path.join(cfg['dataset']['cache_line_dir'],
                                      potential[0])
            else:
                update = os.path.join(cfg['dataset']['cache_line_dir'],
                                      potential[0])
                logger.info('Updating from cache file {f}'.format(f=update))
        if update:
            # Incremental path: reuse rows from the old cache and read only
            # the images it is missing.
            update_cache_file(df['filename'], df['image_IDs'],
                              update, cache_filename,
                              job_line, image_reader)
        else:
            if cfg['dataset']['use_bip_reader']:
                # Use BIP reader
                logger.debug('Reading in data from disk using BIP reader')
                Y = io.bip_reader.read_row(df['filename'], job_line)
            else:
                # Read in data just using GDAL
                logger.debug('Reading in data from disk using GDAL')
                Y = io.gdal_reader.read_row(df['filename'], job_line)
            write_cache_file(cache_filename, Y, df['image_IDs'])
        logger.debug('Took {s}s to cache the data'.format(
            s=round(time.time() - start_time, 2)))
4981884 | <gh_stars>0
import numpy as np
import time
current_dir = 0 #0 = up , 1 = down ||, 2 = left , 3 = right
orientation = 0
def horizontal_move(direction):
if(direction == 'left'):
current_dir = 2
print 'moving left' + str(current_dir)
time.sleep(2)
elif(direction == 'right'):
current_dir = 3
print 'moving left' + str(current_dir)
time.sleep(2)
def vertical_move(direction):
if(direction == 'up'):
print 'moving forward'
orientation = 0
time.sleep(1)
elif(direction == 'down'):
print 'moving backward'
time.sleep(1)
orientation = 1
elif(direction == 'same'):
print 'stopping'
# Replay a recorded route: compare each waypoint's x coordinate with the
# previous one and issue forward / stop / backward commands accordingly.
# NOTE(review): the y coordinate is tracked (prev_y) but never used for
# movement -- presumably horizontal_move was meant to handle it; TODO confirm.
r = np.load('Route_path_2.npy')
# print r
# ---- store initial position
prev_x,prev_y = r[0]
print prev_x,prev_y
# ----
for x,y in r:
    print x
    print y
    if (x > prev_x):
        vertical_move('up')
    elif (x == prev_x):
        vertical_move('same')
    elif(x < prev_x):
        vertical_move('down')
    prev_x = x
    prev_y = y
3588320 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from pathlib import Path
from itertools import islice
class My_dict(dict):
    """A dict subclass with an explicit add(key, value) helper."""
    def __init__(self):
        # Bug fix (latent): the old __init__ rebound the *local* name `self`
        # to a fresh dict, which had no effect on the instance at all;
        # delegating to dict's initialiser is the correct equivalent.
        super(My_dict, self).__init__()
    def add(self, key, value):
        """Store value under key (same as self[key] = value)."""
        self[key] = value
class Df():
    """Pipeline wrapper around a task-switching-paradigm (tsp) results CSV.

    Each tsp_* method transforms self.df in place (they are order-dependent:
    tsp_df -> tsp_scan -> tsp_struct -> ...) or stores derived statistics on
    dedicated attributes (self.stack, self.RTs, self.first_switch_rt, self.ACC).
    """
    def __init__(self, raw_data_location):
        # Load the raw experiment export; first row is the header.
        df = pd.read_csv(raw_data_location, header = 0)
        self.df = df
    def tsp_df(self):
        """Number trials per participant and normalise column names."""
        df = self.df
        df1 = pd.DataFrame()
        for pid, g in df.groupby(['auto_participant_id']):
            g['index_raw'] = g.index
            # Re-number this participant's rows 1..n in file order.
            n = 0
            for i, r in g.iterrows():
                n = n + 1
                g.at[i, 'index_raw'] = n
            df1 = pd.concat([df1, g], sort=False)
        df1.rename(columns= {
            "trial_task_type":"type",
            "answer_index":"target_ans",
            "response_answer":"participant_ans",
            "response_enabled_time":"time_response_enabled",
            "response_time":"time_response_submitted",
            "stimulus_on":"time_start",
            "response_correct":"accuracy",
            }, inplace=True
        )
        # delete all columns but these
        df1 = df1[[
            'index_raw',
            'auto_participant_id',
            'participant_age',
            'participant_gender',
            'type',
            'trial_type',
            'trial_index',
            'time_elapsed',
            'time_start',
            'time_response_enabled',
            'time_response_submitted',
            'participant_ans',
            'target_ans',
            'accuracy'
            ]]
        self.df = df1
    def tsp_scan(self):
        """Drop unanswered trials and store dataset demographics in self.stack."""
        df = self.df
        # determine how many nonanswered trials were there
        xx = (len(df['participant_ans']))
        #drops non-answered trials
        df = df[~df['participant_ans'].isnull()]
        yy = (len(df['participant_ans']))
        # number of trials missed
        zz = xx - yy
        # number of trials missed as a % out of total
        vv = zz / xx * 100
        vv = int(vv)
        no_participants = df['auto_participant_id'].nunique()
        # Keep one row per participant for the demographic summary.
        df = df.drop_duplicates(subset='auto_participant_id', keep='last', inplace=False)
        mean_age = df['participant_age'].mean()
        mean_age = int(mean_age)
        xx = df['participant_gender'].value_counts()
        gender = {}
        gender_two = {'M':0, 'F':0}
        # NOTE(review): Series.iteritems() was removed in pandas 2.0; use
        # .items() when upgrading.
        for field, value in xx.iteritems():
            gender[field] = value
        # Fold free-text gender labels into M/F counts (case-insensitive
        # exact matches on 'male'/'female'; other labels are dropped).
        for index in gender:
            male = index.lower() == 'male'
            female = index.lower() == 'female'
            if male==True:
                gender_two['M'] = gender_two['M'] + gender[index]
            if female==True:
                gender_two['F'] = gender_two['F'] + gender[index]
        details = {'no_missed_trials':zz,'percent_trials_missed':vv,'no_participants':no_participants,'mean_age':mean_age,'no_genders':gender_two}
        self.stack = details
    def tsp_struct(self):
        """Derive response_time, switch_type and occurence columns."""
        df = self.df
        df1 = pd.DataFrame()
        df2 = pd.DataFrame()
        df3 = pd.DataFrame()
        df4 = pd.DataFrame()
        df5 = pd.DataFrame()
        # 'changed' column
        # NOTE(review): `== np.nan` is always False (NaN never equals NaN),
        # so the "missing response -> 3" branch of this np.where can never
        # fire; .isnull() is probably what was intended -- confirm.
        df['response_time'] = np.where((
            df['trial_type'] == 'ts-trial') & (df['time_response_submitted'] == np.nan), 3,
            df['time_response_submitted'].sub(df['time_response_enabled'], axis=0)
        )
        # Mark task-change boundaries: info cards precede a task switch.
        for index, row in df.iterrows():
            if row['trial_type'] == 'ts-info-card':
                df.at[index, 'changed'] = 1
            else:
                df.at[index, 'changed'] = 0
        df1 = df
        # Shift by one so each trial knows whether it FOLLOWS an info card.
        df1['changed_shifted'] = df1['changed'].shift(1)
        df2 = df1.drop(columns = ['changed'])
        # # 'block' column
        # for gi, gv in df1.groupby(['auto_participant_id']):
        #     j = 1
        #     for index1, row1 in gv.iterrows():
        #         if row1['trial_type'] == 'ts-block-break':
        #             j = j + 1
        #         else:
        #             j = j
        #         gv.at[index1, 'block'] = j
        #     df2 = pd.concat([df2, gv], sort=False)
        # 'switchtype' column
        # Keep only actual trials; drop info cards, breaks, etc.
        for gri, grv in df2.groupby(['auto_participant_id']):
            for indx, rowx in grv['trial_type'].iteritems():
                if rowx == 'ts-trial':
                    continue
                else:
                    grv.drop(indx, inplace=True)
            df3 = pd.concat([df3, grv], sort=False)
        for rindex, rgroup in df3.groupby(['auto_participant_id']):
            # NOTE(review): sort_values without inplace/assignment has no
            # effect here -- confirm whether the sort was intended to stick.
            rgroup.sort_values('index_raw')
            # The very first post-info-card trial has no predecessor task:
            # label it NONE-<task>.
            for index, row in rgroup.iterrows():
                if np.logical_and(row['changed_shifted'] == 1, row['index_raw'] == 2):
                    if row['type'] == 'ts-trial-digit-span':
                        st = 'NONE-DS'
                    if row['type'] == 'ts-trial-spatial-span':
                        st = 'NONE-SS'
                    if row['type'] == 'ts-trial-spatial-rotation':
                        st = 'NONE-SR'
                    rgroup.at[index, 'switch_type'] = str(st)
            # Walk consecutive trial pairs to label FROM-TO switch types.
            row_iterator = rgroup.iterrows()
            _, previous = next(row_iterator)
            for index, row in row_iterator:
                st = 'none'
                if row['changed_shifted'] == 1:
                    if row['type'] == 'ts-trial-digit-span' and previous['type'] == 'ts-trial-spatial-span':
                        st = 'SS-DS'
                    if row['type'] == 'ts-trial-digit-span' and previous['type'] == 'ts-trial-spatial-rotation':
                        st = 'SR-DS'
                    if row['type'] == 'ts-trial-spatial-span' and previous['type'] == 'ts-trial-digit-span':
                        st = 'DS-SS'
                    if row['type'] == 'ts-trial-spatial-span' and previous['type'] == 'ts-trial-spatial-rotation':
                        st = 'SR-SS'
                    if row['type'] == 'ts-trial-spatial-rotation' and previous['type'] == 'ts-trial-digit-span':
                        st = 'DS-SR'
                    if row['type'] == 'ts-trial-spatial-rotation' and previous['type'] == 'ts-trial-spatial-span':
                        st = 'SS-SR'
                    if row['type'] == 'ts-trial-spatial-span' and previous['type'] == 'ts-trial-spatial-span':
                        pass
                    if row['type'] == 'ts-trial-spatial-rotation' and previous['type'] == 'ts-trial-spatial-rotation':
                        pass
                    # NOTE(review): this condition duplicates the DS-SS case
                    # above and overrides nothing (pass); likely meant to be
                    # the DS/DS same-task case -- confirm.
                    if row['type'] == 'ts-trial-spatial-span' and previous['type'] == 'ts-trial-digit-span':
                        pass
                previous = row
                rgroup.at[index, 'switch_type'] = str(st)
            df4 = pd.concat([df4, rgroup], sort=False)
        # 'occurence' column
        # Count, per participant and task type, how many switches into that
        # task have happened so far.
        for group_index, group_value in df4.groupby(['auto_participant_id', 'type']):
            k = 0
            for iindex, rrow in group_value.iterrows():
                if rrow['changed_shifted'] == 1:
                    k = k + 1
                else:
                    k = k
                group_value.at[iindex, 'occurence'] = k
            df5 = pd.concat([df5, group_value], sort=False)
        df5 = df5.drop(columns=['time_response_submitted', 'time_response_enabled', 'time_elapsed'])
        self.df = df5
    def tsp_mrt(self):
        """Store mean/median/SD/first response times per (participant, type, occurence) in self.RTs."""
        df = self.df
        df = df.set_index(['auto_participant_id', 'type', 'occurence'])
        RTs = My_dict()
        # mean response time (mrt) calc. for each occurence per type
        for title, df_mrt1 in df.groupby(level=[0,1,2]):
            df_mrt1 = df_mrt1.apply(pd.to_numeric, errors = 'coerce').dropna(how = 'all')
            mrt = df_mrt1['response_time'].mean()
            SD = df_mrt1['response_time'].std()
            med = df_mrt1['response_time'].median()
            srt = df_mrt1['response_time'].iloc[0]
            # print('For',title,':')
            # print('MRT=', mrt, 'SD=', SD, 'MED RT=', med, 'SWITCH RT=', srt)
            # print('****************************************************************************************')
            #Need to create an array w/ the values calculated above
            a = np.array([mrt, med, SD, srt])
            # Every row in the group maps to the same stats array; since the
            # group shares one index tuple the dict ends up with one entry.
            for indexx, roww in df_mrt1.iterrows():
                RTs.key = indexx
                RTs.value = a
                RTs.add(RTs.key, RTs.value)
            # This stays indented here! :)
        self.RTs = RTs
    def tsp_switchrt(self):
        """Average the first trial(s) after each switch and store per-group values in self.first_switch_rt."""
        df = self.df
        df1 = pd.DataFrame()
        df2 = pd.DataFrame()
        df3 = pd.DataFrame()
        # Normalise missing response times to NaN within each group.
        for group_i, group_v in df.groupby(['type', 'occurence']):
            for index, row in group_v['response_time'].iteritems():
                if pd.notnull(row):
                    continue
                else:
                    group_v.at[index, 'response_time'] = np.NaN
            df1 = pd.concat([df1, group_v], sort=False)
        for gi, gv in df1.groupby(['type', 'occurence']):
            # n = number of trials in this (type, occurence) group.
            n = 0
            for index, row in gv.iterrows():
                n = n + 1
            # ----- can change to % of trials ( m = x% of n) -----
            # here dicates over how many trials the RT is averaged over (m), dependant on how many
            # trials are in the overall group (n).
            ##
            # eg, when the number of overall trials in the group is less than 3 (if n < 3), then
            # the number of trials to average over is 0 (m = 0), and the rows are left empty (np.nan).
            # NOTE(review): as written every branch sets m = 1, and the n < 3
            # branch both blanks the column AND still computes a value below;
            # the comments describe an older behavior -- confirm intent.
            if n < 3:
                m = 1
                for i, r in gv.iterrows():
                    gv.at[i, 'first_switch_rt'] = np.NaN
            elif n >= 3 and n < 5:
                m = 1
            elif n >= 5:
                m = 1
            number_of_trials = 0
            overall_rt = 0
            # the 'islice' tells pandas to iterate with iterrows over the first 'm' rows
            for ind, ro in islice(gv.iterrows(), m):
                number_of_trials = number_of_trials + 1
                overall_rt = overall_rt + ro['response_time']
            j = (overall_rt/number_of_trials)
            # `index` here is the LAST row index from the counting loop above.
            gv.at[index, 'first_switch_rt'] = j
            df2 = pd.concat([df2, gv], sort=False)
        self.df = df2
        # when a group has less than 3 trials in it, the switch_rt is not calculated (m = 0).
        # if there are NaN values in any of the rows of a column, that column returns NaN as a t-test
        # value for any t-test calculations it is involved in. therefore i have excluded those rows below:
        for gri, grv in df2.groupby(['type', 'occurence']):
            for indx, rowx in grv['first_switch_rt'].iteritems():
                if pd.notnull(rowx):
                    continue
                else:
                    grv.drop(indx, inplace=True)
            df3 = pd.concat([df3, grv], sort=False)
        df4 = df3.set_index(['type', 'occurence'])
        first_switch_rt = My_dict()
        for indexx, roww in df4.iterrows():
            first_switch_rt.key = indexx
            first_switch_rt.value = roww['first_switch_rt']
            first_switch_rt.add(first_switch_rt.key, first_switch_rt.value)
        self.first_switch_rt = first_switch_rt
    # Calculate Accuracy
    def tsp_accuracy(self):
        """Print overall accuracy and store per-group accuracy arrays in self.ACC."""
        df = self.df
        df = df.set_index(['auto_participant_id', 'type', 'occurence'])
        #OVERALL % ACC
        number_correct = df['accuracy'].sum()
        number_of_trials = df['participant_ans'].count()
        overall_accuracy = (number_correct / number_of_trials) * 100
        print('OVERALL ACCURACY =', overall_accuracy)
        ACC = My_dict()
        #GROUPED % ACC
        for group_i, group_v in df.groupby(level=[0,1,2]):
            # The inner loop recomputes the same group-level ratio each pass;
            # only the final value survives.
            for gi, gv in group_v.iterrows():
                overall_accuracy = 0
                corr = group_v['accuracy'].sum()
                total = len(group_v['accuracy'])
                overall_accuracy = (corr / total) * 100
            a = np.array([overall_accuracy])
            for indexx, roww in group_v.iterrows():
                ACC.key = indexx
                ACC.value = a
                ACC.add(ACC.key, ACC.value)
        self.ACC = ACC
    # print("MIXED EFFECTS*************************************************************************************")
    # md1 = smf.mixedlm("first_switch_rt ~ switch_type + occurence + auto_participant_id ", df_behavstats, groups=df_behavstats["auto_participant_id"])
    # mdf1 = md1.fit()
    # print(mdf1.summary())
    # print("*************************************************************************************")
| StarcoderdataPython |
6518660 | <filename>examples/recsys/multivae.py
# flake8: noqa
from typing import Dict, List
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from catalyst import dl, metrics
from catalyst.contrib.datasets import MovieLens
from catalyst.contrib.layers import Normalize
from catalyst.utils.misc import set_global_seed
def collate_fn_train(batch: List[torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Collate for training: binarize each user's interaction vector.

    Inputs and targets are the same 0/1 float matrix (autoencoder setup).
    """
    binarized = [(vec > 0).to(torch.float32) for vec in batch]
    return {"inputs": torch.stack(binarized), "targets": torch.stack(binarized)}
def collate_fn_valid(batch: List[torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Collate for validation: binarize interactions and hold out ~20%.

    Targets are each user's full binarized vector; inputs are the same
    vector with roughly ``test_prop`` of the interacted items zeroed out
    at random, so metrics measure recovery of the held-out items.
    """
    test_prop = 0.2
    targets = [u_items.gt(0).to(torch.float32) for u_items in batch]
    inputs = []
    for u_items in targets:
        num_test_items = int(test_prop * torch.count_nonzero(u_items))
        u_input_items = u_items.clone()
        # BUGFIX: torch.multinomial raises "cannot sample n_sample <= 0"
        # when num_samples is 0, which happened for every user with fewer
        # than 5 interactions (int(0.2 * nnz) == 0). Skip sampling then.
        if num_test_items > 0:
            # multinomial over the 0/1 vector samples only interacted indices.
            idx = u_items.multinomial(num_samples=num_test_items, replacement=False)
            u_input_items[idx] = 0
        inputs.append(u_input_items)
    return {"inputs": torch.stack(inputs), "targets": torch.stack(targets)}
class MultiVAE(nn.Module):
    """Multinomial variational autoencoder for collaborative filtering.

    The encoder maps a user's (normalized, dropped-out) item vector to the
    mean and log-variance of a diagonal Gaussian; the decoder maps a latent
    sample back to item logits.

    NOTE: submodule names set via add_module() determine state_dict keys;
    do not rename them without a checkpoint migration.

    Args:
        p_dims: decoder layer sizes, latent dim first, item count last.
        q_dims: encoder layer sizes; defaults to ``p_dims`` reversed and
            must mirror ``p_dims`` at both ends.
        dropout: dropout probability applied to the normalized input.
    """
    def __init__(self, p_dims, q_dims=None, dropout=0.5):
        super().__init__()
        self.p_dims = p_dims
        if q_dims:
            assert (
                q_dims[0] == p_dims[-1]
            ), "In and Out dimensions must equal to each other"
            assert (
                q_dims[-1] == p_dims[0]
            ), "Latent dimension for p- and q- network mismatches."
            self.q_dims = q_dims
        else:
            self.q_dims = p_dims[::-1]
        # Last dimension of q- network is for mean and variance
        self.encoder = nn.Sequential()
        self.encoder.add_module("normalize", Normalize())
        self.encoder.add_module("dropout", nn.Dropout(dropout))
        for i, (d_in, d_out) in enumerate(zip(self.q_dims[:-2], self.q_dims[1:-1])):
            self.encoder.add_module(f"encoder_fc_{i + 1}", nn.Linear(d_in, d_out))
            self.encoder.add_module(f"encoder_tanh_{i + 1}", nn.Tanh())
        # Final encoder layer has doubled width: [mu | logvar] concatenated.
        self.encoder.add_module(
            f"encoder_fc_{len(self.q_dims) - 1}",
            nn.Linear(self.q_dims[-2], self.q_dims[-1] * 2),
        )
        self.decoder = nn.Sequential()
        for i, (d_in, d_out) in enumerate(zip(self.p_dims[:-2], self.p_dims[1:-1])):
            self.decoder.add_module(f"decoder_fc_{i + 1}", nn.Linear(d_in, d_out))
            self.decoder.add_module(f"decoder_tanh_{i + 1}", nn.Tanh())
        self.decoder.add_module(
            f"decoder_fc_{len(self.p_dims) - 1}",
            nn.Linear(self.p_dims[-2], self.p_dims[-1]),
        )
        self.encoder.apply(self.init_weights)
        self.decoder.apply(self.init_weights)
    def forward(self, x):
        """Encode ``x``, sample a latent via reparameterization, decode.

        Returns:
            Tuple of (reconstruction logits, mu, logvar).
        """
        z = self.encoder(x)
        # Split the doubled final encoder output into mean / log-variance.
        mu, logvar = z[:, : self.q_dims[-1]], z[:, self.q_dims[-1] :]
        z = self.reparameterize(mu, logvar)
        z = self.decoder(z)
        return z, mu, logvar
    def reparameterize(self, mu, logvar):
        # z = mu + sigma * eps while training; deterministic mean at eval.
        if self.training:
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            return mu + eps * std
        else:
            return mu
    def init_weights(self, m):
        # Xavier-normal weights and zero biases for every Linear layer.
        if isinstance(m, nn.Linear):
            nn.init.xavier_normal_(m.weight.data)
            nn.init.constant_(m.bias.data, 0)
class RecSysRunner(dl.Runner):
    """Catalyst runner computing the Multi-VAE loss with KL annealing.

    Tracks three additive metrics per loader pass: the multinomial
    reconstruction loss, the KL divergence, and their annealed sum.
    """
    def on_loader_start(self, runner):
        super().on_loader_start(runner)
        # Fresh running averages of the loss components for this loader.
        self.meters = {
            key: metrics.AdditiveMetric(compute_on_call=False)
            for key in ["loss_ae", "loss_kld", "loss"]
        }
    def handle_batch(self, batch):
        """One training/eval step: reconstruction loss + annealed KL term."""
        x = batch["inputs"]
        x_true = batch["targets"]
        x_recon, mu, logvar = self.model(x)
        # Linearly ramp the KL weight over total_anneal_steps, capped at
        # anneal_cap (beta-VAE style warm-up).
        anneal = min(
            self.hparams["anneal_cap"],
            self.batch_step / self.hparams["total_anneal_steps"],
        )
        # Multinomial log-likelihood reconstruction term.
        loss_ae = -torch.mean(torch.sum(F.log_softmax(x_recon, 1) * x, -1))
        # KL(q(z|x) || N(0, I)) for a diagonal Gaussian posterior.
        loss_kld = -0.5 * torch.mean(
            torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
        )
        loss = loss_ae + anneal * loss_kld
        # Expose logits/targets for the ranking-metric callbacks.
        self.batch.update({"logits": x_recon, "inputs": x, "targets": x_true})
        self.batch_metrics.update(
            {"loss_ae": loss_ae, "loss_kld": loss_kld, "loss": loss}
        )
        for key in ["loss_ae", "loss_kld", "loss"]:
            self.meters[key].update(self.batch_metrics[key].item(), self.batch_size)
    def on_loader_end(self, runner):
        # Fold per-batch averages into loader-level metrics.
        for key in ["loss_ae", "loss_kld", "loss"]:
            self.loader_metrics[key] = self.meters[key].compute()[0]
        super().on_loader_end(runner)
if __name__ == "__main__":
set_global_seed(42)
train_dataset = MovieLens(root=".", train=True, download=True)
test_dataset = MovieLens(root=".", train=False, download=True)
loaders = {
"train": DataLoader(train_dataset, batch_size=32, collate_fn=collate_fn_train),
"valid": DataLoader(test_dataset, batch_size=32, collate_fn=collate_fn_valid),
}
item_num = len(train_dataset[0])
model = MultiVAE([200, 600, item_num], dropout=0.5)
optimizer = optim.Adam(model.parameters(), lr=0.001)
lr_scheduler = StepLR(optimizer, step_size=20, gamma=0.1)
engine = dl.Engine()
hparams = {
"anneal_cap": 0.2,
"total_anneal_steps": 6000,
}
callbacks = [
dl.NDCGCallback("logits", "targets", [20, 50, 100]),
dl.MAPCallback("logits", "targets", [20, 50, 100]),
dl.MRRCallback("logits", "targets", [20, 50, 100]),
dl.HitrateCallback("logits", "targets", [20, 50, 100]),
dl.BackwardCallback("loss"),
dl.OptimizerCallback("loss", accumulation_steps=1),
dl.SchedulerCallback(),
]
runner = RecSysRunner()
runner.train(
model=model,
optimizer=optimizer,
engine=engine,
hparams=hparams,
scheduler=lr_scheduler,
loaders=loaders,
num_epochs=100,
verbose=True,
timeit=False,
callbacks=callbacks,
logdir="./logs_multivae",
)
| StarcoderdataPython |
6625417 | from __future__ import absolute_import
import logging
import threading
from galaxy.web.stack import register_postfork_function
from .sleeper import Sleeper
log = logging.getLogger(__name__)
DEFAULT_MONITOR_THREAD_JOIN_TIMEOUT = 5
class Monitors(object):
    """Mixin providing a daemonized background monitor thread.

    Subclasses call _init_monitor_thread() during setup; the thread runs
    the subclass's ``monitor`` method (or an explicit ``target``) until
    stop_monitoring() flips ``monitor_running`` and shutdown_monitor()
    wakes any pending sleep and joins the thread.
    """
    def _init_monitor_thread(self, name, target_name=None, target=None, start=False, config=None):
        # Join timeout for shutdown_monitor(); a non-positive value
        # disables joining entirely.
        self.monitor_join_sleep = getattr(config, "monitor_thread_join_timeout", DEFAULT_MONITOR_THREAD_JOIN_TIMEOUT)
        self.monitor_join = self.monitor_join_sleep > 0
        # NOTE(review): monitor_sleeper is assigned but never used below --
        # the sleep/wake paths use self.sleeper. Confirm nothing external
        # relies on this attribute before removing it.
        self.monitor_sleeper = Sleeper()
        self.monitor_running = True
        if target is not None:
            # An explicit callable target excludes a named method target.
            assert target_name is None
            monitor_func = target
        else:
            target_name = target_name or "monitor"
            monitor_func = getattr(self, target_name)
        self.sleeper = Sleeper()
        self.monitor_thread = threading.Thread(name=name, target=monitor_func)
        # Daemonize so the monitor never blocks interpreter exit.
        self.monitor_thread.setDaemon(True)
        self._start = start
        # Defer the actual start until after any process fork, so the
        # thread lives in the worker process.
        register_postfork_function(self.start_monitoring)
    def start_monitoring(self):
        """Start the thread if _init_monitor_thread() was asked to."""
        if self._start:
            self.monitor_thread.start()
    def stop_monitoring(self):
        """Signal the monitor loop to exit at its next check."""
        self.monitor_running = False
    def _monitor_sleep(self, sleep_amount):
        # Interruptible sleep: shutdown_monitor() can wake it early.
        self.sleeper.sleep(sleep_amount)
    def shutdown_monitor(self):
        """Stop the loop, wake any pending sleep, optionally join the thread."""
        self.stop_monitoring()
        self.sleeper.wake()
        if self.monitor_join:
            log.debug("Joining monitor thread")
            self.monitor_thread.join(self.monitor_join_sleep)
| StarcoderdataPython |
168138 | <reponame>splumb/PuTTY
import sys
import numbers
import itertools
assert sys.version_info[:2] >= (3,0), "This is Python 3 code"
from numbertheory import *
class AffinePoint(object):
    """Base class for points on an elliptic curve.

    Coordinates are stored as ModP values; the group identity (point at
    infinity) is represented by ``infinite=True`` with x and y set to
    None. Subclasses supply check_equation() and __add__ for their curve
    form; negation, scalar multiplication, subtraction and comparisons
    are derived here.
    """
    def __init__(self, curve, *args):
        self.curve = curve
        if len(args) == 0:
            # No coordinates given: construct the point at infinity.
            self.infinite = True
            self.x = self.y = None
        else:
            assert len(args) == 2
            self.infinite = False
            self.x = ModP(self.curve.p, args[0])
            self.y = ModP(self.curve.p, args[1])
            self.check_equation()
    def __neg__(self):
        # Affine negation: mirror in the x-axis; identity is self-inverse.
        if self.infinite:
            return self
        return type(self)(self.curve, self.x, -self.y)
    def __mul__(self, rhs):
        # Scalar multiplication by binary double-and-add.
        if not isinstance(rhs, numbers.Integral):
            raise ValueError("Elliptic curve points can only be multiplied by integers")
        P = self
        if rhs < 0:
            # n*P == (-n)*(-P)
            rhs = -rhs
            P = -P
        toret = self.curve.point()
        n = 1
        nP = P
        while rhs != 0:
            # n walks the powers of two; nP is the matching doubling of P.
            if rhs & n:
                rhs -= n
                toret += nP
            n += n
            nP += nP
        return toret
    def __rmul__(self, rhs):
        return self * rhs
    def __sub__(self, rhs):
        return self + (-rhs)
    def __rsub__(self, rhs):
        return (-self) + rhs
    def __str__(self):
        if self.infinite:
            return "inf"
        else:
            return "({},{})".format(self.x, self.y)
    def __repr__(self):
        if self.infinite:
            args = ""
        else:
            args = ", {}, {}".format(self.x, self.y)
        return "{}.Point({}{})".format(type(self.curve).__name__,
                                       self.curve, args)
    def __eq__(self, rhs):
        if self.infinite or rhs.infinite:
            return self.infinite and rhs.infinite
        return (self.x, self.y) == (rhs.x, rhs.y)
    def __ne__(self, rhs):
        return not (self == rhs)
    def __lt__(self, rhs):
        raise ValueError("Elliptic curve points have no ordering")
    def __le__(self, rhs):
        raise ValueError("Elliptic curve points have no ordering")
    def __gt__(self, rhs):
        raise ValueError("Elliptic curve points have no ordering")
    def __ge__(self, rhs):
        raise ValueError("Elliptic curve points have no ordering")
    def __hash__(self):
        # Hash must agree with __eq__: identity hashes on the flag alone,
        # finite points on their coordinates.
        if self.infinite:
            return hash((True,))
        else:
            return hash((False, self.x, self.y))
class CurveBase(object):
    """Common curve base: point(*args) builds this curve's Point type."""
    def point(self, *args):
        # Factory so each subclass's nested Point class is used uniformly;
        # no args constructs the identity element.
        return self.Point(self, *args)
class WeierstrassCurve(CurveBase):
    """Short Weierstrass curve y^2 = x^3 + a*x + b over GF(p)."""
    class Point(AffinePoint):
        def check_equation(self):
            assert (self.y*self.y ==
                    self.x*self.x*self.x +
                    self.curve.a*self.x + self.curve.b)
        def __add__(self, rhs):
            # Standard chord-and-tangent addition law.
            if self.infinite:
                return rhs
            if rhs.infinite:
                return self
            if self.x == rhs.x and self.y != rhs.y:
                # P + (-P) is the identity.
                return self.curve.point()
            x1, x2, y1, y2 = self.x, rhs.x, self.y, rhs.y
            xdiff = x2-x1
            if xdiff != 0:
                # Distinct points: slope of the chord through them.
                slope = (y2-y1) / xdiff
            else:
                assert y1 == y2
                # Doubling: slope of the tangent at the point.
                slope = (3*x1*x1 + self.curve.a) / (2*y1)
            xp = slope*slope - x1 - x2
            yp = -(y1 + slope * (xp-x1))
            return self.curve.point(xp, yp)
    def __init__(self, p, a, b):
        self.p = p
        self.a = ModP(p, a)
        self.b = ModP(p, b)
    def cpoint(self, x, yparity=0):
        """Construct the point with x-coordinate x whose y has the given parity."""
        if not hasattr(self, 'sqrtmodp'):
            # Lazily build the square-root-mod-p solver on first use.
            self.sqrtmodp = RootModP(2, self.p)
        rhs = x**3 + self.a.n * x + self.b.n
        y = self.sqrtmodp.root(rhs)
        if (y - yparity) % 2:
            y = -y
        return self.point(x, y)
    def __repr__(self):
        return "{}(0x{:x}, {}, {})".format(
            type(self).__name__, self.p, self.a, self.b)
class MontgomeryCurve(CurveBase):
    """Montgomery curve b*y^2 = x^3 + a*x^2 + x over GF(p)."""
    class Point(AffinePoint):
        def check_equation(self):
            assert (self.curve.b*self.y*self.y ==
                    self.x*self.x*self.x +
                    self.curve.a*self.x*self.x + self.x)
        def __add__(self, rhs):
            # Chord-and-tangent addition adapted to the Montgomery form.
            if self.infinite:
                return rhs
            if rhs.infinite:
                return self
            if self.x == rhs.x and self.y != rhs.y:
                # P + (-P) is the identity.
                return self.curve.point()
            x1, x2, y1, y2 = self.x, rhs.x, self.y, rhs.y
            xdiff = x2-x1
            if xdiff != 0:
                # Distinct points: slope of the chord through them.
                slope = (y2-y1) / xdiff
            elif y1 != 0:
                assert y1 == y2
                # Doubling: slope of the tangent at the point.
                slope = (3*x1*x1 + 2*self.curve.a*x1 + 1) / (2*self.curve.b*y1)
            else:
                # If y1 was 0 as well, then we must have found an
                # order-2 point that doubles to the identity.
                return self.curve.point()
            xp = self.curve.b*slope*slope - self.curve.a - x1 - x2
            yp = -(y1 + slope * (xp-x1))
            return self.curve.point(xp, yp)
    def __init__(self, p, a, b):
        self.p = p
        self.a = ModP(p, a)
        self.b = ModP(p, b)
    def cpoint(self, x, yparity=0):
        """Construct the point with x-coordinate x whose y has the given parity."""
        if not hasattr(self, 'sqrtmodp'):
            # Lazily build the square-root-mod-p solver on first use.
            self.sqrtmodp = RootModP(2, self.p)
        rhs = (x**3 + self.a.n * x**2 + x) / self.b
        y = self.sqrtmodp.root(int(rhs))
        if (y - yparity) % 2:
            y = -y
        return self.point(x, y)
    def __repr__(self):
        return "{}(0x{:x}, {}, {})".format(
            type(self).__name__, self.p, self.a, self.b)
class TwistedEdwardsCurve(CurveBase):
    """Twisted Edwards curve a*x^2 + y^2 = 1 + d*x^2*y^2 over GF(p)."""
    class Point(AffinePoint):
        def check_equation(self):
            x2, y2 = self.x*self.x, self.y*self.y
            assert (self.curve.a*x2 + y2 == 1 + self.curve.d*x2*y2)
        def __neg__(self):
            # Edwards negation flips x (not y as in Weierstrass form).
            return type(self)(self.curve, -self.x, self.y)
        def __add__(self, rhs):
            # Unified Edwards addition law: no special cases needed for
            # the identity or for doubling.
            x1, x2, y1, y2 = self.x, rhs.x, self.y, rhs.y
            x1y2, y1x2, y1y2, x1x2 = x1*y2, y1*x2, y1*y2, x1*x2
            dxxyy = self.curve.d*x1x2*y1y2
            return self.curve.point((x1y2+y1x2)/(1+dxxyy),
                                    (y1y2-self.curve.a*x1x2)/(1-dxxyy))
    def __init__(self, p, d, a):
        self.p = p
        self.d = ModP(p, d)
        self.a = ModP(p, a)
    def point(self, *args):
        # This curve form represents the identity using finite
        # numbers, so it doesn't need the special infinity flag.
        # Detect a no-argument call to point() and substitute the pair
        # of integers that gives the identity.
        if len(args) == 0:
            args = [0, 1]
        return super(TwistedEdwardsCurve, self).point(*args)
    def cpoint(self, y, xparity=0):
        """Construct the point with the given y-coordinate and x parity."""
        if not hasattr(self, 'sqrtmodp'):
            # NOTE(review): the other curve classes construct
            # RootModP(2, self.p); confirm this one-argument call is the
            # intended signature for numbertheory.RootModP.
            self.sqrtmodp = RootModP(self.p)
        y = ModP(self.p, y)
        y2 = y**2
        radicand = (y2 - 1) / (self.d * y2 - self.a)
        x = self.sqrtmodp.root(radicand.n)
        if (x - xparity) % 2:
            x = -x
        return self.point(x, y)
    def __repr__(self):
        return "{}(0x{:x}, {}, {})".format(
            type(self).__name__, self.p, self.d, self.a)
def find_montgomery_power2_order_x_values(p, a):
    """Return every x-coordinate (including invalid-curve residues) that
    reaches the identity under repeated Montgomery x-only doubling.

    Args:
        p: the field prime.
        a: the curve's A coefficient (B is taken as 1 in the formulae).
    Returns:
        List of ints, deduplicated, in breadth-first discovery order.
    """
    # Find points on a Montgomery elliptic curve that have order a
    # power of 2.
    #
    # Motivation: both Curve25519 and Curve448 are abelian groups
    # whose overall order is a large prime times a small factor of 2.
    # The approved base point of each curve generates a cyclic
    # subgroup whose order is the large prime. Outside that cyclic
    # subgroup there are many other points that have large prime
    # order, plus just a handful that have tiny order. If one of the
    # latter is presented to you as a Diffie-Hellman public value,
    # nothing useful is going to happen, and RFC 7748 says we should
    # outlaw those values. And any actual attempt to outlaw them is
    # going to need to know what they are, either to check for each
    # one directly, or to use them as test cases for some other
    # approach.
    #
    # In a group of order p 2^k, an obvious way to search for points
    # with order dividing 2^k is to generate random group elements and
    # raise them to the power p. That guarantees that you end up with
    # _something_ with order dividing 2^k (even if it's boringly the
    # identity). And you also know from theory how many such points
    # you expect to exist, so you can count the distinct ones you've
    # found, and stop once you've got the right number.
    #
    # But that isn't actually good enough to find all the public
    # values that are problematic! The reason why not is that in
    # Montgomery key exchange we don't actually use a full elliptic
    # curve point: we only use its x-coordinate. And the formulae for
    # doubling and differential addition on x-coordinates can accept
    # some values that don't correspond to group elements _at all_
    # without detecting any error - and some of those nonsense x
    # coordinates can also behave like low-order points.
    #
    # (For example, the x-coordinate -1 in Curve25519 is such a value.
    # The reference ECC code in this module will raise an exception if
    # you call curve25519.cpoint(-1): it corresponds to no valid point
    # at all. But if you feed it into the doubling formula _anyway_,
    # it doubles to the valid curve point with x-coord 0, which in
    # turn doubles to the curve identity. Bang.)
    #
    # So we use an alternative approach which discards the group
    # theory of the actual elliptic curve, and focuses purely on the
    # doubling formula as an algebraic transformation on Z_p. Our
    # question is: what values of x have the property that if you
    # iterate the doubling map you eventually end up dividing by zero?
    # To answer that, we must solve cubics and quartics mod p, via the
    # code in numbertheory.py for doing so.
    E = EquationSolverModP(p)
    def viableSolutions(it):
        # Keep only solutions that are genuine elements of Z_p.
        for x in it:
            try:
                yield int(x)
            except ValueError:
                pass # some field-extension element that isn't a real value
    def valuesDoublingTo(y):
        # The doubling formula for a Montgomery curve point given only
        # by x coordinate is (x+1)^2(x-1)^2 / (4(x^3+ax^2+x)).
        #
        # If we want to find a point that doubles to some particular
        # value, we can set that formula equal to y and expand to get the
        # quartic equation x^4 + (-4y)x^3 + (-4ay-2)x^2 + (-4y)x + 1 = 0.
        return viableSolutions(E.solve_monic_quartic(-4*y, -4*a*y-2, -4*y, 1))
    # Breadth-first worklist; qset deduplicates, pos is the read cursor.
    queue = []
    qset = set()
    pos = 0
    def insert(x):
        if x not in qset:
            queue.append(x)
            qset.add(x)
    # Our ultimate aim is to find points that end up going to the
    # curve identity / point at infinity after some number of
    # doublings. So our starting point is: what values of x make the
    # denominator of the doubling formula zero?
    for x in viableSolutions(E.solve_monic_cubic(a, 1, 0)):
        insert(x)
    while pos < len(queue):
        y = queue[pos]
        pos += 1
        # Work backwards: everything that doubles to a known-bad value
        # is itself bad.
        for x in valuesDoublingTo(y):
            insert(x)
    return queue
p256 = WeierstrassCurve(0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff, -3, 0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b)
p256.G = p256.point(0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296,0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5)
p256.G_order = 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551
p384 = WeierstrassCurve(0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff, -3, 0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef)
p384.G = p384.point(0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7, 0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f)
p384.G_order = 0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973
p521 = WeierstrassCurve(0x01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff, -3, 0x0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00)
p521.G = p521.point(0x00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66,0x011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650)
p521.G_order = 0x01fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e91386409
curve25519 = MontgomeryCurve(2**255-19, 0x76d06, 1)
curve25519.G = curve25519.cpoint(9)
curve448 = MontgomeryCurve(2**448-2**224-1, 0x262a6, 1)
curve448.G = curve448.cpoint(5)
ed25519 = TwistedEdwardsCurve(2**255-19, 0x52036cee2b6ffe738cc740797779e89800700a4d4141d8ab75eb4dca135978a3, -1)
ed25519.G = ed25519.point(0x216936d3cd6e53fec0a4e231fdd6dc5c692cc7609525a7b2c9562d608f25d51a,0x6666666666666666666666666666666666666666666666666666666666666658)
ed25519.G_order = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed
ed448 = TwistedEdwardsCurve(2**448-2**224-1, -39081, +1)
ed448.G = ed448.point(0x4f1970c66bed0ded221d15a622bf36da9e146570470f1767ea6de324a3d3a46412ae1af72ab66511433b80e18b00938e2626a82bc70cc05e,0x693f46716eb6bc248876203756c9c7624bea73736ca3984087789c1e05a0c2d73ad3ff1ce67c39c4fdbd132c4ed7c8ad9808795bf230fa14)
ed448.G_order = 0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffff7cca23e9c44edb49aed63690216cc2728dc58f552378c292ab5844f3
| StarcoderdataPython |
5127245 | <filename>chalk/__init__.py
import math
from functools import reduce
from typing import Iterable, List, Tuple, Optional
try:
from importlib import metadata
except ImportError: # for Python<3.8
import importlib_metadata as metadata # type: ignore
from chalk.core import Diagram, Empty, Primitive
from chalk.shape import (
Arc,
Circle,
Rectangle,
Path,
Text,
Image,
Spacer,
Latex,
)
from chalk.point import Point, Vector
from chalk.trail import Trail
# Set library name the same as on PyPI
# must be the same as setup.py:setup(name=?)
__libname__: str = "chalk-diagrams" # custom dunder attribute
__version__ = metadata.version(__libname__)
ignore = [Trail, Vector]
def empty() -> Diagram:
    """Return the empty diagram (identity element for composition)."""
    return Empty()
def make_path(
    coords: List[Tuple[float, float]], arrow: bool = False
) -> Diagram:
    """Build a path diagram through ``coords``; optionally arrow-headed."""
    return Primitive.from_shape(Path.from_list_of_tuples(coords, arrow))
def circle(radius: float) -> Diagram:
    """Circle of the given radius."""
    return Primitive.from_shape(Circle(radius))
def arc(radius: float, angle0: float, angle1: float) -> Diagram:
    """Circular arc of ``radius`` from ``angle0`` to ``angle1``."""
    return Primitive.from_shape(Arc(radius, angle0, angle1))
def arc_between(
    point1: Tuple[float, float], point2: Tuple[float, float], height: float
) -> Diagram:
    """Makes an arc starting at point1 and ending at point2, with the midpoint
    at a distance of abs(height) away from the straight line from point1 to
    point2. A positive value of height results in an arc to the left of the
    line from point1 to point2; a negative value yields one to the right.
    The implementation is based on the function arcBetween from Haskell's
    diagrams:
    https://hackage.haskell.org/package/diagrams-lib-1.4.5.1/docs/src/Diagrams.TwoD.Arc.html#arcBetween
    """
    p = Point(*point1)
    q = Point(*point2)
    h = abs(height)
    v = q - p
    d = v.length
    if h < 1e-6:
        # Draw a line if the height is too small
        shape: Diagram = make_path([(0, 0), (d, 0)])
    else:
        # Determine the arc's angle θ and its radius r
        θ = math.acos((d**2 - 4 * h**2) / (d**2 + 4 * h**2))
        r = d / (2 * math.sin(θ))
        if height > 0:
            # bend left
            φ = -math.pi / 2
            dy = r - h
        else:
            # bend right
            φ = +math.pi / 2
            dy = h - r
        # Build the arc at the origin, then move it onto the chord.
        shape = (
            Primitive.from_shape(Arc(r, -θ, θ)).rotate(φ).translate(d / 2, dy)
        )
    return shape.rotate(v.angle).translate(p.x, p.y)
def polygon(sides: int, radius: float, rotation: float = 0) -> Diagram:
    """Polygon with ``sides`` sides, specified by circumscribed ``radius``."""
    return Primitive.from_shape(Path.polygon(sides, radius, rotation))
def regular_polygon(sides: int, side_length: float) -> Diagram:
    """Regular polygon specified by its ``side_length``."""
    return Primitive.from_shape(Path.regular_polygon(sides, side_length))
def hrule(length: float) -> Diagram:
    """Horizontal rule of the given length."""
    return Primitive.from_shape(Path.hrule(length))
def vrule(length: float) -> Diagram:
    """Vertical rule of the given length."""
    return Primitive.from_shape(Path.vrule(length))
def triangle(width: float) -> Diagram:
    """Equilateral triangle with side length ``width``."""
    return regular_polygon(3, width)
def rectangle(
    width: float, height: float, radius: Optional[float] = None
) -> Diagram:
    """Rectangle; ``radius`` rounds the corners when given."""
    return Primitive.from_shape(Rectangle(width, height, radius))
def image(local_path: str, url_path: Optional[str]) -> Diagram:
    """Image referenced by a local path and an optional URL path."""
    return Primitive.from_shape(Image(local_path, url_path))
def square(side: float) -> Diagram:
    """Square with the given side length."""
    return Primitive.from_shape(Rectangle(side, side))
def text(t: str, size: Optional[float]) -> Diagram:
    """Text element ``t`` with font size ``size``."""
    return Primitive.from_shape(Text(t, font_size=size))
def latex(t: str) -> Diagram:
    """Diagram produced from the LaTeX snippet ``t``."""
    return Primitive.from_shape(Latex(t))
def atop(diagram1: Diagram, diagram2: Diagram) -> Diagram:
    """Superimpose the two diagrams (delegates to Diagram.atop)."""
    return diagram1.atop(diagram2)
def beside(diagram1: Diagram, diagram2: Diagram) -> Diagram:
    """Place the diagrams side by side (delegates to Diagram.beside)."""
    return diagram1.beside(diagram2)
def place_at(
    diagrams: Iterable[Diagram], points: List[Tuple[float, float]]
) -> Diagram:
    """Translate each diagram to its paired (x, y) point and superimpose."""
    return concat(d.translate(x, y) for d, (x, y) in zip(diagrams, points))
def place_on_path(diagrams: Iterable[Diagram], path: Path) -> Diagram:
    """Translate each diagram to the corresponding point of ``path``."""
    return concat(d.translate(p.x, p.y) for d, p in zip(diagrams, path.points))
def above(diagram1: Diagram, diagram2: Diagram) -> Diagram:
    """Stack the diagrams vertically (delegates to Diagram.above)."""
    return diagram1.above(diagram2)
def concat(diagrams: Iterable[Diagram]) -> Diagram:
    """Superimpose all diagrams, starting from the empty diagram."""
    return reduce(atop, diagrams, empty())
def hstrut(width: Optional[float]) -> Diagram:
    """Invisible horizontal spacer; the empty diagram when width is None."""
    if width is None:
        return empty()
    return Primitive.from_shape(Spacer(width, 0))
def hcat(diagrams: Iterable[Diagram], sep: Optional[float] = None) -> Diagram:
    """Compose diagrams with ``|``, inserting an hstrut(sep) between each."""
    diagrams = iter(diagrams)
    start = next(diagrams, None)
    if start is None:
        # No diagrams at all: the result is the empty diagram.
        return empty()
    return reduce(lambda a, b: a | hstrut(sep) | b, diagrams, start)
def vstrut(height: Optional[float]) -> Diagram:
    """Invisible vertical spacer; the empty diagram when height is None."""
    if height is None:
        return empty()
    return Primitive.from_shape(Spacer(0, height))
def vcat(diagrams: Iterable[Diagram], sep: Optional[float] = None) -> Diagram:
    """Compose diagrams with ``/``, inserting a vstrut(sep) between each."""
    diagrams = iter(diagrams)
    start = next(diagrams, None)
    if start is None:
        # No diagrams at all: the result is the empty diagram.
        return empty()
    return reduce(lambda a, b: a / vstrut(sep) / b, diagrams, start)
def connect(diagram: Diagram, name1: str, name2: str) -> Diagram:
    """Path between the centers ("C") of two named subdiagrams."""
    return connect_outer(diagram, name1, "C", name2, "C")
def connect_outer(
    diagram: Diagram,
    name1: str,
    c1: str,
    name2: str,
    c2: str,
    arrow: bool = False,
) -> Diagram:
    """Path between cardinal points ``c1``/``c2`` of two named subdiagrams.

    Raises:
        AssertionError: if either name has no bounding box in ``diagram``.
    """
    bb1 = diagram.get_subdiagram_bounding_box(name1)
    bb2 = diagram.get_subdiagram_bounding_box(name2)
    assert bb1 is not None, f"Name {name1} not found"
    assert bb2 is not None, f"Name {name2} not found"
    points = [bb1.cardinal(c1), bb2.cardinal(c2)]
    return Primitive.from_shape(Path(points, arrow))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.